hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
ae27ad0d88e86f482a6afe1cac72c3607d0cd3e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef __INITIALIZE_CU__ #define __INITIALIZE_CU__ #include "gPET.h" #include "externCUDA.h" int nmat_h; using namespace std; Isotopes loadIsotopes() { Isotopes isotopes; ifstream infile("data/isotopes.txt"); FILEEXIST(infile); printf("reading /data/isotopes.txt\n"); infile >> isotopes.Ntype; infile.ignore(512,'#'); isotopes.halftime=new float[isotopes.Ntype]; isotopes.decayRatio=new float[isotopes.Ntype]; isotopes.coef=new float[isotopes.Ntype*8]; for(int i=0;i<isotopes.Ntype;i++) { infile >> isotopes.halftime[i]>>isotopes.decayRatio[i]>>isotopes.coef[8*i]>>isotopes.coef[8*i+1]>>isotopes.coef[8*i+2]; infile >> isotopes.coef[8*i+3]>>isotopes.coef[8*i+4]>>isotopes.coef[8*i+5]>>isotopes.coef[8*i+6]>>isotopes.coef[8*i+7]; cout << i <<" "<< isotopes.halftime[i]<<" " <<isotopes.decayRatio[i]<<" "<< \ isotopes.coef[8*i]<<" "<< isotopes.coef[8*i+7]<<endl; } printf("finish read isotopes;\n\n"); infile.close(); return isotopes; } Phantom loadPhantom(char matfile[100], char denfile[100],int* pdim, float* poffset, float* psize) /******************************************************************* c* Reads voxel geometry from an input file * * c******************************************************************/ { Phantom phantom; cout<<"\n\nloading phantom\n"; phantom.Unxvox = pdim[0]; phantom.Unyvox = pdim[1]; phantom.Unzvox = pdim[2]; printf("CT dimension: %f %f %f\n", phantom.Unxvox, phantom.Unyvox, phantom.Unzvox); phantom.Offsetx = poffset[0]; phantom.Offsety = poffset[1]; phantom.Offsetz = poffset[2]; printf("CT offset: %f %f %f\n", phantom.Offsetx, phantom.Offsety, phantom.Offsetz); phantom.Sizex = psize[0]; phantom.Sizey = psize[1]; phantom.Sizez = psize[2]; printf("CT Size: %f %f %f\n", phantom.Sizex, phantom.Sizey, phantom.Sizez); int numvox=phantom.Unxvox*phantom.Unyvox*phantom.Unzvox; phantom.mat = new int[numvox]; ifstream infilemat(matfile,ios::binary); FILEEXIST(infilemat); 
infilemat.read(reinterpret_cast <char*> (&phantom.mat[0]), sizeof(int)*numvox); infilemat.close(); phantom.dens = new float[numvox]; ifstream infileden(denfile,ios::binary); FILEEXIST(infileden); infileden.read(reinterpret_cast <char*> (&phantom.dens[0]), sizeof(float)*numvox); infileden.close(); phantom.dx = phantom.Sizex/phantom.Unxvox; phantom.dy = phantom.Sizey/phantom.Unyvox; phantom.dz = phantom.Sizez/phantom.Unzvox; cout<<"finish loading phantom"<<endl; cout<<"resolution is "<<phantom.dx << phantom.dy<< phantom.dz<<endl; return phantom; } Particle readParticle(char sourcefile[100],int NParticle) { Particle particle; double data[8]; ifstream infile(sourcefile,ios::binary); FILEEXIST(infile); printf("reading %s\n", sourcefile); int start, stop; start=infile.tellg(); infile.seekg(0, ios::end); stop=infile.tellg(); if(NParticle>(stop-start)/64) { cout<<"Do not have enough particles in PSF, Changing simulated number from "<<NParticle<<" to "<< (stop-start)/64 <<endl; NParticle = (stop-start)/64; } particle.xbuffer=new float3[NParticle]; particle.vxbuffer=new float4[NParticle]; particle.eventid=new int[NParticle]; particle.time=new double[NParticle]; infile.seekg(0, ios::beg); for(int i=0;i<NParticle;i++) { infile.read(reinterpret_cast <char*> (&data), sizeof(data)); particle.xbuffer[i]=make_float3(data[0],data[1],data[2]); particle.vxbuffer[i]=make_float4(data[4],data[5],data[6],data[7]); particle.eventid[i]=i; particle.time[i] = data[3]; if(i<6) { printf("the first %d particle: %f %f %f\n",i,particle.xbuffer[i].x,particle.vxbuffer[i].x,particle.time[i] ); } } printf("finish read: source PSF;\n\n"); infile.close(); particle.NParticle = NParticle; return particle; } Source readSource(char sourcefile[100]) { Source source; ifstream infile(sourcefile); FILEEXIST(infile); printf("reading %s\n", sourcefile); infile >> source.NSource; cout<< source.NSource<<"\n"; infile.ignore(512,'#'); source.natom=new unsigned int[source.NSource]; source.type=new int[source.NSource]; 
source.shape=new int[source.NSource]; source.shapecoeff=new float[source.NSource*6]; for(int i=0;i<source.NSource;i++) { infile >> source.natom[i] >> source.type[i] >> source.shape[i]; cout<< i <<" "<< source.natom[i]<<" " << source.type[i]<<" "<< source.shape[i]; for(int j=0;j<6;j++) { infile>>source.shapecoeff[6*i+j]; cout<<" "<<source.shapecoeff[6*i+j]; } cout<<endl; }//*/ printf("finish read: source;\n\n"); infile.close(); return source; } void spline(float *X, float *Y, float *A, float *B, float *C, float *D, float S1, float SN, int N) // possible error from FORTRAN to C /* CUBIC SPLINE INTERPOLATION BETWEEN TABULATED DATA. C INPUT: C X(I) (I=1, ...,N) ........ GRID POINTS. C (THE X VALUES MUST BE IN INCREASING ORDER). C Y(I) (I=1, ...,N) ........ CORRESPONDING FUNCTION VALUES. C S1,SN ..... SECOND DERIVATIVES AT X(1) AND X(N). C (THE NATURAL SPLINE CORRESPONDS TO TAKING S1=SN=0). C N ........................ NUMBER OF GRID POINTS. C C THE INTERPOLATING POLYNOMIAL IN THE I-TH INTERVAL, FROM C X(I) TO X(I+1), IS PI(X)=A(I)+X*(B(I)+X*(C(I)+X*D(I))). C C OUTPUT: C A(I),B(I),C(I),D(I) ...... SPLINE COEFFICIENTS. C C REF.: M.J. MARON, 'NUMERICAL ANALYSIS: A PRACTICAL C APPROACH', MACMILLAN PUBL. CO., NEW YORK 1982. C*************************************************************/ { // linear interpolation, you can use the for loop here and comment the following lines. /* for(int i = 0; i< N-1; i++) { B[i] = (Y[i+1]-Y[i])/(X[i+1]-X[i]); A[i] = (Y[i]*X[i+1] - X[i]*Y[i+1])/(X[i+1]-X[i]); C[i] = 0.0; D[i] = 0.0; }*/ //IMPLICIT DOUBLE PRECISION (A-H,O-Z) // DIMENSION X(N),Y(N),A(N),B(N),C(N),D(N) if(N < 4) { printf("SPLINE INTERPOLATION CANNOT BE PERFORMED WITH %d POINTS. STOP.\n",N); exit(1); } int N1 = N-1; int N2 = N-2; // AUXILIARY ARRAYS H(=A) AND DELTA(=D). for(int i = 0; i < N1; i++) { if(X[i+1]-X[i] < 1.0e-10) { printf("SPLINE X VALUES NOT IN INCREASING ORDER. 
STOP.\n"); exit(1); } A[i] = X[i+1] - X[i]; D[i] = (Y[i+1] - Y[i])/A[i]; } // SYMMETRIC COEFFICIENT MATRIX (AUGMENTED). for(int i = 0; i < N2; i++) { B[i] = 2.0F * (A[i] + A[i+1]); int k = N1 - i - 1; D[k] = 6.0F * (D[k] - D[k-1]); } D[1] -= A[0] * S1; D[N1-1] -= A[N1-1] * SN; // GAUSS SOLUTION OF THE TRIDIAGONAL SYSTEM. for(int i = 1; i < N2; i++) { float R = A[i]/B[i-1]; B[i] -= R * A[i]; D[i+1] -= R * D[i]; } // THE SIGMA COEFFICIENTS ARE STORED IN ARRAY D. D[N1-1] = D[N1-1]/B[N2-1]; for(int i = 1; i < N2; i++) { int k = N1 - i - 1; D[k] = (D[k] - A[k] * D[k+1])/B[k-1]; } D[N-1] = SN; // SPLINE COEFFICIENTS. float SI1 = S1; for(int i = 0; i < N1; i++) { float SI = SI1; SI1 = D[i+1]; float H = A[i]; float HI = 1.0F/H; A[i] = (HI/6.0F)*(SI*X[i+1]*X[i+1]*X[i+1]-SI1*X[i]*X[i]*X[i]) +HI*(Y[i]*X[i+1]-Y[i+1]*X[i]) +(H/6.0F)*(SI1*X[i]-SI*X[i+1]); B[i] = (HI/2.0F)*(SI1*X[i]*X[i]-SI*X[i+1]*X[i+1]) +HI*(Y[i+1]-Y[i])+(H/6.0F)*(SI-SI1); C[i] = (HI/2.0F)*(SI*X[i+1]-SI1*X[i]); D[i] = (HI/6.0F)*(SI1-SI); } return; } void inirngG() /******************************************************************* c* Set iseed1 and iseed2 for all threads with random numbers * c* * c* Input: * c* Output: * c* iseed1 -> random number * c* iseed2 -> random number * c******************************************************************/ { srand( (unsigned int)time(NULL) ); // generate randseed at CPU int *iseed1_h = (int*) malloc(sizeof(int)*NRAND); for(int i = 0; i < NRAND; i++) { iseed1_h[i] = rand(); } int *iseed1; hipMalloc((void**) &iseed1, sizeof(int)*NRAND); // copy to GPU hipMemcpy(iseed1, iseed1_h, sizeof(int)*NRAND, hipMemcpyHostToDevice); free(iseed1_h); int nblocks; nblocks = 1 + (NRAND - 1)/NTHREAD_PER_BLOCK_GPET ; hipLaunchKernelGGL(( setupcuseed), dim3(nblocks), dim3(NTHREAD_PER_BLOCK_GPET), 0, 0, iseed1); hipDeviceSynchronize(); hipFree(iseed1); } void rmater(float *eminph, float *emax) /******************************************************************* c* Reads material data from 
file * c* * c* Output: * c* fname -> input file name * c* [Emin,Eminph,Emax] -> interval where data will be gen (eV) * c* refz -> total atomic no of the reference material * c* refz2 -> atomic no^2 of the reference material * c* refmas -> atomic weight of the reference material * c* refden -> density of the reference material (g/cm^3) * c******************************************************************/ { char buffer[100]; float shigh,slow,ecross, temp,wcc,wcb; //char fname[] = "data/pre4phot.matter"; char fname[] = "data/input4gPET.matter"; printf("rmater: Reading %s\n", fname); FILE *fp = fopen(fname,"r"); FILEEXIST(fp); fgets(buffer, 100, fp); fgets(buffer, 100, fp); fgets(buffer, 100, fp); fgets(buffer, 100, fp); printf("%s\n",buffer); fscanf(fp,"%f %f %f\n",eminph, &temp, emax); printf("%e %e %e\n",*eminph,temp, *emax); fgets(buffer, 100, fp); //printf("%s\n",buffer); fscanf(fp,"%f %f\n",&wcc, &wcb); //printf("%e %e\n",wcc,wcb); fgets(buffer, 100, fp); //printf("%s\n",buffer); fscanf(fp,"%f %f %f\n",&shigh,&slow,&ecross); //printf("%e %e %e\n",shigh,slow,ecross); fgets(buffer, 100, fp); //printf("%s\n",buffer); fscanf(fp,"%d\n", &nmat_h); //printf("%d\n",nmat_h); hipMemcpyToSymbol(nmat, &nmat_h, sizeof(int), 0, hipMemcpyHostToDevice) ; if (nmat_h > MAXMAT) { printf("rmater:error: Too many materials.\n"); exit(1); } for(int i = 0; i < nmat_h; i++) { // Read name of material, remove trailing blanks: float matden; int nelem; fgets(buffer,100,fp); //printf("%s\n", buffer); fgets(buffer, 100, fp); //printf("%s\n", buffer); fscanf(fp,"%f\n", &matden); //printf("%e\n", matden); fgets(buffer, 100, fp); //printf("%s\n",buffer); fscanf(fp,"%d\n",&nelem); //printf("%d\n", nelem); for(int j = 0; j < nelem; j++) { fgets(buffer, 100, fp); //printf("%s\n",buffer); } fgets(buffer, 100, fp); //printf("%s\n",buffer); float atnotemp,atno2temp; fscanf(fp,"%f %f %f\n",&atnotemp, &atno2temp, &temp); //printf("%e %e\n", atnotemp,atno2temp); fgets(buffer, 100, fp); 
//printf("%s\n",buffer); float mass; fscanf(fp,"%f\n", &mass); //printf("%e\n", mass); fgets(buffer, 100, fp); //printf("%s\n",buffer); float zmass,z2mass; fscanf(fp,"%f %f\n", &zmass,&z2mass); //printf("%e %e\n", zmass,z2mass); } fclose(fp); printf("\nread material: Done.\n"); } void rlamph() /******************************************************************* c* Reads photon total inverse mean free path data from file and * c* sets up interpolation matrices * c* * c* Input: * c* fname -> input file name * c******************************************************************/ { char buffer[100]; int ndata; float dummya[NLAPH],dummyb[NLAPH],dummyc[NLAPH],dummyd[NLAPH]; //char fname[] = "data/pre4phot.lamph"; char fname[]="data/input4gPET.lamph"; printf("rlamph: Reading %s\n", fname); FILE *fp = fopen(fname,"r"); FILEEXIST(fp); fgets(buffer,100,fp); fgets(buffer,100,fp); for(int j = 0; j < nmat_h; j++) { fgets(buffer,100,fp); float temp; fscanf(fp,"%d %f %f %f %f\n",&ndata,&temp,&temp,&temp,&temp); if (ndata != NLAPH) { printf("rlamph:error: Array dim do not match:\n"); printf("%d %d\n", ndata,NLAPH); exit(1); } fgets(buffer,100,fp); // Preparing interpolation: for(int i = 0; i < NLAPH; i++) { fscanf(fp,"%f %f\n",&elaph_h[i],&lamph_h[ind2To1(j,i,MAXMAT,NLAPH)]);//excess ind2To1 equal to j*NLAPH+i,linearization //if(i<3) //printf("material %d, energy %e, cross section %e\n",j, elaph_h[i],lamph_h[ind2To1(j,i,MAXMAT,NLAPH)]); } fgets(buffer,100,fp); spline(elaph_h, &lamph_h[ind2To1(j,0,MAXMAT,NLAPH)],dummya,dummyb,dummyc,dummyd,0.0F,0.0F,NLAPH); // Loading dummy arrays into multimaterial sp matrices: for(int i = 0; i < NLAPH; i++) { lampha_h[ind2To1(j,i,MAXMAT,NLAPH)] = dummya[i]; lamphb_h[ind2To1(j,i,MAXMAT,NLAPH)] = dummyb[i]; lamphc_h[ind2To1(j,i,MAXMAT,NLAPH)] = dummyc[i]; lamphd_h[ind2To1(j,i,MAXMAT,NLAPH)] = dummyd[i]; } } fclose(fp); idleph_h = (NLAPH-1)/(elaph_h[NLAPH-1]-elaph_h[0]); hipMemcpyToSymbol(idleph, &idleph_h, sizeof(float), 0, hipMemcpyHostToDevice) ; 
hipMemcpyToSymbol(elaph0, &elaph_h[0], sizeof(float), 0, hipMemcpyHostToDevice); hipMallocArray(&lamph, &lamph_tex.channelDesc, NLAPH*MAXMAT, 1); hipMemcpyToArray(lamph, 0, 0, lamph_h, sizeof(float)*NLAPH*MAXMAT, hipMemcpyHostToDevice); lamph_tex.filterMode = hipFilterModeLinear; hipBindTextureToArray(lamph_tex, lamph); } void rcompt() /******************************************************************* c* Reads Compton inverse mean free path data from file and sets * c* up interpolation matrices * c* * c* Input: * c* fname -> input file name * c******************************************************************/ { char buffer[100]; int ndata; //char fname[] = "data/pre4phot.compt"; char fname[]= "data/input4gPET.compt"; printf("rcompt: Reading %s\n", fname); FILE *fp = fopen(fname, "r"); FILEEXIST(fp); fgets(buffer,100,fp); fgets(buffer,100,fp); for(int j = 0; j < nmat_h; j++) { fgets(buffer,100,fp); float temp; fscanf(fp,"%d %f %f %f %f\n",&ndata,&temp,&temp,&temp,&temp); if (ndata != NCMPT) { printf("rcompt:error: Array dim do not match:\n"); printf("%d %d \n", ndata,NCMPT); exit(1); } fgets(buffer,100,fp); // Preparing interpolation: for(int i = 0; i <NCMPT; i++) { fscanf(fp,"%f %f\n",&ecmpt_h[i],&compt_h[ind2To1(j,i,MAXMAT,NCMPT)]); // if(j == nmat-1) // printf("%e %e\n",ecmpt[i],compt[i]); } fgets(buffer,100,fp); } fclose(fp); idlecp_h = (NCMPT-1)/(ecmpt_h[NCMPT-1]-ecmpt_h[0]); hipMemcpyToSymbol(idlecp, &idlecp_h, sizeof(float), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(ecmpt0, &ecmpt_h[0], sizeof(float), 0, hipMemcpyHostToDevice) ; hipMallocArray(&compt, &compt_tex.channelDesc, NCMPT*MAXMAT, 1); hipMemcpyToArray(compt, 0, 0, compt_h, sizeof(float)*NCMPT*MAXMAT, hipMemcpyHostToDevice); compt_tex.filterMode = hipFilterModeLinear; hipBindTextureToArray(compt_tex, compt); } void rcmpsf() /******************************************************************* c* Reads Compton scattering function data from file and * c* sets up interpolation matrices * 
c******************************************************************/ { char buffer[100]; //char fname[] = "data/pre4phot.cmpsf"; char fname[]= "data/input4gPET.cmpsf"; printf("rcmpsf: Reading %s\n", fname); FILE *fp = fopen(fname,"r"); fgets(buffer,100,fp); fgets(buffer,100,fp); for(int j = 0; j < nmat_h; j++) { // read sf data fgets(buffer,100,fp); float temp; int ndata; fscanf(fp,"%d %f %f %f\n",&ndata,&temp,&temp,&temp); fgets(buffer,100,fp); for(int i = 0; i < ndata; i++) { fscanf(fp,"%f %f %f\n",&temp, &temp, &temp); } // read s surface fgets(buffer,100,fp); int ncp, ne; float dcp, de; fscanf(fp,"%d %f %f %f %d %f %f %f\n", &ncp, &temp, &temp, &dcp, &ne, &temp, &temp, &de); if (ncp != NCPCM) { printf("rcmpsf:error: NCP dim do not match:\n"); printf("%d %d\n", ncp,NCPCM); exit(1); } if (ne != NECM) { printf("rcmpsf:error: NE dim do not match:\n"); printf("%d %d\n", ne,NECM); exit(1); } idcpcm_h = 1.0f/dcp; idecm_h = 1.0f/de; for(int icp=0; icp <ncp; icp++) fscanf(fp,"%f ",&temp); fscanf(fp,"\n"); for(int ie=0; ie <ne; ie++) fscanf(fp,"%f ",&temp); fscanf(fp,"\n"); for(int icp=0; icp <ncp; icp++) { for(int ie = 0; ie<ne; ie++) { fscanf(fp,"%f ",&mucmpt_h[j*NCPCM*NECM+icp*NECM+ie]); // if(mucmpt_h[j*NCPCM*NECM+icp*NECM+ie] > 1.0f || mucmpt_h[j*NCPCM*NECM+icp*NECM+ie]<-1.0f) // cout << "error in data" << mucmpt_h[j*NCPCM*NECM+icp*NECM+ie] << endl; } fscanf(fp,"\n"); } fscanf(fp,"\n"); } fclose(fp); // load to GPU hipMemcpyToSymbol(idcpcm, &idcpcm_h, sizeof(float), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(idecm, &idecm_h, sizeof(float), 0, hipMemcpyHostToDevice) ; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); const hipExtent volumeSize = make_hipExtent(NECM, NCPCM, MAXMAT); hipMalloc3DArray(&sArray, &channelDesc, volumeSize) ; hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr((void*)mucmpt_h, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = sArray; copyParams.extent = 
volumeSize; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams) ; s_tex.normalized = false; s_tex.filterMode = hipFilterModeLinear; hipBindTextureToArray(s_tex, sArray, channelDesc); } void rphote() /******************************************************************* c* Reads photoelectric inverse mean free path data from file and* c* sets up interpolation matrices * c******************************************************************/ { char buffer[100]; int ndata; //char fname[] = "data/pre4phot.phote"; char fname[]= "data/input4gPET.phote"; printf("rphote: Reading %s\n", fname); FILE *fp = fopen(fname,"r"); fgets(buffer,100,fp); fgets(buffer,100,fp); for(int j = 0; j < nmat_h; j++) { fgets(buffer,100,fp); float temp; fscanf(fp,"%d %f %f %f %f\n",&ndata,&temp,&temp,&temp,&temp); if (ndata != NPHTE) { printf("rphote:error: Array dim do not match:\n"); printf("%d %d\n", ndata,NPHTE); exit(1); } fgets(buffer,100,fp); // Preparing interpolation for(int i = 0; i < NPHTE; i++) { fscanf(fp,"%f %f\n",&ephte_h[i],&phote_h[ind2To1(j,i,MAXMAT,NPHTE)]); // if(j == nmat-1) // printf("%e %e\n",ephte[i],phote[i]); } fgets(buffer,100,fp); } fclose(fp); idlepe_h = (NPHTE-1)/(ephte_h[NPHTE-1]-ephte_h[0]); hipMemcpyToSymbol(idlepe, &idlepe_h, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(ephte0, &ephte_h[0], sizeof(float), 0, hipMemcpyHostToDevice); hipMallocArray(&phote, &phote_tex.channelDesc, NPHTE*MAXMAT, 1); hipMemcpyToArray(phote, 0, 0, phote_h, sizeof(float)*NPHTE*MAXMAT, hipMemcpyHostToDevice); phote_tex.filterMode = hipFilterModeLinear; hipBindTextureToArray(phote_tex, phote); } void rrayle() /******************************************************************* c* Reads rayleigh inverse mean free path data from file and * c* sets up interpolation matrices * c* * c* Input: * c* fname -> input file name * c******************************************************************/ { char buffer[100]; int ndata; //char fname[] = "data/pre4phot.rayle"; char 
fname[]="data/input4gPET.rayle"; printf("rrayle: Reading %s\n", fname); FILE *fp = fopen(fname,"r"); fgets(buffer,100,fp); fgets(buffer,100,fp); for(int j = 0; j < nmat_h; j++) { fgets(buffer,100,fp); float temp; fscanf(fp,"%d %f %f %f %f\n",&ndata,&temp,&temp,&temp,&temp); if (ndata != NRAYL) { printf("rrayle:error: Array dim do not match:\n"); printf("%d %d\n", ndata,NRAYL); exit(1); } fgets(buffer,100,fp); // Preparing interpolation for(int i = 0; i < NRAYL; i++) { fscanf(fp,"%f %f\n",&erayl_h[i],&rayle_h[ind2To1(j,i,MAXMAT,NRAYL)]); } fgets(buffer,100,fp); } fclose(fp); idlerl_h = (NRAYL-1)/(erayl_h[NRAYL-1]-erayl_h[0]); hipMemcpyToSymbol(idlerl, &idlerl_h, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(erayl0, &erayl_h[0], sizeof(float), 0, hipMemcpyHostToDevice) ; hipMallocArray(&rayle, &rayle_tex.channelDesc, NRAYL*MAXMAT, 1); hipMemcpyToArray(rayle, 0, 0, rayle_h, sizeof(float)*NRAYL*MAXMAT, hipMemcpyHostToDevice); rayle_tex.filterMode = hipFilterModeLinear; hipBindTextureToArray(rayle_tex, rayle); } void rrayff() /******************************************************************* c* Reads Rayleigh scattering form factor data from file and * c* sets up interpolation matrices * c******************************************************************/ { char buffer[100]; //char fname[] = "data/pre4phot.rayff"; char fname[]= "data/input4gPET.rayff"; printf("rrayff: Reading %s\n", fname); FILE *fp = fopen(fname,"r"); fgets(buffer,100,fp); fgets(buffer,100,fp); for(int j = 0; j < nmat_h; j++) { // read ff data fgets(buffer,100,fp); float temp; int ndata; fscanf(fp,"%d %f %f %f\n",&ndata,&temp,&temp,&temp); fgets(buffer,100,fp); for(int i = 0; i < ndata; i++) { fscanf(fp,"%f %f %f\n",&temp, &temp, &temp); } // read f surface fgets(buffer,100,fp); int ncp, ne; float dcp, de; fscanf(fp,"%d %f %f %f %d %f %f %f\n", &ncp, &temp, &temp, &dcp, &ne, &temp, &temp, &de); if (ncp != NCPRL) { printf("rrayff:error: NCP dim do not match:\n"); printf("%d %d\n", 
ncp,NCPRL); exit(1); } if (ne != NERL) { printf("rrayff:error: NE dim do not match:\n"); printf("%d %d\n", ne,NERL); exit(1); } idcprl_h = 1.0f/dcp; iderl_h = 1.0f/de; for(int icp=0; icp <ncp; icp++) fscanf(fp,"%f ",&temp); fscanf(fp,"\n"); for(int ie=0; ie <ne; ie++) fscanf(fp,"%f ",&temp); fscanf(fp,"\n"); for(int icp=0; icp <ncp; icp++) { for(int ie = 0; ie<ne; ie++) { fscanf(fp,"%f ",&murayl_h[j*NCPRL*NERL+icp*NERL+ie]); // if(murayl_h[j*NCPRL*NERL+icp*NERL+ie] > 1.0f || murayl_h[j*NCPRL*NERL+icp*NERL+ie]<-1.0f) // cout << "error in data" << murayl_h[j*NCPRL*NERL+icp*NERL+ie] << endl; } fscanf(fp,"\n"); } fscanf(fp,"\n"); // cout << murayl_h[j*NCPRL*NERL+(NCPRL-2)*NERL+1] << endl; } fclose(fp); // load to GPU hipMemcpyToSymbol(idcprl, &idcprl_h, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(iderl, &iderl_h, sizeof(float), 0, hipMemcpyHostToDevice) ; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); const hipExtent volumeSize = make_hipExtent(NERL, NCPRL, MAXMAT); hipMalloc3DArray(&fArray, &channelDesc, volumeSize) ; hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr((void*)murayl_h, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = fArray; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); f_tex.normalized = false; f_tex.filterMode = hipFilterModeLinear; hipBindTextureToArray(f_tex, fArray, channelDesc); } float itphip(int matid, float e) /******************************************************************* c* Photon total inverse mean free path --3spline interpolation * c* * c* Input: * c* matid -> material id# * c* e -> kinetic energy in eV * c* Output: * c* Total inverse mean free path in cm^2/g * c******************************************************************/ { int i; i = int(idleph_h*(e-elaph_h[0])); return lampha_h[ind2To1(matid,i,MAXMAT,NLAPH)] + e*(lamphb_h[ind2To1(matid,i,MAXMAT,NLAPH)] + 
e*(lamphc_h[ind2To1(matid,i,MAXMAT,NLAPH)] + e*lamphd_h[ind2To1(matid,i,MAXMAT,NLAPH)] )); } void iniwck(float eminph,float emax, Phantom phantom) /******************************************************************* c* Finds information used to transport photons with the Woodcock* c* technique * c* * c* Input: * c* eminph -> minimum photon energy in data files (eV) * c* emax -> maximum photon energy in data files (eV) * c* Output * c* bytes -> space allocated for arrays * c* Comments: * c* -> common /dpmsrc/ must be loaded previously * c* -> rlamph() must be called previously * c* -> emax reduced to avoid reaching the end of interpol table* c******************************************************************/ { float maxden[MAXMAT],de,e,ymax,ycanbe; const float eps = 1.0e-10F; unsigned int NXYZ = phantom.Unxvox*phantom.Unyvox*phantom.Unzvox; printf("iniwck phantom: Started.\n"); // Find the largest density for each present material: for(int i = 0; i < MAXMAT; i++) { maxden[i] = 0.0F; } for(int vox = 0; vox < NXYZ; vox++) { if (phantom.dens[vox] > maxden[phantom.mat[vox]]) maxden[phantom.mat[vox]] = phantom.dens[vox]; } // Prepare data: wcke0_h = eminph; de = (emax*(1.0F - eps ) - wcke0_h ) / NWCK; idlewk_h = 1.0F/de; for(int i = 0; i < NWCK; i++) { e = wcke0_h + de*i; ymax = 0.0; for(int j = 0; j < nmat_h; j++) { ycanbe = itphip(j,e)*maxden[j]; if (ycanbe > ymax) ymax = ycanbe; } woock_h[i] = 1.0F/ymax; } hipMemcpyToSymbol(idlewk, &idlewk_h, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(wcke0, &wcke0_h, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMallocArray(&woock, &woock_tex.channelDesc, NWCK, 1); hipMemcpyToArray(woock, 0, 0, woock_h, sizeof(float)*NWCK, hipMemcpyHostToDevice); woock_tex.filterMode = hipFilterModeLinear; hipBindTextureToArray(woock_tex, woock); } void initPhantom(Phantom phantom) { printf("CT dimension: %d %d %d\n", phantom.Unxvox, phantom.Unyvox, phantom.Unzvox); printf("CT resolution: %f %f %f\n", phantom.dx, phantom.dy, 
phantom.dz); hipMemcpyToSymbol(Unxvox, &phantom.Unxvox, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(Unyvox, &phantom.Unyvox, sizeof(int), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(Unzvox, &phantom.Unzvox, sizeof(int), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(dx_gBrachy, &phantom.dx, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(dy_gBrachy, &phantom.dy, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(dz_gBrachy, &phantom.dz, sizeof(float), 0, hipMemcpyHostToDevice) ; float idx_gBrachy_h,idy_gBrachy_h,idz_gBrachy_h; idx_gBrachy_h = 1.0F/phantom.dx; hipMemcpyToSymbol(idx_gBrachy, &idx_gBrachy_h, sizeof(float), 0, hipMemcpyHostToDevice) ; idy_gBrachy_h = 1.0F/phantom.dy; hipMemcpyToSymbol(idy_gBrachy, &idy_gBrachy_h, sizeof(float), 0, hipMemcpyHostToDevice) ; idz_gBrachy_h = 1.0F/phantom.dz; hipMemcpyToSymbol(idz_gBrachy, &idz_gBrachy_h, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(Offsetx_gBrachy, &phantom.Offsetx, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(Offsety_gBrachy, &phantom.Offsety, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(Offsetz_gBrachy, &phantom.Offsetz, sizeof(float), 0, hipMemcpyHostToDevice) ; hipExtent volumeSize = make_hipExtent(phantom.Unxvox, phantom.Unyvox, phantom.Unzvox); CUDA_CALL(hipMalloc3DArray(&mat, &mat_tex.channelDesc, volumeSize)); CUDA_CALL(hipMalloc3DArray(&dens, &dens_tex.channelDesc, volumeSize)); // create a 3d array on device hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr((void*)phantom.mat, volumeSize.width*sizeof(int), volumeSize.width, volumeSize.height); copyParams.dstArray = mat; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams) ; // copy data from host to device mat_tex.normalized = false; mat_tex.filterMode = hipFilterModePoint; hipBindTextureToArray(mat_tex, mat, mat_tex.channelDesc); // bind to texture memory copyParams.srcPtr = 
make_hipPitchedPtr((void*)phantom.dens, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = dens; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams) ; // copy data from host to device dens_tex.normalized = false; dens_tex.filterMode = hipFilterModePoint; hipBindTextureToArray(dens_tex, dens, dens_tex.channelDesc); // bind to texture memory }//*/ void init(Phantom phantom) /******************************************************************* c* Initializes the gCTD system * c******************************************************************/ { initPhantom(phantom); hipMemcpyToSymbol(eabsph, &eabsph_h, sizeof(float), 0, hipMemcpyHostToDevice); // in GPU, initialize rand seed with rand numbers inirngG(); rmater(&eminph, &emax);//no use? printf("\n"); if(eabsph_h <eminph) { printf("init:error: Eabs out of range.\n"); exit(1); } // load total cross section rlamph(); // load compton cross section rcompt(); rcmpsf(); // load photoelectric cross section rphote(); // load rayleigh cross section and form factors rrayle(); rrayff(); // iniwck must be called after reading esrc & eabsph: iniwck(eminph, emax, phantom); printf("\n\nInitialize : Done.\n");//*/ } void iniwck(float eminph,float emax, struct object_v* objectMaterial) //for detector { float maxden[MAXMAT],de,e,ymax,ycanbe; const float eps = 1.0e-10F; printf("\n"); printf("\n"); printf("iniwck detector: Started.\n"); // Find the largest density for each present material: for(int i = 0; i < MAXMAT; i++) { maxden[i] = 0.0F; } for(int i=0; i<2; i++) { if (objectMaterial[i].density > maxden[objectMaterial[i].material]) maxden[objectMaterial[i].material] = objectMaterial[i].density; } // Prepare data: wcke0_h = eminph; de = (emax*(1.0F - eps ) - wcke0_h ) / NWCK; idlewk_h = 1.0F/de; for(int i = 0; i < NWCK; i++) { e = wcke0_h + de*i; ymax = 0.0; for(int j = 0; j < nmat_h; j++) { ycanbe = itphip(j,e)*maxden[j]; if (ycanbe > ymax) ymax = 
ycanbe; } woock_h[i] = 1.0F/ymax; /*if (i<1100 && i>1090) printf("1/lamda=%f\n",woock_h[i]);*/ } hipMemcpyToSymbol(idlewk, &idlewk_h, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMemcpyToSymbol(wcke0, &wcke0_h, sizeof(float), 0, hipMemcpyHostToDevice) ; hipMallocArray(&woockde, &woockde_tex.channelDesc, NWCK, 1); hipMemcpyToArray(woockde, 0, 0, woock_h, sizeof(float)*NWCK, hipMemcpyHostToDevice); woockde_tex.filterMode = hipFilterModeLinear; hipBindTextureToArray(woockde_tex, woockde); } void iniPanel(struct object_t* objectArray, struct object_v* objectMaterial,int totalOb) /******************************************************************* c* Initializes the module system * c******************************************************************/ { printf(" \n"); printf("init: Panel geometry;\n"); // copy arrays from host to device int *ma=new int[2]; float *den=new float[2]; int *p_id=new int[totalOb]; float *lx_m=new float[totalOb]; float *ly_m=new float[totalOb]; float *lz_m=new float[totalOb]; float *Mx_m=new float[totalOb]; float *My_m=new float[totalOb]; float *Mz_m=new float[totalOb]; float *Msx_m=new float[totalOb]; float *Msy_m=new float[totalOb]; float *Msz_m=new float[totalOb]; float *Lx_m=new float[totalOb]; float *Ly_m=new float[totalOb]; float *Lz_m=new float[totalOb]; float *sx_m=new float[totalOb]; float *sy_m=new float[totalOb]; float *sz_m=new float[totalOb]; float *ox_m=new float[totalOb]; float *oy_m=new float[totalOb]; float *oz_m=new float[totalOb]; float *dx_m=new float[totalOb]; float *dy_m=new float[totalOb]; float *dz_m=new float[totalOb]; float *UXx_m=new float[totalOb]; float *UXy_m=new float[totalOb]; float *UXz_m=new float[totalOb]; float *UYx_m=new float[totalOb]; float *UYy_m=new float[totalOb]; float *UYz_m=new float[totalOb]; float *UZx_m=new float[totalOb]; float *UZy_m=new float[totalOb]; float *UZz_m=new float[totalOb]; for (int i=0;i<2;i++) { ma[i]=objectMaterial[i].material; den[i]=objectMaterial[i].density; } for (int 
i=0;i<totalOb;i++) { p_id[i]=objectArray[i].panel; lx_m[i]=objectArray[i].lengthx; ly_m[i]=objectArray[i].lengthy; lz_m[i]=objectArray[i].lengthz; Mx_m[i]=objectArray[i].MODx; My_m[i]=objectArray[i].MODy; Mz_m[i]=objectArray[i].MODz; Msx_m[i]=objectArray[i].Mspacex; Msy_m[i]=objectArray[i].Mspacey; Msz_m[i]=objectArray[i].Mspacez; Lx_m[i]=objectArray[i].LSOx; Ly_m[i]=objectArray[i].LSOy; Lz_m[i]=objectArray[i].LSOz; sx_m[i]=objectArray[i].spacex; sy_m[i]=objectArray[i].spacey; sz_m[i]=objectArray[i].spacez; ox_m[i]=objectArray[i].offsetx; oy_m[i]=objectArray[i].offsety; oz_m[i]=objectArray[i].offsetz; dx_m[i]=objectArray[i].directionx; dy_m[i]=objectArray[i].directiony; dz_m[i]=objectArray[i].directionz; UXx_m[i]=objectArray[i].UniXx; UXy_m[i]=objectArray[i].UniXy; UXz_m[i]=objectArray[i].UniXz; UYx_m[i]=objectArray[i].UniYx; UYy_m[i]=objectArray[i].UniYy; UYz_m[i]=objectArray[i].UniYz; UZx_m[i]=objectArray[i].UniZx; UZy_m[i]=objectArray[i].UniZy; UZz_m[i]=objectArray[i].UniZz; } int Mn, Ln; Mn=floorf(ly_m[0]/(My_m[0]+Msy_m[0]))+1; Ln=floorf(My_m[0]/(Ly_m[0]+sy_m[0]))+1; //printf("Mn %d Ln %d\n", Mn, Ln); hipMemcpyToSymbol(crystalNy, &Ln, sizeof(int)); hipMemcpyToSymbol(moduleNy, &Mn, sizeof(int)); Mn*=floorf(lz_m[0]/(Mz_m[0]+Msz_m[0]))+1; Ln*=floorf(Mz_m[0]/(Lz_m[0]+sz_m[0]))+1; //printf("Mn %d Ln %d\n", Mn, Ln); hipMemcpyToSymbol(crystalN, &Ln, sizeof(int)); hipMemcpyToSymbol(moduleN, &Mn, sizeof(int)); hipMemcpyToSymbol(dev_totalPanels, &totalOb, sizeof(int), 0, hipMemcpyHostToDevice); hipMalloc((void**)&mat_panel, 2 * sizeof(int)); hipMalloc((void**)&dens_panel, 2 * sizeof(float)); hipMalloc((void**)&panelID, totalOb * sizeof(int)); hipMalloc((void**)&lengthx_panel, totalOb * sizeof(float)); hipMalloc((void**)&lengthy_panel, totalOb * sizeof(float)); hipMalloc((void**)&lengthz_panel, totalOb * sizeof(float)); hipMalloc((void**)&MODx_panel, totalOb * sizeof(float)); hipMalloc((void**)&MODy_panel, totalOb * sizeof(float)); hipMalloc((void**)&MODz_panel, totalOb * 
sizeof(float)); hipMalloc((void**)&Mspacex_panel, totalOb * sizeof(float)); hipMalloc((void**)&Mspacey_panel, totalOb * sizeof(float)); hipMalloc((void**)&Mspacez_panel, totalOb * sizeof(float)); hipMalloc((void**)&LSOx_panel, totalOb * sizeof(float)); hipMalloc((void**)&LSOy_panel, totalOb * sizeof(float)); hipMalloc((void**)&LSOz_panel, totalOb * sizeof(float)); hipMalloc((void**)&spacex_panel, totalOb * sizeof(float)); hipMalloc((void**)&spacey_panel, totalOb * sizeof(float)); hipMalloc((void**)&spacez_panel, totalOb * sizeof(float)); hipMalloc((void**)&offsetx_panel, totalOb * sizeof(float)); hipMalloc((void**)&offsety_panel, totalOb * sizeof(float)); hipMalloc((void**)&offsetz_panel, totalOb * sizeof(float)); hipMalloc((void**)&directionx_panel, totalOb * sizeof(float)); hipMalloc((void**)&directiony_panel, totalOb * sizeof(float)); hipMalloc((void**)&directionz_panel, totalOb * sizeof(float)); hipMalloc((void**)&UniXx_panel, totalOb * sizeof(float)); hipMalloc((void**)&UniXy_panel, totalOb * sizeof(float)); hipMalloc((void**)&UniXz_panel, totalOb * sizeof(float)); hipMalloc((void**)&UniYx_panel, totalOb * sizeof(float)); hipMalloc((void**)&UniYy_panel, totalOb * sizeof(float)); hipMalloc((void**)&UniYz_panel, totalOb * sizeof(float)); hipMalloc((void**)&UniZx_panel, totalOb * sizeof(float)); hipMalloc((void**)&UniZy_panel, totalOb * sizeof(float)); hipMalloc((void**)&UniZz_panel, totalOb * sizeof(float)); hipMemcpy(mat_panel, ma, 2*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dens_panel, den, 2*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(panelID, p_id, totalOb*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(lengthx_panel, lx_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(lengthy_panel, ly_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(lengthz_panel, lz_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(MODx_panel, Mx_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(MODy_panel, My_m, totalOb*sizeof(float), 
hipMemcpyHostToDevice); hipMemcpy(MODz_panel, Mz_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(Mspacex_panel, Msx_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(Mspacey_panel, Msy_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(Mspacez_panel, Msz_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(LSOx_panel, Lx_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(LSOy_panel, Ly_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(LSOz_panel, Lz_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(spacex_panel, sx_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(spacey_panel, sy_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(spacez_panel, sz_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(offsetx_panel, ox_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(offsety_panel, oy_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(offsetz_panel, oz_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(directionx_panel, dx_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(directiony_panel, dy_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(directionz_panel, dz_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(UniXx_panel, UXx_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(UniXy_panel, UXy_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(UniXz_panel, UXz_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(UniYx_panel, UYx_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(UniYy_panel, UYy_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(UniYz_panel, UYz_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(UniZx_panel, UZx_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(UniZy_panel, UZy_m, totalOb*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(UniZz_panel, UZz_m, totalOb*sizeof(float), hipMemcpyHostToDevice); delete[] ma; delete[] den; delete[] 
p_id; delete[] lx_m; delete[] ly_m; delete[] lz_m; delete[] Mx_m; delete[] My_m; delete[] Mz_m; delete[] Msx_m; delete[] Msy_m; delete[] Msz_m; delete[] Lx_m; delete[] Ly_m; delete[] Lz_m; delete[] sx_m; delete[] sy_m; delete[] sz_m; delete[] ox_m; delete[] oy_m; delete[] oz_m; delete[] dx_m; delete[] dy_m; delete[] dz_m; delete[] UXx_m; delete[] UXy_m; delete[] UXz_m; delete[] UYx_m; delete[] UYy_m; delete[] UYz_m; delete[] UZx_m; delete[] UZy_m; delete[] UZz_m; printf("finish init: Module geometry;\n\n"); } #endif
ae27ad0d88e86f482a6afe1cac72c3607d0cd3e9.cu
#ifndef __INITIALIZE_CU__
#define __INITIALIZE_CU__

#include "gPET.h"
#include "externCUDA.h"

// Number of materials read from the cross-section data files.
// Set once in rmater() and consumed by every other r*() reader below,
// so rmater() must be called before rlamph()/rcompt()/rcmpsf()/rphote()/
// rrayle()/rrayff() and iniwck().
int nmat_h;

using namespace std;

/*******************************************************************
c* Reads isotope decay data from data/isotopes.txt: for each of the  *
c* Ntype isotopes, a half-life, a decay (positron branching) ratio   *
c* and 8 spectrum coefficients stored flat at coef[8*i .. 8*i+7].    *
c* The returned arrays are heap-allocated; the caller owns them.     *
c******************************************************************/
Isotopes loadIsotopes()
{
    Isotopes isotopes;
    ifstream infile("data/isotopes.txt");
    FILEEXIST(infile);
    printf("reading /data/isotopes.txt\n");
    infile >> isotopes.Ntype;
    // Skip the rest of the header up to (and including) the '#' marker.
    infile.ignore(512,'#');
    isotopes.halftime=new float[isotopes.Ntype];
    isotopes.decayRatio=new float[isotopes.Ntype];
    isotopes.coef=new float[isotopes.Ntype*8];
    for(int i=0;i<isotopes.Ntype;i++)
    {
        infile >> isotopes.halftime[i]>>isotopes.decayRatio[i]>>isotopes.coef[8*i]>>isotopes.coef[8*i+1]>>isotopes.coef[8*i+2];
        infile >> isotopes.coef[8*i+3]>>isotopes.coef[8*i+4]>>isotopes.coef[8*i+5]>>isotopes.coef[8*i+6]>>isotopes.coef[8*i+7];
        // Echo first and last coefficient of each isotope for a quick sanity check.
        cout << i <<" "<< isotopes.halftime[i]<<" " <<isotopes.decayRatio[i]<<" "<< isotopes.coef[8*i]<<" "<< isotopes.coef[8*i+7]<<endl;
    }
    printf("finish read isotopes;\n\n");
    infile.close();
    return isotopes;
}

/*******************************************************************
c* Reads voxel geometry from an input file                          *
c* matfile: raw binary int material ids, one per voxel;             *
c* denfile: raw binary float densities, one per voxel.              *
c* pdim/poffset/psize give the grid dimension, origin offset and    *
c* physical extent; per-voxel spacing dx/dy/dz is derived here.     *
c* Voxel arrays are heap-allocated and owned by the caller.         *
c******************************************************************/
Phantom loadPhantom(char matfile[100], char denfile[100],int* pdim, float* poffset, float* psize)
{
    Phantom phantom;
    cout<<"\n\nloading phantom\n";
    phantom.Unxvox = pdim[0];
    phantom.Unyvox = pdim[1];
    phantom.Unzvox = pdim[2];
    // NOTE(review): pdim is int*, and initPhantom() prints these members with %d,
    // so Unxvox/Unyvox/Unzvox are presumably ints — %f here is likely the wrong
    // conversion specifier (undefined behavior). Confirm against Phantom in gPET.h.
    printf("CT dimension: %f %f %f\n", phantom.Unxvox, phantom.Unyvox, phantom.Unzvox);
    phantom.Offsetx = poffset[0];
    phantom.Offsety = poffset[1];
    phantom.Offsetz = poffset[2];
    printf("CT offset: %f %f %f\n", phantom.Offsetx, phantom.Offsety, phantom.Offsetz);
    phantom.Sizex = psize[0];
    phantom.Sizey = psize[1];
    phantom.Sizez = psize[2];
    printf("CT Size: %f %f %f\n", phantom.Sizex, phantom.Sizey, phantom.Sizez);
    int numvox=phantom.Unxvox*phantom.Unyvox*phantom.Unzvox;
    // Material ids: one raw little-endian int per voxel, x fastest (assumed from
    // the 3D-array upload in initPhantom — TODO confirm file producer's ordering).
    phantom.mat = new int[numvox];
    ifstream infilemat(matfile,ios::binary);
    FILEEXIST(infilemat);
    infilemat.read(reinterpret_cast <char*> (&phantom.mat[0]), sizeof(int)*numvox);
    infilemat.close();
    // Densities: one raw float per voxel, same ordering as the material file.
    phantom.dens = new float[numvox];
    ifstream infileden(denfile,ios::binary);
    FILEEXIST(infileden);
    infileden.read(reinterpret_cast <char*> (&phantom.dens[0]), sizeof(float)*numvox);
    infileden.close();
    // Voxel spacing = physical extent / voxel count along each axis.
    phantom.dx = phantom.Sizex/phantom.Unxvox;
    phantom.dy = phantom.Sizey/phantom.Unyvox;
    phantom.dz = phantom.Sizez/phantom.Unzvox;
    cout<<"finish loading phantom"<<endl;
    // NOTE(review): no separators between dx, dy, dz — the three numbers run
    // together in the log output.
    cout<<"resolution is "<<phantom.dx << phantom.dy<< phantom.dz<<endl;
    return phantom;
}

/*******************************************************************
c* Reads a binary phase-space file of 64-byte records (8 doubles    *
c* each). Per record: data[0..2] -> position, data[3] -> time,      *
c* data[4..7] -> direction/energy 4-vector (presumably vx,vy,vz,E   *
c* — TODO confirm against the PSF writer). NParticle is clamped to  *
c* the number of records actually present in the file.              *
c******************************************************************/
Particle readParticle(char sourcefile[100],int NParticle)
{
    Particle particle;
    double data[8];
    ifstream infile(sourcefile,ios::binary);
    FILEEXIST(infile);
    printf("reading %s\n", sourcefile);
    // Determine file size to bound the number of 64-byte records available.
    int start, stop;
    start=infile.tellg();
    infile.seekg(0, ios::end);
    stop=infile.tellg();
    if(NParticle>(stop-start)/64)
    {
        cout<<"Do not have enough particles in PSF, Changing simulated number from "<<NParticle<<" to "<< (stop-start)/64 <<endl;
        NParticle = (stop-start)/64;
    }
    particle.xbuffer=new float3[NParticle];
    particle.vxbuffer=new float4[NParticle];
    particle.eventid=new int[NParticle];
    particle.time=new double[NParticle];
    infile.seekg(0, ios::beg);
    for(int i=0;i<NParticle;i++)
    {
        infile.read(reinterpret_cast <char*> (&data), sizeof(data));
        particle.xbuffer[i]=make_float3(data[0],data[1],data[2]);
        particle.vxbuffer[i]=make_float4(data[4],data[5],data[6],data[7]);
        // Event id is simply the record index.
        particle.eventid[i]=i;
        particle.time[i] = data[3];
        if(i<6)
        {
            // Echo the first few particles for a quick sanity check.
            printf("the first %d particle: %f %f %f\n",i,particle.xbuffer[i].x,particle.vxbuffer[i].x,particle.time[i] );
        }
    }
    printf("finish read: source PSF;\n\n");
    infile.close();
    particle.NParticle = NParticle;
    return particle;
}

/*******************************************************************
c* Reads an ASCII source-definition file: NSource entries, each     *
c* with an atom count, isotope type, shape id and 6 shape           *
c* coefficients (flat at shapecoeff[6*i .. 6*i+5]).                 *
c******************************************************************/
Source readSource(char sourcefile[100])
{
    Source source;
    ifstream infile(sourcefile);
    FILEEXIST(infile);
    printf("reading %s\n", sourcefile);
    infile >> source.NSource;
    cout<< source.NSource<<"\n";
    // Skip the remainder of the header up to the '#' marker.
    infile.ignore(512,'#');
    source.natom=new unsigned int[source.NSource];
    source.type=new int[source.NSource];
    source.shape=new int[source.NSource];
    source.shapecoeff=new float[source.NSource*6];
    for(int i=0;i<source.NSource;i++)
    {
        infile >> source.natom[i] >> source.type[i] >> source.shape[i];
        cout<< i <<" "<< source.natom[i]<<" " << source.type[i]<<" "<< source.shape[i];
        for(int j=0;j<6;j++)
        {
            infile>>source.shapecoeff[6*i+j];
            cout<<" "<<source.shapecoeff[6*i+j];
        }
        cout<<endl;
    }
    printf("finish read: source;\n\n");
    infile.close();
    return source;
}

void spline(float *X, float *Y, float *A, float *B, float *C, float *D, float S1, float SN, int N)
// possible error from FORTRAN to C
/* CUBIC SPLINE INTERPOLATION BETWEEN TABULATED DATA.
C  INPUT:
C     X(I) (I=1, ...,N) ........ GRID POINTS.
C                    (THE X VALUES MUST BE IN INCREASING ORDER).
C     Y(I) (I=1, ...,N) ........ CORRESPONDING FUNCTION VALUES.
C     S1,SN ..... SECOND DERIVATIVES AT X(1) AND X(N).
C            (THE NATURAL SPLINE CORRESPONDS TO TAKING S1=SN=0).
C     N ........................ NUMBER OF GRID POINTS.
C
C  THE INTERPOLATING POLYNOMIAL IN THE I-TH INTERVAL, FROM
C  X(I) TO X(I+1), IS PI(X)=A(I)+X*(B(I)+X*(C(I)+X*D(I))).
C
C  OUTPUT:
C     A(I),B(I),C(I),D(I) ...... SPLINE COEFFICIENTS.
C
C  REF.: M.J. MARON, 'NUMERICAL ANALYSIS: A PRACTICAL
C        APPROACH', MACMILLAN PUBL. CO., NEW YORK 1982.
C*************************************************************/
{
    // linear interpolation, you can use the for loop here and comment the following lines.
    /*
    for(int i = 0; i< N-1; i++)
    {
        B[i] = (Y[i+1]-Y[i])/(X[i+1]-X[i]);
        A[i] = (Y[i]*X[i+1] - X[i]*Y[i+1])/(X[i+1]-X[i]);
        C[i] = 0.0;
        D[i] = 0.0;
    }*/
    //IMPLICIT DOUBLE PRECISION (A-H,O-Z)
    // DIMENSION X(N),Y(N),A(N),B(N),C(N),D(N)
    if(N < 4)
    {
        printf("SPLINE INTERPOLATION CANNOT BE PERFORMED WITH %d POINTS. STOP.\n",N);
        exit(1);
    }
    int N1 = N-1;
    int N2 = N-2;
    // AUXILIARY ARRAYS H(=A) AND DELTA(=D).
    for(int i = 0; i < N1; i++)
    {
        if(X[i+1]-X[i] < 1.0e-10)
        {
            printf("SPLINE X VALUES NOT IN INCREASING ORDER. STOP.\n");
            exit(1);
        }
        A[i] = X[i+1] - X[i];
        D[i] = (Y[i+1] - Y[i])/A[i];
    }
    // SYMMETRIC COEFFICIENT MATRIX (AUGMENTED).
    for(int i = 0; i < N2; i++)
    {
        B[i] = 2.0F * (A[i] + A[i+1]);
        int k = N1 - i - 1;
        D[k] = 6.0F * (D[k] - D[k-1]);
    }
    // Fold the prescribed end second-derivatives into the right-hand side.
    D[1] -= A[0] * S1;
    D[N1-1] -= A[N1-1] * SN;
    // GAUSS SOLUTION OF THE TRIDIAGONAL SYSTEM (forward elimination).
    for(int i = 1; i < N2; i++)
    {
        float R = A[i]/B[i-1];
        B[i] -= R * A[i];
        D[i+1] -= R * D[i];
    }
    // THE SIGMA COEFFICIENTS ARE STORED IN ARRAY D (back substitution).
    D[N1-1] = D[N1-1]/B[N2-1];
    for(int i = 1; i < N2; i++)
    {
        int k = N1 - i - 1;
        D[k] = (D[k] - A[k] * D[k+1])/B[k-1];
    }
    D[N-1] = SN;
    // SPLINE COEFFICIENTS: convert second derivatives into the per-interval
    // cubic coefficients A,B,C,D of PI(X) above (A and D are overwritten).
    float SI1 = S1;
    for(int i = 0; i < N1; i++)
    {
        float SI = SI1;
        SI1 = D[i+1];
        float H = A[i];
        float HI = 1.0F/H;
        A[i] = (HI/6.0F)*(SI*X[i+1]*X[i+1]*X[i+1]-SI1*X[i]*X[i]*X[i]) +HI*(Y[i]*X[i+1]-Y[i+1]*X[i]) +(H/6.0F)*(SI1*X[i]-SI*X[i+1]);
        B[i] = (HI/2.0F)*(SI1*X[i]*X[i]-SI*X[i+1]*X[i+1]) +HI*(Y[i+1]-Y[i])+(H/6.0F)*(SI-SI1);
        C[i] = (HI/2.0F)*(SI*X[i+1]-SI1*X[i]);
        D[i] = (HI/6.0F)*(SI1-SI);
    }
    return;
}

/*******************************************************************
c* Seeds the per-thread RNG state on the device: draws NRAND seeds  *
c* with the host rand(), uploads them, and lets the setupcuseed     *
c* kernel initialize the device-side generators.                    *
c* NOTE(review): CUDA API return codes and the malloc result are    *
c* not checked here.                                                *
c******************************************************************/
void inirngG()
{
    srand( (unsigned int)time(NULL) ); // generate randseed at CPU
    int *iseed1_h = (int*) malloc(sizeof(int)*NRAND);
    for(int i = 0; i < NRAND; i++)
    {
        iseed1_h[i] = rand();
    }
    int *iseed1;
    cudaMalloc((void**) &iseed1, sizeof(int)*NRAND);
    // copy to GPU
    cudaMemcpy(iseed1, iseed1_h, sizeof(int)*NRAND, cudaMemcpyHostToDevice);
    free(iseed1_h);
    // One thread per seed, ceil-divided into blocks.
    int nblocks;
    nblocks = 1 + (NRAND - 1)/NTHREAD_PER_BLOCK_GPET ;
    setupcuseed<<<nblocks, NTHREAD_PER_BLOCK_GPET>>>(iseed1);
    cudaDeviceSynchronize();
    cudaFree(iseed1);
}

/*******************************************************************
c* Reads material data from data/input4gPET.matter.                 *
c* Outputs the usable photon energy window [*eminph, *emax] (eV)    *
c* and sets the global/device material count nmat_h / nmat.         *
c* Per-material composition values are parsed but discarded; only   *
c* the file cursor position matters for the readers that follow.    *
c* NOTE(review): fscanf/fgets return values are never checked, so a *
c* malformed file silently desynchronizes the whole parse.          *
c******************************************************************/
void rmater(float *eminph, float *emax)
{
    char buffer[100];
    float shigh,slow,ecross, temp,wcc,wcb;
    //char fname[] = "data/pre4phot.matter";
    char fname[] = "data/input4gPET.matter";
    printf("rmater: Reading %s\n", fname);
    FILE *fp = fopen(fname,"r");
    FILEEXIST(fp);
    // Skip the four header lines; echo the last one.
    fgets(buffer, 100, fp);
    fgets(buffer, 100, fp);
    fgets(buffer, 100, fp);
    fgets(buffer, 100, fp);
    printf("%s\n",buffer);
    // Energy window; the middle value (electron emin, presumably) is unused here.
    fscanf(fp,"%f %f %f\n",eminph, &temp, emax);
    printf("%e %e %e\n",*eminph,temp, *emax);
    fgets(buffer, 100, fp);
    fscanf(fp,"%f %f\n",&wcc, &wcb);
    fgets(buffer, 100, fp);
    fscanf(fp,"%f %f %f\n",&shigh,&slow,&ecross);
    fgets(buffer, 100, fp);
    fscanf(fp,"%d\n", &nmat_h);
    // Mirror the material count into device constant memory.
    cudaMemcpyToSymbol(nmat, &nmat_h, sizeof(int), 0, cudaMemcpyHostToDevice) ;
    if (nmat_h > MAXMAT)
    {
        printf("rmater:error: Too many materials.\n");
        exit(1);
    }
    for(int i = 0; i < nmat_h; i++)
    {
        // Read name of material, remove trailing blanks:
        float matden;
        int nelem;
        fgets(buffer,100,fp);
        fgets(buffer, 100, fp);
        fscanf(fp,"%f\n", &matden);
        fgets(buffer, 100, fp);
        fscanf(fp,"%d\n",&nelem);
        // One line per constituent element; contents unused.
        for(int j = 0; j < nelem; j++)
        {
            fgets(buffer, 100, fp);
        }
        fgets(buffer, 100, fp);
        float atnotemp,atno2temp;
        fscanf(fp,"%f %f %f\n",&atnotemp, &atno2temp, &temp);
        fgets(buffer, 100, fp);
        float mass;
        fscanf(fp,"%f\n", &mass);
        fgets(buffer, 100, fp);
        float zmass,z2mass;
        fscanf(fp,"%f %f\n", &zmass,&z2mass);
    }
    fclose(fp);
    printf("\nread material: Done.\n");
}

/*******************************************************************
c* Reads photon total inverse mean free path data from file and     *
c* sets up interpolation matrices: fits a natural cubic spline per  *
c* material (via spline()), stores the coefficients in lampha_h..   *
c* lamphd_h for host-side itphip(), and uploads the raw table into  *
c* the lamph texture for device-side lookup.                        *
c* Requires rmater() to have set nmat_h first.                      *
c* NOTE(review): cudaMemcpyToArray/cudaBindTextureToArray are       *
c* deprecated texture-reference APIs (removed in CUDA 12).          *
c******************************************************************/
void rlamph()
{
    char buffer[100];
    int ndata;
    float dummya[NLAPH],dummyb[NLAPH],dummyc[NLAPH],dummyd[NLAPH];
    //char fname[] = "data/pre4phot.lamph";
    char fname[]="data/input4gPET.lamph";
    printf("rlamph: Reading %s\n", fname);
    FILE *fp = fopen(fname,"r");
    FILEEXIST(fp);
    fgets(buffer,100,fp);
    fgets(buffer,100,fp);
    for(int j = 0; j < nmat_h; j++)
    {
        fgets(buffer,100,fp);
        float temp;
        fscanf(fp,"%d %f %f %f %f\n",&ndata,&temp,&temp,&temp,&temp);
        if (ndata != NLAPH)
        {
            printf("rlamph:error: Array dim do not match:\n");
            printf("%d %d\n", ndata,NLAPH);
            exit(1);
        }
        fgets(buffer,100,fp);
        // Preparing interpolation: elaph_h is the (shared) energy grid,
        // lamph_h the per-material table, linearized as j*NLAPH+i by ind2To1.
        for(int i = 0; i < NLAPH; i++)
        {
            fscanf(fp,"%f %f\n",&elaph_h[i],&lamph_h[ind2To1(j,i,MAXMAT,NLAPH)]);
        }
        fgets(buffer,100,fp);
        spline(elaph_h, &lamph_h[ind2To1(j,0,MAXMAT,NLAPH)],dummya,dummyb,dummyc,dummyd,0.0F,0.0F,NLAPH);
        // Loading dummy arrays into multimaterial sp matrices:
        for(int i = 0; i < NLAPH; i++)
        {
            lampha_h[ind2To1(j,i,MAXMAT,NLAPH)] = dummya[i];
            lamphb_h[ind2To1(j,i,MAXMAT,NLAPH)] = dummyb[i];
            lamphc_h[ind2To1(j,i,MAXMAT,NLAPH)] = dummyc[i];
            lamphd_h[ind2To1(j,i,MAXMAT,NLAPH)] = dummyd[i];
        }
    }
    fclose(fp);
    // Inverse grid spacing for O(1) energy->index mapping (grid assumed uniform).
    idleph_h = (NLAPH-1)/(elaph_h[NLAPH-1]-elaph_h[0]);
    cudaMemcpyToSymbol(idleph, &idleph_h, sizeof(float), 0, cudaMemcpyHostToDevice) ;
    cudaMemcpyToSymbol(elaph0, &elaph_h[0], sizeof(float), 0, cudaMemcpyHostToDevice);
    cudaMallocArray(&lamph, &lamph_tex.channelDesc, NLAPH*MAXMAT, 1);
    cudaMemcpyToArray(lamph, 0, 0, lamph_h, sizeof(float)*NLAPH*MAXMAT, cudaMemcpyHostToDevice);
    lamph_tex.filterMode = cudaFilterModeLinear;
    cudaBindTextureToArray(lamph_tex, lamph);
}

/*******************************************************************
c* Reads Compton inverse mean free path data from file and sets up  *
c* the compt texture (linear filtering) plus the idlecp/ecmpt0      *
c* constants used to map energy to a table index on the device.     *
c* Requires rmater() to have set nmat_h first.                      *
c******************************************************************/
void rcompt()
{
    char buffer[100];
    int ndata;
    //char fname[] = "data/pre4phot.compt";
    char fname[]= "data/input4gPET.compt";
    printf("rcompt: Reading %s\n", fname);
    FILE *fp = fopen(fname, "r");
    FILEEXIST(fp);
    fgets(buffer,100,fp);
    fgets(buffer,100,fp);
    for(int j = 0; j < nmat_h; j++)
    {
        fgets(buffer,100,fp);
        float temp;
        fscanf(fp,"%d %f %f %f %f\n",&ndata,&temp,&temp,&temp,&temp);
        if (ndata != NCMPT)
        {
            printf("rcompt:error: Array dim do not match:\n");
            printf("%d %d \n", ndata,NCMPT);
            exit(1);
        }
        fgets(buffer,100,fp);
        // Preparing interpolation:
        for(int i = 0; i <NCMPT; i++)
        {
            fscanf(fp,"%f %f\n",&ecmpt_h[i],&compt_h[ind2To1(j,i,MAXMAT,NCMPT)]);
        }
        fgets(buffer,100,fp);
    }
    fclose(fp);
    idlecp_h = (NCMPT-1)/(ecmpt_h[NCMPT-1]-ecmpt_h[0]);
    cudaMemcpyToSymbol(idlecp, &idlecp_h, sizeof(float), 0, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(ecmpt0, &ecmpt_h[0], sizeof(float), 0, cudaMemcpyHostToDevice) ;
    cudaMallocArray(&compt, &compt_tex.channelDesc, NCMPT*MAXMAT, 1);
    cudaMemcpyToArray(compt, 0, 0, compt_h, sizeof(float)*NCMPT*MAXMAT, cudaMemcpyHostToDevice);
    compt_tex.filterMode = cudaFilterModeLinear;
    cudaBindTextureToArray(compt_tex, compt);
}

/*******************************************************************
c* Reads Compton scattering function data from file and sets up a   *
c* per-material NCPCM x NECM surface, uploaded as a 3D texture      *
c* (s_tex over sArray) with grid-spacing inverses idcpcm/idecm.     *
c* Requires rmater() to have set nmat_h first.                      *
c* NOTE(review): unlike rmater/rlamph/rcompt, the fopen result is   *
c* not checked with FILEEXIST here.                                 *
c******************************************************************/
void rcmpsf()
{
    char buffer[100];
    //char fname[] = "data/pre4phot.cmpsf";
    char fname[]= "data/input4gPET.cmpsf";
    printf("rcmpsf: Reading %s\n", fname);
    FILE *fp = fopen(fname,"r");
    fgets(buffer,100,fp);
    fgets(buffer,100,fp);
    for(int j = 0; j < nmat_h; j++)
    {
        // read sf data (values discarded; only advances the file cursor)
        fgets(buffer,100,fp);
        float temp;
        int ndata;
        fscanf(fp,"%d %f %f %f\n",&ndata,&temp,&temp,&temp);
        fgets(buffer,100,fp);
        for(int i = 0; i < ndata; i++)
        {
            fscanf(fp,"%f %f %f\n",&temp, &temp, &temp);
        }
        // read s surface
        fgets(buffer,100,fp);
        int ncp, ne;
        float dcp, de;
        fscanf(fp,"%d %f %f %f %d %f %f %f\n", &ncp, &temp, &temp, &dcp, &ne, &temp, &temp, &de);
        if (ncp != NCPCM)
        {
            printf("rcmpsf:error: NCP dim do not match:\n");
            printf("%d %d\n", ncp,NCPCM);
            exit(1);
        }
        if (ne != NECM)
        {
            printf("rcmpsf:error: NE dim do not match:\n");
            printf("%d %d\n", ne,NECM);
            exit(1);
        }
        idcpcm_h = 1.0f/dcp;
        idecm_h = 1.0f/de;
        // Skip the two axis-value rows.
        for(int icp=0; icp <ncp; icp++) fscanf(fp,"%f ",&temp);
        fscanf(fp,"\n");
        for(int ie=0; ie <ne; ie++) fscanf(fp,"%f ",&temp);
        fscanf(fp,"\n");
        // Surface values, linearized as [material][cp][e].
        for(int icp=0; icp <ncp; icp++)
        {
            for(int ie = 0; ie<ne; ie++)
            {
                fscanf(fp,"%f ",&mucmpt_h[j*NCPCM*NECM+icp*NECM+ie]);
            }
            fscanf(fp,"\n");
        }
        fscanf(fp,"\n");
    }
    fclose(fp);
    // load to GPU
    cudaMemcpyToSymbol(idcpcm, &idcpcm_h, sizeof(float), 0, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(idecm, &idecm_h, sizeof(float), 0, cudaMemcpyHostToDevice) ;
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
    // Texture extent: x = energy, y = cp, z = material.
    const cudaExtent volumeSize = make_cudaExtent(NECM, NCPCM, MAXMAT);
    cudaMalloc3DArray(&sArray, &channelDesc, volumeSize) ;
    cudaMemcpy3DParms copyParams = {0};
    copyParams.srcPtr = make_cudaPitchedPtr((void*)mucmpt_h, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
    copyParams.dstArray = sArray;
    copyParams.extent = volumeSize;
    copyParams.kind = cudaMemcpyHostToDevice;
    cudaMemcpy3D(&copyParams) ;
    s_tex.normalized = false;
    s_tex.filterMode = cudaFilterModeLinear;
    cudaBindTextureToArray(s_tex, sArray, channelDesc);
}

/*******************************************************************
c* Reads photoelectric inverse mean free path data from file and    *
c* sets up the phote texture plus the idlepe/ephte0 constants.      *
c* Requires rmater() to have set nmat_h first.                      *
c* NOTE(review): fopen result not checked with FILEEXIST.           *
c******************************************************************/
void rphote()
{
    char buffer[100];
    int ndata;
    //char fname[] = "data/pre4phot.phote";
    char fname[]= "data/input4gPET.phote";
    printf("rphote: Reading %s\n", fname);
    FILE *fp = fopen(fname,"r");
    fgets(buffer,100,fp);
    fgets(buffer,100,fp);
    for(int j = 0; j < nmat_h; j++)
    {
        fgets(buffer,100,fp);
        float temp;
        fscanf(fp,"%d %f %f %f %f\n",&ndata,&temp,&temp,&temp,&temp);
        if (ndata != NPHTE)
        {
            printf("rphote:error: Array dim do not match:\n");
            printf("%d %d\n", ndata,NPHTE);
            exit(1);
        }
        fgets(buffer,100,fp);
        // Preparing interpolation
        for(int i = 0; i < NPHTE; i++)
        {
            fscanf(fp,"%f %f\n",&ephte_h[i],&phote_h[ind2To1(j,i,MAXMAT,NPHTE)]);
        }
        fgets(buffer,100,fp);
    }
    fclose(fp);
    idlepe_h = (NPHTE-1)/(ephte_h[NPHTE-1]-ephte_h[0]);
    cudaMemcpyToSymbol(idlepe, &idlepe_h, sizeof(float), 0, cudaMemcpyHostToDevice) ;
    cudaMemcpyToSymbol(ephte0, &ephte_h[0], sizeof(float), 0, cudaMemcpyHostToDevice);
    cudaMallocArray(&phote, &phote_tex.channelDesc, NPHTE*MAXMAT, 1);
    cudaMemcpyToArray(phote, 0, 0, phote_h, sizeof(float)*NPHTE*MAXMAT, cudaMemcpyHostToDevice);
    phote_tex.filterMode = cudaFilterModeLinear;
    cudaBindTextureToArray(phote_tex, phote);
}

/*******************************************************************
c* Reads rayleigh inverse mean free path data from file and sets up *
c* the rayle texture plus the idlerl/erayl0 constants.              *
c* Requires rmater() to have set nmat_h first.                      *
c* NOTE(review): fopen result not checked with FILEEXIST.           *
c******************************************************************/
void rrayle()
{
    char buffer[100];
    int ndata;
    //char fname[] = "data/pre4phot.rayle";
    char fname[]="data/input4gPET.rayle";
    printf("rrayle: Reading %s\n", fname);
    FILE *fp = fopen(fname,"r");
    fgets(buffer,100,fp);
    fgets(buffer,100,fp);
    for(int j = 0; j < nmat_h; j++)
    {
        fgets(buffer,100,fp);
        float temp;
        fscanf(fp,"%d %f %f %f %f\n",&ndata,&temp,&temp,&temp,&temp);
        if (ndata != NRAYL)
        {
            printf("rrayle:error: Array dim do not match:\n");
            printf("%d %d\n", ndata,NRAYL);
            exit(1);
        }
        fgets(buffer,100,fp);
        // Preparing interpolation
        for(int i = 0; i < NRAYL; i++)
        {
            fscanf(fp,"%f %f\n",&erayl_h[i],&rayle_h[ind2To1(j,i,MAXMAT,NRAYL)]);
        }
        fgets(buffer,100,fp);
    }
    fclose(fp);
    idlerl_h = (NRAYL-1)/(erayl_h[NRAYL-1]-erayl_h[0]);
    cudaMemcpyToSymbol(idlerl, &idlerl_h, sizeof(float), 0, cudaMemcpyHostToDevice) ;
    cudaMemcpyToSymbol(erayl0, &erayl_h[0], sizeof(float), 0, cudaMemcpyHostToDevice) ;
    cudaMallocArray(&rayle, &rayle_tex.channelDesc, NRAYL*MAXMAT, 1);
    cudaMemcpyToArray(rayle, 0, 0, rayle_h, sizeof(float)*NRAYL*MAXMAT, cudaMemcpyHostToDevice);
    rayle_tex.filterMode = cudaFilterModeLinear;
    cudaBindTextureToArray(rayle_tex, rayle);
}

/*******************************************************************
c* Reads Rayleigh scattering form factor data from file and sets up *
c* a per-material NCPRL x NERL surface as a 3D texture (f_tex over  *
c* fArray) with grid-spacing inverses idcprl/iderl. Same file       *
c* layout as rcmpsf().                                              *
c* Requires rmater() to have set nmat_h first.                      *
c* NOTE(review): fopen result not checked with FILEEXIST.           *
c******************************************************************/
void rrayff()
{
    char buffer[100];
    //char fname[] = "data/pre4phot.rayff";
    char fname[]= "data/input4gPET.rayff";
    printf("rrayff: Reading %s\n", fname);
    FILE *fp = fopen(fname,"r");
    fgets(buffer,100,fp);
    fgets(buffer,100,fp);
    for(int j = 0; j < nmat_h; j++)
    {
        // read ff data (values discarded; only advances the file cursor)
        fgets(buffer,100,fp);
        float temp;
        int ndata;
        fscanf(fp,"%d %f %f %f\n",&ndata,&temp,&temp,&temp);
        fgets(buffer,100,fp);
        for(int i = 0; i < ndata; i++)
        {
            fscanf(fp,"%f %f %f\n",&temp, &temp, &temp);
        }
        // read f surface
        fgets(buffer,100,fp);
        int ncp, ne;
        float dcp, de;
        fscanf(fp,"%d %f %f %f %d %f %f %f\n", &ncp, &temp, &temp, &dcp, &ne, &temp, &temp, &de);
        if (ncp != NCPRL)
        {
            printf("rrayff:error: NCP dim do not match:\n");
            printf("%d %d\n", ncp,NCPRL);
            exit(1);
        }
        if (ne != NERL)
        {
            printf("rrayff:error: NE dim do not match:\n");
            printf("%d %d\n", ne,NERL);
            exit(1);
        }
        idcprl_h = 1.0f/dcp;
        iderl_h = 1.0f/de;
        // Skip the two axis-value rows.
        for(int icp=0; icp <ncp; icp++) fscanf(fp,"%f ",&temp);
        fscanf(fp,"\n");
        for(int ie=0; ie <ne; ie++) fscanf(fp,"%f ",&temp);
        fscanf(fp,"\n");
        // Surface values, linearized as [material][cp][e].
        for(int icp=0; icp <ncp; icp++)
        {
            for(int ie = 0; ie<ne; ie++)
            {
                fscanf(fp,"%f ",&murayl_h[j*NCPRL*NERL+icp*NERL+ie]);
            }
            fscanf(fp,"\n");
        }
        fscanf(fp,"\n");
    }
    fclose(fp);
    // load to GPU
    cudaMemcpyToSymbol(idcprl, &idcprl_h, sizeof(float), 0, cudaMemcpyHostToDevice) ;
    cudaMemcpyToSymbol(iderl, &iderl_h, sizeof(float), 0, cudaMemcpyHostToDevice) ;
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
    // Texture extent: x = energy, y = cp, z = material.
    const cudaExtent volumeSize = make_cudaExtent(NERL, NCPRL, MAXMAT);
    cudaMalloc3DArray(&fArray, &channelDesc, volumeSize) ;
    cudaMemcpy3DParms copyParams = {0};
    copyParams.srcPtr = make_cudaPitchedPtr((void*)murayl_h, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
    copyParams.dstArray = fArray;
    copyParams.extent = volumeSize;
    copyParams.kind = cudaMemcpyHostToDevice;
    cudaMemcpy3D(&copyParams);
    f_tex.normalized = false;
    f_tex.filterMode = cudaFilterModeLinear;
    cudaBindTextureToArray(f_tex, fArray, channelDesc);
}

/*******************************************************************
c* Photon total inverse mean free path --3spline interpolation      *
c*                                                                  *
c*    Input:                                                        *
c*      matid -> material id#                                       *
c*      e -> kinetic energy in eV                                   *
c*    Output:                                                       *
c*      Total inverse mean free path in cm^2/g                      *
c*    Uses the spline coefficients built by rlamph().               *
c*    NOTE(review): the table index i is not clamped — e outside    *
c*    [elaph_h[0], elaph_h[NLAPH-1]] reads out of bounds.           *
c******************************************************************/
float itphip(int matid, float e)
{
    int i;
    // Uniform-grid index: (e - e0) * inverse spacing, truncated.
    i = int(idleph_h*(e-elaph_h[0]));
    // Evaluate the cubic in Horner form with the per-interval coefficients.
    return lampha_h[ind2To1(matid,i,MAXMAT,NLAPH)] + e*(lamphb_h[ind2To1(matid,i,MAXMAT,NLAPH)] + e*(lamphc_h[ind2To1(matid,i,MAXMAT,NLAPH)] + e*lamphd_h[ind2To1(matid,i,MAXMAT,NLAPH)] ));
}
void iniwck(float eminph,float emax, Phantom phantom) /******************************************************************* c* Finds information used to transport photons with the Woodcock* c* technique * c* * c* Input: * c* eminph -> minimum photon energy in data files (eV) * c* emax -> maximum photon energy in data files (eV) * c* Output * c* bytes -> space allocated for arrays * c* Comments: * c* -> common /dpmsrc/ must be loaded previously * c* -> rlamph() must be called previously * c* -> emax reduced to avoid reaching the end of interpol table* c******************************************************************/ { float maxden[MAXMAT],de,e,ymax,ycanbe; const float eps = 1.0e-10F; unsigned int NXYZ = phantom.Unxvox*phantom.Unyvox*phantom.Unzvox; printf("iniwck phantom: Started.\n"); // Find the largest density for each present material: for(int i = 0; i < MAXMAT; i++) { maxden[i] = 0.0F; } for(int vox = 0; vox < NXYZ; vox++) { if (phantom.dens[vox] > maxden[phantom.mat[vox]]) maxden[phantom.mat[vox]] = phantom.dens[vox]; } // Prepare data: wcke0_h = eminph; de = (emax*(1.0F - eps ) - wcke0_h ) / NWCK; idlewk_h = 1.0F/de; for(int i = 0; i < NWCK; i++) { e = wcke0_h + de*i; ymax = 0.0; for(int j = 0; j < nmat_h; j++) { ycanbe = itphip(j,e)*maxden[j]; if (ycanbe > ymax) ymax = ycanbe; } woock_h[i] = 1.0F/ymax; } cudaMemcpyToSymbol(idlewk, &idlewk_h, sizeof(float), 0, cudaMemcpyHostToDevice) ; cudaMemcpyToSymbol(wcke0, &wcke0_h, sizeof(float), 0, cudaMemcpyHostToDevice) ; cudaMallocArray(&woock, &woock_tex.channelDesc, NWCK, 1); cudaMemcpyToArray(woock, 0, 0, woock_h, sizeof(float)*NWCK, cudaMemcpyHostToDevice); woock_tex.filterMode = cudaFilterModeLinear; cudaBindTextureToArray(woock_tex, woock); } void initPhantom(Phantom phantom) { printf("CT dimension: %d %d %d\n", phantom.Unxvox, phantom.Unyvox, phantom.Unzvox); printf("CT resolution: %f %f %f\n", phantom.dx, phantom.dy, phantom.dz); cudaMemcpyToSymbol(Unxvox, &phantom.Unxvox, sizeof(int), 0, 
cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Unyvox, &phantom.Unyvox, sizeof(int), 0, cudaMemcpyHostToDevice) ; cudaMemcpyToSymbol(Unzvox, &phantom.Unzvox, sizeof(int), 0, cudaMemcpyHostToDevice) ; cudaMemcpyToSymbol(dx_gBrachy, &phantom.dx, sizeof(float), 0, cudaMemcpyHostToDevice) ; cudaMemcpyToSymbol(dy_gBrachy, &phantom.dy, sizeof(float), 0, cudaMemcpyHostToDevice) ; cudaMemcpyToSymbol(dz_gBrachy, &phantom.dz, sizeof(float), 0, cudaMemcpyHostToDevice) ; float idx_gBrachy_h,idy_gBrachy_h,idz_gBrachy_h; idx_gBrachy_h = 1.0F/phantom.dx; cudaMemcpyToSymbol(idx_gBrachy, &idx_gBrachy_h, sizeof(float), 0, cudaMemcpyHostToDevice) ; idy_gBrachy_h = 1.0F/phantom.dy; cudaMemcpyToSymbol(idy_gBrachy, &idy_gBrachy_h, sizeof(float), 0, cudaMemcpyHostToDevice) ; idz_gBrachy_h = 1.0F/phantom.dz; cudaMemcpyToSymbol(idz_gBrachy, &idz_gBrachy_h, sizeof(float), 0, cudaMemcpyHostToDevice) ; cudaMemcpyToSymbol(Offsetx_gBrachy, &phantom.Offsetx, sizeof(float), 0, cudaMemcpyHostToDevice) ; cudaMemcpyToSymbol(Offsety_gBrachy, &phantom.Offsety, sizeof(float), 0, cudaMemcpyHostToDevice) ; cudaMemcpyToSymbol(Offsetz_gBrachy, &phantom.Offsetz, sizeof(float), 0, cudaMemcpyHostToDevice) ; cudaExtent volumeSize = make_cudaExtent(phantom.Unxvox, phantom.Unyvox, phantom.Unzvox); CUDA_CALL(cudaMalloc3DArray(&mat, &mat_tex.channelDesc, volumeSize)); CUDA_CALL(cudaMalloc3DArray(&dens, &dens_tex.channelDesc, volumeSize)); // create a 3d array on device cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr((void*)phantom.mat, volumeSize.width*sizeof(int), volumeSize.width, volumeSize.height); copyParams.dstArray = mat; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams) ; // copy data from host to device mat_tex.normalized = false; mat_tex.filterMode = cudaFilterModePoint; cudaBindTextureToArray(mat_tex, mat, mat_tex.channelDesc); // bind to texture memory copyParams.srcPtr = make_cudaPitchedPtr((void*)phantom.dens, 
volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = dens; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams) ; // copy data from host to device dens_tex.normalized = false; dens_tex.filterMode = cudaFilterModePoint; cudaBindTextureToArray(dens_tex, dens, dens_tex.channelDesc); // bind to texture memory }//*/ void init(Phantom phantom) /******************************************************************* c* Initializes the gCTD system * c******************************************************************/ { initPhantom(phantom); cudaMemcpyToSymbol(eabsph, &eabsph_h, sizeof(float), 0, cudaMemcpyHostToDevice); // in GPU, initialize rand seed with rand numbers inirngG(); rmater(&eminph, &emax);//no use? printf("\n"); if(eabsph_h <eminph) { printf("init:error: Eabs out of range.\n"); exit(1); } // load total cross section rlamph(); // load compton cross section rcompt(); rcmpsf(); // load photoelectric cross section rphote(); // load rayleigh cross section and form factors rrayle(); rrayff(); // iniwck must be called after reading esrc & eabsph: iniwck(eminph, emax, phantom); printf("\n\nInitialize : Done.\n");//*/ } void iniwck(float eminph,float emax, struct object_v* objectMaterial) //for detector { float maxden[MAXMAT],de,e,ymax,ycanbe; const float eps = 1.0e-10F; printf("\n"); printf("\n"); printf("iniwck detector: Started.\n"); // Find the largest density for each present material: for(int i = 0; i < MAXMAT; i++) { maxden[i] = 0.0F; } for(int i=0; i<2; i++) { if (objectMaterial[i].density > maxden[objectMaterial[i].material]) maxden[objectMaterial[i].material] = objectMaterial[i].density; } // Prepare data: wcke0_h = eminph; de = (emax*(1.0F - eps ) - wcke0_h ) / NWCK; idlewk_h = 1.0F/de; for(int i = 0; i < NWCK; i++) { e = wcke0_h + de*i; ymax = 0.0; for(int j = 0; j < nmat_h; j++) { ycanbe = itphip(j,e)*maxden[j]; if (ycanbe > ymax) ymax = ycanbe; } woock_h[i] = 1.0F/ymax; 
/*if (i<1100 && i>1090) printf("1/lamda=%f\n",woock_h[i]);*/ } cudaMemcpyToSymbol(idlewk, &idlewk_h, sizeof(float), 0, cudaMemcpyHostToDevice) ; cudaMemcpyToSymbol(wcke0, &wcke0_h, sizeof(float), 0, cudaMemcpyHostToDevice) ; cudaMallocArray(&woockde, &woockde_tex.channelDesc, NWCK, 1); cudaMemcpyToArray(woockde, 0, 0, woock_h, sizeof(float)*NWCK, cudaMemcpyHostToDevice); woockde_tex.filterMode = cudaFilterModeLinear; cudaBindTextureToArray(woockde_tex, woockde); } void iniPanel(struct object_t* objectArray, struct object_v* objectMaterial,int totalOb) /******************************************************************* c* Initializes the module system * c******************************************************************/ { printf(" \n"); printf("init: Panel geometry;\n"); // copy arrays from host to device int *ma=new int[2]; float *den=new float[2]; int *p_id=new int[totalOb]; float *lx_m=new float[totalOb]; float *ly_m=new float[totalOb]; float *lz_m=new float[totalOb]; float *Mx_m=new float[totalOb]; float *My_m=new float[totalOb]; float *Mz_m=new float[totalOb]; float *Msx_m=new float[totalOb]; float *Msy_m=new float[totalOb]; float *Msz_m=new float[totalOb]; float *Lx_m=new float[totalOb]; float *Ly_m=new float[totalOb]; float *Lz_m=new float[totalOb]; float *sx_m=new float[totalOb]; float *sy_m=new float[totalOb]; float *sz_m=new float[totalOb]; float *ox_m=new float[totalOb]; float *oy_m=new float[totalOb]; float *oz_m=new float[totalOb]; float *dx_m=new float[totalOb]; float *dy_m=new float[totalOb]; float *dz_m=new float[totalOb]; float *UXx_m=new float[totalOb]; float *UXy_m=new float[totalOb]; float *UXz_m=new float[totalOb]; float *UYx_m=new float[totalOb]; float *UYy_m=new float[totalOb]; float *UYz_m=new float[totalOb]; float *UZx_m=new float[totalOb]; float *UZy_m=new float[totalOb]; float *UZz_m=new float[totalOb]; for (int i=0;i<2;i++) { ma[i]=objectMaterial[i].material; den[i]=objectMaterial[i].density; } for (int i=0;i<totalOb;i++) { 
p_id[i]=objectArray[i].panel; lx_m[i]=objectArray[i].lengthx; ly_m[i]=objectArray[i].lengthy; lz_m[i]=objectArray[i].lengthz; Mx_m[i]=objectArray[i].MODx; My_m[i]=objectArray[i].MODy; Mz_m[i]=objectArray[i].MODz; Msx_m[i]=objectArray[i].Mspacex; Msy_m[i]=objectArray[i].Mspacey; Msz_m[i]=objectArray[i].Mspacez; Lx_m[i]=objectArray[i].LSOx; Ly_m[i]=objectArray[i].LSOy; Lz_m[i]=objectArray[i].LSOz; sx_m[i]=objectArray[i].spacex; sy_m[i]=objectArray[i].spacey; sz_m[i]=objectArray[i].spacez; ox_m[i]=objectArray[i].offsetx; oy_m[i]=objectArray[i].offsety; oz_m[i]=objectArray[i].offsetz; dx_m[i]=objectArray[i].directionx; dy_m[i]=objectArray[i].directiony; dz_m[i]=objectArray[i].directionz; UXx_m[i]=objectArray[i].UniXx; UXy_m[i]=objectArray[i].UniXy; UXz_m[i]=objectArray[i].UniXz; UYx_m[i]=objectArray[i].UniYx; UYy_m[i]=objectArray[i].UniYy; UYz_m[i]=objectArray[i].UniYz; UZx_m[i]=objectArray[i].UniZx; UZy_m[i]=objectArray[i].UniZy; UZz_m[i]=objectArray[i].UniZz; } int Mn, Ln; Mn=floorf(ly_m[0]/(My_m[0]+Msy_m[0]))+1; Ln=floorf(My_m[0]/(Ly_m[0]+sy_m[0]))+1; //printf("Mn %d Ln %d\n", Mn, Ln); cudaMemcpyToSymbol(crystalNy, &Ln, sizeof(int)); cudaMemcpyToSymbol(moduleNy, &Mn, sizeof(int)); Mn*=floorf(lz_m[0]/(Mz_m[0]+Msz_m[0]))+1; Ln*=floorf(Mz_m[0]/(Lz_m[0]+sz_m[0]))+1; //printf("Mn %d Ln %d\n", Mn, Ln); cudaMemcpyToSymbol(crystalN, &Ln, sizeof(int)); cudaMemcpyToSymbol(moduleN, &Mn, sizeof(int)); cudaMemcpyToSymbol(dev_totalPanels, &totalOb, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMalloc((void**)&mat_panel, 2 * sizeof(int)); cudaMalloc((void**)&dens_panel, 2 * sizeof(float)); cudaMalloc((void**)&panelID, totalOb * sizeof(int)); cudaMalloc((void**)&lengthx_panel, totalOb * sizeof(float)); cudaMalloc((void**)&lengthy_panel, totalOb * sizeof(float)); cudaMalloc((void**)&lengthz_panel, totalOb * sizeof(float)); cudaMalloc((void**)&MODx_panel, totalOb * sizeof(float)); cudaMalloc((void**)&MODy_panel, totalOb * sizeof(float)); cudaMalloc((void**)&MODz_panel, totalOb * 
sizeof(float)); cudaMalloc((void**)&Mspacex_panel, totalOb * sizeof(float)); cudaMalloc((void**)&Mspacey_panel, totalOb * sizeof(float)); cudaMalloc((void**)&Mspacez_panel, totalOb * sizeof(float)); cudaMalloc((void**)&LSOx_panel, totalOb * sizeof(float)); cudaMalloc((void**)&LSOy_panel, totalOb * sizeof(float)); cudaMalloc((void**)&LSOz_panel, totalOb * sizeof(float)); cudaMalloc((void**)&spacex_panel, totalOb * sizeof(float)); cudaMalloc((void**)&spacey_panel, totalOb * sizeof(float)); cudaMalloc((void**)&spacez_panel, totalOb * sizeof(float)); cudaMalloc((void**)&offsetx_panel, totalOb * sizeof(float)); cudaMalloc((void**)&offsety_panel, totalOb * sizeof(float)); cudaMalloc((void**)&offsetz_panel, totalOb * sizeof(float)); cudaMalloc((void**)&directionx_panel, totalOb * sizeof(float)); cudaMalloc((void**)&directiony_panel, totalOb * sizeof(float)); cudaMalloc((void**)&directionz_panel, totalOb * sizeof(float)); cudaMalloc((void**)&UniXx_panel, totalOb * sizeof(float)); cudaMalloc((void**)&UniXy_panel, totalOb * sizeof(float)); cudaMalloc((void**)&UniXz_panel, totalOb * sizeof(float)); cudaMalloc((void**)&UniYx_panel, totalOb * sizeof(float)); cudaMalloc((void**)&UniYy_panel, totalOb * sizeof(float)); cudaMalloc((void**)&UniYz_panel, totalOb * sizeof(float)); cudaMalloc((void**)&UniZx_panel, totalOb * sizeof(float)); cudaMalloc((void**)&UniZy_panel, totalOb * sizeof(float)); cudaMalloc((void**)&UniZz_panel, totalOb * sizeof(float)); cudaMemcpy(mat_panel, ma, 2*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dens_panel, den, 2*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(panelID, p_id, totalOb*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(lengthx_panel, lx_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(lengthy_panel, ly_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(lengthz_panel, lz_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(MODx_panel, Mx_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); 
cudaMemcpy(MODy_panel, My_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(MODz_panel, Mz_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(Mspacex_panel, Msx_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(Mspacey_panel, Msy_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(Mspacez_panel, Msz_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(LSOx_panel, Lx_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(LSOy_panel, Ly_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(LSOz_panel, Lz_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(spacex_panel, sx_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(spacey_panel, sy_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(spacez_panel, sz_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(offsetx_panel, ox_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(offsety_panel, oy_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(offsetz_panel, oz_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(directionx_panel, dx_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(directiony_panel, dy_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(directionz_panel, dz_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(UniXx_panel, UXx_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(UniXy_panel, UXy_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(UniXz_panel, UXz_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(UniYx_panel, UYx_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(UniYy_panel, UYy_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(UniYz_panel, UYz_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(UniZx_panel, UZx_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(UniZy_panel, UZy_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); 
cudaMemcpy(UniZz_panel, UZz_m, totalOb*sizeof(float), cudaMemcpyHostToDevice); delete[] ma; delete[] den; delete[] p_id; delete[] lx_m; delete[] ly_m; delete[] lz_m; delete[] Mx_m; delete[] My_m; delete[] Mz_m; delete[] Msx_m; delete[] Msy_m; delete[] Msz_m; delete[] Lx_m; delete[] Ly_m; delete[] Lz_m; delete[] sx_m; delete[] sy_m; delete[] sz_m; delete[] ox_m; delete[] oy_m; delete[] oz_m; delete[] dx_m; delete[] dy_m; delete[] dz_m; delete[] UXx_m; delete[] UXy_m; delete[] UXz_m; delete[] UYx_m; delete[] UYy_m; delete[] UYz_m; delete[] UZx_m; delete[] UZy_m; delete[] UZz_m; printf("finish init: Module geometry;\n\n"); } #endif
cb982ce7329ee7715b0a1cf74d3929c353f1223c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <assert.h> const int DIM = 32; // print GB/s void postprocess(int n, float ms) { printf("%21f\t", n * sizeof(double)*1e-6 / ms ); //can be multiplied by 2 -> once for reading the matrix and the other //for writing. } //Read the in matrix using pieces of 32 items. // naive transpose __global__ void transposeNaive(double *out,double *in,int BLOCK) { int x = blockIdx.x * DIM + threadIdx.x; int y = blockIdx.y * DIM + threadIdx.y; for (int j = 0; j < DIM; j+= BLOCK) out[x*(gridDim.x * DIM) + (y+j)] = in[(y+j)*(gridDim.x * DIM) + x]; //each thread executing transpose DIM/BLOCK_i elements from in column into out row. } __global__ void transposeImproved(double *out, double *in,int BLOCK) { __shared__ double aux_mat[DIM][DIM]; int x = blockIdx.x * DIM + threadIdx.x; int y = blockIdx.y * DIM + threadIdx.y; for (int j = 0; j < DIM; j += BLOCK) aux_mat[threadIdx.y+j][threadIdx.x] = in[(y+j)*(gridDim.x * DIM) + x]; //use shared memory in order to transpose the matrix and write back to out in row-wise. __syncthreads(); //needed in order to ensure that all the writes are performed. 
x = blockIdx.y * DIM + threadIdx.x; // transpose block offset y = blockIdx.x * DIM + threadIdx.y; for (int j = 0; j < DIM; j += BLOCK) out[(y+j)*(gridDim.x * DIM) + x] = aux_mat[threadIdx.x][threadIdx.y + j]; } void RunTest(int BLOCK,const int nx,const int ny,const int size){ dim3 dimGrid(nx/DIM, ny/DIM, 1); dim3 dimBlock(DIM, BLOCK, 1); printf("%d\t",DIM*BLOCK); double *h_in = (double*)malloc(size); double *h_out = (double*)malloc(size); double *d_in, *d_out; hipMalloc(&d_in, size); hipMalloc(&d_out, size); // host for (int j = 0; j < ny; j++) for (int i = 0; i < nx; i++) h_in[j*nx + i] = i; // device hipMemcpy(d_in, h_in, size, hipMemcpyHostToDevice); // events for timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); float time_m; //time measures might contain overhead due to kernel lauch hipMemset(d_out, 0, size); hipEventRecord(startEvent, 0); hipLaunchKernelGGL(( transposeNaive), dim3(dimGrid), dim3(dimBlock), 0, 0, d_out, d_in,BLOCK); hipEventRecord(stopEvent, 0); hipEventSynchronize(stopEvent); hipEventElapsedTime(&time_m, startEvent, stopEvent); //milliseconds printf("%21f\t",time_m); hipMemcpy(h_out, d_out, size, hipMemcpyDeviceToHost); postprocess(nx * ny, time_m); hipMemset(d_out, 0, size); //Reset matrix so i don't have to allocate a new one hipEventRecord(startEvent, 0); hipLaunchKernelGGL(( transposeImproved), dim3(dimGrid), dim3(dimBlock), 0, 0, d_out, d_in,BLOCK); hipEventRecord(stopEvent, 0); hipEventSynchronize(stopEvent); hipEventElapsedTime(&time_m, startEvent, stopEvent); printf("%21f\t",time_m); hipMemcpy(h_out, d_out, size, hipMemcpyDeviceToHost); postprocess(nx * ny, time_m); printf("\n"); // cleanup hipEventDestroy(startEvent); hipEventDestroy(stopEvent); hipFree(d_out); hipFree(d_in); free(h_in); free(h_out); } int main(int argc, char **argv) { printf("# Threads Naive Transpose(ms) Naive Bandwidth (GB/s) Improved Transpose(ms) Improved Bandwidth (GB/s)\n"); //it's convenient to have num of 
threads < than elements in a submatrix. int BLOCK_1 = 2; //each thread transpose DIM/BLOCK_i elements in the matrix. int BLOCK_2 = 16; int BLOCK_3 = 32; //each block transpose a submatrix of DIMxDIM size const int nx = 8192; const int ny = 8192; const int size = nx*ny*sizeof(double); RunTest(BLOCK_1,nx,ny,size); RunTest(BLOCK_2,nx,ny,size); RunTest(BLOCK_3,nx,ny,size); }
cb982ce7329ee7715b0a1cf74d3929c353f1223c.cu
#include <stdio.h> #include <assert.h> const int DIM = 32; // print GB/s void postprocess(int n, float ms) { printf("%21f\t", n * sizeof(double)*1e-6 / ms ); //can be multiplied by 2 -> once for reading the matrix and the other //for writing. } //Read the in matrix using pieces of 32 items. // naive transpose __global__ void transposeNaive(double *out,double *in,int BLOCK) { int x = blockIdx.x * DIM + threadIdx.x; int y = blockIdx.y * DIM + threadIdx.y; for (int j = 0; j < DIM; j+= BLOCK) out[x*(gridDim.x * DIM) + (y+j)] = in[(y+j)*(gridDim.x * DIM) + x]; //each thread executing transpose DIM/BLOCK_i elements from in column into out row. } __global__ void transposeImproved(double *out, double *in,int BLOCK) { __shared__ double aux_mat[DIM][DIM]; int x = blockIdx.x * DIM + threadIdx.x; int y = blockIdx.y * DIM + threadIdx.y; for (int j = 0; j < DIM; j += BLOCK) aux_mat[threadIdx.y+j][threadIdx.x] = in[(y+j)*(gridDim.x * DIM) + x]; //use shared memory in order to transpose the matrix and write back to out in row-wise. __syncthreads(); //needed in order to ensure that all the writes are performed. 
x = blockIdx.y * DIM + threadIdx.x; // transpose block offset y = blockIdx.x * DIM + threadIdx.y; for (int j = 0; j < DIM; j += BLOCK) out[(y+j)*(gridDim.x * DIM) + x] = aux_mat[threadIdx.x][threadIdx.y + j]; } void RunTest(int BLOCK,const int nx,const int ny,const int size){ dim3 dimGrid(nx/DIM, ny/DIM, 1); dim3 dimBlock(DIM, BLOCK, 1); printf("%d\t",DIM*BLOCK); double *h_in = (double*)malloc(size); double *h_out = (double*)malloc(size); double *d_in, *d_out; cudaMalloc(&d_in, size); cudaMalloc(&d_out, size); // host for (int j = 0; j < ny; j++) for (int i = 0; i < nx; i++) h_in[j*nx + i] = i; // device cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice); // events for timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); float time_m; //time measures might contain overhead due to kernel lauch cudaMemset(d_out, 0, size); cudaEventRecord(startEvent, 0); transposeNaive<<<dimGrid, dimBlock>>>(d_out, d_in,BLOCK); cudaEventRecord(stopEvent, 0); cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&time_m, startEvent, stopEvent); //milliseconds printf("%21f\t",time_m); cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost); postprocess(nx * ny, time_m); cudaMemset(d_out, 0, size); //Reset matrix so i don't have to allocate a new one cudaEventRecord(startEvent, 0); transposeImproved<<<dimGrid, dimBlock>>>(d_out, d_in,BLOCK); cudaEventRecord(stopEvent, 0); cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&time_m, startEvent, stopEvent); printf("%21f\t",time_m); cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost); postprocess(nx * ny, time_m); printf("\n"); // cleanup cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); cudaFree(d_out); cudaFree(d_in); free(h_in); free(h_out); } int main(int argc, char **argv) { printf("# Threads Naive Transpose(ms) Naive Bandwidth (GB/s) Improved Transpose(ms) Improved Bandwidth (GB/s)\n"); //it's convenient to have num of threads < than elements in a submatrix. 
int BLOCK_1 = 2; //each thread transpose DIM/BLOCK_i elements in the matrix. int BLOCK_2 = 16; int BLOCK_3 = 32; //each block transpose a submatrix of DIMxDIM size const int nx = 8192; const int ny = 8192; const int size = nx*ny*sizeof(double); RunTest(BLOCK_1,nx,ny,size); RunTest(BLOCK_2,nx,ny,size); RunTest(BLOCK_3,nx,ny,size); }
bceb08cac9810950dcaca52a92e016e62fb525f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #include "rocblas.h" #include "headers/kernels.h" #include "headers/parameters.h" #include "headers/boundary_conditions.h" __constant__ struct SimulationParametes parameters_device; __constant__ struct Constants constants_device; __constant__ struct BoundaryInfo boundary_info_device; __constant__ int coords_device[18]; __constant__ real weights_device[9]; __constant__ int inverse_indices_device[9]; void CUDA_CHECK_ERROR() { hipError_t error = hipGetLastError(); if (error != hipSuccess) { printf("CUDA ERROR: %s\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } } void HANDLE_ERROR(hipError_t error) { if (error != hipSuccess) { printf("CUDA STATUS: %s\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } } void HANDLE_CUBLAS_ERROR(hipblasStatus_t stat) { if (stat != HIPBLAS_STATUS_SUCCESS) { printf("ERROR: cublas failed\n"); exit(EXIT_FAILURE); } } void CopyConstantsToDevice(const struct SimulationParametes parameters, const struct Constants constants, const struct BoundaryInfo boundary_info, int *coords, real *weights) { HANDLE_ERROR(hipMemcpyToSymbol(parameters_device, &parameters, sizeof(struct SimulationParametes))); HANDLE_ERROR(hipMemcpyToSymbol(constants_device, &constants, sizeof(struct Constants))); HANDLE_ERROR(hipMemcpyToSymbol(boundary_info_device, &boundary_info, sizeof(struct BoundaryInfo))); HANDLE_ERROR(hipMemcpyToSymbol(coords_device, coords, 18 * sizeof(int))); HANDLE_ERROR(hipMemcpyToSymbol(weights_device, weights, 9 * sizeof(real))); } __global__ void CheckConstMemoryCopy() { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id == MASTER) { printf(" --- Values allocated in the constant memory--- \n"); printf(" -> num_lattices %d\n", parameters_device.num_lattices); printf(" -> width %d\n", parameters_device.width); printf(" -> constant one %f\n", constants_device.one); printf(" -> wall velocity %f\n", 
boundary_info_device.wall_velocity_x); printf(" -> weight %f\n\n", weights_device[0]); } } __device__ int GetIndexDevice(int index_i, int index_j) { return index_i + index_j * parameters_device.width; } __global__ void InitArrayDevice(real *array, real init_value, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < size) { array[thread_id] = init_value; thread_id += blockDim.x * gridDim.x; } } __global__ void StreamDevice(real *population, real *swap_buffer, int *flag_field) { int num_lattices = parameters_device.num_lattices; int num_directions = parameters_device.discretization; int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < num_lattices) { if (flag_field[thread_id] == FLUID) { for (int component = 0; component < num_directions; ++component) { int ii = coords_device[component]; int jj = coords_device[num_directions + component]; int neighbour_index = thread_id + GetIndexDevice(ii, jj); int shift = component * num_lattices; swap_buffer[shift + neighbour_index] = population[shift + thread_id]; } } thread_id += blockDim.x * gridDim.x; } } __global__ void UpdateDensityFieldDevice(real *density, real *population, int *flag_field) { int num_lattices = parameters_device.num_lattices; short int num_directions = parameters_device.discretization; int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < num_lattices) { if (flag_field[thread_id] == FLUID) { real lattice_density = 0.0; for (short int component = 0; component < num_directions; ++component) { lattice_density += population[component * num_lattices + thread_id]; } density[thread_id] = lattice_density; } thread_id += blockDim.x * gridDim.x; } } __global__ void UpdateVelocityFieldDevice(real *velocity, real *population, real *density, int *flag_field) { int num_lattices = parameters_device.num_lattices; short int num_directions = parameters_device.discretization; int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < 
num_lattices) { if (flag_field[thread_id] == FLUID) { real lattice_velocity_x = 0.0; real lattice_velocity_y = 0.0; for (short int component = 0; component < num_directions; ++component) { real distribution = population[component * num_lattices + thread_id]; lattice_velocity_x += coords_device[component] * distribution; lattice_velocity_y += coords_device[num_directions + component] * distribution; } real inverse_density = 1.0 / density[thread_id]; velocity[thread_id] = inverse_density * lattice_velocity_x; velocity[num_lattices + thread_id] = inverse_density * lattice_velocity_y; } thread_id += blockDim.x * gridDim.x; } } __global__ void UpdatePopulationFieldDevice(real *velocity, real *population, real *density) { //int num_lattices = parameters_device.num_lattices; short int num_directions = parameters_device.discretization; //real relaxation = parameters_device.relaxation; real const_one = constants_device.one; real const_two = constants_device.two; real const_three = constants_device.three; int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < parameters_device.num_lattices) { real local_velocity_x = velocity[thread_id]; real local_velocity_y = velocity[parameters_device.num_lattices + thread_id]; real dot_product_uu = local_velocity_x * local_velocity_x + local_velocity_y * local_velocity_y; for (int component = 0; component < num_directions; ++component) { real vector_component_x = coords_device[component]; real vector_component_y = coords_device[num_directions + component]; real dot_product_cu = vector_component_x * local_velocity_x + vector_component_y * local_velocity_y; real velocity_expansion = (const_one * dot_product_cu) + (const_two * dot_product_cu * dot_product_cu) - (const_three * dot_product_uu) + 1.0; real equilibrium = weights_device[component] * density[thread_id] * velocity_expansion; int shift = component * parameters_device.num_lattices; population[shift + thread_id] -= (parameters_device.relaxation * (population[shift + 
thread_id] - equilibrium)); } thread_id += blockDim.x * gridDim.x; } } __global__ void PrintBC(struct BoundaryConditions *boundary_conditions) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id == MASTER) { printf("GPU: num walls %d\n", boundary_conditions->num_wall_elements); printf("GPU: num moving walls %d\n", boundary_conditions->num_moving_wall_elements); } } __global__ void TreatNonSlipBC(int *indices, real *population, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < size) { int source = indices[thread_id]; int target = indices[size + thread_id]; population[target] = population[source]; thread_id += blockDim.x * gridDim.x; } } __global__ void TreatSlipBC(int *indices, real *data, real *density, real *population, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < size) { int source = indices[thread_id]; int target = indices[size + thread_id]; int index = indices[2 * size + thread_id]; population[target] = population[source] + data[thread_id] * density[index]; thread_id += blockDim.x * gridDim.x; } } __global__ void TreatInflowBC(int *indices, real *data, real *density, real *population, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < size) { int target = indices[thread_id]; int index = indices[size + thread_id]; population[target] = data[thread_id] * density[index]; thread_id += blockDim.x * gridDim.x; } } __global__ void TreatOutflowBC(int *indices, real *velocity, real *density, real *population, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; int num_lattices = parameters_device.num_lattices; int num_directions = parameters_device.discretization; real const_one = constants_device.one; real const_two = constants_device.two; real const_three = constants_device.three; while (thread_id < size) { int source = indices[thread_id]; int index = indices[size + thread_id]; int target = indices[2 * size + thread_id]; int 
component = indices[3 * size + thread_id]; real neighbour_velocity_x = velocity[index]; real neighbour_velocity_y = velocity[num_lattices + index]; real i = coords_device[component]; real j = coords_device[num_directions + component]; real dot_product_uu = neighbour_velocity_x * neighbour_velocity_x + neighbour_velocity_y * neighbour_velocity_y; real dot_product_cu = -i * neighbour_velocity_x - j * neighbour_velocity_y; real dot_product_cu_inv = i * neighbour_velocity_x + j * neighbour_velocity_y; real velocity_expansion = (const_one * dot_product_cu) + (const_two * dot_product_cu * dot_product_cu) - (const_three * dot_product_uu) + 1.0; real velocity_expansion_inv = (const_one * dot_product_cu_inv) + (const_two * dot_product_cu_inv * dot_product_cu_inv) - (const_three * dot_product_uu) + 1.0; /* real equilibrium = weights_device[component] * density[index] * velocity_expansion; real equilibrium_inv = weights_device[component] * density[index] * velocity_expansion_inv; population[target] = equilibrium + equilibrium_inv - population[source]; */ real expansion_sum = velocity_expansion + velocity_expansion_inv; population[target] = (weights_device[component] * density[index] * expansion_sum) - population[source]; thread_id += blockDim.x * gridDim.x; } } __global__ void ComputeVelocityMagnitude(real *velocity, real *velocity_magnitude) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; int size = parameters_device.num_lattices; while (thread_id < size) { real velocity_x = velocity[thread_id]; real velocity_y = velocity[size + thread_id]; velocity_magnitude[thread_id] = sqrt(velocity_x * velocity_x + velocity_y * velocity_y); thread_id += blockDim.x * gridDim.x; } } __global__ void DrawFluid(uchar4 *ptr, real* velocity_magnitude, int* indices, int size) { // map from threadIdx/BlockIdx to pixel position int thread_id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = thread_id; i < size; i += stride) { int index = 
indices[thread_id]; real velocity = velocity_magnitude[index]; // max --> red // min --> blue // avg --> green real max_velocity = parameters_device.max_velocity_rendering; real min_velocity = parameters_device.min_velocity_rendering; real avg_velocity = (max_velocity + min_velocity) / 2; real distance = max_velocity - avg_velocity; unsigned int red_hue, blue_hue, green_hue; // assuming no negative velocities // green slope one -> min -> average // green slope two -> average -> max real blue_slope = ( - 255 ) / distance; real red_slope = ( 255 ) / distance; real green_slope_one = (255) / distance; real green_slope_two = (-255) / distance; int c_blue = -1 * (blue_slope * avg_velocity); int c_red = -1 * (red_slope * avg_velocity); int c_green_one = -1 * (green_slope_one * min_velocity); int c_green_two = -1 * (green_slope_two * max_velocity); if (velocity <= avg_velocity) { red_hue = 0; blue_hue = blue_slope * velocity + c_blue; green_hue = green_slope_one * velocity + c_green_one; } else { red_hue = red_slope * velocity + c_red; blue_hue = 0; green_hue = green_slope_two * velocity + c_green_two; } red_hue = red_hue <= 255 ? red_hue : 255; blue_hue = blue_hue <= 255 ? blue_hue : 255; green_hue = green_hue <= 255 ? green_hue : 255; ptr[index].x = red_hue; ptr[index].y = green_hue; ptr[index].z = blue_hue; ptr[index].w = 255; } } __global__ void DrawObstacles(uchar4 *ptr, int* indices, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < size) { int index = indices[thread_id]; ptr[index].x = 0; ptr[index].y = 0; ptr[index].z = 0; ptr[index].w = 255; thread_id += blockDim.x * gridDim.x; } } __global__ void PrintMaxMinDensity(real *density, int max_index, int min_index, int time) { printf("time step: %d; max density: %4.6f; min density: %4.6f\n", time, density[max_index], density[min_index]); } __global__ void SynchStreams() { }
bceb08cac9810950dcaca52a92e016e62fb525f2.cu
#include <stdio.h> #include <math.h> #include "cublas_v2.h" #include "headers/kernels.h" #include "headers/parameters.h" #include "headers/boundary_conditions.h" __constant__ struct SimulationParametes parameters_device; __constant__ struct Constants constants_device; __constant__ struct BoundaryInfo boundary_info_device; __constant__ int coords_device[18]; __constant__ real weights_device[9]; __constant__ int inverse_indices_device[9]; void CUDA_CHECK_ERROR() { cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { printf("CUDA ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } } void HANDLE_ERROR(cudaError_t error) { if (error != cudaSuccess) { printf("CUDA STATUS: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } } void HANDLE_CUBLAS_ERROR(cublasStatus_t stat) { if (stat != CUBLAS_STATUS_SUCCESS) { printf("ERROR: cublas failed\n"); exit(EXIT_FAILURE); } } void CopyConstantsToDevice(const struct SimulationParametes parameters, const struct Constants constants, const struct BoundaryInfo boundary_info, int *coords, real *weights) { HANDLE_ERROR(cudaMemcpyToSymbol(parameters_device, &parameters, sizeof(struct SimulationParametes))); HANDLE_ERROR(cudaMemcpyToSymbol(constants_device, &constants, sizeof(struct Constants))); HANDLE_ERROR(cudaMemcpyToSymbol(boundary_info_device, &boundary_info, sizeof(struct BoundaryInfo))); HANDLE_ERROR(cudaMemcpyToSymbol(coords_device, coords, 18 * sizeof(int))); HANDLE_ERROR(cudaMemcpyToSymbol(weights_device, weights, 9 * sizeof(real))); } __global__ void CheckConstMemoryCopy() { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id == MASTER) { printf(" --- Values allocated in the constant memory--- \n"); printf(" -> num_lattices %d\n", parameters_device.num_lattices); printf(" -> width %d\n", parameters_device.width); printf(" -> constant one %f\n", constants_device.one); printf(" -> wall velocity %f\n", boundary_info_device.wall_velocity_x); printf(" -> weight %f\n\n", 
weights_device[0]); } } __device__ int GetIndexDevice(int index_i, int index_j) { return index_i + index_j * parameters_device.width; } __global__ void InitArrayDevice(real *array, real init_value, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < size) { array[thread_id] = init_value; thread_id += blockDim.x * gridDim.x; } } __global__ void StreamDevice(real *population, real *swap_buffer, int *flag_field) { int num_lattices = parameters_device.num_lattices; int num_directions = parameters_device.discretization; int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < num_lattices) { if (flag_field[thread_id] == FLUID) { for (int component = 0; component < num_directions; ++component) { int ii = coords_device[component]; int jj = coords_device[num_directions + component]; int neighbour_index = thread_id + GetIndexDevice(ii, jj); int shift = component * num_lattices; swap_buffer[shift + neighbour_index] = population[shift + thread_id]; } } thread_id += blockDim.x * gridDim.x; } } __global__ void UpdateDensityFieldDevice(real *density, real *population, int *flag_field) { int num_lattices = parameters_device.num_lattices; short int num_directions = parameters_device.discretization; int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < num_lattices) { if (flag_field[thread_id] == FLUID) { real lattice_density = 0.0; for (short int component = 0; component < num_directions; ++component) { lattice_density += population[component * num_lattices + thread_id]; } density[thread_id] = lattice_density; } thread_id += blockDim.x * gridDim.x; } } __global__ void UpdateVelocityFieldDevice(real *velocity, real *population, real *density, int *flag_field) { int num_lattices = parameters_device.num_lattices; short int num_directions = parameters_device.discretization; int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < num_lattices) { if (flag_field[thread_id] == FLUID) { real 
lattice_velocity_x = 0.0; real lattice_velocity_y = 0.0; for (short int component = 0; component < num_directions; ++component) { real distribution = population[component * num_lattices + thread_id]; lattice_velocity_x += coords_device[component] * distribution; lattice_velocity_y += coords_device[num_directions + component] * distribution; } real inverse_density = 1.0 / density[thread_id]; velocity[thread_id] = inverse_density * lattice_velocity_x; velocity[num_lattices + thread_id] = inverse_density * lattice_velocity_y; } thread_id += blockDim.x * gridDim.x; } } __global__ void UpdatePopulationFieldDevice(real *velocity, real *population, real *density) { //int num_lattices = parameters_device.num_lattices; short int num_directions = parameters_device.discretization; //real relaxation = parameters_device.relaxation; real const_one = constants_device.one; real const_two = constants_device.two; real const_three = constants_device.three; int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < parameters_device.num_lattices) { real local_velocity_x = velocity[thread_id]; real local_velocity_y = velocity[parameters_device.num_lattices + thread_id]; real dot_product_uu = local_velocity_x * local_velocity_x + local_velocity_y * local_velocity_y; for (int component = 0; component < num_directions; ++component) { real vector_component_x = coords_device[component]; real vector_component_y = coords_device[num_directions + component]; real dot_product_cu = vector_component_x * local_velocity_x + vector_component_y * local_velocity_y; real velocity_expansion = (const_one * dot_product_cu) + (const_two * dot_product_cu * dot_product_cu) - (const_three * dot_product_uu) + 1.0; real equilibrium = weights_device[component] * density[thread_id] * velocity_expansion; int shift = component * parameters_device.num_lattices; population[shift + thread_id] -= (parameters_device.relaxation * (population[shift + thread_id] - equilibrium)); } thread_id += blockDim.x * 
gridDim.x; } } __global__ void PrintBC(struct BoundaryConditions *boundary_conditions) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id == MASTER) { printf("GPU: num walls %d\n", boundary_conditions->num_wall_elements); printf("GPU: num moving walls %d\n", boundary_conditions->num_moving_wall_elements); } } __global__ void TreatNonSlipBC(int *indices, real *population, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < size) { int source = indices[thread_id]; int target = indices[size + thread_id]; population[target] = population[source]; thread_id += blockDim.x * gridDim.x; } } __global__ void TreatSlipBC(int *indices, real *data, real *density, real *population, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < size) { int source = indices[thread_id]; int target = indices[size + thread_id]; int index = indices[2 * size + thread_id]; population[target] = population[source] + data[thread_id] * density[index]; thread_id += blockDim.x * gridDim.x; } } __global__ void TreatInflowBC(int *indices, real *data, real *density, real *population, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < size) { int target = indices[thread_id]; int index = indices[size + thread_id]; population[target] = data[thread_id] * density[index]; thread_id += blockDim.x * gridDim.x; } } __global__ void TreatOutflowBC(int *indices, real *velocity, real *density, real *population, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; int num_lattices = parameters_device.num_lattices; int num_directions = parameters_device.discretization; real const_one = constants_device.one; real const_two = constants_device.two; real const_three = constants_device.three; while (thread_id < size) { int source = indices[thread_id]; int index = indices[size + thread_id]; int target = indices[2 * size + thread_id]; int component = indices[3 * size + thread_id]; real 
neighbour_velocity_x = velocity[index]; real neighbour_velocity_y = velocity[num_lattices + index]; real i = coords_device[component]; real j = coords_device[num_directions + component]; real dot_product_uu = neighbour_velocity_x * neighbour_velocity_x + neighbour_velocity_y * neighbour_velocity_y; real dot_product_cu = -i * neighbour_velocity_x - j * neighbour_velocity_y; real dot_product_cu_inv = i * neighbour_velocity_x + j * neighbour_velocity_y; real velocity_expansion = (const_one * dot_product_cu) + (const_two * dot_product_cu * dot_product_cu) - (const_three * dot_product_uu) + 1.0; real velocity_expansion_inv = (const_one * dot_product_cu_inv) + (const_two * dot_product_cu_inv * dot_product_cu_inv) - (const_three * dot_product_uu) + 1.0; /* real equilibrium = weights_device[component] * density[index] * velocity_expansion; real equilibrium_inv = weights_device[component] * density[index] * velocity_expansion_inv; population[target] = equilibrium + equilibrium_inv - population[source]; */ real expansion_sum = velocity_expansion + velocity_expansion_inv; population[target] = (weights_device[component] * density[index] * expansion_sum) - population[source]; thread_id += blockDim.x * gridDim.x; } } __global__ void ComputeVelocityMagnitude(real *velocity, real *velocity_magnitude) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; int size = parameters_device.num_lattices; while (thread_id < size) { real velocity_x = velocity[thread_id]; real velocity_y = velocity[size + thread_id]; velocity_magnitude[thread_id] = sqrt(velocity_x * velocity_x + velocity_y * velocity_y); thread_id += blockDim.x * gridDim.x; } } __global__ void DrawFluid(uchar4 *ptr, real* velocity_magnitude, int* indices, int size) { // map from threadIdx/BlockIdx to pixel position int thread_id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = thread_id; i < size; i += stride) { int index = indices[thread_id]; real velocity = 
velocity_magnitude[index]; // max --> red // min --> blue // avg --> green real max_velocity = parameters_device.max_velocity_rendering; real min_velocity = parameters_device.min_velocity_rendering; real avg_velocity = (max_velocity + min_velocity) / 2; real distance = max_velocity - avg_velocity; unsigned int red_hue, blue_hue, green_hue; // assuming no negative velocities // green slope one -> min -> average // green slope two -> average -> max real blue_slope = ( - 255 ) / distance; real red_slope = ( 255 ) / distance; real green_slope_one = (255) / distance; real green_slope_two = (-255) / distance; int c_blue = -1 * (blue_slope * avg_velocity); int c_red = -1 * (red_slope * avg_velocity); int c_green_one = -1 * (green_slope_one * min_velocity); int c_green_two = -1 * (green_slope_two * max_velocity); if (velocity <= avg_velocity) { red_hue = 0; blue_hue = blue_slope * velocity + c_blue; green_hue = green_slope_one * velocity + c_green_one; } else { red_hue = red_slope * velocity + c_red; blue_hue = 0; green_hue = green_slope_two * velocity + c_green_two; } red_hue = red_hue <= 255 ? red_hue : 255; blue_hue = blue_hue <= 255 ? blue_hue : 255; green_hue = green_hue <= 255 ? green_hue : 255; ptr[index].x = red_hue; ptr[index].y = green_hue; ptr[index].z = blue_hue; ptr[index].w = 255; } } __global__ void DrawObstacles(uchar4 *ptr, int* indices, int size) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; while (thread_id < size) { int index = indices[thread_id]; ptr[index].x = 0; ptr[index].y = 0; ptr[index].z = 0; ptr[index].w = 255; thread_id += blockDim.x * gridDim.x; } } __global__ void PrintMaxMinDensity(real *density, int max_index, int min_index, int time) { printf("time step: %d; max density: %4.6f; min density: %4.6f\n", time, density[max_index], density[min_index]); } __global__ void SynchStreams() { }
44d9b31bdcde3be813d75d9b2cbf9994ad38112f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <hip/hip_runtime.h> const int INF = 10000000; const int V = 10010; void input(char *inFileName); void output(char *outFileName); void block_FW(int B); int ceil(int a, int b); int n, m; // Number of vertices, edges int Dist[V][V]; __global__ void cal(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height, int* Dist, int n) { int block_end_x = block_start_x + block_height; int block_end_y = block_start_y + block_width; __shared__ int Dist_shared[100][100]; //if(threadIdx.x == 0 && threadIdx.y == 0){ // for(int a = 0, p = 0; a < n; a++){ // for(int b = 0; b < n; b++, p++){ // Dist_shared[a][b] = Dist[p]; // } // } //} if((blockIdx.y * B + threadIdx.y) < n && (blockIdx.x * B + threadIdx.x) < n && n < 100){ Dist_shared[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x] = Dist[(blockIdx.y * B + threadIdx.y) * n + (blockIdx.x * B + threadIdx.x)]; /*Dist_shared[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x] = Dist[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x];*/ } __syncthreads(); //for(int g = 0; g < n && threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0; g++){ // for(int a = 0; a < n; a++){ // if(Dist[g*n+a] == 10000000) // printf(""); // else // printf("%d ", Dist[g * n + a]); // //printf("%d ", Dist[g][a]); // } // printf("\n"); // if(g == n-1) // printf("############Dist_dev#######################\n"); //} //for(int g = 0; g < n && threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0 && n < 100; g++){ // for(int a = 0; a < n; a++){ // printf("%d ", Dist_shared[g][a]); // //printf("%d ", Dist[g * n + a]); // } // printf("\n"); // if(g == n-1) // printf("******Dist_shared before*********\n"); //} //printf("bx:%d by:%d tx:%d ty:%d hey\n", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y); __syncthreads(); int 
b_i = blockIdx.x; int b_j = blockIdx.y; if(b_i < block_end_x && b_i >= block_start_x && (blockIdx.x * B + threadIdx.x) < n) { if(b_j < block_end_y && b_j >= block_start_y && (blockIdx.y * B + threadIdx.y) < n) { // To calculate B*B elements in the block (b_i, b_j) // For each block, it need to compute B times for (int k = Round * B; k < (Round +1) * B && k < n; ++k) { // To calculate original index of elements in the block (b_i, b_j) // For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2 //int block_internal_start_x = b_i * B; //int block_internal_end_x = (b_i +1) * B; //int block_internal_start_y = b_j * B; //int block_internal_end_y = (b_j +1) * B; //if (block_internal_end_x > n) block_internal_end_x = n; //if (block_internal_end_y > n) block_internal_end_y = n; int i = blockIdx.x * B + threadIdx.x; int j = blockIdx.y * B + threadIdx.y; if(n < 100 && threadIdx.x < n && threadIdx.y < n && Dist_shared[i][k] + Dist_shared[k][j] < Dist_shared[i][j] ){ Dist_shared[i][j] = Dist_shared[i][k] + Dist_shared[k][j]; } else if(threadIdx.x < n && threadIdx.y < n && Dist[i*n+k] + Dist[k*n+j] < Dist[i*n+j] ) { Dist[i*n+j] = Dist[i*n+k] + Dist[k*n+j]; } __syncthreads(); /* for (int i = block_internal_start_x; i < block_internal_end_x; ++i) { for (int j = block_internal_start_y; j < block_internal_end_y; ++j) { if (Dist[i][k] + Dist[k][j] < Dist[i][j]) Dist[i][j] = Dist[i][k] + Dist[k][j]; } } */ } } } __syncthreads(); //for(int h = 0; n < 100 && h < n && threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0; h++){ // for(int b = 0; b < n; b++){ // printf("%d ", Dist_shared[h][b]); // //printf("%d ", Dist_shared[h * n + b]); // } // printf("\n"); // if(h == n-1) // printf("~~~~~~~~~~~~~Dist_shared after~~~~~~~~~~~~~~~~~~\n"); //} //if(threadIdx.x == 0 && threadIdx.y == 0){ for(int a = 0, p = 0; n < 100 && a < n; a++){ for(int b = 0; b < n; b++, p++){ Dist[p] = Dist_shared[a][b]; } } //} if(n < 100 && (blockIdx.y * B + threadIdx.y) < n && 
(blockIdx.x * B + threadIdx.x) < n){ Dist[(blockIdx.y * B + threadIdx.y) * n + (blockIdx.x * B + threadIdx.x)] = Dist_shared[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x] ; /*Dist_shared[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x] = Dist[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x];*/ } __syncthreads(); //for(int h = 0; n < 100 && h < n && threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0; h++){ // for(int b = 0; b < n; b++){ // printf("%d ", Dist_shared[h][b]); // //printf("%d ", Dist_shared[h * n + b]); // } // printf("\n"); // if(h == n-1) // printf("~~~~~~~~~~~~~Dist_shared before leaving kernel~~~~~~~~~~~~~~~~~~\n"); //} //for(int g = 0; g < n && threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0; g++){ // for(int a = 0; a < n; a++){ // printf("%d ", Dist[g * n + a]); // //printf("%d ", Dist[g][a]); // } // printf("\n"); // if(g == n-1) // printf("############Dist_dev before leaving kernel#######################\n"); //} } void cal1(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) { int block_end_x = block_start_x + block_height; int block_end_y = block_start_y + block_width; for (int b_i = block_start_x; b_i < block_end_x; ++b_i) { for (int b_j = block_start_y; b_j < block_end_y; ++b_j) { // To calculate B*B elements in the block (b_i, b_j) // For each block, it need to compute B times for (int k = Round * B; k < (Round +1) * B && k < n; ++k) { // To calculate original index of elements in the block (b_i, b_j) // For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2 int block_internal_start_x = b_i * B; int block_internal_end_x = (b_i +1) * B; int block_internal_start_y = b_j * B; int block_internal_end_y = (b_j +1) * B; if (block_internal_end_x > n) block_internal_end_x = n; if (block_internal_end_y > n) block_internal_end_y = n; for (int i = block_internal_start_x; i < block_internal_end_x; ++i) { for 
(int j = block_internal_start_y; j < block_internal_end_y; ++j) { if (Dist[i][k] + Dist[k][j] < Dist[i][j]) Dist[i][j] = Dist[i][k] + Dist[k][j]; } } } } } } int main(int argc, char* argv[]) { struct timeval ts, tnow; gettimeofday(&ts, NULL); input(argv[1]); int B = atoi(argv[3]); if(B > 16){ B = 16; } if(n <= B){ B = n; } block_FW(B); output(argv[2]); gettimeofday(&tnow, NULL); printf("time: %d millisecond\n", ((tnow.tv_sec - ts.tv_sec) * 1000000 + (tnow.tv_usec - ts.tv_usec)) / 1000); return 0; } void input(char *inFileName) { FILE *infile = fopen(inFileName, "r"); fscanf(infile, "%d %d", &n, &m); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (i == j) Dist[i][j] = 0; else Dist[i][j] = INF; } } while (--m >= 0) { int a, b, v; fscanf(infile, "%d %d %d", &a, &b, &v); --a, --b; Dist[a][b] = v; } } void output(char *outFileName) { FILE *outfile = fopen(outFileName, "w"); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (Dist[i][j] >= INF) fprintf(outfile, "INF "); else fprintf(outfile, "%d ", Dist[i][j]); } fprintf(outfile, "\n"); } } int ceil(int a, int b) { return (a + b -1)/b; } void block_FW(int B) { int *temp; temp = (int*) malloc(n * n * sizeof(int)); //cuda malloc int *Dist_dev; hipMalloc((void **) &Dist_dev, n * n * sizeof(int)); //round int round = ceil(n, B); //cuda block & threads dim3 threadsPerBlock(B, B); dim3 numOfBlock(round, round); //Dist to temp for(int a = 0, p = 0; a < n; a++){ for(int b = 0; b < n; b++, p++){ temp[p] = Dist[a][b]; } } //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(temp[h*n+g] == 10000000) // printf(""); // else // printf("%d ", temp[h * n + g]); // } // printf("\n"); //} //printf("------------------------------\n"); for (int r = 0; r < round; ++r) { //cuda memory copy hipMemcpy(Dist_dev, temp, n * n * sizeof(int), hipMemcpyHostToDevice); //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(r <= 5) // printf("%d ", temp[h * n + g]); // //if(g == n-1) // //printf("rrr 
%d\n", r); // } //printf("\n"); //} //printf("------------------------------\n"); /* Phase 1*/ hipLaunchKernelGGL(( cal) , dim3(numOfBlock), dim3(threadsPerBlock), 0, 0, B, r, r, r, 1, 1, Dist_dev, n); hipMemcpy(temp, Dist_dev, n * n * sizeof(int), hipMemcpyDeviceToHost); //temp to Dist for(int a = 0, p = 0; a < n; a++){ for(int b = 0; b < n; b++, p++){ Dist[a][b] = temp[p]; } } //cal1(B, r, r, r, 1, 1); //printf("$$$$$$$$$$$$$$kernel finish$$$$$$$$$$$$$$$$$$$$$$$$\n"); //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(r <= 5) // printf("%d ", Dist[h][g]); // //if(g == n-1) // //printf("rrr %d\n", r); // } //printf("\n"); //} //printf("+++++++++++++++++ after phase1+++++++++++++++\n"); /* Phase 2*/ //hipMalloc((void **) &Dist_dev, n * n * sizeof(int)); hipMemcpy(Dist_dev, temp, n * n * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cal) , dim3(numOfBlock), dim3(threadsPerBlock), 0, 0, B, r, r, 0, r, 1, Dist_dev, n); hipMemcpy(temp, Dist_dev, n * n * sizeof(int), hipMemcpyDeviceToHost); //temp to Dist for(int a = 0, p = 0; a < n; a++){ for(int b = 0; b < n; b++, p++){ Dist[a][b] = temp[p]; } } //hipMalloc((void **) &Dist_dev, n * n * sizeof(int)); hipMemcpy(Dist_dev, temp, n * n * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cal) , dim3(numOfBlock), dim3(threadsPerBlock), 0, 0, B, r, r, r+1, round-r-1, 1, Dist_dev, n); hipMemcpy(temp, Dist_dev, n * n * sizeof(int), hipMemcpyDeviceToHost); //hipMalloc((void **) &Dist_dev, n * n * sizeof(int)); hipMemcpy(Dist_dev, temp, n * n * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cal) , dim3(numOfBlock), dim3(threadsPerBlock), 0, 0, B, r, 0, r, 1, r, Dist_dev, n); hipMemcpy(temp, Dist_dev, n * n * sizeof(int), hipMemcpyDeviceToHost); //hipMalloc((void **) &Dist_dev, n * n * sizeof(int)); hipMemcpy(Dist_dev, temp, n * n * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cal) , dim3(numOfBlock), dim3(threadsPerBlock), 0, 0, B, r, r+1, r, 1, round-r-1, Dist_dev, n); 
hipMemcpy(temp, Dist_dev, n * n * sizeof(int), hipMemcpyDeviceToHost); //cal1(B, r, r, 0, r, 1); //cal1(B, r, r, r +1, round - r -1, 1); //cal1(B, r, 0, r, 1, r); //cal1(B, r, r +1, r, 1, round - r -1); //temp to Dist for(int a = 0, p = 0; a < n; a++){ for(int b = 0; b < n; b++, p++){ Dist[a][b] = temp[p]; } } //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(r <= 5) // printf("%d ", Dist[h][g]); // //if(g == n-1) // //printf("rrr %d\n", r); // } //printf("\n"); //} //printf("+++++++++++++++++ after phase2+++++++++++++++\n"); /* Phase 3*/ hipMemcpy(Dist_dev, temp, n * n * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cal) , dim3(numOfBlock), dim3(threadsPerBlock), 0, 0, B, r, 0, 0, r, r, Dist_dev, n); hipMemcpy(temp, Dist_dev, n * n * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(Dist_dev, temp, n * n * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cal) , dim3(numOfBlock), dim3(threadsPerBlock), 0, 0, B, r, 0, r+1, round-r-1, r, Dist_dev, n); hipMemcpy(temp, Dist_dev, n * n * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(Dist_dev, temp, n * n * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cal) , dim3(numOfBlock), dim3(threadsPerBlock), 0, 0, B, r, r+1, 0, r, round-r-1, Dist_dev, n); hipMemcpy(temp, Dist_dev, n * n * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(Dist_dev, temp, n * n * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cal) , dim3(numOfBlock), dim3(threadsPerBlock), 0, 0, B, r, r+1, r+1, round-r-1, round-r-1, Dist_dev, n); //printf("OK\n"); hipMemcpy(temp, Dist_dev, n * n * sizeof(int), hipMemcpyDeviceToHost); //cal1(B, r, 0, 0, r, r); //cal1(B, r, 0, r +1, round -r -1, r); //cal1(B, r, r +1, 0, r, round - r -1); //cal1(B, r, r +1, r +1, round -r -1, round - r -1); //get the answer back //Dist //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(r <= 5) // printf("%d ", Dist[h][g]); // //if(g == n-1) // //printf("rrr %d\n", r); // } //printf("\n"); //} 
//printf("+++++++++++++++++Dist after phase2 , after phase3 before temp to Dist+++++++++++++++\n"); //temp to Dist for(int a = 0, p = 0; a < n; a++){ for(int b = 0; b < n; b++, p++){ Dist[a][b] = temp[p]; } } //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(temp[h*n+g] == 10000000) // printf(""); // else // printf("%d ", temp[h*n + g]); // //if(g == n-1) // //printf("rrr %d\n", r); // } // printf("\n"); //} //printf("+++++++++++++++++temp after phase3+++++++++++++++\n"); //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(r <= 5) // printf("%d ", Dist[h][g]); // //if(g == n-1) // //printf("rrr %d\n", r); // } //printf("\n"); //} //printf("+++++++++++++++++Dist after phase3+++++++++++++++\n"); } hipFree(Dist_dev); }
44d9b31bdcde3be813d75d9b2cbf9994ad38112f.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <cuda.h> const int INF = 10000000; const int V = 10010; void input(char *inFileName); void output(char *outFileName); void block_FW(int B); int ceil(int a, int b); int n, m; // Number of vertices, edges int Dist[V][V]; __global__ void cal(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height, int* Dist, int n) { int block_end_x = block_start_x + block_height; int block_end_y = block_start_y + block_width; __shared__ int Dist_shared[100][100]; //if(threadIdx.x == 0 && threadIdx.y == 0){ // for(int a = 0, p = 0; a < n; a++){ // for(int b = 0; b < n; b++, p++){ // Dist_shared[a][b] = Dist[p]; // } // } //} if((blockIdx.y * B + threadIdx.y) < n && (blockIdx.x * B + threadIdx.x) < n && n < 100){ Dist_shared[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x] = Dist[(blockIdx.y * B + threadIdx.y) * n + (blockIdx.x * B + threadIdx.x)]; /*Dist_shared[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x] = Dist[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x];*/ } __syncthreads(); //for(int g = 0; g < n && threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0; g++){ // for(int a = 0; a < n; a++){ // if(Dist[g*n+a] == 10000000) // printf(""); // else // printf("%d ", Dist[g * n + a]); // //printf("%d ", Dist[g][a]); // } // printf("\n"); // if(g == n-1) // printf("############Dist_dev#######################\n"); //} //for(int g = 0; g < n && threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0 && n < 100; g++){ // for(int a = 0; a < n; a++){ // printf("%d ", Dist_shared[g][a]); // //printf("%d ", Dist[g * n + a]); // } // printf("\n"); // if(g == n-1) // printf("******Dist_shared before*********\n"); //} //printf("bx:%d by:%d tx:%d ty:%d hey\n", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y); __syncthreads(); int b_i = blockIdx.x; int b_j = blockIdx.y; if(b_i < block_end_x && b_i >= 
block_start_x && (blockIdx.x * B + threadIdx.x) < n) { if(b_j < block_end_y && b_j >= block_start_y && (blockIdx.y * B + threadIdx.y) < n) { // To calculate B*B elements in the block (b_i, b_j) // For each block, it need to compute B times for (int k = Round * B; k < (Round +1) * B && k < n; ++k) { // To calculate original index of elements in the block (b_i, b_j) // For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2 //int block_internal_start_x = b_i * B; //int block_internal_end_x = (b_i +1) * B; //int block_internal_start_y = b_j * B; //int block_internal_end_y = (b_j +1) * B; //if (block_internal_end_x > n) block_internal_end_x = n; //if (block_internal_end_y > n) block_internal_end_y = n; int i = blockIdx.x * B + threadIdx.x; int j = blockIdx.y * B + threadIdx.y; if(n < 100 && threadIdx.x < n && threadIdx.y < n && Dist_shared[i][k] + Dist_shared[k][j] < Dist_shared[i][j] ){ Dist_shared[i][j] = Dist_shared[i][k] + Dist_shared[k][j]; } else if(threadIdx.x < n && threadIdx.y < n && Dist[i*n+k] + Dist[k*n+j] < Dist[i*n+j] ) { Dist[i*n+j] = Dist[i*n+k] + Dist[k*n+j]; } __syncthreads(); /* for (int i = block_internal_start_x; i < block_internal_end_x; ++i) { for (int j = block_internal_start_y; j < block_internal_end_y; ++j) { if (Dist[i][k] + Dist[k][j] < Dist[i][j]) Dist[i][j] = Dist[i][k] + Dist[k][j]; } } */ } } } __syncthreads(); //for(int h = 0; n < 100 && h < n && threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0; h++){ // for(int b = 0; b < n; b++){ // printf("%d ", Dist_shared[h][b]); // //printf("%d ", Dist_shared[h * n + b]); // } // printf("\n"); // if(h == n-1) // printf("~~~~~~~~~~~~~Dist_shared after~~~~~~~~~~~~~~~~~~\n"); //} //if(threadIdx.x == 0 && threadIdx.y == 0){ for(int a = 0, p = 0; n < 100 && a < n; a++){ for(int b = 0; b < n; b++, p++){ Dist[p] = Dist_shared[a][b]; } } //} if(n < 100 && (blockIdx.y * B + threadIdx.y) < n && (blockIdx.x * B + threadIdx.x) < n){ Dist[(blockIdx.y * B + 
threadIdx.y) * n + (blockIdx.x * B + threadIdx.x)] = Dist_shared[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x] ; /*Dist_shared[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x] = Dist[blockIdx.y * B + threadIdx.y][blockIdx.x * B + threadIdx.x];*/ } __syncthreads(); //for(int h = 0; n < 100 && h < n && threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0; h++){ // for(int b = 0; b < n; b++){ // printf("%d ", Dist_shared[h][b]); // //printf("%d ", Dist_shared[h * n + b]); // } // printf("\n"); // if(h == n-1) // printf("~~~~~~~~~~~~~Dist_shared before leaving kernel~~~~~~~~~~~~~~~~~~\n"); //} //for(int g = 0; g < n && threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0; g++){ // for(int a = 0; a < n; a++){ // printf("%d ", Dist[g * n + a]); // //printf("%d ", Dist[g][a]); // } // printf("\n"); // if(g == n-1) // printf("############Dist_dev before leaving kernel#######################\n"); //} } void cal1(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) { int block_end_x = block_start_x + block_height; int block_end_y = block_start_y + block_width; for (int b_i = block_start_x; b_i < block_end_x; ++b_i) { for (int b_j = block_start_y; b_j < block_end_y; ++b_j) { // To calculate B*B elements in the block (b_i, b_j) // For each block, it need to compute B times for (int k = Round * B; k < (Round +1) * B && k < n; ++k) { // To calculate original index of elements in the block (b_i, b_j) // For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2 int block_internal_start_x = b_i * B; int block_internal_end_x = (b_i +1) * B; int block_internal_start_y = b_j * B; int block_internal_end_y = (b_j +1) * B; if (block_internal_end_x > n) block_internal_end_x = n; if (block_internal_end_y > n) block_internal_end_y = n; for (int i = block_internal_start_x; i < block_internal_end_x; ++i) { for (int j = block_internal_start_y; j < block_internal_end_y; 
++j) { if (Dist[i][k] + Dist[k][j] < Dist[i][j]) Dist[i][j] = Dist[i][k] + Dist[k][j]; } } } } } } int main(int argc, char* argv[]) { struct timeval ts, tnow; gettimeofday(&ts, NULL); input(argv[1]); int B = atoi(argv[3]); if(B > 16){ B = 16; } if(n <= B){ B = n; } block_FW(B); output(argv[2]); gettimeofday(&tnow, NULL); printf("time: %d millisecond\n", ((tnow.tv_sec - ts.tv_sec) * 1000000 + (tnow.tv_usec - ts.tv_usec)) / 1000); return 0; } void input(char *inFileName) { FILE *infile = fopen(inFileName, "r"); fscanf(infile, "%d %d", &n, &m); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (i == j) Dist[i][j] = 0; else Dist[i][j] = INF; } } while (--m >= 0) { int a, b, v; fscanf(infile, "%d %d %d", &a, &b, &v); --a, --b; Dist[a][b] = v; } } void output(char *outFileName) { FILE *outfile = fopen(outFileName, "w"); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (Dist[i][j] >= INF) fprintf(outfile, "INF "); else fprintf(outfile, "%d ", Dist[i][j]); } fprintf(outfile, "\n"); } } int ceil(int a, int b) { return (a + b -1)/b; } void block_FW(int B) { int *temp; temp = (int*) malloc(n * n * sizeof(int)); //cuda malloc int *Dist_dev; cudaMalloc((void **) &Dist_dev, n * n * sizeof(int)); //round int round = ceil(n, B); //cuda block & threads dim3 threadsPerBlock(B, B); dim3 numOfBlock(round, round); //Dist to temp for(int a = 0, p = 0; a < n; a++){ for(int b = 0; b < n; b++, p++){ temp[p] = Dist[a][b]; } } //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(temp[h*n+g] == 10000000) // printf(""); // else // printf("%d ", temp[h * n + g]); // } // printf("\n"); //} //printf("------------------------------\n"); for (int r = 0; r < round; ++r) { //cuda memory copy cudaMemcpy(Dist_dev, temp, n * n * sizeof(int), cudaMemcpyHostToDevice); //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(r <= 5) // printf("%d ", temp[h * n + g]); // //if(g == n-1) // //printf("rrr %d\n", r); // } //printf("\n"); //} 
//printf("------------------------------\n"); /* Phase 1*/ cal <<<numOfBlock, threadsPerBlock>>> (B, r, r, r, 1, 1, Dist_dev, n); cudaMemcpy(temp, Dist_dev, n * n * sizeof(int), cudaMemcpyDeviceToHost); //temp to Dist for(int a = 0, p = 0; a < n; a++){ for(int b = 0; b < n; b++, p++){ Dist[a][b] = temp[p]; } } //cal1(B, r, r, r, 1, 1); //printf("$$$$$$$$$$$$$$kernel finish$$$$$$$$$$$$$$$$$$$$$$$$\n"); //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(r <= 5) // printf("%d ", Dist[h][g]); // //if(g == n-1) // //printf("rrr %d\n", r); // } //printf("\n"); //} //printf("+++++++++++++++++ after phase1+++++++++++++++\n"); /* Phase 2*/ //cudaMalloc((void **) &Dist_dev, n * n * sizeof(int)); cudaMemcpy(Dist_dev, temp, n * n * sizeof(int), cudaMemcpyHostToDevice); cal <<<numOfBlock, threadsPerBlock>>> (B, r, r, 0, r, 1, Dist_dev, n); cudaMemcpy(temp, Dist_dev, n * n * sizeof(int), cudaMemcpyDeviceToHost); //temp to Dist for(int a = 0, p = 0; a < n; a++){ for(int b = 0; b < n; b++, p++){ Dist[a][b] = temp[p]; } } //cudaMalloc((void **) &Dist_dev, n * n * sizeof(int)); cudaMemcpy(Dist_dev, temp, n * n * sizeof(int), cudaMemcpyHostToDevice); cal <<<numOfBlock, threadsPerBlock>>> (B, r, r, r+1, round-r-1, 1, Dist_dev, n); cudaMemcpy(temp, Dist_dev, n * n * sizeof(int), cudaMemcpyDeviceToHost); //cudaMalloc((void **) &Dist_dev, n * n * sizeof(int)); cudaMemcpy(Dist_dev, temp, n * n * sizeof(int), cudaMemcpyHostToDevice); cal <<<numOfBlock, threadsPerBlock>>> (B, r, 0, r, 1, r, Dist_dev, n); cudaMemcpy(temp, Dist_dev, n * n * sizeof(int), cudaMemcpyDeviceToHost); //cudaMalloc((void **) &Dist_dev, n * n * sizeof(int)); cudaMemcpy(Dist_dev, temp, n * n * sizeof(int), cudaMemcpyHostToDevice); cal <<<numOfBlock, threadsPerBlock>>> (B, r, r+1, r, 1, round-r-1, Dist_dev, n); cudaMemcpy(temp, Dist_dev, n * n * sizeof(int), cudaMemcpyDeviceToHost); //cal1(B, r, r, 0, r, 1); //cal1(B, r, r, r +1, round - r -1, 1); //cal1(B, r, 0, r, 1, r); //cal1(B, r, r +1, r, 1, round 
- r -1); //temp to Dist for(int a = 0, p = 0; a < n; a++){ for(int b = 0; b < n; b++, p++){ Dist[a][b] = temp[p]; } } //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(r <= 5) // printf("%d ", Dist[h][g]); // //if(g == n-1) // //printf("rrr %d\n", r); // } //printf("\n"); //} //printf("+++++++++++++++++ after phase2+++++++++++++++\n"); /* Phase 3*/ cudaMemcpy(Dist_dev, temp, n * n * sizeof(int), cudaMemcpyHostToDevice); cal <<<numOfBlock, threadsPerBlock>>> (B, r, 0, 0, r, r, Dist_dev, n); cudaMemcpy(temp, Dist_dev, n * n * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(Dist_dev, temp, n * n * sizeof(int), cudaMemcpyHostToDevice); cal <<<numOfBlock, threadsPerBlock>>> (B, r, 0, r+1, round-r-1, r, Dist_dev, n); cudaMemcpy(temp, Dist_dev, n * n * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(Dist_dev, temp, n * n * sizeof(int), cudaMemcpyHostToDevice); cal <<<numOfBlock, threadsPerBlock>>> (B, r, r+1, 0, r, round-r-1, Dist_dev, n); cudaMemcpy(temp, Dist_dev, n * n * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(Dist_dev, temp, n * n * sizeof(int), cudaMemcpyHostToDevice); cal <<<numOfBlock, threadsPerBlock>>> (B, r, r+1, r+1, round-r-1, round-r-1, Dist_dev, n); //printf("OK\n"); cudaMemcpy(temp, Dist_dev, n * n * sizeof(int), cudaMemcpyDeviceToHost); //cal1(B, r, 0, 0, r, r); //cal1(B, r, 0, r +1, round -r -1, r); //cal1(B, r, r +1, 0, r, round - r -1); //cal1(B, r, r +1, r +1, round -r -1, round - r -1); //get the answer back //Dist //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(r <= 5) // printf("%d ", Dist[h][g]); // //if(g == n-1) // //printf("rrr %d\n", r); // } //printf("\n"); //} //printf("+++++++++++++++++Dist after phase2 , after phase3 before temp to Dist+++++++++++++++\n"); //temp to Dist for(int a = 0, p = 0; a < n; a++){ for(int b = 0; b < n; b++, p++){ Dist[a][b] = temp[p]; } } //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(temp[h*n+g] == 10000000) // printf(""); // else // printf("%d ", 
temp[h*n + g]); // //if(g == n-1) // //printf("rrr %d\n", r); // } // printf("\n"); //} //printf("+++++++++++++++++temp after phase3+++++++++++++++\n"); //for(int h = 0; h < n; h++){ // for(int g = 0; g < n; g++){ // if(r <= 5) // printf("%d ", Dist[h][g]); // //if(g == n-1) // //printf("rrr %d\n", r); // } //printf("\n"); //} //printf("+++++++++++++++++Dist after phase3+++++++++++++++\n"); } cudaFree(Dist_dev); }
980aaa8007c28927f50e3dda8f022382c23ead36.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date @author Tobias Ribizel @precisions normal z -> s d c */ #include "magma_sampleselect.h" #include <cstdint> #define PRECISION_z #if (CUDA_ARCH >= 350) namespace magma_sampleselect { __global__ void compute_abs(const magmaDoubleComplex* __restrict__ in, double* __restrict__ out, int32_t size) { auto idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= size) { return; } auto v = in[idx]; out[idx] = real(v) * real(v) + imag(v) * imag(v); } } // namespace magma_sampleselect using namespace magma_sampleselect; /** Purpose ------- This routine selects a threshold separating the subset_size smallest magnitude elements from the rest. Arguments --------- @param[in] total_size magma_int_t size of array val @param[in] subset_size magma_int_t number of smallest elements to separate @param[in] val magmaDoubleComplex array containing the values @param[out] thrs double* computed threshold @param[inout] tmp_ptr magma_ptr* pointer to pointer to temporary storage. May be reallocated during execution. @param[inout] tmp_size magma_int_t* pointer to size of temporary storage. May be increased during execution. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zsampleselect( magma_int_t total_size, magma_int_t subset_size, magmaDoubleComplex *val, double *thrs, magma_ptr *tmp_ptr, magma_int_t *tmp_size, magma_queue_t queue ) { magma_int_t info = 0; magma_int_t num_blocks = magma_ceildiv(total_size, block_size); magma_int_t required_size = sizeof(double) * (total_size * 2 + searchtree_size) + sizeof(int32_t) * sampleselect_alloc_size(total_size); auto realloc_result = realloc_if_necessary(tmp_ptr, tmp_size, required_size); double* gputmp1 = (double*)*tmp_ptr; double* gputmp2 = gputmp1 + total_size; double* gputree = gputmp2 + total_size; double* gpuresult = gputree + searchtree_size; int32_t* gpuints = (int32_t*)(gpuresult + 1); CHECK(realloc_result); hipLaunchKernelGGL(( compute_abs), dim3(num_blocks), dim3(block_size), 0, queue->cuda_stream(), val, gputmp1, total_size); hipLaunchKernelGGL(( sampleselect), dim3(1), dim3(1), 0, queue->cuda_stream(), gputmp1, gputmp2, gputree, gpuints, total_size, subset_size, gpuresult); magma_dgetvector(1, gpuresult, 1, thrs, 1, queue ); *thrs = std::sqrt(*thrs); cleanup: return info; } #endif /** Purpose ------- This routine selects an approximate threshold separating the subset_size smallest magnitude elements from the rest. Arguments --------- @param[in] total_size magma_int_t size of array val @param[in] subset_size magma_int_t number of smallest elements to separate @param[in] val magmaDoubleComplex array containing the values @param[out] thrs double* computed threshold @param[inout] tmp_ptr magma_ptr* pointer to pointer to temporary storage. May be reallocated during execution. @param[inout] tmp_size magma_int_t* pointer to size of temporary storage. May be increased during execution. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zsampleselect_approx( magma_int_t total_size, magma_int_t subset_size, magmaDoubleComplex *val, double *thrs, magma_ptr *tmp_ptr, magma_int_t *tmp_size, magma_queue_t queue ) { magma_int_t info = 0; #if (CUDA_ARCH >= 350) auto num_blocks = magma_ceildiv(total_size, block_size); auto local_work = (total_size + num_threads - 1) / num_threads; auto required_size = sizeof(double) * (total_size + searchtree_size) + sizeof(int32_t) * (searchtree_width * (num_grouped_blocks + 1) + 1); auto realloc_result = realloc_if_necessary(tmp_ptr, tmp_size, required_size); double* gputmp = (double*)*tmp_ptr; double* gputree = gputmp + total_size; uint32_t* gpubucketidx = (uint32_t*)(gputree + searchtree_size); int32_t* gpurankout = (int32_t*)(gpubucketidx + 1); int32_t* gpucounts = gpurankout + 1; int32_t* gpulocalcounts = gpucounts + searchtree_width; uint32_t bucketidx{}; CHECK(realloc_result); hipLaunchKernelGGL(( compute_abs), dim3(num_blocks), dim3(block_size), 0, queue->cuda_stream(), val, gputmp, total_size); hipLaunchKernelGGL(( build_searchtree), dim3(1), dim3(sample_size), 0, queue->cuda_stream(), gputmp, gputree, total_size); hipLaunchKernelGGL(( count_buckets), dim3(num_grouped_blocks), dim3(block_size), 0, queue->cuda_stream(), gputmp, gputree, gpulocalcounts, total_size, local_work); hipLaunchKernelGGL(( reduce_counts), dim3(searchtree_width), dim3(num_grouped_blocks), 0, queue->cuda_stream(), gpulocalcounts, gpucounts, num_grouped_blocks); hipLaunchKernelGGL(( sampleselect_findbucket), dim3(1), dim3(searchtree_width / 2), 0, queue->cuda_stream(), gpucounts, subset_size, gpubucketidx, gpurankout); magma_getvector(1, sizeof(uint32_t), gpubucketidx, 1, &bucketidx, 1, queue); magma_dgetvector(1, gputree + searchtree_width - 1 + bucketidx, 1, thrs, 1, queue); *thrs = std::sqrt(*thrs); #else printf("error: this functionality needs CUDA architecture >= 
3.5\n"); inf = ERR_NOT_SUPPORTED; #endif cleanup: return info; }
980aaa8007c28927f50e3dda8f022382c23ead36.cu
/* -- MAGMA (version 2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date @author Tobias Ribizel @precisions normal z -> s d c */ #include "magma_sampleselect.h" #include <cstdint> #define PRECISION_z #if (CUDA_ARCH >= 350) namespace magma_sampleselect { __global__ void compute_abs(const magmaDoubleComplex* __restrict__ in, double* __restrict__ out, int32_t size) { auto idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= size) { return; } auto v = in[idx]; out[idx] = real(v) * real(v) + imag(v) * imag(v); } } // namespace magma_sampleselect using namespace magma_sampleselect; /** Purpose ------- This routine selects a threshold separating the subset_size smallest magnitude elements from the rest. Arguments --------- @param[in] total_size magma_int_t size of array val @param[in] subset_size magma_int_t number of smallest elements to separate @param[in] val magmaDoubleComplex array containing the values @param[out] thrs double* computed threshold @param[inout] tmp_ptr magma_ptr* pointer to pointer to temporary storage. May be reallocated during execution. @param[inout] tmp_size magma_int_t* pointer to size of temporary storage. May be increased during execution. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zsampleselect( magma_int_t total_size, magma_int_t subset_size, magmaDoubleComplex *val, double *thrs, magma_ptr *tmp_ptr, magma_int_t *tmp_size, magma_queue_t queue ) { magma_int_t info = 0; magma_int_t num_blocks = magma_ceildiv(total_size, block_size); magma_int_t required_size = sizeof(double) * (total_size * 2 + searchtree_size) + sizeof(int32_t) * sampleselect_alloc_size(total_size); auto realloc_result = realloc_if_necessary(tmp_ptr, tmp_size, required_size); double* gputmp1 = (double*)*tmp_ptr; double* gputmp2 = gputmp1 + total_size; double* gputree = gputmp2 + total_size; double* gpuresult = gputree + searchtree_size; int32_t* gpuints = (int32_t*)(gpuresult + 1); CHECK(realloc_result); compute_abs<<<num_blocks, block_size, 0, queue->cuda_stream()>>> (val, gputmp1, total_size); sampleselect<<<1, 1, 0, queue->cuda_stream()>>> (gputmp1, gputmp2, gputree, gpuints, total_size, subset_size, gpuresult); magma_dgetvector(1, gpuresult, 1, thrs, 1, queue ); *thrs = std::sqrt(*thrs); cleanup: return info; } #endif /** Purpose ------- This routine selects an approximate threshold separating the subset_size smallest magnitude elements from the rest. Arguments --------- @param[in] total_size magma_int_t size of array val @param[in] subset_size magma_int_t number of smallest elements to separate @param[in] val magmaDoubleComplex array containing the values @param[out] thrs double* computed threshold @param[inout] tmp_ptr magma_ptr* pointer to pointer to temporary storage. May be reallocated during execution. @param[inout] tmp_size magma_int_t* pointer to size of temporary storage. May be increased during execution. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zsampleselect_approx( magma_int_t total_size, magma_int_t subset_size, magmaDoubleComplex *val, double *thrs, magma_ptr *tmp_ptr, magma_int_t *tmp_size, magma_queue_t queue ) { magma_int_t info = 0; #if (CUDA_ARCH >= 350) auto num_blocks = magma_ceildiv(total_size, block_size); auto local_work = (total_size + num_threads - 1) / num_threads; auto required_size = sizeof(double) * (total_size + searchtree_size) + sizeof(int32_t) * (searchtree_width * (num_grouped_blocks + 1) + 1); auto realloc_result = realloc_if_necessary(tmp_ptr, tmp_size, required_size); double* gputmp = (double*)*tmp_ptr; double* gputree = gputmp + total_size; uint32_t* gpubucketidx = (uint32_t*)(gputree + searchtree_size); int32_t* gpurankout = (int32_t*)(gpubucketidx + 1); int32_t* gpucounts = gpurankout + 1; int32_t* gpulocalcounts = gpucounts + searchtree_width; uint32_t bucketidx{}; CHECK(realloc_result); compute_abs<<<num_blocks, block_size, 0, queue->cuda_stream()>>> (val, gputmp, total_size); build_searchtree<<<1, sample_size, 0, queue->cuda_stream()>>> (gputmp, gputree, total_size); count_buckets<<<num_grouped_blocks, block_size, 0, queue->cuda_stream()>>> (gputmp, gputree, gpulocalcounts, total_size, local_work); reduce_counts<<<searchtree_width, num_grouped_blocks, 0, queue->cuda_stream()>>> (gpulocalcounts, gpucounts, num_grouped_blocks); sampleselect_findbucket<<<1, searchtree_width / 2, 0, queue->cuda_stream()>>> (gpucounts, subset_size, gpubucketidx, gpurankout); magma_getvector(1, sizeof(uint32_t), gpubucketidx, 1, &bucketidx, 1, queue); magma_dgetvector(1, gputree + searchtree_width - 1 + bucketidx, 1, thrs, 1, queue); *thrs = std::sqrt(*thrs); #else printf("error: this functionality needs CUDA architecture >= 3.5\n"); inf = ERR_NOT_SUPPORTED; #endif cleanup: return info; }
a484411f30fa1b8bc9601a84e95e7a47a937e9c5.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #include <hip/hip_runtime.h> #define COMMENT "Histogram_GPU" #define RGB_COMPONENT_COLOR 255 //Tamanhos dos blocos das threads #define BLOCK_SIZE 32 typedef struct { unsigned char red, green, blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } img = (PPMImage *) malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } if (rgb_comp_color != RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } __global__ void cudaHistogram(PPMPixel* data, int rows, int cols, float* h){ 
//Definindo variaveis locais na funcao na GPU int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int tid = (cols)*row + col; int j, k, l; //Verificao para os limites das threads if(col < (cols) && row < (rows)){ //Searching for the right value of the pixel int x = 0; for (j = 0; j <= 3; j++) { for (k = 0; k <= 3; k++) { for (l = 0; l <= 3; l++) { if (data[tid].red == j && data[tid].green == k && data[tid].blue == l) { atomicAdd(&h[x], 1); } x++; } } } } } void Histogram(PPMImage *image, float *h) { hipEvent_t start, stop; float milliseconds = 0; PPMPixel *pixels_dev; float* h_dev; float n = image->y * image->x; //printf("%d, %d\n", rows, cols ); int i; for (i = 0; i < n; i++) { image->data[i].red = floor((image->data[i].red * 4) / 256); image->data[i].blue = floor((image->data[i].blue * 4) / 256); image->data[i].green = floor((image->data[i].green * 4) / 256); } //Processo para calcular o tempo de alocar memoria na GPU hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipMalloc(&pixels_dev, sizeof(PPMPixel)*image->x*image->y); hipMalloc(&h_dev, sizeof(float)*64); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); //printf("Alocar Memoria = %f\n",milliseconds/1000); //Calular o tempo de copiar dados da CPU para a GPU hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipMemcpy(pixels_dev, image->data, image->x*image->y*sizeof(PPMPixel), hipMemcpyHostToDevice); hipMemcpy(h_dev, h, 64*sizeof(float), hipMemcpyHostToDevice); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); //printf("\nOffload do buffer = %f\n",milliseconds/1000); dim3 blocks(1,1,1); //variavel para threadsPerBlock e o tamanho do block para cada dimensao 2D dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE,1); //define a quantidade de blocos por dimensao/BLOCK_SIZE. 
se dimensao < block_size, entao define como 1 block blocks.x=((image->y/BLOCK_SIZE) + (((image->y)%BLOCK_SIZE)==0?0:1)); blocks.y=((image->x/BLOCK_SIZE) + (((image->x)%BLOCK_SIZE)==0?0:1)); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( cudaHistogram), dim3(blocks), dim3(threadsPerBlock), 0, 0, pixels_dev, image->x, image->y, h_dev); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); //printf("\nTempo de kernel = %f\n",milliseconds/1000); //GPU para CPU hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipMemcpy(h, h_dev, 64*sizeof(float), hipMemcpyDeviceToHost); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); //printf("\nTempo de offload para receber = %f\n",milliseconds/1001); hipFree(h_dev); } int main(int argc, char *argv[]) { if( argc != 2 ) { printf("Too many or no one arguments supplied.\n"); } double t_start, t_end; int i; char *filename = argv[1]; //Recebendo o arquivo!; //scanf("%s", filename); PPMImage *image = readPPM(filename); float n = image->y * image->x; float *h = (float*)malloc(sizeof(float) * 64); //Inicializar h for(i=0; i < 64; i++) h[i] = 0.0; t_start = rtclock(); Histogram(image, h); t_end = rtclock(); for (i = 0; i < 64; i++){ printf("%0.3f ", h[i]/n); } printf("\n"); fprintf(stdout, "\n%0.6lfs\n", t_end - t_start); free(h); }
a484411f30fa1b8bc9601a84e95e7a47a937e9c5.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #include <cuda.h> #define COMMENT "Histogram_GPU" #define RGB_COMPONENT_COLOR 255 //Tamanhos dos blocos das threads #define BLOCK_SIZE 32 typedef struct { unsigned char red, green, blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } img = (PPMImage *) malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } if (rgb_comp_color != RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } __global__ void cudaHistogram(PPMPixel* data, int rows, int cols, float* h){ //Definindo variaveis locais na funcao na GPU int row = blockIdx.y * 
blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int tid = (cols)*row + col; int j, k, l; //Verificao para os limites das threads if(col < (cols) && row < (rows)){ //Searching for the right value of the pixel int x = 0; for (j = 0; j <= 3; j++) { for (k = 0; k <= 3; k++) { for (l = 0; l <= 3; l++) { if (data[tid].red == j && data[tid].green == k && data[tid].blue == l) { atomicAdd(&h[x], 1); } x++; } } } } } void Histogram(PPMImage *image, float *h) { cudaEvent_t start, stop; float milliseconds = 0; PPMPixel *pixels_dev; float* h_dev; float n = image->y * image->x; //printf("%d, %d\n", rows, cols ); int i; for (i = 0; i < n; i++) { image->data[i].red = floor((image->data[i].red * 4) / 256); image->data[i].blue = floor((image->data[i].blue * 4) / 256); image->data[i].green = floor((image->data[i].green * 4) / 256); } //Processo para calcular o tempo de alocar memoria na GPU cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); cudaMalloc(&pixels_dev, sizeof(PPMPixel)*image->x*image->y); cudaMalloc(&h_dev, sizeof(float)*64); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); //printf("Alocar Memoria = %f\n",milliseconds/1000); //Calular o tempo de copiar dados da CPU para a GPU cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); cudaMemcpy(pixels_dev, image->data, image->x*image->y*sizeof(PPMPixel), cudaMemcpyHostToDevice); cudaMemcpy(h_dev, h, 64*sizeof(float), cudaMemcpyHostToDevice); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); //printf("\nOffload do buffer = %f\n",milliseconds/1000); dim3 blocks(1,1,1); //variavel para threadsPerBlock e o tamanho do block para cada dimensao 2D dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE,1); //define a quantidade de blocos por dimensao/BLOCK_SIZE. 
se dimensao < block_size, entao define como 1 block blocks.x=((image->y/BLOCK_SIZE) + (((image->y)%BLOCK_SIZE)==0?0:1)); blocks.y=((image->x/BLOCK_SIZE) + (((image->x)%BLOCK_SIZE)==0?0:1)); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); cudaHistogram<<<blocks, threadsPerBlock>>> (pixels_dev, image->x, image->y, h_dev); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); //printf("\nTempo de kernel = %f\n",milliseconds/1000); //GPU para CPU cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); cudaMemcpy(h, h_dev, 64*sizeof(float), cudaMemcpyDeviceToHost); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); //printf("\nTempo de offload para receber = %f\n",milliseconds/1001); cudaFree(h_dev); } int main(int argc, char *argv[]) { if( argc != 2 ) { printf("Too many or no one arguments supplied.\n"); } double t_start, t_end; int i; char *filename = argv[1]; //Recebendo o arquivo!; //scanf("%s", filename); PPMImage *image = readPPM(filename); float n = image->y * image->x; float *h = (float*)malloc(sizeof(float) * 64); //Inicializar h for(i=0; i < 64; i++) h[i] = 0.0; t_start = rtclock(); Histogram(image, h); t_end = rtclock(); for (i = 0; i < 64; i++){ printf("%0.3f ", h[i]/n); } printf("\n"); fprintf(stdout, "\n%0.6lfs\n", t_end - t_start); free(h); }
053eebcea276511542a31373243c8ad796fba48c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaConvolution.h" #include <math.h> #include <stdio.h> #include <string.h> // Size of the blocks on the GPU. This is the smallest possible // square block size that is an integer multiple of a warp. You may // modify these values if you want. #define BLOCK_SIZE_X 128 #define BLOCK_SIZE_Y 128 // Size of the stencils. Do not modify. #define STENCIL_WIDTH_X 21 #define STENCIL_WIDTH_Y 11 // Global variables to store the convolution stencils. float *hos_stencil_1dx = NULL; float *hos_stencil_1dy = NULL; __constant__ float STENCIL_1DX[STENCIL_WIDTH_X]; __constant__ float STENCIL_1DY[STENCIL_WIDTH_Y]; //////////////////////////////////////////////////////////////// ///////////////////////// CUDA kernels ///////////////////////// //////////////////////////////////////////////////////////////// // TO DO: Modify the code in the kernels below to answer the homework // questions. __global__ void conv1h_basic_kernel(int width, int height, float *dev_input, float *dev_output) { // TODO: This is only an example kernel: it reverses the greyscale // value of the input image but does not otherwise modify it. int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if ((x < width) && (y < height)) { int image_offset = y * width + x; int start = image_offset - (STENCIL_WIDTH_X / 2); float sum = 0; for (int i = 0; i < STENCIL_WIDTH_X; i++) { int index = start + i; int new_x = x + (index - image_offset); if (new_x >= 0 && new_x < width) { sum += STENCIL_1DX[i] * dev_input[index]; } } dev_output[image_offset] = sum; } } __global__ void conv1v_basic_kernel(int width, int height, float *dev_input, float *dev_output) { // TODO: This is only an example kernel: it reverses the greyscale // value of the input image but does not otherwise modify it. 
int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if ((x < width) && (y < height)) { int y_offset = (STENCIL_WIDTH_Y / 2); float sum = 0; int image_offset = y * width + x; for (int i = 0; i < STENCIL_WIDTH_Y; i++) { int new_y = -y_offset + i + y; if (new_y >= 0 && new_y < height) { int index = new_y * width + x; sum += STENCIL_1DY[i] * dev_input[index]; } } dev_output[image_offset] = sum; } } __global__ void conv1h_tiled_kernel(int width, int height, float *dev_input, float *dev_output) { // This code is adapted from the textbook. int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int i = y * width + x; int shared_size = BLOCK_SIZE_X + STENCIL_WIDTH_X - 1; __shared__ float N_ds[BLOCK_SIZE_X + STENCIL_WIDTH_X - 1]; int n = STENCIL_WIDTH_X / 2; int dimx = blockDim.x; int halo_offsets[4] = {dimx, -dimx}; //, 2*dimx, -2*dimx}; for (int j = 0; j < 4; j++) { int new_x = x + halo_offsets[j]; int shared_index = threadIdx.x + n + halo_offsets[j]; if (shared_index < shared_size && shared_index >= 0) { int halo_index = y * width + new_x; N_ds[shared_index] = (new_x < 0 || new_x >= width) ? 0 : dev_input[halo_index]; } } N_ds[n + threadIdx.x] = (x < 0 || x >= width) ? 0 : dev_input[i]; __syncthreads(); if ((x < width) && (y < height)) { float Pvalue = 0; for (int j = 0; j < STENCIL_WIDTH_X; j++) { Pvalue += N_ds[threadIdx.x + j] * STENCIL_1DX[j]; } dev_output[i] = Pvalue; } } __global__ void conv1v_tiled_kernel(int width, int height, float *dev_input, float *dev_output) { // This code is adapted from the textbook. int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int i = y * width + x; __shared__ float N_ds[BLOCK_SIZE_Y + STENCIL_WIDTH_Y - 1]; int n = STENCIL_WIDTH_Y / 2; int y_left = y - blockDim.y; int halo_index_left = y_left * width + x; if (threadIdx.y >= blockDim.y - n) { N_ds[threadIdx.y - (blockDim.y - n)] = (y_left < 0) ? 
0 : dev_input[halo_index_left]; } N_ds[n + threadIdx.y] = (y < 0 || y >= height) ? 0 : dev_input[i]; int y_right = y + blockDim.y; int halo_index_right = y_right * width + x; if (threadIdx.y < n) { N_ds[n + blockDim.y + threadIdx.y] = (y_right >= height) ? 0 : dev_input[halo_index_right]; } __syncthreads(); if ((x < width) && (y < height)) { float Pvalue = 0; for (int j = 0; j < STENCIL_WIDTH_Y; j++) { Pvalue += N_ds[threadIdx.y + j] * STENCIL_1DY[j]; } dev_output[i] = Pvalue; } } ////////////////////////////////////////////////////////////////// ///////////////////////// Host functions ///////////////////////// ////////////////////////////////////////////////////////////////// // TO DO: Modify the code in the kernels below to answer the homework // questions. // // Notes: // // float *hos_stencil_1dx is a host global pointer containing a 1D // array of length STENCIL_SIZE_X with the stencil data to be used for the // horizontal convolution. // // float *hos_stencil_1dy is a host global pointer containing a 1D // array of length STENCIL_SIZE_Y with the stencil data to be used for the // vertical convolution. void conv1h_basic(int width, int height, float *hos_data_in, float *hos_data_out) { // TODO: This host function is mostly complete, but you will need to // add some code to set up the constant memory on the device to // store the stencil and you may want to modify the grid and block // structure for the kernel. float *dev_image_in_buffer; float *dev_image_out_buffer; // Allocate space on the device and copy over the input image. 
int image_size = width * height * sizeof(float); hipMemcpyToSymbol(STENCIL_1DX, hos_stencil_1dx, STENCIL_WIDTH_X * sizeof(float)); hipMalloc(&dev_image_in_buffer, image_size); hipMalloc(&dev_image_out_buffer, image_size); hipMemcpy(dev_image_in_buffer, hos_data_in, image_size, hipMemcpyHostToDevice); // Compute grid and block size dim3 blockDim(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1); int grid_size_x = ceil((double)width / BLOCK_SIZE_X); int grid_size_y = ceil((double)height / BLOCK_SIZE_Y); printf("\ngrid_size (%d, %d)\n", grid_size_x, grid_size_y); dim3 gridDim(grid_size_x, grid_size_y, 1); // Launch kernel hipLaunchKernelGGL(( conv1h_basic_kernel), dim3(gridDim), dim3(blockDim), 0, 0, width, height, dev_image_in_buffer, dev_image_out_buffer); // Synchronization hipDeviceSynchronize(); // Retrieve the output image and free the memory on the device. hipMemcpy(hos_data_out, dev_image_out_buffer, image_size, hipMemcpyDeviceToHost); hipFree(dev_image_in_buffer); hipFree(dev_image_out_buffer); } // Q2 (b) void conv1v_basic(int width, int height, float *hos_data_in, float *hos_data_out) { float *dev_image_in_buffer; float *dev_image_out_buffer; // Allocate space on the device and copy over the input image. 
int image_size = width * height * sizeof(float); hipMemcpyToSymbol(STENCIL_1DY, hos_stencil_1dy, STENCIL_WIDTH_Y * sizeof(float)); hipMalloc(&dev_image_in_buffer, image_size); hipMalloc(&dev_image_out_buffer, image_size); hipMemcpy(dev_image_in_buffer, hos_data_in, image_size, hipMemcpyHostToDevice); // Compute grid and block size dim3 blockDim(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1); int grid_size_x = ceil((double)width / BLOCK_SIZE_X); int grid_size_y = ceil((double)height / BLOCK_SIZE_Y); printf("\ngrid_size (%d, %d)\n", grid_size_x, grid_size_y); dim3 gridDim(grid_size_x, grid_size_y, 1); // Launch kernel hipLaunchKernelGGL(( conv1v_basic_kernel), dim3(gridDim), dim3(blockDim), 0, 0, width, height, dev_image_in_buffer, dev_image_out_buffer); // Synchronization hipDeviceSynchronize(); // Retrieve the output image and free the memory on the device. hipMemcpy(hos_data_out, dev_image_out_buffer, image_size, hipMemcpyDeviceToHost); hipFree(dev_image_in_buffer); hipFree(dev_image_out_buffer); } // Q2 (c) void conv1to2_basic(int width, int height, float *hos_data_in, float *hos_data_out) { float *temp = (float *)malloc(width * height * sizeof(float)); conv1h_basic(width, height, hos_data_in, temp); conv1v_basic(width, height, temp, hos_data_out); free(temp); } // Q3 (a) void conv1h_tiled(int width, int height, float *hos_data_in, float *hos_data_out) { float *dev_image_in_buffer; float *dev_image_out_buffer; // Allocate space on the device and copy over the input image. 
int image_size = width * height * sizeof(float); hipMemcpyToSymbol(STENCIL_1DX, hos_stencil_1dx, STENCIL_WIDTH_X * sizeof(float)); hipMalloc(&dev_image_in_buffer, image_size); hipMalloc(&dev_image_out_buffer, image_size); hipMemcpy(dev_image_in_buffer, hos_data_in, image_size, hipMemcpyHostToDevice); // Compute grid and block size dim3 blockDim(BLOCK_SIZE_X, 1, 1); int grid_size_x = ceil((double)width / blockDim.x); int grid_size_y = ceil((double)height / blockDim.y); printf("\ngrid_size (%d, %d)\n", grid_size_x, grid_size_y); dim3 gridDim(grid_size_x, grid_size_y, 1); // Launch kernel hipLaunchKernelGGL(( conv1h_tiled_kernel), dim3(gridDim), dim3(blockDim), 0, 0, width, height, dev_image_in_buffer, dev_image_out_buffer); // Synchronization hipDeviceSynchronize(); // Retrieve the output image and free the memory on the device. hipMemcpy(hos_data_out, dev_image_out_buffer, image_size, hipMemcpyDeviceToHost); hipFree(dev_image_in_buffer); hipFree(dev_image_out_buffer); } void conv1v_tiled(int width, int height, float *hos_data_in, float *hos_data_out) { float *dev_image_in_buffer; float *dev_image_out_buffer; // Allocate space on the device and copy over the input image. 
int image_size = width * height * sizeof(float); hipMemcpyToSymbol(STENCIL_1DY, hos_stencil_1dy, STENCIL_WIDTH_Y * sizeof(float)); hipMalloc(&dev_image_in_buffer, image_size); hipMalloc(&dev_image_out_buffer, image_size); hipMemcpy(dev_image_in_buffer, hos_data_in, image_size, hipMemcpyHostToDevice); // Compute grid and block size dim3 blockDim(1, BLOCK_SIZE_Y, 1); int grid_size_x = ceil((double)width / blockDim.x); int grid_size_y = ceil((double)height / blockDim.y); printf("\ngrid_size (%d, %d)\n", grid_size_x, grid_size_y); dim3 gridDim(grid_size_x, grid_size_y, 1); // Launch kernel hipLaunchKernelGGL(( conv1v_tiled_kernel), dim3(gridDim), dim3(blockDim), 0, 0, width, height, dev_image_in_buffer, dev_image_out_buffer); // Synchronization hipDeviceSynchronize(); // Retrieve the output image and free the memory on the device. hipMemcpy(hos_data_out, dev_image_out_buffer, image_size, hipMemcpyDeviceToHost); hipFree(dev_image_in_buffer); hipFree(dev_image_out_buffer); } void conv1to2_tiled(int width, int height, float *hos_data_in, float *hos_data_out) { float *temp = (float *)malloc(width * height * sizeof(float)); conv1h_tiled(width, height, hos_data_in, temp); conv1v_tiled(width, height, temp, hos_data_out); free(temp); } ///////////////////////////////////////////////////////////////////////// ///////////////// No change to code after this point //////////////////// ///////////////////////////////////////////////////////////////////////// // DO NOT CHANGE THE CODE BELOW THIS COMMENT (or in any of the other // files). Modification of the code below or in the other files may // cause the autograder to fail, and you may receive a zero for the // corresponding questions in the homework. 
Convolution::~Convolution() { free(hos_stencil_1dx); free(hos_stencil_1dy); hos_stencil_1dx = hos_stencil_1dy = NULL; } void Convolution::evaluate_gaussian_stencil(float sigma, float *stencil_array, int stencil_width) { // Compute the stencil float normalization = 0.0f; int half_width = stencil_width / 2; float interval = 20.0f / stencil_width; for (int i = 0; i < stencil_width; i++) { float x = (i - half_width) * interval; float gaussian = ::exp(-(x * x) / (2 * sigma * sigma)); stencil_array[i] = gaussian; normalization += gaussian; // printf("%d: %f - %f\n", i, x, gaussian); } // Normalize so that stencil sums to 1 and store to stencil_array. for (int i = 0; i < stencil_width; i++) stencil_array[i] /= normalization; } void Convolution::setup_host_gaussian_stencil(float sigma) { // Allocate memory, freed in destructor. hos_stencil_1dx = (float *)malloc(STENCIL_WIDTH_X * sizeof(float)); hos_stencil_1dy = (float *)malloc(STENCIL_WIDTH_Y * sizeof(float)); // Evaluate Gaussian function to create the stencils. 
evaluate_gaussian_stencil(sigma, hos_stencil_1dx, STENCIL_WIDTH_X); evaluate_gaussian_stencil(sigma, hos_stencil_1dy, STENCIL_WIDTH_Y); } void Convolution::setup_device(int width, int height) { hipDeviceProp_t prop; int ndev; hipGetDeviceCount(&ndev); if (ndev < 1) { fprintf(stderr, "No CUDA device found\n"); exit(-1); } hipGetDeviceProperties(&prop, 0); printf("The GPU is a %s\n", prop.name); printf("Cuda capability %d.%d.\n", prop.major, prop.minor); printf("Shared memory per block %d bytes.\n", prop.sharedMemPerBlock); } void Convolution::load_image_input(const Image &image) { if (image.channel != 1) { printf("Error: Input image has %d channels (should be 1).\n", image.channel); } int w = image.width, h = image.height; // Allocate host input image buffer if (!hos_image_in.pixels) { hos_image_in.alloc(image.width, image.height, image.channel); } memcpy(hos_image_in.pixels, image.pixels, w * h * sizeof(float)); hos_image_out.alloc(w, h, 1); } // Since we've copied input to device in function load_image_input, // we can just launch kernels here: void Convolution::run_horizontal_1d() { int width = hos_image_in.width, height = hos_image_in.height; // Call student's code conv1h_basic(width, height, hos_image_in.pixels, hos_image_out.pixels); } void Convolution::run_vertical_1d() { int width = hos_image_in.width, height = hos_image_in.height; // Call student's code conv1v_basic(width, height, hos_image_in.pixels, hos_image_out.pixels); } void Convolution::run_1to2() { int width = hos_image_in.width, height = hos_image_in.height; conv1to2_basic(width, height, hos_image_in.pixels, hos_image_out.pixels); } void Convolution::run_1to2_tiling() { int width = hos_image_in.width, height = hos_image_in.height; conv1to2_tiled(width, height, hos_image_in.pixels, hos_image_out.pixels); } void Convolution::run_2d_tiling() {} void Convolution::run_horizontal_1d_tiling() { int width = hos_image_in.width, height = hos_image_in.height; // Call student's code conv1h_tiled(width, height, 
hos_image_in.pixels, hos_image_out.pixels); } void Convolution::run_vertical_1d_tiling() { int width = hos_image_in.width, height = hos_image_in.height; // Call student's code conv1v_tiled(width, height, hos_image_in.pixels, hos_image_out.pixels); } const float *Convolution::get_image_buffer() { return hos_image_out.pixels; } const Image *Convolution::get_image_ptr() { return &hos_image_out; }
053eebcea276511542a31373243c8ad796fba48c.cu
#include "cudaConvolution.h" #include <math.h> #include <stdio.h> #include <string.h> // Size of the blocks on the GPU. This is the smallest possible // square block size that is an integer multiple of a warp. You may // modify these values if you want. #define BLOCK_SIZE_X 128 #define BLOCK_SIZE_Y 128 // Size of the stencils. Do not modify. #define STENCIL_WIDTH_X 21 #define STENCIL_WIDTH_Y 11 // Global variables to store the convolution stencils. float *hos_stencil_1dx = NULL; float *hos_stencil_1dy = NULL; __constant__ float STENCIL_1DX[STENCIL_WIDTH_X]; __constant__ float STENCIL_1DY[STENCIL_WIDTH_Y]; //////////////////////////////////////////////////////////////// ///////////////////////// CUDA kernels ///////////////////////// //////////////////////////////////////////////////////////////// // TO DO: Modify the code in the kernels below to answer the homework // questions. __global__ void conv1h_basic_kernel(int width, int height, float *dev_input, float *dev_output) { // TODO: This is only an example kernel: it reverses the greyscale // value of the input image but does not otherwise modify it. int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if ((x < width) && (y < height)) { int image_offset = y * width + x; int start = image_offset - (STENCIL_WIDTH_X / 2); float sum = 0; for (int i = 0; i < STENCIL_WIDTH_X; i++) { int index = start + i; int new_x = x + (index - image_offset); if (new_x >= 0 && new_x < width) { sum += STENCIL_1DX[i] * dev_input[index]; } } dev_output[image_offset] = sum; } } __global__ void conv1v_basic_kernel(int width, int height, float *dev_input, float *dev_output) { // TODO: This is only an example kernel: it reverses the greyscale // value of the input image but does not otherwise modify it. 
int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if ((x < width) && (y < height)) { int y_offset = (STENCIL_WIDTH_Y / 2); float sum = 0; int image_offset = y * width + x; for (int i = 0; i < STENCIL_WIDTH_Y; i++) { int new_y = -y_offset + i + y; if (new_y >= 0 && new_y < height) { int index = new_y * width + x; sum += STENCIL_1DY[i] * dev_input[index]; } } dev_output[image_offset] = sum; } } __global__ void conv1h_tiled_kernel(int width, int height, float *dev_input, float *dev_output) { // This code is adapted from the textbook. int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int i = y * width + x; int shared_size = BLOCK_SIZE_X + STENCIL_WIDTH_X - 1; __shared__ float N_ds[BLOCK_SIZE_X + STENCIL_WIDTH_X - 1]; int n = STENCIL_WIDTH_X / 2; int dimx = blockDim.x; int halo_offsets[4] = {dimx, -dimx}; //, 2*dimx, -2*dimx}; for (int j = 0; j < 4; j++) { int new_x = x + halo_offsets[j]; int shared_index = threadIdx.x + n + halo_offsets[j]; if (shared_index < shared_size && shared_index >= 0) { int halo_index = y * width + new_x; N_ds[shared_index] = (new_x < 0 || new_x >= width) ? 0 : dev_input[halo_index]; } } N_ds[n + threadIdx.x] = (x < 0 || x >= width) ? 0 : dev_input[i]; __syncthreads(); if ((x < width) && (y < height)) { float Pvalue = 0; for (int j = 0; j < STENCIL_WIDTH_X; j++) { Pvalue += N_ds[threadIdx.x + j] * STENCIL_1DX[j]; } dev_output[i] = Pvalue; } } __global__ void conv1v_tiled_kernel(int width, int height, float *dev_input, float *dev_output) { // This code is adapted from the textbook. int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int i = y * width + x; __shared__ float N_ds[BLOCK_SIZE_Y + STENCIL_WIDTH_Y - 1]; int n = STENCIL_WIDTH_Y / 2; int y_left = y - blockDim.y; int halo_index_left = y_left * width + x; if (threadIdx.y >= blockDim.y - n) { N_ds[threadIdx.y - (blockDim.y - n)] = (y_left < 0) ? 
0 : dev_input[halo_index_left]; } N_ds[n + threadIdx.y] = (y < 0 || y >= height) ? 0 : dev_input[i]; int y_right = y + blockDim.y; int halo_index_right = y_right * width + x; if (threadIdx.y < n) { N_ds[n + blockDim.y + threadIdx.y] = (y_right >= height) ? 0 : dev_input[halo_index_right]; } __syncthreads(); if ((x < width) && (y < height)) { float Pvalue = 0; for (int j = 0; j < STENCIL_WIDTH_Y; j++) { Pvalue += N_ds[threadIdx.y + j] * STENCIL_1DY[j]; } dev_output[i] = Pvalue; } } ////////////////////////////////////////////////////////////////// ///////////////////////// Host functions ///////////////////////// ////////////////////////////////////////////////////////////////// // TO DO: Modify the code in the kernels below to answer the homework // questions. // // Notes: // // float *hos_stencil_1dx is a host global pointer containing a 1D // array of length STENCIL_SIZE_X with the stencil data to be used for the // horizontal convolution. // // float *hos_stencil_1dy is a host global pointer containing a 1D // array of length STENCIL_SIZE_Y with the stencil data to be used for the // vertical convolution. void conv1h_basic(int width, int height, float *hos_data_in, float *hos_data_out) { // TODO: This host function is mostly complete, but you will need to // add some code to set up the constant memory on the device to // store the stencil and you may want to modify the grid and block // structure for the kernel. float *dev_image_in_buffer; float *dev_image_out_buffer; // Allocate space on the device and copy over the input image. 
int image_size = width * height * sizeof(float); cudaMemcpyToSymbol(STENCIL_1DX, hos_stencil_1dx, STENCIL_WIDTH_X * sizeof(float)); cudaMalloc(&dev_image_in_buffer, image_size); cudaMalloc(&dev_image_out_buffer, image_size); cudaMemcpy(dev_image_in_buffer, hos_data_in, image_size, cudaMemcpyHostToDevice); // Compute grid and block size dim3 blockDim(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1); int grid_size_x = ceil((double)width / BLOCK_SIZE_X); int grid_size_y = ceil((double)height / BLOCK_SIZE_Y); printf("\ngrid_size (%d, %d)\n", grid_size_x, grid_size_y); dim3 gridDim(grid_size_x, grid_size_y, 1); // Launch kernel conv1h_basic_kernel<<<gridDim, blockDim>>>(width, height, dev_image_in_buffer, dev_image_out_buffer); // Synchronization cudaThreadSynchronize(); // Retrieve the output image and free the memory on the device. cudaMemcpy(hos_data_out, dev_image_out_buffer, image_size, cudaMemcpyDeviceToHost); cudaFree(dev_image_in_buffer); cudaFree(dev_image_out_buffer); } // Q2 (b) void conv1v_basic(int width, int height, float *hos_data_in, float *hos_data_out) { float *dev_image_in_buffer; float *dev_image_out_buffer; // Allocate space on the device and copy over the input image. 
int image_size = width * height * sizeof(float); cudaMemcpyToSymbol(STENCIL_1DY, hos_stencil_1dy, STENCIL_WIDTH_Y * sizeof(float)); cudaMalloc(&dev_image_in_buffer, image_size); cudaMalloc(&dev_image_out_buffer, image_size); cudaMemcpy(dev_image_in_buffer, hos_data_in, image_size, cudaMemcpyHostToDevice); // Compute grid and block size dim3 blockDim(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1); int grid_size_x = ceil((double)width / BLOCK_SIZE_X); int grid_size_y = ceil((double)height / BLOCK_SIZE_Y); printf("\ngrid_size (%d, %d)\n", grid_size_x, grid_size_y); dim3 gridDim(grid_size_x, grid_size_y, 1); // Launch kernel conv1v_basic_kernel<<<gridDim, blockDim>>>(width, height, dev_image_in_buffer, dev_image_out_buffer); // Synchronization cudaThreadSynchronize(); // Retrieve the output image and free the memory on the device. cudaMemcpy(hos_data_out, dev_image_out_buffer, image_size, cudaMemcpyDeviceToHost); cudaFree(dev_image_in_buffer); cudaFree(dev_image_out_buffer); } // Q2 (c) void conv1to2_basic(int width, int height, float *hos_data_in, float *hos_data_out) { float *temp = (float *)malloc(width * height * sizeof(float)); conv1h_basic(width, height, hos_data_in, temp); conv1v_basic(width, height, temp, hos_data_out); free(temp); } // Q3 (a) void conv1h_tiled(int width, int height, float *hos_data_in, float *hos_data_out) { float *dev_image_in_buffer; float *dev_image_out_buffer; // Allocate space on the device and copy over the input image. 
int image_size = width * height * sizeof(float); cudaMemcpyToSymbol(STENCIL_1DX, hos_stencil_1dx, STENCIL_WIDTH_X * sizeof(float)); cudaMalloc(&dev_image_in_buffer, image_size); cudaMalloc(&dev_image_out_buffer, image_size); cudaMemcpy(dev_image_in_buffer, hos_data_in, image_size, cudaMemcpyHostToDevice); // Compute grid and block size dim3 blockDim(BLOCK_SIZE_X, 1, 1); int grid_size_x = ceil((double)width / blockDim.x); int grid_size_y = ceil((double)height / blockDim.y); printf("\ngrid_size (%d, %d)\n", grid_size_x, grid_size_y); dim3 gridDim(grid_size_x, grid_size_y, 1); // Launch kernel conv1h_tiled_kernel<<<gridDim, blockDim>>>(width, height, dev_image_in_buffer, dev_image_out_buffer); // Synchronization cudaThreadSynchronize(); // Retrieve the output image and free the memory on the device. cudaMemcpy(hos_data_out, dev_image_out_buffer, image_size, cudaMemcpyDeviceToHost); cudaFree(dev_image_in_buffer); cudaFree(dev_image_out_buffer); } void conv1v_tiled(int width, int height, float *hos_data_in, float *hos_data_out) { float *dev_image_in_buffer; float *dev_image_out_buffer; // Allocate space on the device and copy over the input image. int image_size = width * height * sizeof(float); cudaMemcpyToSymbol(STENCIL_1DY, hos_stencil_1dy, STENCIL_WIDTH_Y * sizeof(float)); cudaMalloc(&dev_image_in_buffer, image_size); cudaMalloc(&dev_image_out_buffer, image_size); cudaMemcpy(dev_image_in_buffer, hos_data_in, image_size, cudaMemcpyHostToDevice); // Compute grid and block size dim3 blockDim(1, BLOCK_SIZE_Y, 1); int grid_size_x = ceil((double)width / blockDim.x); int grid_size_y = ceil((double)height / blockDim.y); printf("\ngrid_size (%d, %d)\n", grid_size_x, grid_size_y); dim3 gridDim(grid_size_x, grid_size_y, 1); // Launch kernel conv1v_tiled_kernel<<<gridDim, blockDim>>>(width, height, dev_image_in_buffer, dev_image_out_buffer); // Synchronization cudaThreadSynchronize(); // Retrieve the output image and free the memory on the device. 
cudaMemcpy(hos_data_out, dev_image_out_buffer, image_size, cudaMemcpyDeviceToHost); cudaFree(dev_image_in_buffer); cudaFree(dev_image_out_buffer); } void conv1to2_tiled(int width, int height, float *hos_data_in, float *hos_data_out) { float *temp = (float *)malloc(width * height * sizeof(float)); conv1h_tiled(width, height, hos_data_in, temp); conv1v_tiled(width, height, temp, hos_data_out); free(temp); } ///////////////////////////////////////////////////////////////////////// ///////////////// No change to code after this point //////////////////// ///////////////////////////////////////////////////////////////////////// // DO NOT CHANGE THE CODE BELOW THIS COMMENT (or in any of the other // files). Modification of the code below or in the other files may // cause the autograder to fail, and you may receive a zero for the // corresponding questions in the homework. Convolution::~Convolution() { free(hos_stencil_1dx); free(hos_stencil_1dy); hos_stencil_1dx = hos_stencil_1dy = NULL; } void Convolution::evaluate_gaussian_stencil(float sigma, float *stencil_array, int stencil_width) { // Compute the stencil float normalization = 0.0f; int half_width = stencil_width / 2; float interval = 20.0f / stencil_width; for (int i = 0; i < stencil_width; i++) { float x = (i - half_width) * interval; float gaussian = std::exp(-(x * x) / (2 * sigma * sigma)); stencil_array[i] = gaussian; normalization += gaussian; // printf("%d: %f - %f\n", i, x, gaussian); } // Normalize so that stencil sums to 1 and store to stencil_array. for (int i = 0; i < stencil_width; i++) stencil_array[i] /= normalization; } void Convolution::setup_host_gaussian_stencil(float sigma) { // Allocate memory, freed in destructor. hos_stencil_1dx = (float *)malloc(STENCIL_WIDTH_X * sizeof(float)); hos_stencil_1dy = (float *)malloc(STENCIL_WIDTH_Y * sizeof(float)); // Evaluate Gaussian function to create the stencils. 
evaluate_gaussian_stencil(sigma, hos_stencil_1dx, STENCIL_WIDTH_X); evaluate_gaussian_stencil(sigma, hos_stencil_1dy, STENCIL_WIDTH_Y); } void Convolution::setup_device(int width, int height) { cudaDeviceProp prop; int ndev; cudaGetDeviceCount(&ndev); if (ndev < 1) { fprintf(stderr, "No CUDA device found\n"); exit(-1); } cudaGetDeviceProperties(&prop, 0); printf("The GPU is a %s\n", prop.name); printf("Cuda capability %d.%d.\n", prop.major, prop.minor); printf("Shared memory per block %d bytes.\n", prop.sharedMemPerBlock); } void Convolution::load_image_input(const Image &image) { if (image.channel != 1) { printf("Error: Input image has %d channels (should be 1).\n", image.channel); } int w = image.width, h = image.height; // Allocate host input image buffer if (!hos_image_in.pixels) { hos_image_in.alloc(image.width, image.height, image.channel); } memcpy(hos_image_in.pixels, image.pixels, w * h * sizeof(float)); hos_image_out.alloc(w, h, 1); } // Since we've copied input to device in function load_image_input, // we can just launch kernels here: void Convolution::run_horizontal_1d() { int width = hos_image_in.width, height = hos_image_in.height; // Call student's code conv1h_basic(width, height, hos_image_in.pixels, hos_image_out.pixels); } void Convolution::run_vertical_1d() { int width = hos_image_in.width, height = hos_image_in.height; // Call student's code conv1v_basic(width, height, hos_image_in.pixels, hos_image_out.pixels); } void Convolution::run_1to2() { int width = hos_image_in.width, height = hos_image_in.height; conv1to2_basic(width, height, hos_image_in.pixels, hos_image_out.pixels); } void Convolution::run_1to2_tiling() { int width = hos_image_in.width, height = hos_image_in.height; conv1to2_tiled(width, height, hos_image_in.pixels, hos_image_out.pixels); } void Convolution::run_2d_tiling() {} void Convolution::run_horizontal_1d_tiling() { int width = hos_image_in.width, height = hos_image_in.height; // Call student's code conv1h_tiled(width, 
height, hos_image_in.pixels, hos_image_out.pixels); } void Convolution::run_vertical_1d_tiling() { int width = hos_image_in.width, height = hos_image_in.height; // Call student's code conv1v_tiled(width, height, hos_image_in.pixels, hos_image_out.pixels); } const float *Convolution::get_image_buffer() { return hos_image_out.pixels; } const Image *Convolution::get_image_ptr() { return &hos_image_out; }
6ca8678ff8fbb4dee3f9cfc41bb6181d3ae27168.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_search/cuda_hetro_gbfs.cuh" #include "evaluator.h" #include "open_list_factory.h" #include "cuda_common/cuda_check.cuh" #include "cuda_search/cuda_random_walk.cuh" namespace pplanner { using std::vector; extern __constant__ CudaSASPlus cuda_problem; extern __constant__ CudaSuccessorGenerator cuda_generator; extern __constant__ CudaLandmarkGraph cuda_landmark_graph; extern __constant__ CudaZobristHash cuda_c_hash; extern __constant__ CudaZobristHash cuda_d_hash; std::size_t CudaHetroGBFS::AllocateMessage() { m_.h_min = new int[n_threads_]; m_.states = new int[n_threads_ * problem_->n_variables()]; m_.accepted = new uint8_t[n_threads_ * graph_->n_landmarks_bytes()]; CUDA_CHECK(hipMalloc((void**)&cuda_steps_, n_threads_ * sizeof(int))); CUDA_CHECK(hipMalloc((void**)&cuda_offsets_, n_threads_ * sizeof(int))); return 2 * n_threads_ * sizeof(int); } void CudaHetroGBFS::Init(const boost::property_tree::ptree &pt) { if (auto opt = pt.get_optional<int>("n_grid")) n_grid_ = opt.get(); if (auto opt = pt.get_optional<int>("n_block")) n_block_ = opt.get(); n_threads_ = n_grid_ * n_block_; if (auto opt = pt.get_optional<int>("gpu_threshold")) gpu_threshold_ = opt.get(); graph_->InitLandmarks(lmcount_->landmark_graph()); size_t ram = 5000000000; if (auto opt = pt.get_optional<size_t>("ram")) ram = opt.get(); graph_->ReserveByRAMSize(ram); auto open_list_option = pt.get_child("open_list"); open_ = OpenListFactory<int, int>(open_list_option); std::size_t gpu_ram = 8000000000; if (auto opt = pt.get_optional<size_t>("gpu_ram")) gpu_ram = opt.get(); gpu_ram -= InitCudaSASPlus(problem_, cuda_problem_); gpu_ram -= InitCudaSuccessorGenerator(generator_, cuda_generator_); gpu_ram -= InitCudaLandmarkGraph(lmcount_->landmark_graph(), cuda_landmark_graph_); gpu_ram -= InitCudaZobristHash(graph_->hash(), cuda_c_hash_); gpu_ram -= InitCudaZobristHash(d_hash_, cuda_d_hash_); 
CUDA_CHECK(hipMemcpyToSymbol(cuda_problem, cuda_problem_, sizeof(CudaSASPlus))); CUDA_CHECK(hipMemcpyToSymbol(cuda_generator, cuda_generator_, sizeof(CudaSuccessorGenerator))); CUDA_CHECK(hipMemcpyToSymbol(cuda_landmark_graph, cuda_landmark_graph_, sizeof(CudaLandmarkGraph))); CUDA_CHECK(hipMemcpyToSymbol(cuda_c_hash, cuda_c_hash_, sizeof(CudaZobristHash))); CUDA_CHECK(hipMemcpyToSymbol(cuda_d_hash, cuda_d_hash_, sizeof(CudaZobristHash))); gpu_ram -= CudaInitializeOpenList(n_threads_, lmcount_->landmark_graph()->n_landmarks(), &cuda_open_); int n_successors_max = 10 * n_threads_; gpu_ram -= 10 * n_threads_ * sizeof(int); InitializeGBFSMessage(n_threads_, &m_); gpu_ram -= AllocateMessage(); gpu_ram -= CudaInitializeGBFSMessage(problem_, graph_, n_threads_, n_successors_max, &cuda_m_); int closed_exponent = 13; std::size_t closed_size = 1u << closed_exponent; gpu_ram -= CudaInitializeClosedList(n_threads_, closed_size, &cuda_closed_); gpu_ram -= 2000000000; InitCudaSearchGraph(problem_, graph_, closed_exponent, gpu_ram, &cuda_graph_); } CudaHetroGBFS::~CudaHetroGBFS() { FreeCudaSASPlus(cuda_problem_); FreeCudaSuccessorGenerator(cuda_generator_); FreeCudaLandmarkGraph(cuda_landmark_graph_); FreeCudaZobristHash(cuda_c_hash_); FreeCudaZobristHash(cuda_d_hash_); FreeCudaSearchGraph(&cuda_graph_); CUDA_CHECK(hipFree(cuda_steps_)); CUDA_CHECK(hipFree(cuda_offsets_)); CudaFreeGBFSMessage(&cuda_m_); delete[] m_.h_min; delete[] m_.states; delete[] m_.accepted; FreeGBFSMessage(&m_); delete cuda_problem_; delete cuda_generator_; delete cuda_landmark_graph_; delete cuda_c_hash_; delete cuda_d_hash_; } void CudaHetroGBFS::ClearGPU() { hipLaunchKernelGGL(( CudaNPlanStep), dim3(n_grid_), dim3(n_block_), 0, 0, cuda_graph_, cuda_m_.nodes, cuda_steps_); std::vector<int> steps(n_threads_); CUDA_CHECK(hipMemcpy(steps.data(), cuda_steps_, n_threads_ * sizeof(int), hipMemcpyDeviceToHost)); std::vector<int> offsets(n_threads_, 0); offsets[0] = steps[0]; for (int i = 1; i < n_threads_; ++i) 
offsets[i] = steps[i] + offsets[i - 1]; CUDA_CHECK(hipMemcpy(cuda_offsets_, offsets.data(), n_threads_ * sizeof(int), hipMemcpyHostToDevice)); int *cuda_plans = nullptr; CUDA_CHECK(hipMalloc((void**)&cuda_plans, offsets.back() * sizeof(int))); hipLaunchKernelGGL(( CudaExtractPlan), dim3(n_grid_), dim3(n_block_), 0, 0, cuda_graph_, cuda_offsets_, cuda_m_.nodes, cuda_plans); std::vector<int> plans(offsets.back()); std::size_t n_bytes = graph_->n_landmarks_bytes(); CUDA_CHECK(hipMemcpy(plans.data(), cuda_plans, offsets.back() * sizeof(int), hipMemcpyDeviceToHost)); CUDA_CHECK(hipMemcpy(m_.states, cuda_m_.states, n_threads_ * problem_->n_variables() * sizeof(int), hipMemcpyDeviceToHost)); CUDA_CHECK(hipMemcpy(m_.accepted, cuda_m_.accepted, n_threads_ * n_bytes * sizeof(uint8_t), hipMemcpyDeviceToHost)); std::vector<int> state(problem_->n_variables()); int h_max = lmcount_->landmark_graph()->n_landmarks() + 1; for (int i = 0; i < n_threads_; ++i) { int h = m_.h_min[i]; if (h == -1 || h == h_max) continue; memcpy(state.data(), &m_.states[i * problem_->n_variables()], problem_->n_variables() * sizeof(int)); int node = graph_->GenerateNodeIfNotClosed(-1, gpu_start_, state); if (node != -1) { graph_->SetH(node, h); graph_->SetLandmark(node, &m_.accepted[i * n_bytes]); int plan_start = i == 0 ? 
0 : offsets[i - 1]; sequences_[node] = std::vector<int>(steps[i]); for (int j = 0; j < steps[i]; ++j) sequences_[node][j] = plans[plan_start + j]; open_->Push(h, node, false); if (h < best_h_) { best_h_ = h; best_node_ = node; n_plateau_ = 0; std::cout << "New best heuristic value: " << best_h_ << " (found by GPU)" << std::endl; std::cout << "[" << expanded_ << " expanded]" << std::endl; } } } CUDA_CHECK(hipFree(cuda_plans)); } void CudaHetroGBFS::InitialPushGPU(int node, int h) { gpu_start_ = node; std::vector<int> state(problem_->n_variables()); graph_->State(node, state); CUDA_CHECK(hipMemcpy(cuda_m_.states, state.data(), problem_->n_variables() * sizeof(int), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(cuda_m_.accepted, graph_->Landmark(node), graph_->n_landmarks_bytes() * sizeof(uint8_t), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(cuda_m_.h_min, &h, sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( CudaInitialPush), dim3(n_grid_), dim3(n_block_), 0, 0, cuda_graph_, cuda_m_, cuda_open_, n_threads_); m_.n_nodes = 1; cuda_m_.n_nodes = 1; CUDA_CHECK(hipDeviceSynchronize()); } void CudaHetroGBFS::InitialEvaluateAndPush() { auto state = problem_->initial(); int node = graph_->GenerateNode(-1, -1, state); ++generated_; best_h_ = lmcount_->Evaluate(state, nullptr, graph_->Landmark(node)); graph_->SetH(node, best_h_); std::cout << "Initial heuristic value: " << best_h_ << std::endl; best_node_ = node; open_->Push(best_h_, node, false); InitialPushGPU(node, best_h_); } int CudaHetroGBFS::CpuExpand() { thread_local vector<int> state(problem_->n_variables()); thread_local vector<int> child(problem_->n_variables()); thread_local vector<int> applicable; if (open_->IsEmpty()) return -1; int node = open_->Pop(); if (!graph_->CloseIfNot(node)) return -1; ++expanded_; graph_->Expand(node, state); if (problem_->IsGoal(state)) return node; generator_->Generate(state, applicable); if (applicable.empty()) return -1; for (auto o : applicable) { problem_->ApplyEffect(o, 
state, child); int child_node = graph_->GenerateNodeIfNotClosed(o, node, state, child); if (child_node == -1) continue; ++generated_; int h = lmcount_->Evaluate(child, graph_->Landmark(node), graph_->Landmark(child_node)); if (h == -1) continue; graph_->SetH(child_node, h); open_->Push(h, child_node, false); ++n_plateau_; if (h < best_h_) { best_h_ = h; best_node_ = child_node; n_plateau_ = 0; std::cout << "New best heuristic value: " << best_h_ << std::endl; std::cout << "[" << expanded_ << " expanded]" << std::endl; } } return -1; } int CudaHetroGBFS::Search() { InitialEvaluateAndPush(); m_.n_nodes += 1; cuda_m_.n_nodes += 1; int goal = -1; int *cuda_goal = nullptr; CUDA_CHECK(hipMalloc((void**)&cuda_goal, sizeof(int))); CUDA_CHECK(hipMemcpy(cuda_goal, &goal, sizeof(int), hipMemcpyHostToDevice)); while (goal == -1) { hipEvent_t pop_fin; CUDA_CHECK(hipEventCreate(&pop_fin)); hipLaunchKernelGGL(( CudaPop), dim3(n_grid_), dim3(n_block_), 0, 0, cuda_graph_, cuda_m_, cuda_open_, cuda_closed_); CUDA_CHECK(hipEventRecord(pop_fin, 0)); while (hipEventQuery(pop_fin) == hipErrorNotReady) { goal = CpuExpand(); if (goal != -1) { CUDA_CHECK(hipFree(cuda_goal)); return goal; } } expanded_ += PrepareExpansion(n_threads_, &m_, &cuda_m_); hipEvent_t expand_fin; CUDA_CHECK(hipEventCreate(&expand_fin)); hipLaunchKernelGGL(( CudaExpand), dim3(n_grid_), dim3(n_block_), 0, 0, cuda_graph_, cuda_m_, n_threads_, cuda_goal); CUDA_CHECK(hipEventRecord(expand_fin, 0)); while (hipEventQuery(expand_fin) == hipErrorNotReady) { goal = CpuExpand(); if (goal != -1) { CUDA_CHECK(hipFree(cuda_goal)); return goal; } } CUDA_CHECK(hipMemcpy(&goal, cuda_goal, sizeof(int), hipMemcpyDeviceToHost)); if (goal != -1) { std::cout << "goal found by GPU" << std::endl; goal_on_gpu_ = true; break; } generated_ += PrepareSort(n_threads_, &m_, &cuda_m_); CUDA_CHECK(hipMemcpy(m_.h_min, cuda_m_.h_min, n_threads_ * sizeof(int), hipMemcpyDeviceToHost)); int h_min = -1; for (int i = 0; i < n_threads_; ++i) { if 
(m_.h_min[i] != -1 && (h_min == -1 || m_.h_min[i] < h_min)) h_min = m_.h_min[i]; } if (m_.n_nodes >= static_cast<int>(cuda_graph_.node_max - 1) || h_min < best_h_) { std::cout << "clear GPU RAM" << std::endl; ClearGPU(); CudaClearOpenList(n_threads_, &cuda_open_); CudaClearClosedList(n_threads_, &cuda_closed_); m_.n_nodes = 0; cuda_m_.n_nodes = 0; InitialPushGPU(best_node_, best_h_); continue; } else if (gpu_threshold_ > 0 && n_plateau_ > gpu_threshold_) { std::cout << "clear GPU RAM" << std::endl; ClearGPU(); if (gpu_threshold_ > 0 && n_plateau_ > gpu_threshold_) { CudaClearOpenList(n_threads_, &cuda_open_); CudaClearClosedList(n_threads_, &cuda_closed_); m_.n_nodes = 0; cuda_m_.n_nodes = 0; InitialPushGPU(best_node_, best_h_); n_plateau_ = 0; continue; } } hipEvent_t sort_fin; CUDA_CHECK(hipEventCreate(&sort_fin)); hipLaunchKernelGGL(( CudaSortChildren), dim3(n_grid_), dim3(n_block_), 0, 0, cuda_graph_, cuda_m_); CUDA_CHECK(hipEventRecord(sort_fin, 0)); while(hipEventQuery(sort_fin) == hipErrorNotReady){ goal = CpuExpand(); if (goal != -1) { CUDA_CHECK(hipFree(cuda_goal)); return goal; } } hipEvent_t push_fin; CUDA_CHECK(hipEventCreate(&push_fin)); hipLaunchKernelGGL(( CudaPush), dim3(n_grid_), dim3(n_block_), 0, 0, cuda_graph_, cuda_m_, cuda_closed_, cuda_open_); CUDA_CHECK(hipEventRecord(push_fin, 0)); while(hipEventQuery(push_fin) == hipErrorNotReady){ goal = CpuExpand(); if (goal != -1) { CUDA_CHECK(hipFree(cuda_goal)); return goal; } } } CUDA_CHECK(hipFree(cuda_goal)); return goal; } std::vector<int> CudaHetroGBFS::ExtractPlan(int node) { if (node == -1) return std::vector<int>{-1}; int current = goal_on_gpu_ ? 
gpu_start_ : node; vector<int> result; while (graph_->Parent(current) != -1) { if (graph_->Action(current) == -1) { result.insert(result.begin(), sequences_[current].begin(), sequences_[current].end()); } else { result.insert(result.begin(), graph_->Action(current)); } current = graph_->Parent(current); } if (!goal_on_gpu_) return result; int *goals = nullptr; CUDA_CHECK(hipMalloc((void**)&goals, sizeof(int))); CUDA_CHECK(hipMemcpy(goals, &node, sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( CudaNPlanStep), dim3(1), dim3(1), 0, 0, cuda_graph_, goals, cuda_steps_); int step; CUDA_CHECK(hipMemcpy(&step, cuda_steps_, sizeof(int), hipMemcpyDeviceToHost)); int *cuda_plan = nullptr; CUDA_CHECK(hipMalloc((void**)&cuda_plan, step * sizeof(int))); hipLaunchKernelGGL(( CudaExtractPlan), dim3(1), dim3(1), 0, 0, cuda_graph_, cuda_steps_, goals, cuda_plan); int *plan = new int[step]; CUDA_CHECK(hipMemcpy(plan, cuda_plan, step * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 0; i < step; ++i) result.push_back(plan[i]); delete[] plan; CUDA_CHECK(hipFree(cuda_plan)); return result; } void CudaHetroGBFS::DumpStatistics() const { std::cout << "Expanded " << expanded_ << " state(s)" << std::endl; std::cout << "Generated " << generated_ << " state(s)" << std::endl; } } // namespace pplanner
6ca8678ff8fbb4dee3f9cfc41bb6181d3ae27168.cu
#include "cuda_search/cuda_hetro_gbfs.cuh" #include "evaluator.h" #include "open_list_factory.h" #include "cuda_common/cuda_check.cuh" #include "cuda_search/cuda_random_walk.cuh" namespace pplanner { using std::vector; extern __constant__ CudaSASPlus cuda_problem; extern __constant__ CudaSuccessorGenerator cuda_generator; extern __constant__ CudaLandmarkGraph cuda_landmark_graph; extern __constant__ CudaZobristHash cuda_c_hash; extern __constant__ CudaZobristHash cuda_d_hash; std::size_t CudaHetroGBFS::AllocateMessage() { m_.h_min = new int[n_threads_]; m_.states = new int[n_threads_ * problem_->n_variables()]; m_.accepted = new uint8_t[n_threads_ * graph_->n_landmarks_bytes()]; CUDA_CHECK(cudaMalloc((void**)&cuda_steps_, n_threads_ * sizeof(int))); CUDA_CHECK(cudaMalloc((void**)&cuda_offsets_, n_threads_ * sizeof(int))); return 2 * n_threads_ * sizeof(int); } void CudaHetroGBFS::Init(const boost::property_tree::ptree &pt) { if (auto opt = pt.get_optional<int>("n_grid")) n_grid_ = opt.get(); if (auto opt = pt.get_optional<int>("n_block")) n_block_ = opt.get(); n_threads_ = n_grid_ * n_block_; if (auto opt = pt.get_optional<int>("gpu_threshold")) gpu_threshold_ = opt.get(); graph_->InitLandmarks(lmcount_->landmark_graph()); size_t ram = 5000000000; if (auto opt = pt.get_optional<size_t>("ram")) ram = opt.get(); graph_->ReserveByRAMSize(ram); auto open_list_option = pt.get_child("open_list"); open_ = OpenListFactory<int, int>(open_list_option); std::size_t gpu_ram = 8000000000; if (auto opt = pt.get_optional<size_t>("gpu_ram")) gpu_ram = opt.get(); gpu_ram -= InitCudaSASPlus(problem_, cuda_problem_); gpu_ram -= InitCudaSuccessorGenerator(generator_, cuda_generator_); gpu_ram -= InitCudaLandmarkGraph(lmcount_->landmark_graph(), cuda_landmark_graph_); gpu_ram -= InitCudaZobristHash(graph_->hash(), cuda_c_hash_); gpu_ram -= InitCudaZobristHash(d_hash_, cuda_d_hash_); CUDA_CHECK(cudaMemcpyToSymbol(cuda_problem, cuda_problem_, sizeof(CudaSASPlus))); 
CUDA_CHECK(cudaMemcpyToSymbol(cuda_generator, cuda_generator_, sizeof(CudaSuccessorGenerator))); CUDA_CHECK(cudaMemcpyToSymbol(cuda_landmark_graph, cuda_landmark_graph_, sizeof(CudaLandmarkGraph))); CUDA_CHECK(cudaMemcpyToSymbol(cuda_c_hash, cuda_c_hash_, sizeof(CudaZobristHash))); CUDA_CHECK(cudaMemcpyToSymbol(cuda_d_hash, cuda_d_hash_, sizeof(CudaZobristHash))); gpu_ram -= CudaInitializeOpenList(n_threads_, lmcount_->landmark_graph()->n_landmarks(), &cuda_open_); int n_successors_max = 10 * n_threads_; gpu_ram -= 10 * n_threads_ * sizeof(int); InitializeGBFSMessage(n_threads_, &m_); gpu_ram -= AllocateMessage(); gpu_ram -= CudaInitializeGBFSMessage(problem_, graph_, n_threads_, n_successors_max, &cuda_m_); int closed_exponent = 13; std::size_t closed_size = 1u << closed_exponent; gpu_ram -= CudaInitializeClosedList(n_threads_, closed_size, &cuda_closed_); gpu_ram -= 2000000000; InitCudaSearchGraph(problem_, graph_, closed_exponent, gpu_ram, &cuda_graph_); } CudaHetroGBFS::~CudaHetroGBFS() { FreeCudaSASPlus(cuda_problem_); FreeCudaSuccessorGenerator(cuda_generator_); FreeCudaLandmarkGraph(cuda_landmark_graph_); FreeCudaZobristHash(cuda_c_hash_); FreeCudaZobristHash(cuda_d_hash_); FreeCudaSearchGraph(&cuda_graph_); CUDA_CHECK(cudaFree(cuda_steps_)); CUDA_CHECK(cudaFree(cuda_offsets_)); CudaFreeGBFSMessage(&cuda_m_); delete[] m_.h_min; delete[] m_.states; delete[] m_.accepted; FreeGBFSMessage(&m_); delete cuda_problem_; delete cuda_generator_; delete cuda_landmark_graph_; delete cuda_c_hash_; delete cuda_d_hash_; } void CudaHetroGBFS::ClearGPU() { CudaNPlanStep<<<n_grid_, n_block_>>>(cuda_graph_, cuda_m_.nodes, cuda_steps_); std::vector<int> steps(n_threads_); CUDA_CHECK(cudaMemcpy(steps.data(), cuda_steps_, n_threads_ * sizeof(int), cudaMemcpyDeviceToHost)); std::vector<int> offsets(n_threads_, 0); offsets[0] = steps[0]; for (int i = 1; i < n_threads_; ++i) offsets[i] = steps[i] + offsets[i - 1]; CUDA_CHECK(cudaMemcpy(cuda_offsets_, offsets.data(), n_threads_ * 
sizeof(int), cudaMemcpyHostToDevice)); int *cuda_plans = nullptr; CUDA_CHECK(cudaMalloc((void**)&cuda_plans, offsets.back() * sizeof(int))); CudaExtractPlan<<<n_grid_, n_block_>>>(cuda_graph_, cuda_offsets_, cuda_m_.nodes, cuda_plans); std::vector<int> plans(offsets.back()); std::size_t n_bytes = graph_->n_landmarks_bytes(); CUDA_CHECK(cudaMemcpy(plans.data(), cuda_plans, offsets.back() * sizeof(int), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(m_.states, cuda_m_.states, n_threads_ * problem_->n_variables() * sizeof(int), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(m_.accepted, cuda_m_.accepted, n_threads_ * n_bytes * sizeof(uint8_t), cudaMemcpyDeviceToHost)); std::vector<int> state(problem_->n_variables()); int h_max = lmcount_->landmark_graph()->n_landmarks() + 1; for (int i = 0; i < n_threads_; ++i) { int h = m_.h_min[i]; if (h == -1 || h == h_max) continue; memcpy(state.data(), &m_.states[i * problem_->n_variables()], problem_->n_variables() * sizeof(int)); int node = graph_->GenerateNodeIfNotClosed(-1, gpu_start_, state); if (node != -1) { graph_->SetH(node, h); graph_->SetLandmark(node, &m_.accepted[i * n_bytes]); int plan_start = i == 0 ? 
0 : offsets[i - 1]; sequences_[node] = std::vector<int>(steps[i]); for (int j = 0; j < steps[i]; ++j) sequences_[node][j] = plans[plan_start + j]; open_->Push(h, node, false); if (h < best_h_) { best_h_ = h; best_node_ = node; n_plateau_ = 0; std::cout << "New best heuristic value: " << best_h_ << " (found by GPU)" << std::endl; std::cout << "[" << expanded_ << " expanded]" << std::endl; } } } CUDA_CHECK(cudaFree(cuda_plans)); } void CudaHetroGBFS::InitialPushGPU(int node, int h) { gpu_start_ = node; std::vector<int> state(problem_->n_variables()); graph_->State(node, state); CUDA_CHECK(cudaMemcpy(cuda_m_.states, state.data(), problem_->n_variables() * sizeof(int), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(cuda_m_.accepted, graph_->Landmark(node), graph_->n_landmarks_bytes() * sizeof(uint8_t), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(cuda_m_.h_min, &h, sizeof(int), cudaMemcpyHostToDevice)); CudaInitialPush<<<n_grid_, n_block_>>>(cuda_graph_, cuda_m_, cuda_open_, n_threads_); m_.n_nodes = 1; cuda_m_.n_nodes = 1; CUDA_CHECK(cudaDeviceSynchronize()); } void CudaHetroGBFS::InitialEvaluateAndPush() { auto state = problem_->initial(); int node = graph_->GenerateNode(-1, -1, state); ++generated_; best_h_ = lmcount_->Evaluate(state, nullptr, graph_->Landmark(node)); graph_->SetH(node, best_h_); std::cout << "Initial heuristic value: " << best_h_ << std::endl; best_node_ = node; open_->Push(best_h_, node, false); InitialPushGPU(node, best_h_); } int CudaHetroGBFS::CpuExpand() { thread_local vector<int> state(problem_->n_variables()); thread_local vector<int> child(problem_->n_variables()); thread_local vector<int> applicable; if (open_->IsEmpty()) return -1; int node = open_->Pop(); if (!graph_->CloseIfNot(node)) return -1; ++expanded_; graph_->Expand(node, state); if (problem_->IsGoal(state)) return node; generator_->Generate(state, applicable); if (applicable.empty()) return -1; for (auto o : applicable) { problem_->ApplyEffect(o, state, child); int 
child_node = graph_->GenerateNodeIfNotClosed(o, node, state, child); if (child_node == -1) continue; ++generated_; int h = lmcount_->Evaluate(child, graph_->Landmark(node), graph_->Landmark(child_node)); if (h == -1) continue; graph_->SetH(child_node, h); open_->Push(h, child_node, false); ++n_plateau_; if (h < best_h_) { best_h_ = h; best_node_ = child_node; n_plateau_ = 0; std::cout << "New best heuristic value: " << best_h_ << std::endl; std::cout << "[" << expanded_ << " expanded]" << std::endl; } } return -1; } int CudaHetroGBFS::Search() { InitialEvaluateAndPush(); m_.n_nodes += 1; cuda_m_.n_nodes += 1; int goal = -1; int *cuda_goal = nullptr; CUDA_CHECK(cudaMalloc((void**)&cuda_goal, sizeof(int))); CUDA_CHECK(cudaMemcpy(cuda_goal, &goal, sizeof(int), cudaMemcpyHostToDevice)); while (goal == -1) { cudaEvent_t pop_fin; CUDA_CHECK(cudaEventCreate(&pop_fin)); CudaPop<<<n_grid_, n_block_>>>(cuda_graph_, cuda_m_, cuda_open_, cuda_closed_); CUDA_CHECK(cudaEventRecord(pop_fin, 0)); while (cudaEventQuery(pop_fin) == cudaErrorNotReady) { goal = CpuExpand(); if (goal != -1) { CUDA_CHECK(cudaFree(cuda_goal)); return goal; } } expanded_ += PrepareExpansion(n_threads_, &m_, &cuda_m_); cudaEvent_t expand_fin; CUDA_CHECK(cudaEventCreate(&expand_fin)); CudaExpand<<<n_grid_, n_block_>>>(cuda_graph_, cuda_m_, n_threads_, cuda_goal); CUDA_CHECK(cudaEventRecord(expand_fin, 0)); while (cudaEventQuery(expand_fin) == cudaErrorNotReady) { goal = CpuExpand(); if (goal != -1) { CUDA_CHECK(cudaFree(cuda_goal)); return goal; } } CUDA_CHECK(cudaMemcpy(&goal, cuda_goal, sizeof(int), cudaMemcpyDeviceToHost)); if (goal != -1) { std::cout << "goal found by GPU" << std::endl; goal_on_gpu_ = true; break; } generated_ += PrepareSort(n_threads_, &m_, &cuda_m_); CUDA_CHECK(cudaMemcpy(m_.h_min, cuda_m_.h_min, n_threads_ * sizeof(int), cudaMemcpyDeviceToHost)); int h_min = -1; for (int i = 0; i < n_threads_; ++i) { if (m_.h_min[i] != -1 && (h_min == -1 || m_.h_min[i] < h_min)) h_min = m_.h_min[i]; 
} if (m_.n_nodes >= static_cast<int>(cuda_graph_.node_max - 1) || h_min < best_h_) { std::cout << "clear GPU RAM" << std::endl; ClearGPU(); CudaClearOpenList(n_threads_, &cuda_open_); CudaClearClosedList(n_threads_, &cuda_closed_); m_.n_nodes = 0; cuda_m_.n_nodes = 0; InitialPushGPU(best_node_, best_h_); continue; } else if (gpu_threshold_ > 0 && n_plateau_ > gpu_threshold_) { std::cout << "clear GPU RAM" << std::endl; ClearGPU(); if (gpu_threshold_ > 0 && n_plateau_ > gpu_threshold_) { CudaClearOpenList(n_threads_, &cuda_open_); CudaClearClosedList(n_threads_, &cuda_closed_); m_.n_nodes = 0; cuda_m_.n_nodes = 0; InitialPushGPU(best_node_, best_h_); n_plateau_ = 0; continue; } } cudaEvent_t sort_fin; CUDA_CHECK(cudaEventCreate(&sort_fin)); CudaSortChildren<<<n_grid_, n_block_>>>(cuda_graph_, cuda_m_); CUDA_CHECK(cudaEventRecord(sort_fin, 0)); while(cudaEventQuery(sort_fin) == cudaErrorNotReady){ goal = CpuExpand(); if (goal != -1) { CUDA_CHECK(cudaFree(cuda_goal)); return goal; } } cudaEvent_t push_fin; CUDA_CHECK(cudaEventCreate(&push_fin)); CudaPush<<<n_grid_, n_block_>>>(cuda_graph_, cuda_m_, cuda_closed_, cuda_open_); CUDA_CHECK(cudaEventRecord(push_fin, 0)); while(cudaEventQuery(push_fin) == cudaErrorNotReady){ goal = CpuExpand(); if (goal != -1) { CUDA_CHECK(cudaFree(cuda_goal)); return goal; } } } CUDA_CHECK(cudaFree(cuda_goal)); return goal; } std::vector<int> CudaHetroGBFS::ExtractPlan(int node) { if (node == -1) return std::vector<int>{-1}; int current = goal_on_gpu_ ? 
gpu_start_ : node; vector<int> result; while (graph_->Parent(current) != -1) { if (graph_->Action(current) == -1) { result.insert(result.begin(), sequences_[current].begin(), sequences_[current].end()); } else { result.insert(result.begin(), graph_->Action(current)); } current = graph_->Parent(current); } if (!goal_on_gpu_) return result; int *goals = nullptr; CUDA_CHECK(cudaMalloc((void**)&goals, sizeof(int))); CUDA_CHECK(cudaMemcpy(goals, &node, sizeof(int), cudaMemcpyHostToDevice)); CudaNPlanStep<<<1, 1>>>(cuda_graph_, goals, cuda_steps_); int step; CUDA_CHECK(cudaMemcpy(&step, cuda_steps_, sizeof(int), cudaMemcpyDeviceToHost)); int *cuda_plan = nullptr; CUDA_CHECK(cudaMalloc((void**)&cuda_plan, step * sizeof(int))); CudaExtractPlan<<<1, 1>>>(cuda_graph_, cuda_steps_, goals, cuda_plan); int *plan = new int[step]; CUDA_CHECK(cudaMemcpy(plan, cuda_plan, step * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < step; ++i) result.push_back(plan[i]); delete[] plan; CUDA_CHECK(cudaFree(cuda_plan)); return result; } void CudaHetroGBFS::DumpStatistics() const { std::cout << "Expanded " << expanded_ << " state(s)" << std::endl; std::cout << "Generated " << generated_ << " state(s)" << std::endl; } } // namespace pplanner
dea25e809fd7a2bcc4ddae5ed0e7d19010f14046.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "hl_base.h" #include "ContextProjectionOp.h" namespace paddle { template <bool padding> __global__ void KeContextProjectionForward(const real* input, const int* sequence, const real* weight, real* output, int input_dim, int context_length, int context_start, int begin_pad) { int idx = threadIdx.x; int block_size = blockDim.x; int sequenceId = blockIdx.x; int seq_start = sequence[sequenceId]; int seq_end = sequence[sequenceId+1]; real value = 0; int instances = seq_end - seq_start + context_length - 1; output += seq_start * input_dim * context_length; input += seq_start * input_dim; for (int k = 0; k <= input_dim / block_size; k++) { if (idx < input_dim) { for (int i = 0; i < instances; i++) { // i + context_start; if ((i + context_start) < 0) { if (padding) { value = weight[i * input_dim + idx]; } else { continue; } } else if ((i + context_start) >= (seq_end - seq_start)) { if (padding) { value = weight[(begin_pad + i + context_start - (seq_end - seq_start)) * input_dim + idx]; } else { continue; } } else { value = input[(i + context_start) * input_dim + idx]; } int outx = (i - context_length) < 0 ? i : (context_length - 1); int outy = (i - context_length) < 0 ? 
0 : (i - (context_length - 1)); real* output_r = output + outy * input_dim * context_length + outx * input_dim; for (int j = outy; j < seq_end - seq_start; j++) { output_r[idx] += value; if (j - outy == outx) break; output_r += (context_length - 1) * input_dim; } } } idx += block_size; } } /** * @brief Context projection forward. * * @param[in] input input sequence. * @param[in] sequence sequence index. * @param[in] weight padding data. * @param[out] output output sequence. * @param[in] num_sequences number of sequences. * @param[in] input_dim input sequence dimension. * @param[in] context_length context length. * @param[in] context_start context start. * @param[in] begin_pad number of extra timesteps added at the * beginning. * */ void hl_context_projection_forward(const real* input, const int* sequence, const real* weight, real* output, size_t num_sequences, size_t input_dim, size_t context_length, int context_start, size_t begin_pad) { CHECK_NOTNULL(input); CHECK_NOTNULL(sequence); CHECK_NOTNULL(output); int block_size = 128; int blocks_x = num_sequences; int blocks_y = 1; dim3 threads(block_size, 1); dim3 grid(blocks_x, blocks_y); if (weight) { hipLaunchKernelGGL(( KeContextProjectionForward<true>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT , input, sequence, weight, output, input_dim, context_length, context_start, begin_pad); } else { hipLaunchKernelGGL(( KeContextProjectionForward<false>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT , input, sequence, weight, output, input_dim, context_length, context_start, begin_pad); } CHECK_SYNC("hl_context_projection_forward failed"); } template <> void ContextProjectionForward<DEVICE_TYPE_GPU>(GpuMatrix& output, const GpuMatrix& input, const GpuMatrix& weight, const GpuIVector& sequence, size_t context_length, int context_start, size_t begin_pad) { hl_context_projection_forward(input.getData(), sequence.getData(), weight ? 
weight.getData() : nullptr, output.getData(), sequence.getSize() - 1, input.getWidth(), context_length, context_start, begin_pad); } __global__ void KeContextProjectionBackwardData(real* out_grad, const int* sequence, real* in_grad, int input_dim, int context_length, int context_start) { int idx = threadIdx.x; int block_size = blockDim.x; int sequenceId = blockIdx.x; int seq_start = sequence[sequenceId]; int seq_end = sequence[sequenceId+1]; real value = 0; int instances = seq_end - seq_start + context_length - 1; out_grad += seq_start * input_dim * context_length; in_grad += seq_start * input_dim; for (int k = 0; k <= input_dim / block_size; k++) { if (idx < input_dim) { for (int i = 0; i < instances; i++) { if ((i + context_start) < 0) { continue; } else if ((i + context_start) >= (seq_end - seq_start)) { continue; } else { // value = 0; value = in_grad[(i + context_start) * input_dim + idx]; } int outx = (i - context_length) < 0 ? i : (context_length - 1); int outy = (i - context_length) < 0 ? 0 : (i - (context_length - 1)); real* output_r = out_grad + outy * input_dim * context_length + outx * input_dim; for (int j = outy; j < seq_end - seq_start; j++) { value += output_r[idx]; if (j - outy == outx) break; output_r += (context_length - 1) * input_dim; } in_grad[(i + context_start) * input_dim + idx] = value; } } idx += block_size; } } /** * @brief Context projection backward data. * * @param[in] out_grad output gradient. * @param[in] sequence sequence index. * @param[out] input_grad input gradient. * @param[in] num_sequences number of sequences. * @param[in] input_dim input sequence dimension. * @param[in] context_length context length. * @param[in] context_start context start. 
* */ void hl_context_projection_backward_data(real* out_grad, const int* sequence, real* input_grad, size_t num_sequences, size_t input_dim, size_t context_length, int context_start) { CHECK_NOTNULL(out_grad); CHECK_NOTNULL(sequence); CHECK_NOTNULL(input_grad); int block_size = 128; int blocks_x = num_sequences; int blocks_y = 1; dim3 threads(block_size, 1); dim3 grid(blocks_x, blocks_y); hipLaunchKernelGGL(( KeContextProjectionBackwardData), dim3(grid), dim3(threads), 0, STREAM_DEFAULT , out_grad, sequence, input_grad, input_dim, context_length, context_start); CHECK_SYNC("hl_context_projection_backward_data failed"); } template <> void ContextProjectionBackwardData<DEVICE_TYPE_GPU>(GpuMatrix& out_grad, GpuMatrix& in_grad, const GpuIVector& sequence, size_t context_length, int context_start) { hl_context_projection_backward_data(out_grad.getData(), sequence.getData(), in_grad.getData(), sequence.getSize() - 1, in_grad.getWidth(), context_length, context_start); } template<int THREADS_X, int THREADS_Y> __global__ void KeContextProjectionBackwardWeight(real* out_grad, const int* sequence, real* w_grad, int num_sequences, int w_dim, int context_length, int context_start, int begin_pad) { __shared__ real sum_s[THREADS_Y][THREADS_X]; int pad_of_block = (w_dim + THREADS_X - 1) / THREADS_X; const int idx = threadIdx.x; const int idy = threadIdx.y; int padId = blockIdx.x / pad_of_block; int weight_idx = idx + THREADS_X * (blockIdx.x % pad_of_block); int instanceId; real value = 0; real* output_r; sum_s[idy][idx] = 0.0f; if (weight_idx < w_dim) { for (int seqId = idy; seqId < num_sequences; seqId += THREADS_Y) { int seq_start = sequence[seqId]; int seq_end = sequence[seqId+1]; output_r = out_grad + seq_start * w_dim * context_length; if (context_start < 0) { if (padId + context_start < 0) { instanceId = padId; } else { // begin_pad > 0; instanceId = (padId - begin_pad) + (seq_end - seq_start) - context_start; } } else { if (padId + (seq_end - seq_start) < context_start) { 
continue; } else { // begin_pad == 0; instanceId = padId + (seq_end - seq_start) - context_start; } } int outx = (instanceId - context_length) < 0 ? instanceId : (context_length - 1); int outy = (instanceId - context_length) < 0 ? 0 : (instanceId - (context_length - 1)); output_r += outy * w_dim * context_length + outx * w_dim; for (int j = outy; j < seq_end - seq_start; j++) { value += output_r[weight_idx]; if (j - outy == outx) break; output_r += (context_length - 1) * w_dim; } } sum_s[idy][idx] = value; } __syncthreads(); for (int stride = THREADS_Y/2; stride > 0; stride = stride/2) { if (idy < stride) { sum_s[idy][idx] += sum_s[idy + stride][idx]; } __syncthreads(); } __syncthreads(); if (weight_idx < w_dim) { if (idy == 0) { w_grad[padId * w_dim + weight_idx] += sum_s[0][idx]; } } } /** * @brief Context projection backward weight. * * @param[in] out_grad output gradient. * @param[in] sequence sequence index. * @param[out] w_grad weight gradient. * @param[in] num_sequences number of sequences. * @param[in] w_dim input sequence dimension. * @param[in] total_pad number of extra timesteps. * @param[in] context_length context length. * @param[in] context_start context start. * @param[in] begin_pad number of extra timesteps added at the * beginning. 
* */ void hl_context_projection_backward_weight(real* out_grad, const int* sequence, real* w_grad, size_t num_sequences, size_t w_dim, size_t total_pad, size_t context_length, int context_start, size_t begin_pad) { CHECK_NOTNULL(out_grad); CHECK_NOTNULL(sequence); CHECK_NOTNULL(w_grad); int threads_x = 32; int threads_y = 32; int blocks_x = total_pad * ((w_dim + threads_x - 1) / threads_x); dim3 threads(threads_x, threads_y); dim3 grid(blocks_x, 1); hipLaunchKernelGGL(( KeContextProjectionBackwardWeight<32, 32>) , dim3(grid), dim3(threads), 0, STREAM_DEFAULT , out_grad, sequence, w_grad, num_sequences, w_dim, context_length, context_start, begin_pad); CHECK_SYNC("hl_context_projection_backward_weight failed"); } template <> void ContextProjectionBackwardWeight<DEVICE_TYPE_GPU>( GpuMatrix& out_grad, GpuMatrix& w_grad, const GpuIVector& seq_vec, size_t context_length, int context_start, size_t total_pad, size_t begin_pad) { hl_context_projection_backward_weight(out_grad.getData(), seq_vec.getData(), w_grad.getData(), seq_vec.getSize() - 1, w_grad.getWidth(), total_pad, context_length, context_start, begin_pad); } template <> void ContextProjectionBackward<DEVICE_TYPE_GPU>(GpuMatrix& out_grad, GpuMatrix& in_grad, GpuMatrix& w_grad, const GpuIVector& sequence, size_t context_length, int context_start, size_t begin_pad, bool is_padding, size_t total_pad) { if (in_grad) { ContextProjectionBackwardData<DEVICE_TYPE_GPU>( out_grad, in_grad, sequence, context_length, context_start); } if (is_padding && w_grad) { ContextProjectionBackwardWeight<DEVICE_TYPE_GPU>( out_grad, w_grad, sequence, context_length, context_start, total_pad, begin_pad); } } } // namespace paddle
dea25e809fd7a2bcc4ddae5ed0e7d19010f14046.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "hl_base.h" #include "ContextProjectionOp.h" namespace paddle { template <bool padding> __global__ void KeContextProjectionForward(const real* input, const int* sequence, const real* weight, real* output, int input_dim, int context_length, int context_start, int begin_pad) { int idx = threadIdx.x; int block_size = blockDim.x; int sequenceId = blockIdx.x; int seq_start = sequence[sequenceId]; int seq_end = sequence[sequenceId+1]; real value = 0; int instances = seq_end - seq_start + context_length - 1; output += seq_start * input_dim * context_length; input += seq_start * input_dim; for (int k = 0; k <= input_dim / block_size; k++) { if (idx < input_dim) { for (int i = 0; i < instances; i++) { // i + context_start; if ((i + context_start) < 0) { if (padding) { value = weight[i * input_dim + idx]; } else { continue; } } else if ((i + context_start) >= (seq_end - seq_start)) { if (padding) { value = weight[(begin_pad + i + context_start - (seq_end - seq_start)) * input_dim + idx]; } else { continue; } } else { value = input[(i + context_start) * input_dim + idx]; } int outx = (i - context_length) < 0 ? i : (context_length - 1); int outy = (i - context_length) < 0 ? 
0 : (i - (context_length - 1)); real* output_r = output + outy * input_dim * context_length + outx * input_dim; for (int j = outy; j < seq_end - seq_start; j++) { output_r[idx] += value; if (j - outy == outx) break; output_r += (context_length - 1) * input_dim; } } } idx += block_size; } } /** * @brief Context projection forward. * * @param[in] input input sequence. * @param[in] sequence sequence index. * @param[in] weight padding data. * @param[out] output output sequence. * @param[in] num_sequences number of sequences. * @param[in] input_dim input sequence dimension. * @param[in] context_length context length. * @param[in] context_start context start. * @param[in] begin_pad number of extra timesteps added at the * beginning. * */ void hl_context_projection_forward(const real* input, const int* sequence, const real* weight, real* output, size_t num_sequences, size_t input_dim, size_t context_length, int context_start, size_t begin_pad) { CHECK_NOTNULL(input); CHECK_NOTNULL(sequence); CHECK_NOTNULL(output); int block_size = 128; int blocks_x = num_sequences; int blocks_y = 1; dim3 threads(block_size, 1); dim3 grid(blocks_x, blocks_y); if (weight) { KeContextProjectionForward<true><<< grid, threads, 0, STREAM_DEFAULT >>> (input, sequence, weight, output, input_dim, context_length, context_start, begin_pad); } else { KeContextProjectionForward<false><<< grid, threads, 0, STREAM_DEFAULT >>> (input, sequence, weight, output, input_dim, context_length, context_start, begin_pad); } CHECK_SYNC("hl_context_projection_forward failed"); } template <> void ContextProjectionForward<DEVICE_TYPE_GPU>(GpuMatrix& output, const GpuMatrix& input, const GpuMatrix& weight, const GpuIVector& sequence, size_t context_length, int context_start, size_t begin_pad) { hl_context_projection_forward(input.getData(), sequence.getData(), weight ? 
weight.getData() : nullptr, output.getData(), sequence.getSize() - 1, input.getWidth(), context_length, context_start, begin_pad); } __global__ void KeContextProjectionBackwardData(real* out_grad, const int* sequence, real* in_grad, int input_dim, int context_length, int context_start) { int idx = threadIdx.x; int block_size = blockDim.x; int sequenceId = blockIdx.x; int seq_start = sequence[sequenceId]; int seq_end = sequence[sequenceId+1]; real value = 0; int instances = seq_end - seq_start + context_length - 1; out_grad += seq_start * input_dim * context_length; in_grad += seq_start * input_dim; for (int k = 0; k <= input_dim / block_size; k++) { if (idx < input_dim) { for (int i = 0; i < instances; i++) { if ((i + context_start) < 0) { continue; } else if ((i + context_start) >= (seq_end - seq_start)) { continue; } else { // value = 0; value = in_grad[(i + context_start) * input_dim + idx]; } int outx = (i - context_length) < 0 ? i : (context_length - 1); int outy = (i - context_length) < 0 ? 0 : (i - (context_length - 1)); real* output_r = out_grad + outy * input_dim * context_length + outx * input_dim; for (int j = outy; j < seq_end - seq_start; j++) { value += output_r[idx]; if (j - outy == outx) break; output_r += (context_length - 1) * input_dim; } in_grad[(i + context_start) * input_dim + idx] = value; } } idx += block_size; } } /** * @brief Context projection backward data. * * @param[in] out_grad output gradient. * @param[in] sequence sequence index. * @param[out] input_grad input gradient. * @param[in] num_sequences number of sequences. * @param[in] input_dim input sequence dimension. * @param[in] context_length context length. * @param[in] context_start context start. 
* */ void hl_context_projection_backward_data(real* out_grad, const int* sequence, real* input_grad, size_t num_sequences, size_t input_dim, size_t context_length, int context_start) { CHECK_NOTNULL(out_grad); CHECK_NOTNULL(sequence); CHECK_NOTNULL(input_grad); int block_size = 128; int blocks_x = num_sequences; int blocks_y = 1; dim3 threads(block_size, 1); dim3 grid(blocks_x, blocks_y); KeContextProjectionBackwardData<<< grid, threads, 0, STREAM_DEFAULT >>> (out_grad, sequence, input_grad, input_dim, context_length, context_start); CHECK_SYNC("hl_context_projection_backward_data failed"); } template <> void ContextProjectionBackwardData<DEVICE_TYPE_GPU>(GpuMatrix& out_grad, GpuMatrix& in_grad, const GpuIVector& sequence, size_t context_length, int context_start) { hl_context_projection_backward_data(out_grad.getData(), sequence.getData(), in_grad.getData(), sequence.getSize() - 1, in_grad.getWidth(), context_length, context_start); } template<int THREADS_X, int THREADS_Y> __global__ void KeContextProjectionBackwardWeight(real* out_grad, const int* sequence, real* w_grad, int num_sequences, int w_dim, int context_length, int context_start, int begin_pad) { __shared__ real sum_s[THREADS_Y][THREADS_X]; int pad_of_block = (w_dim + THREADS_X - 1) / THREADS_X; const int idx = threadIdx.x; const int idy = threadIdx.y; int padId = blockIdx.x / pad_of_block; int weight_idx = idx + THREADS_X * (blockIdx.x % pad_of_block); int instanceId; real value = 0; real* output_r; sum_s[idy][idx] = 0.0f; if (weight_idx < w_dim) { for (int seqId = idy; seqId < num_sequences; seqId += THREADS_Y) { int seq_start = sequence[seqId]; int seq_end = sequence[seqId+1]; output_r = out_grad + seq_start * w_dim * context_length; if (context_start < 0) { if (padId + context_start < 0) { instanceId = padId; } else { // begin_pad > 0; instanceId = (padId - begin_pad) + (seq_end - seq_start) - context_start; } } else { if (padId + (seq_end - seq_start) < context_start) { continue; } else { // 
begin_pad == 0; instanceId = padId + (seq_end - seq_start) - context_start; } } int outx = (instanceId - context_length) < 0 ? instanceId : (context_length - 1); int outy = (instanceId - context_length) < 0 ? 0 : (instanceId - (context_length - 1)); output_r += outy * w_dim * context_length + outx * w_dim; for (int j = outy; j < seq_end - seq_start; j++) { value += output_r[weight_idx]; if (j - outy == outx) break; output_r += (context_length - 1) * w_dim; } } sum_s[idy][idx] = value; } __syncthreads(); for (int stride = THREADS_Y/2; stride > 0; stride = stride/2) { if (idy < stride) { sum_s[idy][idx] += sum_s[idy + stride][idx]; } __syncthreads(); } __syncthreads(); if (weight_idx < w_dim) { if (idy == 0) { w_grad[padId * w_dim + weight_idx] += sum_s[0][idx]; } } } /** * @brief Context projection backward weight. * * @param[in] out_grad output gradient. * @param[in] sequence sequence index. * @param[out] w_grad weight gradient. * @param[in] num_sequences number of sequences. * @param[in] w_dim input sequence dimension. * @param[in] total_pad number of extra timesteps. * @param[in] context_length context length. * @param[in] context_start context start. * @param[in] begin_pad number of extra timesteps added at the * beginning. 
* */ void hl_context_projection_backward_weight(real* out_grad, const int* sequence, real* w_grad, size_t num_sequences, size_t w_dim, size_t total_pad, size_t context_length, int context_start, size_t begin_pad) { CHECK_NOTNULL(out_grad); CHECK_NOTNULL(sequence); CHECK_NOTNULL(w_grad); int threads_x = 32; int threads_y = 32; int blocks_x = total_pad * ((w_dim + threads_x - 1) / threads_x); dim3 threads(threads_x, threads_y); dim3 grid(blocks_x, 1); KeContextProjectionBackwardWeight<32, 32> <<< grid, threads, 0, STREAM_DEFAULT >>> (out_grad, sequence, w_grad, num_sequences, w_dim, context_length, context_start, begin_pad); CHECK_SYNC("hl_context_projection_backward_weight failed"); } template <> void ContextProjectionBackwardWeight<DEVICE_TYPE_GPU>( GpuMatrix& out_grad, GpuMatrix& w_grad, const GpuIVector& seq_vec, size_t context_length, int context_start, size_t total_pad, size_t begin_pad) { hl_context_projection_backward_weight(out_grad.getData(), seq_vec.getData(), w_grad.getData(), seq_vec.getSize() - 1, w_grad.getWidth(), total_pad, context_length, context_start, begin_pad); } template <> void ContextProjectionBackward<DEVICE_TYPE_GPU>(GpuMatrix& out_grad, GpuMatrix& in_grad, GpuMatrix& w_grad, const GpuIVector& sequence, size_t context_length, int context_start, size_t begin_pad, bool is_padding, size_t total_pad) { if (in_grad) { ContextProjectionBackwardData<DEVICE_TYPE_GPU>( out_grad, in_grad, sequence, context_length, context_start); } if (is_padding && w_grad) { ContextProjectionBackwardWeight<DEVICE_TYPE_GPU>( out_grad, w_grad, sequence, context_length, context_start, total_pad, begin_pad); } } } // namespace paddle
5e445259dd3c545a2104c91823dacae4c7b506be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<cstdio> #include<opencv2/core/core.hpp> #include<opencv2/highgui/highgui.hpp> #include<cuda_runtime.h> using std::cout; using std::endl; #define SQR(x) ((x)*(x)) #define POW2(x) SQR(x) #define POW3(x) ((x)*(x)*(x)) #define POW4(x) (POW2(x)*POW2(x)) #define POW7(x) (POW3(x)*POW3(x)*(x)) #define DegToRad(x) ((x)*M_PI/180) #define RadToDeg(x) ((x)/M_PI*180) static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number) { if(err!=hipSuccess) { fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,hipGetErrorString(err)); std::cin.get(); exit(EXIT_FAILURE); } } #define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__) // __device__ std::vector<unsigned char> rgb2lab(const unsigned char r,const unsigned char g,const unsigned char b){ // r = r / 255, // g = g / 255, // b = b / 255, // double x, y, z; // r = (r > 0.04045) ? Math.pow((r + 0.055) / 1.055, 2.4) : r / 12.92; // g = (g > 0.04045) ? Math.pow((g + 0.055) / 1.055, 2.4) : g / 12.92; // b = (b > 0.04045) ? Math.pow((b + 0.055) / 1.055, 2.4) : b / 12.92; // x = (r * 0.4124 + g * 0.3576 + b * 0.1805) / 0.95047; // y = (r * 0.2126 + g * 0.7152 + b * 0.0722) / 1.00000; // z = (r * 0.0193 + g * 0.1192 + b * 0.9505) / 1.08883; // x = (x > 0.008856) ? Math.pow(x, 1/3) : (7.787 * x) + 16/116; // y = (y > 0.008856) ? Math.pow(y, 1/3) : (7.787 * y) + 16/116; // z = (z > 0.008856) ? 
Math.pow(z, 1/3) : (7.787 * z) + 16/116; // std::vector<unsigned char> lab; // unsigned char l,a,bb; // l = (116 * y) - 16; // a = 500 * (x - y); // bb = 200 * (y - z); // lab.push_back(l); // lab.push_back(a); // lab.push_back(bb); // return lab; // } __device__ double color_distance(const unsigned char l1,const unsigned char a1,const unsigned char b1, const unsigned char l2,const unsigned char a2,const unsigned char b2){ double eps = 1e-5; double c1 = sqrtf(SQR(a1) + SQR(b1)); double c2 = sqrtf(SQR(a2) + SQR(b2)); double meanC = (c1 + c2) / 2.0; double meanC7 = POW7(meanC); double g = 0.5*(1 - sqrtf(meanC7 / (meanC7 + 6103515625.))); // 0.5*(1-sqrt(meanC^7/(meanC^7+25^7))) double a1p = a1 * (1 + g); double a2p = a2 * (1 + g); c1 = sqrtf(SQR(a1p) + SQR(b1)); c2 = sqrtf(SQR(a2p) + SQR(b2)); double h1 = fmodf(atan2f(b1, a1p) + 2*M_PI, 2*M_PI); double h2 = fmodf(atan2f(b2, a2p) + 2*M_PI, 2*M_PI); // compute deltaL, deltaC, deltaH double deltaL = l2 - l1; double deltaC = c2 - c1; double deltah; if (c1*c2 < eps) { deltah = 0; } if (std::abs(h2 - h1) <= M_PI) { deltah = h2 - h1; } else if (h2 > h1) { deltah = h2 - h1 - 2* M_PI; } else { deltah = h2 - h1 + 2 * M_PI; } double deltaH = 2 * sqrtf(c1*c2)*sinf(deltah / 2); // calculate CIEDE2000 double meanL = (l1 + l2) / 2; meanC = (c1 + c2) / 2.0; meanC7 = POW7(meanC); double meanH; if (c1*c2 < eps) { meanH = h1 + h2; } if (std::abs(h1 - h2) <= M_PI + eps) { meanH = (h1 + h2) / 2; } else if (h1 + h2 < 2*M_PI) { meanH = (h1 + h2 + 2*M_PI) / 2; } else { meanH = (h1 + h2 - 2*M_PI) / 2; } double T = 1 - 0.17*cosf(meanH - DegToRad(30)) + 0.24*cosf(2 * meanH) + 0.32*cosf(3 * meanH + DegToRad(6)) - 0.2*cosf(4 * meanH - DegToRad(63)); double sl = 1 + (0.015*SQR(meanL - 50)) / sqrtf(20 + SQR(meanL - 50)); double sc = 1 + 0.045*meanC; double sh = 1 + 0.015*meanC*T; double rc = 2 * sqrtf(meanC7 / (meanC7 + 6103515625.)); double rt = -sinf(DegToRad(60 * expf(-SQR((RadToDeg(meanH) - 275) / 25)))) * rc; double cur_dist = sqrtf(SQR(deltaL 
/ sl) + SQR(deltaC / sc) + SQR(deltaH / sh) + rt * deltaC / sc * deltaH / sh); return cur_dist; } __global__ void bgr_to_gray_kernel( unsigned char* input, unsigned char* input1, unsigned char* output, int width, int height, int colorWidthStep, int grayWidthStep) { //2D Index of current thread const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; int real_distance; //Only valid threads perform memory I/O if((xIndex<width) && (yIndex<height)) { //Location of colored pixel in input bool valid = false; int real_distance; const int color_tid = yIndex * colorWidthStep + (3 * xIndex); const unsigned char l1 = input[color_tid]; const unsigned char a1 = input[color_tid + 1]; const unsigned char b1 = input[color_tid + 2]; //Location of gray pixel in output const int gray_tid = yIndex * grayWidthStep + xIndex; for(int i = -2; i <3;i++){ int row = yIndex+i; int col = xIndex+i; if(row >= 0 && row <height && col >= 0 && col <width){ const int color_tid_input1 = (row) * colorWidthStep + (3 * col); const unsigned char l2 = input1[color_tid_input1]; const unsigned char a2 = input1[color_tid_input1 + 1]; const unsigned char b2 = input1[color_tid_input1 + 2]; double cur_dist=color_distance(l1,a1,b1,l2,a2,b2); if(cur_dist < 20){ valid = true; } if(i==0){ real_distance = cur_dist; } } } float gray; if(valid){ gray = 0; }else{ gray = 1; } output[gray_tid] = static_cast<unsigned char>(gray); } } int *difffilter(const cv::Mat& input,const cv::Mat& input1, cv::Mat& output) { //Calculate total number of bytes of input and output image const int colorBytes = input.step * input.rows; const int grayBytes = output.step * output.rows; unsigned char *d_input,*d_input1, *d_output; //Allocate device memory SAFE_CALL(hipMalloc<unsigned char>(&d_input,colorBytes),"CUDA Malloc Failed"); SAFE_CALL(hipMalloc<unsigned char>(&d_input1,colorBytes),"CUDA Malloc Failed"); SAFE_CALL(hipMalloc<unsigned char>(&d_output,grayBytes),"CUDA Malloc 
Failed"); //Copy data from OpenCV input image to device memory SAFE_CALL(hipMemcpy(d_input,input.ptr(),colorBytes,hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed"); SAFE_CALL(hipMemcpy(d_input1,input1.ptr(),colorBytes,hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed"); //Specify a reasonable block size const dim3 block(16,16); //Calculate grid size to cover the whole image const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y); //Launch the color conversion kernel hipLaunchKernelGGL(( bgr_to_gray_kernel), dim3(grid),dim3(block), 0, 0, d_input,d_input1,d_output,input.cols,input.rows,input.step,output.step); //Synchronize to check for any kernel launch errors SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed"); //Copy back data from destination device meory to OpenCV output image SAFE_CALL(hipMemcpy(output.ptr(),d_output,grayBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Host To Device Failed"); //Free the device memory SAFE_CALL(hipFree(d_input),"CUDA Free Failed"); SAFE_CALL(hipFree(d_input1),"CUDA Free Failed"); SAFE_CALL(hipFree(d_output),"CUDA Free Failed"); return 0; }
5e445259dd3c545a2104c91823dacae4c7b506be.cu
#include<iostream> #include<cstdio> #include<opencv2/core/core.hpp> #include<opencv2/highgui/highgui.hpp> #include<cuda_runtime.h> using std::cout; using std::endl; #define SQR(x) ((x)*(x)) #define POW2(x) SQR(x) #define POW3(x) ((x)*(x)*(x)) #define POW4(x) (POW2(x)*POW2(x)) #define POW7(x) (POW3(x)*POW3(x)*(x)) #define DegToRad(x) ((x)*M_PI/180) #define RadToDeg(x) ((x)/M_PI*180) static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number) { if(err!=cudaSuccess) { fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,cudaGetErrorString(err)); std::cin.get(); exit(EXIT_FAILURE); } } #define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__) // __device__ std::vector<unsigned char> rgb2lab(const unsigned char r,const unsigned char g,const unsigned char b){ // r = r / 255, // g = g / 255, // b = b / 255, // double x, y, z; // r = (r > 0.04045) ? Math.pow((r + 0.055) / 1.055, 2.4) : r / 12.92; // g = (g > 0.04045) ? Math.pow((g + 0.055) / 1.055, 2.4) : g / 12.92; // b = (b > 0.04045) ? Math.pow((b + 0.055) / 1.055, 2.4) : b / 12.92; // x = (r * 0.4124 + g * 0.3576 + b * 0.1805) / 0.95047; // y = (r * 0.2126 + g * 0.7152 + b * 0.0722) / 1.00000; // z = (r * 0.0193 + g * 0.1192 + b * 0.9505) / 1.08883; // x = (x > 0.008856) ? Math.pow(x, 1/3) : (7.787 * x) + 16/116; // y = (y > 0.008856) ? Math.pow(y, 1/3) : (7.787 * y) + 16/116; // z = (z > 0.008856) ? 
Math.pow(z, 1/3) : (7.787 * z) + 16/116; // std::vector<unsigned char> lab; // unsigned char l,a,bb; // l = (116 * y) - 16; // a = 500 * (x - y); // bb = 200 * (y - z); // lab.push_back(l); // lab.push_back(a); // lab.push_back(bb); // return lab; // } __device__ double color_distance(const unsigned char l1,const unsigned char a1,const unsigned char b1, const unsigned char l2,const unsigned char a2,const unsigned char b2){ double eps = 1e-5; double c1 = sqrtf(SQR(a1) + SQR(b1)); double c2 = sqrtf(SQR(a2) + SQR(b2)); double meanC = (c1 + c2) / 2.0; double meanC7 = POW7(meanC); double g = 0.5*(1 - sqrtf(meanC7 / (meanC7 + 6103515625.))); // 0.5*(1-sqrt(meanC^7/(meanC^7+25^7))) double a1p = a1 * (1 + g); double a2p = a2 * (1 + g); c1 = sqrtf(SQR(a1p) + SQR(b1)); c2 = sqrtf(SQR(a2p) + SQR(b2)); double h1 = fmodf(atan2f(b1, a1p) + 2*M_PI, 2*M_PI); double h2 = fmodf(atan2f(b2, a2p) + 2*M_PI, 2*M_PI); // compute deltaL, deltaC, deltaH double deltaL = l2 - l1; double deltaC = c2 - c1; double deltah; if (c1*c2 < eps) { deltah = 0; } if (std::abs(h2 - h1) <= M_PI) { deltah = h2 - h1; } else if (h2 > h1) { deltah = h2 - h1 - 2* M_PI; } else { deltah = h2 - h1 + 2 * M_PI; } double deltaH = 2 * sqrtf(c1*c2)*sinf(deltah / 2); // calculate CIEDE2000 double meanL = (l1 + l2) / 2; meanC = (c1 + c2) / 2.0; meanC7 = POW7(meanC); double meanH; if (c1*c2 < eps) { meanH = h1 + h2; } if (std::abs(h1 - h2) <= M_PI + eps) { meanH = (h1 + h2) / 2; } else if (h1 + h2 < 2*M_PI) { meanH = (h1 + h2 + 2*M_PI) / 2; } else { meanH = (h1 + h2 - 2*M_PI) / 2; } double T = 1 - 0.17*cosf(meanH - DegToRad(30)) + 0.24*cosf(2 * meanH) + 0.32*cosf(3 * meanH + DegToRad(6)) - 0.2*cosf(4 * meanH - DegToRad(63)); double sl = 1 + (0.015*SQR(meanL - 50)) / sqrtf(20 + SQR(meanL - 50)); double sc = 1 + 0.045*meanC; double sh = 1 + 0.015*meanC*T; double rc = 2 * sqrtf(meanC7 / (meanC7 + 6103515625.)); double rt = -sinf(DegToRad(60 * expf(-SQR((RadToDeg(meanH) - 275) / 25)))) * rc; double cur_dist = sqrtf(SQR(deltaL 
/ sl) + SQR(deltaC / sc) + SQR(deltaH / sh) + rt * deltaC / sc * deltaH / sh); return cur_dist; } __global__ void bgr_to_gray_kernel( unsigned char* input, unsigned char* input1, unsigned char* output, int width, int height, int colorWidthStep, int grayWidthStep) { //2D Index of current thread const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; int real_distance; //Only valid threads perform memory I/O if((xIndex<width) && (yIndex<height)) { //Location of colored pixel in input bool valid = false; int real_distance; const int color_tid = yIndex * colorWidthStep + (3 * xIndex); const unsigned char l1 = input[color_tid]; const unsigned char a1 = input[color_tid + 1]; const unsigned char b1 = input[color_tid + 2]; //Location of gray pixel in output const int gray_tid = yIndex * grayWidthStep + xIndex; for(int i = -2; i <3;i++){ int row = yIndex+i; int col = xIndex+i; if(row >= 0 && row <height && col >= 0 && col <width){ const int color_tid_input1 = (row) * colorWidthStep + (3 * col); const unsigned char l2 = input1[color_tid_input1]; const unsigned char a2 = input1[color_tid_input1 + 1]; const unsigned char b2 = input1[color_tid_input1 + 2]; double cur_dist=color_distance(l1,a1,b1,l2,a2,b2); if(cur_dist < 20){ valid = true; } if(i==0){ real_distance = cur_dist; } } } float gray; if(valid){ gray = 0; }else{ gray = 1; } output[gray_tid] = static_cast<unsigned char>(gray); } } int *difffilter(const cv::Mat& input,const cv::Mat& input1, cv::Mat& output) { //Calculate total number of bytes of input and output image const int colorBytes = input.step * input.rows; const int grayBytes = output.step * output.rows; unsigned char *d_input,*d_input1, *d_output; //Allocate device memory SAFE_CALL(cudaMalloc<unsigned char>(&d_input,colorBytes),"CUDA Malloc Failed"); SAFE_CALL(cudaMalloc<unsigned char>(&d_input1,colorBytes),"CUDA Malloc Failed"); SAFE_CALL(cudaMalloc<unsigned char>(&d_output,grayBytes),"CUDA Malloc 
Failed"); //Copy data from OpenCV input image to device memory SAFE_CALL(cudaMemcpy(d_input,input.ptr(),colorBytes,cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed"); SAFE_CALL(cudaMemcpy(d_input1,input1.ptr(),colorBytes,cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed"); //Specify a reasonable block size const dim3 block(16,16); //Calculate grid size to cover the whole image const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y); //Launch the color conversion kernel bgr_to_gray_kernel<<<grid,block>>>(d_input,d_input1,d_output,input.cols,input.rows,input.step,output.step); //Synchronize to check for any kernel launch errors SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed"); //Copy back data from destination device meory to OpenCV output image SAFE_CALL(cudaMemcpy(output.ptr(),d_output,grayBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Host To Device Failed"); //Free the device memory SAFE_CALL(cudaFree(d_input),"CUDA Free Failed"); SAFE_CALL(cudaFree(d_input1),"CUDA Free Failed"); SAFE_CALL(cudaFree(d_output),"CUDA Free Failed"); return 0; }
de8d7f0e96d43dca71b7ac7cf57ba7b44e0c4172.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <helper_functions.h> // helper for shared functions common to CUDA Samples #include <helper_cuda.h> // helper functions for CUDA error checking and initialization #include <stdio.h> void initialData(float *ip, const int size) { int i; for(i = 0; i < size; i++) { ip[i] = (float)(rand() & 0xFF ) / 10.0f; } return; } void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) { float *ia = A; float *ib = B; float *ic = C; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) { ic[ix] = ia[ix] + ib[ix]; } ia += nx; ib += nx; ic += nx; } return; } void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("%d: host %f gpu %f\n", i, hostRef[i], gpuRef[i]); break; } } if (match) printf("PASS\n\n"); else printf("FAIL\n\n"); } __global__ void sumMatrixOnGPU(float *MatA, float *MatB, float *MatC, int nx, int ny) { // Program kernel codes properly, otherwise your system could crash /* FIXME */ // int ix, iy, idx; unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; unsigned int idx = iy * nx + ix; if ((ix < nx) && (iy < ny)) { MatC[idx] = MatA[idx] + MatB[idx]; } } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); checkCudaErrors(hipSetDevice(dev)); // set up data size of matrix int nx = 1 << 14; int ny = 1 << 14; int nxy = nx * ny; int nBytes = nxy * sizeof(float); printf("Matrix size: nx %d ny %d\n", nx, ny); // malloc host memory float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); 
gpuRef = (float *)malloc(nBytes); // initialize data at host side initialData(h_A, nxy); initialData(h_B, nxy); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add matrix at host side for result checks sumMatrixOnHost(h_A, h_B, hostRef, nx, ny); // malloc device global memory float *d_MatA, *d_MatB, *d_MatC; checkCudaErrors(hipMalloc((void **)&d_MatA, nBytes)); checkCudaErrors(hipMalloc((void **)&d_MatB, nBytes)); checkCudaErrors(hipMalloc((void **)&d_MatC, nBytes)); // transfer data from host to device checkCudaErrors(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice)); // invoke kernel at host side int dimx = 16; int dimy = 16; dim3 block(dimx, dimy); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); hipLaunchKernelGGL(( sumMatrixOnGPU), dim3(grid), dim3(block) , 0, 0, d_MatA, d_MatB, d_MatC, nx, ny); // checkCudaErrors kernel error checkCudaErrors(hipGetLastError()); // copy kernel result back to host side checkCudaErrors(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost)); // checkCudaErrors device results checkResult(hostRef, gpuRef, nxy); printf("Haenara Shin, A53233226, #29\n"); // free device global memory checkCudaErrors(hipFree(d_MatA)); checkCudaErrors(hipFree(d_MatB)); checkCudaErrors(hipFree(d_MatC)); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); // reset device checkCudaErrors(hipDeviceReset()); return (0); }
de8d7f0e96d43dca71b7ac7cf57ba7b44e0c4172.cu
#include <cuda_runtime.h> #include <helper_functions.h> // helper for shared functions common to CUDA Samples #include <helper_cuda.h> // helper functions for CUDA error checking and initialization #include <stdio.h> void initialData(float *ip, const int size) { int i; for(i = 0; i < size; i++) { ip[i] = (float)(rand() & 0xFF ) / 10.0f; } return; } void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) { float *ia = A; float *ib = B; float *ic = C; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) { ic[ix] = ia[ix] + ib[ix]; } ia += nx; ib += nx; ic += nx; } return; } void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("%d: host %f gpu %f\n", i, hostRef[i], gpuRef[i]); break; } } if (match) printf("PASS\n\n"); else printf("FAIL\n\n"); } __global__ void sumMatrixOnGPU(float *MatA, float *MatB, float *MatC, int nx, int ny) { // Program kernel codes properly, otherwise your system could crash /* FIXME */ // int ix, iy, idx; unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; unsigned int idx = iy * nx + ix; if ((ix < nx) && (iy < ny)) { MatC[idx] = MatA[idx] + MatB[idx]; } } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); checkCudaErrors(cudaSetDevice(dev)); // set up data size of matrix int nx = 1 << 14; int ny = 1 << 14; int nxy = nx * ny; int nBytes = nxy * sizeof(float); printf("Matrix size: nx %d ny %d\n", nx, ny); // malloc host memory float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); // initialize data at host 
side initialData(h_A, nxy); initialData(h_B, nxy); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add matrix at host side for result checks sumMatrixOnHost(h_A, h_B, hostRef, nx, ny); // malloc device global memory float *d_MatA, *d_MatB, *d_MatC; checkCudaErrors(cudaMalloc((void **)&d_MatA, nBytes)); checkCudaErrors(cudaMalloc((void **)&d_MatB, nBytes)); checkCudaErrors(cudaMalloc((void **)&d_MatC, nBytes)); // transfer data from host to device checkCudaErrors(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice)); // invoke kernel at host side int dimx = 16; int dimy = 16; dim3 block(dimx, dimy); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); sumMatrixOnGPU<<<grid, block >>>(d_MatA, d_MatB, d_MatC, nx, ny); // checkCudaErrors kernel error checkCudaErrors(cudaGetLastError()); // copy kernel result back to host side checkCudaErrors(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost)); // checkCudaErrors device results checkResult(hostRef, gpuRef, nxy); printf("Haenara Shin, A53233226, #29\n"); // free device global memory checkCudaErrors(cudaFree(d_MatA)); checkCudaErrors(cudaFree(d_MatB)); checkCudaErrors(cudaFree(d_MatC)); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); // reset device checkCudaErrors(cudaDeviceReset()); return (0); }
e0ba3480a825231ff496c31cb5554aa3f552f42f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* This sample is a templatized version of the template project. * It also shows how to correctly templatize dynamically allocated shared * memory arrays. * Device code. */ #ifndef _RIJNDAEL_GPU_KERNEL_H_ #define _RIJNDAEL_GPU_KERNEL_H_ #include <stdio.h> #define xmult(a) ((a)<<1) ^ (((a)&128) ? 0x01B : 0) // const device memory __device__ __constant__ unsigned char d_byte_sub_const[256]; __device__ __constant__ unsigned char d_gf2_8_inv_const[256]; __device__ __constant__ unsigned char d_inv_byte_sub_const[256]; __device__ __constant__ unsigned long d_Rcon_const[60]; __device__ __constant__ unsigned char d_shift_row_map_const[16]; __device__ __constant__ unsigned char d_inv_shift_row_map_const[16]; __device__ __constant__ unsigned char d_mult_by_2_const[256]; __device__ __constant__ unsigned char d_mult_by_3_const[256]; __device__ __constant__ unsigned char d_mult_by_9_const[256]; __device__ __constant__ unsigned char d_mult_by_11_const[256]; __device__ __constant__ unsigned char d_mult_by_13_const[256]; __device__ __constant__ unsigned char d_mult_by_14_const[256]; __device__ __constant__ unsigned char d_key_const[480]; struct SharedMemory { __device__ int* getPointer() { extern __shared__ int s_int[]; return s_int; } }; //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! 
@param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void testKernel( int* g_idata, int* g_odata) { // Shared mem size is determined by the host app at run time SharedMemory smem; int* sdata = smem.getPointer(); // access thread id const unsigned int tid = threadIdx.x; // access number of threads in this block const unsigned int num_threads = blockDim.x; //CUPRINTF("\tValue is:%d\n", tid); // read in input data from global memory sdata[tid] = g_idata[tid]; __syncthreads(); // perform some computations sdata[tid] = (int) num_threads * sdata[tid]; __syncthreads(); // write data to global memory g_odata[tid] = sdata[tid]; } __global__ void d_Round( unsigned char* g_state_idata, unsigned char* g_state_odata, unsigned char* g_key, int Nr , int RowSize) { __device__ __shared__ unsigned char s_state[64]; //__device__ __shared__ unsigned char s_temp_state[64]; //__device__ __shared__ char key[480]; //4*8*15 // access thread id const unsigned int tid = threadIdx.x; // access number of threads in this block const unsigned int num_threads = blockDim.x; // thread row index const unsigned int Row = tid/RowSize; // thread col index const unsigned int Col = tid%RowSize; //const unsigned int keyIndex = Nr*RowSize*4+tid; const unsigned int Row1stIndex = Row * RowSize; //const unsigned int Col1stIndex = Col * 4; s_state[tid] = g_state_idata[tid]; __syncthreads(); // Round0: s_state[tid] ^= g_key[tid]; __syncthreads(); //CUPRINTF("Round0: state[%d] = 0x%x\n",tid,s_state[tid]); for (int i = 1; i < Nr; i++) { //CUPRINTF("Round%d: state[%d] = 0x%x\n",i,tid,s_state[tid]); s_state[tid] = d_byte_sub_const[s_state[tid]]; __syncthreads(); //CUPRINTF("after ByteSub: state[%d] = 0x%x\n",tid,s_state[tid]); s_state[tid] = s_state[d_shift_row_map_const[tid]]; __syncthreads(); //CUPRINTF("after shiftRows: state[%d] = 0x%x\n",tid,s_state[tid]); //s_temp_state[tid] = s_state[tid]; //__syncthreads(); s_state[tid] = 
s_state[tid] ^ s_state[Row1stIndex] ^ s_state[Row1stIndex+1] ^ s_state[Row1stIndex+2] ^ s_state[Row1stIndex+3] ^ xmult(s_state[tid]) ^ xmult(s_state[Row1stIndex+((tid+1)%RowSize)]); __syncthreads(); //CUPRINTF("after MixColumn: state[%d] = 0x%x\n",tid,s_state[tid]); s_state[tid] ^= g_key[i*RowSize*4+tid]; __syncthreads(); //CUPRINTF("after AddRoundKey: state[%d] = 0x%x\n",tid,s_state[tid]); } s_state[tid] = d_byte_sub_const[s_state[tid]]; __syncthreads(); s_state[tid] = s_state[d_shift_row_map_const[tid]]; __syncthreads(); s_state[tid] ^= g_key[Nr*RowSize*4+tid]; __syncthreads(); // write data to global memory g_state_odata[tid] = s_state[tid]; } __global__ void d_inv_Round( unsigned char* g_state_idata, unsigned char* g_state_odata, int Nr , int RowSize) { __device__ __shared__ unsigned char s_state[64]; //__device__ __shared__ unsigned char s_temp_state[64]; //__device__ __shared__ char key[480]; //4*8*15 // access thread id const unsigned int tid = threadIdx.x; // access number of threads in this block const unsigned int num_threads = blockDim.x; // thread row index const unsigned int Row = tid/RowSize; // thread col index const unsigned int Col = tid%RowSize; //const unsigned int keyIndex = Nr*RowSize*4+tid; const unsigned int Row1stIndex = Row * RowSize; //const unsigned int Col1stIndex = Col * 4; s_state[tid] = g_state_idata[tid]; __syncthreads(); // AddRoundKey(Nr) s_state[tid] ^= d_key_const[Nr*RowSize*4+tid]; __syncthreads(); // InvShiftRow(Nr) s_state[tid] = s_state[d_inv_shift_row_map_const[tid]]; __syncthreads(); //InvByteSub(Nr) s_state[tid] = d_inv_byte_sub_const[s_state[tid]]; __syncthreads(); for (int i = Nr-1; i > 0; i--) { //AddRoundKey(i) s_state[tid] ^= d_key_const[i*RowSize*4+tid]; __syncthreads(); //InvMixColumn(i) s_state[tid] = d_mult_by_14_const[s_state[tid]] ^ d_mult_by_11_const[s_state[Row1stIndex+(tid+1)%RowSize]] ^ d_mult_by_13_const[s_state[Row1stIndex+(tid+2)%RowSize]] ^ d_mult_by_9_const[s_state[Row1stIndex+(tid+3)%RowSize]]; 
__syncthreads(); //InvShiftByte(i) s_state[tid] = s_state[d_inv_shift_row_map_const[tid]]; __syncthreads(); //InvByteSub(i) s_state[tid] = d_inv_byte_sub_const[s_state[tid]]; __syncthreads(); } // AddRoundKey(0) s_state[tid] ^= d_key_const[tid]; __syncthreads(); // write data to global memory g_state_odata[tid] = s_state[tid]; } __global__ void d_inv_Round_multiBlock( unsigned char* g_state_idata, unsigned char* g_state_odata, int Nr , int RowSize) { //allocate shared memory __device__ __shared__ unsigned char s_state[256]; // access number of threads in this block const unsigned int num_threads = blockDim.x * blockDim.y; // = 256 //if(threadIdx.x == 1 && threadIdx.y == 1) CUPRINTF("blockIdx.x = %d, blockIdx.y = %d, gridDim.x = %d, gridDim.y = %d\n",blockIdx.x,blockIdx.y,gridDim.x,gridDim.y); // block shared memory location const unsigned int s_mem_idx = (blockIdx.y * gridDim.x + blockIdx.x) * num_threads; // access thread id const unsigned int tid = threadIdx.x + threadIdx.y * 16; // access thread row first idx const unsigned int state_offset = threadIdx.y * 16; // aaccess thread id within cypher block const unsigned int ctid = tid % 16; // thread row index const unsigned int Row = ctid/RowSize; // thread col index const unsigned int Col = ctid%RowSize; //const unsigned int keyIndex = Nr*RowSize*4+tid; const unsigned int Row1stIndex = Row * RowSize; //CUPRINTF("kernel vars: gridDim.x = %d , gridDim.y = %d, blockDim.x = %d, blockDim.y = %d, blockIdx.x = %d, blockIdx.y = %d, threadIdx.x = $d, threadIdx.y = %d\n",gridDim.x, gridDim.y,blockDim.x,blockDim.y,blockIdx.x,blockIdx.y,threadIdx.x,threadIdx.y); //CUPRINTF("registers values: num_threads = %d, s_mem_idx = %d, tid = %d, state_offset = %d, ctid = %d, Row = %d, Col = %d, Row1stIndex = %d \n",num_threads,s_mem_idx,tid,state_offset,ctid,Row,Col); s_state[tid] = g_state_idata[tid + s_mem_idx]; __syncthreads(); // AddRoundKey(Nr) s_state[tid] ^= d_key_const[Nr*RowSize*4+ctid]; __syncthreads(); // InvShiftRow(Nr) 
s_state[tid] = s_state[d_inv_shift_row_map_const[ctid] + state_offset]; __syncthreads(); //InvByteSub(Nr) s_state[tid] = d_inv_byte_sub_const[s_state[tid]]; __syncthreads(); for (int i = Nr-1; i > 0; i--) { //AddRoundKey(i) s_state[tid] ^= d_key_const[i*RowSize*4+ctid]; __syncthreads(); //InvMixColumn(i) s_state[tid] = d_mult_by_14_const[s_state[tid]] ^ d_mult_by_11_const[s_state[Row1stIndex + (ctid+1)%RowSize + state_offset]] ^ d_mult_by_13_const[s_state[Row1stIndex + (tid+2)%RowSize + state_offset]] ^ d_mult_by_9_const[s_state[Row1stIndex + (tid+3)%RowSize + state_offset]]; __syncthreads(); //InvShiftByte(i) s_state[tid] = s_state[d_inv_shift_row_map_const[ctid] + state_offset]; __syncthreads(); //InvByteSub(i) s_state[tid] = d_inv_byte_sub_const[s_state[tid]]; __syncthreads(); } // AddRoundKey(0) s_state[tid] ^= d_key_const[ctid]; __syncthreads(); // write data to global memory g_state_odata[tid + s_mem_idx] = s_state[tid]; } #endif // #ifndef_RIJNDAEL_GPU_KERNEL_H_
e0ba3480a825231ff496c31cb5554aa3f552f42f.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* This sample is a templatized version of the template project. * It also shows how to correctly templatize dynamically allocated shared * memory arrays. * Device code. */ #ifndef _RIJNDAEL_GPU_KERNEL_H_ #define _RIJNDAEL_GPU_KERNEL_H_ #include <stdio.h> #define xmult(a) ((a)<<1) ^ (((a)&128) ? 0x01B : 0) // const device memory __device__ __constant__ unsigned char d_byte_sub_const[256]; __device__ __constant__ unsigned char d_gf2_8_inv_const[256]; __device__ __constant__ unsigned char d_inv_byte_sub_const[256]; __device__ __constant__ unsigned long d_Rcon_const[60]; __device__ __constant__ unsigned char d_shift_row_map_const[16]; __device__ __constant__ unsigned char d_inv_shift_row_map_const[16]; __device__ __constant__ unsigned char d_mult_by_2_const[256]; __device__ __constant__ unsigned char d_mult_by_3_const[256]; __device__ __constant__ unsigned char d_mult_by_9_const[256]; __device__ __constant__ unsigned char d_mult_by_11_const[256]; __device__ __constant__ unsigned char d_mult_by_13_const[256]; __device__ __constant__ unsigned char d_mult_by_14_const[256]; __device__ __constant__ unsigned char d_key_const[480]; struct SharedMemory { __device__ int* getPointer() { extern __shared__ int s_int[]; return s_int; } }; //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! 
@param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void testKernel( int* g_idata, int* g_odata) { // Shared mem size is determined by the host app at run time SharedMemory smem; int* sdata = smem.getPointer(); // access thread id const unsigned int tid = threadIdx.x; // access number of threads in this block const unsigned int num_threads = blockDim.x; //CUPRINTF("\tValue is:%d\n", tid); // read in input data from global memory sdata[tid] = g_idata[tid]; __syncthreads(); // perform some computations sdata[tid] = (int) num_threads * sdata[tid]; __syncthreads(); // write data to global memory g_odata[tid] = sdata[tid]; } __global__ void d_Round( unsigned char* g_state_idata, unsigned char* g_state_odata, unsigned char* g_key, int Nr , int RowSize) { __device__ __shared__ unsigned char s_state[64]; //__device__ __shared__ unsigned char s_temp_state[64]; //__device__ __shared__ char key[480]; //4*8*15 // access thread id const unsigned int tid = threadIdx.x; // access number of threads in this block const unsigned int num_threads = blockDim.x; // thread row index const unsigned int Row = tid/RowSize; // thread col index const unsigned int Col = tid%RowSize; //const unsigned int keyIndex = Nr*RowSize*4+tid; const unsigned int Row1stIndex = Row * RowSize; //const unsigned int Col1stIndex = Col * 4; s_state[tid] = g_state_idata[tid]; __syncthreads(); // Round0: s_state[tid] ^= g_key[tid]; __syncthreads(); //CUPRINTF("Round0: state[%d] = 0x%x\n",tid,s_state[tid]); for (int i = 1; i < Nr; i++) { //CUPRINTF("Round%d: state[%d] = 0x%x\n",i,tid,s_state[tid]); s_state[tid] = d_byte_sub_const[s_state[tid]]; __syncthreads(); //CUPRINTF("after ByteSub: state[%d] = 0x%x\n",tid,s_state[tid]); s_state[tid] = s_state[d_shift_row_map_const[tid]]; __syncthreads(); //CUPRINTF("after shiftRows: state[%d] = 0x%x\n",tid,s_state[tid]); //s_temp_state[tid] = s_state[tid]; //__syncthreads(); s_state[tid] = 
s_state[tid] ^ s_state[Row1stIndex] ^ s_state[Row1stIndex+1] ^ s_state[Row1stIndex+2] ^ s_state[Row1stIndex+3] ^ xmult(s_state[tid]) ^ xmult(s_state[Row1stIndex+((tid+1)%RowSize)]); __syncthreads(); //CUPRINTF("after MixColumn: state[%d] = 0x%x\n",tid,s_state[tid]); s_state[tid] ^= g_key[i*RowSize*4+tid]; __syncthreads(); //CUPRINTF("after AddRoundKey: state[%d] = 0x%x\n",tid,s_state[tid]); } s_state[tid] = d_byte_sub_const[s_state[tid]]; __syncthreads(); s_state[tid] = s_state[d_shift_row_map_const[tid]]; __syncthreads(); s_state[tid] ^= g_key[Nr*RowSize*4+tid]; __syncthreads(); // write data to global memory g_state_odata[tid] = s_state[tid]; } __global__ void d_inv_Round( unsigned char* g_state_idata, unsigned char* g_state_odata, int Nr , int RowSize) { __device__ __shared__ unsigned char s_state[64]; //__device__ __shared__ unsigned char s_temp_state[64]; //__device__ __shared__ char key[480]; //4*8*15 // access thread id const unsigned int tid = threadIdx.x; // access number of threads in this block const unsigned int num_threads = blockDim.x; // thread row index const unsigned int Row = tid/RowSize; // thread col index const unsigned int Col = tid%RowSize; //const unsigned int keyIndex = Nr*RowSize*4+tid; const unsigned int Row1stIndex = Row * RowSize; //const unsigned int Col1stIndex = Col * 4; s_state[tid] = g_state_idata[tid]; __syncthreads(); // AddRoundKey(Nr) s_state[tid] ^= d_key_const[Nr*RowSize*4+tid]; __syncthreads(); // InvShiftRow(Nr) s_state[tid] = s_state[d_inv_shift_row_map_const[tid]]; __syncthreads(); //InvByteSub(Nr) s_state[tid] = d_inv_byte_sub_const[s_state[tid]]; __syncthreads(); for (int i = Nr-1; i > 0; i--) { //AddRoundKey(i) s_state[tid] ^= d_key_const[i*RowSize*4+tid]; __syncthreads(); //InvMixColumn(i) s_state[tid] = d_mult_by_14_const[s_state[tid]] ^ d_mult_by_11_const[s_state[Row1stIndex+(tid+1)%RowSize]] ^ d_mult_by_13_const[s_state[Row1stIndex+(tid+2)%RowSize]] ^ d_mult_by_9_const[s_state[Row1stIndex+(tid+3)%RowSize]]; 
__syncthreads(); //InvShiftByte(i) s_state[tid] = s_state[d_inv_shift_row_map_const[tid]]; __syncthreads(); //InvByteSub(i) s_state[tid] = d_inv_byte_sub_const[s_state[tid]]; __syncthreads(); } // AddRoundKey(0) s_state[tid] ^= d_key_const[tid]; __syncthreads(); // write data to global memory g_state_odata[tid] = s_state[tid]; } __global__ void d_inv_Round_multiBlock( unsigned char* g_state_idata, unsigned char* g_state_odata, int Nr , int RowSize) { //allocate shared memory __device__ __shared__ unsigned char s_state[256]; // access number of threads in this block const unsigned int num_threads = blockDim.x * blockDim.y; // = 256 //if(threadIdx.x == 1 && threadIdx.y == 1) CUPRINTF("blockIdx.x = %d, blockIdx.y = %d, gridDim.x = %d, gridDim.y = %d\n",blockIdx.x,blockIdx.y,gridDim.x,gridDim.y); // block shared memory location const unsigned int s_mem_idx = (blockIdx.y * gridDim.x + blockIdx.x) * num_threads; // access thread id const unsigned int tid = threadIdx.x + threadIdx.y * 16; // access thread row first idx const unsigned int state_offset = threadIdx.y * 16; // aaccess thread id within cypher block const unsigned int ctid = tid % 16; // thread row index const unsigned int Row = ctid/RowSize; // thread col index const unsigned int Col = ctid%RowSize; //const unsigned int keyIndex = Nr*RowSize*4+tid; const unsigned int Row1stIndex = Row * RowSize; //CUPRINTF("kernel vars: gridDim.x = %d , gridDim.y = %d, blockDim.x = %d, blockDim.y = %d, blockIdx.x = %d, blockIdx.y = %d, threadIdx.x = $d, threadIdx.y = %d\n",gridDim.x, gridDim.y,blockDim.x,blockDim.y,blockIdx.x,blockIdx.y,threadIdx.x,threadIdx.y); //CUPRINTF("registers values: num_threads = %d, s_mem_idx = %d, tid = %d, state_offset = %d, ctid = %d, Row = %d, Col = %d, Row1stIndex = %d \n",num_threads,s_mem_idx,tid,state_offset,ctid,Row,Col); s_state[tid] = g_state_idata[tid + s_mem_idx]; __syncthreads(); // AddRoundKey(Nr) s_state[tid] ^= d_key_const[Nr*RowSize*4+ctid]; __syncthreads(); // InvShiftRow(Nr) 
s_state[tid] = s_state[d_inv_shift_row_map_const[ctid] + state_offset]; __syncthreads(); //InvByteSub(Nr) s_state[tid] = d_inv_byte_sub_const[s_state[tid]]; __syncthreads(); for (int i = Nr-1; i > 0; i--) { //AddRoundKey(i) s_state[tid] ^= d_key_const[i*RowSize*4+ctid]; __syncthreads(); //InvMixColumn(i) s_state[tid] = d_mult_by_14_const[s_state[tid]] ^ d_mult_by_11_const[s_state[Row1stIndex + (ctid+1)%RowSize + state_offset]] ^ d_mult_by_13_const[s_state[Row1stIndex + (tid+2)%RowSize + state_offset]] ^ d_mult_by_9_const[s_state[Row1stIndex + (tid+3)%RowSize + state_offset]]; __syncthreads(); //InvShiftByte(i) s_state[tid] = s_state[d_inv_shift_row_map_const[ctid] + state_offset]; __syncthreads(); //InvByteSub(i) s_state[tid] = d_inv_byte_sub_const[s_state[tid]]; __syncthreads(); } // AddRoundKey(0) s_state[tid] ^= d_key_const[ctid]; __syncthreads(); // write data to global memory g_state_odata[tid + s_mem_idx] = s_state[tid]; } #endif // #ifndef_RIJNDAEL_GPU_KERNEL_H_
f40336a26fc5407da88f946de07d79e4044ffdc5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "util.h" #include "data.h" // #include <tensor.h> // #include "config.h" // #include "aggr_kernel_no_template.h" // #include "spmm.h" // #include "att_kernel_no_template.h" // #include "dense.h" // #include "aggr_dense_kernel.h" #include <queue> __global__ void makex(int* neis, int feature_len, int mmax, float* input, float* output) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < mmax) { int whichnei = neis[tid / feature_len]; output[tid] = input[whichnei * feature_len + tid % feature_len]; } } __global__ void makex2(int* neis, int feature_len, int mmax, int num_v, int nei_num, float* input, float* output) { int tid = blockDim.x * blockIdx.x + threadIdx.x; int twhich = tid / feature_len; int tlane = tid % feature_len; if(tid < mmax) { int iterid = twhich % nei_num; int target_pos = (iterid * num_v * feature_len) + (twhich / nei_num * feature_len) + tlane; int src_pos = neis[twhich] * feature_len + tlane; output[target_pos] = input[src_pos]; } } __forceinline__ __device__ float device_logistic ( float x ) { return __frcp_rn ( ( float ) 1 + __expf ( -x ) ); } __forceinline__ __device__ float device_tanh ( float x ) { return tanhf ( x ); } __global__ void kernel_elementwise_lstm_forward ( float *__restrict__ g, float *__restrict__ g2, float *__restrict__ h, float *__restrict__ c, float *__restrict__ ct, float *__restrict__ prev_c, int *__restrict__ n_idx, int num_v, int feature, int num_nei, int cur_nei) { const int i_gates = 3 * num_v * feature; const int o_gates = 0; const int f_gates = 2 * num_v * feature; const int c_gates = num_v * feature; size_t elements = num_v * feature; /* there are N * B threads */ int tid = blockDim.x * blockIdx.x + threadIdx.x; /* in - gates after SGEMMs */ if ( tid < elements ) { int which = n_idx[(tid / feature) * num_nei + cur_nei]; int idx = which * feature + tid % feature; float tmp0 = g2[i_gates + tid] + g[i_gates + idx]; 
float tmp1 = g2[o_gates + tid] + g[o_gates + idx]; float tmp2 = g2[f_gates + tid] + g[f_gates + idx]; float tmp3 = g2[c_gates + tid] + g[c_gates + idx]; tmp0 = device_logistic(tmp0); tmp1 = device_logistic(tmp1); tmp2 = device_logistic(tmp2); tmp3 = device_tanh(tmp3); float tmp5 = tmp2 * prev_c[tid] + tmp0 * tmp3; float tmp6 = device_tanh(tmp5); h[tid] = tmp1 * tmp6; ct[tid] = tmp6; c[tid] = tmp5; g2[i_gates + tid] = tmp0; g2[o_gates + tid] = tmp1; g2[f_gates + tid] = tmp2; g2[c_gates + tid] = tmp3; } /* out - updated c and h */ } __global__ void kernel_elementwise_lstm_forward_dense ( float *__restrict__ g, float *__restrict__ g2, float *__restrict__ h, float *__restrict__ c, float *__restrict__ ct, float *__restrict__ prev_c, int num_v, int feature, int nei_num, int cur_nei) { const int i_gates = 3 * num_v * feature; const int o_gates = 0; const int f_gates = 2 * num_v * feature; const int c_gates = num_v * feature; size_t elements = num_v * feature; /* there are N * B threads */ int tid = blockDim.x * blockIdx.x + threadIdx.x; /* in - gates after SGEMMs */ if ( tid < elements ) { float tmp0 = g2[i_gates + tid] + g[i_gates + tid]; float tmp1 = g2[o_gates + tid] + g[o_gates + tid]; float tmp2 = g2[f_gates + tid] + g[f_gates + tid]; float tmp3 = g2[c_gates + tid] + g[c_gates + tid]; tmp0 = device_logistic(tmp0); tmp1 = device_logistic(tmp1); tmp2 = device_logistic(tmp2); tmp3 = device_tanh(tmp3); float tmp5 = tmp2 * prev_c[tid] + tmp0 * tmp3; float tmp6 = device_tanh(tmp5); h[tid] = tmp1 * tmp6; ct[tid] = tmp6; c[tid] = tmp5; g2[i_gates + tid] = tmp0; g2[o_gates + tid] = tmp1; g2[f_gates + tid] = tmp2; g2[c_gates + tid] = tmp3; } /* out - updated c and h */ } enum PTRS { x, x_transformed, h, c, weight, weight2, g2, ct, x_reorder }; int main(int argc, char ** argv) { argParse(argc, argv); const int times = 5; const int BLOCK_SIZE = 512; int NEIGHBOR_NUM = 16; // selected neighbor number if(NEINUM != -1) NEIGHBOR_NUM = NEINUM; hiprandGenerator_t hiprand; 
hiprandCreateGenerator(&hiprand, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(hiprand, 123ULL); hipblasHandle_t cublasHandle; hipblasCreate(&cublasHandle); int* tmp1 = NULL; int* tmp2 = NULL; load_graph(inputgraph, n, m, tmp1, tmp2); gptrs = new int*[1]; gidxs = new int*[1]; checkCudaErrors(cudaMalloc2((void**)gptrs, (n + 1) * sizeof(int))); checkCudaErrors(cudaMalloc2((void**)gidxs, m * sizeof(int))); checkCudaErrors(hipMemcpy(gptrs[0], tmp1, sizeof(int) * (n + 1), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(gidxs[0], tmp2, sizeof(int) * m, hipMemcpyHostToDevice)); vector<float*> ptr; vector<int> sizes = { n * feature_len, n * feature_len * 4, n * feature_len * (NEIGHBOR_NUM + 1), n * feature_len * (NEIGHBOR_NUM + 1), feature_len * feature_len * 4, feature_len * feature_len * 4, feature_len * n * 4, n * feature_len, //n * feature_len * (NEIGHBOR_NUM + 1) * 4, n * feature_len * (NEIGHBOR_NUM) }; double overall_size = 0; for(auto item : sizes) { float* tmp = NULL; checkCudaErrors(cudaMalloc2((void**)&tmp, sizeof(float) * (size_t)item)); checkCudaErrors(hipDeviceSynchronize()); hiprandGenerateNormal(hiprand, tmp, item, 0.f, 1.00); ptr.push_back(tmp); checkCudaErrors(hipDeviceSynchronize()); overall_size += item; } dbg(overall_size); const float alpha = 1.0f; const float beta = 0.0f; { checkCudaErrors(hipDeviceSynchronize()); double timing_our = 0; for(int i = 0; i < times; ++i) { timestamp(t0); checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, feature_len * 4, feature_len, &alpha, ptr[x], n, ptr[weight], feature_len, &beta, ptr[x_transformed], n)); for(int iter = 0; iter < NEIGHBOR_NUM; ++iter) { //dbg(iter); checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, feature_len * 4, feature_len, &alpha, ptr[h] + iter * n * feature_len, n, ptr[weight2], feature_len, &beta, ptr[g2], n)); //ptr[g2] + (iter + 1) * n * feature_len * 4, n)); //checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( 
kernel_elementwise_lstm_forward) , dim3((n * feature_len + BLOCK_SIZE - 1)/ BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, ptr[x_transformed], ptr[g2], ptr[h] + (iter + 1) * n * feature_len, ptr[c] + (iter + 1) * n * feature_len, ptr[ct], //ptr[ct] + (iter + 1) * n * feature_len, ptr[c] + (iter) * n * feature_len, gidxs[0], n, feature_len, NEIGHBOR_NUM, iter ); //checkCudaErrors(hipDeviceSynchronize()); } checkCudaErrors(hipDeviceSynchronize()); timestamp(t1); if(i > 2) timing_our += getDuration(t0, t1); } dbg(timing_our / (times - 3)); } { checkCudaErrors(hipDeviceSynchronize()); double timing_dgl = 0; for(int i = 0; i < times; ++i) { timestamp(t0); hipLaunchKernelGGL(( makex2) , dim3((n * NEIGHBOR_NUM * feature_len + 255) / 256), dim3(256) , 0, 0, gidxs[0], feature_len, n * NEIGHBOR_NUM * feature_len, n, NEIGHBOR_NUM, ptr[x], ptr[x_reorder]); for(int iter = 0; iter < NEIGHBOR_NUM; ++iter) { checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, feature_len * 4, feature_len, &alpha, // ptr[h], n, ptr[h] + iter * n * feature_len, n, ptr[weight2], feature_len, &beta, ptr[g2], n)); //ptr[g2] + (iter + 1) * n * feature_len * 4, n)); checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, feature_len * 4, feature_len, &alpha, ptr[x_reorder] + feature_len * n * iter, n, ptr[weight], feature_len, &beta, ptr[x_transformed], n)); hipLaunchKernelGGL(( kernel_elementwise_lstm_forward_dense) , dim3((n * feature_len + BLOCK_SIZE - 1)/ BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, ptr[x_transformed], ptr[g2], //ptr[g2] + (iter + 1) * n * feature_len * 4, ptr[h] + (iter + 1) * n * feature_len, ptr[c] + (iter + 1) * n * feature_len, ptr[ct], //ptr[ct] + (iter + 1) * n * feature_len, ptr[c] + (iter) * n * feature_len, n, feature_len, NEIGHBOR_NUM, iter ); } checkCudaErrors(hipDeviceSynchronize()); timestamp(t1); if(i > 2) timing_dgl += getDuration(t0, t1); } dbg(timing_dgl / (times - 3)); } { checkCudaErrors(hipDeviceSynchronize()); double timing_sparsefetch = 
0; for(int i = 0; i < times; ++i) { timestamp(t0); for(int iter = 0; iter < NEIGHBOR_NUM; ++iter) { checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, feature_len * 4, feature_len, &alpha, // ptr[h], n, ptr[h] + iter * n * feature_len, n, ptr[weight2], feature_len, &beta, ptr[g2], n)); // ptr[g2] + (iter + 1) * n * feature_len * 4, n)); checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, feature_len * 4, feature_len, &alpha, ptr[x_reorder] + feature_len * n * iter, n, ptr[weight], feature_len, &beta, ptr[x_transformed], n)); hipLaunchKernelGGL(( kernel_elementwise_lstm_forward_dense) , dim3((n * feature_len + BLOCK_SIZE - 1)/ BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, ptr[x_transformed], ptr[g2], //ptr[g2] + (iter + 1) * n * feature_len * 4, ptr[h] + (iter + 1) * n * feature_len, ptr[c] + (iter + 1) * n * feature_len, ptr[ct], //ptr[ct] + (iter + 1) * n * feature_len, ptr[c] + (iter) * n * feature_len, n, feature_len, NEIGHBOR_NUM, iter ); } checkCudaErrors(hipDeviceSynchronize()); timestamp(t1); if(i > 2) timing_sparsefetch += getDuration(t0, t1); } dbg(timing_sparsefetch / (times - 3)); } }
f40336a26fc5407da88f946de07d79e4044ffdc5.cu
#include "util.h" #include "data.h" // #include <tensor.h> // #include "config.h" // #include "aggr_kernel_no_template.h" // #include "spmm.h" // #include "att_kernel_no_template.h" // #include "dense.h" // #include "aggr_dense_kernel.h" #include <queue> __global__ void makex(int* neis, int feature_len, int mmax, float* input, float* output) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < mmax) { int whichnei = neis[tid / feature_len]; output[tid] = input[whichnei * feature_len + tid % feature_len]; } } __global__ void makex2(int* neis, int feature_len, int mmax, int num_v, int nei_num, float* input, float* output) { int tid = blockDim.x * blockIdx.x + threadIdx.x; int twhich = tid / feature_len; int tlane = tid % feature_len; if(tid < mmax) { int iterid = twhich % nei_num; int target_pos = (iterid * num_v * feature_len) + (twhich / nei_num * feature_len) + tlane; int src_pos = neis[twhich] * feature_len + tlane; output[target_pos] = input[src_pos]; } } __forceinline__ __device__ float device_logistic ( float x ) { return __frcp_rn ( ( float ) 1 + __expf ( -x ) ); } __forceinline__ __device__ float device_tanh ( float x ) { return tanhf ( x ); } __global__ void kernel_elementwise_lstm_forward ( float *__restrict__ g, float *__restrict__ g2, float *__restrict__ h, float *__restrict__ c, float *__restrict__ ct, float *__restrict__ prev_c, int *__restrict__ n_idx, int num_v, int feature, int num_nei, int cur_nei) { const int i_gates = 3 * num_v * feature; const int o_gates = 0; const int f_gates = 2 * num_v * feature; const int c_gates = num_v * feature; size_t elements = num_v * feature; /* there are N * B threads */ int tid = blockDim.x * blockIdx.x + threadIdx.x; /* in - gates after SGEMMs */ if ( tid < elements ) { int which = n_idx[(tid / feature) * num_nei + cur_nei]; int idx = which * feature + tid % feature; float tmp0 = g2[i_gates + tid] + g[i_gates + idx]; float tmp1 = g2[o_gates + tid] + g[o_gates + idx]; float tmp2 = g2[f_gates + tid] + 
g[f_gates + idx]; float tmp3 = g2[c_gates + tid] + g[c_gates + idx]; tmp0 = device_logistic(tmp0); tmp1 = device_logistic(tmp1); tmp2 = device_logistic(tmp2); tmp3 = device_tanh(tmp3); float tmp5 = tmp2 * prev_c[tid] + tmp0 * tmp3; float tmp6 = device_tanh(tmp5); h[tid] = tmp1 * tmp6; ct[tid] = tmp6; c[tid] = tmp5; g2[i_gates + tid] = tmp0; g2[o_gates + tid] = tmp1; g2[f_gates + tid] = tmp2; g2[c_gates + tid] = tmp3; } /* out - updated c and h */ } __global__ void kernel_elementwise_lstm_forward_dense ( float *__restrict__ g, float *__restrict__ g2, float *__restrict__ h, float *__restrict__ c, float *__restrict__ ct, float *__restrict__ prev_c, int num_v, int feature, int nei_num, int cur_nei) { const int i_gates = 3 * num_v * feature; const int o_gates = 0; const int f_gates = 2 * num_v * feature; const int c_gates = num_v * feature; size_t elements = num_v * feature; /* there are N * B threads */ int tid = blockDim.x * blockIdx.x + threadIdx.x; /* in - gates after SGEMMs */ if ( tid < elements ) { float tmp0 = g2[i_gates + tid] + g[i_gates + tid]; float tmp1 = g2[o_gates + tid] + g[o_gates + tid]; float tmp2 = g2[f_gates + tid] + g[f_gates + tid]; float tmp3 = g2[c_gates + tid] + g[c_gates + tid]; tmp0 = device_logistic(tmp0); tmp1 = device_logistic(tmp1); tmp2 = device_logistic(tmp2); tmp3 = device_tanh(tmp3); float tmp5 = tmp2 * prev_c[tid] + tmp0 * tmp3; float tmp6 = device_tanh(tmp5); h[tid] = tmp1 * tmp6; ct[tid] = tmp6; c[tid] = tmp5; g2[i_gates + tid] = tmp0; g2[o_gates + tid] = tmp1; g2[f_gates + tid] = tmp2; g2[c_gates + tid] = tmp3; } /* out - updated c and h */ } enum PTRS { x, x_transformed, h, c, weight, weight2, g2, ct, x_reorder }; int main(int argc, char ** argv) { argParse(argc, argv); const int times = 5; const int BLOCK_SIZE = 512; int NEIGHBOR_NUM = 16; // selected neighbor number if(NEINUM != -1) NEIGHBOR_NUM = NEINUM; curandGenerator_t curand; curandCreateGenerator(&curand, CURAND_RNG_PSEUDO_DEFAULT); 
curandSetPseudoRandomGeneratorSeed(curand, 123ULL); cublasHandle_t cublasHandle; cublasCreate(&cublasHandle); int* tmp1 = NULL; int* tmp2 = NULL; load_graph(inputgraph, n, m, tmp1, tmp2); gptrs = new int*[1]; gidxs = new int*[1]; checkCudaErrors(cudaMalloc2((void**)gptrs, (n + 1) * sizeof(int))); checkCudaErrors(cudaMalloc2((void**)gidxs, m * sizeof(int))); checkCudaErrors(cudaMemcpy(gptrs[0], tmp1, sizeof(int) * (n + 1), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(gidxs[0], tmp2, sizeof(int) * m, cudaMemcpyHostToDevice)); vector<float*> ptr; vector<int> sizes = { n * feature_len, n * feature_len * 4, n * feature_len * (NEIGHBOR_NUM + 1), n * feature_len * (NEIGHBOR_NUM + 1), feature_len * feature_len * 4, feature_len * feature_len * 4, feature_len * n * 4, n * feature_len, //n * feature_len * (NEIGHBOR_NUM + 1) * 4, n * feature_len * (NEIGHBOR_NUM) }; double overall_size = 0; for(auto item : sizes) { float* tmp = NULL; checkCudaErrors(cudaMalloc2((void**)&tmp, sizeof(float) * (size_t)item)); checkCudaErrors(cudaDeviceSynchronize()); curandGenerateNormal(curand, tmp, item, 0.f, 1.00); ptr.push_back(tmp); checkCudaErrors(cudaDeviceSynchronize()); overall_size += item; } dbg(overall_size); const float alpha = 1.0f; const float beta = 0.0f; { checkCudaErrors(cudaDeviceSynchronize()); double timing_our = 0; for(int i = 0; i < times; ++i) { timestamp(t0); checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, feature_len * 4, feature_len, &alpha, ptr[x], n, ptr[weight], feature_len, &beta, ptr[x_transformed], n)); for(int iter = 0; iter < NEIGHBOR_NUM; ++iter) { //dbg(iter); checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, feature_len * 4, feature_len, &alpha, ptr[h] + iter * n * feature_len, n, ptr[weight2], feature_len, &beta, ptr[g2], n)); //ptr[g2] + (iter + 1) * n * feature_len * 4, n)); //checkCudaErrors(cudaDeviceSynchronize()); kernel_elementwise_lstm_forward <<< (n * feature_len + BLOCK_SIZE - 1)/ BLOCK_SIZE, 
BLOCK_SIZE >>> ( ptr[x_transformed], ptr[g2], ptr[h] + (iter + 1) * n * feature_len, ptr[c] + (iter + 1) * n * feature_len, ptr[ct], //ptr[ct] + (iter + 1) * n * feature_len, ptr[c] + (iter) * n * feature_len, gidxs[0], n, feature_len, NEIGHBOR_NUM, iter ); //checkCudaErrors(cudaDeviceSynchronize()); } checkCudaErrors(cudaDeviceSynchronize()); timestamp(t1); if(i > 2) timing_our += getDuration(t0, t1); } dbg(timing_our / (times - 3)); } { checkCudaErrors(cudaDeviceSynchronize()); double timing_dgl = 0; for(int i = 0; i < times; ++i) { timestamp(t0); makex2 <<< (n * NEIGHBOR_NUM * feature_len + 255) / 256, 256 >>> (gidxs[0], feature_len, n * NEIGHBOR_NUM * feature_len, n, NEIGHBOR_NUM, ptr[x], ptr[x_reorder]); for(int iter = 0; iter < NEIGHBOR_NUM; ++iter) { checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, feature_len * 4, feature_len, &alpha, // ptr[h], n, ptr[h] + iter * n * feature_len, n, ptr[weight2], feature_len, &beta, ptr[g2], n)); //ptr[g2] + (iter + 1) * n * feature_len * 4, n)); checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, feature_len * 4, feature_len, &alpha, ptr[x_reorder] + feature_len * n * iter, n, ptr[weight], feature_len, &beta, ptr[x_transformed], n)); kernel_elementwise_lstm_forward_dense <<< (n * feature_len + BLOCK_SIZE - 1)/ BLOCK_SIZE, BLOCK_SIZE >>> ( ptr[x_transformed], ptr[g2], //ptr[g2] + (iter + 1) * n * feature_len * 4, ptr[h] + (iter + 1) * n * feature_len, ptr[c] + (iter + 1) * n * feature_len, ptr[ct], //ptr[ct] + (iter + 1) * n * feature_len, ptr[c] + (iter) * n * feature_len, n, feature_len, NEIGHBOR_NUM, iter ); } checkCudaErrors(cudaDeviceSynchronize()); timestamp(t1); if(i > 2) timing_dgl += getDuration(t0, t1); } dbg(timing_dgl / (times - 3)); } { checkCudaErrors(cudaDeviceSynchronize()); double timing_sparsefetch = 0; for(int i = 0; i < times; ++i) { timestamp(t0); for(int iter = 0; iter < NEIGHBOR_NUM; ++iter) { checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 
n, feature_len * 4, feature_len, &alpha, // ptr[h], n, ptr[h] + iter * n * feature_len, n, ptr[weight2], feature_len, &beta, ptr[g2], n)); // ptr[g2] + (iter + 1) * n * feature_len * 4, n)); checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, feature_len * 4, feature_len, &alpha, ptr[x_reorder] + feature_len * n * iter, n, ptr[weight], feature_len, &beta, ptr[x_transformed], n)); kernel_elementwise_lstm_forward_dense <<< (n * feature_len + BLOCK_SIZE - 1)/ BLOCK_SIZE, BLOCK_SIZE >>> ( ptr[x_transformed], ptr[g2], //ptr[g2] + (iter + 1) * n * feature_len * 4, ptr[h] + (iter + 1) * n * feature_len, ptr[c] + (iter + 1) * n * feature_len, ptr[ct], //ptr[ct] + (iter + 1) * n * feature_len, ptr[c] + (iter) * n * feature_len, n, feature_len, NEIGHBOR_NUM, iter ); } checkCudaErrors(cudaDeviceSynchronize()); timestamp(t1); if(i > 2) timing_sparsefetch += getDuration(t0, t1); } dbg(timing_sparsefetch / (times - 3)); } }
4f3adf4bf06fbd8cc9a0c5754355f4e9c4ea2bab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sigmoid_activation.hh" #include "../nn_utils/nn_exception.hh" #include <iostream> __device__ float sigmoid(float x) { return 1.0f / (1 + exp(-x)); } __global__ void sigmoidActivationForward(float* Z, float* A, int Z_x_dim, int Z_y_dim) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < Z_x_dim * Z_y_dim) { A[index] = sigmoid(Z[index]); } } __global__ void sigmoidActivationBackprop(float* Z, float* dA, float* dZ, int Z_x_dim, int Z_y_dim) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < Z_x_dim * Z_y_dim) { dZ[index] = dA[index] * sigmoid(Z[index]) * (1 - sigmoid(Z[index])); } } SigmoidActivation::SigmoidActivation(std::string name) { this->name = name; } SigmoidActivation::~SigmoidActivation() { } Matrix& SigmoidActivation::forward(Matrix& Z) { this->Z = Z; A.allocateMemoryIfNotAllocated(Z.shape); dim3 block_size(256); dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x); hipLaunchKernelGGL(( sigmoidActivationForward), dim3(num_of_blocks), dim3(block_size), 0, 0, Z.data_device.get(), A.data_device.get(), Z.shape.x, Z.shape.y); NNException::throwIfDeviceErrorsOccurred("Cannot perform sigmoid forward propagation."); return A; } Matrix& SigmoidActivation::backprop(Matrix& dA, float learning_rate) { dZ.allocateMemoryIfNotAllocated(Z.shape); dim3 block_size(256); dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x); hipLaunchKernelGGL(( sigmoidActivationBackprop), dim3(num_of_blocks), dim3(block_size), 0, 0, Z.data_device.get(), dA.data_device.get(), dZ.data_device.get(), Z.shape.x, Z.shape.y); NNException::throwIfDeviceErrorsOccurred("Cannot perform sigmoid back propagation"); return dZ; }
4f3adf4bf06fbd8cc9a0c5754355f4e9c4ea2bab.cu
#include "sigmoid_activation.hh" #include "../nn_utils/nn_exception.hh" #include <iostream> __device__ float sigmoid(float x) { return 1.0f / (1 + exp(-x)); } __global__ void sigmoidActivationForward(float* Z, float* A, int Z_x_dim, int Z_y_dim) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < Z_x_dim * Z_y_dim) { A[index] = sigmoid(Z[index]); } } __global__ void sigmoidActivationBackprop(float* Z, float* dA, float* dZ, int Z_x_dim, int Z_y_dim) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < Z_x_dim * Z_y_dim) { dZ[index] = dA[index] * sigmoid(Z[index]) * (1 - sigmoid(Z[index])); } } SigmoidActivation::SigmoidActivation(std::string name) { this->name = name; } SigmoidActivation::~SigmoidActivation() { } Matrix& SigmoidActivation::forward(Matrix& Z) { this->Z = Z; A.allocateMemoryIfNotAllocated(Z.shape); dim3 block_size(256); dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x); sigmoidActivationForward<<<num_of_blocks, block_size>>>(Z.data_device.get(), A.data_device.get(), Z.shape.x, Z.shape.y); NNException::throwIfDeviceErrorsOccurred("Cannot perform sigmoid forward propagation."); return A; } Matrix& SigmoidActivation::backprop(Matrix& dA, float learning_rate) { dZ.allocateMemoryIfNotAllocated(Z.shape); dim3 block_size(256); dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x); sigmoidActivationBackprop<<<num_of_blocks, block_size>>>(Z.data_device.get(), dA.data_device.get(), dZ.data_device.get(), Z.shape.x, Z.shape.y); NNException::throwIfDeviceErrorsOccurred("Cannot perform sigmoid back propagation"); return dZ; }
865be701ffa6bb6613a81c4d2739538c4603f5cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void var(float * M1, float * M2, float * X, int b, size_t nele) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx<nele) { float delta = X[idx] - M1[idx]; M1[idx] += delta / (b + 1); M2[idx] += delta*(X[idx] - M1[idx]); } }
865be701ffa6bb6613a81c4d2739538c4603f5cc.cu
#include "includes.h" __global__ void var(float * M1, float * M2, float * X, int b, size_t nele) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx<nele) { float delta = X[idx] - M1[idx]; M1[idx] += delta / (b + 1); M2[idx] += delta*(X[idx] - M1[idx]); } }
a7e11ef454e4a417e53fcaa6491367bb5bc0b276.hip
// !!! This is a file automatically generated by hipify!!! /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // #include <array/NDArray.h> #include <array/NDArrayFactory.h> #include <hip/hip_runtime.h> #include <graph/Context.h> #include <graph/Node.h> #include <graph/Variable.h> #include <graph/VariableSpace.h> #include <helpers/MmulHelper.h> #include <helpers/TAD.h> #include <ops/specials_cuda.h> #include "testlayers.h" using namespace sd; using namespace sd::graph; class CudaBasicsTests2 : public testing::Test { public: }; TEST_F(CudaBasicsTests2, test_devices_1) { auto caps = Environment::getInstance().capabilities(); ASSERT_FALSE(caps.empty()); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_1) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::FLOAT32); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::FLOAT32); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 
5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printIndexedBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_2) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('f', {M, N}, sd::DataType::DOUBLE); NDArray exp('f', {M, N}, {-1.6, -0.7, 0.2, -0.8, 0.1, 1., -0., 0.9, 1.8, 0.8, 1.7, 2.6, 1.6, 2.5, 3.4}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_3) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('f', {M, N}, sd::DataType::DOUBLE); NDArray exp('f', {M, N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_4) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('c', {M, N}, sd::DataType::DOUBLE); NDArray 
exp('c', {M, N}, {0.1, 2.5, 4.9, 7.3, 9.7, 0.3, 2.7, 5.1, 7.5, 9.9, 0.5, 2.9, 5.3, 7.7, 10.1}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); // NDArray* pA = a.permute({1,0}); // NDArray* pB = b.permute({1,0}); // NDArray* pC = c.permute({1,0}); // sd::MmulHelper::mmul(pB, pA, pC, 1., 0.); // ASSERT_TRUE(c.equalsTo(&exp)); // delete pA; // delete pB; // delete pC; } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_5) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('f', {M, N}, sd::DataType::DOUBLE); NDArray exp('f', {M, N}, {-8.8, -4.3, 0.2, 8.6, 4.1, -0.4, -8.4, -3.9, 0.6, 8.2, 3.7, -0.8, -8.0, -3.5, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_6) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('c', {M, N}, sd::DataType::DOUBLE); NDArray exp('c', {M, N}, {-1.6, -0.8, -0.0, 0.8, 1.6, -0.7, 0.1, 0.9, 1.7, 2.5, 0.2, 1.0, 1.8, 2.6, 3.4}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_7) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 
1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('c', {M, N}, sd::DataType::DOUBLE); NDArray exp('c', {M, N}, {-1.9, 1.3, -0.7, 0.1, 0.5, -0.9, 0.3, 0.3, -0.9, 1.5, 0.1, -0.7, 1.3, -1.9, 2.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_8) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('c', {M, N}, sd::DataType::DOUBLE); NDArray exp('c', {M, N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_9) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::FLOAT32); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::FLOAT32); NDArray c('c', {M, N}, sd::DataType::FLOAT32); NDArray exp('c', {M, N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_10) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType 
N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::FLOAT32); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::FLOAT32); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printIndexedBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_11) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::FLOAT32); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::FLOAT32); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_12) { int devCnt = 0; hipGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 4; const sd::LongType K = 4; const sd::LongType N = 4; NDArray a('f', {M, K}, {1., 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 2, 1, 0, 4, 7.}, sd::DataType::INT8); NDArray b('f', {K, N}, {-2, -3, 0, 1, 5, -6, 7, -8, 9, -1, 2, -2, 3, -4, 5, -6.}, sd::DataType::INT8); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {-16., -22., -23., -25., 30., -12., -38., -70., 20., 16., 18., 18., 22., -8., -28., -52.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } 
////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_13) { int devCnt = 0; hipGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1., 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, sd::DataType::INT8); NDArray b('c', {K, N}, {-2, -3, 0, 1, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::INT8); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {-109., -122., -135., 111., 120., 129., -121., -134., -147., 129., 144., 159., -130., -140., -150.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_14) { int devCnt = 0; hipGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1., 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, sd::DataType::INT8); NDArray b('c', {K, N}, {-2, -3, 0, 1, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::INT8); NDArray c('c', {M, N}, sd::DataType::FLOAT32); NDArray exp('c', {M, N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_15) { int devCnt = 0; hipGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); NDArray b('f', 
{K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_16) { int devCnt = 0; hipGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_17) { int devCnt = 0; hipGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('c', {M, N}, sd::DataType::FLOAT32); NDArray exp('c', {M, N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } 
////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_18) { int devCnt = 0; hipGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('f', {M, N}, sd::DataType::HALF); NDArray exp('f', {M, N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_19) { int devCnt = 0; hipGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('f', {M, N}, sd::DataType::HALF); NDArray exp('f', {M, N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_20) { int devCnt = 0; hipGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); 
NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('c', {M, N}, sd::DataType::HALF); NDArray exp('c', {M, N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } /* ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_21) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT8); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_22) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::FLOAT32); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::FLOAT32); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_23) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, 
{1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_24) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_25) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::HALF); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_26) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; // 3x4 * 4x5 = 3x5 NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT64); NDArray b('c', {K,N}, 
{-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_27) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_28) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-1.6, -0.7, 0.2, -0.8, 0.1, 1., -0., 0.9, 1.8, 0.8, 1.7, 2.6, 1.6, 2.5, 3.4}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } */ ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_1) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray x('f', {N}, {1, -2, 3, -4}, 
sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {0.1, 0.3, 0.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_2) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray x('f', {N}, {1, -2, 3, -4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_3) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray x('c', {N}, {1, -2, 3, -4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_4) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray x('c', {N}, {1, -2, 3, -4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_5) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, 
sd::DataType::DOUBLE); NDArray x('c', {N}, {1, -2, 3, -4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {0.1, 0.3, 0.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_6) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray temp('f', {M, N, 5}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0, 2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_7) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {M, N, 5}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0, 2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {5.1, 3.3, 1.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_8) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 
0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {N, M, 5}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(4, {1, 2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {6.2, 4.5, 1.7}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_9) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0, 1}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_10) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); 
sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_11) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('c', {5, N, M}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(13, {0, 2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-12.1, -10.9, -9.7}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_12) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('c', {5, N, M}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(10, {0, 2}); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {3.3, 3.3, 3.3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_13) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, 
-10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}, true); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_14) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('c', {5, N, M}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(10, {0, 2}, true); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {3.3, 3.3, 3.3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_15) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}); NDArray y = temp(17, {0, 2}); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// 
TEST_F(CudaBasicsTests2, mmulMxV_16) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray temp1('c', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}); NDArray y = temp1(17, {0, 2}); NDArray exp('c', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_17) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}); NDArray y = temp(17, {0, 2}, true); // y.printShapeInfo(); NDArray exp('f', {1, M, 1}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_18) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, 
sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray temp1('c', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}, true); NDArray y = temp1(17, {0, 2}, true); NDArray exp('c', {1, M, 1}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// /* TEST_F(CudaBasicsTests2, mmulMxV_19) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('f', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {0.1, 0.3, 0.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_20) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('f', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {-1.6, -0.7, 0.2}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_21) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M,N}, 
{1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('c', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::FLOAT32); NDArray exp('c', {M}, {-1.6, -0.7, 0.2}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_22) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_23) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0,1}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_24) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, 
{16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2},true); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_25) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0,1}, true); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_26) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::FLOAT32); NDArray x = temp(2, {0,1}); NDArray y = temp1(17, {0,2}); NDArray exp('c', {M}, {-0.3, 0.3, 0.9}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } 
////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_27) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::FLOAT32); NDArray x = temp(2, {0,1},true); NDArray y = temp1(17, {0,2},true); NDArray exp('c', {1,M,1}, {-0.3, 0.3, 0.9}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_28) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.1, 3.3, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_1) { const sd::LongType N = 4; NDArray x('c', {N}, {1, 2, 3, 4}, sd::DataType::INT32); NDArray y('f', {N}, {0.1, 0.2, 0.3, 0.4}, sd::DataType::FLOAT32); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } 
////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_2) { const sd::LongType N = 4; NDArray x('c', {1,1,N}, {1,2, 3, 4}, sd::DataType::INT32); NDArray y('f', {1,1,N,1,1,1}, {0.1, 0.2, 0.3, 0.4}, sd::DataType::FLOAT32); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_3) { const sd::LongType N = 4; NDArray xBig('c', {4,2}, {1, 0, 2, 0, 3, 0, 4, 0}, sd::DataType::INT32); NDArray yBig('c', {4,3}, {0.1, 0, 0, 0.2, 0, 0, 0.3, 0, 0, 0.4, 0,0}, sd::DataType::FLOAT32); NDArray x = xBig(0, {1}, true); NDArray y = yBig(0, {1}, true); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_4) { const sd::LongType N = 4; NDArray xBig('f', {4,2}, {1, 2, 3, 4, 0, 0, 0, 0}, sd::DataType::INT32); NDArray yBig('c', {4,3}, {0.1, 0, 0, 0.2, 0, 0, 0.3, 0, 0, 0.4, 0,0}, sd::DataType::FLOAT32); NDArray x = xBig(0, {1}, true); NDArray y = yBig(0, {1}); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } */
a7e11ef454e4a417e53fcaa6491367bb5bc0b276.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // #include <array/NDArray.h> #include <array/NDArrayFactory.h> #include <cuda.h> #include <graph/Context.h> #include <graph/Node.h> #include <graph/Variable.h> #include <graph/VariableSpace.h> #include <helpers/MmulHelper.h> #include <helpers/TAD.h> #include <ops/specials_cuda.h> #include "testlayers.h" using namespace sd; using namespace sd::graph; class CudaBasicsTests2 : public testing::Test { public: }; TEST_F(CudaBasicsTests2, test_devices_1) { auto caps = Environment::getInstance().capabilities(); ASSERT_FALSE(caps.empty()); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_1) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::FLOAT32); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::FLOAT32); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); 
sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printIndexedBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_2) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('f', {M, N}, sd::DataType::DOUBLE); NDArray exp('f', {M, N}, {-1.6, -0.7, 0.2, -0.8, 0.1, 1., -0., 0.9, 1.8, 0.8, 1.7, 2.6, 1.6, 2.5, 3.4}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_3) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('f', {M, N}, sd::DataType::DOUBLE); NDArray exp('f', {M, N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_4) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('c', {M, N}, sd::DataType::DOUBLE); NDArray exp('c', {M, N}, {0.1, 2.5, 4.9, 7.3, 9.7, 0.3, 2.7, 5.1, 7.5, 
9.9, 0.5, 2.9, 5.3, 7.7, 10.1}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); // NDArray* pA = a.permute({1,0}); // NDArray* pB = b.permute({1,0}); // NDArray* pC = c.permute({1,0}); // sd::MmulHelper::mmul(pB, pA, pC, 1., 0.); // ASSERT_TRUE(c.equalsTo(&exp)); // delete pA; // delete pB; // delete pC; } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_5) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('f', {M, N}, sd::DataType::DOUBLE); NDArray exp('f', {M, N}, {-8.8, -4.3, 0.2, 8.6, 4.1, -0.4, -8.4, -3.9, 0.6, 8.2, 3.7, -0.8, -8.0, -3.5, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_6) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('c', {M, N}, sd::DataType::DOUBLE); NDArray exp('c', {M, N}, {-1.6, -0.8, -0.0, 0.8, 1.6, -0.7, 0.1, 0.9, 1.7, 2.5, 0.2, 1.0, 1.8, 2.6, 3.4}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_7) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, 
sd::DataType::DOUBLE); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('c', {M, N}, sd::DataType::DOUBLE); NDArray exp('c', {M, N}, {-1.9, 1.3, -0.7, 0.1, 0.5, -0.9, 0.3, 0.3, -0.9, 1.5, 0.1, -0.7, 1.3, -1.9, 2.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_8) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::DOUBLE); NDArray c('c', {M, N}, sd::DataType::DOUBLE); NDArray exp('c', {M, N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_9) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::FLOAT32); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::FLOAT32); NDArray c('c', {M, N}, sd::DataType::FLOAT32); NDArray exp('c', {M, N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_10) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 
0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::FLOAT32); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::FLOAT32); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printIndexedBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_11) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::FLOAT32); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::FLOAT32); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_12) { int devCnt = 0; cudaGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 4; const sd::LongType K = 4; const sd::LongType N = 4; NDArray a('f', {M, K}, {1., 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 2, 1, 0, 4, 7.}, sd::DataType::INT8); NDArray b('f', {K, N}, {-2, -3, 0, 1, 5, -6, 7, -8, 9, -1, 2, -2, 3, -4, 5, -6.}, sd::DataType::INT8); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {-16., -22., -23., -25., 30., -12., -38., -70., 20., 16., 18., 18., 22., -8., -28., -52.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } 
////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_13) { int devCnt = 0; cudaGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1., 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, sd::DataType::INT8); NDArray b('c', {K, N}, {-2, -3, 0, 1, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::INT8); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {-109., -122., -135., 111., 120., 129., -121., -134., -147., 129., 144., 159., -130., -140., -150.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_14) { int devCnt = 0; cudaGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1., 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, sd::DataType::INT8); NDArray b('c', {K, N}, {-2, -3, 0, 1, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::INT8); NDArray c('c', {M, N}, sd::DataType::FLOAT32); NDArray exp('c', {M, N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_15) { int devCnt = 0; cudaGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); NDArray 
b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_16) { int devCnt = 0; cudaGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('f', {M, N}, sd::DataType::FLOAT32); NDArray exp('f', {M, N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_17) { int devCnt = 0; cudaGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('c', {M, N}, sd::DataType::FLOAT32); NDArray exp('c', {M, N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } 
////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_18) { int devCnt = 0; cudaGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); NDArray b('f', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('f', {M, N}, sd::DataType::HALF); NDArray exp('f', {M, N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_19) { int devCnt = 0; cudaGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('f', {M, N}, sd::DataType::HALF); NDArray exp('f', {M, N}, {-1.9, -0.9, 0.1, 1.3, 0.3, -0.7, -0.7, 0.3, 1.3, 0.1, -0.9, -1.9, 0.5, 1.5, 2.5}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_20) { int devCnt = 0; cudaGetDevice(&devCnt); if (Environment::getInstance().capabilities()[devCnt].first() < 5.3) return; const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M, K}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::HALF); 
NDArray b('c', {K, N}, {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}, sd::DataType::HALF); NDArray c('c', {M, N}, sd::DataType::HALF); NDArray exp('c', {M, N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 1e-1)); } /* ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_21) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT8); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_22) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::FLOAT32); NDArray b('c', {K,N}, {-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::FLOAT32); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_23) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, 
{1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::HALF); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_24) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_25) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::DOUBLE); NDArray b('c', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::HALF); NDArray exp('c', {M,N}, {-8.8, 8.6, -8.4, 8.2, -8.0, -4.3, 4.1, -3.9, 3.7, -3.5, 0.2, -0.4, 0.6, -0.8, 1.}, sd::DataType::HALF); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_26) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; // 3x4 * 4x5 = 3x5 NDArray a('c', {M,K}, {1.,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::INT64); NDArray b('c', {K,N}, 
{-2,-3,0,1,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::FLOAT32); NDArray c('c', {M,N}, sd::DataType::DOUBLE); NDArray exp('c', {M,N}, {-45., 43., -49., 53., -50., -97., 79., -101., 113., -90., -149., 115., -153., 173., -130.}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_27) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('f', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::HALF); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {0.1, 0.3, 0.5, 2.5, 2.7, 2.9, 4.9, 5.1, 5.3, 7.3, 7.5, 7.7, 9.7, 9.9, 10.1}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); // c.printBuffer(); ASSERT_TRUE(c.equalsTo(&exp, 0.01)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxM_28) { const sd::LongType M = 3; const sd::LongType K = 4; const sd::LongType N = 5; NDArray a('c', {M,K}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray b('f', {K,N}, {1,-2,3,-4,5,-6,7,-8,9,-10,11,-12,13,-14,15,-16,17,-18,19,-20}, sd::DataType::DOUBLE); NDArray c('f', {M,N}, sd::DataType::FLOAT32); NDArray exp('f', {M,N}, {-1.6, -0.7, 0.2, -0.8, 0.1, 1., -0., 0.9, 1.8, 0.8, 1.7, 2.6, 1.6, 2.5, 3.4}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &b, &c, 1., 0.); ASSERT_TRUE(c.equalsTo(&exp)); } */ ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_1) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray x('f', {N}, {1, -2, 3, -4}, 
sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {0.1, 0.3, 0.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_2) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray x('f', {N}, {1, -2, 3, -4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_3) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray x('c', {N}, {1, -2, 3, -4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_4) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray x('c', {N}, {1, -2, 3, -4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {-1.6, -0.7, 0.2}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_5) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, 
sd::DataType::DOUBLE); NDArray x('c', {N}, {1, -2, 3, -4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {0.1, 0.3, 0.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_6) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M, N}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); NDArray temp('f', {M, N, 5}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0, 2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_7) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {M, N, 5}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0, 2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {5.1, 3.3, 1.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_8) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 
0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {N, M, 5}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(4, {1, 2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {6.2, 4.5, 1.7}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_9) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0, 1}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_10) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); 
sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_11) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('c', {5, N, M}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(13, {0, 2}); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-12.1, -10.9, -9.7}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_12) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('c', {5, N, M}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(10, {0, 2}); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {3.3, 3.3, 3.3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_13) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, 
-10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}, true); NDArray y('f', {M}, sd::DataType::DOUBLE); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_14) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('c', {5, N, M}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(10, {0, 2}, true); NDArray y('c', {M}, sd::DataType::DOUBLE); NDArray exp('c', {M}, {3.3, 3.3, 3.3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_15) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}); NDArray y = temp(17, {0, 2}); NDArray exp('f', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// 
TEST_F(CudaBasicsTests2, mmulMxV_16) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray temp1('c', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}); NDArray y = temp1(17, {0, 2}); NDArray exp('c', {M}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_17) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}); NDArray y = temp(17, {0, 2}, true); // y.printShapeInfo(); NDArray exp('f', {1, M, 1}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_18) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N, M}, {1.2, 1.1, 1.0, 0.9, 0.8, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1, 0}, 
sd::DataType::DOUBLE); a.permutei({1, 0}); NDArray temp('f', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray temp1('c', {5, M, N}, {16, 2, -6, 7, 2, -2, 4, -7, 6, 4, 4, 6, -3, 1, 3, 9, 1, 4, 9, 10, -10, -3, -8, 7, -7, -7, 6, 9, 7, -6, 8, 7, -3, -3, 4, -2, 5, -3, -3, 4, 6, -5, -1, 7, -5, 4, -10, -1, 8, 0, -7, 4, -10, -7, -8, -9, 2, 9, 7, 9}, sd::DataType::DOUBLE); NDArray x = temp(2, {0, 1}, true); NDArray y = temp1(17, {0, 2}, true); NDArray exp('c', {1, M, 1}, {-0.3, 0.3, 0.9}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// /* TEST_F(CudaBasicsTests2, mmulMxV_19) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('f', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {0.1, 0.3, 0.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_20) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('f', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {-1.6, -0.7, 0.2}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_21) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M,N}, 
{1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray x('c', {N}, {1,-2,3,-4}, sd::DataType::DOUBLE); NDArray y('c', {M}, sd::DataType::FLOAT32); NDArray exp('c', {M}, {-1.6, -0.7, 0.2}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_22) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_23) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0,1}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_24) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, 
{16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2},true); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.5, 5.1, 4.7}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_25) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('f', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(3, {0,1}, true); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {1.5, 1.8, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_26) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::FLOAT32); NDArray x = temp(2, {0,1}); NDArray y = temp1(17, {0,2}); NDArray exp('c', {M}, {-0.3, 0.3, 0.9}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } 
////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_27) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {N,M}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); a.permutei({1,0}); NDArray temp('f', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray temp1('c', {5,M,N}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::FLOAT32); NDArray x = temp(2, {0,1},true); NDArray y = temp1(17, {0,2},true); NDArray exp('c', {1,M,1}, {-0.3, 0.3, 0.9}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulMxV_28) { const sd::LongType M = 3; const sd::LongType N = 4; NDArray a('c', {M,N}, {1.2,1.1,1.0,0.9,0.8,0.7,0.5,0.4,0.3,0.2,0.1,0}, sd::DataType::FLOAT32); NDArray temp('f', {M,N,5}, {16,2,-6,7,2,-2,4,-7,6,4,4,6,-3,1,3,9,1,4,9,10,-10,-3,-8,7,-7,-7,6,9,7,-6,8,7,-3,-3,4,-2,5,-3,-3,4,6,-5,-1,7,-5,4,-10,-1,8,0,-7,4,-10,-7,-8,-9,2,9,7,9}, sd::DataType::DOUBLE); NDArray x = temp(6, {0,2}); NDArray y('f', {M}, sd::DataType::FLOAT32); NDArray exp('f', {M}, {5.1, 3.3, 1.5}, sd::DataType::FLOAT32); sd::MmulHelper::mmul(&a, &x, &y, 1., 0.); ASSERT_TRUE(y.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_1) { const sd::LongType N = 4; NDArray x('c', {N}, {1, 2, 3, 4}, sd::DataType::INT32); NDArray y('f', {N}, {0.1, 0.2, 0.3, 0.4}, sd::DataType::FLOAT32); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } 
////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_2) { const sd::LongType N = 4; NDArray x('c', {1,1,N}, {1,2, 3, 4}, sd::DataType::INT32); NDArray y('f', {1,1,N,1,1,1}, {0.1, 0.2, 0.3, 0.4}, sd::DataType::FLOAT32); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_3) { const sd::LongType N = 4; NDArray xBig('c', {4,2}, {1, 0, 2, 0, 3, 0, 4, 0}, sd::DataType::INT32); NDArray yBig('c', {4,3}, {0.1, 0, 0, 0.2, 0, 0, 0.3, 0, 0, 0.4, 0,0}, sd::DataType::FLOAT32); NDArray x = xBig(0, {1}, true); NDArray y = yBig(0, {1}, true); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests2, mmulDot_4) { const sd::LongType N = 4; NDArray xBig('f', {4,2}, {1, 2, 3, 4, 0, 0, 0, 0}, sd::DataType::INT32); NDArray yBig('c', {4,3}, {0.1, 0, 0, 0.2, 0, 0, 0.3, 0, 0, 0.4, 0,0}, sd::DataType::FLOAT32); NDArray x = xBig(0, {1}, true); NDArray y = yBig(0, {1}); NDArray z(sd::DataType::DOUBLE); NDArray exp('c', {}, {3}, sd::DataType::DOUBLE); sd::MmulHelper::mmul(&x, &y, &z); ASSERT_TRUE(z.equalsTo(&exp)); } */
8fb3c4f4bb427b5c0fe866d022295c1e82d001c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * kmeans_task_sgpu.cu * * Created on: Mar 22, 2017 * Author: chao */ #include "nn_task_sgpu.h" #include "../../../common/helper_err.h" #include "nn_kernel.h" template<typename T> void nnSGPU<T>::initImpl(T*objects, T*objsNN, int numObjs, int numCoords, int numNN){ if(__localThreadId ==0){ std::cout<<"task: "<<getCurrentTask()->getName()<<"begin init ...\n"; this->objects = objects; this->objsNN = objsNN; this->numNN = numNN; this->numObjs = numObjs; this->numCoords = numCoords; } intra_Barrier(); if(__localThreadId ==0){ std::cout<<"task: "<<getCurrentTask()->getName()<<" finish initImpl.\n"; } } template<typename T> void nnSGPU<T>::runImpl(double *runtime, MemType memtype){ if(__localThreadId == 0){ std::cout<<getCurrentTask()->getName()<<" begin run ..."<<std::endl; } Timer timer, timer0; double totaltime; /* target center */ T *targetObj = new T[numCoords]; for(int i=0; i<numCoords; i++) targetObj[i] = 0; GpuData<T> objs_d(numObjs*numCoords, memtype); GpuData<T> distanceObjs(numObjs, memtype); GpuData<T> targetObj_d(numCoords, memtype); objs_d.initH(objects); targetObj_d.initH(targetObj); timer0.start(); timer.start(); objs_d.sync(); targetObj_d.sync(); double copyinTime = timer.stop(); double kernelTime1 =0; double kernelTime2 =0; double copyoutTime = 0; double hostCompTime = 0; int batchPerThread = 1; int blocksize = 256; int gridsize = (numObjs + blocksize*batchPerThread -1)/(blocksize*batchPerThread); dim3 block(blocksize, 1, 1); dim3 grid(gridsize, 1, 1); timer.start(); hipLaunchKernelGGL(( distance_kernel), dim3(grid), dim3(block), 0, __streamId, objs_d.getD(), numCoords, numObjs, targetObj_d.getD(), distanceObjs.getD(true), batchPerThread); hipStreamSynchronize(__streamId); checkCudaErr(hipGetLastError()); kernelTime1 = timer.stop(); timer.start(); GpuData<int> *topkIndexArray; int blocksize2 = 16; int gridsize2 = 1; if(numObjs > blocksize2*numNN*8) gridsize2 = 
numObjs/(blocksize2*numNN*8); if(gridsize2 >=16){ topkIndexArray = new GpuData<int>(numNN*gridsize2, memtype); dim3 block2(blocksize2, 1, 1); dim3 grid2(gridsize2, 1, 1); hipLaunchKernelGGL(( topk_kernel), dim3(grid2), dim3(block2), 0, __streamId, numObjs, numNN, distanceObjs.getD(), topkIndexArray->getD(true)); hipStreamSynchronize(__streamId); checkCudaErr(hipGetLastError()); } kernelTime2 = timer.stop(); timer.start(); distanceObjs.sync(); if(gridsize2 >=16){ topkIndexArray->sync(); } copyoutTime += timer.stop(); /* find k nearest objs */ if(gridsize2<16){ T *distancePtr = distanceObjs.getH(); timer.start(); for(int i=0; i<numNN; i++){ int min =0; while(distancePtr[min]<0) min++; for(int j=0; j<numObjs; j++){ if(distancePtr[j]>=0 && distancePtr[min]>distancePtr[j]) min = j; } distancePtr[min] = -1; for(int j=0; j<numCoords; j++) objsNN[i*numCoords + j] = objects[min*numCoords + j]; } hostCompTime = timer.stop(); } else{ T *distancePtr = distanceObjs.getH(); int *topkindexPtr = topkIndexArray->getH(); timer.start(); for(int i=0; i<numNN; i++){ int min = 0; while(distancePtr[topkindexPtr[min]]<0) min++; for(int j=0; j<topkIndexArray->getSize(); j++){ if(distancePtr[topkindexPtr[j]]>=0 && distancePtr[topkindexPtr[min]]>distancePtr[topkindexPtr[j]]) min = j; } distancePtr[topkindexPtr[min]] = -1; for(int j=0; j<numCoords; j++){ objsNN[i*numCoords +j] = objects[topkindexPtr[min]*numCoords+j]; } } hostCompTime = timer.stop(); } totaltime = timer0.stop(); //runtime[0] = kernelTime + copyinTime + copyoutTime + hostCompTime; runtime[0] = totaltime; runtime[1] = kernelTime1; runtime[2] = kernelTime2; runtime[3] = copyinTime; runtime[4] = copyoutTime; runtime[5] = hostCompTime; if(__localThreadId ==0){ std::cout<<"task: "<<getCurrentTask()->getName()<<" finish runImpl.\n"; } } template class nnSGPU<float>; template class nnSGPU<double>;
8fb3c4f4bb427b5c0fe866d022295c1e82d001c4.cu
/* * kmeans_task_sgpu.cu * * Created on: Mar 22, 2017 * Author: chao */ #include "nn_task_sgpu.h" #include "../../../common/helper_err.h" #include "nn_kernel.h" template<typename T> void nnSGPU<T>::initImpl(T*objects, T*objsNN, int numObjs, int numCoords, int numNN){ if(__localThreadId ==0){ std::cout<<"task: "<<getCurrentTask()->getName()<<"begin init ...\n"; this->objects = objects; this->objsNN = objsNN; this->numNN = numNN; this->numObjs = numObjs; this->numCoords = numCoords; } intra_Barrier(); if(__localThreadId ==0){ std::cout<<"task: "<<getCurrentTask()->getName()<<" finish initImpl.\n"; } } template<typename T> void nnSGPU<T>::runImpl(double *runtime, MemType memtype){ if(__localThreadId == 0){ std::cout<<getCurrentTask()->getName()<<" begin run ..."<<std::endl; } Timer timer, timer0; double totaltime; /* target center */ T *targetObj = new T[numCoords]; for(int i=0; i<numCoords; i++) targetObj[i] = 0; GpuData<T> objs_d(numObjs*numCoords, memtype); GpuData<T> distanceObjs(numObjs, memtype); GpuData<T> targetObj_d(numCoords, memtype); objs_d.initH(objects); targetObj_d.initH(targetObj); timer0.start(); timer.start(); objs_d.sync(); targetObj_d.sync(); double copyinTime = timer.stop(); double kernelTime1 =0; double kernelTime2 =0; double copyoutTime = 0; double hostCompTime = 0; int batchPerThread = 1; int blocksize = 256; int gridsize = (numObjs + blocksize*batchPerThread -1)/(blocksize*batchPerThread); dim3 block(blocksize, 1, 1); dim3 grid(gridsize, 1, 1); timer.start(); distance_kernel<<<grid, block, 0, __streamId>>>( objs_d.getD(), numCoords, numObjs, targetObj_d.getD(), distanceObjs.getD(true), batchPerThread); cudaStreamSynchronize(__streamId); checkCudaErr(cudaGetLastError()); kernelTime1 = timer.stop(); timer.start(); GpuData<int> *topkIndexArray; int blocksize2 = 16; int gridsize2 = 1; if(numObjs > blocksize2*numNN*8) gridsize2 = numObjs/(blocksize2*numNN*8); if(gridsize2 >=16){ topkIndexArray = new GpuData<int>(numNN*gridsize2, memtype); dim3 
block2(blocksize2, 1, 1); dim3 grid2(gridsize2, 1, 1); topk_kernel<<<grid2, block2, 0, __streamId>>>( numObjs, numNN, distanceObjs.getD(), topkIndexArray->getD(true)); cudaStreamSynchronize(__streamId); checkCudaErr(cudaGetLastError()); } kernelTime2 = timer.stop(); timer.start(); distanceObjs.sync(); if(gridsize2 >=16){ topkIndexArray->sync(); } copyoutTime += timer.stop(); /* find k nearest objs */ if(gridsize2<16){ T *distancePtr = distanceObjs.getH(); timer.start(); for(int i=0; i<numNN; i++){ int min =0; while(distancePtr[min]<0) min++; for(int j=0; j<numObjs; j++){ if(distancePtr[j]>=0 && distancePtr[min]>distancePtr[j]) min = j; } distancePtr[min] = -1; for(int j=0; j<numCoords; j++) objsNN[i*numCoords + j] = objects[min*numCoords + j]; } hostCompTime = timer.stop(); } else{ T *distancePtr = distanceObjs.getH(); int *topkindexPtr = topkIndexArray->getH(); timer.start(); for(int i=0; i<numNN; i++){ int min = 0; while(distancePtr[topkindexPtr[min]]<0) min++; for(int j=0; j<topkIndexArray->getSize(); j++){ if(distancePtr[topkindexPtr[j]]>=0 && distancePtr[topkindexPtr[min]]>distancePtr[topkindexPtr[j]]) min = j; } distancePtr[topkindexPtr[min]] = -1; for(int j=0; j<numCoords; j++){ objsNN[i*numCoords +j] = objects[topkindexPtr[min]*numCoords+j]; } } hostCompTime = timer.stop(); } totaltime = timer0.stop(); //runtime[0] = kernelTime + copyinTime + copyoutTime + hostCompTime; runtime[0] = totaltime; runtime[1] = kernelTime1; runtime[2] = kernelTime2; runtime[3] = copyinTime; runtime[4] = copyoutTime; runtime[5] = hostCompTime; if(__localThreadId ==0){ std::cout<<"task: "<<getCurrentTask()->getName()<<" finish runImpl.\n"; } } template class nnSGPU<float>; template class nnSGPU<double>;
287ac023c69f3f315a91566f731530ae49ffbdd2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @author Mark Gates @author Tingxing Dong @author Azzam Haidar @precisions normal z -> s d c */ #include "magma_internal.h" #include "commonblas_z.h" #include "magma_templates.h" #define PRECISION_z #include "gemv_template_device.cuh" #include "gemv_config/gemvn_param.h" #include "gemv_config/gemvt_param.h" #define version(s,v) s ## _V_ ## v /******************************************************************************/ // NoTrans kernel template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> __global__ void zgemvn_template_kernel_fermi( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex * __restrict__ A, int lda, const magmaDoubleComplex * __restrict__ x, int incx, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) gemvn_template_device<magmaDoubleComplex, DIM_X, DIM_Y, TILE_SIZE> (m, n, alpha, A, lda, x, incx, beta, y, incy); #endif /* (__CUDA_ARCH__ >= 200) */ } /******************************************************************************/ // Trans/ConjTans kernel template<const int DIM_X, const int DIM_Y, const int TILE_SIZE, magma_trans_t trans> __global__ void zgemvc_template_kernel_fermi( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex * __restrict__ A, int lda, const magmaDoubleComplex * __restrict__ x, int incx, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) gemvc_template_device< magmaDoubleComplex, DIM_X, DIM_Y, TILE_SIZE, trans > (m, n, alpha, A, lda, x, incx, beta, y, incy); #endif /* (__CUDA_ARCH__ >= 200) */ } /******************************************************************************/ // NoTrans CPU driver template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> void 
zgemvn_template_fermi( magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex * __restrict__ A, magma_int_t lda, const magmaDoubleComplex * __restrict__ x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, magma_int_t incy, magma_queue_t queue) { dim3 grid( magma_ceildiv(m, TILE_SIZE), 1 ); dim3 threads( DIM_X, DIM_Y ); hipLaunchKernelGGL(( zgemvn_template_kernel_fermi<DIM_X, DIM_Y, TILE_SIZE>) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, A, lda, x, incx, beta, y, incy); } /******************************************************************************/ // Trans/ConjTans CPU driver template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> void zgemvc_template_fermi( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex * __restrict__ A, magma_int_t lda, const magmaDoubleComplex * __restrict__ x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, magma_int_t incy, magma_queue_t queue) { dim3 grid ( magma_ceildiv(n, TILE_SIZE), 1 ); dim3 threads ( DIM_X, DIM_Y ); if (trans == MagmaConjTrans) { hipLaunchKernelGGL(( zgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaConjTrans >) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, A, lda, x, incx, beta, y, incy); } else { hipLaunchKernelGGL(( zgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaTrans >) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, A, lda, x, incx, beta, y, incy); } } /***************************************************************************//** Purpose ------- ZGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. 
Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX_16 On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX_16 array of dimension ( LDDA, n ) on the GPU. @param[in] ldda INTEGER LDDA specifies the leading dimension of A. @param[in] dx COMPLEX_16 array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX_16 On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] dy COMPLEX_16 array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_gemv *******************************************************************************/ extern "C" void magmablas_zgemv( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex_const_ptr dx, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_int_t incy, magma_queue_t queue) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( trans == MagmaNoTrans ) { zgemvn_template_fermi<version(N, 106)> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue ); } else { zgemvc_template_fermi<version(T, 189)> ( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue ); } }
287ac023c69f3f315a91566f731530ae49ffbdd2.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @author Mark Gates @author Tingxing Dong @author Azzam Haidar @precisions normal z -> s d c */ #include "magma_internal.h" #include "commonblas_z.h" #include "magma_templates.h" #define PRECISION_z #include "gemv_template_device.cuh" #include "gemv_config/gemvn_param.h" #include "gemv_config/gemvt_param.h" #define version(s,v) s ## _V_ ## v /******************************************************************************/ // NoTrans kernel template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> __global__ void zgemvn_template_kernel_fermi( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex * __restrict__ A, int lda, const magmaDoubleComplex * __restrict__ x, int incx, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) gemvn_template_device<magmaDoubleComplex, DIM_X, DIM_Y, TILE_SIZE> (m, n, alpha, A, lda, x, incx, beta, y, incy); #endif /* (__CUDA_ARCH__ >= 200) */ } /******************************************************************************/ // Trans/ConjTans kernel template<const int DIM_X, const int DIM_Y, const int TILE_SIZE, magma_trans_t trans> __global__ void zgemvc_template_kernel_fermi( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex * __restrict__ A, int lda, const magmaDoubleComplex * __restrict__ x, int incx, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) gemvc_template_device< magmaDoubleComplex, DIM_X, DIM_Y, TILE_SIZE, trans > (m, n, alpha, A, lda, x, incx, beta, y, incy); #endif /* (__CUDA_ARCH__ >= 200) */ } /******************************************************************************/ // NoTrans CPU driver template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> void zgemvn_template_fermi( magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, const 
magmaDoubleComplex * __restrict__ A, magma_int_t lda, const magmaDoubleComplex * __restrict__ x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, magma_int_t incy, magma_queue_t queue) { dim3 grid( magma_ceildiv(m, TILE_SIZE), 1 ); dim3 threads( DIM_X, DIM_Y ); zgemvn_template_kernel_fermi<DIM_X, DIM_Y, TILE_SIZE> <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, A, lda, x, incx, beta, y, incy); } /******************************************************************************/ // Trans/ConjTans CPU driver template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> void zgemvc_template_fermi( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex * __restrict__ A, magma_int_t lda, const magmaDoubleComplex * __restrict__ x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, magma_int_t incy, magma_queue_t queue) { dim3 grid ( magma_ceildiv(n, TILE_SIZE), 1 ); dim3 threads ( DIM_X, DIM_Y ); if (trans == MagmaConjTrans) { zgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaConjTrans > <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, A, lda, x, incx, beta, y, incy); } else { zgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaTrans > <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, A, lda, x, incx, beta, y, incy); } } /***************************************************************************//** Purpose ------- ZGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. 
Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX_16 On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX_16 array of dimension ( LDDA, n ) on the GPU. @param[in] ldda INTEGER LDDA specifies the leading dimension of A. @param[in] dx COMPLEX_16 array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX_16 On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] dy COMPLEX_16 array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_gemv *******************************************************************************/ extern "C" void magmablas_zgemv( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex_const_ptr dx, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_int_t incy, magma_queue_t queue) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( trans == MagmaNoTrans ) { zgemvn_template_fermi<version(N, 106)> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue ); } else { zgemvc_template_fermi<version(T, 189)> ( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue ); } }
d82361a49b92d3f8fd94e537dbe6b62b2f52661f.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <iostream> #include <cstdlib> #include <hip/hip_runtime.h> #include <ctime> #include <rocblas.h> #include <hiprand/hiprand.h> #include "core_system/math/matrix.h" //void gpu_blas_mmul(hipblasHandle_t handle, const float *A, const float *B, float *C, const int m, // const int k, const int n) { void gpu_blas_mmul(hipblasHandle_t handle, const float *A, const float *B, float *C, const int m, const int k, const int n) { int lda = m, ldb = k, ldc = m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS /*hipblasHandle_t handle; hipblasCreate(&handle);*/ // Do the actual multiplication hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); // std::cout<<"Yes called"; // Destroy the handle //hipblasDestroy(handle); } void GPU_Multiply_Matrix_Vector(hipblasHandle_t handle, thrust::device_vector<float> &A, int rowA, int colA, thrust::device_vector<float> &B, int rowB, int colB, thrust::device_vector<float> &C) { /*thrust::device_vector<float> GPU_Multiply_Matrix_Vector(thrust::device_vector<float> A, int rowA, int colA, thrust::device_vector<float> B, int rowB, int colB) {*/ //*A and *B -- are Matrices in column-major format //*A will be a Matrix and *B a vector for Matrix to vector multiplication //rowA,colA,rowB,colB -- are the transformed values of original Matrices gpu_blas_mmul(handle, thrust::raw_pointer_cast(&A[0]), thrust::raw_pointer_cast(&B[0]), thrust::raw_pointer_cast(&C[0]), colA, colB, rowB); // return C; //rows=rowB and col=colA all outputs are in column-major format including row,col } void convert_thrust_matrix(math::matrix<double> A, thrust::device_vector<float> &d_A) { //converting matrix A from row-major to thrust::device_vector in column-major format for (int i = 0; i < A.size1(); i++) { for(int 
j=0;j<A.size2();j++){ d_A[j * A.size2() + i] = (float) A(i,j); } } //return d_A; }
d82361a49b92d3f8fd94e537dbe6b62b2f52661f.cu
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <iostream> #include <cstdlib> #include <cuda_runtime.h> #include <ctime> #include <cublas_v2.h> #include <curand.h> #include "core_system/math/matrix.h" //void gpu_blas_mmul(cublasHandle_t handle, const float *A, const float *B, float *C, const int m, // const int k, const int n) { void gpu_blas_mmul(cublasHandle_t handle, const float *A, const float *B, float *C, const int m, const int k, const int n) { int lda = m, ldb = k, ldc = m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS /*cublasHandle_t handle; cublasCreate(&handle);*/ // Do the actual multiplication cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); // std::cout<<"Yes called"; // Destroy the handle //cublasDestroy(handle); } void GPU_Multiply_Matrix_Vector(cublasHandle_t handle, thrust::device_vector<float> &A, int rowA, int colA, thrust::device_vector<float> &B, int rowB, int colB, thrust::device_vector<float> &C) { /*thrust::device_vector<float> GPU_Multiply_Matrix_Vector(thrust::device_vector<float> A, int rowA, int colA, thrust::device_vector<float> B, int rowB, int colB) {*/ //*A and *B -- are Matrices in column-major format //*A will be a Matrix and *B a vector for Matrix to vector multiplication //rowA,colA,rowB,colB -- are the transformed values of original Matrices gpu_blas_mmul(handle, thrust::raw_pointer_cast(&A[0]), thrust::raw_pointer_cast(&B[0]), thrust::raw_pointer_cast(&C[0]), colA, colB, rowB); // return C; //rows=rowB and col=colA all outputs are in column-major format including row,col } void convert_thrust_matrix(math::matrix<double> A, thrust::device_vector<float> &d_A) { //converting matrix A from row-major to thrust::device_vector in column-major format for (int i = 0; i < A.size1(); i++) { for(int j=0;j<A.size2();j++){ d_A[j * A.size2() + i] = (float) A(i,j); } } //return d_A; }
442a3a3f7460e193a89448a3b844b4497acf69a6.hip
// !!! This is a file automatically generated by hipify!!! // Compile: nvcc -g -G -arch=sm_61 -std=c++11 assignment5-p5.cu -o assignment5-p5 #include <cmath> #include <cstdlib> #include <hip/hip_runtime.h> #include <iostream> #include <sys/time.h> #define N 512 #define THRESHOLD (0.000001) using std::cerr; using std::cout; using std::endl; // TODO: Edit the function definition as required __global__ void kernel1(float* d_in, float* d_out) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.z * blockDim.z + threadIdx.z; if(i>=1 and i<N-1 and j>=1 and j<N-1 and k>=1 and k<N-1){ d_out[i*N*N + j*N + k] = 0.8 * (d_in[(i-1)*N*N + j*N + k] + d_in[(i+1)*N*N + j*N + k] + d_in[i*N*N + (j-1)*N + k] + d_in[i*N*N + (j+1)*N + k] + d_in[i*N*N + j*N + (k-1)] + d_in[i*N*N + j*N + (k+1)]); } } // TODO: Edit the function definition as required __global__ void kernel2(float* d_in, float* d_out) { int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.z * blockDim.z + threadIdx.z; if(i>=1 and i<N-1 and j>=1 and j<N-1 and k>=1 and k<N-1){ d_out[i*N*N + j*N + k] = 0.8 * (d_in[(i-1)*N*N + j*N + k] + d_in[(i+1)*N*N + j*N + k] + d_in[i*N*N + (j-1)*N + k] + d_in[i*N*N + (j+1)*N + k] + d_in[i*N*N + j*N + (k-1)] + d_in[i*N*N + j*N + (k+1)]); } } __global__ void kernel3(float* d_in, float* d_out) { int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.z * blockDim.z + threadIdx.z; int tj = threadIdx.y; int tk = threadIdx.x; int ti = threadIdx.z; __shared__ float mat[32][8][4]; mat[tk][tj][ti] = d_in[i*N*N + j*N + k]; __syncthreads(); if(i>=1 and i<N-1 and j>=1 and j<N-1 and k>=1 and k<N-1){ float val = 0; if(ti < 1) val += d_in[(i-1)*N*N + j*N + k]; else val += mat[tk][tj][ti-1]; if(ti + 1 > 3) val += d_in[(i+1)*N*N + j*N + k]; else val += mat[tk][tj][ti+1]; if(tj < 1) val += d_in[i*N*N + (j-1)*N + k]; else val += 
mat[tk][tj-1][ti]; if(tj + 1 > 7) val += d_in[i*N*N + (j+1)*N + k]; else val += mat[tk][tj+1][ti]; if(tk < 1) val += d_in[i*N*N + j*N + k-1]; else val += mat[tk-1][tj][ti]; if(tk + 1 > 31) val += d_in[i*N*N + j*N + k+1]; else val += mat[tk+1][tj][ti]; d_out[i*N*N + j*N + k] = 0.8 * val; } __syncthreads(); } // TODO: Edit the function definition as required __host__ void stencil(float* h_in, float* h_out) { for(int i = 1; i < N-1; i++){ for(int j = 1; j < N-1; j++){ for(int k = 1; k < N-1; k++){ h_out[i*N*N + j*N + k] = 0.8 * (h_in[(i-1)*N*N + j*N + k] + h_in[(i+1)*N*N + j*N + k] + h_in[i*N*N + (j-1)*N + k] + h_in[i*N*N + (j+1)*N + k] + h_in[i*N*N + j*N + (k-1)] + h_in[i*N*N + j*N + (k+1)]); } } } } __host__ void check_result(float* w_ref, float* w_opt) { double maxdiff = 0.0, this_diff = 0.0; int numdiffs = 0; for (uint64_t i = 0; i < N; i++) { for (uint64_t j = 0; j < N; j++) { for (uint64_t k = 0; k < N; k++) { this_diff = w_ref[i + N * j + N * N * k] - w_opt[i + N * j + N * N * k]; if (::fabs(this_diff) > THRESHOLD) { numdiffs++; if (this_diff > maxdiff) { maxdiff = this_diff; } } } } } if (numdiffs > 0) { cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff << endl; } else { cout << "No differences found between base and test versions\n"; } } double rtclock() { // Seconds struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) { cout << "Error return from gettimeofday: " << stat << "\n"; } return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } int main() { uint64_t SIZE = N * N * N; float*h_in, *h_cpu_out, *h_gpu1_out, *h_gpu2_out, *h_gpu3_out; h_in = (float*)malloc(SIZE * sizeof(float)); h_cpu_out = (float*)malloc(SIZE * sizeof(float)); h_gpu1_out = (float*)malloc(SIZE * sizeof(float)); h_gpu2_out = (float*)malloc(SIZE * sizeof(float)); h_gpu3_out = (float*)malloc(SIZE * sizeof(float)); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { for (int k = 0; k < N; k++) { h_in[i * N * N 
+ j * N + k] = rand() % 64; h_cpu_out[i * N * N + j * N + k] = 0; h_gpu1_out[i * N * N + j * N + k] = 0; h_gpu2_out[i * N * N + j * N + k] = 0; h_gpu3_out[i * N * N + j * N + k] = 0; } } } double clkbegin = rtclock(); stencil(h_in, h_cpu_out); double clkend = rtclock(); double cpu_time = clkend - clkbegin; cout << "Stencil time on CPU: " << cpu_time * 1000 << " msec" << endl; hipError_t status; hipEvent_t start, end; // TODO: Fill in kernel1 // TODO: Adapt check_result() and invoke float *d_in, *d_out1; dim3 threadsPerBlock(32,32,1); dim3 numBlocks(N/threadsPerBlock.x, N/threadsPerBlock.y, N/threadsPerBlock.z); status = hipMalloc(&d_in, SIZE * sizeof(float)); if (status != hipSuccess) { fprintf(stderr, "hipMalloc() failed"); return EXIT_FAILURE; } status = hipMalloc(&d_out1, SIZE * sizeof(float)); if (status != hipSuccess) { fprintf(stderr, "hipMalloc() failed"); return EXIT_FAILURE; } hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start, 0); status = hipMemcpy(d_in, h_in, SIZE * sizeof(float), hipMemcpyHostToDevice); if (status != hipSuccess) { fprintf(stderr, "hipMemcpy() failed"); return EXIT_FAILURE; } status = hipMemcpy(d_out1, h_gpu1_out, SIZE * sizeof(float), hipMemcpyHostToDevice); if (status != hipSuccess) { fprintf(stderr, "hipMemcpy() failed"); return EXIT_FAILURE; } hipLaunchKernelGGL(( kernel1), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_in, d_out1); status = hipMemcpy(h_gpu1_out, d_out1, SIZE * sizeof(float), hipMemcpyDeviceToHost); if (status != hipSuccess) { fprintf(stderr, "hipMemcpy() failed"); return EXIT_FAILURE; } hipEventRecord(end, 0); hipEventSynchronize(end); float kernel_time; hipEventElapsedTime(&kernel_time, start, end); hipEventDestroy(start); hipEventDestroy(end); check_result(h_cpu_out, h_gpu1_out); std::cout << "Kernel 1 time (ms): " << kernel_time << "\n"; // TODO: Fill in kernel2 // TODO: Adapt check_result() and invoke float *d_out2; status = hipMalloc(&d_out2, SIZE * sizeof(float)); if (status != hipSuccess) { 
fprintf(stderr, "hipMalloc() failed"); return EXIT_FAILURE; } hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start, 0); status = hipMemcpy(d_in, h_in, SIZE * sizeof(float), hipMemcpyHostToDevice); if (status != hipSuccess) { fprintf(stderr, "hipMemcpy() failed"); return EXIT_FAILURE; } status = hipMemcpy(d_out2, h_gpu2_out, SIZE * sizeof(float), hipMemcpyHostToDevice); if (status != hipSuccess) { fprintf(stderr, "hipMemcpy() failed"); return EXIT_FAILURE; } hipLaunchKernelGGL(( kernel2), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_in, d_out2); status = hipMemcpy(h_gpu2_out, d_out2, SIZE * sizeof(float), hipMemcpyDeviceToHost); if (status != hipSuccess) { fprintf(stderr, "hipMemcpy() failed"); return EXIT_FAILURE; } hipEventRecord(end, 0); hipEventSynchronize(end); hipEventElapsedTime(&kernel_time, start, end); hipEventDestroy(start); hipEventDestroy(end); check_result(h_cpu_out, h_gpu2_out); std::cout << "Kernel 2 time (ms): " << kernel_time << "\n"; // kernel 3 float *d_out3; threadsPerBlock = dim3(32,8,4); numBlocks = dim3(N/threadsPerBlock.x, N/threadsPerBlock.y, N/threadsPerBlock.z); status = hipMalloc(&d_out3, SIZE * sizeof(float)); if (status != hipSuccess) { fprintf(stderr, "hipMalloc() failed"); return EXIT_FAILURE; } hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start, 0); status = hipMemcpy(d_in, h_in, SIZE * sizeof(float), hipMemcpyHostToDevice); if (status != hipSuccess) { fprintf(stderr, "hipMemcpy() failed"); return EXIT_FAILURE; } status = hipMemcpy(d_out3, h_gpu3_out, SIZE * sizeof(float), hipMemcpyHostToDevice); if (status != hipSuccess) { fprintf(stderr, "hipMemcpy() failed"); return EXIT_FAILURE; } hipLaunchKernelGGL(( kernel3), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_in, d_out3); status = hipMemcpy(h_gpu3_out, d_out3, SIZE * sizeof(float), hipMemcpyDeviceToHost); if (status != hipSuccess) { fprintf(stderr, "hipMemcpy() failed"); return EXIT_FAILURE; } hipEventRecord(end, 0); hipEventSynchronize(end); 
hipEventElapsedTime(&kernel_time, start, end); hipEventDestroy(start); hipEventDestroy(end); check_result(h_cpu_out, h_gpu3_out); std::cout << "Kernel 3 time (ms): " << kernel_time << "\n"; // TODO: Free memory hipFree(d_in); hipFree(d_out1); hipFree(d_out2); hipFree(d_out3); free(h_in); free(h_cpu_out); free(h_gpu1_out); free(h_gpu2_out); free(h_gpu3_out); return EXIT_SUCCESS; }
442a3a3f7460e193a89448a3b844b4497acf69a6.cu
// Compile: nvcc -g -G -arch=sm_61 -std=c++11 assignment5-p5.cu -o assignment5-p5 #include <cmath> #include <cstdlib> #include <cuda.h> #include <iostream> #include <sys/time.h> #define N 512 #define THRESHOLD (0.000001) using std::cerr; using std::cout; using std::endl; // TODO: Edit the function definition as required __global__ void kernel1(float* d_in, float* d_out) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int k = blockIdx.z * blockDim.z + threadIdx.z; if(i>=1 and i<N-1 and j>=1 and j<N-1 and k>=1 and k<N-1){ d_out[i*N*N + j*N + k] = 0.8 * (d_in[(i-1)*N*N + j*N + k] + d_in[(i+1)*N*N + j*N + k] + d_in[i*N*N + (j-1)*N + k] + d_in[i*N*N + (j+1)*N + k] + d_in[i*N*N + j*N + (k-1)] + d_in[i*N*N + j*N + (k+1)]); } } // TODO: Edit the function definition as required __global__ void kernel2(float* d_in, float* d_out) { int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.z * blockDim.z + threadIdx.z; if(i>=1 and i<N-1 and j>=1 and j<N-1 and k>=1 and k<N-1){ d_out[i*N*N + j*N + k] = 0.8 * (d_in[(i-1)*N*N + j*N + k] + d_in[(i+1)*N*N + j*N + k] + d_in[i*N*N + (j-1)*N + k] + d_in[i*N*N + (j+1)*N + k] + d_in[i*N*N + j*N + (k-1)] + d_in[i*N*N + j*N + (k+1)]); } } __global__ void kernel3(float* d_in, float* d_out) { int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.z * blockDim.z + threadIdx.z; int tj = threadIdx.y; int tk = threadIdx.x; int ti = threadIdx.z; __shared__ float mat[32][8][4]; mat[tk][tj][ti] = d_in[i*N*N + j*N + k]; __syncthreads(); if(i>=1 and i<N-1 and j>=1 and j<N-1 and k>=1 and k<N-1){ float val = 0; if(ti < 1) val += d_in[(i-1)*N*N + j*N + k]; else val += mat[tk][tj][ti-1]; if(ti + 1 > 3) val += d_in[(i+1)*N*N + j*N + k]; else val += mat[tk][tj][ti+1]; if(tj < 1) val += d_in[i*N*N + (j-1)*N + k]; else val += mat[tk][tj-1][ti]; if(tj + 1 > 7) val += d_in[i*N*N + (j+1)*N + k]; else val += 
mat[tk][tj+1][ti]; if(tk < 1) val += d_in[i*N*N + j*N + k-1]; else val += mat[tk-1][tj][ti]; if(tk + 1 > 31) val += d_in[i*N*N + j*N + k+1]; else val += mat[tk+1][tj][ti]; d_out[i*N*N + j*N + k] = 0.8 * val; } __syncthreads(); } // TODO: Edit the function definition as required __host__ void stencil(float* h_in, float* h_out) { for(int i = 1; i < N-1; i++){ for(int j = 1; j < N-1; j++){ for(int k = 1; k < N-1; k++){ h_out[i*N*N + j*N + k] = 0.8 * (h_in[(i-1)*N*N + j*N + k] + h_in[(i+1)*N*N + j*N + k] + h_in[i*N*N + (j-1)*N + k] + h_in[i*N*N + (j+1)*N + k] + h_in[i*N*N + j*N + (k-1)] + h_in[i*N*N + j*N + (k+1)]); } } } } __host__ void check_result(float* w_ref, float* w_opt) { double maxdiff = 0.0, this_diff = 0.0; int numdiffs = 0; for (uint64_t i = 0; i < N; i++) { for (uint64_t j = 0; j < N; j++) { for (uint64_t k = 0; k < N; k++) { this_diff = w_ref[i + N * j + N * N * k] - w_opt[i + N * j + N * N * k]; if (std::fabs(this_diff) > THRESHOLD) { numdiffs++; if (this_diff > maxdiff) { maxdiff = this_diff; } } } } } if (numdiffs > 0) { cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff << endl; } else { cout << "No differences found between base and test versions\n"; } } double rtclock() { // Seconds struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) { cout << "Error return from gettimeofday: " << stat << "\n"; } return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } int main() { uint64_t SIZE = N * N * N; float*h_in, *h_cpu_out, *h_gpu1_out, *h_gpu2_out, *h_gpu3_out; h_in = (float*)malloc(SIZE * sizeof(float)); h_cpu_out = (float*)malloc(SIZE * sizeof(float)); h_gpu1_out = (float*)malloc(SIZE * sizeof(float)); h_gpu2_out = (float*)malloc(SIZE * sizeof(float)); h_gpu3_out = (float*)malloc(SIZE * sizeof(float)); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { for (int k = 0; k < N; k++) { h_in[i * N * N + j * N + k] = rand() % 64; h_cpu_out[i * N * N + j * N + k] = 0; 
h_gpu1_out[i * N * N + j * N + k] = 0; h_gpu2_out[i * N * N + j * N + k] = 0; h_gpu3_out[i * N * N + j * N + k] = 0; } } } double clkbegin = rtclock(); stencil(h_in, h_cpu_out); double clkend = rtclock(); double cpu_time = clkend - clkbegin; cout << "Stencil time on CPU: " << cpu_time * 1000 << " msec" << endl; cudaError_t status; cudaEvent_t start, end; // TODO: Fill in kernel1 // TODO: Adapt check_result() and invoke float *d_in, *d_out1; dim3 threadsPerBlock(32,32,1); dim3 numBlocks(N/threadsPerBlock.x, N/threadsPerBlock.y, N/threadsPerBlock.z); status = cudaMalloc(&d_in, SIZE * sizeof(float)); if (status != cudaSuccess) { fprintf(stderr, "cudaMalloc() failed"); return EXIT_FAILURE; } status = cudaMalloc(&d_out1, SIZE * sizeof(float)); if (status != cudaSuccess) { fprintf(stderr, "cudaMalloc() failed"); return EXIT_FAILURE; } cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, 0); status = cudaMemcpy(d_in, h_in, SIZE * sizeof(float), cudaMemcpyHostToDevice); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } status = cudaMemcpy(d_out1, h_gpu1_out, SIZE * sizeof(float), cudaMemcpyHostToDevice); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } kernel1<<<numBlocks, threadsPerBlock>>>(d_in, d_out1); status = cudaMemcpy(h_gpu1_out, d_out1, SIZE * sizeof(float), cudaMemcpyDeviceToHost); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } cudaEventRecord(end, 0); cudaEventSynchronize(end); float kernel_time; cudaEventElapsedTime(&kernel_time, start, end); cudaEventDestroy(start); cudaEventDestroy(end); check_result(h_cpu_out, h_gpu1_out); std::cout << "Kernel 1 time (ms): " << kernel_time << "\n"; // TODO: Fill in kernel2 // TODO: Adapt check_result() and invoke float *d_out2; status = cudaMalloc(&d_out2, SIZE * sizeof(float)); if (status != cudaSuccess) { fprintf(stderr, "cudaMalloc() failed"); return EXIT_FAILURE; } 
cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, 0); status = cudaMemcpy(d_in, h_in, SIZE * sizeof(float), cudaMemcpyHostToDevice); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } status = cudaMemcpy(d_out2, h_gpu2_out, SIZE * sizeof(float), cudaMemcpyHostToDevice); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } kernel2<<<numBlocks, threadsPerBlock>>>(d_in, d_out2); status = cudaMemcpy(h_gpu2_out, d_out2, SIZE * sizeof(float), cudaMemcpyDeviceToHost); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&kernel_time, start, end); cudaEventDestroy(start); cudaEventDestroy(end); check_result(h_cpu_out, h_gpu2_out); std::cout << "Kernel 2 time (ms): " << kernel_time << "\n"; // kernel 3 float *d_out3; threadsPerBlock = dim3(32,8,4); numBlocks = dim3(N/threadsPerBlock.x, N/threadsPerBlock.y, N/threadsPerBlock.z); status = cudaMalloc(&d_out3, SIZE * sizeof(float)); if (status != cudaSuccess) { fprintf(stderr, "cudaMalloc() failed"); return EXIT_FAILURE; } cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, 0); status = cudaMemcpy(d_in, h_in, SIZE * sizeof(float), cudaMemcpyHostToDevice); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } status = cudaMemcpy(d_out3, h_gpu3_out, SIZE * sizeof(float), cudaMemcpyHostToDevice); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } kernel3<<<numBlocks, threadsPerBlock>>>(d_in, d_out3); status = cudaMemcpy(h_gpu3_out, d_out3, SIZE * sizeof(float), cudaMemcpyDeviceToHost); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy() failed"); return EXIT_FAILURE; } cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&kernel_time, start, end); cudaEventDestroy(start); cudaEventDestroy(end); 
check_result(h_cpu_out, h_gpu3_out); std::cout << "Kernel 3 time (ms): " << kernel_time << "\n"; // TODO: Free memory cudaFree(d_in); cudaFree(d_out1); cudaFree(d_out2); cudaFree(d_out3); free(h_in); free(h_cpu_out); free(h_gpu1_out); free(h_gpu2_out); free(h_gpu3_out); return EXIT_SUCCESS; }
aa708dff9cf7293e74de1643c45391a1aed0ad76.hip
// !!! This is a file automatically generated by hipify!!! /* Tiny Monte Carlo by Scott Prahl (http://omlc.ogi.edu)" * 1 W Point Source Heating in Infinite Isotropic Scattering Medium * http://omlc.ogi.edu/software/mc/tiny_mc.c * * Adaptado para CP2014, Nicolas Wolovick */ //#define _XOPEN_SOURCE 500 // M_PI #include "params.h" #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> // headers useful for cuda #include "helper_cuda.h" #include <ctime> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> // global state, heat and heat square in each shell /*** * Photon ***/ __global__ void init_curand(hiprandState_t* rng_states) { int gtid = blockDim.x * blockIdx.x + threadIdx.x; // setup a seed for every thread hiprand_init((unsigned long long)clock(), gtid, 0, &rng_states[gtid]); } __global__ void test_curand(hiprandState_t* rng_states) { int gtid = blockDim.x * blockIdx.x + threadIdx.x; float rnd = hiprand_uniform(&rng_states[gtid]); printf("gtid = %i - rnd = %f\n", gtid, rnd); } __global__ void print_kernel() { printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x); } __global__ void photon(float* global_heat, float* global_heat2, hiprandState_t* rng_states) { int gtid = blockDim.x * blockIdx.x + threadIdx.x; // a photon per thread and if PHOTONS is not a multiple of 32 cape it if (gtid <= PHOTONS) { hiprandState_t thread_rng_state = rng_states[gtid]; const float albedo = MU_S * (1.0f / (MU_S + MU_A)); const float shells_per_mfp = 1e4 * (1.0f / MICRONS_PER_SHELL) * (1.0f / (MU_A + MU_S)); float x = 0.0f; float y = 0.0f; float z = 0.0f; float u = 0.0f; float v = 0.0f; float w = 1.0f; float weight = 1.0f; for (;;) { float t = -logf((float)hiprand_uniform(&thread_rng_state)); x += t * u; y += t * v; z += t * w; unsigned int shell = sqrtf(x * x + y * y + z * z) * shells_per_mfp; if (shell > SHELLS - 1) { shell = SHELLS - 1; } // atomic add atomicAdd(&global_heat[shell], (1.0f - albedo) * weight); 
atomicAdd(&global_heat2[shell], (1.0f - albedo) * (1.0f - albedo) * weight * weight); weight *= albedo; float xi1, xi2; do { xi1 = 2.0f * hiprand_uniform(&thread_rng_state) - 1.0f; xi2 = 2.0f * hiprand_uniform(&thread_rng_state) - 1.0f; t = xi1 * xi1 + xi2 * xi2; } while (1.0f < t); u = 2.0f * t - 1.0f; v = xi1 * sqrtf((1.0f - u * u) * (1.0f / t)); w = xi2 * sqrtf((1.0f - u * u) * (1.0f / t)); if (weight < 0.001f) { if ((float)hiprand_uniform(&thread_rng_state) > 0.1f) { break; } weight *= 10.0f; } } } } /*** * Main matter ***/ int main(void) { //get block_according to number of threads per block and PHOTONS double block_count = (PHOTONS + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int total_num_threads = block_count * BLOCK_SIZE; // initialize heat and heat2 to be shared between cpu and gpu float * heat; float * heat2; checkCudaCall(hipMallocManaged(&heat, SHELLS * sizeof(float))); checkCudaCall(hipMallocManaged(&heat2, SHELLS * sizeof(float))); for (int i = 0; i < SHELLS; i++) { heat[i] = 0; heat2[i] = 0; } // init hiprand hiprandState_t* rng_states; checkCudaCall(hipMallocManaged(&rng_states, total_num_threads * sizeof(hiprandState_t))); hipLaunchKernelGGL(( init_curand), dim3(block_count), dim3(BLOCK_SIZE), 0, 0, rng_states); //test_curand<<<block_count, BLOCK_SIZE>>>(rng_states); hipDeviceSynchronize(); checkCudaCall(hipGetLastError()); // variables to measure time float time; hipEvent_t start, stop; checkCudaCall( hipEventCreate(&start) ); checkCudaCall( hipEventCreate(&stop) ); checkCudaCall( hipEventRecord(start, 0) ); //photon hipLaunchKernelGGL(( photon), dim3(block_count), dim3(BLOCK_SIZE), 0, 0, heat, heat2, rng_states); checkCudaCall(hipGetLastError()); checkCudaCall( hipEventRecord(stop, 0) ); checkCudaCall( hipEventSynchronize(stop) ); checkCudaCall( hipEventElapsedTime(&time, start, stop) ); // the measure of time in cuda is ms float elapsed = time / 1000; printf("# %lf seconds\n", elapsed); printf("# %lf K photons per second\n", 1e-3 * PHOTONS / 
elapsed); printf("%lf\n", 1e-3 * PHOTONS / elapsed); printf("# Radius\tHeat\n"); printf("# [microns]\t[W/cm^3]\tError\n"); float t = 4.0f * M_PI * powf(MICRONS_PER_SHELL, 3.0f) * PHOTONS / 1e12; for (unsigned int i = 0; i < SHELLS - 1; ++i) { printf("%6.0f\t%12.5f\t%12.5f\n", i * (float)MICRONS_PER_SHELL, heat[i] / t / (i * i + i + 1.0 / 3.0), sqrt(heat2[i] - heat[i] * heat[i] / PHOTONS) / t / (i * i + i + 1.0f / 3.0f)); } printf("# extra\t%12.5f\n", heat[SHELLS - 1] / PHOTONS); return 0; }
aa708dff9cf7293e74de1643c45391a1aed0ad76.cu
/* Tiny Monte Carlo by Scott Prahl (http://omlc.ogi.edu)" * 1 W Point Source Heating in Infinite Isotropic Scattering Medium * http://omlc.ogi.edu/software/mc/tiny_mc.c * * Adaptado para CP2014, Nicolas Wolovick */ //#define _XOPEN_SOURCE 500 // M_PI #include "params.h" #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> // headers useful for cuda #include "helper_cuda.h" #include <ctime> #include <cuda_runtime.h> #include <curand_kernel.h> // global state, heat and heat square in each shell /*** * Photon ***/ __global__ void init_curand(curandState* rng_states) { int gtid = blockDim.x * blockIdx.x + threadIdx.x; // setup a seed for every thread curand_init((unsigned long long)clock(), gtid, 0, &rng_states[gtid]); } __global__ void test_curand(curandState* rng_states) { int gtid = blockDim.x * blockIdx.x + threadIdx.x; float rnd = curand_uniform(&rng_states[gtid]); printf("gtid = %i - rnd = %f\n", gtid, rnd); } __global__ void print_kernel() { printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x); } __global__ void photon(float* global_heat, float* global_heat2, curandState* rng_states) { int gtid = blockDim.x * blockIdx.x + threadIdx.x; // a photon per thread and if PHOTONS is not a multiple of 32 cape it if (gtid <= PHOTONS) { curandState thread_rng_state = rng_states[gtid]; const float albedo = MU_S * (1.0f / (MU_S + MU_A)); const float shells_per_mfp = 1e4 * (1.0f / MICRONS_PER_SHELL) * (1.0f / (MU_A + MU_S)); float x = 0.0f; float y = 0.0f; float z = 0.0f; float u = 0.0f; float v = 0.0f; float w = 1.0f; float weight = 1.0f; for (;;) { float t = -logf((float)curand_uniform(&thread_rng_state)); x += t * u; y += t * v; z += t * w; unsigned int shell = sqrtf(x * x + y * y + z * z) * shells_per_mfp; if (shell > SHELLS - 1) { shell = SHELLS - 1; } // atomic add atomicAdd(&global_heat[shell], (1.0f - albedo) * weight); atomicAdd(&global_heat2[shell], (1.0f - albedo) * (1.0f - albedo) * weight * weight); weight *= albedo; float 
xi1, xi2; do { xi1 = 2.0f * curand_uniform(&thread_rng_state) - 1.0f; xi2 = 2.0f * curand_uniform(&thread_rng_state) - 1.0f; t = xi1 * xi1 + xi2 * xi2; } while (1.0f < t); u = 2.0f * t - 1.0f; v = xi1 * sqrtf((1.0f - u * u) * (1.0f / t)); w = xi2 * sqrtf((1.0f - u * u) * (1.0f / t)); if (weight < 0.001f) { if ((float)curand_uniform(&thread_rng_state) > 0.1f) { break; } weight *= 10.0f; } } } } /*** * Main matter ***/ int main(void) { //get block_according to number of threads per block and PHOTONS double block_count = (PHOTONS + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int total_num_threads = block_count * BLOCK_SIZE; // initialize heat and heat2 to be shared between cpu and gpu float * heat; float * heat2; checkCudaCall(cudaMallocManaged(&heat, SHELLS * sizeof(float))); checkCudaCall(cudaMallocManaged(&heat2, SHELLS * sizeof(float))); for (int i = 0; i < SHELLS; i++) { heat[i] = 0; heat2[i] = 0; } // init curand curandState* rng_states; checkCudaCall(cudaMallocManaged(&rng_states, total_num_threads * sizeof(curandState))); init_curand<<<block_count, BLOCK_SIZE>>>(rng_states); //test_curand<<<block_count, BLOCK_SIZE>>>(rng_states); cudaDeviceSynchronize(); checkCudaCall(cudaGetLastError()); // variables to measure time float time; cudaEvent_t start, stop; checkCudaCall( cudaEventCreate(&start) ); checkCudaCall( cudaEventCreate(&stop) ); checkCudaCall( cudaEventRecord(start, 0) ); //photon photon<<<block_count, BLOCK_SIZE>>>(heat, heat2, rng_states); checkCudaCall(cudaGetLastError()); checkCudaCall( cudaEventRecord(stop, 0) ); checkCudaCall( cudaEventSynchronize(stop) ); checkCudaCall( cudaEventElapsedTime(&time, start, stop) ); // the measure of time in cuda is ms float elapsed = time / 1000; printf("# %lf seconds\n", elapsed); printf("# %lf K photons per second\n", 1e-3 * PHOTONS / elapsed); printf("%lf\n", 1e-3 * PHOTONS / elapsed); printf("# Radius\tHeat\n"); printf("# [microns]\t[W/cm^3]\tError\n"); float t = 4.0f * M_PI * powf(MICRONS_PER_SHELL, 3.0f) * PHOTONS 
/ 1e12; for (unsigned int i = 0; i < SHELLS - 1; ++i) { printf("%6.0f\t%12.5f\t%12.5f\n", i * (float)MICRONS_PER_SHELL, heat[i] / t / (i * i + i + 1.0 / 3.0), sqrt(heat2[i] - heat[i] * heat[i] / PHOTONS) / t / (i * i + i + 1.0f / 3.0f)); } printf("# extra\t%12.5f\n", heat[SHELLS - 1] / PHOTONS); return 0; }
67918727dd31e53ff0c9ac0166fb61d3b16ff0a3.hip
// !!! This is a file automatically generated by hipify!!! #include <Hornet.hpp> #include "StandardAPI.hpp" #include "Util/BatchFunctions.hpp" #include "Util/RandomGraphData.cuh" #include <Host/FileUtil.hpp> //xlib::extract_filepath_noextension #include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo #include <algorithm> //std:.generate #include <chrono> //std::chrono #include <random> //std::mt19937_64 #include <hip/hip_runtime_api.h> #include <Core/Static/Static.cuh> #include <Graph/GraphStd.hpp> #include <Host/Classes/Timer.hpp> #include <Device/Util/Timer.cuh> #include "Util/CommandLineParam.hpp" //using namespace hornets_nest; using namespace timer; using namespace std::string_literals; using vert_t = int; using eoff_t = int; using HornetGPU = hornet::gpu::Hornet<vert_t>; using UpdatePtr = hornet::BatchUpdatePtr<vert_t, hornet::EMPTY, hornet::DeviceType::HOST>; using Update = hornet::gpu::BatchUpdate<vert_t>; using Init = hornet::HornetInit<vert_t>; using hornet::SoAData; using hornet::TypeList; using hornet::DeviceType; /** * @brief Example tester for Hornet */ int exec(int argc, char* argv[]) { using namespace graph::structure_prop; using namespace graph::parsing_prop; graph::GraphStd<vert_t, vert_t> graph; graph.read(argv[1]); int batch_size = std::stoi(argv[2]); Init hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetGPU hornet_gpu(hornet_init); auto init_coo = hornet_gpu.getCOO(true); hornet::RandomGenTraits<hornet::EMPTY> cooGenTraits; auto randomBatch = hornet::generateRandomCOO<vert_t, eoff_t>(graph.nV(), batch_size, cooGenTraits); Update batch_update(randomBatch); hornet_gpu.insert(batch_update); auto inst_coo = hornet_gpu.getCOO(true); init_coo.append(randomBatch); init_coo.sort(); hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_init_coo = init_coo; hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_inst_coo = inst_coo; auto *s = host_init_coo.srcPtr(); auto *d = 
host_init_coo.dstPtr(); auto *S = host_inst_coo.srcPtr(); auto *D = host_inst_coo.dstPtr(); auto len = host_init_coo.size(); bool err = false; if (host_inst_coo.size() != host_init_coo.size()) { err = true; std::cerr<<"\nInit Size "<<host_init_coo.size()<<" != Combined size "<<host_inst_coo.size()<<"\n"; len = ::min(host_init_coo.size(), host_inst_coo.size()); } for (int i = 0; i < len; ++i) { if ((s[i] != S[i]) || (d[i] != D[i])) { err = true; std::cout<<"ERR : "; std::cout<<s[i]<<" "<<d[i]<<"\t"; std::cout<<"\t\t"; std::cout<<S[i]<<" "<<D[i]; std::cout<<"\n"; } } if (!err) { std::cout<<"PASSED\n"; } else { std::cout<<"NOT PASSED\n"; } return 0; } int main(int argc, char* argv[]) { int ret = 0; hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory. {//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. ret = exec(argc, argv); }//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. hornets_nest::gpu::finalizeRMMPoolAllocation(); return ret; }
67918727dd31e53ff0c9ac0166fb61d3b16ff0a3.cu
#include <Hornet.hpp> #include "StandardAPI.hpp" #include "Util/BatchFunctions.hpp" #include "Util/RandomGraphData.cuh" #include <Host/FileUtil.hpp> //xlib::extract_filepath_noextension #include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo #include <algorithm> //std:.generate #include <chrono> //std::chrono #include <random> //std::mt19937_64 #include <cuda_profiler_api.h> #include <Core/Static/Static.cuh> #include <Graph/GraphStd.hpp> #include <Host/Classes/Timer.hpp> #include <Device/Util/Timer.cuh> #include "Util/CommandLineParam.hpp" //using namespace hornets_nest; using namespace timer; using namespace std::string_literals; using vert_t = int; using eoff_t = int; using HornetGPU = hornet::gpu::Hornet<vert_t>; using UpdatePtr = hornet::BatchUpdatePtr<vert_t, hornet::EMPTY, hornet::DeviceType::HOST>; using Update = hornet::gpu::BatchUpdate<vert_t>; using Init = hornet::HornetInit<vert_t>; using hornet::SoAData; using hornet::TypeList; using hornet::DeviceType; /** * @brief Example tester for Hornet */ int exec(int argc, char* argv[]) { using namespace graph::structure_prop; using namespace graph::parsing_prop; graph::GraphStd<vert_t, vert_t> graph; graph.read(argv[1]); int batch_size = std::stoi(argv[2]); Init hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetGPU hornet_gpu(hornet_init); auto init_coo = hornet_gpu.getCOO(true); hornet::RandomGenTraits<hornet::EMPTY> cooGenTraits; auto randomBatch = hornet::generateRandomCOO<vert_t, eoff_t>(graph.nV(), batch_size, cooGenTraits); Update batch_update(randomBatch); hornet_gpu.insert(batch_update); auto inst_coo = hornet_gpu.getCOO(true); init_coo.append(randomBatch); init_coo.sort(); hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_init_coo = init_coo; hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_inst_coo = inst_coo; auto *s = host_init_coo.srcPtr(); auto *d = host_init_coo.dstPtr(); auto *S = host_inst_coo.srcPtr(); auto *D = 
host_inst_coo.dstPtr(); auto len = host_init_coo.size(); bool err = false; if (host_inst_coo.size() != host_init_coo.size()) { err = true; std::cerr<<"\nInit Size "<<host_init_coo.size()<<" != Combined size "<<host_inst_coo.size()<<"\n"; len = std::min(host_init_coo.size(), host_inst_coo.size()); } for (int i = 0; i < len; ++i) { if ((s[i] != S[i]) || (d[i] != D[i])) { err = true; std::cout<<"ERR : "; std::cout<<s[i]<<" "<<d[i]<<"\t"; std::cout<<"\t\t"; std::cout<<S[i]<<" "<<D[i]; std::cout<<"\n"; } } if (!err) { std::cout<<"PASSED\n"; } else { std::cout<<"NOT PASSED\n"; } return 0; } int main(int argc, char* argv[]) { int ret = 0; hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory. {//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. ret = exec(argc, argv); }//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. hornets_nest::gpu::finalizeRMMPoolAllocation(); return ret; }
78b12f857d1946a2f832dbb7c29f28ae492c5d6a.hip
// !!! This is a file automatically generated by hipify!!! // a cuda app. we will convert this to opencl, and run it :-) #include <iostream> #include <memory> #include <cassert> using namespace std; #include <hip/hip_runtime.h> __global__ void setValue(float *data, int idx, float value) { if(threadIdx.x == 0) { data[idx] = value; } } int main(int argc, char *argv[]) { int N = 1024; hipDeviceptr_t gpuFloats; cuMemAlloc(&gpuFloats, N * sizeof(float)); hipLaunchKernelGGL(( setValue), dim3(dim3(32, 1, 1)), dim3(dim3(32, 1, 1)), 0, 0, (float *)gpuFloats, 2, 123.0f); float hostFloats[4]; cuMemcpyDtoH(hostFloats, gpuFloats, 4 * sizeof(float)); cout << "hostFloats[2] " << hostFloats[2] << endl; assert(hostFloats[2] == 123); hipLaunchKernelGGL(( setValue), dim3(dim3(32, 1, 1)), dim3(dim3(32, 1, 1)), 0, 0, (float *)gpuFloats, 2, 222.0f); cuMemcpyDtoH(hostFloats, gpuFloats, 4 * sizeof(float)); cout << "hostFloats[2] " << hostFloats[2] << endl; assert(hostFloats[2] == 222); hostFloats[2] = 444.0f; cuMemcpyHtoD(gpuFloats, hostFloats, 4 * sizeof(float)); hostFloats[2] = 555.0f; cuMemcpyDtoH(hostFloats, gpuFloats, 4 * sizeof(float)); cout << "hostFloats[2] " << hostFloats[2] << endl; assert(hostFloats[2] == 444); hipFree(gpuFloats); return 0; }
78b12f857d1946a2f832dbb7c29f28ae492c5d6a.cu
// a cuda app. we will convert this to opencl, and run it :-) #include <iostream> #include <memory> #include <cassert> using namespace std; #include <cuda_runtime.h> __global__ void setValue(float *data, int idx, float value) { if(threadIdx.x == 0) { data[idx] = value; } } int main(int argc, char *argv[]) { int N = 1024; CUdeviceptr gpuFloats; cuMemAlloc(&gpuFloats, N * sizeof(float)); setValue<<<dim3(32, 1, 1), dim3(32, 1, 1)>>>((float *)gpuFloats, 2, 123.0f); float hostFloats[4]; cuMemcpyDtoH(hostFloats, gpuFloats, 4 * sizeof(float)); cout << "hostFloats[2] " << hostFloats[2] << endl; assert(hostFloats[2] == 123); setValue<<<dim3(32, 1, 1), dim3(32, 1, 1)>>>((float *)gpuFloats, 2, 222.0f); cuMemcpyDtoH(hostFloats, gpuFloats, 4 * sizeof(float)); cout << "hostFloats[2] " << hostFloats[2] << endl; assert(hostFloats[2] == 222); hostFloats[2] = 444.0f; cuMemcpyHtoD(gpuFloats, hostFloats, 4 * sizeof(float)); hostFloats[2] = 555.0f; cuMemcpyDtoH(hostFloats, gpuFloats, 4 * sizeof(float)); cout << "hostFloats[2] " << hostFloats[2] << endl; assert(hostFloats[2] == 444); cuMemFree(gpuFloats); return 0; }
87e2271e4160e0799a52996f430769db323a36bf.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/strings/translate.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/find.h> #include <algorithm> namespace cudf { namespace strings { namespace detail { using char_range = thrust::pair<char_utf8, char_utf8>; namespace { /** * @brief This is the filter functor for replacing characters * in each string given a vector of char_range values. */ struct filter_fn { column_device_view const d_strings; filter_type keep_characters; rmm::device_uvector<char_range>::iterator table_begin; rmm::device_uvector<char_range>::iterator table_end; string_view const d_replacement; int32_t* d_offsets{}; char* d_chars{}; /** * @brief Return true if this character should be removed. * * @param ch Character to check * @return True if character should be removed. 
*/ __device__ bool remove_char(char_utf8 ch) { auto const entry = thrust::find_if(thrust::seq, table_begin, table_end, [ch] __device__(auto const& range) { return (range.first <= ch) && (ch <= range.second); }); // if keep==true and entry-not-found OR // if keep==false and entry-found return (keep_characters == filter_type::KEEP) == (entry == table_end); } /** * @brief Execute the filter operation on each string. * * This is also used to calculate the size of the output. * * @param idx Index of the current string to process. */ __device__ void operator()(size_type idx) { if (d_strings.is_null(idx)) { if (!d_chars) d_offsets[idx] = 0; return; } auto const d_str = d_strings.element<string_view>(idx); auto nbytes = d_str.size_bytes(); auto out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr; for (auto itr = d_str.begin(); itr != d_str.end(); ++itr) { auto const char_size = bytes_in_char_utf8(*itr); string_view const d_newchar = remove_char(*itr) ? d_replacement : string_view(d_str.data() + itr.byte_offset(), char_size); if (out_ptr) out_ptr = cudf::strings::detail::copy_string(out_ptr, d_newchar); else nbytes += d_newchar.size_bytes() - char_size; } if (!out_ptr) d_offsets[idx] = nbytes; } }; } // namespace /** * @copydoc cudf::strings::filter_characters */ std::unique_ptr<column> filter_characters( strings_column_view const& strings, std::vector<std::pair<cudf::char_utf8, cudf::char_utf8>> characters_to_filter, filter_type keep_characters, string_scalar const& replacement, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { size_type strings_count = strings.size(); if (strings_count == 0) return make_empty_column(data_type{type_id::STRING}); CUDF_EXPECTS(replacement.is_valid(), "Parameter replacement must be valid"); cudf::string_view d_replacement(replacement.data(), replacement.size()); // convert input table for copy to device memory size_type table_size = static_cast<size_type>(characters_to_filter.size()); thrust::host_vector<char_range> 
htable(table_size); std::transform( characters_to_filter.begin(), characters_to_filter.end(), htable.begin(), [](auto entry) { return char_range{entry.first, entry.second}; }); rmm::device_uvector<char_range> table(table_size, stream); CUDA_TRY(hipMemcpyAsync(table.data(), htable.data(), table_size * sizeof(char_range), hipMemcpyHostToDevice, stream.value())); auto d_strings = column_device_view::create(strings.parent(), stream); // this utility calls the strip_fn to build the offsets and chars columns filter_fn ffn{*d_strings, keep_characters, table.begin(), table.end(), d_replacement}; auto children = cudf::strings::detail::make_strings_children(ffn, strings.size(), stream, mr); return make_strings_column(strings_count, std::move(children.first), std::move(children.second), strings.null_count(), cudf::detail::copy_bitmask(strings.parent(), stream, mr)); } } // namespace detail /** * @copydoc cudf::strings::filter_characters */ std::unique_ptr<column> filter_characters( strings_column_view const& strings, std::vector<std::pair<cudf::char_utf8, cudf::char_utf8>> characters_to_filter, filter_type keep_characters, string_scalar const& replacement, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::filter_characters( strings, characters_to_filter, keep_characters, replacement, rmm::cuda_stream_default, mr); } } // namespace strings } // namespace cudf
87e2271e4160e0799a52996f430769db323a36bf.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/strings/translate.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/find.h> #include <algorithm> namespace cudf { namespace strings { namespace detail { using char_range = thrust::pair<char_utf8, char_utf8>; namespace { /** * @brief This is the filter functor for replacing characters * in each string given a vector of char_range values. */ struct filter_fn { column_device_view const d_strings; filter_type keep_characters; rmm::device_uvector<char_range>::iterator table_begin; rmm::device_uvector<char_range>::iterator table_end; string_view const d_replacement; int32_t* d_offsets{}; char* d_chars{}; /** * @brief Return true if this character should be removed. * * @param ch Character to check * @return True if character should be removed. 
*/ __device__ bool remove_char(char_utf8 ch) { auto const entry = thrust::find_if(thrust::seq, table_begin, table_end, [ch] __device__(auto const& range) { return (range.first <= ch) && (ch <= range.second); }); // if keep==true and entry-not-found OR // if keep==false and entry-found return (keep_characters == filter_type::KEEP) == (entry == table_end); } /** * @brief Execute the filter operation on each string. * * This is also used to calculate the size of the output. * * @param idx Index of the current string to process. */ __device__ void operator()(size_type idx) { if (d_strings.is_null(idx)) { if (!d_chars) d_offsets[idx] = 0; return; } auto const d_str = d_strings.element<string_view>(idx); auto nbytes = d_str.size_bytes(); auto out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr; for (auto itr = d_str.begin(); itr != d_str.end(); ++itr) { auto const char_size = bytes_in_char_utf8(*itr); string_view const d_newchar = remove_char(*itr) ? d_replacement : string_view(d_str.data() + itr.byte_offset(), char_size); if (out_ptr) out_ptr = cudf::strings::detail::copy_string(out_ptr, d_newchar); else nbytes += d_newchar.size_bytes() - char_size; } if (!out_ptr) d_offsets[idx] = nbytes; } }; } // namespace /** * @copydoc cudf::strings::filter_characters */ std::unique_ptr<column> filter_characters( strings_column_view const& strings, std::vector<std::pair<cudf::char_utf8, cudf::char_utf8>> characters_to_filter, filter_type keep_characters, string_scalar const& replacement, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { size_type strings_count = strings.size(); if (strings_count == 0) return make_empty_column(data_type{type_id::STRING}); CUDF_EXPECTS(replacement.is_valid(), "Parameter replacement must be valid"); cudf::string_view d_replacement(replacement.data(), replacement.size()); // convert input table for copy to device memory size_type table_size = static_cast<size_type>(characters_to_filter.size()); thrust::host_vector<char_range> 
htable(table_size); std::transform( characters_to_filter.begin(), characters_to_filter.end(), htable.begin(), [](auto entry) { return char_range{entry.first, entry.second}; }); rmm::device_uvector<char_range> table(table_size, stream); CUDA_TRY(cudaMemcpyAsync(table.data(), htable.data(), table_size * sizeof(char_range), cudaMemcpyHostToDevice, stream.value())); auto d_strings = column_device_view::create(strings.parent(), stream); // this utility calls the strip_fn to build the offsets and chars columns filter_fn ffn{*d_strings, keep_characters, table.begin(), table.end(), d_replacement}; auto children = cudf::strings::detail::make_strings_children(ffn, strings.size(), stream, mr); return make_strings_column(strings_count, std::move(children.first), std::move(children.second), strings.null_count(), cudf::detail::copy_bitmask(strings.parent(), stream, mr)); } } // namespace detail /** * @copydoc cudf::strings::filter_characters */ std::unique_ptr<column> filter_characters( strings_column_view const& strings, std::vector<std::pair<cudf::char_utf8, cudf::char_utf8>> characters_to_filter, filter_type keep_characters, string_scalar const& replacement, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::filter_characters( strings, characters_to_filter, keep_characters, replacement, rmm::cuda_stream_default, mr); } } // namespace strings } // namespace cudf
91122a28db2e765fc9e37049eeb5eda19e859854.hip
// !!! This is a file automatically generated by hipify!!! #include "gpu_utils.cuh" #include "pinned_host_buffer.hpp" namespace timemachine { template <typename T> T *allocate(const std::size_t length) { if (length < 1) { throw std::runtime_error("device buffer length must at least be 1"); } T *buffer; gpuErrchk(hipHostMalloc(&buffer, length * sizeof(T))); return buffer; } template <typename T> PinnedHostBuffer<T>::PinnedHostBuffer(const std::size_t length) : size(length * sizeof(T)), data(allocate<T>(length)) {} template <typename T> PinnedHostBuffer<T>::~PinnedHostBuffer() { // TODO: the file/line context reported by gpuErrchk on failure is // not very useful when it's called from here. Is there a way to // report a stack trace? gpuErrchk(hipHostFree(data)); } template <typename T> void PinnedHostBuffer<T>::copy_from(const T *host_buffer) const { memcpy(data, host_buffer, size); } template <typename T> void PinnedHostBuffer<T>::copy_to(T *host_buffer) const { memcpy(host_buffer, data, size); } template class PinnedHostBuffer<double>; template class PinnedHostBuffer<float>; template class PinnedHostBuffer<int>; template class PinnedHostBuffer<char>; template class PinnedHostBuffer<unsigned int>; template class PinnedHostBuffer<unsigned long long>; } // namespace timemachine
91122a28db2e765fc9e37049eeb5eda19e859854.cu
#include "gpu_utils.cuh" #include "pinned_host_buffer.hpp" namespace timemachine { template <typename T> T *allocate(const std::size_t length) { if (length < 1) { throw std::runtime_error("device buffer length must at least be 1"); } T *buffer; gpuErrchk(cudaMallocHost(&buffer, length * sizeof(T))); return buffer; } template <typename T> PinnedHostBuffer<T>::PinnedHostBuffer(const std::size_t length) : size(length * sizeof(T)), data(allocate<T>(length)) {} template <typename T> PinnedHostBuffer<T>::~PinnedHostBuffer() { // TODO: the file/line context reported by gpuErrchk on failure is // not very useful when it's called from here. Is there a way to // report a stack trace? gpuErrchk(cudaFreeHost(data)); } template <typename T> void PinnedHostBuffer<T>::copy_from(const T *host_buffer) const { memcpy(data, host_buffer, size); } template <typename T> void PinnedHostBuffer<T>::copy_to(T *host_buffer) const { memcpy(host_buffer, data, size); } template class PinnedHostBuffer<double>; template class PinnedHostBuffer<float>; template class PinnedHostBuffer<int>; template class PinnedHostBuffer<char>; template class PinnedHostBuffer<unsigned int>; template class PinnedHostBuffer<unsigned long long>; } // namespace timemachine
9c7ab45163530310fca871c4c2b4f348eb72b89b.hip
// !!! This is a file automatically generated by hipify!!! #include "sampler.h" #include <utility> #include <random> #include <map> #include <ctime> #include <cmath> #include <queue> using namespace glm; constexpr float PI = 3.1415926535897932384626433832795f; constexpr float PI_2 = 1.5707963267948966192313216916398f; vec3 sampleUnitSphere() { static std::default_random_engine eng(time(NULL)); static std::uniform_real_distribution<float> dis(0, 1); float u = 2 * PI * dis(eng); float v = acos(dis(eng) * 2.0f - 1.0f) ; return vec3(sin(v) * cos(u), sin(v) * sin(u), cos(v)); } // hash = x * (lz * ly) + y * ly + z float Grid::getNearest(const vec3 &point, const ivec3& v, const float &r) const { float dmin = 2000000.0f; const int depth = len / r + 1; for(int i = -depth; i <= depth; ++i) for(int j = -depth; j <= depth; ++j) for(int k = -depth; k <= depth; ++k) if((i || j || k)){ auto inc = ivec3(i, j, k); auto res = samples.find(v + inc); if (res != samples.end()) dmin = ::min(dmin, length(res->second - point)); } return dmin; } bool Grid::checkPoissonCriterion(const vec3 &sample, const ivec3 &v, const float &r) const { if (samples.count(v)) return false; auto a = getNearest(sample, v, r); return a > r; } std::vector<vec3> Grid::sampleSurface(const LevelSet &f, float r, int t, float e) { static std::default_random_engine gen(time(NULL)); static std::uniform_real_distribution<float> dis(0.0f, 1.0f); vec3 p; std::vector<vec3> res; // Iterate over the grid for(int x = lx; x <= rx; ++x) for(int y = ly; y <= ry; ++y) for(int z = lz; z <= rz; ++z) { Cube cell; int cnt = 0; for(int ix = 0; ix <= 1; ++ix) for(int iy = 0; iy <= 1; ++iy) for (int iz = 0; iz <= 1; ++iz) { auto ele = vec3(len * x, len * y, len * z) + len * vec3(ix, iy, iz); cell[cnt++] = ele; } float sign = f(cell[0]); unsigned int i; for (i = 1; i < cell.size() && f(cell[i]) * sign > 0.0f; ++i); if(i < cell.size()) { // level set change sign in this grid bool found = false; for (int j = 0; j < t && !found; ++j) { p 
= cell[0] + dis(gen) * (cell[1] - cell[0]) + dis(gen) * (cell[3] - cell[0]) + dis(gen) * (cell[4] - cell[0]); p = f.project(p); ivec3 coord = p / len; found = checkPoissonCriterion(p, coord, r); } if (found) { ivec3 coord = p / len; samples.insert(std::make_pair(coord, p)); do { found = false; p = f.project(p + e * r * f.getRandomTan(p)); coord = p / len; if (checkPoissonCriterion(p, coord, r)){ samples.insert(std::make_pair(coord, p)); found = true; } } while (found); found = false; } } } relaxSamples([](vec3) {return true;}, f, r, 5, t); for (auto &sample : samples) res.push_back(sample.second); samples.clear(); return std::move(res); } std::vector<vec3> Grid::sampleVolume(Judger g, const LevelSet &f, std::vector<vec3> &temp, const float &r, const int &k) { static std::default_random_engine eng(time(NULL)); std::uniform_real_distribution<float> dis(r, 2.0f * r); static std::default_random_engine engi(time(NULL)); static std::uniform_int_distribution<unsigned int> disi; std::vector<vec3> res; if (temp.empty()) return res; for (auto &v : temp) { ivec3 coord = v / len; samples.insert(std::make_pair(coord, v)); } for (int i = disi(engi) % temp.size(); !temp.empty(); i = disi(engi) % temp.size()) { bool found = false; vec3 st = temp[i]; for (int j = 0; j < k; ++j) { auto sample = st + dis(eng) * sampleUnitSphere(); ivec3 v = sample / len; if (f(sample) <= 0.0 && g(sample) && checkPoissonCriterion(sample, v, r)) { found = true; samples.insert(std::make_pair(v, sample)); temp.push_back(sample); res.push_back(sample); } } if (!found) { std::swap(temp[i], temp[temp.size() - 1]); temp.pop_back(); } if (temp.empty()) break; } samples.clear(); for (auto &entry : res) { ivec3 coord = entry / len; samples.insert(std::make_pair(coord, entry)); } res.clear(); relaxSamples(g, f, r, k, 5, false); for(auto &sample : samples) res.push_back(sample.second); samples.clear(); return std::move(res); } void Grid::relaxSamples(Judger g, const LevelSet &f, float r, int k, int t, bool 
is_surface) { std::vector<vec3> ball; ball.reserve(8); for (int i = 0; i < k; ++i) { for (auto &sample : samples) { ball.clear(); ivec3 coord = sample.second / len; float d = 2 * r; int depth = 2*r / len + 1; for (int i = -depth; i <= depth; ++i) for (int j = -depth; j <= depth; ++j) for (int k = -depth; k <= depth; ++k) if ( (i || j || k)) { auto res = samples.find(coord + ivec3(i, j, k)); if (res != samples.end()) { float len = length(res->second - sample.second); if (len < 2 * r) { d = ::min(d, len); ball.push_back(res->second); } } } vec3 p_new = sample.second; for (int i = 0; i < t; ++i) { float tau = static_cast<float>(t - i) / t; vec3 cand = sample.second + tau * r * sampleUnitSphere(); if (f(cand) > 0.0f || is_surface) cand = f.project(cand); float d_ = 2.0 * r; for (auto &other : ball) d_ = ::min(d_, length(other - cand)); if (d_ > d && g(cand)) { p_new = cand; d = d_; } } if (ivec3(p_new / len) != ivec3(sample.second / len)) { samples.erase(sample.first); samples.insert(std::make_pair(p_new/ len, p_new)); } else sample.second = p_new; } } return; }
9c7ab45163530310fca871c4c2b4f348eb72b89b.cu
#include "sampler.h" #include <utility> #include <random> #include <map> #include <ctime> #include <cmath> #include <queue> using namespace glm; constexpr float PI = 3.1415926535897932384626433832795f; constexpr float PI_2 = 1.5707963267948966192313216916398f; vec3 sampleUnitSphere() { static std::default_random_engine eng(time(NULL)); static std::uniform_real_distribution<float> dis(0, 1); float u = 2 * PI * dis(eng); float v = acos(dis(eng) * 2.0f - 1.0f) ; return vec3(sin(v) * cos(u), sin(v) * sin(u), cos(v)); } // hash = x * (lz * ly) + y * ly + z float Grid::getNearest(const vec3 &point, const ivec3& v, const float &r) const { float dmin = 2000000.0f; const int depth = len / r + 1; for(int i = -depth; i <= depth; ++i) for(int j = -depth; j <= depth; ++j) for(int k = -depth; k <= depth; ++k) if((i || j || k)){ auto inc = ivec3(i, j, k); auto res = samples.find(v + inc); if (res != samples.end()) dmin = std::min(dmin, length(res->second - point)); } return dmin; } bool Grid::checkPoissonCriterion(const vec3 &sample, const ivec3 &v, const float &r) const { if (samples.count(v)) return false; auto a = getNearest(sample, v, r); return a > r; } std::vector<vec3> Grid::sampleSurface(const LevelSet &f, float r, int t, float e) { static std::default_random_engine gen(time(NULL)); static std::uniform_real_distribution<float> dis(0.0f, 1.0f); vec3 p; std::vector<vec3> res; // Iterate over the grid for(int x = lx; x <= rx; ++x) for(int y = ly; y <= ry; ++y) for(int z = lz; z <= rz; ++z) { Cube cell; int cnt = 0; for(int ix = 0; ix <= 1; ++ix) for(int iy = 0; iy <= 1; ++iy) for (int iz = 0; iz <= 1; ++iz) { auto ele = vec3(len * x, len * y, len * z) + len * vec3(ix, iy, iz); cell[cnt++] = ele; } float sign = f(cell[0]); unsigned int i; for (i = 1; i < cell.size() && f(cell[i]) * sign > 0.0f; ++i); if(i < cell.size()) { // level set change sign in this grid bool found = false; for (int j = 0; j < t && !found; ++j) { p = cell[0] + dis(gen) * (cell[1] - cell[0]) + dis(gen) * 
(cell[3] - cell[0]) + dis(gen) * (cell[4] - cell[0]); p = f.project(p); ivec3 coord = p / len; found = checkPoissonCriterion(p, coord, r); } if (found) { ivec3 coord = p / len; samples.insert(std::make_pair(coord, p)); do { found = false; p = f.project(p + e * r * f.getRandomTan(p)); coord = p / len; if (checkPoissonCriterion(p, coord, r)){ samples.insert(std::make_pair(coord, p)); found = true; } } while (found); found = false; } } } relaxSamples([](vec3) {return true;}, f, r, 5, t); for (auto &sample : samples) res.push_back(sample.second); samples.clear(); return std::move(res); } std::vector<vec3> Grid::sampleVolume(Judger g, const LevelSet &f, std::vector<vec3> &temp, const float &r, const int &k) { static std::default_random_engine eng(time(NULL)); std::uniform_real_distribution<float> dis(r, 2.0f * r); static std::default_random_engine engi(time(NULL)); static std::uniform_int_distribution<unsigned int> disi; std::vector<vec3> res; if (temp.empty()) return res; for (auto &v : temp) { ivec3 coord = v / len; samples.insert(std::make_pair(coord, v)); } for (int i = disi(engi) % temp.size(); !temp.empty(); i = disi(engi) % temp.size()) { bool found = false; vec3 st = temp[i]; for (int j = 0; j < k; ++j) { auto sample = st + dis(eng) * sampleUnitSphere(); ivec3 v = sample / len; if (f(sample) <= 0.0 && g(sample) && checkPoissonCriterion(sample, v, r)) { found = true; samples.insert(std::make_pair(v, sample)); temp.push_back(sample); res.push_back(sample); } } if (!found) { std::swap(temp[i], temp[temp.size() - 1]); temp.pop_back(); } if (temp.empty()) break; } samples.clear(); for (auto &entry : res) { ivec3 coord = entry / len; samples.insert(std::make_pair(coord, entry)); } res.clear(); relaxSamples(g, f, r, k, 5, false); for(auto &sample : samples) res.push_back(sample.second); samples.clear(); return std::move(res); } void Grid::relaxSamples(Judger g, const LevelSet &f, float r, int k, int t, bool is_surface) { std::vector<vec3> ball; ball.reserve(8); for 
(int i = 0; i < k; ++i) { for (auto &sample : samples) { ball.clear(); ivec3 coord = sample.second / len; float d = 2 * r; int depth = 2*r / len + 1; for (int i = -depth; i <= depth; ++i) for (int j = -depth; j <= depth; ++j) for (int k = -depth; k <= depth; ++k) if ( (i || j || k)) { auto res = samples.find(coord + ivec3(i, j, k)); if (res != samples.end()) { float len = length(res->second - sample.second); if (len < 2 * r) { d = std::min(d, len); ball.push_back(res->second); } } } vec3 p_new = sample.second; for (int i = 0; i < t; ++i) { float tau = static_cast<float>(t - i) / t; vec3 cand = sample.second + tau * r * sampleUnitSphere(); if (f(cand) > 0.0f || is_surface) cand = f.project(cand); float d_ = 2.0 * r; for (auto &other : ball) d_ = std::min(d_, length(other - cand)); if (d_ > d && g(cand)) { p_new = cand; d = d_; } } if (ivec3(p_new / len) != ivec3(sample.second / len)) { samples.erase(sample.first); samples.insert(std::make_pair(p_new/ len, p_new)); } else sample.second = p_new; } } return; }
1cf8dd2b579af7ad74de6cc257b8530dab77e0a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <stdio.h> #include <stdlib.h> #include <limits> #include <raft/cuda_utils.cuh> #include <raft/random/rng.cuh> #include <stats/minmax.cuh> #include "test_utils.h" namespace MLCommon { namespace Stats { ///@todo: need to add tests for verifying the column subsampling feature template <typename T> struct MinMaxInputs { T tolerance; int rows, cols; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MinMaxInputs<T>& dims) { return os; } template <typename T> __global__ void naiveMinMaxInitKernel(int ncols, T* globalmin, T* globalmax, T init_val) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= ncols) return; globalmin[tid] = init_val; globalmax[tid] = -init_val; } template <typename T> __global__ void naiveMinMaxKernel(const T* data, int nrows, int ncols, T* globalmin, T* globalmax) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int col = tid / nrows; if (col < ncols) { T val = data[tid]; if (!isnan(val)) { raft::myAtomicMin(&globalmin[col], val); raft::myAtomicMax(&globalmax[col], val); } } } template <typename T> void naiveMinMax( const T* data, int nrows, int ncols, T* globalmin, T* globalmax, hipStream_t stream) { const int TPB = 128; int nblks = 
raft::ceildiv(ncols, TPB); T init_val = std::numeric_limits<T>::max(); hipLaunchKernelGGL(( naiveMinMaxInitKernel), dim3(nblks), dim3(TPB), 0, stream, ncols, globalmin, globalmax, init_val); CUDA_CHECK(hipGetLastError()); nblks = raft::ceildiv(nrows * ncols, TPB); hipLaunchKernelGGL(( naiveMinMaxKernel), dim3(nblks), dim3(TPB), 0, stream, data, nrows, ncols, globalmin, globalmax); CUDA_CHECK(hipGetLastError()); } template <typename T> __global__ void nanKernel(T* data, const bool* mask, int len, T nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } template <typename T> class MinMaxTest : public ::testing::TestWithParam<MinMaxInputs<T>> { protected: MinMaxTest() : minmax_act(0, stream), minmax_ref(0, stream) {} void SetUp() override { params = ::testing::TestWithParam<MinMaxInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.rows * params.cols; CUDA_CHECK(hipStreamCreate(&stream)); rmm::device_uvector<T> data(len, stream); rmm::device_uvector<bool> mask(len, stream); minmax_act.resize(2 * params.cols, stream); minmax_ref.resize(2 * params.cols, stream); r.normal(data.data(), len, (T)0.0, (T)1.0, stream); T nan_prob = 0.01; r.bernoulli(mask.data(), len, nan_prob, stream); const int TPB = 256; hipLaunchKernelGGL(( nanKernel), dim3(raft::ceildiv(len, TPB)), dim3(TPB), 0, stream, data.data(), mask.data(), len, std::numeric_limits<T>::quiet_NaN()); CUDA_CHECK(hipPeekAtLastError()); naiveMinMax(data.data(), params.rows, params.cols, minmax_ref.data(), minmax_ref.data() + params.cols, stream); minmax<T, 512>(data.data(), nullptr, nullptr, params.rows, params.cols, params.rows, minmax_act.data(), minmax_act.data() + params.cols, nullptr, stream); } protected: MinMaxInputs<T> params; rmm::device_uvector<T> minmax_act; rmm::device_uvector<T> minmax_ref; hipStream_t stream = 0; }; const std::vector<MinMaxInputs<float>> inputsf = {{0.00001f, 1024, 32, 1234ULL}, {0.00001f, 1024, 64, 1234ULL}, 
{0.00001f, 1024, 128, 1234ULL}, {0.00001f, 1024, 256, 1234ULL}, {0.00001f, 1024, 512, 1234ULL}, {0.00001f, 1024, 1024, 1234ULL}, {0.00001f, 4096, 32, 1234ULL}, {0.00001f, 4096, 64, 1234ULL}, {0.00001f, 4096, 128, 1234ULL}, {0.00001f, 4096, 256, 1234ULL}, {0.00001f, 4096, 512, 1234ULL}, {0.00001f, 4096, 1024, 1234ULL}, {0.00001f, 8192, 32, 1234ULL}, {0.00001f, 8192, 64, 1234ULL}, {0.00001f, 8192, 128, 1234ULL}, {0.00001f, 8192, 256, 1234ULL}, {0.00001f, 8192, 512, 1234ULL}, {0.00001f, 8192, 1024, 1234ULL}, {0.00001f, 1024, 8192, 1234ULL}}; const std::vector<MinMaxInputs<double>> inputsd = {{0.0000001, 1024, 32, 1234ULL}, {0.0000001, 1024, 64, 1234ULL}, {0.0000001, 1024, 128, 1234ULL}, {0.0000001, 1024, 256, 1234ULL}, {0.0000001, 1024, 512, 1234ULL}, {0.0000001, 1024, 1024, 1234ULL}, {0.0000001, 4096, 32, 1234ULL}, {0.0000001, 4096, 64, 1234ULL}, {0.0000001, 4096, 128, 1234ULL}, {0.0000001, 4096, 256, 1234ULL}, {0.0000001, 4096, 512, 1234ULL}, {0.0000001, 4096, 1024, 1234ULL}, {0.0000001, 8192, 32, 1234ULL}, {0.0000001, 8192, 64, 1234ULL}, {0.0000001, 8192, 128, 1234ULL}, {0.0000001, 8192, 256, 1234ULL}, {0.0000001, 8192, 512, 1234ULL}, {0.0000001, 8192, 1024, 1234ULL}, {0.0000001, 1024, 8192, 1234ULL}}; typedef MinMaxTest<float> MinMaxTestF; TEST_P(MinMaxTestF, Result) { ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(), minmax_act.data(), 2 * params.cols, raft::CompareApprox<float>(params.tolerance))); } typedef MinMaxTest<double> MinMaxTestD; TEST_P(MinMaxTestD, Result) { ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(), minmax_act.data(), 2 * params.cols, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestD, ::testing::ValuesIn(inputsd)); } // end namespace Stats } // end namespace MLCommon
1cf8dd2b579af7ad74de6cc257b8530dab77e0a6.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <stdio.h> #include <stdlib.h> #include <limits> #include <raft/cuda_utils.cuh> #include <raft/random/rng.cuh> #include <stats/minmax.cuh> #include "test_utils.h" namespace MLCommon { namespace Stats { ///@todo: need to add tests for verifying the column subsampling feature template <typename T> struct MinMaxInputs { T tolerance; int rows, cols; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MinMaxInputs<T>& dims) { return os; } template <typename T> __global__ void naiveMinMaxInitKernel(int ncols, T* globalmin, T* globalmax, T init_val) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= ncols) return; globalmin[tid] = init_val; globalmax[tid] = -init_val; } template <typename T> __global__ void naiveMinMaxKernel(const T* data, int nrows, int ncols, T* globalmin, T* globalmax) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int col = tid / nrows; if (col < ncols) { T val = data[tid]; if (!isnan(val)) { raft::myAtomicMin(&globalmin[col], val); raft::myAtomicMax(&globalmax[col], val); } } } template <typename T> void naiveMinMax( const T* data, int nrows, int ncols, T* globalmin, T* globalmax, cudaStream_t stream) { const int TPB = 128; int nblks = raft::ceildiv(ncols, TPB); T init_val = std::numeric_limits<T>::max(); naiveMinMaxInitKernel<<<nblks, 
TPB, 0, stream>>>(ncols, globalmin, globalmax, init_val); CUDA_CHECK(cudaGetLastError()); nblks = raft::ceildiv(nrows * ncols, TPB); naiveMinMaxKernel<<<nblks, TPB, 0, stream>>>(data, nrows, ncols, globalmin, globalmax); CUDA_CHECK(cudaGetLastError()); } template <typename T> __global__ void nanKernel(T* data, const bool* mask, int len, T nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } template <typename T> class MinMaxTest : public ::testing::TestWithParam<MinMaxInputs<T>> { protected: MinMaxTest() : minmax_act(0, stream), minmax_ref(0, stream) {} void SetUp() override { params = ::testing::TestWithParam<MinMaxInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.rows * params.cols; CUDA_CHECK(cudaStreamCreate(&stream)); rmm::device_uvector<T> data(len, stream); rmm::device_uvector<bool> mask(len, stream); minmax_act.resize(2 * params.cols, stream); minmax_ref.resize(2 * params.cols, stream); r.normal(data.data(), len, (T)0.0, (T)1.0, stream); T nan_prob = 0.01; r.bernoulli(mask.data(), len, nan_prob, stream); const int TPB = 256; nanKernel<<<raft::ceildiv(len, TPB), TPB, 0, stream>>>( data.data(), mask.data(), len, std::numeric_limits<T>::quiet_NaN()); CUDA_CHECK(cudaPeekAtLastError()); naiveMinMax(data.data(), params.rows, params.cols, minmax_ref.data(), minmax_ref.data() + params.cols, stream); minmax<T, 512>(data.data(), nullptr, nullptr, params.rows, params.cols, params.rows, minmax_act.data(), minmax_act.data() + params.cols, nullptr, stream); } protected: MinMaxInputs<T> params; rmm::device_uvector<T> minmax_act; rmm::device_uvector<T> minmax_ref; cudaStream_t stream = 0; }; const std::vector<MinMaxInputs<float>> inputsf = {{0.00001f, 1024, 32, 1234ULL}, {0.00001f, 1024, 64, 1234ULL}, {0.00001f, 1024, 128, 1234ULL}, {0.00001f, 1024, 256, 1234ULL}, {0.00001f, 1024, 512, 1234ULL}, {0.00001f, 1024, 1024, 1234ULL}, {0.00001f, 4096, 32, 1234ULL}, {0.00001f, 4096, 64, 
1234ULL}, {0.00001f, 4096, 128, 1234ULL}, {0.00001f, 4096, 256, 1234ULL}, {0.00001f, 4096, 512, 1234ULL}, {0.00001f, 4096, 1024, 1234ULL}, {0.00001f, 8192, 32, 1234ULL}, {0.00001f, 8192, 64, 1234ULL}, {0.00001f, 8192, 128, 1234ULL}, {0.00001f, 8192, 256, 1234ULL}, {0.00001f, 8192, 512, 1234ULL}, {0.00001f, 8192, 1024, 1234ULL}, {0.00001f, 1024, 8192, 1234ULL}}; const std::vector<MinMaxInputs<double>> inputsd = {{0.0000001, 1024, 32, 1234ULL}, {0.0000001, 1024, 64, 1234ULL}, {0.0000001, 1024, 128, 1234ULL}, {0.0000001, 1024, 256, 1234ULL}, {0.0000001, 1024, 512, 1234ULL}, {0.0000001, 1024, 1024, 1234ULL}, {0.0000001, 4096, 32, 1234ULL}, {0.0000001, 4096, 64, 1234ULL}, {0.0000001, 4096, 128, 1234ULL}, {0.0000001, 4096, 256, 1234ULL}, {0.0000001, 4096, 512, 1234ULL}, {0.0000001, 4096, 1024, 1234ULL}, {0.0000001, 8192, 32, 1234ULL}, {0.0000001, 8192, 64, 1234ULL}, {0.0000001, 8192, 128, 1234ULL}, {0.0000001, 8192, 256, 1234ULL}, {0.0000001, 8192, 512, 1234ULL}, {0.0000001, 8192, 1024, 1234ULL}, {0.0000001, 1024, 8192, 1234ULL}}; typedef MinMaxTest<float> MinMaxTestF; TEST_P(MinMaxTestF, Result) { ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(), minmax_act.data(), 2 * params.cols, raft::CompareApprox<float>(params.tolerance))); } typedef MinMaxTest<double> MinMaxTestD; TEST_P(MinMaxTestD, Result) { ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(), minmax_act.data(), 2 * params.cols, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestD, ::testing::ValuesIn(inputsd)); } // end namespace Stats } // end namespace MLCommon
35b874d5ec302f5913bba1b7a19b7b92b5c7a548.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> const int numThreads = 512; typedef struct SimPlan { int device; int dataSize; int numBlocks; hipStream_t streamID; float *h_a; float *h_b; float *h_c; float *d_a; float *d_b; float *d_c; bool needDestruct = false; ~SimPlan() { if (needDestruct) { hipFree(this->d_a); hipFree(this->d_b); hipFree(this->d_c); delete[] h_a; delete[] h_b; delete[] h_c; } } }; void setData(SimPlan *plan) { plan->h_a = new float[plan->dataSize]; plan->h_b = new float[plan->dataSize]; plan->h_c = new float[plan->dataSize]; for (int i = 0; i < plan->dataSize; i++) { plan->h_a[i] = float(i / 10); plan->h_b[i] = float(i / 20); } hipMemcpy(plan->d_a, plan->h_a, plan->dataSize*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(plan->d_b, plan->h_b, plan->dataSize*sizeof(float), hipMemcpyHostToDevice); } void initializePlan( SimPlan *plan, int device_, int n_, hipStream_t streamID_ = 0) { plan->needDestruct = true; plan->device = device_; plan->dataSize = n_; plan->numBlocks = (plan->dataSize + numThreads - 1) / numThreads; if (streamID_ != 0) { plan->streamID = streamID_; } hipMalloc((void**)&plan->d_a, plan->dataSize*sizeof(float)); hipMalloc((void**)&plan->d_b, plan->dataSize*sizeof(float)); hipMalloc((void**)&plan->d_c, plan->dataSize*sizeof(float)); setData(plan); } __global__ void Kernel( SimPlan *plan) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= plan->dataSize) { return; } plan->d_c[i] = plan->d_a[i] + plan->d_b[i]; } int main() { SimPlan p1; hipStream_t s1; hipStreamCreate(&s1); initializePlan( &p1, 0, 10000, s1); Kernel << < p1.numBlocks, numThreads, 0, p1.streamID >> >(&p1); return 0; }
35b874d5ec302f5913bba1b7a19b7b92b5c7a548.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> const int numThreads = 512; typedef struct SimPlan { int device; int dataSize; int numBlocks; cudaStream_t streamID; float *h_a; float *h_b; float *h_c; float *d_a; float *d_b; float *d_c; bool needDestruct = false; ~SimPlan() { if (needDestruct) { cudaFree(this->d_a); cudaFree(this->d_b); cudaFree(this->d_c); delete[] h_a; delete[] h_b; delete[] h_c; } } }; void setData(SimPlan *plan) { plan->h_a = new float[plan->dataSize]; plan->h_b = new float[plan->dataSize]; plan->h_c = new float[plan->dataSize]; for (int i = 0; i < plan->dataSize; i++) { plan->h_a[i] = float(i / 10); plan->h_b[i] = float(i / 20); } cudaMemcpy(plan->d_a, plan->h_a, plan->dataSize*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(plan->d_b, plan->h_b, plan->dataSize*sizeof(float), cudaMemcpyHostToDevice); } void initializePlan( SimPlan *plan, int device_, int n_, cudaStream_t streamID_ = 0) { plan->needDestruct = true; plan->device = device_; plan->dataSize = n_; plan->numBlocks = (plan->dataSize + numThreads - 1) / numThreads; if (streamID_ != 0) { plan->streamID = streamID_; } cudaMalloc((void**)&plan->d_a, plan->dataSize*sizeof(float)); cudaMalloc((void**)&plan->d_b, plan->dataSize*sizeof(float)); cudaMalloc((void**)&plan->d_c, plan->dataSize*sizeof(float)); setData(plan); } __global__ void Kernel( SimPlan *plan) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= plan->dataSize) { return; } plan->d_c[i] = plan->d_a[i] + plan->d_b[i]; } int main() { SimPlan p1; cudaStream_t s1; cudaStreamCreate(&s1); initializePlan( &p1, 0, 10000, s1); Kernel << < p1.numBlocks, numThreads, 0, p1.streamID >> >(&p1); return 0; }
17dd70bac0762366f94948bcf373850692631777.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include "timer.h" #include <cassert> #include <iostream> // simpler kernel // each thread scans block template <class T> __global__ void blocked_scan_kernel(T * const d_arr, T * const d_sums_per_block, T * const d_out, int numPerBlock, int numberOfBlocks) { // this kernel sums+scans [x, x+numPerBlock) // places sum at d_sums_per_block[x] // and corresponding scan in d_out int x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= numberOfBlocks) return; T sum = 0; for (int i = 0; i < numPerBlock; ++i) { sum += d_arr[x * numPerBlock + i]; d_out[x * numPerBlock + i] = sum; } d_sums_per_block[x] = sum; } template <class T> __global__ void blocked_add_sums_to_scan_kernel(T * const d_arr, const T * const d_block_sums, int numPerBlock, int numberOfBlocks) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= numberOfBlocks) return; for (int i = 0; i < numPerBlock; ++i) { d_arr[x * numPerBlock + i] += d_block_sums[x]; } } struct GridParameters { size_t numPerBlock; size_t roundedN; size_t numberOfBlocks; size_t blockSize; size_t gridSize; GridParameters(size_t N) { // compute numPerBlock blockSize = 64; //MAX_BLOCK_SIZE; gridSize = min((unsigned int) round_up(N, blockSize), (unsigned int) MAX_GRID_SIZE); numPerBlock = round_up(N, blockSize * gridSize); roundedN = (N%numPerBlock==0) ? N : (N+numPerBlock - (N % numPerBlock)); numberOfBlocks = round_up(roundedN, numPerBlock); } GridParameters(size_t N, size_t _numPerBlock) { numPerBlock = _numPerBlock; roundedN = (N%numPerBlock==0) ? 
N : (N+numPerBlock - (N % numPerBlock)); numberOfBlocks = round_up(roundedN, numPerBlock); blockSize = min((unsigned int) numberOfBlocks, (unsigned int) MAX_BLOCK_SIZE); gridSize = round_up(numberOfBlocks, blockSize); } void display() { DISPLAY(roundedN); DISPLAY(numPerBlock); DISPLAY(numberOfBlocks); DISPLAY(blockSize); DISPLAY(gridSize); } }; float blocked_scan(const ull * const h_arr, ull * const h_out, const int N) { // round N to nearest multiple of blockSize GridParameters params(N); // params.display(); assert(params.blockSize <= 1024 && "too many elements per block"); assert(params.gridSize <= MAX_GRID_SIZE && "too many elements"); ull *d_arr, *d_block_sums, *d_out; size_t arraySize = params.roundedN * sizeof(ull); size_t gridSumSize = params.numberOfBlocks * sizeof(ull); checkCudaErrors(hipMalloc((void**)&d_arr, arraySize)); checkCudaErrors(hipMalloc((void**)&d_block_sums, gridSumSize)); checkCudaErrors(hipMalloc((void**)&d_out, arraySize)); checkCudaErrors(hipMemcpy(d_arr, h_arr, N * sizeof(ull), hipMemcpyHostToDevice)); checkCudaErrors(hipMemset(d_arr + N, 0, arraySize - (N * sizeof(ull)))); // zero excess space GpuTimer timer; timer.Start(); // want this to perform as few memory accesses per thread (start max # of threads?) 
hipLaunchKernelGGL(( blocked_scan_kernel<ull>), dim3(params.gridSize), dim3(params.blockSize), 0, 0, d_arr, d_block_sums, d_out, params.numPerBlock, params.numberOfBlocks); // scan together d_block_sums by hand on cpu if (params.numberOfBlocks > 1) { // figure this out ull *h_block_sums = (ull *) malloc(gridSumSize); checkCudaErrors(hipMemcpy(h_block_sums, d_block_sums, gridSumSize, hipMemcpyDeviceToHost)); for (int i = 1; i < params.numberOfBlocks; ++i) { h_block_sums[i] += h_block_sums[i-1]; } checkCudaErrors(hipMemcpy(d_block_sums, h_block_sums, gridSumSize, hipMemcpyHostToDevice)); free(h_block_sums); // want this to perform as few memory accesses per thread GridParameters params2(N - params.numPerBlock, params.numPerBlock); hipLaunchKernelGGL(( blocked_add_sums_to_scan_kernel), dim3(params2.gridSize), dim3(params2.blockSize), 0, 0, d_out + params.numPerBlock, d_block_sums, params.numPerBlock, params2.numberOfBlocks); } timer.Stop(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipMemcpy(h_out, d_out, N * sizeof(ull), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_arr)); checkCudaErrors(hipFree(d_block_sums)); checkCudaErrors(hipFree(d_out)); return timer.Elapsed(); }
17dd70bac0762366f94948bcf373850692631777.cu
#include "utils.h" #include "timer.h" #include <cassert> #include <iostream> // simpler kernel // each thread scans block template <class T> __global__ void blocked_scan_kernel(T * const d_arr, T * const d_sums_per_block, T * const d_out, int numPerBlock, int numberOfBlocks) { // this kernel sums+scans [x, x+numPerBlock) // places sum at d_sums_per_block[x] // and corresponding scan in d_out int x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= numberOfBlocks) return; T sum = 0; for (int i = 0; i < numPerBlock; ++i) { sum += d_arr[x * numPerBlock + i]; d_out[x * numPerBlock + i] = sum; } d_sums_per_block[x] = sum; } template <class T> __global__ void blocked_add_sums_to_scan_kernel(T * const d_arr, const T * const d_block_sums, int numPerBlock, int numberOfBlocks) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= numberOfBlocks) return; for (int i = 0; i < numPerBlock; ++i) { d_arr[x * numPerBlock + i] += d_block_sums[x]; } } struct GridParameters { size_t numPerBlock; size_t roundedN; size_t numberOfBlocks; size_t blockSize; size_t gridSize; GridParameters(size_t N) { // compute numPerBlock blockSize = 64; //MAX_BLOCK_SIZE; gridSize = min((unsigned int) round_up(N, blockSize), (unsigned int) MAX_GRID_SIZE); numPerBlock = round_up(N, blockSize * gridSize); roundedN = (N%numPerBlock==0) ? N : (N+numPerBlock - (N % numPerBlock)); numberOfBlocks = round_up(roundedN, numPerBlock); } GridParameters(size_t N, size_t _numPerBlock) { numPerBlock = _numPerBlock; roundedN = (N%numPerBlock==0) ? 
N : (N+numPerBlock - (N % numPerBlock)); numberOfBlocks = round_up(roundedN, numPerBlock); blockSize = min((unsigned int) numberOfBlocks, (unsigned int) MAX_BLOCK_SIZE); gridSize = round_up(numberOfBlocks, blockSize); } void display() { DISPLAY(roundedN); DISPLAY(numPerBlock); DISPLAY(numberOfBlocks); DISPLAY(blockSize); DISPLAY(gridSize); } }; float blocked_scan(const ull * const h_arr, ull * const h_out, const int N) { // round N to nearest multiple of blockSize GridParameters params(N); // params.display(); assert(params.blockSize <= 1024 && "too many elements per block"); assert(params.gridSize <= MAX_GRID_SIZE && "too many elements"); ull *d_arr, *d_block_sums, *d_out; size_t arraySize = params.roundedN * sizeof(ull); size_t gridSumSize = params.numberOfBlocks * sizeof(ull); checkCudaErrors(cudaMalloc((void**)&d_arr, arraySize)); checkCudaErrors(cudaMalloc((void**)&d_block_sums, gridSumSize)); checkCudaErrors(cudaMalloc((void**)&d_out, arraySize)); checkCudaErrors(cudaMemcpy(d_arr, h_arr, N * sizeof(ull), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemset(d_arr + N, 0, arraySize - (N * sizeof(ull)))); // zero excess space GpuTimer timer; timer.Start(); // want this to perform as few memory accesses per thread (start max # of threads?) 
blocked_scan_kernel<ull><<<params.gridSize, params.blockSize>>>(d_arr, d_block_sums, d_out, params.numPerBlock, params.numberOfBlocks); // scan together d_block_sums by hand on cpu if (params.numberOfBlocks > 1) { // figure this out ull *h_block_sums = (ull *) malloc(gridSumSize); checkCudaErrors(cudaMemcpy(h_block_sums, d_block_sums, gridSumSize, cudaMemcpyDeviceToHost)); for (int i = 1; i < params.numberOfBlocks; ++i) { h_block_sums[i] += h_block_sums[i-1]; } checkCudaErrors(cudaMemcpy(d_block_sums, h_block_sums, gridSumSize, cudaMemcpyHostToDevice)); free(h_block_sums); // want this to perform as few memory accesses per thread GridParameters params2(N - params.numPerBlock, params.numPerBlock); blocked_add_sums_to_scan_kernel<<<params2.gridSize, params2.blockSize>>>(d_out + params.numPerBlock, d_block_sums, params.numPerBlock, params2.numberOfBlocks); } timer.Stop(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(h_out, d_out, N * sizeof(ull), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_arr)); checkCudaErrors(cudaFree(d_block_sums)); checkCudaErrors(cudaFree(d_out)); return timer.Elapsed(); }
661b3e90a632639efbd43e55184128040afa8a41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "wall_force_collector.h" #include "utils/simple_serializer.h" #include "utils/time_stamp.h" #include <mirheo/core/datatypes.h> #include <mirheo/core/pvs/particle_vector.h> #include <mirheo/core/pvs/views/pv.h> #include <mirheo/core/simulation.h> #include <mirheo/core/utils/cuda_common.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/walls/interface.h> namespace mirheo { namespace WallForceCollector { __global__ void totalForce(PVview view, double3 *totalForce) { int tid = blockIdx.x * blockDim.x + threadIdx.x; real3 f {0._r, 0._r, 0._r}; if (tid < view.size) f = make_real3(view.forces[tid]); f = warpReduce(f, [](real a, real b) { return a + b; }); if (laneId() == 0) atomicAdd(totalForce, make_double3(f)); } } //namespace WallForceCollector WallForceCollectorPlugin::WallForceCollectorPlugin(const MirState *state, std::string name, std::string wallName, std::string frozenPvName, int sampleEvery, int dumpEvery) : SimulationPlugin(state, name), sampleEvery_(sampleEvery), dumpEvery_(dumpEvery), wallName_(wallName), frozenPvName_(frozenPvName) {} WallForceCollectorPlugin::~WallForceCollectorPlugin() = default; void WallForceCollectorPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm) { SimulationPlugin::setup(simulation, comm, interComm); wall_ = dynamic_cast<SDFBasedWall*>(simulation->getWallByNameOrDie(wallName_)); if (wall_ == nullptr) die("Plugin '%s' expects a SDF based wall (got '%s')\n", getCName(), wallName_.c_str()); pv_ = simulation->getPVbyNameOrDie(frozenPvName_); bounceForceBuffer_ = wall_->getCurrentBounceForce(); } void WallForceCollectorPlugin::afterIntegration(hipStream_t stream) { if (isTimeEvery(getState(), sampleEvery_)) { pvForceBuffer_.clear(stream); PVview view(pv_, pv_->local()); const int nthreads = 128; SAFE_KERNEL_LAUNCH( WallForceCollector::totalForce, getNblocks(view.size, nthreads), 
nthreads, 0, stream, view, pvForceBuffer_.devPtr() ); pvForceBuffer_ .downloadFromDevice(stream); bounceForceBuffer_->downloadFromDevice(stream); totalForce_ += pvForceBuffer_[0]; totalForce_ += (*bounceForceBuffer_)[0]; ++nsamples_; } needToDump_ = (isTimeEvery(getState(), dumpEvery_) && nsamples_ > 0); } void WallForceCollectorPlugin::serializeAndSend(__UNUSED hipStream_t stream) { if (needToDump_) { waitPrevSend(); SimpleSerializer::serialize(sendBuffer_, getState()->currentTime, nsamples_, totalForce_); send(sendBuffer_); needToDump_ = false; nsamples_ = 0; totalForce_ = make_double3(0, 0, 0); } } WallForceDumperPlugin::WallForceDumperPlugin(std::string name, std::string filename) : PostprocessPlugin(name) { auto status = fdump_.open(filename, "w"); if (status != FileWrapper::Status::Success) die("Could not open file '%s'", filename.c_str()); } void WallForceDumperPlugin::deserialize() { MirState::TimeType currentTime; int nsamples; double localForce[3], totalForce[3] = {0.0, 0.0, 0.0}; SimpleSerializer::deserialize(data_, currentTime, nsamples, localForce); MPI_Check( MPI_Reduce(localForce, totalForce, 3, MPI_DOUBLE, MPI_SUM, 0, comm_) ); if (rank_ == 0) { totalForce[0] /= (double)nsamples; totalForce[1] /= (double)nsamples; totalForce[2] /= (double)nsamples; fprintf(fdump_.get(), "%g %g %g %g\n", currentTime, totalForce[0], totalForce[1], totalForce[2]); fflush(fdump_.get()); } } } // namespace mirheo
661b3e90a632639efbd43e55184128040afa8a41.cu
#include "wall_force_collector.h" #include "utils/simple_serializer.h" #include "utils/time_stamp.h" #include <mirheo/core/datatypes.h> #include <mirheo/core/pvs/particle_vector.h> #include <mirheo/core/pvs/views/pv.h> #include <mirheo/core/simulation.h> #include <mirheo/core/utils/cuda_common.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/walls/interface.h> namespace mirheo { namespace WallForceCollector { __global__ void totalForce(PVview view, double3 *totalForce) { int tid = blockIdx.x * blockDim.x + threadIdx.x; real3 f {0._r, 0._r, 0._r}; if (tid < view.size) f = make_real3(view.forces[tid]); f = warpReduce(f, [](real a, real b) { return a + b; }); if (laneId() == 0) atomicAdd(totalForce, make_double3(f)); } } //namespace WallForceCollector WallForceCollectorPlugin::WallForceCollectorPlugin(const MirState *state, std::string name, std::string wallName, std::string frozenPvName, int sampleEvery, int dumpEvery) : SimulationPlugin(state, name), sampleEvery_(sampleEvery), dumpEvery_(dumpEvery), wallName_(wallName), frozenPvName_(frozenPvName) {} WallForceCollectorPlugin::~WallForceCollectorPlugin() = default; void WallForceCollectorPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm) { SimulationPlugin::setup(simulation, comm, interComm); wall_ = dynamic_cast<SDFBasedWall*>(simulation->getWallByNameOrDie(wallName_)); if (wall_ == nullptr) die("Plugin '%s' expects a SDF based wall (got '%s')\n", getCName(), wallName_.c_str()); pv_ = simulation->getPVbyNameOrDie(frozenPvName_); bounceForceBuffer_ = wall_->getCurrentBounceForce(); } void WallForceCollectorPlugin::afterIntegration(cudaStream_t stream) { if (isTimeEvery(getState(), sampleEvery_)) { pvForceBuffer_.clear(stream); PVview view(pv_, pv_->local()); const int nthreads = 128; SAFE_KERNEL_LAUNCH( WallForceCollector::totalForce, getNblocks(view.size, nthreads), nthreads, 0, stream, view, pvForceBuffer_.devPtr() ); pvForceBuffer_ 
.downloadFromDevice(stream); bounceForceBuffer_->downloadFromDevice(stream); totalForce_ += pvForceBuffer_[0]; totalForce_ += (*bounceForceBuffer_)[0]; ++nsamples_; } needToDump_ = (isTimeEvery(getState(), dumpEvery_) && nsamples_ > 0); } void WallForceCollectorPlugin::serializeAndSend(__UNUSED cudaStream_t stream) { if (needToDump_) { waitPrevSend(); SimpleSerializer::serialize(sendBuffer_, getState()->currentTime, nsamples_, totalForce_); send(sendBuffer_); needToDump_ = false; nsamples_ = 0; totalForce_ = make_double3(0, 0, 0); } } WallForceDumperPlugin::WallForceDumperPlugin(std::string name, std::string filename) : PostprocessPlugin(name) { auto status = fdump_.open(filename, "w"); if (status != FileWrapper::Status::Success) die("Could not open file '%s'", filename.c_str()); } void WallForceDumperPlugin::deserialize() { MirState::TimeType currentTime; int nsamples; double localForce[3], totalForce[3] = {0.0, 0.0, 0.0}; SimpleSerializer::deserialize(data_, currentTime, nsamples, localForce); MPI_Check( MPI_Reduce(localForce, totalForce, 3, MPI_DOUBLE, MPI_SUM, 0, comm_) ); if (rank_ == 0) { totalForce[0] /= (double)nsamples; totalForce[1] /= (double)nsamples; totalForce[2] /= (double)nsamples; fprintf(fdump_.get(), "%g %g %g %g\n", currentTime, totalForce[0], totalForce[1], totalForce[2]); fflush(fdump_.get()); } } } // namespace mirheo
042118c3ccbfff71c42dc600b5be0df0c29d1153.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <pairwise_transform.h> __device__ double op(double d1,double d2,double *params) { return d2 / d1; } __device__ double op(double d1,double *params) { return d1; } extern "C" __global__ void rdiv_strided_double(int n, int xOffset,int yOffset,double *dx, double *dy,int incx,int incy,double *params,double *result) { transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result); }
042118c3ccbfff71c42dc600b5be0df0c29d1153.cu
#include <pairwise_transform.h> __device__ double op(double d1,double d2,double *params) { return d2 / d1; } __device__ double op(double d1,double *params) { return d1; } extern "C" __global__ void rdiv_strided_double(int n, int xOffset,int yOffset,double *dx, double *dy,int incx,int incy,double *params,double *result) { transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result); }
b0c107b1e2596fbf06ef79cfc07d1db53ac27632.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/native/hip/GridSampler.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <c10/macros/Macros.h> namespace at { namespace native { using namespace at::cuda::detail; using at::native::detail::GridSamplerInterpolation; using at::native::detail::GridSamplerPadding; namespace { template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void grid_sampler_2d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sH = output.strides[2]; index_t out_sW = output.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const index_t grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = 
grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(::floor(ix)); index_t iy_nw = static_cast<index_t>(::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<scalar_t>(0); if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t iy_nearest = static_cast<index_t>(::round(iy)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; 
} else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void grid_sampler_3d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sD = output.strides[2]; index_t out_sH = output.strides[3]; index_t out_sW = output.strides[4]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const index_t grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); iz = grid_sampler_compute_source_index(iz, inp_D, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // 
get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(::floor(ix)); index_t iy_tnw = static_cast<index_t>(::floor(iy)); index_t iz_tnw = static_cast<index_t>(::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse *out_ptr_NCDHW = static_cast<scalar_t>(0); if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { 
*out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t iy_nearest = static_cast<index_t>(::round(iy)); index_t iz_nearest = static_cast<index_t>(::round(iz)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t, typename index_t> 
C10_LAUNCH_BOUNDS_1(1024) __global__ void grid_sampler_2d_backward_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sH = grad_output.strides[2]; index_t gOut_sW = grad_output.strides[3]; index_t gInp_sN = grad_input.strides[0]; index_t gInp_sC = grad_input.strides[1]; index_t gInp_sH = grad_input.strides[2]; index_t gInp_sW = grad_input.strides[3]; index_t gGrid_sW = grad_grid.strides[2]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const auto grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; // multipliers for gradients on ix and iy scalar_t gix_mult, giy_mult; ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult); iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, 
NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(::floor(ix)); index_t iy_nw = static_cast<index_t>(::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { scalar_t gOut = *gOut_ptr_NCHW; // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut); safe_add_2d(gInp_ptr_NC, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut); safe_add_2d(gInp_ptr_NC, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut); safe_add_2d(gInp_ptr_NC, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut); // calculate grad_grid if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - 
iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t iy_nearest = static_cast<index_t>(::round(iy)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (index_t c = 0; c < C; ++c, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. 
directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NHW[1] = static_cast<scalar_t>(0); } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void grid_sampler_3d_backward_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sD = grad_output.strides[2]; index_t gOut_sH = grad_output.strides[3]; index_t gOut_sW = grad_output.strides[4]; index_t gInp_sN = grad_input.strides[0]; index_t gInp_sC = grad_input.strides[1]; index_t gInp_sD = grad_input.strides[2]; index_t gInp_sH = grad_input.strides[3]; index_t gInp_sW = grad_input.strides[4]; index_t gGrid_sW = grad_grid.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const auto grid_offset = n 
* grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // multipliers for gradients on ix, iy, and iz scalar_t gix_mult, giy_mult, giz_mult; ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult); iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult); iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(::floor(ix)); index_t iy_tnw = static_cast<index_t>(::floor(iy)); index_t iz_tnw = static_cast<index_t>(::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - 
iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; // calculate bilinear weighted pixel value and set output pixel for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) { scalar_t gOut = *gOut_ptr_NCDHW; // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut); safe_add_3d(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut); safe_add_3d(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut); safe_add_3d(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut); safe_add_3d(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut); safe_add_3d(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut); safe_add_3d(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut); safe_add_3d(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut); // calculate grad_grid if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * 
inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val 
* (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { auto ix_nearest = static_cast<index_t>(::round(ix)); auto iy_nearest = static_cast<index_t>(::round(iy)); auto iz_nearest = static_cast<index_t>(::round(iz)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC) { // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0); } } } } // namespace // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
// Forward 4-D grid sample (HIP build). input is NCHW, grid is (N, H_out,
// W_out, 2); returns an (N, C, H_out, W_out) tensor. Dispatches the kernel
// with 32-bit indexing when every tensor's numel fits, else 64-bit.
Tensor grid_sampler_2d_cuda(const Tensor& input, const Tensor& grid,
                            int64_t interpolation_mode, int64_t padding_mode,
                            bool align_corners) {
  const auto N = input.size(0);
  const auto C = input.size(1);
  const auto H = grid.size(1);
  const auto W = grid.size(2);
  auto output = at::empty({N, C, H, W}, input.options());
  const int64_t count = N * H * W;  // one thread per output spatial location
  if (count > 0) {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] {
      // Prefer 32-bit index arithmetic: cheaper on device when safe.
      const bool fits_32bit = canUse32BitIndexMath(input) &&
                              canUse32BitIndexMath(grid) &&
                              canUse32BitIndexMath(output);
      if (fits_32bit) {
        hipLaunchKernelGGL((grid_sampler_2d_kernel<scalar_t>),
            dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0,
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            static_cast<int>(count),
            getTensorInfo<scalar_t, int>(input),
            getTensorInfo<scalar_t, int>(grid),
            getTensorInfo<scalar_t, int>(output),
            static_cast<GridSamplerInterpolation>(interpolation_mode),
            static_cast<GridSamplerPadding>(padding_mode),
            align_corners);
      } else {
        hipLaunchKernelGGL((grid_sampler_2d_kernel<scalar_t>),
            dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0,
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            count,
            getTensorInfo<scalar_t, int64_t>(input),
            getTensorInfo<scalar_t, int64_t>(grid),
            getTensorInfo<scalar_t, int64_t>(output),
            static_cast<GridSamplerInterpolation>(interpolation_mode),
            static_cast<GridSamplerPadding>(padding_mode),
            align_corners);
      }
    });
  }
  return output;
}

// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
// Forward 5-D grid sample (HIP build). input is NCDHW, grid is
// (N, D_out, H_out, W_out, 3); returns (N, C, D_out, H_out, W_out).
// Dispatches with 32-bit indexing when every tensor's numel fits, else 64-bit.
Tensor grid_sampler_3d_cuda(const Tensor& input, const Tensor& grid,
                            int64_t interpolation_mode, int64_t padding_mode,
                            bool align_corners) {
  auto N = input.size(0);
  auto D = grid.size(1);
  auto H = grid.size(2);
  auto W = grid.size(3);
  auto output = at::empty({N, input.size(1), D, H, W}, input.options());
  int64_t count = N * D * H * W;  // one thread per output spatial location
  if (count > 0) {
    // Fix: the dispatch name previously read "grid_sampler_2d_cuda" (copy-paste
    // from the 2d path), which mislabeled dtype-dispatch error messages.
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_cuda", [&] {
      if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) &&
          canUse32BitIndexMath(output)) {
        hipLaunchKernelGGL((grid_sampler_3d_kernel<scalar_t>),
            dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0,
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            static_cast<int>(count),
            getTensorInfo<scalar_t, int>(input),
            getTensorInfo<scalar_t, int>(grid),
            getTensorInfo<scalar_t, int>(output),
            static_cast<GridSamplerInterpolation>(interpolation_mode),
            static_cast<GridSamplerPadding>(padding_mode),
            align_corners);
      } else {
        hipLaunchKernelGGL((grid_sampler_3d_kernel<scalar_t>),
            dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0,
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            count,
            getTensorInfo<scalar_t, int64_t>(input),
            getTensorInfo<scalar_t, int64_t>(grid),
            getTensorInfo<scalar_t, int64_t>(output),
            static_cast<GridSamplerInterpolation>(interpolation_mode),
            static_cast<GridSamplerPadding>(padding_mode),
            align_corners);
      }
    });
  }
  return output;
}

// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor> grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_2d_backward_cuda"); auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT); int64_t count = N * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } else { hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(grad_input), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } }); } return std::make_tuple(grad_input, grad_grid); } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
std::tuple<Tensor, Tensor> grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_3d_backward_cuda"); auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT); int64_t count = N * D * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } else { hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(grad_input), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } }); } return std::make_tuple(grad_input, grad_grid); } }} // namespace at::native
b0c107b1e2596fbf06ef79cfc07d1db53ac27632.cu
#include <ATen/ATen.h> #include <ATen/native/cuda/GridSampler.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <c10/macros/Macros.h> namespace at { namespace native { using namespace at::cuda::detail; using at::native::detail::GridSamplerInterpolation; using at::native::detail::GridSamplerPadding; namespace { template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void grid_sampler_2d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sH = output.strides[2]; index_t out_sW = output.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const index_t grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); if (interpolation_mode == 
GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(::floor(ix)); index_t iy_nw = static_cast<index_t>(::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<scalar_t>(0); if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t iy_nearest = static_cast<index_t>(::round(iy)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t, typename 
index_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void grid_sampler_3d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sD = output.strides[2]; index_t out_sH = output.strides[3]; index_t out_sW = output.strides[4]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const index_t grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); iz = grid_sampler_compute_source_index(iz, inp_D, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add 
top-bottom index_t ix_tnw = static_cast<index_t>(::floor(ix)); index_t iy_tnw = static_cast<index_t>(::floor(iy)); index_t iz_tnw = static_cast<index_t>(::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse *out_ptr_NCDHW = static_cast<scalar_t>(0); if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if 
(within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t iy_nearest = static_cast<index_t>(::round(iy)); index_t iz_nearest = static_cast<index_t>(::round(iz)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void grid_sampler_2d_backward_kernel( const index_t 
nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sH = grad_output.strides[2]; index_t gOut_sW = grad_output.strides[3]; index_t gInp_sN = grad_input.strides[0]; index_t gInp_sC = grad_input.strides[1]; index_t gInp_sH = grad_input.strides[2]; index_t gInp_sW = grad_input.strides[3]; index_t gGrid_sW = grad_grid.strides[2]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const auto grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; // multipliers for gradients on ix and iy scalar_t gix_mult, giy_mult; ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult); iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(::floor(ix)); 
index_t iy_nw = static_cast<index_t>(::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { scalar_t gOut = *gOut_ptr_NCHW; // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut); safe_add_2d(gInp_ptr_NC, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut); safe_add_2d(gInp_ptr_NC, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut); safe_add_2d(gInp_ptr_NC, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut); // calculate grad_grid if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } // assuming grad_grid is contiguous 
// thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t iy_nearest = static_cast<index_t>(::round(iy)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (index_t c = 0; c < C; ++c, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NHW[1] = static_cast<scalar_t>(0); } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void grid_sampler_3d_backward_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t 
inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sD = grad_output.strides[2]; index_t gOut_sH = grad_output.strides[3]; index_t gOut_sW = grad_output.strides[4]; index_t gInp_sN = grad_input.strides[0]; index_t gInp_sC = grad_input.strides[1]; index_t gInp_sD = grad_input.strides[2]; index_t gInp_sH = grad_input.strides[3]; index_t gInp_sW = grad_input.strides[4]; index_t gGrid_sW = grad_grid.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const auto grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // multipliers for gradients on ix, iy, and iz scalar_t gix_mult, giy_mult, giz_mult; ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult); iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult); iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(::floor(ix)); index_t iy_tnw = static_cast<index_t>(::floor(iy)); index_t iz_tnw = static_cast<index_t>(::floor(iz)); index_t ix_tne = ix_tnw + 1; 
index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; // calculate bilinear weighted pixel value and set output pixel for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) { scalar_t gOut = *gOut_ptr_NCDHW; // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut); safe_add_3d(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut); safe_add_3d(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut); safe_add_3d(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, 
gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut); safe_add_3d(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut); safe_add_3d(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut); safe_add_3d(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut); safe_add_3d(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut); // calculate grad_grid if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - 
iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. 
directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { auto ix_nearest = static_cast<index_t>(::round(ix)); auto iy_nearest = static_cast<index_t>(::round(iy)); auto iz_nearest = static_cast<index_t>(::round(iz)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC) { // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0); } } } } // namespace // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
Tensor grid_sampler_2d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { auto N = input.size(0); auto C = input.size(1); auto H = grid.size(1); auto W = grid.size(2); auto output = at::empty({N, C, H, W}, input.options()); int64_t count = N * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(output)) { grid_sampler_2d_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } else { grid_sampler_2d_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
Tensor grid_sampler_3d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto output = at::empty({N, input.size(1), D, H, W}, input.options()); int64_t count = N * D * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(output)) { grid_sampler_3d_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } else { grid_sampler_3d_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
std::tuple<Tensor, Tensor> grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_2d_backward_cuda"); auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT); int64_t count = N * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { grid_sampler_2d_backward_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } else { grid_sampler_2d_backward_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(grad_input), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } }); } return std::make_tuple(grad_input, grad_grid); } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
std::tuple<Tensor, Tensor> grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_3d_backward_cuda"); auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT); int64_t count = N * D * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { grid_sampler_3d_backward_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } else { grid_sampler_3d_backward_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(grad_input), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } }); } return std::make_tuple(grad_input, grad_grid); } }} // namespace at::native
38c663ad4b271191d2c54d96d2e93049819f025c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define COARSE_CLA_CONSO 0 #include <classical/classical_amg_level.h> #include <amg_level.h> #include <basic_types.h> #include <cutil.h> #include <multiply.h> #include <transpose.h> #include <truncate.h> #include <blas.h> #include <util.h> #include <thrust/logical.h> #include <thrust/remove.h> #include <thrust/adjacent_difference.h> #include <thrust_wrapper.h> #include <thrust/extrema.h> // for minmax_element #include <algorithm> #include <assert.h> #include <matrix_io.h> #include <csr_multiply.h> #include <thrust/logical.h> #include <thrust/count.h> #include <thrust/sort.h> #include <profile.h> #include <distributed/glue.h> namespace amgx { namespace classical { void __global__ profiler_tag_1() {} void __global__ profiler_tag_2() {} void __global__ profiler_tag_3() {} struct is_zero { __host__ __device__ bool operator()(const double &v) { return fabs(v) < 1e-10; } }; #define AMGX_CAL_BLOCK_SIZE 256 /* There might be a situation where not all local_to_global_map columns are present in the matrix (because some rows were removed and the columns in these rows are therefore no longer present. This kernel creates the flags array that marks existing columns. 
*/ template<typename ind_t> __global__ __launch_bounds__( AMGX_CAL_BLOCK_SIZE ) void flag_existing_local_to_global_columns(ind_t n, ind_t *row_offsets, ind_t *col_indices, ind_t *flags) { ind_t i, j, s, e, col; //go through the matrix for (i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x) { s = row_offsets[i]; e = row_offsets[i + 1]; for (j = s; j < e; j++) { col = col_indices[j]; //flag columns outside of the square part (which correspond to local_to_global_map) if (col >= n) { flags[col - n] = 1; } } } } /* Renumber the indices based on the prefix-scan/sum of the flags array */ template<typename ind_t> __global__ __launch_bounds__( AMGX_CAL_BLOCK_SIZE ) void compress_existing_local_columns(ind_t n, ind_t *row_offsets, ind_t *col_indices, ind_t *flags) { ind_t i, j, s, e, col; //go through the matrix for (i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x) { s = row_offsets[i]; e = row_offsets[i + 1]; for (j = s; j < e; j++) { col = col_indices[j]; //flag columns outside of the square part (which correspond to local_to_global_map) if (col >= n) { col_indices[j] = n + flags[col - n]; } } } } /* compress the local to global columns indices based on the prefix-scan/sum of the flags array */ template<typename ind_t, typename ind64_t> __global__ __launch_bounds__( AMGX_CAL_BLOCK_SIZE ) void compress_existing_local_to_global_columns(ind_t n, ind64_t *l2g_in, ind64_t *l2g_out, ind_t *flags) { ind_t i; //go through the arrays (and copy the updated indices when needed) for (i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x) { if (flags[i] != flags[i + 1]) { l2g_out[flags[i]] = l2g_in[i]; } } } template <class T_Config> Selector<T_Config> *chooseAggressiveSelector(AMG_Config *m_cfg, std::string std_scope) { AMG_Config cfg; std::string cfg_string(""); cfg_string += "default:"; // if necessary, allocate aggressive selector + interpolator bool use_pmis = false, use_hmis = false; // default 
argument - use the same selector as normal coarsening std::string agg_selector = m_cfg->AMG_Config::getParameter<std::string>("aggressive_selector", std_scope); if (agg_selector == "DEFAULT") { std::string std_selector = m_cfg->AMG_Config::getParameter<std::string>("selector", std_scope); if (std_selector == "PMIS") { cfg_string += "selector=AGGRESSIVE_PMIS"; use_pmis = true; } else if (std_selector == "HMIS") { cfg_string += "selector=AGGRESSIVE_HMIS"; use_hmis = true; } else { FatalError("Must use either PMIS or HMIS algorithms with aggressive coarsening", AMGX_ERR_NOT_IMPLEMENTED); } } // otherwise use specified selector else if (agg_selector == "PMIS") { cfg_string += "selector=AGGRESSIVE_PMIS"; use_pmis = true; } else if (agg_selector == "HMIS") { cfg_string += "selector=AGGRESSIVE_HMIS"; use_hmis = true; } else { FatalError("Invalid aggressive coarsener selected", AMGX_ERR_NOT_IMPLEMENTED); } // check a selector has been selected if (!use_pmis && !use_hmis) { FatalError("No aggressive selector chosen", AMGX_ERR_NOT_IMPLEMENTED); } cfg.parseParameterString(cfg_string.c_str()); // now allocate the selector and interpolator return classical::SelectorFactory<T_Config>::allocate(cfg, "default" /*std_scope*/); } template <class T_Config> Interpolator<T_Config> *chooseAggressiveInterpolator(AMG_Config *m_cfg, std::string std_scope) { // temporary config and pointer to main config AMG_Config cfg; std::string cfg_string(""); cfg_string += "default:"; // Set the interpolator cfg_string += "interpolator="; cfg_string += m_cfg->AMG_Config::getParameter<std::string>("aggressive_interpolator", std_scope); cfg.parseParameterString(cfg_string.c_str()); // now allocate the selector and interpolator return InterpolatorFactory<T_Config>::allocate(cfg, "default" /*std_scope*/); } template <class T_Config> Classical_AMG_Level_Base<T_Config>::Classical_AMG_Level_Base(AMG_Class *amg) : AMG_Level<T_Config>(amg) { strength = StrengthFactory<T_Config>::allocate(*(amg->m_cfg), 
amg->m_cfg_scope); selector = classical::SelectorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); interpolator = InterpolatorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); trunc_factor = amg->m_cfg->AMG_Config::getParameter<double>("interp_truncation_factor", amg->m_cfg_scope); max_elmts = amg->m_cfg->AMG_Config::getParameter<int>("interp_max_elements", amg->m_cfg_scope); max_row_sum = amg->m_cfg->AMG_Config::getParameter<double>("max_row_sum", amg->m_cfg_scope); num_aggressive_levels = amg->m_cfg->AMG_Config::getParameter<int>("aggressive_levels", amg->m_cfg_scope); } template <class T_Config> Classical_AMG_Level_Base<T_Config>::~Classical_AMG_Level_Base() { delete strength; delete selector; delete interpolator; } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::transfer_level(AMG_Level<TConfig1> *ref_lvl) { Classical_AMG_Level_Base<TConfig1> *ref_cla_lvl = dynamic_cast<Classical_AMG_Level_Base<TConfig1>*>(ref_lvl); this->P.copy(ref_cla_lvl->P); this->R.copy(ref_cla_lvl->R); this->m_s_con.copy(ref_cla_lvl->m_s_con); this->m_scratch.copy(ref_cla_lvl->m_scratch); this->m_cf_map.copy(ref_cla_lvl->m_cf_map); } /**************************************** * Computes the A, P, and R operators ***************************************/ template <class T_Config> void Classical_AMG_Level_Base<T_Config>::createCoarseVertices() { if (AMG_Level<T_Config>::getLevelIndex() < this->num_aggressive_levels) { if (selector) { delete selector; } selector = chooseAggressiveSelector<T_Config>(AMG_Level<T_Config>::amg->m_cfg, AMG_Level<T_Config>::amg->m_cfg_scope); } Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space( ) )->getA( ); Matrix<T_Config> &A = this->getA(); int size_all, size_full, nnz_full; if (!A.is_matrix_singleGPU()) { int offset; // Need to get number of 2-ring rows A.getOffsetAndSizeForView(ALL, &offset, &size_all); A.getOffsetAndSizeForView(FULL, &offset, &size_full); A.getNnzForView(FULL, 
&nnz_full); } else { size_all = A.get_num_rows(); size_full = A.get_num_rows(); nnz_full = A.get_num_nz(); } this->m_cf_map.resize(size_all); this->m_s_con.resize(nnz_full); this->m_scratch.resize(size_full); thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0); cudaCheckError(); thrust::fill(this->m_s_con.begin(), this->m_s_con.end(), false); cudaCheckError(); thrust::fill(this->m_scratch.begin(), this->m_scratch.end(), 0); cudaCheckError(); markCoarseFinePoints(); } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::createCoarseMatrices() { // allocate aggressive interpolator if needed if (AMG_Level<T_Config>::getLevelIndex() < this->num_aggressive_levels) { if (interpolator) { delete interpolator; } interpolator = chooseAggressiveInterpolator<T_Config>(AMG_Level<T_Config>::amg->m_cfg, AMG_Level<T_Config>::amg->m_cfg_scope); } Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space( ) )->getA( ); Matrix<T_Config> &A = this->getA(); /* WARNING: exit if D1 interpolator is selected in distributed setting */ std::string s(""); s += AMG_Level<T_Config>::amg->m_cfg->AMG_Config::getParameter<std::string>("interpolator", AMG_Level<T_Config>::amg->m_cfg_scope); if (A.is_matrix_distributed() && (s.compare("D1") == 0)) { FatalError("D1 interpolation is not supported in distributed settings", AMGX_ERR_NOT_IMPLEMENTED); } /* WARNING: do not recompute prolongation (P) and restriction (R) when you are reusing the level structure (structure_reuse_levels > 0) */ if (this->isReuseLevel() == false) { computeProlongationOperator(); } // Compute Restriction operator and coarse matrix Ac if (!this->A->is_matrix_distributed() || this->A->manager->get_num_partitions() == 1) { /* WARNING: see above warning. */ if (this->isReuseLevel() == false) { computeRestrictionOperator(); } computeAOperator(); } else { /* WARNING: notice that in this case the computeRestructionOperator() is called inside computeAOperator_distributed() routine. 
*/ computeAOperator_distributed(); } // we also need to renumber columns of P and rows or R correspondingly since we changed RAP halo columns // for R we just keep track of renumbering in and exchange proper vectors in restriction // for P we actually need to renumber columns for prolongation: if (A.is_matrix_distributed() && this->A->manager->get_num_partitions() > 1) { RAP.set_initialized(0); // Renumber the owned nodes as interior and boundary (renumber rows and columns) // We are passing reuse flag to not create neighbours list from scratch, but rather update based on new halos RAP.manager->renumberMatrixOneRing(this->isReuseLevel()); // Renumber the column indices of P and shuffle rows of P RAP.manager->renumber_P_R(this->P, this->R, A); // Create the B2L_maps for RAP RAP.manager->createOneRingHaloRows(); RAP.manager->getComms()->set_neighbors(RAP.manager->num_neighbors()); RAP.setView(OWNED); RAP.set_initialized(1); // update # of columns in P - this is necessary for correct CSR multiply P.set_initialized(0); int new_num_cols = thrust_wrapper::reduce(P.col_indices.begin(), P.col_indices.end(), int(0), thrust::maximum<int>()) + 1; cudaCheckError(); P.set_num_cols(new_num_cols); P.set_initialized(1); } RAP.copyAuxData(&A); if (!A.is_matrix_singleGPU() && RAP.manager == NULL) { RAP.manager = new DistributedManager<TConfig>(); } if (this->getA().is_matrix_singleGPU()) { this->m_next_level_size = this->getNextLevel(typename Matrix<TConfig>::memory_space() )->getA().get_num_rows() * this->getNextLevel(typename Matrix<TConfig>::memory_space() )->getA().get_block_dimy(); } else { // m_next_level_size is the size that will be used to allocate xc, bc vectors int size, offset; this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().getOffsetAndSizeForView(FULL, &offset, &size); this->m_next_level_size = size * this->getNextLevel(typename Matrix<TConfig>::memory_space() )->getA().get_block_dimy(); } } template <class T_Config> void 
Classical_AMG_Level_Base<T_Config>::markCoarseFinePoints() { Matrix<T_Config> &A = this->getA(); //allocate necessary memory typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector; FVector weights; if (!A.is_matrix_singleGPU()) { int size, offset; A.getOffsetAndSizeForView(FULL, &offset, &size); // size should now contain the number of 1-ring rows weights.resize(size); } else { weights.resize(A.get_num_rows()); } thrust::fill(weights.begin(), weights.end(), 0.0); cudaCheckError(); // extend A to include 1st ring nodes // compute strong connections and weights if (!A.is_matrix_singleGPU()) { ViewType oldView = A.currentView(); A.setView(FULL); strength->computeStrongConnectionsAndWeights(A, this->m_s_con, weights, this->max_row_sum); A.setView(oldView); } else { strength->computeStrongConnectionsAndWeights(A, this->m_s_con, weights, this->max_row_sum); } // Exchange the one-ring of the weights if (!A.is_matrix_singleGPU()) { A.manager->exchange_halo(weights, weights.tag); } //mark coarse and fine points selector->markCoarseFinePoints(A, weights, this->m_s_con, this->m_cf_map, this->m_scratch); // we do resize cf_map to zero later, so we are saving separate copy this->m_cf_map.dirtybit = 1; // Do a two ring exchange of cf_map if (!A.is_matrix_singleGPU()) { A.manager->exchange_halo_2ring(this->m_cf_map, m_cf_map.tag); } // Modify cf_map array such that coarse points are assigned a local index, while fine points entries are not touched selector->renumberAndCountCoarsePoints(this->m_cf_map, this->m_num_coarse_vertices, A.get_num_rows()); } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::computeProlongationOperator() { this->Profile.tic("computeP"); Matrix<T_Config> &A = this->getA(); //allocate necessary memory typedef Vector<typename TConfig::template 
setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector; //generate the interpolation matrix interpolator->generateInterpolationMatrix(A, this->m_cf_map, this->m_s_con, this->m_scratch, P, AMG_Level<TConfig>::amg); this->m_cf_map.clear(); this->m_cf_map.shrink_to_fit(); this->m_scratch.clear(); this->m_scratch.shrink_to_fit(); this->m_s_con.clear(); this->m_s_con.shrink_to_fit(); profileSubphaseTruncateP(); // truncate based on max # of elements if desired if (this->max_elmts > 0 && P.get_num_rows() > 0) { Truncate<TConfig>::truncateByMaxElements(P, this->max_elmts); } if (!P.isLatencyHidingEnabled(*this->amg->m_cfg)) { // This will cause bsrmv_with_mask to not do latency hiding P.setInteriorView(OWNED); P.setExteriorView(OWNED); } profileSubphaseNone(); this->Profile.toc("computeP"); } /********************************************** * computes R=P^T **********************************************/ template <class T_Config> void Classical_AMG_Level_Base<T_Config>::computeRestrictionOperator() { this->Profile.tic("computeR"); R.set_initialized(0); P.setView(OWNED); transpose(P, R, P.get_num_rows()); if (!R.isLatencyHidingEnabled(*this->amg->m_cfg)) { // This will cause bsrmv_with_mask_restriction to not do latency hiding R.setInteriorView(OWNED); R.setExteriorView(OWNED); } if(P.is_matrix_distributed()) { // Setup the number of non-zeros in R using stub DistributedManager R.manager = new DistributedManager<T_Config>(); int nrows_owned = P.manager->halo_offsets[0]; int nrows_full = P.manager->halo_offsets[P.manager->neighbors.size()]; int nz_full = R.row_offsets[nrows_full]; int nz_owned = R.row_offsets[nrows_owned]; R.manager->setViewSizes(nrows_owned, nz_owned, nrows_owned, nz_owned, nrows_full, nz_full, R.get_num_rows(), R.get_num_nz()); } R.set_initialized(1); this->Profile.toc("computeR"); } 
/********************************************** * computes the Galerkin product: A_c=R*A*P **********************************************/ template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Classical_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1() { this->Profile.tic("computeA"); Matrix<TConfig_h> RA; RA.addProps(CSR); RA.set_block_dimx(this->getA().get_block_dimx()); RA.set_block_dimy(this->getA().get_block_dimy()); Matrix<TConfig_h> &RAP = this->getNextLevel( typename Matrix<TConfig_h>::memory_space( ) )->getA( ); RAP.addProps(CSR); RAP.set_block_dimx(this->getA().get_block_dimx()); RAP.set_block_dimy(this->getA().get_block_dimy()); Matrix<TConfig_h> &Atmp = this->getA(); multiplyMM(this->R, this->getA(), RA); multiplyMM(RA, this->P, RAP); RAP.sortByRowAndColumn(); RAP.set_initialized(1); this->Profile.toc("computeA"); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Classical_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1_distributed() { FatalError("Distributed classical AMG not implemented for host\n", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Classical_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1() { this->Profile.tic("computeA"); Matrix<TConfig_d> &RAP = this->getNextLevel( device_memory( ) )->getA( ); RAP.addProps(CSR); RAP.set_block_dimx(this->getA().get_block_dimx()); RAP.set_block_dimy(this->getA().get_block_dimy()); this->R.set_initialized( 0 ); this->R.addProps( CSR ); this->R.set_initialized( 1 ); this->P.set_initialized( 0 ); this->P.addProps( CSR ); this->P.set_initialized( 1 ); void *wk = AMG_Level<TConfig_d>::amg->getCsrWorkspace(); if ( wk == NULL ) { wk = CSR_Multiply<TConfig_d>::csr_workspace_create( 
*(AMG_Level<TConfig_d>::amg->m_cfg), AMG_Level<TConfig_d>::amg->m_cfg_scope ); AMG_Level<TConfig_d>::amg->setCsrWorkspace( wk ); } int spmm_verbose = this->amg->m_cfg->AMG_Config::getParameter<int>("spmm_verbose", this->amg->m_cfg_scope); if ( spmm_verbose ) { typedef typename Matrix<TConfig_d>::IVector::const_iterator Iterator; typedef thrust::pair<Iterator, Iterator> Result; std::ostringstream buffer; buffer << "SPMM: Level " << this->getLevelIndex() << std::endl; if ( this->getLevelIndex() == 0 ) { device_vector_alloc<int> num_nz( this->getA().row_offsets.size() ); thrust::adjacent_difference( this->getA().row_offsets.begin(), this->getA().row_offsets.end(), num_nz.begin() ); cudaCheckError(); Result result = thrust::minmax_element( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); int min_size = *result.first; int max_size = *result.second; int sum = thrust_wrapper::reduce( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); double avg_size = double(sum) / this->getA().get_num_rows(); buffer << "SPMM: A: " << std::endl; buffer << "SPMM: Matrix avg row size: " << avg_size << std::endl; buffer << "SPMM: Matrix min row size: " << min_size << std::endl; buffer << "SPMM: Matrix max row size: " << max_size << std::endl; } device_vector_alloc<int> num_nz( this->P.row_offsets.size() ); thrust::adjacent_difference( this->P.row_offsets.begin(), this->P.row_offsets.end(), num_nz.begin() ); cudaCheckError(); Result result = thrust::minmax_element( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); int min_size = *result.first; int max_size = *result.second; int sum = thrust_wrapper::reduce( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); double avg_size = double(sum) / this->P.get_num_rows(); buffer << "SPMM: P: " << std::endl; buffer << "SPMM: Matrix avg row size: " << avg_size << std::endl; buffer << "SPMM: Matrix min row size: " << min_size << std::endl; buffer << "SPMM: Matrix max row size: " << max_size << std::endl; num_nz.resize( 
this->R.row_offsets.size() ); thrust::adjacent_difference( this->R.row_offsets.begin(), this->R.row_offsets.end(), num_nz.begin() ); cudaCheckError(); result = thrust::minmax_element( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); min_size = *result.first; max_size = *result.second; sum = thrust_wrapper::reduce( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); avg_size = double(sum) / this->R.get_num_rows(); buffer << "SPMM: R: " << std::endl; buffer << "SPMM: Matrix avg row size: " << avg_size << std::endl; buffer << "SPMM: Matrix min row size: " << min_size << std::endl; buffer << "SPMM: Matrix max row size: " << max_size << std::endl; amgx_output( buffer.str().c_str(), static_cast<int>( buffer.str().length() ) ); } RAP.set_initialized( 0 ); CSR_Multiply<TConfig_d>::csr_galerkin_product( this->R, this->getA(), this->P, RAP, NULL, NULL, NULL, NULL, NULL, NULL, wk ); RAP.set_initialized( 1 ); int spmm_no_sort = this->amg->m_cfg->AMG_Config::getParameter<int>("spmm_no_sort", this->amg->m_cfg_scope); this->Profile.toc("computeA"); } /********************************************** * computes the restriction: rr=R*r **********************************************/ template <class T_Config> void Classical_AMG_Level_Base<T_Config>::restrictResidual(VVector &r, VVector &rr) { // we need to resize residual vector to make sure it can store halo rows to be sent if (!P.is_matrix_singleGPU()) { typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); #if COARSE_CLA_CONSO int desired_size ; if (this->getNextLevel(MemorySpace())->isConsolidationLevel()) { desired_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets_before_glue[Ac.manager->neighbors_before_glue.size()] * rr.get_block_size()); } else { desired_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()] * rr.get_block_size()); } #else int 
desired_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()] * rr.get_block_size()); #endif rr.resize(desired_size); } #if 1 this->Profile.tic("restrictRes"); // Disable speculative send of rr if (P.is_matrix_singleGPU()) { multiply( R, r, rr); } else { multiply_with_mask_restriction( R, r, rr, P); } #endif // exchange halo residuals & add residual contribution from neighbors rr.dirtybit = 1; if (!P.is_matrix_singleGPU()) { int desired_size = P.manager->halo_offsets[P.manager->neighbors.size()] * rr.get_block_size(); if (rr.size() < desired_size) { rr.resize(desired_size); } } this->Profile.toc("restrictRes"); } struct is_minus_one { __host__ __device__ bool operator()(const int &x) { return x == -1; } }; template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Classical_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1_distributed() { Matrix<TConfig_d> &A = this->getA(); Matrix<TConfig_d> &P = this->P; Matrix<TConfig_d> &RAP = this->getNextLevel( device_memory( ) )->getA( ); RAP.addProps(CSR); RAP.set_block_dimx(this->getA().get_block_dimx()); RAP.set_block_dimy(this->getA().get_block_dimy()); IndexType num_parts = A.manager->get_num_partitions(); IndexType num_neighbors = A.manager->num_neighbors(); IndexType my_rank = A.manager->global_id(); // OWNED includes interior and boundary A.setView(OWNED); int num_owned_coarse_pts = P.manager->halo_offsets[0]; int num_owned_fine_pts = A.manager->halo_offsets[0]; // Initialize RAP.manager if (RAP.manager == NULL) { RAP.manager = new DistributedManager<TConfig_d>(); } RAP.manager->A = &RAP; RAP.manager->setComms(A.manager->getComms()); RAP.manager->set_global_id(my_rank); RAP.manager->set_num_partitions(num_parts); RAP.manager->part_offsets_h = P.manager->part_offsets_h; RAP.manager->part_offsets = P.manager->part_offsets; 
RAP.manager->set_base_index(RAP.manager->part_offsets_h[my_rank]); RAP.manager->set_index_range(num_owned_coarse_pts); RAP.manager->num_rows_global = RAP.manager->part_offsets_h[num_parts]; // -------------------------------------------------------------------- // Using the B2L_maps of matrix A, identify the rows of P that need to be sent to neighbors, // so that they can compute A*P // Once rows of P are identified, convert the column indices to global indices, and send them to neighbors // --------------------------------------------------------------------------- // Copy some information about the manager of P, since we don't want to modify those IVector_h P_neighbors = P.manager->neighbors; I64Vector_h P_halo_ranges_h = P.manager->halo_ranges_h; I64Vector_d P_halo_ranges = P.manager->halo_ranges; RAP.manager->local_to_global_map = P.manager->local_to_global_map; IVector_h P_halo_offsets = P.manager->halo_offsets; // Create a temporary distributed arranger DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; prep->exchange_halo_rows_P(A, this->P, RAP.manager->local_to_global_map, P_neighbors, P_halo_ranges_h, P_halo_ranges, P_halo_offsets, RAP.manager->part_offsets_h, RAP.manager->part_offsets, num_owned_coarse_pts, RAP.manager->part_offsets_h[my_rank]); cudaCheckError(); // At this point, we can compute RAP_full which contains some rows that will need to be sent to neighbors // i.e. RAP_full = [ RAP_int ] // [ RAP_ext ] // RAP is [ RAP_int ] + [RAP_ext_received_from_neighbors] // We can reuse the serial galerkin product since R, A and P use local indices // TODO: latency hiding (i.e. 
compute RAP_ext, exchange_matrix_halo, then do RAP_int) /* WARNING: do not recompute prolongation (P) and restriction (R) when you are reusing the level structure (structure_reuse_levels > 0) */ /* We force for matrix P to have only owned rows to be seen for the correct galerkin product computation*/ this->P.set_initialized(0); this->P.set_num_rows(num_owned_fine_pts); this->P.addProps( CSR ); this->P.set_initialized(1); if (this->isReuseLevel() == false) { this->R.set_initialized( 0 ); this->R.addProps( CSR ); // Take the tranpose of P to get R // Single-GPU transpose, no mpi exchange this->computeRestrictionOperator(); this->R.set_initialized( 1 ); } this->Profile.tic("computeA"); Matrix<TConfig_d> RAP_full; // Initialize the workspace needed for galerkin product void *wk = AMG_Level<TConfig_d>::amg->getCsrWorkspace(); if ( wk == NULL ) { wk = CSR_Multiply<TConfig_d>::csr_workspace_create( *(AMG_Level<TConfig_d>::amg->m_cfg), AMG_Level<TConfig_d>::amg->m_cfg_scope ); AMG_Level<TConfig_d>::amg->setCsrWorkspace( wk ); } // Single-GPU RAP, no mpi exchange RAP_full.set_initialized( 0 ); /* WARNING: Since A is reordered (into interior and boundary nodes), while R and P are not reordered, you must unreorder A when performing R*A*P product in ordre to obtain the correct result. 
*/ CSR_Multiply<TConfig_d>::csr_galerkin_product( this->R, this->getA(), this->P, RAP_full, /* permutation for rows of R, A and P */ NULL, NULL /*&(this->getA().manager->renumbering)*/, NULL, /* permutation for cols of R, A and P */ NULL, NULL /*&(this->getA().manager->inverse_renumbering)*/, NULL, wk ); RAP_full.set_initialized( 1 ); this->Profile.toc("computeA"); // ---------------------------------------------------------------------------------------------- // Now, send rows of RAP_full requireq by neighbors, received rows from neighbors and create RAP // ---------------------------------------------------------------------------------------------- prep->exchange_RAP_ext(RAP, RAP_full, A, this->P, P_halo_offsets, RAP.manager->local_to_global_map, P_neighbors, P_halo_ranges_h, P_halo_ranges, RAP.manager->part_offsets_h, RAP.manager->part_offsets, num_owned_coarse_pts, RAP.manager->part_offsets_h[my_rank], wk); // Delete temporary distributed arranger delete prep; /* WARNING: The RAP matrix generated at this point contains extra rows (that correspond to rows of R, that was obtained by locally transposing P). This rows are ignored by setting the # of matrix rows to be smaller, so that they correspond to number of owned coarse nodes. This should be fine, but it leaves holes in the matrix as there might be columns that belong to the extra rows that now do not belong to the smaller matrix with number of owned coarse nodes rows. The same is trued about the local_to_global_map. These two data structures match at this point. However, in the next calls local_to_global (exclusively) will be used to geberate B2L_maps (wihtout going through column indices) which creates extra elements in the B2L that simply do not exist in the new matrices. I strongly suspect this is the reason fore the bug. The below fix simply compresses the matrix so that there are no holes in it, or in the local_2_global_map. */ //mark local_to_global_columns that exist in the owned coarse nodes rows. 
IndexType nrow = RAP.get_num_rows(); IndexType ncol = RAP.get_num_cols(); IndexType nl2g = ncol - nrow; if (nl2g > 0) { IVector l2g_p(nl2g + 1, 0); //+1 is needed for prefix_sum/exclusive_scan I64Vector l2g_t(nl2g, 0); IndexType nblocks = (nrow + AMGX_CAL_BLOCK_SIZE - 1) / AMGX_CAL_BLOCK_SIZE; if (nblocks > 0) hipLaunchKernelGGL(( flag_existing_local_to_global_columns<int>) , dim3(nblocks), dim3(AMGX_CAL_BLOCK_SIZE), 0, 0, nrow, RAP.row_offsets.raw(), RAP.col_indices.raw(), l2g_p.raw()); cudaCheckError(); /* //slow version of the above kernel for(int ii=0; ii<nrow; ii++){ int s = RAP.row_offsets[ii]; int e = RAP.row_offsets[ii+1]; for (int jj=s; jj<e; jj++) { int col = RAP.col_indices[jj]; if (col>=nrow){ int kk = col-RAP.get_num_rows(); l2g_p[kk] = 1; } } } cudaCheckError(); */ //create a pointer map for their location using prefix sum thrust_wrapper::exclusive_scan(l2g_p.begin(), l2g_p.end(), l2g_p.begin()); int new_nl2g = l2g_p[nl2g]; //compress the columns using the pointer map if (nblocks > 0) hipLaunchKernelGGL(( compress_existing_local_columns<int>) , dim3(nblocks), dim3(AMGX_CAL_BLOCK_SIZE), 0, 0, nrow, RAP.row_offsets.raw(), RAP.col_indices.raw(), l2g_p.raw()); cudaCheckError(); /* //slow version of the above kernel for(int ii=0; ii<nrow; ii++){ int s = RAP.row_offsets[ii]; int e = RAP.row_offsets[ii+1]; for (int jj=s; jj<e; jj++) { int col = RAP.col_indices[jj]; if (col>=nrow){ int kk = col-RAP.get_num_rows(); RAP.col_indices[jj] = nrow+l2g_p[kk]; } } } cudaCheckError(); */ //adjust matrix size (number of columns) accordingly RAP.set_initialized(0); RAP.set_num_cols(nrow + new_nl2g); RAP.set_initialized(1); //compress local_to_global_map using the pointer map nblocks = (nl2g + AMGX_CAL_BLOCK_SIZE - 1) / AMGX_CAL_BLOCK_SIZE; if (nblocks > 0) hipLaunchKernelGGL(( compress_existing_local_to_global_columns<int, int64_t>) , dim3(nblocks), dim3(AMGX_CAL_BLOCK_SIZE), 0, 0, nl2g, RAP.manager->local_to_global_map.raw(), l2g_t.raw(), l2g_p.raw()); cudaCheckError(); 
thrust::copy(l2g_t.begin(), l2g_t.begin() + new_nl2g, RAP.manager->local_to_global_map.begin()); cudaCheckError(); /* //slow version of the above kernel (through Thrust) for(int ii=0; ii<(l2g_p.size()-1); ii++){ if (l2g_p[ii] != l2g_p[ii+1]){ RAP.manager->local_to_global_map[l2g_p[ii]] = RAP.manager->local_to_global_map[ii]; } } cudaCheckError(); */ //adjust local_to_global_map size accordingly RAP.manager->local_to_global_map.resize(new_nl2g); } } /********************************************** * prolongates the error: x+=P*e **********************************************/ template <class T_Config> void Classical_AMG_Level_Base<T_Config>::prolongateAndApplyCorrection(VVector &e, VVector &bc, VVector &x, VVector &tmp) { this->Profile.tic("proCorr"); // Use P.manager to exchange halo of e before doing P // (since P has columns belonging to one of P.neighbors) e.dirtybit = 1; if (!P.is_matrix_singleGPU()) { // get coarse matrix typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); #if COARSE_CLA_CONSO int e_size; if (this->getNextLevel(MemorySpace())->isConsolidationLevel()) { e_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets_before_glue[Ac.manager->neighbors_before_glue.size()]) * e.get_block_size(); } else { e_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()]) * e.get_block_size(); } if (e.size() < e_size) { e.resize(e_size); } #else int e_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()]) * e.get_block_size(); e.resize(e_size); #endif } if (P.is_matrix_singleGPU()) { if (e.size() > 0) { multiply( P, e, tmp); } } else { multiply_with_mask( P, e, tmp); } // get owned num rows for fine matrix int owned_size; if (this->A->is_matrix_distributed()) { int owned_offset; P.manager->getOffsetAndSizeForView(OWNED, &owned_offset, 
&owned_size); } else { owned_size = x.size(); } //apply axpby(x, tmp, x, ValueType(1), ValueType(1), 0, owned_size); this->Profile.toc("proCorr"); x.dirtybit = 1; } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::computeAOperator() { if (this->A->get_block_size() == 1) { computeAOperator_1x1(); } else { FatalError("Classical AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::computeAOperator_distributed() { if (this->A->get_block_size() == 1) { computeAOperator_1x1_distributed(); } else { FatalError("Classical AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::consolidateVector(VVector &x) { #ifdef AMGX_WITH_MPI #if COARSE_CLA_CONSO typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &A = this->getA(); Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); MPI_Comm comm, temp_com; comm = Ac.manager->getComms()->get_mpi_comm(); temp_com = compute_glue_matrices_communicator(Ac); glue_vector(Ac, comm, x, temp_com); #endif #endif } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::unconsolidateVector(VVector &x) { #ifdef AMGX_WITH_MPI #if COARSE_CLA_CONSO typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &A = this->getA(); Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); MPI_Comm comm, temp_com; comm = Ac.manager->getComms()->get_mpi_comm(); temp_com = compute_glue_matrices_communicator(Ac); unglue_vector(Ac, comm, x, temp_com, x); #endif #endif } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class Classical_AMG_Level_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class Classical_AMG_Level<TemplateMode<CASE>::Type>; 
AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace classical } // namespace amgx
38c663ad4b271191d2c54d96d2e93049819f025c.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define COARSE_CLA_CONSO 0 #include <classical/classical_amg_level.h> #include <amg_level.h> #include <basic_types.h> #include <cutil.h> #include <multiply.h> #include <transpose.h> #include <truncate.h> #include <blas.h> #include <util.h> #include <thrust/logical.h> #include <thrust/remove.h> #include <thrust/adjacent_difference.h> #include <thrust_wrapper.h> #include <thrust/extrema.h> // for minmax_element #include <algorithm> #include <assert.h> #include <matrix_io.h> #include <csr_multiply.h> #include <thrust/logical.h> #include <thrust/count.h> #include <thrust/sort.h> #include <profile.h> #include <distributed/glue.h> namespace amgx { namespace classical { void __global__ profiler_tag_1() {} void __global__ profiler_tag_2() {} void __global__ profiler_tag_3() {} struct is_zero { __host__ __device__ bool operator()(const double &v) { return fabs(v) < 1e-10; } }; #define AMGX_CAL_BLOCK_SIZE 256 /* There might be a situation where not all local_to_global_map columns are present in the matrix (because some rows were removed and the columns in these rows are therefore no longer present. This kernel creates the flags array that marks existing columns. 
*/ template<typename ind_t> __global__ __launch_bounds__( AMGX_CAL_BLOCK_SIZE ) void flag_existing_local_to_global_columns(ind_t n, ind_t *row_offsets, ind_t *col_indices, ind_t *flags) { ind_t i, j, s, e, col; //go through the matrix for (i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x) { s = row_offsets[i]; e = row_offsets[i + 1]; for (j = s; j < e; j++) { col = col_indices[j]; //flag columns outside of the square part (which correspond to local_to_global_map) if (col >= n) { flags[col - n] = 1; } } } } /* Renumber the indices based on the prefix-scan/sum of the flags array */ template<typename ind_t> __global__ __launch_bounds__( AMGX_CAL_BLOCK_SIZE ) void compress_existing_local_columns(ind_t n, ind_t *row_offsets, ind_t *col_indices, ind_t *flags) { ind_t i, j, s, e, col; //go through the matrix for (i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x) { s = row_offsets[i]; e = row_offsets[i + 1]; for (j = s; j < e; j++) { col = col_indices[j]; //flag columns outside of the square part (which correspond to local_to_global_map) if (col >= n) { col_indices[j] = n + flags[col - n]; } } } } /* compress the local to global columns indices based on the prefix-scan/sum of the flags array */ template<typename ind_t, typename ind64_t> __global__ __launch_bounds__( AMGX_CAL_BLOCK_SIZE ) void compress_existing_local_to_global_columns(ind_t n, ind64_t *l2g_in, ind64_t *l2g_out, ind_t *flags) { ind_t i; //go through the arrays (and copy the updated indices when needed) for (i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x) { if (flags[i] != flags[i + 1]) { l2g_out[flags[i]] = l2g_in[i]; } } } template <class T_Config> Selector<T_Config> *chooseAggressiveSelector(AMG_Config *m_cfg, std::string std_scope) { AMG_Config cfg; std::string cfg_string(""); cfg_string += "default:"; // if necessary, allocate aggressive selector + interpolator bool use_pmis = false, use_hmis = false; // default 
argument - use the same selector as normal coarsening std::string agg_selector = m_cfg->AMG_Config::getParameter<std::string>("aggressive_selector", std_scope); if (agg_selector == "DEFAULT") { std::string std_selector = m_cfg->AMG_Config::getParameter<std::string>("selector", std_scope); if (std_selector == "PMIS") { cfg_string += "selector=AGGRESSIVE_PMIS"; use_pmis = true; } else if (std_selector == "HMIS") { cfg_string += "selector=AGGRESSIVE_HMIS"; use_hmis = true; } else { FatalError("Must use either PMIS or HMIS algorithms with aggressive coarsening", AMGX_ERR_NOT_IMPLEMENTED); } } // otherwise use specified selector else if (agg_selector == "PMIS") { cfg_string += "selector=AGGRESSIVE_PMIS"; use_pmis = true; } else if (agg_selector == "HMIS") { cfg_string += "selector=AGGRESSIVE_HMIS"; use_hmis = true; } else { FatalError("Invalid aggressive coarsener selected", AMGX_ERR_NOT_IMPLEMENTED); } // check a selector has been selected if (!use_pmis && !use_hmis) { FatalError("No aggressive selector chosen", AMGX_ERR_NOT_IMPLEMENTED); } cfg.parseParameterString(cfg_string.c_str()); // now allocate the selector and interpolator return classical::SelectorFactory<T_Config>::allocate(cfg, "default" /*std_scope*/); } template <class T_Config> Interpolator<T_Config> *chooseAggressiveInterpolator(AMG_Config *m_cfg, std::string std_scope) { // temporary config and pointer to main config AMG_Config cfg; std::string cfg_string(""); cfg_string += "default:"; // Set the interpolator cfg_string += "interpolator="; cfg_string += m_cfg->AMG_Config::getParameter<std::string>("aggressive_interpolator", std_scope); cfg.parseParameterString(cfg_string.c_str()); // now allocate the selector and interpolator return InterpolatorFactory<T_Config>::allocate(cfg, "default" /*std_scope*/); } template <class T_Config> Classical_AMG_Level_Base<T_Config>::Classical_AMG_Level_Base(AMG_Class *amg) : AMG_Level<T_Config>(amg) { strength = StrengthFactory<T_Config>::allocate(*(amg->m_cfg), 
amg->m_cfg_scope); selector = classical::SelectorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); interpolator = InterpolatorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); trunc_factor = amg->m_cfg->AMG_Config::getParameter<double>("interp_truncation_factor", amg->m_cfg_scope); max_elmts = amg->m_cfg->AMG_Config::getParameter<int>("interp_max_elements", amg->m_cfg_scope); max_row_sum = amg->m_cfg->AMG_Config::getParameter<double>("max_row_sum", amg->m_cfg_scope); num_aggressive_levels = amg->m_cfg->AMG_Config::getParameter<int>("aggressive_levels", amg->m_cfg_scope); } template <class T_Config> Classical_AMG_Level_Base<T_Config>::~Classical_AMG_Level_Base() { delete strength; delete selector; delete interpolator; } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::transfer_level(AMG_Level<TConfig1> *ref_lvl) { Classical_AMG_Level_Base<TConfig1> *ref_cla_lvl = dynamic_cast<Classical_AMG_Level_Base<TConfig1>*>(ref_lvl); this->P.copy(ref_cla_lvl->P); this->R.copy(ref_cla_lvl->R); this->m_s_con.copy(ref_cla_lvl->m_s_con); this->m_scratch.copy(ref_cla_lvl->m_scratch); this->m_cf_map.copy(ref_cla_lvl->m_cf_map); } /**************************************** * Computes the A, P, and R operators ***************************************/ template <class T_Config> void Classical_AMG_Level_Base<T_Config>::createCoarseVertices() { if (AMG_Level<T_Config>::getLevelIndex() < this->num_aggressive_levels) { if (selector) { delete selector; } selector = chooseAggressiveSelector<T_Config>(AMG_Level<T_Config>::amg->m_cfg, AMG_Level<T_Config>::amg->m_cfg_scope); } Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space( ) )->getA( ); Matrix<T_Config> &A = this->getA(); int size_all, size_full, nnz_full; if (!A.is_matrix_singleGPU()) { int offset; // Need to get number of 2-ring rows A.getOffsetAndSizeForView(ALL, &offset, &size_all); A.getOffsetAndSizeForView(FULL, &offset, &size_full); A.getNnzForView(FULL, 
&nnz_full); } else { size_all = A.get_num_rows(); size_full = A.get_num_rows(); nnz_full = A.get_num_nz(); } this->m_cf_map.resize(size_all); this->m_s_con.resize(nnz_full); this->m_scratch.resize(size_full); thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0); cudaCheckError(); thrust::fill(this->m_s_con.begin(), this->m_s_con.end(), false); cudaCheckError(); thrust::fill(this->m_scratch.begin(), this->m_scratch.end(), 0); cudaCheckError(); markCoarseFinePoints(); } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::createCoarseMatrices() { // allocate aggressive interpolator if needed if (AMG_Level<T_Config>::getLevelIndex() < this->num_aggressive_levels) { if (interpolator) { delete interpolator; } interpolator = chooseAggressiveInterpolator<T_Config>(AMG_Level<T_Config>::amg->m_cfg, AMG_Level<T_Config>::amg->m_cfg_scope); } Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space( ) )->getA( ); Matrix<T_Config> &A = this->getA(); /* WARNING: exit if D1 interpolator is selected in distributed setting */ std::string s(""); s += AMG_Level<T_Config>::amg->m_cfg->AMG_Config::getParameter<std::string>("interpolator", AMG_Level<T_Config>::amg->m_cfg_scope); if (A.is_matrix_distributed() && (s.compare("D1") == 0)) { FatalError("D1 interpolation is not supported in distributed settings", AMGX_ERR_NOT_IMPLEMENTED); } /* WARNING: do not recompute prolongation (P) and restriction (R) when you are reusing the level structure (structure_reuse_levels > 0) */ if (this->isReuseLevel() == false) { computeProlongationOperator(); } // Compute Restriction operator and coarse matrix Ac if (!this->A->is_matrix_distributed() || this->A->manager->get_num_partitions() == 1) { /* WARNING: see above warning. */ if (this->isReuseLevel() == false) { computeRestrictionOperator(); } computeAOperator(); } else { /* WARNING: notice that in this case the computeRestructionOperator() is called inside computeAOperator_distributed() routine. 
*/ computeAOperator_distributed(); } // we also need to renumber columns of P and rows or R correspondingly since we changed RAP halo columns // for R we just keep track of renumbering in and exchange proper vectors in restriction // for P we actually need to renumber columns for prolongation: if (A.is_matrix_distributed() && this->A->manager->get_num_partitions() > 1) { RAP.set_initialized(0); // Renumber the owned nodes as interior and boundary (renumber rows and columns) // We are passing reuse flag to not create neighbours list from scratch, but rather update based on new halos RAP.manager->renumberMatrixOneRing(this->isReuseLevel()); // Renumber the column indices of P and shuffle rows of P RAP.manager->renumber_P_R(this->P, this->R, A); // Create the B2L_maps for RAP RAP.manager->createOneRingHaloRows(); RAP.manager->getComms()->set_neighbors(RAP.manager->num_neighbors()); RAP.setView(OWNED); RAP.set_initialized(1); // update # of columns in P - this is necessary for correct CSR multiply P.set_initialized(0); int new_num_cols = thrust_wrapper::reduce(P.col_indices.begin(), P.col_indices.end(), int(0), thrust::maximum<int>()) + 1; cudaCheckError(); P.set_num_cols(new_num_cols); P.set_initialized(1); } RAP.copyAuxData(&A); if (!A.is_matrix_singleGPU() && RAP.manager == NULL) { RAP.manager = new DistributedManager<TConfig>(); } if (this->getA().is_matrix_singleGPU()) { this->m_next_level_size = this->getNextLevel(typename Matrix<TConfig>::memory_space() )->getA().get_num_rows() * this->getNextLevel(typename Matrix<TConfig>::memory_space() )->getA().get_block_dimy(); } else { // m_next_level_size is the size that will be used to allocate xc, bc vectors int size, offset; this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().getOffsetAndSizeForView(FULL, &offset, &size); this->m_next_level_size = size * this->getNextLevel(typename Matrix<TConfig>::memory_space() )->getA().get_block_dimy(); } } template <class T_Config> void 
Classical_AMG_Level_Base<T_Config>::markCoarseFinePoints() { Matrix<T_Config> &A = this->getA(); //allocate necessary memory typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector; FVector weights; if (!A.is_matrix_singleGPU()) { int size, offset; A.getOffsetAndSizeForView(FULL, &offset, &size); // size should now contain the number of 1-ring rows weights.resize(size); } else { weights.resize(A.get_num_rows()); } thrust::fill(weights.begin(), weights.end(), 0.0); cudaCheckError(); // extend A to include 1st ring nodes // compute strong connections and weights if (!A.is_matrix_singleGPU()) { ViewType oldView = A.currentView(); A.setView(FULL); strength->computeStrongConnectionsAndWeights(A, this->m_s_con, weights, this->max_row_sum); A.setView(oldView); } else { strength->computeStrongConnectionsAndWeights(A, this->m_s_con, weights, this->max_row_sum); } // Exchange the one-ring of the weights if (!A.is_matrix_singleGPU()) { A.manager->exchange_halo(weights, weights.tag); } //mark coarse and fine points selector->markCoarseFinePoints(A, weights, this->m_s_con, this->m_cf_map, this->m_scratch); // we do resize cf_map to zero later, so we are saving separate copy this->m_cf_map.dirtybit = 1; // Do a two ring exchange of cf_map if (!A.is_matrix_singleGPU()) { A.manager->exchange_halo_2ring(this->m_cf_map, m_cf_map.tag); } // Modify cf_map array such that coarse points are assigned a local index, while fine points entries are not touched selector->renumberAndCountCoarsePoints(this->m_cf_map, this->m_num_coarse_vertices, A.get_num_rows()); } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::computeProlongationOperator() { this->Profile.tic("computeP"); Matrix<T_Config> &A = this->getA(); //allocate necessary memory typedef Vector<typename TConfig::template 
setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector; //generate the interpolation matrix interpolator->generateInterpolationMatrix(A, this->m_cf_map, this->m_s_con, this->m_scratch, P, AMG_Level<TConfig>::amg); this->m_cf_map.clear(); this->m_cf_map.shrink_to_fit(); this->m_scratch.clear(); this->m_scratch.shrink_to_fit(); this->m_s_con.clear(); this->m_s_con.shrink_to_fit(); profileSubphaseTruncateP(); // truncate based on max # of elements if desired if (this->max_elmts > 0 && P.get_num_rows() > 0) { Truncate<TConfig>::truncateByMaxElements(P, this->max_elmts); } if (!P.isLatencyHidingEnabled(*this->amg->m_cfg)) { // This will cause bsrmv_with_mask to not do latency hiding P.setInteriorView(OWNED); P.setExteriorView(OWNED); } profileSubphaseNone(); this->Profile.toc("computeP"); } /********************************************** * computes R=P^T **********************************************/ template <class T_Config> void Classical_AMG_Level_Base<T_Config>::computeRestrictionOperator() { this->Profile.tic("computeR"); R.set_initialized(0); P.setView(OWNED); transpose(P, R, P.get_num_rows()); if (!R.isLatencyHidingEnabled(*this->amg->m_cfg)) { // This will cause bsrmv_with_mask_restriction to not do latency hiding R.setInteriorView(OWNED); R.setExteriorView(OWNED); } if(P.is_matrix_distributed()) { // Setup the number of non-zeros in R using stub DistributedManager R.manager = new DistributedManager<T_Config>(); int nrows_owned = P.manager->halo_offsets[0]; int nrows_full = P.manager->halo_offsets[P.manager->neighbors.size()]; int nz_full = R.row_offsets[nrows_full]; int nz_owned = R.row_offsets[nrows_owned]; R.manager->setViewSizes(nrows_owned, nz_owned, nrows_owned, nz_owned, nrows_full, nz_full, R.get_num_rows(), R.get_num_nz()); } R.set_initialized(1); this->Profile.toc("computeR"); } 
/********************************************** * computes the Galerkin product: A_c=R*A*P **********************************************/ template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Classical_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1() { this->Profile.tic("computeA"); Matrix<TConfig_h> RA; RA.addProps(CSR); RA.set_block_dimx(this->getA().get_block_dimx()); RA.set_block_dimy(this->getA().get_block_dimy()); Matrix<TConfig_h> &RAP = this->getNextLevel( typename Matrix<TConfig_h>::memory_space( ) )->getA( ); RAP.addProps(CSR); RAP.set_block_dimx(this->getA().get_block_dimx()); RAP.set_block_dimy(this->getA().get_block_dimy()); Matrix<TConfig_h> &Atmp = this->getA(); multiplyMM(this->R, this->getA(), RA); multiplyMM(RA, this->P, RAP); RAP.sortByRowAndColumn(); RAP.set_initialized(1); this->Profile.toc("computeA"); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Classical_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1_distributed() { FatalError("Distributed classical AMG not implemented for host\n", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Classical_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1() { this->Profile.tic("computeA"); Matrix<TConfig_d> &RAP = this->getNextLevel( device_memory( ) )->getA( ); RAP.addProps(CSR); RAP.set_block_dimx(this->getA().get_block_dimx()); RAP.set_block_dimy(this->getA().get_block_dimy()); this->R.set_initialized( 0 ); this->R.addProps( CSR ); this->R.set_initialized( 1 ); this->P.set_initialized( 0 ); this->P.addProps( CSR ); this->P.set_initialized( 1 ); void *wk = AMG_Level<TConfig_d>::amg->getCsrWorkspace(); if ( wk == NULL ) { wk = CSR_Multiply<TConfig_d>::csr_workspace_create( 
*(AMG_Level<TConfig_d>::amg->m_cfg), AMG_Level<TConfig_d>::amg->m_cfg_scope ); AMG_Level<TConfig_d>::amg->setCsrWorkspace( wk ); } int spmm_verbose = this->amg->m_cfg->AMG_Config::getParameter<int>("spmm_verbose", this->amg->m_cfg_scope); if ( spmm_verbose ) { typedef typename Matrix<TConfig_d>::IVector::const_iterator Iterator; typedef thrust::pair<Iterator, Iterator> Result; std::ostringstream buffer; buffer << "SPMM: Level " << this->getLevelIndex() << std::endl; if ( this->getLevelIndex() == 0 ) { device_vector_alloc<int> num_nz( this->getA().row_offsets.size() ); thrust::adjacent_difference( this->getA().row_offsets.begin(), this->getA().row_offsets.end(), num_nz.begin() ); cudaCheckError(); Result result = thrust::minmax_element( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); int min_size = *result.first; int max_size = *result.second; int sum = thrust_wrapper::reduce( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); double avg_size = double(sum) / this->getA().get_num_rows(); buffer << "SPMM: A: " << std::endl; buffer << "SPMM: Matrix avg row size: " << avg_size << std::endl; buffer << "SPMM: Matrix min row size: " << min_size << std::endl; buffer << "SPMM: Matrix max row size: " << max_size << std::endl; } device_vector_alloc<int> num_nz( this->P.row_offsets.size() ); thrust::adjacent_difference( this->P.row_offsets.begin(), this->P.row_offsets.end(), num_nz.begin() ); cudaCheckError(); Result result = thrust::minmax_element( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); int min_size = *result.first; int max_size = *result.second; int sum = thrust_wrapper::reduce( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); double avg_size = double(sum) / this->P.get_num_rows(); buffer << "SPMM: P: " << std::endl; buffer << "SPMM: Matrix avg row size: " << avg_size << std::endl; buffer << "SPMM: Matrix min row size: " << min_size << std::endl; buffer << "SPMM: Matrix max row size: " << max_size << std::endl; num_nz.resize( 
this->R.row_offsets.size() ); thrust::adjacent_difference( this->R.row_offsets.begin(), this->R.row_offsets.end(), num_nz.begin() ); cudaCheckError(); result = thrust::minmax_element( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); min_size = *result.first; max_size = *result.second; sum = thrust_wrapper::reduce( num_nz.begin() + 1, num_nz.end() ); cudaCheckError(); avg_size = double(sum) / this->R.get_num_rows(); buffer << "SPMM: R: " << std::endl; buffer << "SPMM: Matrix avg row size: " << avg_size << std::endl; buffer << "SPMM: Matrix min row size: " << min_size << std::endl; buffer << "SPMM: Matrix max row size: " << max_size << std::endl; amgx_output( buffer.str().c_str(), static_cast<int>( buffer.str().length() ) ); } RAP.set_initialized( 0 ); CSR_Multiply<TConfig_d>::csr_galerkin_product( this->R, this->getA(), this->P, RAP, NULL, NULL, NULL, NULL, NULL, NULL, wk ); RAP.set_initialized( 1 ); int spmm_no_sort = this->amg->m_cfg->AMG_Config::getParameter<int>("spmm_no_sort", this->amg->m_cfg_scope); this->Profile.toc("computeA"); } /********************************************** * computes the restriction: rr=R*r **********************************************/ template <class T_Config> void Classical_AMG_Level_Base<T_Config>::restrictResidual(VVector &r, VVector &rr) { // we need to resize residual vector to make sure it can store halo rows to be sent if (!P.is_matrix_singleGPU()) { typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); #if COARSE_CLA_CONSO int desired_size ; if (this->getNextLevel(MemorySpace())->isConsolidationLevel()) { desired_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets_before_glue[Ac.manager->neighbors_before_glue.size()] * rr.get_block_size()); } else { desired_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()] * rr.get_block_size()); } #else int 
desired_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()] * rr.get_block_size()); #endif rr.resize(desired_size); } #if 1 this->Profile.tic("restrictRes"); // Disable speculative send of rr if (P.is_matrix_singleGPU()) { multiply( R, r, rr); } else { multiply_with_mask_restriction( R, r, rr, P); } #endif // exchange halo residuals & add residual contribution from neighbors rr.dirtybit = 1; if (!P.is_matrix_singleGPU()) { int desired_size = P.manager->halo_offsets[P.manager->neighbors.size()] * rr.get_block_size(); if (rr.size() < desired_size) { rr.resize(desired_size); } } this->Profile.toc("restrictRes"); } struct is_minus_one { __host__ __device__ bool operator()(const int &x) { return x == -1; } }; template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Classical_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1_distributed() { Matrix<TConfig_d> &A = this->getA(); Matrix<TConfig_d> &P = this->P; Matrix<TConfig_d> &RAP = this->getNextLevel( device_memory( ) )->getA( ); RAP.addProps(CSR); RAP.set_block_dimx(this->getA().get_block_dimx()); RAP.set_block_dimy(this->getA().get_block_dimy()); IndexType num_parts = A.manager->get_num_partitions(); IndexType num_neighbors = A.manager->num_neighbors(); IndexType my_rank = A.manager->global_id(); // OWNED includes interior and boundary A.setView(OWNED); int num_owned_coarse_pts = P.manager->halo_offsets[0]; int num_owned_fine_pts = A.manager->halo_offsets[0]; // Initialize RAP.manager if (RAP.manager == NULL) { RAP.manager = new DistributedManager<TConfig_d>(); } RAP.manager->A = &RAP; RAP.manager->setComms(A.manager->getComms()); RAP.manager->set_global_id(my_rank); RAP.manager->set_num_partitions(num_parts); RAP.manager->part_offsets_h = P.manager->part_offsets_h; RAP.manager->part_offsets = P.manager->part_offsets; 
RAP.manager->set_base_index(RAP.manager->part_offsets_h[my_rank]); RAP.manager->set_index_range(num_owned_coarse_pts); RAP.manager->num_rows_global = RAP.manager->part_offsets_h[num_parts]; // -------------------------------------------------------------------- // Using the B2L_maps of matrix A, identify the rows of P that need to be sent to neighbors, // so that they can compute A*P // Once rows of P are identified, convert the column indices to global indices, and send them to neighbors // --------------------------------------------------------------------------- // Copy some information about the manager of P, since we don't want to modify those IVector_h P_neighbors = P.manager->neighbors; I64Vector_h P_halo_ranges_h = P.manager->halo_ranges_h; I64Vector_d P_halo_ranges = P.manager->halo_ranges; RAP.manager->local_to_global_map = P.manager->local_to_global_map; IVector_h P_halo_offsets = P.manager->halo_offsets; // Create a temporary distributed arranger DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>; prep->exchange_halo_rows_P(A, this->P, RAP.manager->local_to_global_map, P_neighbors, P_halo_ranges_h, P_halo_ranges, P_halo_offsets, RAP.manager->part_offsets_h, RAP.manager->part_offsets, num_owned_coarse_pts, RAP.manager->part_offsets_h[my_rank]); cudaCheckError(); // At this point, we can compute RAP_full which contains some rows that will need to be sent to neighbors // i.e. RAP_full = [ RAP_int ] // [ RAP_ext ] // RAP is [ RAP_int ] + [RAP_ext_received_from_neighbors] // We can reuse the serial galerkin product since R, A and P use local indices // TODO: latency hiding (i.e. 
compute RAP_ext, exchange_matrix_halo, then do RAP_int) /* WARNING: do not recompute prolongation (P) and restriction (R) when you are reusing the level structure (structure_reuse_levels > 0) */ /* We force for matrix P to have only owned rows to be seen for the correct galerkin product computation*/ this->P.set_initialized(0); this->P.set_num_rows(num_owned_fine_pts); this->P.addProps( CSR ); this->P.set_initialized(1); if (this->isReuseLevel() == false) { this->R.set_initialized( 0 ); this->R.addProps( CSR ); // Take the tranpose of P to get R // Single-GPU transpose, no mpi exchange this->computeRestrictionOperator(); this->R.set_initialized( 1 ); } this->Profile.tic("computeA"); Matrix<TConfig_d> RAP_full; // Initialize the workspace needed for galerkin product void *wk = AMG_Level<TConfig_d>::amg->getCsrWorkspace(); if ( wk == NULL ) { wk = CSR_Multiply<TConfig_d>::csr_workspace_create( *(AMG_Level<TConfig_d>::amg->m_cfg), AMG_Level<TConfig_d>::amg->m_cfg_scope ); AMG_Level<TConfig_d>::amg->setCsrWorkspace( wk ); } // Single-GPU RAP, no mpi exchange RAP_full.set_initialized( 0 ); /* WARNING: Since A is reordered (into interior and boundary nodes), while R and P are not reordered, you must unreorder A when performing R*A*P product in ordre to obtain the correct result. 
*/ CSR_Multiply<TConfig_d>::csr_galerkin_product( this->R, this->getA(), this->P, RAP_full, /* permutation for rows of R, A and P */ NULL, NULL /*&(this->getA().manager->renumbering)*/, NULL, /* permutation for cols of R, A and P */ NULL, NULL /*&(this->getA().manager->inverse_renumbering)*/, NULL, wk ); RAP_full.set_initialized( 1 ); this->Profile.toc("computeA"); // ---------------------------------------------------------------------------------------------- // Now, send rows of RAP_full requireq by neighbors, received rows from neighbors and create RAP // ---------------------------------------------------------------------------------------------- prep->exchange_RAP_ext(RAP, RAP_full, A, this->P, P_halo_offsets, RAP.manager->local_to_global_map, P_neighbors, P_halo_ranges_h, P_halo_ranges, RAP.manager->part_offsets_h, RAP.manager->part_offsets, num_owned_coarse_pts, RAP.manager->part_offsets_h[my_rank], wk); // Delete temporary distributed arranger delete prep; /* WARNING: The RAP matrix generated at this point contains extra rows (that correspond to rows of R, that was obtained by locally transposing P). This rows are ignored by setting the # of matrix rows to be smaller, so that they correspond to number of owned coarse nodes. This should be fine, but it leaves holes in the matrix as there might be columns that belong to the extra rows that now do not belong to the smaller matrix with number of owned coarse nodes rows. The same is trued about the local_to_global_map. These two data structures match at this point. However, in the next calls local_to_global (exclusively) will be used to geberate B2L_maps (wihtout going through column indices) which creates extra elements in the B2L that simply do not exist in the new matrices. I strongly suspect this is the reason fore the bug. The below fix simply compresses the matrix so that there are no holes in it, or in the local_2_global_map. */ //mark local_to_global_columns that exist in the owned coarse nodes rows. 
IndexType nrow = RAP.get_num_rows(); IndexType ncol = RAP.get_num_cols(); IndexType nl2g = ncol - nrow; if (nl2g > 0) { IVector l2g_p(nl2g + 1, 0); //+1 is needed for prefix_sum/exclusive_scan I64Vector l2g_t(nl2g, 0); IndexType nblocks = (nrow + AMGX_CAL_BLOCK_SIZE - 1) / AMGX_CAL_BLOCK_SIZE; if (nblocks > 0) flag_existing_local_to_global_columns<int> <<< nblocks, AMGX_CAL_BLOCK_SIZE>>> (nrow, RAP.row_offsets.raw(), RAP.col_indices.raw(), l2g_p.raw()); cudaCheckError(); /* //slow version of the above kernel for(int ii=0; ii<nrow; ii++){ int s = RAP.row_offsets[ii]; int e = RAP.row_offsets[ii+1]; for (int jj=s; jj<e; jj++) { int col = RAP.col_indices[jj]; if (col>=nrow){ int kk = col-RAP.get_num_rows(); l2g_p[kk] = 1; } } } cudaCheckError(); */ //create a pointer map for their location using prefix sum thrust_wrapper::exclusive_scan(l2g_p.begin(), l2g_p.end(), l2g_p.begin()); int new_nl2g = l2g_p[nl2g]; //compress the columns using the pointer map if (nblocks > 0) compress_existing_local_columns<int> <<< nblocks, AMGX_CAL_BLOCK_SIZE>>> (nrow, RAP.row_offsets.raw(), RAP.col_indices.raw(), l2g_p.raw()); cudaCheckError(); /* //slow version of the above kernel for(int ii=0; ii<nrow; ii++){ int s = RAP.row_offsets[ii]; int e = RAP.row_offsets[ii+1]; for (int jj=s; jj<e; jj++) { int col = RAP.col_indices[jj]; if (col>=nrow){ int kk = col-RAP.get_num_rows(); RAP.col_indices[jj] = nrow+l2g_p[kk]; } } } cudaCheckError(); */ //adjust matrix size (number of columns) accordingly RAP.set_initialized(0); RAP.set_num_cols(nrow + new_nl2g); RAP.set_initialized(1); //compress local_to_global_map using the pointer map nblocks = (nl2g + AMGX_CAL_BLOCK_SIZE - 1) / AMGX_CAL_BLOCK_SIZE; if (nblocks > 0) compress_existing_local_to_global_columns<int, int64_t> <<< nblocks, AMGX_CAL_BLOCK_SIZE>>> (nl2g, RAP.manager->local_to_global_map.raw(), l2g_t.raw(), l2g_p.raw()); cudaCheckError(); thrust::copy(l2g_t.begin(), l2g_t.begin() + new_nl2g, RAP.manager->local_to_global_map.begin()); 
cudaCheckError(); /* //slow version of the above kernel (through Thrust) for(int ii=0; ii<(l2g_p.size()-1); ii++){ if (l2g_p[ii] != l2g_p[ii+1]){ RAP.manager->local_to_global_map[l2g_p[ii]] = RAP.manager->local_to_global_map[ii]; } } cudaCheckError(); */ //adjust local_to_global_map size accordingly RAP.manager->local_to_global_map.resize(new_nl2g); } } /********************************************** * prolongates the error: x+=P*e **********************************************/ template <class T_Config> void Classical_AMG_Level_Base<T_Config>::prolongateAndApplyCorrection(VVector &e, VVector &bc, VVector &x, VVector &tmp) { this->Profile.tic("proCorr"); // Use P.manager to exchange halo of e before doing P // (since P has columns belonging to one of P.neighbors) e.dirtybit = 1; if (!P.is_matrix_singleGPU()) { // get coarse matrix typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); #if COARSE_CLA_CONSO int e_size; if (this->getNextLevel(MemorySpace())->isConsolidationLevel()) { e_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets_before_glue[Ac.manager->neighbors_before_glue.size()]) * e.get_block_size(); } else { e_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()]) * e.get_block_size(); } if (e.size() < e_size) { e.resize(e_size); } #else int e_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()]) * e.get_block_size(); e.resize(e_size); #endif } if (P.is_matrix_singleGPU()) { if (e.size() > 0) { multiply( P, e, tmp); } } else { multiply_with_mask( P, e, tmp); } // get owned num rows for fine matrix int owned_size; if (this->A->is_matrix_distributed()) { int owned_offset; P.manager->getOffsetAndSizeForView(OWNED, &owned_offset, &owned_size); } else { owned_size = x.size(); } //apply axpby(x, tmp, x, ValueType(1), 
ValueType(1), 0, owned_size); this->Profile.toc("proCorr"); x.dirtybit = 1; } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::computeAOperator() { if (this->A->get_block_size() == 1) { computeAOperator_1x1(); } else { FatalError("Classical AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::computeAOperator_distributed() { if (this->A->get_block_size() == 1) { computeAOperator_1x1_distributed(); } else { FatalError("Classical AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::consolidateVector(VVector &x) { #ifdef AMGX_WITH_MPI #if COARSE_CLA_CONSO typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &A = this->getA(); Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); MPI_Comm comm, temp_com; comm = Ac.manager->getComms()->get_mpi_comm(); temp_com = compute_glue_matrices_communicator(Ac); glue_vector(Ac, comm, x, temp_com); #endif #endif } template <class T_Config> void Classical_AMG_Level_Base<T_Config>::unconsolidateVector(VVector &x) { #ifdef AMGX_WITH_MPI #if COARSE_CLA_CONSO typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &A = this->getA(); Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); MPI_Comm comm, temp_com; comm = Ac.manager->getComms()->get_mpi_comm(); temp_com = compute_glue_matrices_communicator(Ac); unglue_vector(Ac, comm, x, temp_com, x); #endif #endif } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class Classical_AMG_Level_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class Classical_AMG_Level<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace classical } // namespace 
amgx
06f04f8f1bc89e42d38149591037b5f96a85153e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/layer_norm.h> #include <type_traits> #include <thrust/tuple.h> #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/hip/block_reduce.cuh> #include <ATen/native/hip/thread_constants.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/empty_like_native.h> #include <ATen/ops/native_layer_norm_native.h> #include <ATen/ops/native_layer_norm_backward_native.h> #include <ATen/ops/zeros_like_native.h> #endif #include <c10/hip/HIPMathCompat.h> namespace at { namespace native { namespace { constexpr int kCUDANumThreads = 256; constexpr int kColwiseReduceTileSize = 32; constexpr int vec_size = 4; //we could make it dependent on dtype, but that would lead to different results between float and low-p types // aligned vector generates vectorized load/store on CUDA (copy-pasted from MemoryAccess.cuh) template<typename scalar_t, int vec_size> struct alignas(sizeof(scalar_t) * vec_size) aligned_vector { scalar_t val[vec_size]; }; template <typename T, typename T_ACC> __global__ void RowwiseMomentsCUDAKernel( int64_t N, T_ACC eps, const T* X, T_ACC* mean, T_ACC* rstd) { using WelfordType = WelfordData<T_ACC, int64_t, T_ACC>; using WelfordOp = WelfordOps<T_ACC, T_ACC, int64_t, T_ACC, thrust::pair<T_ACC, T_ACC>>; __shared__ typename std::aligned_storage<sizeof(WelfordType), alignof(WelfordType)>:: type val_shared[C10_WARP_SIZE]; WelfordType* val_shared_ptr = reinterpret_cast<WelfordType*>(val_shared); const int64_t i = blockIdx.x; WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false}; WelfordType val(0, 0, 0, 0); for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + 
j; val = welford_op.reduce(val, static_cast<T_ACC>(X[index]), index); } val = cuda_utils::BlockReduce( val, welford_op, /*identity_element=*/WelfordType(0, 0, 0, 0), val_shared_ptr); if (threadIdx.x == 0) { T_ACC m1; T_ACC m2; thrust::tie(m2, m1) = welford_op.project(val); mean[i] = m1; rstd[i] = c10::hip::compat::rsqrt(m2 + eps); } } template <typename T, typename T_ACC> __global__ void LayerNormForwardCUDAKernel( int64_t N, const T* X, const T_ACC* mean, const T_ACC* rstd, const T* gamma, const T* beta, T* Y) { const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); const T_ACC beta_v = beta == nullptr ? T_ACC(0) : static_cast<T_ACC>(beta[j]); Y[index] = (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]) * gamma_v + beta_v; } } struct WelfordDataLN{ float mean; float sigma2; float count; C10_HOST_DEVICE WelfordDataLN(): mean(0.f), sigma2(0.f), count(0.f){} C10_HOST_DEVICE WelfordDataLN(float mean, float sigma2, float count): mean(mean), sigma2(sigma2), count(count) {} }; template<typename U> __device__ WelfordDataLN cuWelfordOnlineSum( const U val, const WelfordDataLN& curr_sum) { U delta = val - curr_sum.mean; U new_count = curr_sum.count + 1.f; U new_mean = curr_sum.mean + delta * (1.f/new_count); //proper division is slow, this is less accurate but noticeably faster return {new_mean, curr_sum.sigma2 + delta * (val - new_mean), new_count}; } __device__ WelfordDataLN cuWelfordCombine( const WelfordDataLN dataB, const WelfordDataLN dataA ) { using U = decltype(dataB.count); U delta = dataB.mean - dataA.mean; U count = dataA.count + dataB.count; U mean, sigma2; if (count > decltype(dataB.count){0}) { auto coef = 1.f/count; //NB we don't use --use_fast_math, but this is emulation, 1./count goes to intrinsic, `* coef` is multiplication, instead of slow fp division auto nA = 
dataA.count * coef; auto nB = dataB.count * coef; mean = nA*dataA.mean + nB*dataB.mean; sigma2 = dataA.sigma2 + dataB.sigma2 + delta * delta * dataA.count * nB; } else { mean = U(0); sigma2 = U(0); } return {mean, sigma2, count}; } template<typename T> __device__ WelfordDataLN compute_stats( const T* __restrict__ X, const int N, float * buf ) { //X points to the row to read using vec_t = aligned_vector<T, vec_size>; using acc_t = acc_type<T, true>; const vec_t * X_vec = reinterpret_cast<const vec_t*>(X); const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const int n_vec_to_read = N/vec_size; WelfordDataLN wd(0.f, 0.f, 0.f); //no tail, we check that N is multiple of vec_size for (int i = thrx; i < n_vec_to_read; i += numx) { vec_t data = X_vec[i]; #pragma unroll for (int ii=0; ii < vec_size; ii++){ wd = cuWelfordOnlineSum(static_cast<acc_t>(data.val[ii]), wd); } } // intra-warp reduction for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) { WelfordDataLN wdB{WARP_SHFL_DOWN(wd.mean, offset), WARP_SHFL_DOWN(wd.sigma2, offset), WARP_SHFL_DOWN(wd.count, offset)}; wd = cuWelfordCombine(wd, wdB); } // threadIdx.x == 0 has correct values for each warp // inter-warp reductions if (blockDim.y > 1) { float * meansigmabuf = buf; float * countbuf = buf + blockDim.y; for (int offset = blockDim.y/2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2*offset) { const int wrt_y = threadIdx.y - offset; meansigmabuf[2*wrt_y] = wd.mean; meansigmabuf[2*wrt_y+1] = wd.sigma2; countbuf[wrt_y] = wd.count; } __syncthreads(); // lower half merges if (threadIdx.x == 0 && threadIdx.y < offset) { WelfordDataLN wdB{meansigmabuf[2*threadIdx.y], meansigmabuf[2*threadIdx.y+1], countbuf[threadIdx.y]}; wd = cuWelfordCombine(wd, wdB); } __syncthreads(); } if (threadIdx.x == 0 && threadIdx.y ==0) { meansigmabuf[0] = wd.mean; meansigmabuf[1] = wd.sigma2/float(N); } 
__syncthreads(); return WelfordDataLN{meansigmabuf[0], meansigmabuf[1],0.f}; } else { return WelfordDataLN{WARP_SHFL(wd.mean,0), WARP_SHFL(wd.sigma2,0)/float(N), 0.f}; } } template <typename T, typename T_ACC, typename std::enable_if<!std::is_same<T, double>::value, int>::type = 0> __device__ __inline__ void vectorized_layer_norm_kernel_impl( const int N, T_ACC eps, const T* __restrict__ X, const T* gamma, const T* beta, T_ACC* mean, T_ACC* rstd, T* Y){ extern __shared__ float s_data[]; //if we made smem WelfordDataLN type, there would be bank conflicts, //as one thread would have to write 3 consecutive floats auto i1 = blockIdx.x; const T * block_row = X + i1 * N; WelfordDataLN wd = compute_stats(block_row, N, s_data); using vec_t = aligned_vector<T, vec_size>; const vec_t * X_vec = reinterpret_cast<const vec_t*>(block_row); vec_t * Y_vec = reinterpret_cast<vec_t*>(Y + i1 * N); const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const int n_vec_to_read = N/vec_size; T_ACC rstd_val = c10::hip::compat::rsqrt(wd.sigma2 + eps); //no tail, N is guaranteed to be multiple of vec size for (int i = thrx; i < n_vec_to_read; i += numx) { vec_t data = X_vec[i]; vec_t out; //computation is performed in T_ACC, X is cast to T_ACC and result is implicitly cast to T if (gamma != nullptr && beta != nullptr) { #pragma unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = static_cast<T_ACC>(gamma[i*vec_size + ii]) * (rstd_val * (static_cast<T_ACC>(data.val[ii]) - wd.mean)) + static_cast<T_ACC>(beta[i*vec_size + ii]); } } else if (gamma != nullptr) { #pragma unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = static_cast<T_ACC>(gamma[i*vec_size + ii]) * (rstd_val * (static_cast<T_ACC>(data.val[ii]) - wd.mean)); } } else if (beta != nullptr) { #pragma unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = (rstd_val * (static_cast<T_ACC>(data.val[ii]) - wd.mean)) + static_cast<T_ACC>(beta[i*vec_size + ii]); } } else { #pragma 
unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = rstd_val * (static_cast<T_ACC>(data.val[ii]) - wd.mean); } } Y_vec[i] = out; } if (thrx == 0) { mean[i1] = wd.mean; rstd[i1] = rstd_val; } } template <typename T, typename T_ACC, typename std::enable_if<std::is_same<T, double>::value, int>::type = 0> __device__ __inline__ void vectorized_layer_norm_kernel_impl( const int /*N*/, T_ACC /*eps*/, const T* __restrict__ /*X*/, const T* /*gamma*/, const T* /*beta*/, T_ACC* /*mean*/, T_ACC* /*rstd*/, T* /*Y*/){ CUDA_KERNEL_ASSERT(false && "doesn't work with double"); } //to avoid windows SFINAE errors template <typename T, typename T_ACC> __global__ __inline__ void vectorized_layer_norm_kernel( const int N, T_ACC eps, const T* __restrict__ X, const T* gamma, const T* beta, T_ACC* mean, T_ACC* rstd, T* Y){ vectorized_layer_norm_kernel_impl(N, eps, X, gamma, beta, mean, rstd, Y); } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( int64_t N, const T* dY, const T* X, const T* gamma, acc_type<T, true>* ds, acc_type<T, true>* db) { using T_ACC = acc_type<T, true>; __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; const int64_t i = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? 
T_ACC(1) : static_cast<T_ACC>(gamma[j]); sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]) * gamma_v; sum2 += static_cast<T_ACC>(dY[index]) * gamma_v; } sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); if (threadIdx.x == 0) { ds[i] = sum1; db[i] = sum2; } } template<typename T, typename T_ACC> __device__ __inline__ void compute_gI( const T* __restrict__ dY, const T* __restrict__ X, const T_ACC* __restrict__ mean, const T_ACC* __restrict__ rstd, const T* __restrict__ gamma, T* dX, const int N, T_ACC * buf){ const auto i1 = blockIdx.x; const T_ACC mean_val = mean[i1]; const T_ACC rstd_val = rstd[i1]; T_ACC stats_x1{0}, stats_x2{0}; constexpr int unroll = 4; auto l = unroll * threadIdx.x; const T * X_i = X + i1 * N; const T * dY_i = dY + i1 * N; T * dX_i = dX + i1 * N; //vectorized reads don't improve perf, so use regular unrolling for (; l+unroll - 1 < N; l += blockDim.x * unroll){ #pragma unroll for (int k=0; k< unroll; k++){ T_ACC gamma_val = (gamma != nullptr) ? static_cast<T_ACC>(gamma[l+k]) : T_ACC(1); const T_ACC c_h = static_cast<T_ACC>(X_i[l+k]); const T_ACC c_loss = static_cast<T_ACC>(dY_i[l+k]); stats_x1 += c_loss * gamma_val; stats_x2 += c_loss * gamma_val * (c_h - mean_val) * rstd_val; } } for (; l < N; l ++) { T_ACC gamma_val = (gamma != nullptr) ? 
static_cast<T_ACC>(gamma[l]) : T_ACC(1); const T_ACC c_h = static_cast<T_ACC>(X_i[l]); const T_ACC c_loss = static_cast<T_ACC>(dY_i[l]); stats_x1 += c_loss * gamma_val; stats_x2 += c_loss * gamma_val * (c_h - mean_val) * rstd_val; } stats_x1 = cuda_utils::BlockReduceSum(stats_x1, buf); stats_x2 = cuda_utils::BlockReduceSum(stats_x2, buf); if (threadIdx.x == 0) { buf[0] = stats_x1; buf[1] = stats_x2; } __syncthreads(); stats_x1 = buf[0]; stats_x2 = buf[1]; T_ACC fH = N; T_ACC term1 = (T_ACC(1) / fH) * rstd_val; for (int l = threadIdx.x; l < N; l += blockDim.x){ const T_ACC x = X_i[l]; const T_ACC dy = dY_i[l]; T_ACC gamma_val = (gamma != nullptr) ? static_cast<T_ACC>(gamma[l]) : T_ACC(1); T_ACC f_grad_input = fH * gamma_val * dy; f_grad_input -= (x - mean_val) * rstd_val * stats_x2; f_grad_input -= stats_x1; f_grad_input *= term1; dX_i[l] = f_grad_input; } } template<typename T, typename T_ACC> __global__ void layer_norm_grad_input_kernel( const T* __restrict__ dY, const T* __restrict__ X, const T_ACC* __restrict__ mean, const T_ACC* __restrict__ rstd, const T* __restrict__ gamma, T* dX, const int N){ alignas(sizeof(double)) extern __shared__ char s_data1[]; T_ACC * buf = reinterpret_cast<T_ACC*>(&s_data1); compute_gI(dY, X, mean, rstd, gamma, dX, N, buf); } template <typename T, typename T_ACC> __global__ void ComputeGradientFusedParamsCUDAKernel( int64_t M, int64_t N, const T_ACC* mean, const T_ACC* rstd, const acc_type<T, true>* ds, const acc_type<T, true>* db, acc_type<T, true>* c1, acc_type<T, true>* c2) { const int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < M) { const T_ACC s = T_ACC(1) / static_cast<T_ACC>(N); const T_ACC a = (db[index] * static_cast<T_ACC>(mean[index]) - ds[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * s; c1[index] = a; c2[index] = -(a * static_cast<T_ACC>(mean[index]) + db[index] * static_cast<T_ACC>(rstd[index]) * s); } } template <typename T, typename 
T_ACC> __global__ void LayerNormBackwardCUDAKernel( int64_t N, const T* dY, const T* X, const T* gamma, const T_ACC* a, const acc_type<T, true>* b, const acc_type<T, true>* c, T* dX) { const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); dX[index] = static_cast<T_ACC>(a[i]) * static_cast<T_ACC>(dY[index]) * gamma_v + b[i] * static_cast<T_ACC>(X[index]) + c[i]; } } template <typename T, typename T_ACC> __global__ void GammaBetaBackwardSimpleCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T_ACC* mean, const T_ACC* rstd, T* dg, T* db) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; if (j < N) { T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t i = 0; i < M; ++i) { const int64_t index = i * N + j; sum1 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]) * (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]); sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]); } if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } template <typename T, typename T_ACC> __global__ void GammaBetaBackwardCUDAKernel1( int64_t M, int64_t N, const T* dY, const T* X, const T_ACC* mean, const T_ACC* rstd, T* dg, T* db) { __shared__ T_ACC g_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; __shared__ T_ACC b_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; T_ACC dg_sum1 = 0; T_ACC dg_sum2 = 0; T_ACC db_sum1 = 0; T_ACC db_sum2 = 0; if (j < N) { for (int64_t i = threadIdx.y; i < M; i += blockDim.y * 2) { const int64_t i1 = i; const int64_t i2 = i + blockDim.y; const int64_t index1 = i1 * N + j; const int64_t index2 = i2 * N + j; dg_sum1 += dg == nullptr ? 
T_ACC(0) : static_cast<T_ACC>(dY[index1]) * (static_cast<T_ACC>(X[index1]) - static_cast<T_ACC>(mean[i1])) * static_cast<T_ACC>(rstd[i1]); db_sum1 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]); if (i2 < M) { dg_sum2 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]) * (static_cast<T_ACC>(X[index2]) - static_cast<T_ACC>(mean[i2])) * static_cast<T_ACC>(rstd[i2]); db_sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]); } } } g_shared[threadIdx.y][threadIdx.x] = dg_sum1; g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; b_shared[threadIdx.y][threadIdx.x] = db_sum1; b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; __syncthreads(); T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } } template <typename T, typename T_ACC> __global__ void GammaBetaBackwardCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T_ACC* mean, const T_ACC* rstd, T* dg, T* db) { alignas(sizeof(double)) extern __shared__ char s_data1[]; T_ACC * s_data_typed = reinterpret_cast<T_ACC*>(&s_data1); const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; constexpr int unroll = 8; T dYs[unroll]; T Xs[unroll]; T_ACC * means = s_data_typed; T_ACC * rstds = s_data_typed + unroll * blockDim.y; T_ACC dg_sum = 0; T_ACC db_sum = 0; if (j < N) { int bcounter; for (bcounter = 0; 
bcounter < M/(blockDim.y * unroll); bcounter++){ int offset = (bcounter * blockDim.y + threadIdx.y) * unroll; #pragma unroll for (int ii=0; ii<unroll; ii++){ if (threadIdx.x == 0) { means[ii*blockDim.y + threadIdx.y] = mean[offset + ii]; rstds[ii*blockDim.y + threadIdx.y] = rstd[offset + ii]; } dYs[ii] = dY[(offset + ii) * N + j ]; Xs[ii] = X[(offset + ii) * N + j]; } __syncthreads(); #pragma unroll for (int ii=0; ii<unroll; ii++){ dg_sum += dYs[ii] * (Xs[ii] - means[ii*blockDim.y + threadIdx.y]) * rstds[ii * blockDim.y + threadIdx.y]; db_sum += dYs[ii]; } __syncthreads(); } int offset = (bcounter * blockDim.y + threadIdx.y) * unroll; for (int ii = 0; ii<8; ii++ ){ T_ACC mean_val, rstd_val; // we don't use smem in the tail to avoid awkward synchronizations, perf penalty is negligible if ((offset + ii) < M) { mean_val = mean[offset+ii]; rstd_val = rstd[offset+ii]; dYs[0] = dY[(offset + ii) * N + j ]; Xs[0] = X[(offset + ii) * N + j]; dg_sum += dYs[0] * (Xs[0] - mean_val) * rstd_val; db_sum += dYs[0]; } } s_data_typed[threadIdx.y * blockDim.x + threadIdx.x] = dg_sum; s_data_typed[blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x] = db_sum; __syncthreads(); for (int offset = blockDim.y/2; offset >=1; offset /= 2){ if (threadIdx.y < offset) { s_data_typed[threadIdx.y * blockDim.x + threadIdx.x] += s_data_typed[(threadIdx.y + offset) * blockDim.x + threadIdx.x]; s_data_typed[blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x] += s_data_typed[blockDim.x * blockDim.y + (threadIdx.y + offset) * blockDim.x + threadIdx.x]; } __syncthreads(); } if (threadIdx.y == 0) { if (dg) { dg[j] = s_data_typed[threadIdx.x]; } if (db) { db[j] = s_data_typed[threadIdx.x + blockDim.x * blockDim.y]; } } } } template <typename T, typename T_ACC> void launch_vectorized_layer_norm_kernel( int N, int64_t M, T_ACC eps, const T* X_data, const T* gamma_data, const T* beta_data, T* Y_data, T_ACC* mean_data, T_ACC* rstd_data ) { //constexpr int alignment = 16; //currently 
unused to make sure float and half results are bw accurate auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); const int warp_size = at::cuda::warp_size(); const dim3 threads(warp_size, num_threads() / warp_size, 1); const dim3 blocks(M); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(threads.y % 2 == 0 || threads.y == 1); int nshared = threads.y > 1 ? threads.y * 3/2 *sizeof(T_ACC) : 0; hipLaunchKernelGGL(( vectorized_layer_norm_kernel), dim3(blocks), dim3(threads), nshared, stream, N, eps, X_data, gamma_data, beta_data, mean_data, rstd_data, Y_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename T, typename T_ACC> void LayerNormKernelImplInternal( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, T_ACC eps, Tensor* Y, Tensor* mean, Tensor* rstd) { // assumes input, gamma and beta are of proper shape, this was checked in _check_layer_norm_inputs // assumes all tensors are contiguous TORCH_CHECK(M <= at::cuda::getCurrentDeviceProperties()->maxGridSize[0], "M should be less than maximum CUDA grid size, \ file a support request to support bigger batches"); const T* X_data = X.data_ptr<T>(); const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const T* beta_data = beta.defined() ? 
beta.data_ptr<T>() : nullptr; T* Y_data = Y->data_ptr<T>(); T_ACC* mean_data = mean->data_ptr<T_ACC>(); T_ACC* rstd_data = rstd->data_ptr<T_ACC>(); // check if can take fast path - all tensors are properly aligned, N is less than 2^24 (to use float count), // N is multiple of vec_size (so that all rows are aligned if tensor is aligned) auto can_vectorize = [&](const T * ptr, int alignment){uint64_t addr = reinterpret_cast<uint64_t>(ptr); return addr % alignment == 0;}; constexpr int num_vec_elems = vec_size; constexpr int alignment = num_vec_elems * sizeof(T); if ((std::is_same<T, float>::value || std::is_same<T, at::Half>::value) && N <= 1ULL << std::numeric_limits<float>::digits && N % num_vec_elems == 0 && can_vectorize(X_data, alignment) && can_vectorize(Y_data, alignment)) { launch_vectorized_layer_norm_kernel(static_cast<int>(N), M, eps, X_data, gamma_data, beta_data, Y_data, mean_data, rstd_data); } else { hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T, T_ACC>) , dim3(M), dim3(cuda_utils::kCUDABlockReduceNumThreads), 0, cuda_stream, N, eps, X_data, mean_data, rstd_data); C10_HIP_KERNEL_LAUNCH_CHECK(); hipLaunchKernelGGL(( LayerNormForwardCUDAKernel<T, T_ACC>), dim3(M), dim3(kCUDANumThreads), 0, cuda_stream, N, X_data, mean_data, rstd_data, gamma_data, beta_data, Y_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } } void LayerNormKernelImpl( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, double eps, Tensor* Y, Tensor* mean, Tensor* rstd) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormKernelImpl", [&]() { using acc_t = acc_type<scalar_t, true>; LayerNormKernelImplInternal<scalar_t, acc_t>( X, gamma, beta, M, N, static_cast<acc_t>(eps), Y, mean, rstd); }); } template <typename T> void LayerNormBackwardKernelImplInternal( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const 
Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* dbeta) { using T_ACC = acc_type<T, true>; TORCH_CHECK(dY.numel() == M * N); TORCH_CHECK(mean.numel() == M); TORCH_CHECK(rstd.numel() == M); TORCH_CHECK(M <= at::cuda::getCurrentDeviceProperties()->maxGridSize[0], "M should be less than maximum CUDA grid size, \ file a support request to support bigger batches"); TORCH_CHECK(N <= std::numeric_limits<int>::max(), "Normalized shape should have less than INT_MAX elements, \ file a support request to support bigger normalized shapes"); const T* dY_data = dY.template data_ptr<T>(); const T* X_data = X.template data_ptr<T>(); const T_ACC* mean_data = mean.template data_ptr<T_ACC>(); const T_ACC* rstd_data = rstd.template data_ptr<T_ACC>(); const T* gamma_data = gamma.defined() ? gamma.template data_ptr<T>() : nullptr; T* dX_data = dX->defined() ? dX->template data_ptr<T>() : nullptr; hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (dX_data != nullptr) { const int warp_size = at::cuda::warp_size(); const dim3 blocks(M); int nshared = (num_threads()/warp_size) * sizeof(T_ACC); hipLaunchKernelGGL(( layer_norm_grad_input_kernel), dim3(blocks), dim3(num_threads()), nshared, cuda_stream, dY_data, X_data, mean_data, rstd_data, gamma_data, dX_data, N); C10_HIP_KERNEL_LAUNCH_CHECK(); } if (dgamma->defined() || dbeta->defined()) { T* dgamma_data = dgamma->defined() ? dgamma->template data_ptr<T>() : nullptr; T* dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T>() : nullptr; if (M < 512) { // For small batch size, do colwise reduce directly. 
const int64_t B = (N + kCUDANumThreads - 1) / kCUDANumThreads; hipLaunchKernelGGL(( GammaBetaBackwardSimpleCUDAKernel<T, T_ACC>) , dim3(B), dim3(kCUDANumThreads), 0, cuda_stream, M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { dim3 threads{16, 32}; int blocks = (N + threads.x-1)/threads.x; hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<T, T_ACC>) , dim3(blocks), dim3(threads), 2 * sizeof(T_ACC) * threads.x * threads.y, cuda_stream, M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } void LayerNormBackwardKernelImpl( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* dbeta) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormBackwardKernelImpl", [&]() { LayerNormBackwardKernelImplInternal<scalar_t>( dY.contiguous(), X, mean, rstd, gamma, M, N, dX, dgamma, dbeta); }); } } // namespace std::tuple<Tensor, Tensor, Tensor> layer_norm_cuda( const Tensor& input, IntArrayRef normalized_shape, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */, double eps) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; auto M_N = _check_layer_norm_inputs(input, normalized_shape, weight, bias); auto M = M_N.first; auto N = M_N.second; auto X = input.expect_contiguous(); auto gamma = weight.expect_contiguous(); auto beta = bias.expect_contiguous(); Tensor Y = at::native::empty_like( *X, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory 
*/, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true); Tensor mean = at::empty({M}, X->options().dtype(acc_type)); Tensor rstd = at::empty({M}, X->options().dtype(acc_type)); // Calling the kernel for M==0 gives a CUDA error // See: https://github.com/pytorch/pytorch/pull/28614 if (M > 0) { LayerNormKernelImpl(*X, *gamma, *beta, M, N, eps, &Y, &mean, &rstd); } const auto input_shape = input.sizes(); const size_t axis = input.dim() - normalized_shape.size(); std::vector<int64_t> stat_shape; for (size_t idx = 0; idx < axis; ++idx) { stat_shape.push_back(input_shape[idx]); } for (size_t idx = axis; idx < input.dim(); ++idx) { stat_shape.push_back(1); } mean = mean.view(stat_shape); rstd = rstd.view(stat_shape); return std::make_tuple(std::move(Y), std::move(mean), std::move(rstd)); } std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cuda( const Tensor& dY, const Tensor& input, IntArrayRef normalized_shape, const Tensor& mean, const Tensor& rstd, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */, std::array<bool, 3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; auto M_N = _check_layer_norm_inputs(input, normalized_shape, weight, bias); auto M = M_N.first; auto N = M_N.second; auto X = input.expect_contiguous(); auto gamma = weight.expect_contiguous(); auto beta = bias.expect_contiguous(); Tensor dX; Tensor dgamma; Tensor dbeta; if (grad_input_mask[0]) { dX = at::native::empty_like( *X, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[1]) { dgamma 
= M > 0 ? at::native::empty_like( *gamma, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like( *gamma, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[2]) { dbeta = M > 0 ? at::native::empty_like( *beta, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like( *beta, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (M > 0) { LayerNormBackwardKernelImpl( dY, *X, mean, rstd, *gamma, M, N, &dX, &dgamma, &dbeta); } return std::make_tuple(std::move(dX), std::move(dgamma), std::move(dbeta)); } REGISTER_DISPATCH(LayerNormKernel, &LayerNormKernelImpl); } // namespace native } // namespace at
06f04f8f1bc89e42d38149591037b5f96a85153e.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/layer_norm.h> #include <type_traits> #include <thrust/tuple.h> #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/cuda/block_reduce.cuh> #include <ATen/native/cuda/thread_constants.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/empty_like_native.h> #include <ATen/ops/native_layer_norm_native.h> #include <ATen/ops/native_layer_norm_backward_native.h> #include <ATen/ops/zeros_like_native.h> #endif #include <c10/cuda/CUDAMathCompat.h> namespace at { namespace native { namespace { constexpr int kCUDANumThreads = 256; constexpr int kColwiseReduceTileSize = 32; constexpr int vec_size = 4; //we could make it dependent on dtype, but that would lead to different results between float and low-p types // aligned vector generates vectorized load/store on CUDA (copy-pasted from MemoryAccess.cuh) template<typename scalar_t, int vec_size> struct alignas(sizeof(scalar_t) * vec_size) aligned_vector { scalar_t val[vec_size]; }; template <typename T, typename T_ACC> __global__ void RowwiseMomentsCUDAKernel( int64_t N, T_ACC eps, const T* X, T_ACC* mean, T_ACC* rstd) { using WelfordType = WelfordData<T_ACC, int64_t, T_ACC>; using WelfordOp = WelfordOps<T_ACC, T_ACC, int64_t, T_ACC, thrust::pair<T_ACC, T_ACC>>; __shared__ typename std::aligned_storage<sizeof(WelfordType), alignof(WelfordType)>:: type val_shared[C10_WARP_SIZE]; WelfordType* val_shared_ptr = reinterpret_cast<WelfordType*>(val_shared); const int64_t i = blockIdx.x; WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false}; WelfordType val(0, 0, 0, 0); for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; val = welford_op.reduce(val, static_cast<T_ACC>(X[index]), index); } val = 
cuda_utils::BlockReduce( val, welford_op, /*identity_element=*/WelfordType(0, 0, 0, 0), val_shared_ptr); if (threadIdx.x == 0) { T_ACC m1; T_ACC m2; thrust::tie(m2, m1) = welford_op.project(val); mean[i] = m1; rstd[i] = c10::cuda::compat::rsqrt(m2 + eps); } } template <typename T, typename T_ACC> __global__ void LayerNormForwardCUDAKernel( int64_t N, const T* X, const T_ACC* mean, const T_ACC* rstd, const T* gamma, const T* beta, T* Y) { const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); const T_ACC beta_v = beta == nullptr ? T_ACC(0) : static_cast<T_ACC>(beta[j]); Y[index] = (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]) * gamma_v + beta_v; } } struct WelfordDataLN{ float mean; float sigma2; float count; C10_HOST_DEVICE WelfordDataLN(): mean(0.f), sigma2(0.f), count(0.f){} C10_HOST_DEVICE WelfordDataLN(float mean, float sigma2, float count): mean(mean), sigma2(sigma2), count(count) {} }; template<typename U> __device__ WelfordDataLN cuWelfordOnlineSum( const U val, const WelfordDataLN& curr_sum) { U delta = val - curr_sum.mean; U new_count = curr_sum.count + 1.f; U new_mean = curr_sum.mean + delta * (1.f/new_count); //proper division is slow, this is less accurate but noticeably faster return {new_mean, curr_sum.sigma2 + delta * (val - new_mean), new_count}; } __device__ WelfordDataLN cuWelfordCombine( const WelfordDataLN dataB, const WelfordDataLN dataA ) { using U = decltype(dataB.count); U delta = dataB.mean - dataA.mean; U count = dataA.count + dataB.count; U mean, sigma2; if (count > decltype(dataB.count){0}) { auto coef = 1.f/count; //NB we don't use --use_fast_math, but this is emulation, 1./count goes to intrinsic, `* coef` is multiplication, instead of slow fp division auto nA = dataA.count * coef; auto nB = dataB.count * coef; mean = nA*dataA.mean + nB*dataB.mean; 
sigma2 = dataA.sigma2 + dataB.sigma2 + delta * delta * dataA.count * nB; } else { mean = U(0); sigma2 = U(0); } return {mean, sigma2, count}; } template<typename T> __device__ WelfordDataLN compute_stats( const T* __restrict__ X, const int N, float * buf ) { //X points to the row to read using vec_t = aligned_vector<T, vec_size>; using acc_t = acc_type<T, true>; const vec_t * X_vec = reinterpret_cast<const vec_t*>(X); const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const int n_vec_to_read = N/vec_size; WelfordDataLN wd(0.f, 0.f, 0.f); //no tail, we check that N is multiple of vec_size for (int i = thrx; i < n_vec_to_read; i += numx) { vec_t data = X_vec[i]; #pragma unroll for (int ii=0; ii < vec_size; ii++){ wd = cuWelfordOnlineSum(static_cast<acc_t>(data.val[ii]), wd); } } // intra-warp reduction for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) { WelfordDataLN wdB{WARP_SHFL_DOWN(wd.mean, offset), WARP_SHFL_DOWN(wd.sigma2, offset), WARP_SHFL_DOWN(wd.count, offset)}; wd = cuWelfordCombine(wd, wdB); } // threadIdx.x == 0 has correct values for each warp // inter-warp reductions if (blockDim.y > 1) { float * meansigmabuf = buf; float * countbuf = buf + blockDim.y; for (int offset = blockDim.y/2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2*offset) { const int wrt_y = threadIdx.y - offset; meansigmabuf[2*wrt_y] = wd.mean; meansigmabuf[2*wrt_y+1] = wd.sigma2; countbuf[wrt_y] = wd.count; } __syncthreads(); // lower half merges if (threadIdx.x == 0 && threadIdx.y < offset) { WelfordDataLN wdB{meansigmabuf[2*threadIdx.y], meansigmabuf[2*threadIdx.y+1], countbuf[threadIdx.y]}; wd = cuWelfordCombine(wd, wdB); } __syncthreads(); } if (threadIdx.x == 0 && threadIdx.y ==0) { meansigmabuf[0] = wd.mean; meansigmabuf[1] = wd.sigma2/float(N); } __syncthreads(); return WelfordDataLN{meansigmabuf[0], meansigmabuf[1],0.f}; } else { 
return WelfordDataLN{WARP_SHFL(wd.mean,0), WARP_SHFL(wd.sigma2,0)/float(N), 0.f}; } } template <typename T, typename T_ACC, typename std::enable_if<!std::is_same<T, double>::value, int>::type = 0> __device__ __inline__ void vectorized_layer_norm_kernel_impl( const int N, T_ACC eps, const T* __restrict__ X, const T* gamma, const T* beta, T_ACC* mean, T_ACC* rstd, T* Y){ extern __shared__ float s_data[]; //if we made smem WelfordDataLN type, there would be bank conflicts, //as one thread would have to write 3 consecutive floats auto i1 = blockIdx.x; const T * block_row = X + i1 * N; WelfordDataLN wd = compute_stats(block_row, N, s_data); using vec_t = aligned_vector<T, vec_size>; const vec_t * X_vec = reinterpret_cast<const vec_t*>(block_row); vec_t * Y_vec = reinterpret_cast<vec_t*>(Y + i1 * N); const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const int n_vec_to_read = N/vec_size; T_ACC rstd_val = c10::cuda::compat::rsqrt(wd.sigma2 + eps); //no tail, N is guaranteed to be multiple of vec size for (int i = thrx; i < n_vec_to_read; i += numx) { vec_t data = X_vec[i]; vec_t out; //computation is performed in T_ACC, X is cast to T_ACC and result is implicitly cast to T if (gamma != nullptr && beta != nullptr) { #pragma unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = static_cast<T_ACC>(gamma[i*vec_size + ii]) * (rstd_val * (static_cast<T_ACC>(data.val[ii]) - wd.mean)) + static_cast<T_ACC>(beta[i*vec_size + ii]); } } else if (gamma != nullptr) { #pragma unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = static_cast<T_ACC>(gamma[i*vec_size + ii]) * (rstd_val * (static_cast<T_ACC>(data.val[ii]) - wd.mean)); } } else if (beta != nullptr) { #pragma unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = (rstd_val * (static_cast<T_ACC>(data.val[ii]) - wd.mean)) + static_cast<T_ACC>(beta[i*vec_size + ii]); } } else { #pragma unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = rstd_val * 
(static_cast<T_ACC>(data.val[ii]) - wd.mean); } } Y_vec[i] = out; } if (thrx == 0) { mean[i1] = wd.mean; rstd[i1] = rstd_val; } } template <typename T, typename T_ACC, typename std::enable_if<std::is_same<T, double>::value, int>::type = 0> __device__ __inline__ void vectorized_layer_norm_kernel_impl( const int /*N*/, T_ACC /*eps*/, const T* __restrict__ /*X*/, const T* /*gamma*/, const T* /*beta*/, T_ACC* /*mean*/, T_ACC* /*rstd*/, T* /*Y*/){ CUDA_KERNEL_ASSERT(false && "doesn't work with double"); } //to avoid windows SFINAE errors template <typename T, typename T_ACC> __global__ __inline__ void vectorized_layer_norm_kernel( const int N, T_ACC eps, const T* __restrict__ X, const T* gamma, const T* beta, T_ACC* mean, T_ACC* rstd, T* Y){ vectorized_layer_norm_kernel_impl(N, eps, X, gamma, beta, mean, rstd, Y); } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( int64_t N, const T* dY, const T* X, const T* gamma, acc_type<T, true>* ds, acc_type<T, true>* db) { using T_ACC = acc_type<T, true>; __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; const int64_t i = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? 
T_ACC(1) : static_cast<T_ACC>(gamma[j]); sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]) * gamma_v; sum2 += static_cast<T_ACC>(dY[index]) * gamma_v; } sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); if (threadIdx.x == 0) { ds[i] = sum1; db[i] = sum2; } } template<typename T, typename T_ACC> __device__ __inline__ void compute_gI( const T* __restrict__ dY, const T* __restrict__ X, const T_ACC* __restrict__ mean, const T_ACC* __restrict__ rstd, const T* __restrict__ gamma, T* dX, const int N, T_ACC * buf){ const auto i1 = blockIdx.x; const T_ACC mean_val = mean[i1]; const T_ACC rstd_val = rstd[i1]; T_ACC stats_x1{0}, stats_x2{0}; constexpr int unroll = 4; auto l = unroll * threadIdx.x; const T * X_i = X + i1 * N; const T * dY_i = dY + i1 * N; T * dX_i = dX + i1 * N; //vectorized reads don't improve perf, so use regular unrolling for (; l+unroll - 1 < N; l += blockDim.x * unroll){ #pragma unroll for (int k=0; k< unroll; k++){ T_ACC gamma_val = (gamma != nullptr) ? static_cast<T_ACC>(gamma[l+k]) : T_ACC(1); const T_ACC c_h = static_cast<T_ACC>(X_i[l+k]); const T_ACC c_loss = static_cast<T_ACC>(dY_i[l+k]); stats_x1 += c_loss * gamma_val; stats_x2 += c_loss * gamma_val * (c_h - mean_val) * rstd_val; } } for (; l < N; l ++) { T_ACC gamma_val = (gamma != nullptr) ? 
static_cast<T_ACC>(gamma[l]) : T_ACC(1); const T_ACC c_h = static_cast<T_ACC>(X_i[l]); const T_ACC c_loss = static_cast<T_ACC>(dY_i[l]); stats_x1 += c_loss * gamma_val; stats_x2 += c_loss * gamma_val * (c_h - mean_val) * rstd_val; } stats_x1 = cuda_utils::BlockReduceSum(stats_x1, buf); stats_x2 = cuda_utils::BlockReduceSum(stats_x2, buf); if (threadIdx.x == 0) { buf[0] = stats_x1; buf[1] = stats_x2; } __syncthreads(); stats_x1 = buf[0]; stats_x2 = buf[1]; T_ACC fH = N; T_ACC term1 = (T_ACC(1) / fH) * rstd_val; for (int l = threadIdx.x; l < N; l += blockDim.x){ const T_ACC x = X_i[l]; const T_ACC dy = dY_i[l]; T_ACC gamma_val = (gamma != nullptr) ? static_cast<T_ACC>(gamma[l]) : T_ACC(1); T_ACC f_grad_input = fH * gamma_val * dy; f_grad_input -= (x - mean_val) * rstd_val * stats_x2; f_grad_input -= stats_x1; f_grad_input *= term1; dX_i[l] = f_grad_input; } } template<typename T, typename T_ACC> __global__ void layer_norm_grad_input_kernel( const T* __restrict__ dY, const T* __restrict__ X, const T_ACC* __restrict__ mean, const T_ACC* __restrict__ rstd, const T* __restrict__ gamma, T* dX, const int N){ alignas(sizeof(double)) extern __shared__ char s_data1[]; T_ACC * buf = reinterpret_cast<T_ACC*>(&s_data1); compute_gI(dY, X, mean, rstd, gamma, dX, N, buf); } template <typename T, typename T_ACC> __global__ void ComputeGradientFusedParamsCUDAKernel( int64_t M, int64_t N, const T_ACC* mean, const T_ACC* rstd, const acc_type<T, true>* ds, const acc_type<T, true>* db, acc_type<T, true>* c1, acc_type<T, true>* c2) { const int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < M) { const T_ACC s = T_ACC(1) / static_cast<T_ACC>(N); const T_ACC a = (db[index] * static_cast<T_ACC>(mean[index]) - ds[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * s; c1[index] = a; c2[index] = -(a * static_cast<T_ACC>(mean[index]) + db[index] * static_cast<T_ACC>(rstd[index]) * s); } } template <typename T, typename 
T_ACC> __global__ void LayerNormBackwardCUDAKernel( int64_t N, const T* dY, const T* X, const T* gamma, const T_ACC* a, const acc_type<T, true>* b, const acc_type<T, true>* c, T* dX) { const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); dX[index] = static_cast<T_ACC>(a[i]) * static_cast<T_ACC>(dY[index]) * gamma_v + b[i] * static_cast<T_ACC>(X[index]) + c[i]; } } template <typename T, typename T_ACC> __global__ void GammaBetaBackwardSimpleCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T_ACC* mean, const T_ACC* rstd, T* dg, T* db) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; if (j < N) { T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t i = 0; i < M; ++i) { const int64_t index = i * N + j; sum1 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]) * (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]); sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]); } if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } template <typename T, typename T_ACC> __global__ void GammaBetaBackwardCUDAKernel1( int64_t M, int64_t N, const T* dY, const T* X, const T_ACC* mean, const T_ACC* rstd, T* dg, T* db) { __shared__ T_ACC g_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; __shared__ T_ACC b_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; T_ACC dg_sum1 = 0; T_ACC dg_sum2 = 0; T_ACC db_sum1 = 0; T_ACC db_sum2 = 0; if (j < N) { for (int64_t i = threadIdx.y; i < M; i += blockDim.y * 2) { const int64_t i1 = i; const int64_t i2 = i + blockDim.y; const int64_t index1 = i1 * N + j; const int64_t index2 = i2 * N + j; dg_sum1 += dg == nullptr ? 
T_ACC(0) : static_cast<T_ACC>(dY[index1]) * (static_cast<T_ACC>(X[index1]) - static_cast<T_ACC>(mean[i1])) * static_cast<T_ACC>(rstd[i1]); db_sum1 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]); if (i2 < M) { dg_sum2 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]) * (static_cast<T_ACC>(X[index2]) - static_cast<T_ACC>(mean[i2])) * static_cast<T_ACC>(rstd[i2]); db_sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]); } } } g_shared[threadIdx.y][threadIdx.x] = dg_sum1; g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; b_shared[threadIdx.y][threadIdx.x] = db_sum1; b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; __syncthreads(); T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } } template <typename T, typename T_ACC> __global__ void GammaBetaBackwardCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T_ACC* mean, const T_ACC* rstd, T* dg, T* db) { alignas(sizeof(double)) extern __shared__ char s_data1[]; T_ACC * s_data_typed = reinterpret_cast<T_ACC*>(&s_data1); const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; constexpr int unroll = 8; T dYs[unroll]; T Xs[unroll]; T_ACC * means = s_data_typed; T_ACC * rstds = s_data_typed + unroll * blockDim.y; T_ACC dg_sum = 0; T_ACC db_sum = 0; if (j < N) { int bcounter; for (bcounter = 0; 
bcounter < M/(blockDim.y * unroll); bcounter++){ int offset = (bcounter * blockDim.y + threadIdx.y) * unroll; #pragma unroll for (int ii=0; ii<unroll; ii++){ if (threadIdx.x == 0) { means[ii*blockDim.y + threadIdx.y] = mean[offset + ii]; rstds[ii*blockDim.y + threadIdx.y] = rstd[offset + ii]; } dYs[ii] = dY[(offset + ii) * N + j ]; Xs[ii] = X[(offset + ii) * N + j]; } __syncthreads(); #pragma unroll for (int ii=0; ii<unroll; ii++){ dg_sum += dYs[ii] * (Xs[ii] - means[ii*blockDim.y + threadIdx.y]) * rstds[ii * blockDim.y + threadIdx.y]; db_sum += dYs[ii]; } __syncthreads(); } int offset = (bcounter * blockDim.y + threadIdx.y) * unroll; for (int ii = 0; ii<8; ii++ ){ T_ACC mean_val, rstd_val; // we don't use smem in the tail to avoid awkward synchronizations, perf penalty is negligible if ((offset + ii) < M) { mean_val = mean[offset+ii]; rstd_val = rstd[offset+ii]; dYs[0] = dY[(offset + ii) * N + j ]; Xs[0] = X[(offset + ii) * N + j]; dg_sum += dYs[0] * (Xs[0] - mean_val) * rstd_val; db_sum += dYs[0]; } } s_data_typed[threadIdx.y * blockDim.x + threadIdx.x] = dg_sum; s_data_typed[blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x] = db_sum; __syncthreads(); for (int offset = blockDim.y/2; offset >=1; offset /= 2){ if (threadIdx.y < offset) { s_data_typed[threadIdx.y * blockDim.x + threadIdx.x] += s_data_typed[(threadIdx.y + offset) * blockDim.x + threadIdx.x]; s_data_typed[blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x] += s_data_typed[blockDim.x * blockDim.y + (threadIdx.y + offset) * blockDim.x + threadIdx.x]; } __syncthreads(); } if (threadIdx.y == 0) { if (dg) { dg[j] = s_data_typed[threadIdx.x]; } if (db) { db[j] = s_data_typed[threadIdx.x + blockDim.x * blockDim.y]; } } } } template <typename T, typename T_ACC> void launch_vectorized_layer_norm_kernel( int N, int64_t M, T_ACC eps, const T* X_data, const T* gamma_data, const T* beta_data, T* Y_data, T_ACC* mean_data, T_ACC* rstd_data ) { //constexpr int alignment = 16; //currently 
unused to make sure float and half results are bw accurate auto stream = at::cuda::getCurrentCUDAStream().stream(); const int warp_size = at::cuda::warp_size(); const dim3 threads(warp_size, num_threads() / warp_size, 1); const dim3 blocks(M); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(threads.y % 2 == 0 || threads.y == 1); int nshared = threads.y > 1 ? threads.y * 3/2 *sizeof(T_ACC) : 0; vectorized_layer_norm_kernel<<<blocks, threads, nshared, stream>>>(N, eps, X_data, gamma_data, beta_data, mean_data, rstd_data, Y_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename T, typename T_ACC> void LayerNormKernelImplInternal( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, T_ACC eps, Tensor* Y, Tensor* mean, Tensor* rstd) { // assumes input, gamma and beta are of proper shape, this was checked in _check_layer_norm_inputs // assumes all tensors are contiguous TORCH_CHECK(M <= at::cuda::getCurrentDeviceProperties()->maxGridSize[0], "M should be less than maximum CUDA grid size, \ file a support request to support bigger batches"); const T* X_data = X.data_ptr<T>(); const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const T* beta_data = beta.defined() ? 
beta.data_ptr<T>() : nullptr; T* Y_data = Y->data_ptr<T>(); T_ACC* mean_data = mean->data_ptr<T_ACC>(); T_ACC* rstd_data = rstd->data_ptr<T_ACC>(); // check if can take fast path - all tensors are properly aligned, N is less than 2^24 (to use float count), // N is multiple of vec_size (so that all rows are aligned if tensor is aligned) auto can_vectorize = [&](const T * ptr, int alignment){uint64_t addr = reinterpret_cast<uint64_t>(ptr); return addr % alignment == 0;}; constexpr int num_vec_elems = vec_size; constexpr int alignment = num_vec_elems * sizeof(T); if ((std::is_same<T, float>::value || std::is_same<T, at::Half>::value) && N <= 1ULL << std::numeric_limits<float>::digits && N % num_vec_elems == 0 && can_vectorize(X_data, alignment) && can_vectorize(Y_data, alignment)) { launch_vectorized_layer_norm_kernel(static_cast<int>(N), M, eps, X_data, gamma_data, beta_data, Y_data, mean_data, rstd_data); } else { cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); RowwiseMomentsCUDAKernel<T, T_ACC> <<<M, cuda_utils::kCUDABlockReduceNumThreads, 0, cuda_stream>>>( N, eps, X_data, mean_data, rstd_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); LayerNormForwardCUDAKernel<T, T_ACC><<<M, kCUDANumThreads, 0, cuda_stream>>>( N, X_data, mean_data, rstd_data, gamma_data, beta_data, Y_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } void LayerNormKernelImpl( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, double eps, Tensor* Y, Tensor* mean, Tensor* rstd) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormKernelImpl", [&]() { using acc_t = acc_type<scalar_t, true>; LayerNormKernelImplInternal<scalar_t, acc_t>( X, gamma, beta, M, N, static_cast<acc_t>(eps), Y, mean, rstd); }); } template <typename T> void LayerNormBackwardKernelImplInternal( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* 
dbeta) { using T_ACC = acc_type<T, true>; TORCH_CHECK(dY.numel() == M * N); TORCH_CHECK(mean.numel() == M); TORCH_CHECK(rstd.numel() == M); TORCH_CHECK(M <= at::cuda::getCurrentDeviceProperties()->maxGridSize[0], "M should be less than maximum CUDA grid size, \ file a support request to support bigger batches"); TORCH_CHECK(N <= std::numeric_limits<int>::max(), "Normalized shape should have less than INT_MAX elements, \ file a support request to support bigger normalized shapes"); const T* dY_data = dY.template data_ptr<T>(); const T* X_data = X.template data_ptr<T>(); const T_ACC* mean_data = mean.template data_ptr<T_ACC>(); const T_ACC* rstd_data = rstd.template data_ptr<T_ACC>(); const T* gamma_data = gamma.defined() ? gamma.template data_ptr<T>() : nullptr; T* dX_data = dX->defined() ? dX->template data_ptr<T>() : nullptr; cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); if (dX_data != nullptr) { const int warp_size = at::cuda::warp_size(); const dim3 blocks(M); int nshared = (num_threads()/warp_size) * sizeof(T_ACC); layer_norm_grad_input_kernel<<<blocks, num_threads(), nshared, cuda_stream>>>(dY_data, X_data, mean_data, rstd_data, gamma_data, dX_data, N); C10_CUDA_KERNEL_LAUNCH_CHECK(); } if (dgamma->defined() || dbeta->defined()) { T* dgamma_data = dgamma->defined() ? dgamma->template data_ptr<T>() : nullptr; T* dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T>() : nullptr; if (M < 512) { // For small batch size, do colwise reduce directly. 
const int64_t B = (N + kCUDANumThreads - 1) / kCUDANumThreads; GammaBetaBackwardSimpleCUDAKernel<T, T_ACC> <<<B, kCUDANumThreads, 0, cuda_stream>>>( M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { dim3 threads{16, 32}; int blocks = (N + threads.x-1)/threads.x; GammaBetaBackwardCUDAKernel<T, T_ACC> <<<blocks, threads, 2 * sizeof(T_ACC) * threads.x * threads.y, cuda_stream>>>( M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } void LayerNormBackwardKernelImpl( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* dbeta) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormBackwardKernelImpl", [&]() { LayerNormBackwardKernelImplInternal<scalar_t>( dY.contiguous(), X, mean, rstd, gamma, M, N, dX, dgamma, dbeta); }); } } // namespace std::tuple<Tensor, Tensor, Tensor> layer_norm_cuda( const Tensor& input, IntArrayRef normalized_shape, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */, double eps) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; auto M_N = _check_layer_norm_inputs(input, normalized_shape, weight, bias); auto M = M_N.first; auto N = M_N.second; auto X = input.expect_contiguous(); auto gamma = weight.expect_contiguous(); auto beta = bias.expect_contiguous(); Tensor Y = at::native::empty_like( *X, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto acc_type = 
at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true); Tensor mean = at::empty({M}, X->options().dtype(acc_type)); Tensor rstd = at::empty({M}, X->options().dtype(acc_type)); // Calling the kernel for M==0 gives a CUDA error // See: https://github.com/pytorch/pytorch/pull/28614 if (M > 0) { LayerNormKernelImpl(*X, *gamma, *beta, M, N, eps, &Y, &mean, &rstd); } const auto input_shape = input.sizes(); const size_t axis = input.dim() - normalized_shape.size(); std::vector<int64_t> stat_shape; for (size_t idx = 0; idx < axis; ++idx) { stat_shape.push_back(input_shape[idx]); } for (size_t idx = axis; idx < input.dim(); ++idx) { stat_shape.push_back(1); } mean = mean.view(stat_shape); rstd = rstd.view(stat_shape); return std::make_tuple(std::move(Y), std::move(mean), std::move(rstd)); } std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cuda( const Tensor& dY, const Tensor& input, IntArrayRef normalized_shape, const Tensor& mean, const Tensor& rstd, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */, std::array<bool, 3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; auto M_N = _check_layer_norm_inputs(input, normalized_shape, weight, bias); auto M = M_N.first; auto N = M_N.second; auto X = input.expect_contiguous(); auto gamma = weight.expect_contiguous(); auto beta = bias.expect_contiguous(); Tensor dX; Tensor dgamma; Tensor dbeta; if (grad_input_mask[0]) { dX = at::native::empty_like( *X, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[1]) { dgamma = M > 0 ? 
at::native::empty_like( *gamma, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like( *gamma, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[2]) { dbeta = M > 0 ? at::native::empty_like( *beta, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like( *beta, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (M > 0) { LayerNormBackwardKernelImpl( dY, *X, mean, rstd, *gamma, M, N, &dX, &dgamma, &dbeta); } return std::make_tuple(std::move(dX), std::move(dgamma), std::move(dbeta)); } REGISTER_DISPATCH(LayerNormKernel, &LayerNormKernelImpl); } // namespace native } // namespace at
38d36606070c367f94bd251eb01d7f42915e1e59.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #define VISRTX_DEBUGGING 0 #include "gpu/gpu_debug.h" #include "gpu/shading_api.h" namespace visrtx { enum class RayType { DIFFUSE_RADIANCE }; struct PathData { int depth{0}; vec3 Lw{1.f}; Hit currentHit{}; }; DECLARE_FRAME_DATA(frameData) // OptiX programs ///////////////////////////////////////////////////////////// RT_PROGRAM void __closesthit__() { ray::populateHit(); } RT_PROGRAM void __anyhit__() { SurfaceHit hit; ray::populateSurfaceHit(hit); const auto &material = *hit.material; const auto mat_opacity = getMaterialParameter(frameData, material.opacity, hit); if (mat_opacity < 0.99f) optixIgnoreIntersection(); } RT_PROGRAM void __miss__() { // no-op } RT_PROGRAM void __raygen__() { auto &rendererParams = frameData.renderer; auto &dptParams = rendererParams.params.dpt; PathData pathData; auto &hit = pathData.currentHit; ///////////////////////////////////////////////////////////////////////////// // TODO: clean this up! need to split out Ray/RNG, don't need screen samples auto ss = createScreenSample(frameData); if (pixelOutOfFrame(ss.pixel, frameData.fb)) return; auto ray = makePrimaryRay(ss); auto tmax = ray.t.upper; ///////////////////////////////////////////////////////////////////////////// if (debug()) printf("========== BEGIN: FrameID %i ==========\n", frameData.fb.frameID); const auto bg = getBackground(frameData.renderer, ss.screen); vec3 outColor(bg); vec3 outNormal = ray.dir; float outDepth = tmax; while (true) { if (debug()) printf("-------- BOUNCE: %i --------\n", pathData.depth); hit.foundHit = false; intersectSurface(ss, ray, RayType::DIFFUSE_RADIANCE, &hit, 0); float volumeOpacity = 0.f; vec3 volumeColor(0.f); float Tr = 0.f; const float volumeDepth = sampleDistanceAllVolumes(ss, ray, RayType::DIFFUSE_RADIANCE, hit.foundHit ? 
hit.t : ray.t.upper, volumeColor, volumeOpacity, Tr); const bool volumeHit = Tr < 1.f && (!hit.foundHit || volumeDepth < hit.t); if (!hit.foundHit && !volumeHit) break; if (pathData.depth++ >= dptParams.maxDepth) { pathData.Lw = vec3(0.f); break; } vec3 albedo(1.f); vec3 pos(0.f); if (!volumeHit) { pos = hit.hitpoint + (hit.epsilon * hit.Ng); const auto &material = *hit.material; albedo = getMaterialParameter(frameData, material.baseColor, hit); } else { pos = ray.org + volumeDepth * ray.dir; albedo = volumeColor; } pathData.Lw *= albedo; // RR absorption float P = glm::compMax(pathData.Lw); if (P < .2f /*lp.rouletteProb*/) { if (hiprand_uniform(&ss.rs) > P) { pathData.Lw = vec3(0.f); break; } pathData.Lw /= P; } // pathData.Lw += Le; // TODO: emission vec3 scatterDir(0.f); if (!volumeHit) { scatterDir = randomDir(ss.rs, hit.Ns); pathData.Lw *= fmaxf(0.f, dot(scatterDir, hit.Ng)); } else scatterDir = sampleUnitSphere(ss.rs, -ray.dir); ray.org = pos; ray.dir = scatterDir; ray.t.lower = 0.f; ray.t.upper = rendererParams.occlusionDistance; if (pathData.depth == 0) { outDepth = min(hit.t, volumeDepth); outNormal = hit.Ng; // TODO: for volume (gradient?) } } vec3 Ld(rendererParams.ambientIntensity); // ambient light! // if (numLights > 0) { // Ld = ...; // } vec3 color = pathData.depth ? pathData.Lw * Ld : vec3(bg); if (crosshair()) color = vec3(1) - color; if (debug()) printf("========== END: FrameID %i ==========\n", frameData.fb.frameID); accumResults( frameData.fb, ss.pixel, vec4(color, 1.f), outDepth, outColor, outNormal); } } // namespace visrtx
38d36606070c367f94bd251eb01d7f42915e1e59.cu
/* * Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #define VISRTX_DEBUGGING 0 #include "gpu/gpu_debug.h" #include "gpu/shading_api.h" namespace visrtx { enum class RayType { DIFFUSE_RADIANCE }; struct PathData { int depth{0}; vec3 Lw{1.f}; Hit currentHit{}; }; DECLARE_FRAME_DATA(frameData) // OptiX programs ///////////////////////////////////////////////////////////// RT_PROGRAM void __closesthit__() { ray::populateHit(); } RT_PROGRAM void __anyhit__() { SurfaceHit hit; ray::populateSurfaceHit(hit); const auto &material = *hit.material; const auto mat_opacity = getMaterialParameter(frameData, material.opacity, hit); if (mat_opacity < 0.99f) optixIgnoreIntersection(); } RT_PROGRAM void __miss__() { // no-op } RT_PROGRAM void __raygen__() { auto &rendererParams = frameData.renderer; auto &dptParams = rendererParams.params.dpt; PathData pathData; auto &hit = pathData.currentHit; ///////////////////////////////////////////////////////////////////////////// // TODO: clean this up! need to split out Ray/RNG, don't need screen samples auto ss = createScreenSample(frameData); if (pixelOutOfFrame(ss.pixel, frameData.fb)) return; auto ray = makePrimaryRay(ss); auto tmax = ray.t.upper; ///////////////////////////////////////////////////////////////////////////// if (debug()) printf("========== BEGIN: FrameID %i ==========\n", frameData.fb.frameID); const auto bg = getBackground(frameData.renderer, ss.screen); vec3 outColor(bg); vec3 outNormal = ray.dir; float outDepth = tmax; while (true) { if (debug()) printf("-------- BOUNCE: %i --------\n", pathData.depth); hit.foundHit = false; intersectSurface(ss, ray, RayType::DIFFUSE_RADIANCE, &hit, 0); float volumeOpacity = 0.f; vec3 volumeColor(0.f); float Tr = 0.f; const float volumeDepth = sampleDistanceAllVolumes(ss, ray, RayType::DIFFUSE_RADIANCE, hit.foundHit ? 
hit.t : ray.t.upper, volumeColor, volumeOpacity, Tr); const bool volumeHit = Tr < 1.f && (!hit.foundHit || volumeDepth < hit.t); if (!hit.foundHit && !volumeHit) break; if (pathData.depth++ >= dptParams.maxDepth) { pathData.Lw = vec3(0.f); break; } vec3 albedo(1.f); vec3 pos(0.f); if (!volumeHit) { pos = hit.hitpoint + (hit.epsilon * hit.Ng); const auto &material = *hit.material; albedo = getMaterialParameter(frameData, material.baseColor, hit); } else { pos = ray.org + volumeDepth * ray.dir; albedo = volumeColor; } pathData.Lw *= albedo; // RR absorption float P = glm::compMax(pathData.Lw); if (P < .2f /*lp.rouletteProb*/) { if (curand_uniform(&ss.rs) > P) { pathData.Lw = vec3(0.f); break; } pathData.Lw /= P; } // pathData.Lw += Le; // TODO: emission vec3 scatterDir(0.f); if (!volumeHit) { scatterDir = randomDir(ss.rs, hit.Ns); pathData.Lw *= fmaxf(0.f, dot(scatterDir, hit.Ng)); } else scatterDir = sampleUnitSphere(ss.rs, -ray.dir); ray.org = pos; ray.dir = scatterDir; ray.t.lower = 0.f; ray.t.upper = rendererParams.occlusionDistance; if (pathData.depth == 0) { outDepth = min(hit.t, volumeDepth); outNormal = hit.Ng; // TODO: for volume (gradient?) } } vec3 Ld(rendererParams.ambientIntensity); // ambient light! // if (numLights > 0) { // Ld = ...; // } vec3 color = pathData.depth ? pathData.Lw * Ld : vec3(bg); if (crosshair()) color = vec3(1) - color; if (debug()) printf("========== END: FrameID %i ==========\n", frameData.fb.frameID); accumResults( frameData.fb, ss.pixel, vec4(color, 1.f), outDepth, outColor, outNormal); } } // namespace visrtx
4dddd6d714dd7c0ef0840b6dd6c6b7ed4d0f31ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "functions.hpp" __global__ void kernelChi2(float* d_mua,float* d_mub,float* d_like_dum,int Nwave,int Nlocs); __constant__ float const_f_obs[64]; __constant__ float const_df_obs[64]; void getChi2Cuda(double* like_all,float** f_obs_all,float* df_obs,float** h_mua,float** h_mub,int Nlocs,int Nwave,int Nratios){ hipError_t err; // Size of the shared memory required in the kernel is always Nwave*Nthreads*sizeof(float) // i.e. this is the chunk of muA locations for all wavelengths. // I need to generally keep this below 48KB. int Ngrid; int Nthreads; setGridThreads(Ngrid,Nthreads,Nwave); printf("Shared memory occupancy: %d/48000\n",Nwave*Nthreads*sizeof(float)); printf("Ngrid/Nthreads = %d/%d\n",Ngrid,Nthreads); // Allocate memory on device // inputs: float* d_mua; float* d_mub; hipMalloc(&d_mua,Nwave*Nlocs*sizeof(float)); hipMalloc(&d_mub,Nwave*Nlocs*sizeof(float)); // output: int Nlike = Ngrid*Nthreads; float* h_like_dum = (float*) malloc(Nlike*sizeof(float)); float* d_like_dum; hipMalloc(&d_like_dum,Nlike*sizeof(float)); err = hipGetLastError(); if( err != hipSuccess ){ fprintf(stderr,"Error: %s - in \"memory allocation\" \n",hipGetErrorString(err)); } // Transfer (input) memory from host to device for(int k=0;k<Nwave;k++){ hipMemcpy(d_mua+k*Nlocs,h_mua[k],Nlocs*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(d_mub+k*Nlocs,h_mub[k],Nlocs*sizeof(float),hipMemcpyHostToDevice); } hipMemcpyToSymbol(const_df_obs,(void*)df_obs,Nwave*sizeof(float),0); err = hipGetLastError(); if( err != hipSuccess ){ fprintf(stderr,"Error: %s - in \"memory transfer to device\" \n",hipGetErrorString(err)); } // Loop over f_obs (Nratios times) dim3 grid(Ngrid); dim3 threads(Nthreads); int Nmem = Nwave*Nthreads; for(int i=0;i<Nratios;i++){ // Transfer f_obs to constant memory hipMemcpyToSymbol(const_f_obs,(void*)f_obs_all[i],Nwave*sizeof(float),0); // Execute GPU kernels hipLaunchKernelGGL(( 
kernelChi2), dim3(grid),dim3(threads),Nmem*sizeof(float), 0, d_mua,d_mub,d_like_dum,Nwave,Nlocs); hipDeviceSynchronize(); err = hipGetLastError(); if( err != hipSuccess ){ fprintf(stderr,"Error: %s - in \"kernelChi2\" \n",hipGetErrorString(err)); } // Get memory from device hipMemcpy(h_like_dum,d_like_dum,Nlike*sizeof(float),hipMemcpyDeviceToHost); err = hipGetLastError(); if( err != hipSuccess ){ fprintf(stderr,"Error: %s - in \"memory transfer from device\" \n",hipGetErrorString(err)); } // Add all the likelihoods in h_like_dum double like = 0.0; for(int j=0;j<Nlike;j++){ like += h_like_dum[j]; } like_all[i] = like; std::cout << like_all[i] << std::endl; } free(h_like_dum); } __global__ void kernelChi2(float* d_mua,float* d_mub,float* d_like_dum,int Nwave,int Nlocs){ unsigned int t = threadIdx.x; unsigned int Nthreads = blockDim.x; unsigned int thread_id = blockIdx.x*Nthreads + t; unsigned int Nblocks = gridDim.x; // Allocate shared memory extern __shared__ float mua[]; float like = 0.0; // Loop over all of muA, reading it block by block for(int j=0;j<Nblocks;j++){ // Each thread reads Nwave entries from muA into shared memory (different wavelengths from the same magmap location) for(int k=0;k<Nwave;k++){ mua[k*Nthreads+t] = d_mua[k*Nlocs+Nthreads*j+t]; } __syncthreads(); // Each thread combines its unique muB value (thread_id) with the muA values currently into shared memory for(int i=0;i<Nthreads;i++){ float fac = 0.0; for(int k=0;k<Nwave;k++){ float fsim = mua[k*Nthreads+i]/d_mub[k*Nlocs+thread_id]; float dum = (const_f_obs[k] - fsim)/const_df_obs[k]; fac += dum*dum/2.0; } float dum = exp(-fac); like += dum; // like += 1; } } d_like_dum[thread_id] = like; } void setGridThreads(int& Ngrid,int& Nthreads,int Nwave){ Ngrid = 10; Nthreads = 1000; int shared_mem_size = Nthreads*Nwave*sizeof(float); while( shared_mem_size > 30000 ){ Ngrid *= 2; Nthreads /= 2; shared_mem_size = Nthreads*Nwave*sizeof(float); } } void getChi2CudaCPU(double* like_all,float** 
f_obs_all,float* df_obs,float** h_mua,float** h_mub,int Nlocs,int Nwave,int Nratios){ for(int q=0;q<Nratios;q++){ double like = 0.0; for(int i=0;i<Nlocs;i++){ for(int j=0;j<Nlocs;j++){ double chi2 = 0.0; for(int k=0;k<Nwave;k++){ double fsim = h_mua[k][i]/h_mub[k][j]; double dum = (f_obs_all[q][k] - fsim)/df_obs[k]; chi2 += dum*dum; } like += exp(-chi2/2.0); } } like_all[q] = like; std::cout << like_all[q] << std::endl; } }
4dddd6d714dd7c0ef0840b6dd6c6b7ed4d0f31ac.cu
#include "functions.hpp" __global__ void kernelChi2(float* d_mua,float* d_mub,float* d_like_dum,int Nwave,int Nlocs); __constant__ float const_f_obs[64]; __constant__ float const_df_obs[64]; void getChi2Cuda(double* like_all,float** f_obs_all,float* df_obs,float** h_mua,float** h_mub,int Nlocs,int Nwave,int Nratios){ cudaError_t err; // Size of the shared memory required in the kernel is always Nwave*Nthreads*sizeof(float) // i.e. this is the chunk of muA locations for all wavelengths. // I need to generally keep this below 48KB. int Ngrid; int Nthreads; setGridThreads(Ngrid,Nthreads,Nwave); printf("Shared memory occupancy: %d/48000\n",Nwave*Nthreads*sizeof(float)); printf("Ngrid/Nthreads = %d/%d\n",Ngrid,Nthreads); // Allocate memory on device // inputs: float* d_mua; float* d_mub; cudaMalloc(&d_mua,Nwave*Nlocs*sizeof(float)); cudaMalloc(&d_mub,Nwave*Nlocs*sizeof(float)); // output: int Nlike = Ngrid*Nthreads; float* h_like_dum = (float*) malloc(Nlike*sizeof(float)); float* d_like_dum; cudaMalloc(&d_like_dum,Nlike*sizeof(float)); err = cudaGetLastError(); if( err != cudaSuccess ){ fprintf(stderr,"Error: %s - in \"memory allocation\" \n",cudaGetErrorString(err)); } // Transfer (input) memory from host to device for(int k=0;k<Nwave;k++){ cudaMemcpy(d_mua+k*Nlocs,h_mua[k],Nlocs*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(d_mub+k*Nlocs,h_mub[k],Nlocs*sizeof(float),cudaMemcpyHostToDevice); } cudaMemcpyToSymbol(const_df_obs,(void*)df_obs,Nwave*sizeof(float),0); err = cudaGetLastError(); if( err != cudaSuccess ){ fprintf(stderr,"Error: %s - in \"memory transfer to device\" \n",cudaGetErrorString(err)); } // Loop over f_obs (Nratios times) dim3 grid(Ngrid); dim3 threads(Nthreads); int Nmem = Nwave*Nthreads; for(int i=0;i<Nratios;i++){ // Transfer f_obs to constant memory cudaMemcpyToSymbol(const_f_obs,(void*)f_obs_all[i],Nwave*sizeof(float),0); // Execute GPU kernels kernelChi2<<<grid,threads,Nmem*sizeof(float)>>>(d_mua,d_mub,d_like_dum,Nwave,Nlocs); 
cudaThreadSynchronize(); err = cudaGetLastError(); if( err != cudaSuccess ){ fprintf(stderr,"Error: %s - in \"kernelChi2\" \n",cudaGetErrorString(err)); } // Get memory from device cudaMemcpy(h_like_dum,d_like_dum,Nlike*sizeof(float),cudaMemcpyDeviceToHost); err = cudaGetLastError(); if( err != cudaSuccess ){ fprintf(stderr,"Error: %s - in \"memory transfer from device\" \n",cudaGetErrorString(err)); } // Add all the likelihoods in h_like_dum double like = 0.0; for(int j=0;j<Nlike;j++){ like += h_like_dum[j]; } like_all[i] = like; std::cout << like_all[i] << std::endl; } free(h_like_dum); } __global__ void kernelChi2(float* d_mua,float* d_mub,float* d_like_dum,int Nwave,int Nlocs){ unsigned int t = threadIdx.x; unsigned int Nthreads = blockDim.x; unsigned int thread_id = blockIdx.x*Nthreads + t; unsigned int Nblocks = gridDim.x; // Allocate shared memory extern __shared__ float mua[]; float like = 0.0; // Loop over all of muA, reading it block by block for(int j=0;j<Nblocks;j++){ // Each thread reads Nwave entries from muA into shared memory (different wavelengths from the same magmap location) for(int k=0;k<Nwave;k++){ mua[k*Nthreads+t] = d_mua[k*Nlocs+Nthreads*j+t]; } __syncthreads(); // Each thread combines its unique muB value (thread_id) with the muA values currently into shared memory for(int i=0;i<Nthreads;i++){ float fac = 0.0; for(int k=0;k<Nwave;k++){ float fsim = mua[k*Nthreads+i]/d_mub[k*Nlocs+thread_id]; float dum = (const_f_obs[k] - fsim)/const_df_obs[k]; fac += dum*dum/2.0; } float dum = exp(-fac); like += dum; // like += 1; } } d_like_dum[thread_id] = like; } void setGridThreads(int& Ngrid,int& Nthreads,int Nwave){ Ngrid = 10; Nthreads = 1000; int shared_mem_size = Nthreads*Nwave*sizeof(float); while( shared_mem_size > 30000 ){ Ngrid *= 2; Nthreads /= 2; shared_mem_size = Nthreads*Nwave*sizeof(float); } } void getChi2CudaCPU(double* like_all,float** f_obs_all,float* df_obs,float** h_mua,float** h_mub,int Nlocs,int Nwave,int Nratios){ for(int 
q=0;q<Nratios;q++){ double like = 0.0; for(int i=0;i<Nlocs;i++){ for(int j=0;j<Nlocs;j++){ double chi2 = 0.0; for(int k=0;k<Nwave;k++){ double fsim = h_mua[k][i]/h_mub[k][j]; double dum = (f_obs_all[q][k] - fsim)/df_obs[k]; chi2 += dum*dum; } like += exp(-chi2/2.0); } } like_all[q] = like; std::cout << like_all[q] << std::endl; } }
695aecc21cf4b59c2b10c020c33d9ed3610dc478.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2020 by XGBoost Contributors */ #include "evaluate_splits.cuh" #include <limits> namespace xgboost { namespace tree { // With constraints template <typename GradientPairT> XGBOOST_DEVICE float LossChangeMissing(const GradientPairT &scan, const GradientPairT &missing, const GradientPairT &parent_sum, const GPUTrainingParam &param, bst_node_t nidx, bst_feature_t fidx, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, bool &missing_left_out) { // NOLINT float parent_gain = CalcGain(param, parent_sum); float missing_left_gain = evaluator.CalcSplitGain(param, nidx, fidx, GradStats(scan + missing), GradStats(parent_sum - (scan + missing))); float missing_right_gain = evaluator.CalcSplitGain( param, nidx, fidx, GradStats(scan), GradStats(parent_sum - scan)); if (missing_left_gain >= missing_right_gain) { missing_left_out = true; return missing_left_gain - parent_gain; } else { missing_left_out = false; return missing_right_gain - parent_gain; } } /*! * \brief * * \tparam ReduceT BlockReduce Type. * \tparam TempStorage Cub Shared memory * * \param begin * \param end * \param temp_storage Shared memory for intermediate result. */ template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT> __device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram, TempStorageT* temp_storage) { __shared__ cub::Uninitialized<GradientSumT> uninitialized_sum; GradientSumT& shared_sum = uninitialized_sum.Alias(); GradientSumT local_sum = GradientSumT(); // For loop sums features into one block size auto begin = feature_histogram.data(); auto end = begin + feature_histogram.size(); for (auto itr = begin; itr < end; itr += BLOCK_THREADS) { bool thread_active = itr + threadIdx.x < end; // Scan histogram GradientSumT bin = thread_active ? 
*(itr + threadIdx.x) : GradientSumT(); local_sum += bin; } local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, hipcub::Sum()); // Reduction result is stored in thread 0. if (threadIdx.x == 0) { shared_sum = local_sum; } __syncthreads(); return shared_sum; } /*! \brief Find the thread with best gain. */ template <int BLOCK_THREADS, typename ReduceT, typename ScanT, typename MaxReduceT, typename TempStorageT, typename GradientSumT> __device__ void EvaluateFeature( int fidx, EvaluateSplitInputs<GradientSumT> inputs, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, DeviceSplitCandidate* best_split, // shared memory storing best split TempStorageT* temp_storage // temp memory for cub operations ) { // Use pointer from cut to indicate begin and end of bins for each feature. uint32_t gidx_begin = inputs.feature_segments[fidx]; // begining bin uint32_t gidx_end = inputs.feature_segments[fidx + 1]; // end bin for i^th feature // Sum histogram bins for current feature GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT, TempStorageT, GradientSumT>( inputs.gradient_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage); GradientSumT const missing = inputs.parent_sum - feature_sum; float const null_gain = -std::numeric_limits<bst_float>::infinity(); SumCallbackOp<GradientSumT> prefix_op = SumCallbackOp<GradientSumT>(); for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += BLOCK_THREADS) { bool thread_active = (scan_begin + threadIdx.x) < gidx_end; // Gradient value for current bin. GradientSumT bin = thread_active ? inputs.gradient_histogram[scan_begin + threadIdx.x] : GradientSumT(); ScanT(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op); // Whether the gradient of missing values is put to the left side. 
bool missing_left = true; float gain = null_gain; if (thread_active) { gain = LossChangeMissing(bin, missing, inputs.parent_sum, inputs.param, inputs.nidx, fidx, evaluator, missing_left); } __syncthreads(); // Find thread with best gain hipcub::KeyValuePair<int, float> tuple(threadIdx.x, gain); hipcub::KeyValuePair<int, float> best = MaxReduceT(temp_storage->max_reduce).Reduce(tuple, hipcub::ArgMax()); __shared__ hipcub::KeyValuePair<int, float> block_max; if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { int split_gidx = (scan_begin + threadIdx.x) - 1; float fvalue; if (split_gidx < static_cast<int>(gidx_begin)) { fvalue = inputs.min_fvalue[fidx]; } else { fvalue = inputs.feature_values[split_gidx]; } GradientSumT left = missing_left ? bin + missing : bin; GradientSumT right = inputs.parent_sum - left; best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx, GradientPair(left), GradientPair(right), inputs.param); } __syncthreads(); } } template <int BLOCK_THREADS, typename GradientSumT> __global__ void EvaluateSplitsKernel( EvaluateSplitInputs<GradientSumT> left, EvaluateSplitInputs<GradientSumT> right, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, common::Span<DeviceSplitCandidate> out_candidates) { // KeyValuePair here used as threadIdx.x -> gain_value using ArgMaxT = hipcub::KeyValuePair<int, float>; using BlockScanT = hipcub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>; using MaxReduceT = hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS>; using SumReduceT = hipcub::BlockReduce<GradientSumT, BLOCK_THREADS>; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; // Aligned && shared storage for best_split __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate& best_split = uninitialized_split.Alias(); 
__shared__ TempStorage temp_storage; if (threadIdx.x == 0) { best_split = DeviceSplitCandidate(); } __syncthreads(); // If this block is working on the left or right node bool is_left = blockIdx.x < left.feature_set.size(); EvaluateSplitInputs<GradientSumT>& inputs = is_left ? left : right; // One block for each feature. Features are sampled, so fidx != blockIdx.x int fidx = inputs.feature_set[is_left ? blockIdx.x : blockIdx.x - left.feature_set.size()]; EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>( fidx, inputs, evaluator, &best_split, &temp_storage); __syncthreads(); if (threadIdx.x == 0) { // Record best loss for each feature out_candidates[blockIdx.x] = best_split; } } __device__ DeviceSplitCandidate operator+(const DeviceSplitCandidate& a, const DeviceSplitCandidate& b) { return b.loss_chg > a.loss_chg ? b : a; } template <typename GradientSumT> void EvaluateSplits(common::Span<DeviceSplitCandidate> out_splits, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientSumT> left, EvaluateSplitInputs<GradientSumT> right) { size_t combined_num_features = left.feature_set.size() + right.feature_set.size(); dh::TemporaryArray<DeviceSplitCandidate> feature_best_splits( combined_num_features); // One block for each feature uint32_t constexpr kBlockThreads = 256; dh::LaunchKernel {uint32_t(combined_num_features), kBlockThreads, 0}( EvaluateSplitsKernel<kBlockThreads, GradientSumT>, left, right, evaluator, dh::ToSpan(feature_best_splits)); // Reduce to get best candidate for left and right child over all features auto reduce_offset = dh::MakeTransformIterator<size_t>(thrust::make_counting_iterator(0llu), [=] __device__(size_t idx) -> size_t { if (idx == 0) { return 0; } if (idx == 1) { return left.feature_set.size(); } if (idx == 2) { return combined_num_features; } return 0; }); size_t temp_storage_bytes = 0; auto num_segments = out_splits.size(); hipcub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes, 
feature_best_splits.data(), out_splits.data(), num_segments, reduce_offset, reduce_offset + 1); dh::TemporaryArray<int8_t> temp(temp_storage_bytes); hipcub::DeviceSegmentedReduce::Sum(temp.data().get(), temp_storage_bytes, feature_best_splits.data(), out_splits.data(), num_segments, reduce_offset, reduce_offset + 1); } template <typename GradientSumT> void EvaluateSingleSplit(common::Span<DeviceSplitCandidate> out_split, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientSumT> input) { EvaluateSplits(out_split, evaluator, input, {}); } template void EvaluateSplits<GradientPair>( common::Span<DeviceSplitCandidate> out_splits, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientPair> left, EvaluateSplitInputs<GradientPair> right); template void EvaluateSplits<GradientPairPrecise>( common::Span<DeviceSplitCandidate> out_splits, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientPairPrecise> left, EvaluateSplitInputs<GradientPairPrecise> right); template void EvaluateSingleSplit<GradientPair>( common::Span<DeviceSplitCandidate> out_split, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientPair> input); template void EvaluateSingleSplit<GradientPairPrecise>( common::Span<DeviceSplitCandidate> out_split, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientPairPrecise> input); } // namespace tree } // namespace xgboost
695aecc21cf4b59c2b10c020c33d9ed3610dc478.cu
/*! * Copyright 2020 by XGBoost Contributors */ #include "evaluate_splits.cuh" #include <limits> namespace xgboost { namespace tree { // With constraints template <typename GradientPairT> XGBOOST_DEVICE float LossChangeMissing(const GradientPairT &scan, const GradientPairT &missing, const GradientPairT &parent_sum, const GPUTrainingParam &param, bst_node_t nidx, bst_feature_t fidx, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, bool &missing_left_out) { // NOLINT float parent_gain = CalcGain(param, parent_sum); float missing_left_gain = evaluator.CalcSplitGain(param, nidx, fidx, GradStats(scan + missing), GradStats(parent_sum - (scan + missing))); float missing_right_gain = evaluator.CalcSplitGain( param, nidx, fidx, GradStats(scan), GradStats(parent_sum - scan)); if (missing_left_gain >= missing_right_gain) { missing_left_out = true; return missing_left_gain - parent_gain; } else { missing_left_out = false; return missing_right_gain - parent_gain; } } /*! * \brief * * \tparam ReduceT BlockReduce Type. * \tparam TempStorage Cub Shared memory * * \param begin * \param end * \param temp_storage Shared memory for intermediate result. */ template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT> __device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram, TempStorageT* temp_storage) { __shared__ cub::Uninitialized<GradientSumT> uninitialized_sum; GradientSumT& shared_sum = uninitialized_sum.Alias(); GradientSumT local_sum = GradientSumT(); // For loop sums features into one block size auto begin = feature_histogram.data(); auto end = begin + feature_histogram.size(); for (auto itr = begin; itr < end; itr += BLOCK_THREADS) { bool thread_active = itr + threadIdx.x < end; // Scan histogram GradientSumT bin = thread_active ? 
*(itr + threadIdx.x) : GradientSumT(); local_sum += bin; } local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, cub::Sum()); // Reduction result is stored in thread 0. if (threadIdx.x == 0) { shared_sum = local_sum; } __syncthreads(); return shared_sum; } /*! \brief Find the thread with best gain. */ template <int BLOCK_THREADS, typename ReduceT, typename ScanT, typename MaxReduceT, typename TempStorageT, typename GradientSumT> __device__ void EvaluateFeature( int fidx, EvaluateSplitInputs<GradientSumT> inputs, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, DeviceSplitCandidate* best_split, // shared memory storing best split TempStorageT* temp_storage // temp memory for cub operations ) { // Use pointer from cut to indicate begin and end of bins for each feature. uint32_t gidx_begin = inputs.feature_segments[fidx]; // begining bin uint32_t gidx_end = inputs.feature_segments[fidx + 1]; // end bin for i^th feature // Sum histogram bins for current feature GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT, TempStorageT, GradientSumT>( inputs.gradient_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage); GradientSumT const missing = inputs.parent_sum - feature_sum; float const null_gain = -std::numeric_limits<bst_float>::infinity(); SumCallbackOp<GradientSumT> prefix_op = SumCallbackOp<GradientSumT>(); for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += BLOCK_THREADS) { bool thread_active = (scan_begin + threadIdx.x) < gidx_end; // Gradient value for current bin. GradientSumT bin = thread_active ? inputs.gradient_histogram[scan_begin + threadIdx.x] : GradientSumT(); ScanT(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op); // Whether the gradient of missing values is put to the left side. 
bool missing_left = true; float gain = null_gain; if (thread_active) { gain = LossChangeMissing(bin, missing, inputs.parent_sum, inputs.param, inputs.nidx, fidx, evaluator, missing_left); } __syncthreads(); // Find thread with best gain cub::KeyValuePair<int, float> tuple(threadIdx.x, gain); cub::KeyValuePair<int, float> best = MaxReduceT(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax()); __shared__ cub::KeyValuePair<int, float> block_max; if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { int split_gidx = (scan_begin + threadIdx.x) - 1; float fvalue; if (split_gidx < static_cast<int>(gidx_begin)) { fvalue = inputs.min_fvalue[fidx]; } else { fvalue = inputs.feature_values[split_gidx]; } GradientSumT left = missing_left ? bin + missing : bin; GradientSumT right = inputs.parent_sum - left; best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx, GradientPair(left), GradientPair(right), inputs.param); } __syncthreads(); } } template <int BLOCK_THREADS, typename GradientSumT> __global__ void EvaluateSplitsKernel( EvaluateSplitInputs<GradientSumT> left, EvaluateSplitInputs<GradientSumT> right, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, common::Span<DeviceSplitCandidate> out_candidates) { // KeyValuePair here used as threadIdx.x -> gain_value using ArgMaxT = cub::KeyValuePair<int, float>; using BlockScanT = cub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>; using MaxReduceT = cub::BlockReduce<ArgMaxT, BLOCK_THREADS>; using SumReduceT = cub::BlockReduce<GradientSumT, BLOCK_THREADS>; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; // Aligned && shared storage for best_split __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate& best_split = uninitialized_split.Alias(); __shared__ TempStorage 
temp_storage; if (threadIdx.x == 0) { best_split = DeviceSplitCandidate(); } __syncthreads(); // If this block is working on the left or right node bool is_left = blockIdx.x < left.feature_set.size(); EvaluateSplitInputs<GradientSumT>& inputs = is_left ? left : right; // One block for each feature. Features are sampled, so fidx != blockIdx.x int fidx = inputs.feature_set[is_left ? blockIdx.x : blockIdx.x - left.feature_set.size()]; EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>( fidx, inputs, evaluator, &best_split, &temp_storage); __syncthreads(); if (threadIdx.x == 0) { // Record best loss for each feature out_candidates[blockIdx.x] = best_split; } } __device__ DeviceSplitCandidate operator+(const DeviceSplitCandidate& a, const DeviceSplitCandidate& b) { return b.loss_chg > a.loss_chg ? b : a; } template <typename GradientSumT> void EvaluateSplits(common::Span<DeviceSplitCandidate> out_splits, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientSumT> left, EvaluateSplitInputs<GradientSumT> right) { size_t combined_num_features = left.feature_set.size() + right.feature_set.size(); dh::TemporaryArray<DeviceSplitCandidate> feature_best_splits( combined_num_features); // One block for each feature uint32_t constexpr kBlockThreads = 256; dh::LaunchKernel {uint32_t(combined_num_features), kBlockThreads, 0}( EvaluateSplitsKernel<kBlockThreads, GradientSumT>, left, right, evaluator, dh::ToSpan(feature_best_splits)); // Reduce to get best candidate for left and right child over all features auto reduce_offset = dh::MakeTransformIterator<size_t>(thrust::make_counting_iterator(0llu), [=] __device__(size_t idx) -> size_t { if (idx == 0) { return 0; } if (idx == 1) { return left.feature_set.size(); } if (idx == 2) { return combined_num_features; } return 0; }); size_t temp_storage_bytes = 0; auto num_segments = out_splits.size(); cub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes, feature_best_splits.data(), 
out_splits.data(), num_segments, reduce_offset, reduce_offset + 1); dh::TemporaryArray<int8_t> temp(temp_storage_bytes); cub::DeviceSegmentedReduce::Sum(temp.data().get(), temp_storage_bytes, feature_best_splits.data(), out_splits.data(), num_segments, reduce_offset, reduce_offset + 1); } template <typename GradientSumT> void EvaluateSingleSplit(common::Span<DeviceSplitCandidate> out_split, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientSumT> input) { EvaluateSplits(out_split, evaluator, input, {}); } template void EvaluateSplits<GradientPair>( common::Span<DeviceSplitCandidate> out_splits, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientPair> left, EvaluateSplitInputs<GradientPair> right); template void EvaluateSplits<GradientPairPrecise>( common::Span<DeviceSplitCandidate> out_splits, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientPairPrecise> left, EvaluateSplitInputs<GradientPairPrecise> right); template void EvaluateSingleSplit<GradientPair>( common::Span<DeviceSplitCandidate> out_split, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientPair> input); template void EvaluateSingleSplit<GradientPairPrecise>( common::Span<DeviceSplitCandidate> out_split, TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator, EvaluateSplitInputs<GradientPairPrecise> input); } // namespace tree } // namespace xgboost
eb1ccdcd2e865a6b32de025176eea639ab4ac506.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "CompressionKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int dimensionalityd = 1; unsigned long long *cbufd = NULL; hipMalloc(&cbufd, XSIZE*YSIZE); unsigned char *dbufd = NULL; hipMalloc(&dbufd, XSIZE*YSIZE); int *cutd = NULL; hipMalloc(&cutd, XSIZE*YSIZE); int *offd = NULL; hipMalloc(&offd, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( CompressionKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dimensionalityd,cbufd,dbufd,cutd,offd); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( CompressionKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dimensionalityd,cbufd,dbufd,cutd,offd); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( CompressionKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, 
dimensionalityd,cbufd,dbufd,cutd,offd); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
eb1ccdcd2e865a6b32de025176eea639ab4ac506.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "CompressionKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int dimensionalityd = 1; unsigned long long *cbufd = NULL; cudaMalloc(&cbufd, XSIZE*YSIZE); unsigned char *dbufd = NULL; cudaMalloc(&dbufd, XSIZE*YSIZE); int *cutd = NULL; cudaMalloc(&cutd, XSIZE*YSIZE); int *offd = NULL; cudaMalloc(&offd, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); CompressionKernel<<<gridBlock,threadBlock>>>(dimensionalityd,cbufd,dbufd,cutd,offd); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { CompressionKernel<<<gridBlock,threadBlock>>>(dimensionalityd,cbufd,dbufd,cutd,offd); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { CompressionKernel<<<gridBlock,threadBlock>>>(dimensionalityd,cbufd,dbufd,cutd,offd); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1215890a296d5eada90164dff06f1404643985c2.hip
// !!! This is a file automatically generated by hipify!!! /* * @Author: Alejandro Marrero * @Contact: alu0100825008@ull.edu.es * @Date: 2018-05-08 18:56:58 * @Last Modified time: 2018-05-08 18:56:58 */ #include <iostream> #include <vector> #include <ctime> #include <cstdlib> #include <boost/chrono.hpp> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> const int LOWER = 0.0; const int UPPER = 1.0; void fillVector(thrust::host_vector<double> &V, const double lower, const double upper, const unsigned int seed) { srand(time(NULL)); size_t elem = V.size(); for( size_t i = 0; i < elem; ++i){ V[i] = (double) rand() / (double) RAND_MAX; } } int main() { thrust::host_vector<double> hostVector; thrust::device_vector<double> deviceVector; unsigned int seed = (unsigned int) time(NULL); size_t limit = 100000000; for(int i = 500; i <= limit; i *= 2 ) { hostVector.resize(i); fillVector(V, LOWER, UPPER, seed); boost::chrono::steady_clock::time_point start_cpu = boost::chrono::steady_clock::now(); deviceVector = hostVector; boost::chrono::steady_clock::time_point end_cpu = boost::chrono::steady_clock::now(); double durationCPU = boost::chrono::duration <double, boost::milli> (end_cpu - start_cpu).count(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); thrust::stable_sort(d_V.begin(), d_V.end()); hipEventRecord(stop,0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); start_cpu = boost::chrono::steady_clock::now(); hostVector = deviceVector; end_cpu = boost::chrono::steady_clock::now(); double durationCPU_2 = boost::chrono::duration <double, boost::milli> (end_cpu - start_cpu).count(); std::cout << i << std::endl << ((elapsedTime + durationCPU + durationCPU_2) * 0.001) << std::endl; } return 0; }
1215890a296d5eada90164dff06f1404643985c2.cu
/* * @Author: Alejandro Marrero * @Contact: alu0100825008@ull.edu.es * @Date: 2018-05-08 18:56:58 * @Last Modified time: 2018-05-08 18:56:58 */ #include <iostream> #include <vector> #include <ctime> #include <cstdlib> #include <boost/chrono.hpp> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> const int LOWER = 0.0; const int UPPER = 1.0; void fillVector(thrust::host_vector<double> &V, const double lower, const double upper, const unsigned int seed) { srand(time(NULL)); size_t elem = V.size(); for( size_t i = 0; i < elem; ++i){ V[i] = (double) rand() / (double) RAND_MAX; } } int main() { thrust::host_vector<double> hostVector; thrust::device_vector<double> deviceVector; unsigned int seed = (unsigned int) time(NULL); size_t limit = 100000000; for(int i = 500; i <= limit; i *= 2 ) { hostVector.resize(i); fillVector(V, LOWER, UPPER, seed); boost::chrono::steady_clock::time_point start_cpu = boost::chrono::steady_clock::now(); deviceVector = hostVector; boost::chrono::steady_clock::time_point end_cpu = boost::chrono::steady_clock::now(); double durationCPU = boost::chrono::duration <double, boost::milli> (end_cpu - start_cpu).count(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); thrust::stable_sort(d_V.begin(), d_V.end()); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); start_cpu = boost::chrono::steady_clock::now(); hostVector = deviceVector; end_cpu = boost::chrono::steady_clock::now(); double durationCPU_2 = boost::chrono::duration <double, boost::milli> (end_cpu - start_cpu).count(); std::cout << i << std::endl << ((elapsedTime + durationCPU + durationCPU_2) * 0.001) << std::endl; } return 0; }
3ebf2c0e94e7711993d2bd77508c9a31853b7637.hip
// !!! This is a file automatically generated by hipify!!! //#define GLEW_STATIC //#pragma comment(lib,"glew32.lib") //#include <windows.h> //#include <gl/glew.h> //#include <glut.h> #include <complex> #include <stdio.h> #include <iostream> #include <cmath> #include <stdlib.h> #include <fstream> #include <cstdlib> #include <fstream> #include <hip/hip_runtime.h> //#include "stdafx.h" #include <iomanip> #include <time.h> //#include <cuda_gl_interop.h> #include <hip/hip_runtime.h> //#include <hip/hip_complex.h> #include <vector> #include <math_functions.h> //#include "EasyBMP.h" //#include "EasyBMP_DataStructures.h" //#include "EasyBMP_VariousBMPutilities.h" #define GL_GLEXT_PROTOTYPES #define PI 3.141592653589793238 #define alpha_max 0.01 #define alpha_min 0.000 #define eps0 8.85418e-12 #define sigma_factor 1.0 #define ncells 10 #define mu0 (PI*4e-7) #define center_freq (5e9) #define eta0 (sqrt(mu0/eps0)) #define c0 (1.0/sqrt(mu0*eps0)) #define dt (dx/c0/2)// dx/c0/2 #define domain_size 0.18 #define dx (0.001) #define NF2FFdistfromboundary ((int)floor((3.2*breast_radius/dx))) #define source_position 0.5 #define dy (0.001) #define number_of_time_steps 3000 #define f1x (nx/2 - 150) #define f2x (nx/2+150) #define f1y (ny/2) #define f2y (ny/2) //#define nx ((int)ceil(domain_size/dx)) //#define ny ((int)ceil(domain_size/dy)) #define nx ((int)ceil(12.7*breast_radius/dx)) #define ny ((int)ceil(12.7*breast_radius/dy)) #define d (10*dx) #define npml 2 #define kmax 10 #define numberofexcitationangles 4 #define isPW 1 #define isscattering 1 #define HANDLE_ERROR( err ) err #define sigma_max_pml (3/(200*PI*dx)) #define size_NF2FF_total (2*nx-8*NF2FFdistfromboundary+2*ny-4) #define size_cjzy (nx-2*NF2FFdistfromboundary-2) #define size_cjzx (ny-2*NF2FFdistfromboundary) #define numberofobservationangles 60 #define t0 (sqrt(20.0)*tau) // t0 = sqrt(20)*tau #define l0 (nx*dx/2-breast_radius) #define pwidth 10 #define nc 20 // 20 cells per wavelength #define fmax (c0/(nc*dx))// change 
if dy is bigger though now they're the same fmax is the highest frequency this program can handle #define tau (3.3445267e-11) // float ta bu = sqrt(2.3)*nc*dx/(PI*c0*1/sqrt(eps_r_MAX)); from a calculation of fmax. //#define tau (5.288161e-11) #define target_x (nx/2+15)//105 is breast_radius / dx #define target_y (ny/2-15) #define source_x (nx/2) //(target_x-105-80) #define source_y (ny/2) #define breast_radius 0.0315 //87.535 mm . Sample size = 1. #define tumor_size (0.01) //#include <unistd.h> //const hipComplex jcmpx (0.0, 1.0); /*static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } }*/ //__constant__ float*dev_Ceze,*dev_Cezhy,*dev_Cezhx,*dev_Cezj,*dev_Jz,*dev_Chyh,*dev_Chxh,*dev_Chyez,*dev_Chxez,*dev_bex,*dev_bey,*dev_aex,*dev_aey,*dev_bmy,*dev_bmx,*dev_amy,*dev_amx,*dev_C_Psi_ezy, //*dev_C_Psi_ezx,*dev_C_Psi_hxy,*dev_C_Psi_hyx; struct hipComplex { float r; float i; __host__ __device__ hipComplex( float a, float b ) : r(a), i(b) {} __host__ __device__ hipComplex(float a): r(a), i(0) {} float magnitude2( void ) { return r * r + i * i; } __host__ __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __host__ __device__ hipComplex operator*(const float& a){ return hipComplex(r*a,i*a); } __host__ __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __host__ __device__ hipComplex operator+(const float& a){ return hipComplex(r+a,i); } __host__ __device__ void operator+=(const float& f){ r += f; } __host__ __device__ void operator+=(const hipComplex& C); hipComplex(); }; __host__ __device__ hipComplex operator*(const float &f, const hipComplex &C) { return hipComplex(C.r*f,C.i*f); } __host__ __device__ void hipComplex::operator+=(const hipComplex& C) { r +=C.r; i += C.i; } __host__ __device__ float cuabs(hipComplex x) { return sqrt(x.i*x.i + 
x.r*x.r); } __host__ __device__ hipComplex cuexp(hipComplex arg) { hipComplex res(0,0); float s, c; float e = expf(arg.r); sincosf(arg.i,&s,&c); res.r = c * e; res.i = s * e; return res; } __device__ int isOnNF2FFBound(int x, int y) { if(x==NF2FFdistfromboundary||x==nx-NF2FFdistfromboundary||y==NF2FFdistfromboundary||y==ny-NF2FFdistfromboundary) { return 1; } else { return 0; } } __device__ int getxfromthreadIdNF2FF(int index) { int x=0; if(index<(nx-2*NF2FFdistfromboundary-2))//yn { x = index+NF2FFdistfromboundary+1; } else if(index<(nx-4*NF2FFdistfromboundary+ny-2))//xp { x = nx-NF2FFdistfromboundary-1; } else if(index<(2*nx-6*NF2FFdistfromboundary+ny-4))//yp { x = nx-NF2FFdistfromboundary - (index-(nx-4*NF2FFdistfromboundary+ny-2))-2; } else if(index<(2*nx-8*NF2FFdistfromboundary+2*ny-4))//xn notice 2*nx-8*NF2FFdistfromboundary+2*ny-4 is the max index term. { x = NF2FFdistfromboundary; } return x; } __device__ int getyfromthreadIdNF2FF(int index) { int y=0; if(index<(nx-2*NF2FFdistfromboundary-2)) { y = NF2FFdistfromboundary; } else if(index<(nx-4*NF2FFdistfromboundary+ny-2)) { y = (index-(nx-2*NF2FFdistfromboundary-2))+NF2FFdistfromboundary; } else if(index<(2*nx-6*NF2FFdistfromboundary+ny-4)) { y = ny-NF2FFdistfromboundary-1; } else if(index<(2*nx-8*NF2FFdistfromboundary+2*ny-4)) { y = ny-NF2FFdistfromboundary-(index-(2*nx-6*NF2FFdistfromboundary+ny-4))-1; } return y; } int CPUgetxfromthreadIdNF2FF(int index) { int x=0; if(index<(nx-2*NF2FFdistfromboundary-2))//yn { x = index+NF2FFdistfromboundary+1; } else if(index<(nx-4*NF2FFdistfromboundary+ny-2))//xp { x = nx-NF2FFdistfromboundary-1; } else if(index<(2*nx-6*NF2FFdistfromboundary+ny-4))//yp { x = nx-NF2FFdistfromboundary - (index-(nx-4*NF2FFdistfromboundary+ny-2))-2; } else if(index<(2*nx-8*NF2FFdistfromboundary+2*ny-4))//xn notice 2*nx-8*NF2FFdistfromboundary+2*ny-4 is the max index term. 
{ x = NF2FFdistfromboundary; } return x; } int CPUgetyfromthreadIdNF2FF(int index) { int y=0; if(index<(nx-2*NF2FFdistfromboundary-2)) { y = NF2FFdistfromboundary; } else if(index<(nx-4*NF2FFdistfromboundary+ny-2)) { y = (index-(nx-2*NF2FFdistfromboundary-2))+NF2FFdistfromboundary; } else if(index<(2*nx-6*NF2FFdistfromboundary+ny-4)) { y = ny-NF2FFdistfromboundary-1; } else if(index<(2*nx-8*NF2FFdistfromboundary+2*ny-4)) { y = ny-NF2FFdistfromboundary-(index-(2*nx-6*NF2FFdistfromboundary+ny-4))-1; } return y; } __device__ __host__ int isOnxn(int x) { if(x==(NF2FFdistfromboundary)) { return 1; } else { return 0; } } __device__ __host__ int isOnxp(int x) { if(x==(nx-NF2FFdistfromboundary-1)) { return 1; } else { return 0; } } __device__ __host__ int isOnyp(int x,int y) { if(y==(ny-NF2FFdistfromboundary-1)&&!isOnxn(x)&&!isOnxp(x)) { return 1; } else { return 0; } } __device__ __host__ int isOnyn(int x, int y) { if((y==(NF2FFdistfromboundary))&&!isOnxn(x)&&!(isOnxp(x))) { return 1; } else { return 0; } } __device__ int dgetCell(int x, int y, int size) { return x +y*size; } __global__ void calculate_JandM(float* f,int* timestep,float*dev_Ez,float*dev_Hy,float*dev_Hx,hipComplex *cjzxp,hipComplex *cjzyp,hipComplex*cjzxn,hipComplex*cjzyn,hipComplex*cmxyp,hipComplex*cmyxp,hipComplex*cmxyn,hipComplex*cmyxn) { float freq = *f; int index = threadIdx.x+blockIdx.x*blockDim.x;// should launch 2*nx-8*NF2FFdistfromboundary+2*ny-4 threads. 
if(index<=size_NF2FF_total) { const hipComplex j(0.0,1.0); int x = getxfromthreadIdNF2FF(index); int y = getyfromthreadIdNF2FF(index); float Ez; hipComplex pi(PI , 0); hipComplex two(2.0,0.0); hipComplex negativeone(-1.0,0); hipComplex deltatime(dt,0); if(isOnyp(x,y)) { Ez = (dev_Ez[dgetCell(x,y+1,nx+1)]+dev_Ez[dgetCell(x,y,nx+1)])/2; float Hx = dev_Hx[dgetCell(x,y,nx)]; cjzyp[index-(nx+ny-4*NF2FFdistfromboundary-2)] += -1*Hx*deltatime*cuexp((float)(-1)*j*(float)2*pi*freq*(float)(*timestep)*deltatime);//cjzyp and cmxyp have nx - 2*NF2FFBoundary -2 elements cmxyp[index-(nx+ny-4*NF2FFdistfromboundary-2)] += -1*Ez*deltatime*cuexp((float)-1.0*j*(float)2.0*(float)PI*freq*((float)(*timestep)+0.5)*(float)dt); } else if(isOnxp(x))//X faces override y faces at their intersections { Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x+1,y,nx+1)])/2; float Hy = dev_Hy[dgetCell(x,y,nx)]; cjzxp[index-(nx-2*NF2FFdistfromboundary-2)] += Hy*deltatime*cuexp(-1*j*2*pi*freq*(float)(*timestep)*(float)dt);//cjzxp and cmyxp have ny-2*NF2FFBound elements cmyxp[index-(nx-2*NF2FFdistfromboundary-2)] += Ez*(float)dt*cuexp((float)(-1)*j*(float)2.0*pi*freq*((float)(*timestep)+0.5)*(float)dt);// this is the discrete fourier transform, by the way. 
} else if(isOnyn(x,y)) { Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x,y+1,nx+1)])/2; float Hx=dev_Hx[dgetCell(x,y,nx)]; cjzyn[index] += Hx*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*(float)(*timestep)*(float)dt); //cjzyn and cmxyn need to have nx-2*NF2FFbound-2 elements cmxyn[index] += Ez*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*((float)(*timestep)+0.5)*(float)dt); } else if(isOnxn(x)) { Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x+1,y,nx+1)])/2; cjzxn[index-(2*nx+ny-6*NF2FFdistfromboundary-4)] += -1*dev_Hy[dgetCell(x,y,nx)]*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*(float)(*timestep)*(float)dt); // cjzxn and cmyxn must have ny-2*NFdistfromboundary elements cmyxn[index-(2*nx+ny-6*NF2FFdistfromboundary-4)] += -1*Ez*(float)dt*cuexp(-1.0*j*2.0*(float)PI*freq*((float)(*timestep)+0.5)*(float)dt); } } } __host__ __device__ float fwf(float timestep,float x, float y,float Phi_inc,float l) { float ar; float ky, kx;//k hat sincosf(Phi_inc,&ky,&kx); ar = (float)timestep*dt-(float)t0-(1/(float)c0)*(ky*y*dx+kx*x*dy-l); //ar = timestep*dt-t0; //return exp(-1*(ar*ar)/(tau*tau));// gaussian pulse argument is k dot r, return exp(-1*ar*ar/(tau*tau)); //return sin(2*PI*1e9*timestep*dt); } __global__ void H_field_update(float*dev_Hy,float*dev_Hx,float*dev_Ez,float*dev_bmx,float*dev_Psi_hyx,float*dev_amx,float*dev_bmy,float*dev_amy,float*dev_Psi_hxy,float*kex) { float buffer_Hy; float buffer_Hx; float Chez = (dt/dx)/(mu0); int x = threadIdx.x +blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; if(x<nx&&y<nx) { buffer_Hy = dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); buffer_Hx = dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); if(x<ncells) { buffer_Hy= dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[ncells-1-x]; 
dev_Psi_hyx[dgetCell(x,y,20)]=dev_bmx[ncells-1-x]*dev_Psi_hyx[dgetCell(x,y,20)]+dev_amx[ncells-1-x]*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); buffer_Hy+=Chez*dx*dev_Psi_hyx[dgetCell(x,y,20)] ; } if(x>=(nx-ncells)) { buffer_Hy=dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[x-nx+ncells]; dev_Psi_hyx[dgetCell(x-nx+20,y,2*ncells)]=dev_bmx[x-nx+ncells]*dev_Psi_hyx[dgetCell(x-nx+20,y,20)]+dev_amx[x-nx+ncells]*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); buffer_Hy+=Chez*dx*dev_Psi_hyx[dgetCell(x-nx+20,y,20)]; } if(y<ncells) { buffer_Hx=dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[ncells-1-y]; dev_Psi_hxy[dgetCell(x,y,nx)]=dev_bmy[ncells-1-y]*dev_Psi_hxy[dgetCell(x,y,nx)]+dev_amy[ncells-1-y]*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); buffer_Hx-=Chez*dy*dev_Psi_hxy[dgetCell(x,y,nx)]; } if(y>=(ny-ncells)) { buffer_Hx=dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[y-ny+ncells]; dev_Psi_hxy[dgetCell(x,y-ny+20,nx)]=dev_bmy[y-ny+ncells]*dev_Psi_hxy[dgetCell(x,y-ny+20,nx)]+dev_amy[y-ny+ncells]*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); buffer_Hx-=Chez*dy*dev_Psi_hxy[dgetCell(x,y-nx+20,nx)]; } //__syncthreads(); if(isnan(buffer_Hx)) { dev_Hx[dgetCell(x,y,nx)] = 0.0; } else { dev_Hx[dgetCell(x,y,nx)] = buffer_Hx; } if(isnan(buffer_Hy)) { dev_Hy[dgetCell(x,y,nx)] = 0.0; } else { dev_Hy[dgetCell(x,y,nx)] = buffer_Hy; } //dev_Hx[dgetCell(x,y,nx)] = buffer_Hx; //dev_Hy[dgetCell(x,y,nx)] = buffer_Hy; } } __global__ void H_inc_update(float*dev_Hy,float*dev_Hx,float*dev_Ez,float*dev_bmx,float*dev_Psi_hyx,float*dev_amx,float*dev_bmy,float*dev_amy,float*dev_Psi_hxy,float*kex) { float buffer_Hy; float buffer_Hx; float Chez = (dt/dx)/(mu0); int x = threadIdx.x +blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; if(x<nx&&y<nx) { buffer_Hy = 
dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); buffer_Hx = dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); if(x<ncells) { buffer_Hy= dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[ncells-1-x]; dev_Psi_hyx[dgetCell(x,y,20)]=dev_bmx[ncells-1-x]*dev_Psi_hyx[dgetCell(x,y,20)]+dev_amx[ncells-1-x]*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); buffer_Hy+=Chez*dx*dev_Psi_hyx[dgetCell(x,y,20)] ; } if(x>=(nx-ncells)) { buffer_Hy=dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[x-nx+ncells]; dev_Psi_hyx[dgetCell(x-nx+20,y,2*ncells)]=dev_bmx[x-nx+ncells]*dev_Psi_hyx[dgetCell(x-nx+20,y,20)]+dev_amx[x-nx+ncells]*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); buffer_Hy+=Chez*dx*dev_Psi_hyx[dgetCell(x-nx+20,y,20)]; } if(y<ncells) { buffer_Hx=dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[ncells-1-y]; dev_Psi_hxy[dgetCell(x,y,nx)]=dev_bmy[ncells-1-y]*dev_Psi_hxy[dgetCell(x,y,nx)]+dev_amy[ncells-1-y]*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); buffer_Hx-=Chez*dy*dev_Psi_hxy[dgetCell(x,y,nx)]; } if(y>=(ny-ncells)) { buffer_Hx=dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[y-ny+ncells]; dev_Psi_hxy[dgetCell(x,y-ny+20,nx)]=dev_bmy[y-ny+ncells]*dev_Psi_hxy[dgetCell(x,y-ny+20,nx)]+dev_amy[y-ny+ncells]*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]); buffer_Hx-=Chez*dy*dev_Psi_hxy[dgetCell(x,y-nx+20,nx)]; } //__syncthreads(); if(isnan(buffer_Hx)) { dev_Hx[dgetCell(x,y,nx)] = 0.0; } else { dev_Hx[dgetCell(x,y,nx)] = buffer_Hx; } if(isnan(buffer_Hy)) { dev_Hy[dgetCell(x,y,nx)] = 0.0; } else { dev_Hy[dgetCell(x,y,nx)] = buffer_Hy; } //dev_Hx[dgetCell(x,y,nx)] = buffer_Hx; //dev_Hy[dgetCell(x,y,nx)] = buffer_Hy; } } __global__ void E_field_update(int 
*i,float*dev_Ez,float*dev_Hy,float*dev_Hx,float*dev_Psi_ezx,float*dev_aex,float*dev_aey,float*dev_bex,float*dev_bey,float*dev_Psi_ezy,float*kex,float*Cezhy,float*Cezhx,float*Ceze,float*Cezeip,float*Cezeic,float*Phi) { int x=threadIdx.x+blockDim.x*blockIdx.x; int y=threadIdx.y+blockDim.y*blockIdx.y; // int offset = x+y*blockDim.x*gridDim.x; float buffer_Ez; //float Ceh = (dt/dx)/(eps0); float Cezj = -dt/eps0; float length_offset; if(x<=nx&&y<=ny) { //if(x==0||x==nx||y==0||y==ny) if(x==nx||y==ny||x==0||y==0) { buffer_Ez=0.0; } else { if(isscattering) { buffer_Ez = Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)]) -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)]) +Cezeic[dgetCell(x,y,nx+1)]*fwf((float)(*i)+0.5,x-nx/2,y-ny/2,*Phi,-breast_radius) +Cezeip[dgetCell(x,y,nx+1)]*fwf((float)(*i)-0.5,x-nx/2,y-ny/2,*Phi,-breast_radius); } else { buffer_Ez = Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)]) -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)]); if(x==(int)(source_x)&&y==(int)(source_y)) { buffer_Ez=buffer_Ez + 100*Cezj*fwf((float)(*i),0,0,0,0); } } //if(x==((int)nx/2)&&y==((int)nx/2)) //{ // //buffer_Ez=buffer_Ez + Cezj*dev_Jz[*i]; // buffer_Ez=buffer_Ez + Cezj*fwf((float)(*i),0,0,0,0); //} if(x<=ncells&&x!=0) { buffer_Ez = Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)])/kex[ncells-x] -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)])/kex[ncells-x]; dev_Psi_ezx[dgetCell(x-1,y-1,20)] = dev_bex[ncells-x]*dev_Psi_ezx[dgetCell(x-1,y-1,20)]+dev_aex[ncells-x]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)]); buffer_Ez += Cezhy[dgetCell(x,y,nx+1)]*dx*dev_Psi_ezx[dgetCell(x-1,y-1,2*ncells)]; } if(x>=(nx-ncells)&&x!=nx) { buffer_Ez = 
Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)])/kex[x-nx+ncells] -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)])/kex[x-nx+ncells]; dev_Psi_ezx[dgetCell(x-nx+20,y-1,20)]=dev_bex[x-nx+ncells]*dev_Psi_ezx[dgetCell(x-nx+20,y-1,20)]+dev_aex[x-nx+ncells]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)]); buffer_Ez+=Cezhy[dgetCell(x,y,nx+1)]*dx*dev_Psi_ezx[dgetCell(x-nx+20,y-1,2*ncells)]; } if(y<=ncells&&y!=0) { buffer_Ez = Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)])/kex[ncells-y] -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)])/kex[ncells-y]; dev_Psi_ezy[dgetCell(x-1,y-1,nx)]=dev_bey[(ncells-y)]*dev_Psi_ezy[dgetCell(x-1,y-1,nx)]+dev_aey[(ncells-y)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)]); buffer_Ez-=Cezhx[dgetCell(x,y,nx+1)]*dy*dev_Psi_ezy[dgetCell(x-1,y-1,nx)]; } if(y>=(ny-ncells)&&y!=ny) { buffer_Ez = Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)])/kex[y-ny+ncells] -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)])/kex[y-ny+ncells]; dev_Psi_ezy[dgetCell(x-1,y-ny+20,nx)]=dev_bey[y-ny+ncells]*dev_Psi_ezy[dgetCell(x-1,y-ny+20,nx)]+dev_aey[y-ny+ncells]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)]); buffer_Ez-=Cezhx[dgetCell(x,y,nx+1)]*dy*dev_Psi_ezy[dgetCell(x-1,y-ny+20,nx)]; } } // unsigned char green = 128+127*buffer_Ez/0.4; /*ptr[offset].x = 0; ptr[offset].y = green; ptr[offset].z = 0; ptr[offset].w = 255;*///OpenGL stuff //__syncthreads(); if(isnan(buffer_Ez)) { dev_Ez[dgetCell(x,y,nx+1)] = 0.0; } else { dev_Ez[dgetCell(x,y,nx+1)] = buffer_Ez; } //dev_Ez[dgetCell(x,y,nx+1)] = buffer_Ez; } } __global__ void Field_reset(float* Ez, float* Hy, float* Hx, float* Psi_ezy,float* Psi_ezx,float* 
Psi_hyx,float* Psi_hxy,hipComplex*cjzyn,hipComplex*cjzxp,hipComplex*cjzyp,hipComplex*cjzxn,hipComplex*cmxyn,hipComplex*cmyxp,hipComplex*cmxyp,hipComplex*cmyxn) { int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y+blockDim.y*blockIdx.y; int index = x + y*blockDim.x*gridDim.x; if(x<=ncells&&x!=0) { Psi_ezx[dgetCell(x-1,y-1,20)] =0; } if(x>=(nx-ncells)&&x!=nx) { Psi_ezx[dgetCell(x-nx+20,y-1,20)]=0; } if(y<=ncells&&y!=0) { Psi_ezy[dgetCell(x-1,y-1,nx)]=0; } if(y>=(ny-ncells)&&y!=ny) { Psi_ezy[dgetCell(x-1,y-ny+20,nx)]=0; } if(x<ncells) { Psi_hyx[dgetCell(x,y,20)]=0; } if(x>=(nx-ncells)) { Psi_hyx[dgetCell(x-nx+20,y,2*ncells)]=0.0; } if(y<ncells) { Psi_hxy[dgetCell(x,y,nx)]=0.0; } if(y>=(ny-ncells)) { Psi_hxy[dgetCell(x,y-ny+20,nx)]=0.0; } if(x<=nx&&y<=ny) { Ez[dgetCell(x,y,nx+1)] = 0.0; } if(x<nx&&y<ny) { Hy[dgetCell(x,y,nx)] = 0.0; Hx[dgetCell(x,y,nx)] = 0.0; } if(index<=size_NF2FF_total) { const hipComplex j(0.0,1.0); int x = getxfromthreadIdNF2FF(index); int y = getyfromthreadIdNF2FF(index); float Ez; hipComplex pi(PI , 0); hipComplex two(2.0,0.0); hipComplex negativeone(-1.0,0); hipComplex deltatime(dt,0); if(index<size_cjzy) { cjzyp[index] = hipComplex(0,0);//cjzyp and cmxyp have nx - 2*NF2FFBoundary -2 elements cjzyn[index] = hipComplex(0,0); cmxyp[index] = hipComplex(0,0); cmxyn[index] = hipComplex(0,0); } if(index<size_cjzx) { cjzxp[index] = hipComplex(0,0); cjzxn[index] = hipComplex(0,0); cmyxp[index] = hipComplex(0,0); cmyxn[index] = hipComplex(0,0); } } } __global__ void E_inc_update(int *i,float*dev_Hy_inc,float*dev_Hx_inc,float*dev_Psi_ezx_inc,float*dev_aex,float*dev_aey,float*dev_bex,float*dev_bey,float*dev_Psi_ezy_inc,float*kex,float*dev_Ezip,float*dev_Ezic,float*Phi) { int x=threadIdx.x+blockDim.x*blockIdx.x; int y=threadIdx.y+blockDim.y*blockIdx.y; // int offset = x+y*blockDim.x*gridDim.x; float buffer_Ez; //float Ceh = (dt/dx)/(eps0); float Cezj = -dt/eps0; float Ceze = 1; float Cezhy = (dt/(dx*eps0)); if(x<=nx&&y<=ny) { 
//if(x==0||x==nx||y==0||y==ny) if(x==nx||y==ny||x==0||y==0) { buffer_Ez=0.0; } else { buffer_Ez = Ceze*dev_Ezic[dgetCell(x,y,nx+1)]+Cezhy*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)]) -Cezhy*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)]); if(x==((int)source_x)&&y==(int)(source_y)) { //buffer_Ez=buffer_Ez + Cezj*dev_Jz[*i]; buffer_Ez=buffer_Ez + 100*Cezj*fwf((float)(*i),0,0,0,0); } if(x<=ncells&&x!=0) { buffer_Ez = Ceze*dev_Ezic[dgetCell(x,y,nx+1)]+Cezhy*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)])/kex[ncells-x] -Cezhy*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)])/kex[ncells-x]; dev_Psi_ezx_inc[dgetCell(x-1,y-1,20)] = dev_bex[ncells-x]*dev_Psi_ezx_inc[dgetCell(x-1,y-1,20)]+dev_aex[ncells-x]*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)]); buffer_Ez += Cezhy*dx*dev_Psi_ezx_inc[dgetCell(x-1,y-1,2*ncells)]; } if(x>=(nx-ncells)&&x!=nx) { buffer_Ez = Ceze*dev_Ezic[dgetCell(x,y,nx+1)]+Cezhy*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)])/kex[x-nx+ncells] -Cezhy*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)])/kex[x-nx+ncells]; dev_Psi_ezx_inc[dgetCell(x-nx+20,y-1,20)]=dev_bex[x-nx+ncells]*dev_Psi_ezx_inc[dgetCell(x-nx+20,y-1,20)]+dev_aex[x-nx+ncells]*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)]); buffer_Ez+=Cezhy*dx*dev_Psi_ezx_inc[dgetCell(x-nx+20,y-1,2*ncells)]; } if(y<=ncells&&y!=0) { buffer_Ez = Ceze*dev_Ezic[dgetCell(x,y,nx+1)]+Cezhy*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)])/kex[ncells-y] -Cezhy*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)])/kex[ncells-y]; dev_Psi_ezy_inc[dgetCell(x-1,y-1,nx)]=dev_bey[(ncells-y)]*dev_Psi_ezy_inc[dgetCell(x-1,y-1,nx)]+dev_aey[(ncells-y)]*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)]); buffer_Ez-=Cezhy*dy*dev_Psi_ezy_inc[dgetCell(x-1,y-1,nx)]; } if(y>=(ny-ncells)&&y!=ny) { buffer_Ez = 
Ceze*dev_Ezic[dgetCell(x,y,nx+1)]+Cezhy*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)])/kex[y-ny+ncells] -Cezhy*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)])/kex[y-ny+ncells]; dev_Psi_ezy_inc[dgetCell(x-1,y-ny+20,nx)]=dev_bey[y-ny+ncells]*dev_Psi_ezy_inc[dgetCell(x-1,y-ny+20,nx)]+dev_aey[y-ny+ncells]*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)]); buffer_Ez-=Cezhy*dy*dev_Psi_ezy_inc[dgetCell(x-1,y-ny+20,nx)]; } } dev_Ezip[dgetCell(x,y,nx+1)] = dev_Ezic[dgetCell(x,y,nx+1)]; dev_Ezic[dgetCell(x,y,nx+1)] = buffer_Ez; } } float calc_radiated_power(hipComplex *cjzxp,hipComplex *cjzyp,hipComplex *cjzxn,hipComplex *cjzyn,hipComplex *cmxyp,hipComplex *cmyxp,hipComplex *cmxyn,hipComplex *cmyxn) { int indexofleg1 = nx-2*NF2FFdistfromboundary-2; int indexofleg2 = nx+ny-4*NF2FFdistfromboundary-2; int indexofleg3 = 2*nx+ny-6*NF2FFdistfromboundary-4; int maxindex = 2*nx-8*NF2FFdistfromboundary+2*ny-4; int index; hipComplex cjz(0,0); hipComplex power = 0; for(index = 0; index<indexofleg1;index++) { cjz = hipComplex(cjzyn[index].r,-1.0*cjzyn[index].i);//conjugation //z x x = y dot -y = -1 power+=-1.0*cjz*cmxyn[index]*dx;// the negative one comes from the dot product between JxM and the n hat vector } for(index = indexofleg1; index<indexofleg2;index++) { cjz = hipComplex(cjzxp[index-indexofleg1].r,-1.0*cjzxp[index-indexofleg1].i);//making the conjugate // z cross y = -x dot x = -1 power+= -1.0*cjz*cmyxp[index-indexofleg1]*dy;//positive x unit normal vector } for(index = indexofleg2;index<indexofleg3;index++) { // z cross x = y dot y = 1 cjz = hipComplex(cjzyp[index-indexofleg2].r,-1.0*cjzyp[index-indexofleg2].i); power+= cjz*cmxyp[index-indexofleg2]*dx;//postive y unit normal vector } for(index = indexofleg3;index<maxindex;index++) { // z cross y = -x dot -x = 1 cjz = hipComplex(cjzxn[index-indexofleg3].r,-1.0*cjzxn[index-indexofleg3].i); power += cjz*cmyxn[index-indexofleg3]*dy;// negative x hat n vector } float realpower = power.r; 
realpower *= 0.5; return realpower; } float calc_incident_power(float freq) { return (0.5/eta0)*pow(tau*sqrt(PI)*exp(-tau*tau*2*PI*freq*2*PI*freq/4),2);// just gonna assume gaussian pulse. This is the fourier transform of the gaussian pulse. } __global__ void calculate_JandM_total(float* f,int* timestep,float*dev_Ez,float*dev_Hy,float*dev_Hx,hipComplex *cjzxp,hipComplex *cjzyp,hipComplex*cjzxn,hipComplex*cjzyn,hipComplex*cmxyp,hipComplex*cmyxp,hipComplex*cmxyn,hipComplex*cmyxn,float*dev_Ezic,float*dev_Ezip,float*dev_Hx_inc,float*dev_Hy_inc) { float freq = *f; int index = threadIdx.x+blockIdx.x*blockDim.x;// should launch 2*nx-8*NF2FFdistfromboundary+2*ny-4 threads. if(index<=size_NF2FF_total) { const hipComplex j(0.0,1.0); int x = getxfromthreadIdNF2FF(index); int y = getyfromthreadIdNF2FF(index); float Ez; hipComplex pi(PI , 0); hipComplex two(2.0,0.0); hipComplex negativeone(-1.0,0); hipComplex deltatime(dt,0); if(isOnyp(x,y)) { Ez = (dev_Ez[dgetCell(x,y+1,nx+1)]+dev_Ez[dgetCell(x,y,nx+1)])/2; Ez += (dev_Ezic[dgetCell(x,y+1,nx+1)] + dev_Ezic[dgetCell(x,y,nx+1)] + dev_Ezip[dgetCell(x,y+1,nx+1)] + dev_Ezip[dgetCell(x,y,nx+1)])/4; float Hx = dev_Hx[dgetCell(x,y,nx)] + dev_Hx_inc[dgetCell(x,y,nx)]; cjzyp[index-(nx+ny-4*NF2FFdistfromboundary-2)] += -1*Hx*deltatime*cuexp((float)(-1)*j*(float)2*pi*freq*(float)(*timestep)*deltatime);//cjzyp and cmxyp have nx - 2*NF2FFBoundary -2 elements cmxyp[index-(nx+ny-4*NF2FFdistfromboundary-2)] += -1*Ez*deltatime*cuexp((float)-1.0*j*(float)2.0*(float)PI*freq*((float)(*timestep)-0.5)*(float)dt); } else if(isOnxp(x))//X faces override y faces at their intersections { Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x+1,y,nx+1)])/2; Ez += (dev_Ezic[dgetCell(x+1,y,nx+1)] + dev_Ezic[dgetCell(x,y,nx+1)] + dev_Ezip[dgetCell(x+1,y,nx+1)] + dev_Ezip[dgetCell(x,y,nx+1)])/4; float Hy = dev_Hy[dgetCell(x,y,nx)] + dev_Hy_inc[dgetCell(x,y,nx)]; cjzxp[index-(nx-2*NF2FFdistfromboundary-2)] += 
Hy*deltatime*cuexp(-1*j*2*pi*freq*(float)(*timestep)*(float)dt);//cjzxp and cmyxp have ny-2*NF2FFBound elements cmyxp[index-(nx-2*NF2FFdistfromboundary-2)] += Ez*(float)dt*cuexp((float)(-1)*j*(float)2.0*pi*freq*((float)(*timestep)-0.5)*(float)dt);// this is the discrete fourier transform, by the way. } else if(isOnyn(x,y)) { Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x,y+1,nx+1)])/2; Ez += (dev_Ezic[dgetCell(x,y+1,nx+1)] + dev_Ezic[dgetCell(x,y,nx+1)] + dev_Ezip[dgetCell(x,y+1,nx+1)] + dev_Ezip[dgetCell(x,y,nx+1)])/4; float Hx=dev_Hx[dgetCell(x,y,nx)]+dev_Hx_inc[dgetCell(x,y,nx)]; cjzyn[index] += Hx*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*(float)(*timestep)*(float)dt); //cjzyn and cmxyn need to have nx-2*NF2FFbound-2 elements cmxyn[index] += Ez*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*((float)(*timestep)-0.5)*(float)dt); } else if(isOnxn(x)) { Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x+1,y,nx+1)])/2; Ez += (dev_Ezic[dgetCell(x+1,y,nx+1)] + dev_Ezic[dgetCell(x,y,nx+1)] + dev_Ezip[dgetCell(x+1,y,nx+1)] + dev_Ezip[dgetCell(x,y,nx+1)])/4; float Hy = dev_Hy[dgetCell(x,y,nx)] + dev_Hy_inc[dgetCell(x,y,nx)]; cjzxn[index-(2*nx+ny-6*NF2FFdistfromboundary-4)] += -1*Hy*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*(float)(*timestep)*(float)dt); // cjzxn and cmyxn must have ny-2*NFdistfromboundary elements cmyxn[index-(2*nx+ny-6*NF2FFdistfromboundary-4)] += -1*Ez*(float)dt*cuexp(-1.0*j*2.0*(float)PI*freq*((float)(*timestep)-0.5)*(float)dt); } } } __host__ __device__ int getOptimizationCell(int x, int y) { int x_coord,y_coord; x_coord = (x-(nx/2-(int)(breast_radius/dx)))/(2*breast_radius/(9*dx)); y_coord = (y-(ny/2-breast_radius/dy))/(2*breast_radius/(9*dy));//the optimization space is 216 FDTD cells wide and high. //The optimization space is split into 25 by 25 optimization cells. //each optimization cell has 24 by 24 FDTD cells within it. That's what the 108, 24 and 25 are about. 
return x_coord+9*y_coord;//The max return should be, 9*9-1, hopefully. } void N2FPostProcess (float* D,float f, hipComplex *N,hipComplex *L,hipComplex *cjzxp,hipComplex *cjzyp,hipComplex *cjzxn,hipComplex *cjzyn,hipComplex *cmxyp,hipComplex *cmyxp,hipComplex *cmxyn,hipComplex *cmyxn) { int indexofleg1 = nx-2*NF2FFdistfromboundary-2; int indexofleg2 = nx+ny-4*NF2FFdistfromboundary-2; int indexofleg3 = 2*nx+ny-6*NF2FFdistfromboundary-4; int maxindex = 2*nx-8*NF2FFdistfromboundary+2*ny-4; int x,y; float rhoprime; float Psi; int Phi_index; hipComplex Mphi(0,0); float Phi; float k = 2*PI*f/c0; hipComplex negativeone(-1.0,0.0); int index = 0; hipComplex jcmpx(0,1); //float Prad = calc_radiated_power(cjzxp,cjzyp,cjzxn,cjzyn,cmxyp,cmyxp,cmxyn,cmyxn); float Prad = calc_incident_power(f); //std::cout<<"Prad = "<<Prad<<std::endl; float flx, fly; for(Phi_index = 0; Phi_index<numberofobservationangles;Phi_index++) { Phi = 2*PI/numberofobservationangles*(float)Phi_index; for(index = 0;index<indexofleg1;index++) { x = CPUgetxfromthreadIdNF2FF(index); y = CPUgetyfromthreadIdNF2FF(index); flx = (float)x;//float x fly = (float)y + 0.5; rhoprime = sqrt(pow((dx*((-1.0*(float)nx/2)+1+flx)),2)+pow((dy*(-1.0*(float)ny/2+1+fly)),2)); Psi = atan2(-1*((float)ny/2)+1+fly,-1*((float)nx/2)+1+flx)-Phi; N[Phi_index]+=-1.0*cjzyn[index]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dx; L[Phi_index]+=-1.0*sin(Phi)*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*cmxyn[index]*dx;//Lphi = } for(index = indexofleg1;index<indexofleg2;index++) { x = CPUgetxfromthreadIdNF2FF(index); y = CPUgetyfromthreadIdNF2FF(index); flx = (float)x+0.5; fly = (float)y; rhoprime = sqrt(pow((dx*(((float)nx/2)-1-flx)),2)+pow((dy*(((float)ny/2)-1-fly)),2)); Psi = atan2(-1*((float)ny/2)+1+fly,(-1*((float)nx/2)+1+flx))-Phi; N[Phi_index]+=-1.0*cjzxp[index-indexofleg1]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dy; L[Phi_index]+=cos(Phi)*cmyxp[index-indexofleg1]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dy;//L_phi = -Lxsin(phi)+Lycos(Phi) here we only have 
Ly } for(index=indexofleg2;index<indexofleg3;index++) { x = CPUgetxfromthreadIdNF2FF(index); y = CPUgetyfromthreadIdNF2FF(index); flx = (float)x; fly = (float)y + 0.5; rhoprime = sqrt(pow((dx*(((float)nx/2)-1-flx)),2)+pow((dy*(((float)ny/2)-1-fly)),2)); Psi = atan2((-1*(float)ny/2+1+fly),(-1*((float)nx/2)+1+flx))-Phi; N[Phi_index]+=-1.0*cjzyp[index-indexofleg2]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dx; L[Phi_index]+=-1.0*sin(Phi)*cmxyp[index-indexofleg2]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dx;// } for(index = indexofleg3;index<maxindex;index++) { x = CPUgetxfromthreadIdNF2FF(index); y = CPUgetyfromthreadIdNF2FF(index); flx = (float)x+0.5; fly = (float)y; rhoprime = sqrt(pow(dx*(((float)nx/2)-1-flx),2)+pow((dy*(((float)ny/2)-1-fly)),2)); Psi = atan2(-1*((float)ny/2)+1+fly,-1*(float)nx/2+1+flx)-Phi; N[Phi_index]+=-1.0*cjzxn[index-indexofleg3]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dy; L[Phi_index]+= cos(Phi)*cmyxn[index-indexofleg3]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dy; } D[Phi_index] = (k*k*cuabs(L[Phi_index]+(float)eta0*N[Phi_index])*cuabs(L[Phi_index]+(float)eta0*N[Phi_index])/((float)8*(float)PI*(float)eta0*Prad*33.329));//why 33.329? I dunno, something is probably wrong with Prad. 
} } float fitness(float* D,int max_index, float* measurement) { float fit = 0; for(int i =0;i<max_index;i++) { fit -= pow((measurement[i]-D[i]),2)/(numberofexcitationangles*pow(measurement[i],2)); } return fit; } //static void draw_func(void){ // glDrawPixels(nx,ny,GL_RGBA,GL_UNSIGNED_BYTE,0); // glutSwapBuffers; //} using namespace std; void Ceze_init(float * eps_r_z, float* sig_e_z, float* Ceze); void Cezhy_init(float* eps_r_z, float* sigma_e_z,float*Cezhy,float*kex); void Cezhx_init(float* eps_r_z,float*sigma_e_z,float*Cezhx,float*kex); void eps_r_z_init(float * eps_r_z,const vector<float> &argument); void sigma_e_z_init(float *sigma_e_z,float*sigma_e_pml,const vector<float> &argument); void Cezj_init(float*eps_r_z,float*sigma_e_z,float*Cezj); void Ez_init(float*Ez); void Ey_init(float*Ey); //void Jz_init(float*Jz); void Chxh_init(float*mu_r_x,float*sigma_m_x,float*Chxh); void Chxez_init(float*mu_r_x,float*sigma_m_x,float*Chxez); //void Chxm_init(float*mu_r_x,float*sigma_m_x,float*Chxm); void Chyh_init(float*mu_r_y,float*sigma_m_y,float*Chyh); void Chyez_init(float*mu_r_y,float*sigma_m_y,float*Chyez); //void Chym_init(float*mu_r_y,float*sigma_m_y,float*Chym); void Hy_init(float*Hy); void Hx_init(float*Hx); void My_init(float*My); void Mx_init(float*Mx); void mu_r_y_init(float*mu_r_y); void mu_r_x_init(float*mu_r_x); void sigma_m_y_init(float*sigma_m_y_init); void sigma_m_x_init(float*sigma_m_x_init); int getCell(int x,int y,int size); void Jz_waveform(float * time,float*Jz_impressed); void waveform_time_init(float*time1); float* Make2DfloatArray(int arraySizeX, int arraySizeY); void C_Psi_ezy_init(float *C_Psi_ezy,float*Cezhx); void C_Psi_ezx_init(float* C_Psi_ezx,float*Cezhy); void C_Psi_hyx_init(float*C_Psi_hyx,float*Chyez); void C_psi_hxy_init(float *C_Psi_hxy,float*Chxez); void aex_init(float*aex,float*sigma_e_pml,float*kex,float*alpha_e_x,float*bex); void bex_init(float*bex ,float*sigma_e_pml,float*kex,float*alpha_e_x); void 
bey_init(float*bey,float*sigma_e_pml,float*key,float*alpha_e_y); void amy_init(float*amy,float*sigma_m_pml,float*kmy,float*alpha_m_y,float*bmy); void bmy_init(float*bmy,float*sigma_m_pml,float*kmy,float*alpha_m_y); void amx_init(float*amx,float*sigma_m_pml,float*kmx,float*alpha_m_x,float*bmx); void bmx_init(float*bmx,float*sigma_m_pml,float*kmx,float*alpha_m_x); void alpha_e_init(float*alpha_e); void alpha_m_init(float*alpha_e,float*alpha_m); void k_e_init(float*k); void k_m_init(float*k); void sigma_e_pml_init(float* sigma_e_pml); void sigma_m_pml_init(float*sigma_m_pml,float*sigma_e_pml); void Psi_ezy_init(float*Psi_ezy); void Psi_ezx_init(float*Psi_ezx); void Psi_hyx_init(float*Psi_hyx); void Psi_hxy_init(float*Psi_hxy); void CJ_Init(hipComplex * cjzyn,int size); __global__ void scattered_parameter_init(float*eps_r_z,float*sigma_e_z,float*Cezeic,float*Cezeip); double FDTD_GPU(const vector<double> &arguments) { // BMP Output_Image; //BMP Scattered_Field_snapshot; // Output_Image.SetSize((nx+1),(ny+1)); // Output_Image.SetBitDepth(16); //Scattered_Field_snapshot.SetSize((nx+1),(ny+1)); //Scattered_Field_snapshot.SetBitDepth(16); //RGBApixel Temp; // string outputfilename; cout << "calculating FDTD GPU" << endl; hipSetDevice(0); vector<float> image; for (int lerp = 0; lerp < 81; lerp++) { //This is setting the material parameters of the optimization cells. 
image.push_back((float)arguments.at(lerp)); //image.push_back(10); } for (int lerp = 81; lerp < 81 * 2; lerp++) { image.push_back((float)arguments.at(lerp)); // image.push_back(0); } //GLuint bufferObj; //cudaGraphicsResource *resource; hipError_t error; //int dev; //hipDeviceProp_t prop; //memset(&prop,sizeof(hipDeviceProp_t),sizeof(hipDeviceProp_t)); //prop.major = 1; //prop.minor = 1; //hipChooseDevice(&dev,&prop); // hipGLSetGLDevice(dev); /*glutInit(&argc,argv); glewInit(); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(nx,ny); glutCreateWindow("bitmap"); glGenBuffers(1,&bufferObj); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj); glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, nx*ny*4,NULL,GL_DYNAMIC_DRAW_ARB); hipGraphicsGLRegisterBuffer(&resource,bufferObj,hipGraphicsMapFlagsNone);*/ //uchar4* devPtr; //size_t size; //hipGraphicsMapResources(1,&resource,NULL); //hipGraphicsResourceGetMappedPointer((void**)&devPtr,&size,resource); float *Ceze, *Cezhy, *Cezhx, *dev_Cezeic, *dev_Cezeip, *Ez, *eps_r_z, *sigma_e_z, *Hy, *Hx, *kex, *aex, *bex, *amx, *bmx, *alpha_e, *alpha_m, *sigma_e_pml, *sigma_m_pml, *Psi_ezy, *Psi_ezx, *Psi_hyx, *Psi_hxy, *kmx; //*Cezj later if using loop current source float* dev_sigma_e_z,*dev_eps_r_z; float freq = center_freq; float *dev_freq,*D,*dev_Phi; hipComplex *cjzxp,*cjzyp,*cjzxn,*cjzyn,*cmxyp,*cmyxp,*cmxyn,*cmyxn; hipComplex *hcjzxp,*hcjzyp,*hcjzxn,*hcjzyn,*hcmxyp,*hcmyxp,*hcmxyn,*hcmyxn; hipComplex *L,*N; int grid_x = int(ceil((float)nx/22)); int grid_y = int(ceil((float)ny/22)); dim3 grid(grid_x,grid_y); dim3 block(22,22); Ceze = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); Cezhy = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); Cezhx = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); //Cezj = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); // if using loop current source Ez = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); eps_r_z = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); sigma_e_z = 
(float*)malloc(sizeof(float)*(1+nx)*(1+ny)); D = (float*)malloc(sizeof(float)*numberofexcitationangles*numberofobservationangles);//D = (float*)malloc(numberofobservationangles*sizeof(float)); Hy=(float*)malloc(sizeof(float)*nx*ny); Hx=(float*)malloc(sizeof(float)*nx*ny); kex = (float*)malloc(sizeof(float)*10); kmx = (float*)malloc(sizeof(float)*10); aex=(float*)malloc(sizeof(float)*10); bex=(float*)malloc(sizeof(float)*10); amx=(float*)malloc(sizeof(float)*10); bmx=(float*)malloc(sizeof(float)*10); alpha_e=(float*)malloc(sizeof(float)*10); alpha_m=(float*)malloc(sizeof(float)*10); sigma_e_pml=(float*)malloc(sizeof(float)*10); sigma_m_pml=(float*)malloc(sizeof(float)*10); Psi_ezy=(float*)malloc(sizeof(float)*ny*20); Psi_ezx=(float*)malloc(sizeof(float)*nx*20); Psi_hyx=(float*)malloc(sizeof(float)*ny*20); Psi_hxy=(float*)malloc(sizeof(float)*nx*20); hcjzyp = (hipComplex*)malloc(sizeof(hipComplex )*size_cjzy); hcjzyn = (hipComplex *)malloc(sizeof(hipComplex )*size_cjzy); hcjzxp = (hipComplex *)malloc(sizeof(hipComplex )*size_cjzx); hcjzxn = (hipComplex *)malloc(sizeof(hipComplex )*size_cjzx); hcmxyn = (hipComplex *)malloc(sizeof(hipComplex )*size_cjzy); hcmxyp = (hipComplex *)malloc(sizeof(hipComplex )*size_cjzy); hcmyxp = (hipComplex *)malloc(sizeof(hipComplex )*size_cjzx); hcmyxn = (hipComplex *)malloc(sizeof(hipComplex )*size_cjzx); L = (hipComplex*)malloc(sizeof(hipComplex)*size_NF2FF_total); N = (hipComplex*)malloc(sizeof(hipComplex)*size_NF2FF_total); eps_r_z_init(eps_r_z,image); sigma_e_z_init(sigma_e_z,sigma_e_pml,image); //float*time1; //time1 = (float*)malloc(sizeof(float)*number_of_time_steps); Ceze_init(eps_r_z,sigma_e_z,Ceze); k_e_init(kex); k_m_init(kmx); Cezhy_init(eps_r_z,sigma_e_z,Cezhy,kex); Cezhx_init(eps_r_z,sigma_e_z,Cezhx,kex); sigma_e_pml_init(sigma_e_pml); sigma_m_pml_init(sigma_m_pml,sigma_e_pml); alpha_e_init(alpha_e); alpha_m_init(alpha_e,alpha_m); bex_init(bex ,sigma_e_pml,kex,alpha_e); aex_init(aex,sigma_e_pml,kex,alpha_e,bex); 
bmx_init(bmx,sigma_m_pml,kmx,alpha_m); amx_init(amx,sigma_m_pml,kmx,alpha_m,bmx); for (int i = 0; i < 10; i++) { cout<<"kex["<<i<<"]= "<<kex[i]<<endl; cout<<"kmx["<<i<<"]= "<<kmx[i]<<endl; cout<<"aex["<<i<<"]= "<<aex[i]<<endl; cout<<"amx["<<i<<"]= "<<amx[i]<<endl; cout<<"bex["<<i<<"]= "<<bex[i]<<endl; cout<<"bmx["<<i<<"]= "<<bmx[i]<<endl; cout<<"alpha_e = "<<alpha_e[i]<<endl; cout<<"alpha_m = "<<alpha_m[i]<<endl; cout << endl; } //Jz_init(Jz); //system("pause"); //FILE* file = fopen("results.txt", "w"); //float*Jz_impressed = (float*)malloc(sizeof(float)*number_of_time_steps); //waveform_time_init(time1); //Jz_waveform(time1,Jz_impressed); //int source_position_index_x = int(nx*source_position/domain_size)+1; // int source_position_index_y = int(ny*source_position/domain_size)+1; float*dev_Ceze,*dev_Cezhy,*dev_Cezhx,*dev_bex,*dev_aex,*dev_bmx,*dev_amx,*dev_kex,*dev_kmx;//dev_Cezj if using loop current source float *dev_Ez,*dev_Hy,*dev_Hx; float*dev_Psi_ezy,*dev_Psi_ezx,*dev_Psi_hyx,*dev_Psi_hxy; hipMalloc(&dev_eps_r_z,sizeof(float)*(nx+1)*(ny+1)); hipMalloc(&dev_sigma_e_z,sizeof(float)*(nx+1)*(ny+1)); hipMalloc(&dev_Cezeic,sizeof(float)*(nx+1)*(ny+1)); hipMalloc(&dev_Cezeip,sizeof(float)*(nx+1)*(ny+1)); hipMemcpy(dev_eps_r_z,eps_r_z,sizeof(float)*(nx+1)*(ny+1),hipMemcpyHostToDevice); hipMemcpy(dev_sigma_e_z,sigma_e_z,sizeof(float)*(nx+1)*(ny+1),hipMemcpyHostToDevice); hipLaunchKernelGGL(( scattered_parameter_init), dim3(grid),dim3(block), 0, 0, dev_eps_r_z,dev_sigma_e_z,dev_Cezeic,dev_Cezeip); //float *Cezeic = (float*)malloc((sizeof(float))*(nx+1)*(ny+1)); // float *Cezeip = (float*)malloc((sizeof(float))*(nx+1)*(ny+1)); //hipMemcpy(Cezeic,dev_Cezeic,sizeof(float)*(nx+1)*(ny+1),hipMemcpyDeviceToHost); //hipMemcpy(Cezeip,dev_Cezeip,sizeof(float)*(nx+1)*(ny+1),hipMemcpyDeviceToHost); float radius; hipMalloc(&dev_Phi,sizeof(float)); hipMalloc(&dev_kex,sizeof(float)*10); hipMalloc(&dev_kmx,sizeof(float)*10); hipMalloc(&dev_Ez,sizeof(float)*(nx+1)*(ny+1)); 
hipMalloc(&dev_Hy,sizeof(float)*nx*ny); hipMalloc(&dev_freq ,sizeof(float)); hipMalloc(&dev_Hx,sizeof(float)*nx*ny); hipMalloc(&dev_Psi_ezy,sizeof(float)*20*(nx+1)); hipMalloc(&dev_Psi_ezx,sizeof(float)*20*(ny+1)); hipMalloc(&dev_Psi_hyx,sizeof(float)*20*(ny)); hipMalloc(&dev_Psi_hxy,sizeof(float)*20*(nx)); hipMalloc(&cjzxp,sizeof(hipComplex)*size_NF2FF_total); hipMalloc(&cjzyp,sizeof(hipComplex)*size_NF2FF_total); hipMalloc(&cjzxn,sizeof(hipComplex)*size_NF2FF_total); hipMalloc(&cjzyn,sizeof(hipComplex)*size_NF2FF_total); hipMalloc(&cmxyp,sizeof(hipComplex)*size_NF2FF_total); hipMalloc(&cmxyn,sizeof(hipComplex)*size_NF2FF_total); hipMalloc(&cmyxp,sizeof(hipComplex)*size_NF2FF_total); hipMalloc(&cmyxn,sizeof(hipComplex)*size_NF2FF_total); hipMemcpy(dev_freq,&freq,sizeof(float),hipMemcpyHostToDevice); hipMalloc(&dev_bex,sizeof(float)*10); hipMalloc(&dev_bmx,sizeof(float)*10); hipMalloc(&dev_amx,sizeof(float)*10); hipMalloc(&dev_aex,sizeof(float)*10); hipMalloc(&dev_Ceze,sizeof(float)*(nx+1)*(ny+1)); hipMalloc(&dev_Cezhy,sizeof(float)*(nx+1)*(ny+1)); //hipMalloc(&dev_Cezj,sizeof(float)*(nx+1)*(ny+1)); if using current source error = hipGetLastError(); if (error != hipSuccess) { printf("Error after cuda Mallocs: %s\n",hipGetErrorString(error)); } hipLaunchKernelGGL(( Field_reset), dim3(grid),dim3(block), 0, 0, dev_Ez, dev_Hy, dev_Hx, dev_Psi_ezy, dev_Psi_ezx, dev_Psi_hyx, dev_Psi_hxy,cjzyn,cjzxp,cjzyp,cjzxn,cmxyn,cmyxp,cmxyp,cmyxn); error = hipGetLastError(); if (error != hipSuccess) { printf("Error after field reset: %s\n",hipGetErrorString(error)); } //Field_reset is also good for making all these values zero. 
hipMemcpy(dev_kex,kex,sizeof(float)*10,hipMemcpyHostToDevice); hipMemcpy(dev_kmx,kmx,sizeof(float)*10,hipMemcpyHostToDevice); hipMemcpy(dev_aex,aex,sizeof(float)*10,hipMemcpyHostToDevice); hipMemcpy(dev_bex,bex,sizeof(float)*10,hipMemcpyHostToDevice); hipMemcpy(dev_bmx,bmx,sizeof(float)*10,hipMemcpyHostToDevice); hipMemcpy(dev_amx,amx,sizeof(float)*10,hipMemcpyHostToDevice); hipMemcpy(dev_Ceze,Ceze,sizeof(float)*(nx+1)*(ny+1),hipMemcpyHostToDevice); hipMemcpy(dev_Cezhy,Cezhy,sizeof(float)*(nx+1)*(ny+1),hipMemcpyHostToDevice); error = hipGetLastError(); if(error != hipSuccess) { printf("Error after cuda Memcpy: %s\n",hipGetErrorString(error)); } int*dev_i; hipMalloc(&dev_i,sizeof(int)); float test_Ez; dim3 gridNF2FF((int)ceil(size_NF2FF_total/512.0)); dim3 blockNF2FF(512); float test_Ez_2; float Phi; //Output_Image.SetBitDepth(16); //for(int x = 0;x<nx+1;x++)// This double loop makes an image of the target. Delete when using this as forward solver. // for(int y = 0; y<ny+1;y++) // { // { // Temp.Green = 0; // if(eps_r_z[getCell(x,y,nx+1)] >15) // { // Temp.Red = 255; // Temp.Blue = 0; // } // else // { // Temp.Blue = 150; // Temp.Red = 0; // } // } // Output_Image.SetPixel(x,y,Temp); // } //Output_Image.WriteToFile("Permittivity_map_measurement.bmp"); /* The calculation part! 
*/ //ofstream measurement_data; //measurement_data.open("measurement_data.txt"); for(int Phi_index = 0; Phi_index<numberofexcitationangles; Phi_index++) { Phi = Phi_index*2*PI/numberofexcitationangles; hipMemcpy(dev_Phi,&Phi,sizeof(float),hipMemcpyHostToDevice); for(int i=0;i<number_of_time_steps;i++) { hipMemcpy(dev_i,&i,sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( H_field_update), dim3(grid),dim3(block), 0, 0, dev_Hy,dev_Hx,dev_Ez,dev_bmx,dev_Psi_hyx,dev_amx,dev_bmx,dev_amx,dev_Psi_hxy,dev_kmx); hipLaunchKernelGGL(( E_field_update), dim3(grid),dim3(block), 0, 0, dev_i,dev_Ez,dev_Hy,dev_Hx,dev_Psi_ezx,dev_aex,dev_aex,dev_bex,dev_bex,dev_Psi_ezy,dev_kex,dev_Cezhy,dev_Cezhy,dev_Ceze,dev_Cezeip,dev_Cezeic,dev_Phi); hipLaunchKernelGGL(( calculate_JandM), dim3(gridNF2FF),dim3(blockNF2FF), 0, 0, dev_freq, dev_i,dev_Ez,dev_Hy,dev_Hx,cjzxp,cjzyp,cjzxn,cjzyn,cmxyp,cmyxp,cmxyn,cmyxn); } hipMemcpy(hcjzyn,cjzyn,sizeof(hipComplex)*size_cjzy,hipMemcpyDeviceToHost); hipMemcpy(hcjzxp,cjzxp,sizeof(hipComplex)*size_cjzx,hipMemcpyDeviceToHost); hipMemcpy(hcjzyp,cjzyp,sizeof(hipComplex)*size_cjzy,hipMemcpyDeviceToHost); hipMemcpy(hcjzxn,cjzxn,sizeof(hipComplex)*size_cjzx,hipMemcpyDeviceToHost); hipMemcpy(hcmxyn,cmxyn,sizeof(hipComplex)*size_cjzy,hipMemcpyDeviceToHost); hipMemcpy(hcmyxp,cmyxp,sizeof(hipComplex)*size_cjzx,hipMemcpyDeviceToHost); hipMemcpy(hcmxyp,cmxyp,sizeof(hipComplex)*size_cjzy,hipMemcpyDeviceToHost); hipMemcpy(hcmyxn,cmyxn,sizeof(hipComplex)*size_cjzy,hipMemcpyDeviceToHost); CJ_Init(L,size_NF2FF_total); CJ_Init(N,size_NF2FF_total); N2FPostProcess(D + Phi_index*numberofobservationangles, freq,N,L,hcjzxp,hcjzyp,hcjzxn,hcjzyn,hcmxyp,hcmyxp,hcmxyn,hcmyxn); //notice the D + Phi_index*numberofobservationangles. D is in total 4*numberofobservaion angles, so that's how we fill them in sequentially. 
//for(int i = 0;i<numberofobservationangles;i++) // This is for recording simulated measured data //{ //measurement_data<<*(D+Phi_index*numberofobservationangles+i)<<" , "; //cout<<*(D+Phi_index*numberofobservationangles+i)<<endl; //} //measurement_data<<endl; hipLaunchKernelGGL(( Field_reset), dim3(grid),dim3(block), 0, 0, dev_Ez, dev_Hy, dev_Hx, dev_Psi_ezy, dev_Psi_ezx, dev_Psi_hyx, dev_Psi_hxy,cjzyn,cjzxp,cjzyp,cjzxn,cmxyn,cmyxp,cmxyp,cmyxn); } float measurement[numberofobservationangles*numberofexcitationangles] = {0.544912 , 0.518606 , 0.439233 , 0.330533 , 0.219116 , 0.135115 , 0.0923969 , 0.0774134 , 0.0740459 , 0.0739238 , 0.0660047 , 0.0465372 , 0.0248307 , 0.00913681 , 0.00186162 , 0.0038402 , 0.0130785 , 0.0238094 , 0.0312918 , 0.035705 , 0.0388307 , 0.039513 , 0.0368443 , 0.0338221 , 0.0324815 , 0.0305907 , 0.0270149 , 0.0239178 , 0.0224438 , 0.021849 , 0.0217346 , 0.0222152 , 0.023146 , 0.0245181 , 0.0267161 , 0.0286964 , 0.0276803 , 0.0235098 , 0.0197177 , 0.0183168 , 0.0196998 , 0.0261493 , 0.0375584 , 0.0479223 , 0.0511598 , 0.0461443 , 0.035713 , 0.0249863 , 0.0203708 , 0.0260456 , 0.0395441 , 0.054163 , 0.0660136 , 0.0763823 , 0.0935922 , 0.132053 , 0.201299 , 0.299247 , 0.410792 , 0.504467 , 0.0490085 , 0.0278468 , 0.0123693 , 0.00899709 , 0.0196632 , 0.0401112 , 0.0623734 , 0.0809561 , 0.096057 , 0.113814 , 0.145125 , 0.200388 , 0.283438 , 0.386362 , 0.486139 , 0.549594 , 0.547993 , 0.475775 , 0.358033 , 0.230962 , 0.118935 , 0.039843 , 0.00700227 , 0.0112335 , 0.0300356 , 0.0494414 , 0.0605159 , 0.0585777 , 0.0503323 , 0.045704 , 0.0474064 , 0.0523123 , 0.0558987 , 0.0545722 , 0.0475098 , 0.0366045 , 0.0248037 , 0.0155752 , 0.0115322 , 0.0127167 , 0.0176523 , 0.0243556 , 0.0310764 , 0.037444 , 0.0432292 , 0.0469609 , 0.0471761 , 0.0435653 , 0.0369347 , 0.0293987 , 0.0235478 , 0.0206039 , 0.020754 , 0.0247748 , 0.0336772 , 0.047007 , 0.0618746 , 0.0734482 , 0.0763332 , 0.0674785 , 0.0463129 , 0.0448933 , 0.0398454 , 0.0319834 , 0.0239428 , 
0.0174267 , 0.0129155 , 0.0116624 , 0.0154122 , 0.0247183 , 0.0376821 , 0.0494142 , 0.0552493 , 0.0544909 , 0.0501016 , 0.0466044 , 0.047395 , 0.0522298 , 0.0576919 , 0.0588555 , 0.0504011 , 0.0311956 , 0.0107719 , 0.00755493 , 0.0394798 , 0.116099 , 0.232324 , 0.36478 , 0.478314 , 0.541685 , 0.541186 , 0.484009 , 0.391878 , 0.291105 , 0.204554 , 0.145352 , 0.113254 , 0.0973423 , 0.0835717 , 0.0637299 , 0.0397899 , 0.0189781 , 0.00814281 , 0.0118845 , 0.0291142 , 0.0513172 , 0.0680543 , 0.0744519 , 0.0718442 , 0.0622228 , 0.0473734 , 0.0329352 , 0.0245156 , 0.0212818 , 0.0204027 , 0.0228792 , 0.0298908 , 0.0380399 , 0.0432513 , 0.0455291 , 0.0469428 , 0.049667 , 0.0453111 , 0.0370016 , 0.0278006 , 0.0201062 , 0.0173687 , 0.020228 , 0.0242543 , 0.0264199 , 0.0275476 , 0.027771 , 0.0262174 , 0.0237332 , 0.0219206 , 0.0212424 , 0.0214967 , 0.0226845 , 0.0248514 , 0.0275874 , 0.0300439 , 0.0318892 , 0.0340621 , 0.0369823 , 0.0388068 , 0.0379494 , 0.0350817 , 0.030462 , 0.0230471 , 0.0133404 , 0.00457234 , 0.00152755 , 0.00874873 , 0.0260448 , 0.0463293 , 0.0633742 , 0.0751071 , 0.0775575 , 0.0756597 , 0.0916989 , 0.141021 , 0.22185 , 0.328433 , 0.44207 , 0.524772 , 0.544711 , 0.498668 , 0.407614 , 0.29953 , 0.199594 , 0.128704 , 0.0929922 , 0.0772499 , 0.0654169 , 0.0536587 , 0.0399619 , 0.0255793 , 0.0193488 , 0.0253531 , 0.0373143 , };//I've just hardcoded the measurement values. Maybe later we'll read them from a text file. 
for (int i = 0; i < numberofexcitationangles*numberofobservationangles; i++) { cout << "D[" << i << " ]: " << D[i] << endl; } float fit; fit=fitness(D,numberofobservationangles*numberofexcitationangles, measurement); error = hipGetLastError(); free(Ceze); free(Cezhy); free(Cezhx); free(Ez); free(eps_r_z); free(sigma_e_z); free(Hy); free(Hx); free(kex); free(aex); free(bex); free(amx); free(bmx); free(alpha_e); free(alpha_m); free(sigma_e_pml); free(sigma_m_pml); free(Psi_ezy); free(Psi_ezx); free(Psi_hyx); free(Psi_hxy); free(kmx); free(D); free(hcjzxp); free(hcjzyp); free(hcjzxn); free(hcjzyn); free(hcmxyp); free(hcmyxp); free(hcmxyn); free(hcmyxn); free(L); free(N); //free(measurement); //float *Cezeic = (float*)malloc((sizeof(float))*(nx+1)*(ny+1)); //float *Cezeip = (float*)malloc((sizeof(float))*(nx+1)*(ny+1)); //float*Ceze,*Cezhy,*Cezhx,*dev_Cezeic,*dev_Cezeip,*Ez,*eps_r_z,*sigma_e_z,*Hy,*Hx, // *kex,*aex,*bex,*amx,*bmx,*alpha_e,*alpha_m,*sigma_e_pml,*sigma_m_pml // ,*Psi_ezy,*Psi_ezx,*Psi_hyx,*Psi_hxy,*kmx;//*Cezj later if using loop current source //float* dev_sigma_e_z,*dev_eps_r_z; //float freq = center_freq; //float *dev_freq,*D,*D_tot; //float* Ezip,*Ezic,*dev_Ezip,*dev_Ezic,*Hy_inc,*Hx_inc,*dev_Hy_inc,*dev_Hx_inc,*dev_Psi_ezy_inc,*dev_Psi_ezx_inc,*dev_Psi_hyx_inc,*dev_Psi_hxy_inc, // *Psi_ezy_inc,*Psi_ezx_inc,*Psi_hyx_inc,*Psi_hxy_inc; // //hipComplex *cjzxp,*cjzyp,*cjzxn,*cjzyn,*cmxyp,*cmyxp,*cmxyn,*cmyxn,*cjzxp_tot,*cjzyp_tot,*cjzxn_tot,*cjzyn_tot,*cmxyp_tot,*cmyxp_tot,*cmxyn_tot,*cmyxn_tot; //hipComplex *hcjzxp,*hcjzyp,*hcjzxn,*hcjzyn,*hcmxyp,*hcmyxp,*hcmxyn,*hcmyxn,*hcjzxp_tot,*hcjzyp_tot,*hcjzxn_tot,*hcjzyn_tot,*hcmxyp_tot,*hcmyxp_tot,*hcmxyn_tot // ,*hcmyxn_tot; hipFree(dev_Cezeic); hipFree(dev_Cezeip); hipFree(dev_sigma_e_z); hipFree(dev_eps_r_z); hipFree(dev_freq); hipFree(cjzxp); hipFree(cjzyp); hipFree(cjzxn); hipFree(cjzyn); hipFree(cmxyp); hipFree(cmyxp); hipFree(cmxyn); hipFree(cmyxn); hipFree(dev_Ceze); hipFree(dev_Cezhy); 
hipFree(dev_Cezhx); hipFree(dev_bex); hipFree(dev_aex); hipFree(dev_bmx); hipFree(dev_amx); hipFree(dev_kex); hipFree(dev_kmx); hipFree(dev_Ez); hipFree(dev_Hy); hipFree(dev_Hx); hipFree(dev_Psi_ezy); hipFree(dev_Psi_ezx); hipFree(dev_Psi_hyx); hipFree(dev_Psi_hxy); //float*dev_Ceze,*dev_Cezhy,*dev_Cezhx,*dev_Jz,*dev_bex,*dev_aex,*dev_bmx,*dev_amx,*dev_kex,*dev_kmx;//dev_Cezj if using loop current source //float *dev_Ez,*dev_Hy,*dev_Hx; //float*dev_Psi_ezy,*dev_Psi_ezx,*dev_Psi_hyx,*dev_Psi_hxy; cout << "fitness is: " << fit << endl; return (double)fit; } __global__ void scattered_parameter_init(float*eps_r_z,float*sigma_e_z,float*Cezeic,float*Cezeip) { int x=threadIdx.x+blockDim.x*blockIdx.x; int y=threadIdx.y+blockDim.y*blockIdx.y; if(x<(nx+1)&&y<(ny+1)) { Cezeic[dgetCell(x,y,nx+1)] = (2*(eps0-eps0*eps_r_z[dgetCell(x,y,nx+1)])-sigma_e_z[dgetCell(x,y,nx+1)]*dt)/(2*eps0*eps_r_z[dgetCell(x,y,nx+1)]+sigma_e_z[dgetCell(x,y,nx+1)]*dt); Cezeip[dgetCell(x,y,nx+1)] = -1*(2*(eps0-eps0*eps_r_z[dgetCell(x,y,nx+1)])+sigma_e_z[dgetCell(x,y,nx+1)]*dt)/(2*eps0*eps_r_z[dgetCell(x,y,nx+1)]+sigma_e_z[dgetCell(x,y,nx+1)]*dt); } } int getCell(int x, int y,int size)//size will just be the width in the x dimension of the array. 
{ return x+y*size; } float* Make2DfloatArray(int arraySizeX, int arraySizeY) { float* theArray; theArray = (float*) malloc(arraySizeX*arraySizeY*sizeof(float*)); return theArray; } void waveform_time_init(float*time1) { int size = number_of_time_steps; for(int i = 0;i<size;i++) { time1[i]=(float)i*dt; } } void Jz_waveform(float * time,float*Jz_impressed) { float w = 2*PI*center_freq;//center_freq is the frequency for(int i = 0;i<number_of_time_steps;i++) { Jz_impressed[i]= 10*sin(w*time[i]); //Jz_impressed[i]=exp(-1*((time[i]-2e-10)/5e-11)*(time[i]-2e-10)/(5e-11)); } } void Ceze_init(float * eps_r_z, float* sig_e_z, float* Ceze) { int size = nx+1; for(int j=0;j<ny+1;j++) { for(int i=0;i<size;i++) { Ceze[getCell(i,j,nx+1)] = (2*eps_r_z[getCell(i,j,nx+1)]*eps0-dt*sig_e_z[getCell(i,j,nx+1)])/(2*eps_r_z[getCell(i,j,nx+1)]*eps0+dt*sig_e_z[getCell(i,j,nx+1)]); } } } void Cezhy_init(float*eps_r_z, float* sigma_e_z,float* Cezhy,float*kex) { int size = nx+1; for(int j =0;j<ny+1;j++) { for(int i=0;i<size;i++) { Cezhy[getCell(i,j,size)] = (2*dt/dx)/(2*eps_r_z[getCell(i,j,size)]*eps0+dt*sigma_e_z[getCell(i,j,size)]); } } } void Cezhx_init(float* eps_r_z,float*sigma_e_z,float*Cezhx,float*kex) { int size=nx+1; for(int j=0;j<ny+1;j++) { for(int i =0;i<nx+1;i++) { Cezhx[getCell(i,j,size)]=(2*dt/dy)/(2*eps_r_z[getCell(i,j,size)]*eps0+dt*sigma_e_z[getCell(i,j,size)]); } } } void Cezj_init(float*eps_r_z,float*sigma_e_z,float*Cezj) { int size =nx+1; for(int j=0;j<ny+1;j++) { for(int i=0;i<nx+1;i++) { Cezj[getCell(i,j,size)] = (-2*dt)/(2*eps_r_z[getCell(i,j,size)]*eps0+dt*sigma_e_z[getCell(i,j,size)]); } } } void Ez_init(float*Ez) { int size=nx+1; for(int j = 0;j<ny+1;j++) { for(int i = 0;i<nx+1;i++) { Ez[getCell(i,j,size)] = (float)0; } } } /*void Jz_init(float*Jz) { for(int j =0;j<ny+1;j++) { for(int i = 0;i<nx+1;i++) { Jz[getCell(i,j,nx+1)] = 0; } } }*/ void Chyh_init(float*mu_r_y,float*sigma_m_y,float*Chyh) { int size=nx; for(int i = 0;i<nx;i++) for(int j =0;j<ny;j++) { { 
Chyh[getCell(i,j,size)] = (2*mu_r_y[getCell(i,j,size)]*mu0-dt*sigma_m_y[getCell(i,j,size)])/(2*mu_r_y[getCell(i,j,size)]*mu0+dt*sigma_m_y[getCell(i,j,size)]); } } } void Chxh_init(float*mu_r_x,float*sigma_m_x,float*Chxh) { int size=nx; for(int i = 0;i<nx;i++) for(int j =0;j<ny;j++) { { Chxh[getCell(i,j,size)] = (2*mu_r_x[getCell(i,j,size)]*mu0-dt*sigma_m_x[getCell(i,j,size)])/(2*mu_r_x[getCell(i,j,size)]*mu0+dt*sigma_m_x[getCell(i,j,size)]); } } } void Chyez_init(float*mu_r_y,float*sigma_m_y,float*Chyez) { int size = nx; for(int j =0;j<ny;j++) { for(int i = 0;i<size;i++) { Chyez[getCell(i,j,size)] = (2*dt/dx)/(2*mu_r_y[getCell(i,j,size)]*mu0+dt*sigma_m_y[getCell(i,j,size)]); } } } void Chxez_init(float*mu_r_x,float*sigma_m_x,float*Chxez) { int size = nx; for(int j =0;j<ny;j++) { for(int i = 0;i<size;i++) { Chxez[getCell(i,j,size)] = (2*dt/dy)/(2*mu_r_x[getCell(i,j,size)]*mu0+dt*sigma_m_x[getCell(i,j,size)]); } } } /*void Chym_init(float*mu_r_y,float*sigma_m_y,float*Chym) { int size = nx; for(int j =0;j<ny;j++) { for(int i = 0;i<size;i++) { Chym[getCell(i,j,size)] = (-2*dt)/(2*mu_r_y[getCell(i,j,size)]*mu0+dt*sigma_m_y[getCell(i,j,size)]); } } } void Chxm_init(float*mu_r_x,float*sigma_m_x,float*Chxm) { int size = nx; for(int j =0;j<ny;j++) { for(int i = 0;i<size;i++) { Chxm[getCell(i,j,size)] = (-2*dt)/(2*mu_r_x[getCell(i,j,size)]*mu0+dt*sigma_m_x[getCell(i,j,size)]); } } }*/ void eps_r_z_init(float * eps_r_z,const vector<float> &argument) { int size = nx+1; float radius;//tumor_radius,tumor_radius_2,tumor_radius_3; for(int j =0;j<ny+1;j++) { for(int i = 0;i<nx+1;i++) { eps_r_z[getCell(i,j,size)] = 1; radius = sqrt(pow( ((float)i-nx/2)*dx,2) + pow( ((float)j-ny/2)*dy,2)); // tumor_radius = sqrt(pow( ((float)i - target_x)*dx,2) + pow( ((float)j-target_y)*dy,2)); if(radius<=breast_radius) { eps_r_z[getCell(i,j,size)] = (float)argument.at(getOptimizationCell(i,j)); //This is the line that should be uncommented if using as forward solver //eps_r_z[getCell(i,j,size)] = 
10; //if(tumor_radius <= tumor_size)//delete this if using as forward solver //{ // eps_r_z[getCell(i,j,size)] = 60; //} } } } } void sigma_e_z_init(float * sigma_e_z,float*sigma_e_pml, const vector<float> &argument) { int size = nx+1; float radius;//,tumor_radius; for(int j =0;j<ny+1;j++) { for(int i = 0;i<nx+1;i++) { sigma_e_z[getCell(i,j,size)] = 0; radius = sqrt(pow( ((float)i-nx/2)*dx,2) + pow( ((float)j-ny/2)*dy,2)); //tumor_radius = sqrt(pow( ((float)i - target_x)*dx,2) + pow( ((float)j-target_y)*dy,2)); if(radius<=breast_radius) { sigma_e_z[getCell(i,j,size)] = (float)argument.at(getOptimizationCell(i,j)+9*9); //sigma_e_z[getCell(i,j,size)] = 0.15; //if(tumor_radius <= tumor_size)//delete this if using as forward solver //{ // sigma_e_z[getCell(i,j,size)] = 0.7; //} } } } } void Hy_init(float*Hy) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { Hy[getCell(i,j,size)] = 0; } } } void Hx_init(float*Hx) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { Hx[getCell(i,j,size)] = 0; } } } void My_init(float*My) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { My[getCell(i,j,size)] = 0; } } } void Mx_init(float*Mx) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { Mx[getCell(i,j,size)] = 0; } } } void mu_r_y_init(float*mu_r_y) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { mu_r_y[getCell(i,j,size)] =1.000; } } } void mu_r_x_init(float*mu_r_x) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { mu_r_x[getCell(i,j,size)]=1.000; } } } void sigma_m_y_init(float*sigma_m_y) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { sigma_m_y[getCell(i,j,size)] = 0; } } } void sigma_m_x_init(float*sigma_m_x) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { sigma_m_x[getCell(i,j,size)] = 0; } } } void C_Psi_ezy_init(float *C_Psi_ezy,float*Cezhx) { int size = 20; for(int j = 0;j<ny;j++) for( int i =0;i<size;i++) { if(i<10) { 
C_Psi_ezy[getCell(i,j,size)]=dy*Cezhx[getCell(i,j,nx)]; } else { C_Psi_ezy[getCell(i,j,size)]=dy*Cezhx[getCell(nx-20+i,j,nx)]; } } } void C_Psi_ezx_init(float* C_Psi_ezx,float*Cezhy) { int size_y=20; for(int j=0;j<size_y;j++) { for(int i=0;i<nx;i++) { if(j<10) { C_Psi_ezx[getCell(i,j,nx)] = dx*Cezhy[getCell(i,j,nx)]; } else { C_Psi_ezx[getCell(i,j,nx)] = dx*Cezhy[getCell(i,ny-20+j,nx)]; } } } } void C_Psi_hyx_init(float*C_Psi_hyx,float*Chyez) { int size_x=20; for(int j=0;j<ny;j++) { for(int i=0;i<size_x;i++) { if(i<10) { C_Psi_hyx[getCell(i,j,size_x)]=dx*Chyez[getCell(i,j,nx)]; } else { C_Psi_hyx[getCell(i,j,size_x)]=dx*Chyez[getCell(nx-20+i,j,nx)]; } } } } void C_psi_hxy_init(float *C_Psi_hxy,float*Chxez) { int size_y=20; for(int j=0;j<size_y;j++) { for(int i=0;i<nx;i++) { if(j<11) { C_Psi_hxy[getCell(i,j,nx)]=dy*Chxez[getCell(i,j,nx)]; } else { C_Psi_hxy[getCell(i,j,nx)]=dy*Chxez[getCell(i,ny-20+j,nx)]; } } } } void aex_init(float*aex,float*sigma_e_pml,float*kex,float*alpha_e_x,float*bex) { int size=ncells; //aex[0]=0.0; //cout<<"aex[0] = "<<aex[0]<<endl; for(int i=0;i<size;i++) { aex[i]=((bex[i]-1)*sigma_e_pml[i])/(dx*(sigma_e_pml[i]*kex[i]+alpha_e_x[i]*kex[i]*kex[i])); //cout<<"aex["<<i<<"] = "<<aex[i]<<endl; } } void bex_init(float*bex ,float*sigma_e_pml,float*kex,float*alpha_e_x) { int size=ncells; for(int i=0;i<size;i++) { bex[i]=exp(-1*(dt/eps0)*(sigma_e_pml[i]/kex[i]+alpha_e_x[i])); //cout<<"bex["<<i<<"] = "<<bex[i]<<endl; } } void aey_init(float*aey,float*sigma_e_pml,float*key,float*alpha_e_y,float*bey) { for(int i=0;i>ncells;i++) { aey[i]=(bey[i]-1)*sigma_e_pml[i]/(dy*(sigma_e_pml[i]*key[i]+alpha_e_y[i]*key[i]*key[i])); } } void bey_init(float*bey,float*sigma_e_pml,float*key,float*alpha_e_y) { int size=ncells; for(int i=0;i<size;i++) { bey[i]=exp(-1*(dt/eps0)*(sigma_e_pml[i]/key[i]+alpha_e_y[i])); } } void amy_init(float*amy,float*sigma_m_pml,float*kmy,float*alpha_m_y,float*bmy) { int size=ncells; for(int i=0;i<size;i++) { 
amy[i]=(bmy[i]-1)*sigma_m_pml[i]/(dx*(sigma_m_pml[i]*kmy[i]+alpha_m_y[i]*kmy[i]*kmy[i])); } } void bmy_init(float*bmy,float*sigma_m_pml,float*kmy,float*alpha_m_y) { int size=ncells; for(int i=0;i<size;i++) { bmy[i]=exp(-1*(dt/mu0)*(sigma_m_pml[i]/kmy[i]+alpha_m_y[i])); } } void amx_init(float*amx,float*sigma_m_pml,float*kmx,float*alpha_m_x,float*bmx) { int size=ncells; //cout<<" amx = "<<amx[0]<<endl; //amx[0]=0.0; //cout<<" amx = "<<amx[0]<<endl; for(int i=0;i<size;i++) { amx[i]=(bmx[i]-1)*sigma_m_pml[i]/(dx*(sigma_m_pml[i]*kmx[i]+alpha_m_x[i]*kmx[i]*kmx[i])); cout<<" amx = "<<amx[i]<<endl; } } void bmx_init(float*bmx,float*sigma_m_pml,float*kmx,float*alpha_m_x) { int size=10; float argument; //float constant; for(int i=0;i<size;i++) { //constant = dt/mu0; //cout<< "dt/mu0 = "<<constant<<endl; argument = -1*(dt/mu0)*((sigma_m_pml[i]/kmx[i])+alpha_m_x[i]); bmx[i]=exp(argument); //cout<<"argument of bmx = "<<argument<<endl; //cout<<"bmx = "<<bmx[i]<<endl; } } void alpha_e_init(float*alpha_e) { float rho; int size=ncells; for(int i=0;i<ncells;i++) { rho = ((float)i+0.25)/ncells; alpha_e[i]=alpha_min+(alpha_max-alpha_min)*rho; //cout<<"alpha_e = "<<alpha_e[i]<<endl; } } void alpha_m_init(float*alpha_e,float*alpha_m) { int size=ncells; float rho; for(int i=0;i<size;i++) { rho = ((float)i+0.75)/ncells; alpha_m[i]=(mu0/eps0)*(alpha_min+(alpha_max-alpha_min)*rho); //cout<<"alpha_m = "<<alpha_m[i]<<endl; } } void k_e_init(float*k) { int size=ncells; float rho; for(int i=0;i<size;i++) { rho = ((float)i+0.25)/ncells; k[i]=pow(rho,npml)*(kmax-1)+1; //cout<<"kex ["<<i<<"]= "<<k[i]<<endl; } } void k_m_init(float*k) { int size=ncells; float rho; for(int i=0;i<size;i++) { rho = ((float)i+0.75)/ncells; k[i]=pow(rho,npml)*(kmax-1)+1; //cout<<"kmx ["<<i<<"]= "<<k[i]<<endl; } } void sigma_e_pml_init(float* sigma_e_pml) { float sigma_max = (npml+1)/(150*PI*dx); int size = 10; float rho; for(int i=0;i<size;i++) { rho = ((float)i+0.25)/ncells; 
sigma_e_pml[i]=sigma_max*sigma_factor*pow(rho,npml); cout<<"sigma_e_pml = "<<sigma_e_pml[i]<<endl; } } void sigma_m_pml_init(float*sigma_m_pml,float*sigma_e_pml) { float rho; int size = 10; float sigma_max = (npml+1)/(150*PI*dx); for(int i=0;i<size;i++) { rho = ((float)i+0.75)/ncells; sigma_m_pml[i]=(mu0/eps0)*sigma_max*sigma_factor*pow(rho,npml); cout<<"sigma_m_pml "<<sigma_m_pml[i]<<endl; } } void Psi_ezy_init(float*Psi_ezy) { int size=nx*20; for(int i=0;i<size;i++) { Psi_ezy[i]=0.0; } } void Psi_ezx_init(float*Psi_ezx) { int size=ny*20; for(int i=0;i<size;i++) { Psi_ezx[i]=0.0; } } void Psi_hyx_init(float*Psi_hyx) { int size=ny*20; for(int i=0;i<size;i++) { Psi_hyx[i]=0.0; } } void Psi_hxy_init(float*Psi_hxy) { int size=nx*20; for(int i=0;i<size;i++) { Psi_hxy[i]=0.0; } } void CJ_Init(hipComplex * cjzyn,int size) { hipComplex nullComplex(0,0); for( int i =0; i<size;i++) { cjzyn[i] = nullComplex; } }
3ebf2c0e94e7711993d2bd77508c9a31853b7637.cu
//#define GLEW_STATIC //#pragma comment(lib,"glew32.lib") //#include <windows.h> //#include <gl/glew.h> //#include <glut.h> #include <complex> #include <stdio.h> #include <iostream> #include <cmath> #include <stdlib.h> #include <fstream> #include <cstdlib> #include <fstream> #include <cuda.h> //#include "stdafx.h" #include <iomanip> #include <time.h> //#include <cuda_gl_interop.h> #include <cuda_runtime.h> //#include <cuComplex.h> #include <vector> #include <math_functions.h> //#include "EasyBMP.h" //#include "EasyBMP_DataStructures.h" //#include "EasyBMP_VariousBMPutilities.h" #define GL_GLEXT_PROTOTYPES #define PI 3.141592653589793238 #define alpha_max 0.01 #define alpha_min 0.000 #define eps0 8.85418e-12 #define sigma_factor 1.0 #define ncells 10 #define mu0 (PI*4e-7) #define center_freq (5e9) #define eta0 (sqrt(mu0/eps0)) #define c0 (1.0/sqrt(mu0*eps0)) #define dt (dx/c0/2)// dx/c0/2 #define domain_size 0.18 #define dx (0.001) #define NF2FFdistfromboundary ((int)floor((3.2*breast_radius/dx))) #define source_position 0.5 #define dy (0.001) #define number_of_time_steps 3000 #define f1x (nx/2 - 150) #define f2x (nx/2+150) #define f1y (ny/2) #define f2y (ny/2) //#define nx ((int)ceil(domain_size/dx)) //#define ny ((int)ceil(domain_size/dy)) #define nx ((int)ceil(12.7*breast_radius/dx)) #define ny ((int)ceil(12.7*breast_radius/dy)) #define d (10*dx) #define npml 2 #define kmax 10 #define numberofexcitationangles 4 #define isPW 1 #define isscattering 1 #define HANDLE_ERROR( err ) err #define sigma_max_pml (3/(200*PI*dx)) #define size_NF2FF_total (2*nx-8*NF2FFdistfromboundary+2*ny-4) #define size_cjzy (nx-2*NF2FFdistfromboundary-2) #define size_cjzx (ny-2*NF2FFdistfromboundary) #define numberofobservationangles 60 #define t0 (sqrt(20.0)*tau) // t0 = sqrt(20)*tau #define l0 (nx*dx/2-breast_radius) #define pwidth 10 #define nc 20 // 20 cells per wavelength #define fmax (c0/(nc*dx))// change if dy is bigger though now they're the same fmax is the highest frequency this 
program can handle
#define tau (3.3445267e-11) // float ta bu = sqrt(2.3)*nc*dx/(PI*c0*1/sqrt(eps_r_MAX)); from a calculation of fmax.
//#define tau (5.288161e-11)
#define target_x (nx/2+15)//105 is breast_radius / dx
#define target_y (ny/2-15)
#define source_x (nx/2) //(target_x-105-80)
#define source_y (ny/2)
#define breast_radius 0.0315 //87.535 mm . Sample size = 1.
#define tumor_size (0.01)
//#include <unistd.h>
//const cuComplex jcmpx (0.0, 1.0);
/*static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } }*/
//__constant__ float*dev_Ceze,*dev_Cezhy,*dev_Cezhx,*dev_Cezj,*dev_Jz,*dev_Chyh,*dev_Chxh,*dev_Chyez,*dev_Chxez,*dev_bex,*dev_bey,*dev_aex,*dev_aey,*dev_bmy,*dev_bmx,*dev_amy,*dev_amx,*dev_C_Psi_ezy,
//*dev_C_Psi_ezx,*dev_C_Psi_hxy,*dev_C_Psi_hyx;

// Minimal single-precision complex number usable from both host and device code.
struct cuComplex
{
    float r; // real part
    float i; // imaginary part
    __host__ __device__ cuComplex( float a, float b ) : r(a), i(b) {}
    __host__ __device__ cuComplex(float a): r(a), i(0) {}
    // Squared magnitude r^2 + i^2 (host-only as written; no __device__ qualifier).
    float magnitude2( void ) { return r * r + i * i; }
    __host__ __device__ cuComplex operator*(const cuComplex& a)
    {
        return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
    }
    __host__ __device__ cuComplex operator*(const float& a)
    {
        return cuComplex(r*a,i*a);
    }
    __host__ __device__ cuComplex operator+(const cuComplex& a)
    {
        return cuComplex(r+a.r, i+a.i);
    }
    __host__ __device__ cuComplex operator+(const float& a)
    {
        return cuComplex(r+a,i);
    }
    __host__ __device__ void operator+=(const float& f)
    {
        r += f;
    }
    __host__ __device__ void operator+=(const cuComplex& C); // defined out-of-line below
    cuComplex(); // NOTE(review): declared but no definition is visible in this chunk — confirm it exists elsewhere.
};

// Left scalar multiplication: f * C.
__host__ __device__ cuComplex operator*(const float &f, const cuComplex &C)
{
    return cuComplex(C.r*f,C.i*f);
}

__host__ __device__ void cuComplex::operator+=(const cuComplex& C)
{
    r +=C.r;
    i += C.i;
}

// Complex magnitude |x| = sqrt(r^2 + i^2).
__host__ __device__ float cuabs(cuComplex x)
{
    return sqrt(x.i*x.i + x.r*x.r);
}

// Complex exponential: e^{arg.r} * (cos(arg.i) + j*sin(arg.i)).
__host__ __device__ cuComplex cuexp(cuComplex arg)
{
    cuComplex res(0,0);
    float s, c;
    float e = expf(arg.r);
    sincosf(arg.i,&s,&c);
    res.r = c * e;
    res.i = s * e;
    return res;
}

// 1 if (x,y) lies on the rectangular NF2FF (near-to-far-field) integration contour, else 0.
__device__ int isOnNF2FFBound(int x, int y)
{
    if(x==NF2FFdistfromboundary||x==nx-NF2FFdistfromboundary||y==NF2FFdistfromboundary||y==ny-NF2FFdistfromboundary)
    {
        return 1;
    }
    else
    {
        return 0;
    }
}

// Maps a linear NF2FF-contour thread index to the x cell coordinate of that
// contour point. Legs are laid out in order: yn, xp, yp, xn.
__device__ int getxfromthreadIdNF2FF(int index)
{
    int x=0;
    if(index<(nx-2*NF2FFdistfromboundary-2))//yn
    {
        x = index+NF2FFdistfromboundary+1;
    }
    else if(index<(nx-4*NF2FFdistfromboundary+ny-2))//xp
    {
        x = nx-NF2FFdistfromboundary-1;
    }
    else if(index<(2*nx-6*NF2FFdistfromboundary+ny-4))//yp
    {
        x = nx-NF2FFdistfromboundary - (index-(nx-4*NF2FFdistfromboundary+ny-2))-2;
    }
    else if(index<(2*nx-8*NF2FFdistfromboundary+2*ny-4))//xn notice 2*nx-8*NF2FFdistfromboundary+2*ny-4 is the max index term.
    {
        x = NF2FFdistfromboundary;
    }
    return x;
}

// Companion of getxfromthreadIdNF2FF: y cell coordinate for a contour index.
__device__ int getyfromthreadIdNF2FF(int index)
{
    int y=0;
    if(index<(nx-2*NF2FFdistfromboundary-2))
    {
        y = NF2FFdistfromboundary;
    }
    else if(index<(nx-4*NF2FFdistfromboundary+ny-2))
    {
        y = (index-(nx-2*NF2FFdistfromboundary-2))+NF2FFdistfromboundary;
    }
    else if(index<(2*nx-6*NF2FFdistfromboundary+ny-4))
    {
        y = ny-NF2FFdistfromboundary-1;
    }
    else if(index<(2*nx-8*NF2FFdistfromboundary+2*ny-4))
    {
        y = ny-NF2FFdistfromboundary-(index-(2*nx-6*NF2FFdistfromboundary+ny-4))-1;
    }
    return y;
}

// Host copy of getxfromthreadIdNF2FF (same mapping, callable from CPU post-processing).
int CPUgetxfromthreadIdNF2FF(int index)
{
    int x=0;
    if(index<(nx-2*NF2FFdistfromboundary-2))//yn
    {
        x = index+NF2FFdistfromboundary+1;
    }
    else if(index<(nx-4*NF2FFdistfromboundary+ny-2))//xp
    {
        x = nx-NF2FFdistfromboundary-1;
    }
    else if(index<(2*nx-6*NF2FFdistfromboundary+ny-4))//yp
    {
        x = nx-NF2FFdistfromboundary - (index-(nx-4*NF2FFdistfromboundary+ny-2))-2;
    }
    else if(index<(2*nx-8*NF2FFdistfromboundary+2*ny-4))//xn notice 2*nx-8*NF2FFdistfromboundary+2*ny-4 is the max index term.
{
    x = NF2FFdistfromboundary;
}
return x;
}

// Host copy of getyfromthreadIdNF2FF: y cell coordinate for an NF2FF contour index.
int CPUgetyfromthreadIdNF2FF(int index)
{
    int y=0;
    if(index<(nx-2*NF2FFdistfromboundary-2))
    {
        y = NF2FFdistfromboundary;
    }
    else if(index<(nx-4*NF2FFdistfromboundary+ny-2))
    {
        y = (index-(nx-2*NF2FFdistfromboundary-2))+NF2FFdistfromboundary;
    }
    else if(index<(2*nx-6*NF2FFdistfromboundary+ny-4))
    {
        y = ny-NF2FFdistfromboundary-1;
    }
    else if(index<(2*nx-8*NF2FFdistfromboundary+2*ny-4))
    {
        y = ny-NF2FFdistfromboundary-(index-(2*nx-6*NF2FFdistfromboundary+ny-4))-1;
    }
    return y;
}

// Leg predicates for the NF2FF contour. The x faces win at the corners:
// isOnyp/isOnyn exclude cells that also satisfy isOnxn/isOnxp.
__device__ __host__ int isOnxn(int x)
{
    if(x==(NF2FFdistfromboundary))
    {
        return 1;
    }
    else
    {
        return 0;
    }
}
__device__ __host__ int isOnxp(int x)
{
    if(x==(nx-NF2FFdistfromboundary-1))
    {
        return 1;
    }
    else
    {
        return 0;
    }
}
__device__ __host__ int isOnyp(int x,int y)
{
    if(y==(ny-NF2FFdistfromboundary-1)&&!isOnxn(x)&&!isOnxp(x))
    {
        return 1;
    }
    else
    {
        return 0;
    }
}
__device__ __host__ int isOnyn(int x, int y)
{
    if((y==(NF2FFdistfromboundary))&&!isOnxn(x)&&!(isOnxp(x)))
    {
        return 1;
    }
    else
    {
        return 0;
    }
}

// Row-major linear index of cell (x,y) in an array whose row length is `size`.
__device__ int dgetCell(int x, int y, int size)
{
    return x +y*size;
}

// Accumulates the running DFT (at frequency *f) of the equivalent surface
// currents Jz and M on the four legs of the NF2FF contour, scattered-field run.
// One thread per contour point.
__global__ void calculate_JandM(float* f,int* timestep,float*dev_Ez,float*dev_Hy,float*dev_Hx,cuComplex *cjzxp,cuComplex *cjzyp,cuComplex*cjzxn,cuComplex*cjzyn,cuComplex*cmxyp,cuComplex*cmyxp,cuComplex*cmxyn,cuComplex*cmyxn)
{
    float freq = *f;
    int index = threadIdx.x+blockIdx.x*blockDim.x;// should launch 2*nx-8*NF2FFdistfromboundary+2*ny-4 threads.
    // NOTE(review): `<=` admits index==size_NF2FF_total; that index maps to
    // (0,0), which is on no leg, so nothing is written — benign, but `<` would
    // be the precise bound.
    if(index<=size_NF2FF_total)
    {
        const cuComplex j(0.0,1.0);
        int x = getxfromthreadIdNF2FF(index);
        int y = getyfromthreadIdNF2FF(index);
        float Ez;
        cuComplex pi(PI , 0);
        cuComplex two(2.0,0.0);       // NOTE(review): unused local
        cuComplex negativeone(-1.0,0); // NOTE(review): unused local
        cuComplex deltatime(dt,0);
        if(isOnyp(x,y))
        {
            // Ez averaged to the Hx sample point on the +y leg.
            Ez = (dev_Ez[dgetCell(x,y+1,nx+1)]+dev_Ez[dgetCell(x,y,nx+1)])/2;
            float Hx = dev_Hx[dgetCell(x,y,nx)];
            cjzyp[index-(nx+ny-4*NF2FFdistfromboundary-2)] += -1*Hx*deltatime*cuexp((float)(-1)*j*(float)2*pi*freq*(float)(*timestep)*deltatime);//cjzyp and cmxyp have nx - 2*NF2FFBoundary -2 elements
            cmxyp[index-(nx+ny-4*NF2FFdistfromboundary-2)] += -1*Ez*deltatime*cuexp((float)-1.0*j*(float)2.0*(float)PI*freq*((float)(*timestep)+0.5)*(float)dt);
        }
        else if(isOnxp(x))//X faces override y faces at their intersections
        {
            Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x+1,y,nx+1)])/2;
            float Hy = dev_Hy[dgetCell(x,y,nx)];
            cjzxp[index-(nx-2*NF2FFdistfromboundary-2)] += Hy*deltatime*cuexp(-1*j*2*pi*freq*(float)(*timestep)*(float)dt);//cjzxp and cmyxp have ny-2*NF2FFBound elements
            cmyxp[index-(nx-2*NF2FFdistfromboundary-2)] += Ez*(float)dt*cuexp((float)(-1)*j*(float)2.0*pi*freq*((float)(*timestep)+0.5)*(float)dt);// this is the discrete fourier transform, by the way.
}
        else if(isOnyn(x,y))
        {
            Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x,y+1,nx+1)])/2;
            float Hx=dev_Hx[dgetCell(x,y,nx)];
            cjzyn[index] += Hx*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*(float)(*timestep)*(float)dt);
            //cjzyn and cmxyn need to have nx-2*NF2FFbound-2 elements
            cmxyn[index] += Ez*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*((float)(*timestep)+0.5)*(float)dt);
        }
        else if(isOnxn(x))
        {
            Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x+1,y,nx+1)])/2;
            cjzxn[index-(2*nx+ny-6*NF2FFdistfromboundary-4)] += -1*dev_Hy[dgetCell(x,y,nx)]*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*(float)(*timestep)*(float)dt);
            // cjzxn and cmyxn must have ny-2*NFdistfromboundary elements
            cmyxn[index-(2*nx+ny-6*NF2FFdistfromboundary-4)] += -1*Ez*(float)dt*cuexp(-1.0*j*2.0*(float)PI*freq*((float)(*timestep)+0.5)*(float)dt);
        }
    }
}

// Incident plane-wave waveform: gaussian pulse of width tau evaluated at cell
// offset (x,y) for incidence angle Phi_inc, with spatial phase reference l.
// `timestep` may be half-integer (field components are staggered in time).
__host__ __device__ float fwf(float timestep,float x, float y,float Phi_inc,float l)
{
    float ar;
    float ky, kx;//k hat
    sincosf(Phi_inc,&ky,&kx);
    ar = (float)timestep*dt-(float)t0-(1/(float)c0)*(ky*y*dx+kx*x*dy-l);
    //ar = timestep*dt-t0;
    //return exp(-1*(ar*ar)/(tau*tau));// gaussian pulse argument is k dot r,
    return exp(-1*ar*ar/(tau*tau));
    //return sin(2*PI*1e9*timestep*dt);
}

// Scattered-field H update (2D TMz FDTD) with CPML in the outer `ncells` cells.
// One thread per (x,y) H cell; NaN results are clamped to 0 before store.
// Fixed in review: the thread guard tested y against nx (was y<nx) and the
// +y-side Psi_hxy read used y-nx+20 while the matching write used y-ny+20.
// Both were benign only because nx == ny in this configuration.
__global__ void H_field_update(float*dev_Hy,float*dev_Hx,float*dev_Ez,float*dev_bmx,float*dev_Psi_hyx,float*dev_amx,float*dev_bmy,float*dev_amy,float*dev_Psi_hxy,float*kex)
{
    float buffer_Hy;
    float buffer_Hx;
    float Chez = (dt/dx)/(mu0);
    int x = threadIdx.x +blockDim.x*blockIdx.x;
    int y = threadIdx.y + blockDim.y*blockIdx.y;
    if(x<nx&&y<ny) // fixed: was y<nx
    {
        // Interior curl update; PML branches below recompute with 1/kappa stretching.
        buffer_Hy = dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
        buffer_Hx = dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
        if(x<ncells)
        {
            buffer_Hy= dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[ncells-1-x];
            dev_Psi_hyx[dgetCell(x,y,20)]=dev_bmx[ncells-1-x]*dev_Psi_hyx[dgetCell(x,y,20)]+dev_amx[ncells-1-x]*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
            buffer_Hy+=Chez*dx*dev_Psi_hyx[dgetCell(x,y,20)] ;
        }
        if(x>=(nx-ncells))
        {
            buffer_Hy=dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[x-nx+ncells];
            dev_Psi_hyx[dgetCell(x-nx+20,y,2*ncells)]=dev_bmx[x-nx+ncells]*dev_Psi_hyx[dgetCell(x-nx+20,y,20)]+dev_amx[x-nx+ncells]*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
            buffer_Hy+=Chez*dx*dev_Psi_hyx[dgetCell(x-nx+20,y,20)];
        }
        if(y<ncells)
        {
            buffer_Hx=dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[ncells-1-y];
            dev_Psi_hxy[dgetCell(x,y,nx)]=dev_bmy[ncells-1-y]*dev_Psi_hxy[dgetCell(x,y,nx)]+dev_amy[ncells-1-y]*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
            buffer_Hx-=Chez*dy*dev_Psi_hxy[dgetCell(x,y,nx)];
        }
        if(y>=(ny-ncells))
        {
            buffer_Hx=dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[y-ny+ncells];
            dev_Psi_hxy[dgetCell(x,y-ny+20,nx)]=dev_bmy[y-ny+ncells]*dev_Psi_hxy[dgetCell(x,y-ny+20,nx)]+dev_amy[y-ny+ncells]*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
            buffer_Hx-=Chez*dy*dev_Psi_hxy[dgetCell(x,y-ny+20,nx)]; // fixed: was y-nx+20, inconsistent with the write above
        }
        //__syncthreads();
        if(isnan(buffer_Hx))
        {
            dev_Hx[dgetCell(x,y,nx)] = 0.0;
        }
        else
        {
            dev_Hx[dgetCell(x,y,nx)] = buffer_Hx;
        }
        if(isnan(buffer_Hy))
        {
            dev_Hy[dgetCell(x,y,nx)] = 0.0;
        }
        else
        {
            dev_Hy[dgetCell(x,y,nx)] = buffer_Hy;
        }
        //dev_Hx[dgetCell(x,y,nx)] = buffer_Hx;
        //dev_Hy[dgetCell(x,y,nx)] = buffer_Hy;
    }
}

// Incident-field H update: identical stencil to H_field_update but operating on
// the incident-field arrays (total-field/scattered-field bookkeeping).
// Fixed in review: thread guard tested y against nx (was y<nx); benign only
// because nx == ny here.
__global__ void H_inc_update(float*dev_Hy,float*dev_Hx,float*dev_Ez,float*dev_bmx,float*dev_Psi_hyx,float*dev_amx,float*dev_bmy,float*dev_amy,float*dev_Psi_hxy,float*kex)
{
    float buffer_Hy;
    float buffer_Hx;
    float Chez = (dt/dx)/(mu0);
    int x = threadIdx.x +blockDim.x*blockIdx.x;
    int y = threadIdx.y + blockDim.y*blockIdx.y;
    if(x<nx&&y<ny) // fixed: was y<nx
    {
        buffer_Hy =
dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
        buffer_Hx = dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
        if(x<ncells)
        {
            buffer_Hy= dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[ncells-1-x];
            dev_Psi_hyx[dgetCell(x,y,20)]=dev_bmx[ncells-1-x]*dev_Psi_hyx[dgetCell(x,y,20)]+dev_amx[ncells-1-x]*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
            buffer_Hy+=Chez*dx*dev_Psi_hyx[dgetCell(x,y,20)] ;
        }
        if(x>=(nx-ncells))
        {
            buffer_Hy=dev_Hy[dgetCell(x,y,nx)]+Chez*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[x-nx+ncells];
            dev_Psi_hyx[dgetCell(x-nx+20,y,2*ncells)]=dev_bmx[x-nx+ncells]*dev_Psi_hyx[dgetCell(x-nx+20,y,20)]+dev_amx[x-nx+ncells]*(dev_Ez[dgetCell(x+1,y,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
            buffer_Hy+=Chez*dx*dev_Psi_hyx[dgetCell(x-nx+20,y,20)];
        }
        if(y<ncells)
        {
            buffer_Hx=dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[ncells-1-y];
            dev_Psi_hxy[dgetCell(x,y,nx)]=dev_bmy[ncells-1-y]*dev_Psi_hxy[dgetCell(x,y,nx)]+dev_amy[ncells-1-y]*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
            buffer_Hx-=Chez*dy*dev_Psi_hxy[dgetCell(x,y,nx)];
        }
        if(y>=(ny-ncells))
        {
            buffer_Hx=dev_Hx[dgetCell(x,y,nx)]-Chez*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)])/kex[y-ny+ncells];
            dev_Psi_hxy[dgetCell(x,y-ny+20,nx)]=dev_bmy[y-ny+ncells]*dev_Psi_hxy[dgetCell(x,y-ny+20,nx)]+dev_amy[y-ny+ncells]*(dev_Ez[dgetCell(x,y+1,nx+1)]-dev_Ez[dgetCell(x,y,nx+1)]);
            // NOTE(review): the read below indexes with y-nx+20 while the write
            // above used y-ny+20; identical only because nx == ny here.
            buffer_Hx-=Chez*dy*dev_Psi_hxy[dgetCell(x,y-nx+20,nx)];
        }
        //__syncthreads();
        if(isnan(buffer_Hx))
        {
            dev_Hx[dgetCell(x,y,nx)] = 0.0;
        }
        else
        {
            dev_Hx[dgetCell(x,y,nx)] = buffer_Hx;
        }
        if(isnan(buffer_Hy))
        {
            dev_Hy[dgetCell(x,y,nx)] = 0.0;
        }
        else
        {
            dev_Hy[dgetCell(x,y,nx)] = buffer_Hy;
        }
        //dev_Hx[dgetCell(x,y,nx)] = buffer_Hx;
        //dev_Hy[dgetCell(x,y,nx)] = buffer_Hy;
    }
}

// Scattered-field Ez update with per-cell material coefficients (Ceze/Cezhy/Cezhx)
// and CPML in the outer ncells. When `isscattering` is set, the incident plane
// wave (fwf at half-timesteps, angle *Phi) is injected through the Cezeic/Cezeip
// coefficient arrays; otherwise a point source drives cell (source_x,source_y).
// Domain edges (x or y == 0 or nx/ny) are held at 0; NaNs are clamped to 0.
__global__ void E_field_update(int *i,float*dev_Ez,float*dev_Hy,float*dev_Hx,float*dev_Psi_ezx,float*dev_aex,float*dev_aey,float*dev_bex,float*dev_bey,float*dev_Psi_ezy,float*kex,float*Cezhy,float*Cezhx,float*Ceze,float*Cezeip,float*Cezeic,float*Phi)
{
    int x=threadIdx.x+blockDim.x*blockIdx.x;
    int y=threadIdx.y+blockDim.y*blockIdx.y;
    // int offset = x+y*blockDim.x*gridDim.x;
    float buffer_Ez;
    //float Ceh = (dt/dx)/(eps0);
    float Cezj = -dt/eps0;
    float length_offset; // NOTE(review): unused local
    if(x<=nx&&y<=ny)
    {
        //if(x==0||x==nx||y==0||y==ny)
        if(x==nx||y==ny||x==0||y==0)
        {
            buffer_Ez=0.0;
        }
        else
        {
            if(isscattering)
            {
                buffer_Ez = Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)])
                -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)])
                +Cezeic[dgetCell(x,y,nx+1)]*fwf((float)(*i)+0.5,x-nx/2,y-ny/2,*Phi,-breast_radius)
                +Cezeip[dgetCell(x,y,nx+1)]*fwf((float)(*i)-0.5,x-nx/2,y-ny/2,*Phi,-breast_radius);
            }
            else
            {
                buffer_Ez = Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)])
                -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)]);
                if(x==(int)(source_x)&&y==(int)(source_y))
                {
                    buffer_Ez=buffer_Ez + 100*Cezj*fwf((float)(*i),0,0,0,0);
                }
            }
            //if(x==((int)nx/2)&&y==((int)nx/2))
            //{
            // //buffer_Ez=buffer_Ez + Cezj*dev_Jz[*i];
            // buffer_Ez=buffer_Ez + Cezj*fwf((float)(*i),0,0,0,0);
            //}
            if(x<=ncells&&x!=0)
            {
                buffer_Ez = Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)])/kex[ncells-x]
                -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)])/kex[ncells-x];
                dev_Psi_ezx[dgetCell(x-1,y-1,20)] = dev_bex[ncells-x]*dev_Psi_ezx[dgetCell(x-1,y-1,20)]+dev_aex[ncells-x]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)]);
                buffer_Ez += Cezhy[dgetCell(x,y,nx+1)]*dx*dev_Psi_ezx[dgetCell(x-1,y-1,2*ncells)];
            }
            if(x>=(nx-ncells)&&x!=nx)
            {
                buffer_Ez = Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)])/kex[x-nx+ncells]
                -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)])/kex[x-nx+ncells];
                dev_Psi_ezx[dgetCell(x-nx+20,y-1,20)]=dev_bex[x-nx+ncells]*dev_Psi_ezx[dgetCell(x-nx+20,y-1,20)]+dev_aex[x-nx+ncells]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)]);
                buffer_Ez+=Cezhy[dgetCell(x,y,nx+1)]*dx*dev_Psi_ezx[dgetCell(x-nx+20,y-1,2*ncells)];
            }
            if(y<=ncells&&y!=0)
            {
                buffer_Ez = Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)])/kex[ncells-y]
                -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)])/kex[ncells-y];
                dev_Psi_ezy[dgetCell(x-1,y-1,nx)]=dev_bey[(ncells-y)]*dev_Psi_ezy[dgetCell(x-1,y-1,nx)]+dev_aey[(ncells-y)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)]);
                buffer_Ez-=Cezhx[dgetCell(x,y,nx+1)]*dy*dev_Psi_ezy[dgetCell(x-1,y-1,nx)];
            }
            if(y>=(ny-ncells)&&y!=ny)
            {
                buffer_Ez = Ceze[dgetCell(x,y,nx+1)]*dev_Ez[dgetCell(x,y,nx+1)]+Cezhy[dgetCell(x,y,nx+1)]*(dev_Hy[dgetCell(x,y,nx)]-dev_Hy[dgetCell(x-1,y,nx)])/kex[y-ny+ncells]
                -Cezhx[dgetCell(x,y,nx+1)]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)])/kex[y-ny+ncells];
                dev_Psi_ezy[dgetCell(x-1,y-ny+20,nx)]=dev_bey[y-ny+ncells]*dev_Psi_ezy[dgetCell(x-1,y-ny+20,nx)]+dev_aey[y-ny+ncells]*(dev_Hx[dgetCell(x,y,nx)]-dev_Hx[dgetCell(x,y-1,nx)]);
                buffer_Ez-=Cezhx[dgetCell(x,y,nx+1)]*dy*dev_Psi_ezy[dgetCell(x-1,y-ny+20,nx)];
            }
        }
        // unsigned char green = 128+127*buffer_Ez/0.4;
        /*ptr[offset].x = 0;
        ptr[offset].y = green;
        ptr[offset].z = 0;
        ptr[offset].w = 255;*///OpenGL stuff
        //__syncthreads();
        if(isnan(buffer_Ez))
        {
            dev_Ez[dgetCell(x,y,nx+1)] = 0.0;
        }
        else
        {
            dev_Ez[dgetCell(x,y,nx+1)] = buffer_Ez;
        }
        //dev_Ez[dgetCell(x,y,nx+1)] = buffer_Ez;
    }
}

// Resets field arrays, CPML Psi accumulators and NF2FF DFT accumulators to
// zero between excitation angles (one thread per cell; a separate flat index
// covers the NF2FF accumulator arrays).
__global__ void Field_reset(float* Ez, float* Hy, float* Hx, float* Psi_ezy,float* Psi_ezx,float*
Psi_hyx,float* Psi_hxy,cuComplex*cjzyn,cuComplex*cjzxp,cuComplex*cjzyp,cuComplex*cjzxn,cuComplex*cmxyn,cuComplex*cmyxp,cuComplex*cmxyp,cuComplex*cmyxn)
{
    int x = threadIdx.x + blockIdx.x*blockDim.x;
    int y = threadIdx.y+blockDim.y*blockIdx.y;
    int index = x + y*blockDim.x*gridDim.x;
    if(x<=ncells&&x!=0)
    {
        Psi_ezx[dgetCell(x-1,y-1,20)] =0;
    }
    if(x>=(nx-ncells)&&x!=nx)
    {
        Psi_ezx[dgetCell(x-nx+20,y-1,20)]=0;
    }
    if(y<=ncells&&y!=0)
    {
        Psi_ezy[dgetCell(x-1,y-1,nx)]=0;
    }
    if(y>=(ny-ncells)&&y!=ny)
    {
        Psi_ezy[dgetCell(x-1,y-ny+20,nx)]=0;
    }
    if(x<ncells)
    {
        Psi_hyx[dgetCell(x,y,20)]=0;
    }
    if(x>=(nx-ncells))
    {
        Psi_hyx[dgetCell(x-nx+20,y,2*ncells)]=0.0;
    }
    if(y<ncells)
    {
        Psi_hxy[dgetCell(x,y,nx)]=0.0;
    }
    if(y>=(ny-ncells))
    {
        Psi_hxy[dgetCell(x,y-ny+20,nx)]=0.0;
    }
    if(x<=nx&&y<=ny)
    {
        Ez[dgetCell(x,y,nx+1)] = 0.0;
    }
    if(x<nx&&y<ny)
    {
        Hy[dgetCell(x,y,nx)] = 0.0;
        Hx[dgetCell(x,y,nx)] = 0.0;
    }
    if(index<=size_NF2FF_total)
    {
        // NOTE(review): the locals below shadow the outer x/y and the Ez
        // parameter; j/pi/two/negativeone/deltatime/Ez are unused here.
        const cuComplex j(0.0,1.0);
        int x = getxfromthreadIdNF2FF(index);
        int y = getyfromthreadIdNF2FF(index);
        float Ez;
        cuComplex pi(PI , 0);
        cuComplex two(2.0,0.0);
        cuComplex negativeone(-1.0,0);
        cuComplex deltatime(dt,0);
        if(index<size_cjzy)
        {
            cjzyp[index] = cuComplex(0,0);//cjzyp and cmxyp have nx - 2*NF2FFBoundary -2 elements
            cjzyn[index] = cuComplex(0,0);
            cmxyp[index] = cuComplex(0,0);
            cmxyn[index] = cuComplex(0,0);
        }
        if(index<size_cjzx)
        {
            cjzxp[index] = cuComplex(0,0);
            cjzxn[index] = cuComplex(0,0);
            cmyxp[index] = cuComplex(0,0);
            cmyxn[index] = cuComplex(0,0);
        }
    }
}

// Incident-field Ez update in homogeneous background (eps_r = 1, sigma = 0,
// hence scalar Ceze/Cezhy) with CPML and the 100x point source at
// (source_x, source_y). Also rotates the two-level incident-field history:
// Ezip <- Ezic, Ezic <- new value.
__global__ void E_inc_update(int *i,float*dev_Hy_inc,float*dev_Hx_inc,float*dev_Psi_ezx_inc,float*dev_aex,float*dev_aey,float*dev_bex,float*dev_bey,float*dev_Psi_ezy_inc,float*kex,float*dev_Ezip,float*dev_Ezic,float*Phi)
{
    int x=threadIdx.x+blockDim.x*blockIdx.x;
    int y=threadIdx.y+blockDim.y*blockIdx.y;
    // int offset = x+y*blockDim.x*gridDim.x;
    float buffer_Ez;
    //float Ceh = (dt/dx)/(eps0);
    float Cezj = -dt/eps0;
    float Ceze = 1;
    float Cezhy = (dt/(dx*eps0));
    if(x<=nx&&y<=ny)
    {
        //if(x==0||x==nx||y==0||y==ny)
        if(x==nx||y==ny||x==0||y==0)
        {
            buffer_Ez=0.0;
        }
        else
        {
            buffer_Ez = Ceze*dev_Ezic[dgetCell(x,y,nx+1)]+Cezhy*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)])
            -Cezhy*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)]);
            if(x==((int)source_x)&&y==(int)(source_y))
            {
                //buffer_Ez=buffer_Ez + Cezj*dev_Jz[*i];
                buffer_Ez=buffer_Ez + 100*Cezj*fwf((float)(*i),0,0,0,0);
            }
            if(x<=ncells&&x!=0)
            {
                buffer_Ez = Ceze*dev_Ezic[dgetCell(x,y,nx+1)]+Cezhy*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)])/kex[ncells-x]
                -Cezhy*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)])/kex[ncells-x];
                dev_Psi_ezx_inc[dgetCell(x-1,y-1,20)] = dev_bex[ncells-x]*dev_Psi_ezx_inc[dgetCell(x-1,y-1,20)]+dev_aex[ncells-x]*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)]);
                buffer_Ez += Cezhy*dx*dev_Psi_ezx_inc[dgetCell(x-1,y-1,2*ncells)];
            }
            if(x>=(nx-ncells)&&x!=nx)
            {
                buffer_Ez = Ceze*dev_Ezic[dgetCell(x,y,nx+1)]+Cezhy*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)])/kex[x-nx+ncells]
                -Cezhy*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)])/kex[x-nx+ncells];
                dev_Psi_ezx_inc[dgetCell(x-nx+20,y-1,20)]=dev_bex[x-nx+ncells]*dev_Psi_ezx_inc[dgetCell(x-nx+20,y-1,20)]+dev_aex[x-nx+ncells]*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)]);
                buffer_Ez+=Cezhy*dx*dev_Psi_ezx_inc[dgetCell(x-nx+20,y-1,2*ncells)];
            }
            if(y<=ncells&&y!=0)
            {
                buffer_Ez = Ceze*dev_Ezic[dgetCell(x,y,nx+1)]+Cezhy*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)])/kex[ncells-y]
                -Cezhy*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)])/kex[ncells-y];
                dev_Psi_ezy_inc[dgetCell(x-1,y-1,nx)]=dev_bey[(ncells-y)]*dev_Psi_ezy_inc[dgetCell(x-1,y-1,nx)]+dev_aey[(ncells-y)]*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)]);
                buffer_Ez-=Cezhy*dy*dev_Psi_ezy_inc[dgetCell(x-1,y-1,nx)];
            }
            if(y>=(ny-ncells)&&y!=ny)
            {
                buffer_Ez = Ceze*dev_Ezic[dgetCell(x,y,nx+1)]+Cezhy*(dev_Hy_inc[dgetCell(x,y,nx)]-dev_Hy_inc[dgetCell(x-1,y,nx)])/kex[y-ny+ncells]
                -Cezhy*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)])/kex[y-ny+ncells];
                dev_Psi_ezy_inc[dgetCell(x-1,y-ny+20,nx)]=dev_bey[y-ny+ncells]*dev_Psi_ezy_inc[dgetCell(x-1,y-ny+20,nx)]+dev_aey[y-ny+ncells]*(dev_Hx_inc[dgetCell(x,y,nx)]-dev_Hx_inc[dgetCell(x,y-1,nx)]);
                buffer_Ez-=Cezhy*dy*dev_Psi_ezy_inc[dgetCell(x-1,y-ny+20,nx)];
            }
        }
        // Rotate the incident-field time history.
        dev_Ezip[dgetCell(x,y,nx+1)] = dev_Ezic[dgetCell(x,y,nx+1)];
        dev_Ezic[dgetCell(x,y,nx+1)] = buffer_Ez;
    }
}

// Host-side: radiated power from the accumulated NF2FF J and M phasors,
// P = 0.5 * Re( sum over the contour of -J* x M . n dl ).
float calc_radiated_power(cuComplex *cjzxp,cuComplex *cjzyp,cuComplex *cjzxn,cuComplex *cjzyn,cuComplex *cmxyp,cuComplex *cmyxp,cuComplex *cmxyn,cuComplex *cmyxn)
{
    int indexofleg1 = nx-2*NF2FFdistfromboundary-2;
    int indexofleg2 = nx+ny-4*NF2FFdistfromboundary-2;
    int indexofleg3 = 2*nx+ny-6*NF2FFdistfromboundary-4;
    int maxindex = 2*nx-8*NF2FFdistfromboundary+2*ny-4;
    int index;
    cuComplex cjz(0,0);
    cuComplex power = 0;
    for(index = 0; index<indexofleg1;index++)
    {
        cjz = cuComplex(cjzyn[index].r,-1.0*cjzyn[index].i);//conjugation
        //z x x = y dot -y = -1
        power+=-1.0*cjz*cmxyn[index]*dx;// the negative one comes from the dot product between JxM and the n hat vector
    }
    for(index = indexofleg1; index<indexofleg2;index++)
    {
        cjz = cuComplex(cjzxp[index-indexofleg1].r,-1.0*cjzxp[index-indexofleg1].i);//making the conjugate
        // z cross y = -x dot x = -1
        power+= -1.0*cjz*cmyxp[index-indexofleg1]*dy;//positive x unit normal vector
    }
    for(index = indexofleg2;index<indexofleg3;index++)
    {
        // z cross x = y dot y = 1
        cjz = cuComplex(cjzyp[index-indexofleg2].r,-1.0*cjzyp[index-indexofleg2].i);
        power+= cjz*cmxyp[index-indexofleg2]*dx;//postive y unit normal vector
    }
    for(index = indexofleg3;index<maxindex;index++)
    {
        // z cross y = -x dot -x = 1
        cjz = cuComplex(cjzxn[index-indexofleg3].r,-1.0*cjzxn[index-indexofleg3].i);
        power += cjz*cmyxn[index-indexofleg3]*dy;// negative x hat n vector
    }
    float realpower = power.r;
    realpower *= 0.5;
return realpower;
}

// Incident power spectral density at `freq` for the gaussian source pulse
// (analytic Fourier transform of the pulse, scaled by the free-space impedance).
float calc_incident_power(float freq)
{
    return (0.5/eta0)*pow(tau*sqrt(PI)*exp(-tau*tau*2*PI*freq*2*PI*freq/4),2);// just gonna assume gaussian pulse. This is the fourier transform of the gaussian pulse.
}

// Total-field variant of calculate_JandM: adds the incident field
// (Hx_inc/Hy_inc and the time-averaged Ezic/Ezip history) to the scattered
// field before accumulating the J/M DFTs on the NF2FF contour.
// NOTE(review): here the M-current DFT phase uses (*timestep)-0.5 where
// calculate_JandM uses (*timestep)+0.5 — confirm which half-step is intended.
__global__ void calculate_JandM_total(float* f,int* timestep,float*dev_Ez,float*dev_Hy,float*dev_Hx,cuComplex *cjzxp,cuComplex *cjzyp,cuComplex*cjzxn,cuComplex*cjzyn,cuComplex*cmxyp,cuComplex*cmyxp,cuComplex*cmxyn,cuComplex*cmyxn,float*dev_Ezic,float*dev_Ezip,float*dev_Hx_inc,float*dev_Hy_inc)
{
    float freq = *f;
    int index = threadIdx.x+blockIdx.x*blockDim.x;// should launch 2*nx-8*NF2FFdistfromboundary+2*ny-4 threads.
    if(index<=size_NF2FF_total)
    {
        const cuComplex j(0.0,1.0);
        int x = getxfromthreadIdNF2FF(index);
        int y = getyfromthreadIdNF2FF(index);
        float Ez;
        cuComplex pi(PI , 0);
        cuComplex two(2.0,0.0);       // NOTE(review): unused local
        cuComplex negativeone(-1.0,0); // NOTE(review): unused local
        cuComplex deltatime(dt,0);
        if(isOnyp(x,y))
        {
            Ez = (dev_Ez[dgetCell(x,y+1,nx+1)]+dev_Ez[dgetCell(x,y,nx+1)])/2;
            // Incident Ez averaged over the two cells and the two time levels.
            Ez += (dev_Ezic[dgetCell(x,y+1,nx+1)] + dev_Ezic[dgetCell(x,y,nx+1)] + dev_Ezip[dgetCell(x,y+1,nx+1)] + dev_Ezip[dgetCell(x,y,nx+1)])/4;
            float Hx = dev_Hx[dgetCell(x,y,nx)] + dev_Hx_inc[dgetCell(x,y,nx)];
            cjzyp[index-(nx+ny-4*NF2FFdistfromboundary-2)] += -1*Hx*deltatime*cuexp((float)(-1)*j*(float)2*pi*freq*(float)(*timestep)*deltatime);//cjzyp and cmxyp have nx - 2*NF2FFBoundary -2 elements
            cmxyp[index-(nx+ny-4*NF2FFdistfromboundary-2)] += -1*Ez*deltatime*cuexp((float)-1.0*j*(float)2.0*(float)PI*freq*((float)(*timestep)-0.5)*(float)dt);
        }
        else if(isOnxp(x))//X faces override y faces at their intersections
        {
            Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x+1,y,nx+1)])/2;
            Ez += (dev_Ezic[dgetCell(x+1,y,nx+1)] + dev_Ezic[dgetCell(x,y,nx+1)] + dev_Ezip[dgetCell(x+1,y,nx+1)] + dev_Ezip[dgetCell(x,y,nx+1)])/4;
            float Hy = dev_Hy[dgetCell(x,y,nx)] + dev_Hy_inc[dgetCell(x,y,nx)];
            cjzxp[index-(nx-2*NF2FFdistfromboundary-2)] += Hy*deltatime*cuexp(-1*j*2*pi*freq*(float)(*timestep)*(float)dt);//cjzxp and cmyxp have ny-2*NF2FFBound elements
            cmyxp[index-(nx-2*NF2FFdistfromboundary-2)] += Ez*(float)dt*cuexp((float)(-1)*j*(float)2.0*pi*freq*((float)(*timestep)-0.5)*(float)dt);// this is the discrete fourier transform, by the way.
        }
        else if(isOnyn(x,y))
        {
            Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x,y+1,nx+1)])/2;
            Ez += (dev_Ezic[dgetCell(x,y+1,nx+1)] + dev_Ezic[dgetCell(x,y,nx+1)] + dev_Ezip[dgetCell(x,y+1,nx+1)] + dev_Ezip[dgetCell(x,y,nx+1)])/4;
            float Hx=dev_Hx[dgetCell(x,y,nx)]+dev_Hx_inc[dgetCell(x,y,nx)];
            cjzyn[index] += Hx*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*(float)(*timestep)*(float)dt);
            //cjzyn and cmxyn need to have nx-2*NF2FFbound-2 elements
            cmxyn[index] += Ez*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*((float)(*timestep)-0.5)*(float)dt);
        }
        else if(isOnxn(x))
        {
            Ez = (dev_Ez[dgetCell(x,y,nx+1)]+dev_Ez[dgetCell(x+1,y,nx+1)])/2;
            Ez += (dev_Ezic[dgetCell(x+1,y,nx+1)] + dev_Ezic[dgetCell(x,y,nx+1)] + dev_Ezip[dgetCell(x+1,y,nx+1)] + dev_Ezip[dgetCell(x,y,nx+1)])/4;
            float Hy = dev_Hy[dgetCell(x,y,nx)] + dev_Hy_inc[dgetCell(x,y,nx)];
            cjzxn[index-(2*nx+ny-6*NF2FFdistfromboundary-4)] += -1*Hy*(float)dt*cuexp((float)(-1)*j*(float)2.0*(float)PI*freq*(float)(*timestep)*(float)dt);
            // cjzxn and cmyxn must have ny-2*NFdistfromboundary elements
            cmyxn[index-(2*nx+ny-6*NF2FFdistfromboundary-4)] += -1*Ez*(float)dt*cuexp(-1.0*j*2.0*(float)PI*freq*((float)(*timestep)-0.5)*(float)dt);
        }
    }
}

// Maps an FDTD cell (x,y) inside the circular optimization region to the index
// of its 9x9 optimization cell (0 .. 80).
__host__ __device__ int getOptimizationCell(int x, int y)
{
    int x_coord,y_coord;
    x_coord = (x-(nx/2-(int)(breast_radius/dx)))/(2*breast_radius/(9*dx));
    y_coord = (y-(ny/2-breast_radius/dy))/(2*breast_radius/(9*dy));//the optimization space is 216 FDTD cells wide and high.
    //The optimization space is split into 25 by 25 optimization cells.
    //each optimization cell has 24 by 24 FDTD cells within it. That's what the 108, 24 and 25 are about.
    // NOTE(review): stale numbers in the comments above (25/24/108) — the code
    // uses a 9x9 partition. Also, the function's closing brace is not visible
    // at this point of the mangled source; N2FPostProcess follows immediately.
    return x_coord+9*y_coord;//The max return should be, 9*9-1, hopefully.
// Host-side near-to-far-field post-processing: from the accumulated J/M DFT
// phasors on the four contour legs, builds the N and L radiation vectors at
// each of numberofobservationangles angles Phi and writes the directivity-like
// quantity D[Phi_index]. N and L must be zero-initialized by the caller.
void N2FPostProcess (float* D,float f, cuComplex *N,cuComplex *L,cuComplex *cjzxp,cuComplex *cjzyp,cuComplex *cjzxn,cuComplex *cjzyn,cuComplex *cmxyp,cuComplex *cmyxp,cuComplex *cmxyn,cuComplex *cmyxn)
{
    int indexofleg1 = nx-2*NF2FFdistfromboundary-2;
    int indexofleg2 = nx+ny-4*NF2FFdistfromboundary-2;
    int indexofleg3 = 2*nx+ny-6*NF2FFdistfromboundary-4;
    int maxindex = 2*nx-8*NF2FFdistfromboundary+2*ny-4;
    int x,y;
    float rhoprime; // distance from grid center to the contour point
    float Psi;      // angle between the contour point and the observation angle
    int Phi_index;
    cuComplex Mphi(0,0); // NOTE(review): unused local
    float Phi;
    float k = 2*PI*f/c0; // free-space wavenumber at frequency f
    cuComplex negativeone(-1.0,0.0); // NOTE(review): unused local
    int index = 0;
    cuComplex jcmpx(0,1);
    //float Prad = calc_radiated_power(cjzxp,cjzyp,cjzxn,cjzyn,cmxyp,cmyxp,cmxyn,cmyxn);
    float Prad = calc_incident_power(f);
    //std::cout<<"Prad = "<<Prad<<std::endl;
    float flx, fly;
    for(Phi_index = 0; Phi_index<numberofobservationangles;Phi_index++)
    {
        Phi = 2*PI/numberofobservationangles*(float)Phi_index;
        // Leg yn
        for(index = 0;index<indexofleg1;index++)
        {
            x = CPUgetxfromthreadIdNF2FF(index);
            y = CPUgetyfromthreadIdNF2FF(index);
            flx = (float)x;//float x
            fly = (float)y + 0.5;
            rhoprime = sqrt(pow((dx*((-1.0*(float)nx/2)+1+flx)),2)+pow((dy*(-1.0*(float)ny/2+1+fly)),2));
            Psi = atan2(-1*((float)ny/2)+1+fly,-1*((float)nx/2)+1+flx)-Phi;
            N[Phi_index]+=-1.0*cjzyn[index]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dx;
            L[Phi_index]+=-1.0*sin(Phi)*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*cmxyn[index]*dx;//Lphi =
        }
        // Leg xp
        for(index = indexofleg1;index<indexofleg2;index++)
        {
            x = CPUgetxfromthreadIdNF2FF(index);
            y = CPUgetyfromthreadIdNF2FF(index);
            flx = (float)x+0.5;
            fly = (float)y;
            rhoprime = sqrt(pow((dx*(((float)nx/2)-1-flx)),2)+pow((dy*(((float)ny/2)-1-fly)),2));
            Psi = atan2(-1*((float)ny/2)+1+fly,(-1*((float)nx/2)+1+flx))-Phi;
            N[Phi_index]+=-1.0*cjzxp[index-indexofleg1]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dy;
            L[Phi_index]+=cos(Phi)*cmyxp[index-indexofleg1]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dy;//L_phi = -Lxsin(phi)+Lycos(Phi) here we only have Ly
        }
        // Leg yp
        for(index=indexofleg2;index<indexofleg3;index++)
        {
            x = CPUgetxfromthreadIdNF2FF(index);
            y = CPUgetyfromthreadIdNF2FF(index);
            flx = (float)x;
            fly = (float)y + 0.5;
            rhoprime = sqrt(pow((dx*(((float)nx/2)-1-flx)),2)+pow((dy*(((float)ny/2)-1-fly)),2));
            Psi = atan2((-1*(float)ny/2+1+fly),(-1*((float)nx/2)+1+flx))-Phi;
            N[Phi_index]+=-1.0*cjzyp[index-indexofleg2]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dx;
            L[Phi_index]+=-1.0*sin(Phi)*cmxyp[index-indexofleg2]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dx;//
        }
        // Leg xn
        for(index = indexofleg3;index<maxindex;index++)
        {
            x = CPUgetxfromthreadIdNF2FF(index);
            y = CPUgetyfromthreadIdNF2FF(index);
            flx = (float)x+0.5;
            fly = (float)y;
            rhoprime = sqrt(pow(dx*(((float)nx/2)-1-flx),2)+pow((dy*(((float)ny/2)-1-fly)),2));
            Psi = atan2(-1*((float)ny/2)+1+fly,-1*(float)nx/2+1+flx)-Phi;
            N[Phi_index]+=-1.0*cjzxn[index-indexofleg3]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dy;
            L[Phi_index]+= cos(Phi)*cmyxn[index-indexofleg3]*cuexp(1.0*jcmpx*k*rhoprime*cos(Psi))*dy;
        }
        D[Phi_index] = (k*k*cuabs(L[Phi_index]+(float)eta0*N[Phi_index])*cuabs(L[Phi_index]+(float)eta0*N[Phi_index])/((float)8*(float)PI*(float)eta0*Prad*33.329));//why 33.329? I dunno, something is probably wrong with Prad.
} } float fitness(float* D,int max_index, float* measurement) { float fit = 0; for(int i =0;i<max_index;i++) { fit -= pow((measurement[i]-D[i]),2)/(numberofexcitationangles*pow(measurement[i],2)); } return fit; } //static void draw_func(void){ // glDrawPixels(nx,ny,GL_RGBA,GL_UNSIGNED_BYTE,0); // glutSwapBuffers; //} using namespace std; void Ceze_init(float * eps_r_z, float* sig_e_z, float* Ceze); void Cezhy_init(float* eps_r_z, float* sigma_e_z,float*Cezhy,float*kex); void Cezhx_init(float* eps_r_z,float*sigma_e_z,float*Cezhx,float*kex); void eps_r_z_init(float * eps_r_z,const vector<float> &argument); void sigma_e_z_init(float *sigma_e_z,float*sigma_e_pml,const vector<float> &argument); void Cezj_init(float*eps_r_z,float*sigma_e_z,float*Cezj); void Ez_init(float*Ez); void Ey_init(float*Ey); //void Jz_init(float*Jz); void Chxh_init(float*mu_r_x,float*sigma_m_x,float*Chxh); void Chxez_init(float*mu_r_x,float*sigma_m_x,float*Chxez); //void Chxm_init(float*mu_r_x,float*sigma_m_x,float*Chxm); void Chyh_init(float*mu_r_y,float*sigma_m_y,float*Chyh); void Chyez_init(float*mu_r_y,float*sigma_m_y,float*Chyez); //void Chym_init(float*mu_r_y,float*sigma_m_y,float*Chym); void Hy_init(float*Hy); void Hx_init(float*Hx); void My_init(float*My); void Mx_init(float*Mx); void mu_r_y_init(float*mu_r_y); void mu_r_x_init(float*mu_r_x); void sigma_m_y_init(float*sigma_m_y_init); void sigma_m_x_init(float*sigma_m_x_init); int getCell(int x,int y,int size); void Jz_waveform(float * time,float*Jz_impressed); void waveform_time_init(float*time1); float* Make2DfloatArray(int arraySizeX, int arraySizeY); void C_Psi_ezy_init(float *C_Psi_ezy,float*Cezhx); void C_Psi_ezx_init(float* C_Psi_ezx,float*Cezhy); void C_Psi_hyx_init(float*C_Psi_hyx,float*Chyez); void C_psi_hxy_init(float *C_Psi_hxy,float*Chxez); void aex_init(float*aex,float*sigma_e_pml,float*kex,float*alpha_e_x,float*bex); void bex_init(float*bex ,float*sigma_e_pml,float*kex,float*alpha_e_x); void 
bey_init(float*bey,float*sigma_e_pml,float*key,float*alpha_e_y); void amy_init(float*amy,float*sigma_m_pml,float*kmy,float*alpha_m_y,float*bmy); void bmy_init(float*bmy,float*sigma_m_pml,float*kmy,float*alpha_m_y); void amx_init(float*amx,float*sigma_m_pml,float*kmx,float*alpha_m_x,float*bmx); void bmx_init(float*bmx,float*sigma_m_pml,float*kmx,float*alpha_m_x); void alpha_e_init(float*alpha_e); void alpha_m_init(float*alpha_e,float*alpha_m); void k_e_init(float*k); void k_m_init(float*k); void sigma_e_pml_init(float* sigma_e_pml); void sigma_m_pml_init(float*sigma_m_pml,float*sigma_e_pml); void Psi_ezy_init(float*Psi_ezy); void Psi_ezx_init(float*Psi_ezx); void Psi_hyx_init(float*Psi_hyx); void Psi_hxy_init(float*Psi_hxy); void CJ_Init(cuComplex * cjzyn,int size); __global__ void scattered_parameter_init(float*eps_r_z,float*sigma_e_z,float*Cezeic,float*Cezeip); double FDTD_GPU(const vector<double> &arguments) { // BMP Output_Image; //BMP Scattered_Field_snapshot; // Output_Image.SetSize((nx+1),(ny+1)); // Output_Image.SetBitDepth(16); //Scattered_Field_snapshot.SetSize((nx+1),(ny+1)); //Scattered_Field_snapshot.SetBitDepth(16); //RGBApixel Temp; // string outputfilename; cout << "calculating FDTD GPU" << endl; cudaSetDevice(0); vector<float> image; for (int lerp = 0; lerp < 81; lerp++) { //This is setting the material parameters of the optimization cells. 
image.push_back((float)arguments.at(lerp)); //image.push_back(10); } for (int lerp = 81; lerp < 81 * 2; lerp++) { image.push_back((float)arguments.at(lerp)); // image.push_back(0); } //GLuint bufferObj; //cudaGraphicsResource *resource; cudaError_t error; //int dev; //cudaDeviceProp prop; //memset(&prop,sizeof(cudaDeviceProp),sizeof(cudaDeviceProp)); //prop.major = 1; //prop.minor = 1; //cudaChooseDevice(&dev,&prop); // cudaGLSetGLDevice(dev); /*glutInit(&argc,argv); glewInit(); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(nx,ny); glutCreateWindow("bitmap"); glGenBuffers(1,&bufferObj); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj); glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, nx*ny*4,NULL,GL_DYNAMIC_DRAW_ARB); cudaGraphicsGLRegisterBuffer(&resource,bufferObj,cudaGraphicsMapFlagsNone);*/ //uchar4* devPtr; //size_t size; //cudaGraphicsMapResources(1,&resource,NULL); //cudaGraphicsResourceGetMappedPointer((void**)&devPtr,&size,resource); float *Ceze, *Cezhy, *Cezhx, *dev_Cezeic, *dev_Cezeip, *Ez, *eps_r_z, *sigma_e_z, *Hy, *Hx, *kex, *aex, *bex, *amx, *bmx, *alpha_e, *alpha_m, *sigma_e_pml, *sigma_m_pml, *Psi_ezy, *Psi_ezx, *Psi_hyx, *Psi_hxy, *kmx; //*Cezj later if using loop current source float* dev_sigma_e_z,*dev_eps_r_z; float freq = center_freq; float *dev_freq,*D,*dev_Phi; cuComplex *cjzxp,*cjzyp,*cjzxn,*cjzyn,*cmxyp,*cmyxp,*cmxyn,*cmyxn; cuComplex *hcjzxp,*hcjzyp,*hcjzxn,*hcjzyn,*hcmxyp,*hcmyxp,*hcmxyn,*hcmyxn; cuComplex *L,*N; int grid_x = int(ceil((float)nx/22)); int grid_y = int(ceil((float)ny/22)); dim3 grid(grid_x,grid_y); dim3 block(22,22); Ceze = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); Cezhy = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); Cezhx = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); //Cezj = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); // if using loop current source Ez = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); eps_r_z = (float*)malloc(sizeof(float)*(1+nx)*(1+ny)); sigma_e_z = 
(float*)malloc(sizeof(float)*(1+nx)*(1+ny)); D = (float*)malloc(sizeof(float)*numberofexcitationangles*numberofobservationangles);//D = (float*)malloc(numberofobservationangles*sizeof(float)); Hy=(float*)malloc(sizeof(float)*nx*ny); Hx=(float*)malloc(sizeof(float)*nx*ny); kex = (float*)malloc(sizeof(float)*10); kmx = (float*)malloc(sizeof(float)*10); aex=(float*)malloc(sizeof(float)*10); bex=(float*)malloc(sizeof(float)*10); amx=(float*)malloc(sizeof(float)*10); bmx=(float*)malloc(sizeof(float)*10); alpha_e=(float*)malloc(sizeof(float)*10); alpha_m=(float*)malloc(sizeof(float)*10); sigma_e_pml=(float*)malloc(sizeof(float)*10); sigma_m_pml=(float*)malloc(sizeof(float)*10); Psi_ezy=(float*)malloc(sizeof(float)*ny*20); Psi_ezx=(float*)malloc(sizeof(float)*nx*20); Psi_hyx=(float*)malloc(sizeof(float)*ny*20); Psi_hxy=(float*)malloc(sizeof(float)*nx*20); hcjzyp = (cuComplex*)malloc(sizeof(cuComplex )*size_cjzy); hcjzyn = (cuComplex *)malloc(sizeof(cuComplex )*size_cjzy); hcjzxp = (cuComplex *)malloc(sizeof(cuComplex )*size_cjzx); hcjzxn = (cuComplex *)malloc(sizeof(cuComplex )*size_cjzx); hcmxyn = (cuComplex *)malloc(sizeof(cuComplex )*size_cjzy); hcmxyp = (cuComplex *)malloc(sizeof(cuComplex )*size_cjzy); hcmyxp = (cuComplex *)malloc(sizeof(cuComplex )*size_cjzx); hcmyxn = (cuComplex *)malloc(sizeof(cuComplex )*size_cjzx); L = (cuComplex*)malloc(sizeof(cuComplex)*size_NF2FF_total); N = (cuComplex*)malloc(sizeof(cuComplex)*size_NF2FF_total); eps_r_z_init(eps_r_z,image); sigma_e_z_init(sigma_e_z,sigma_e_pml,image); //float*time1; //time1 = (float*)malloc(sizeof(float)*number_of_time_steps); Ceze_init(eps_r_z,sigma_e_z,Ceze); k_e_init(kex); k_m_init(kmx); Cezhy_init(eps_r_z,sigma_e_z,Cezhy,kex); Cezhx_init(eps_r_z,sigma_e_z,Cezhx,kex); sigma_e_pml_init(sigma_e_pml); sigma_m_pml_init(sigma_m_pml,sigma_e_pml); alpha_e_init(alpha_e); alpha_m_init(alpha_e,alpha_m); bex_init(bex ,sigma_e_pml,kex,alpha_e); aex_init(aex,sigma_e_pml,kex,alpha_e,bex); 
bmx_init(bmx,sigma_m_pml,kmx,alpha_m); amx_init(amx,sigma_m_pml,kmx,alpha_m,bmx); for (int i = 0; i < 10; i++) { cout<<"kex["<<i<<"]= "<<kex[i]<<endl; cout<<"kmx["<<i<<"]= "<<kmx[i]<<endl; cout<<"aex["<<i<<"]= "<<aex[i]<<endl; cout<<"amx["<<i<<"]= "<<amx[i]<<endl; cout<<"bex["<<i<<"]= "<<bex[i]<<endl; cout<<"bmx["<<i<<"]= "<<bmx[i]<<endl; cout<<"alpha_e = "<<alpha_e[i]<<endl; cout<<"alpha_m = "<<alpha_m[i]<<endl; cout << endl; } //Jz_init(Jz); //system("pause"); //FILE* file = fopen("results.txt", "w"); //float*Jz_impressed = (float*)malloc(sizeof(float)*number_of_time_steps); //waveform_time_init(time1); //Jz_waveform(time1,Jz_impressed); //int source_position_index_x = int(nx*source_position/domain_size)+1; // int source_position_index_y = int(ny*source_position/domain_size)+1; float*dev_Ceze,*dev_Cezhy,*dev_Cezhx,*dev_bex,*dev_aex,*dev_bmx,*dev_amx,*dev_kex,*dev_kmx;//dev_Cezj if using loop current source float *dev_Ez,*dev_Hy,*dev_Hx; float*dev_Psi_ezy,*dev_Psi_ezx,*dev_Psi_hyx,*dev_Psi_hxy; cudaMalloc(&dev_eps_r_z,sizeof(float)*(nx+1)*(ny+1)); cudaMalloc(&dev_sigma_e_z,sizeof(float)*(nx+1)*(ny+1)); cudaMalloc(&dev_Cezeic,sizeof(float)*(nx+1)*(ny+1)); cudaMalloc(&dev_Cezeip,sizeof(float)*(nx+1)*(ny+1)); cudaMemcpy(dev_eps_r_z,eps_r_z,sizeof(float)*(nx+1)*(ny+1),cudaMemcpyHostToDevice); cudaMemcpy(dev_sigma_e_z,sigma_e_z,sizeof(float)*(nx+1)*(ny+1),cudaMemcpyHostToDevice); scattered_parameter_init<<<grid,block>>>(dev_eps_r_z,dev_sigma_e_z,dev_Cezeic,dev_Cezeip); //float *Cezeic = (float*)malloc((sizeof(float))*(nx+1)*(ny+1)); // float *Cezeip = (float*)malloc((sizeof(float))*(nx+1)*(ny+1)); //cudaMemcpy(Cezeic,dev_Cezeic,sizeof(float)*(nx+1)*(ny+1),cudaMemcpyDeviceToHost); //cudaMemcpy(Cezeip,dev_Cezeip,sizeof(float)*(nx+1)*(ny+1),cudaMemcpyDeviceToHost); float radius; cudaMalloc(&dev_Phi,sizeof(float)); cudaMalloc(&dev_kex,sizeof(float)*10); cudaMalloc(&dev_kmx,sizeof(float)*10); cudaMalloc(&dev_Ez,sizeof(float)*(nx+1)*(ny+1)); 
cudaMalloc(&dev_Hy,sizeof(float)*nx*ny); cudaMalloc(&dev_freq ,sizeof(float)); cudaMalloc(&dev_Hx,sizeof(float)*nx*ny); cudaMalloc(&dev_Psi_ezy,sizeof(float)*20*(nx+1)); cudaMalloc(&dev_Psi_ezx,sizeof(float)*20*(ny+1)); cudaMalloc(&dev_Psi_hyx,sizeof(float)*20*(ny)); cudaMalloc(&dev_Psi_hxy,sizeof(float)*20*(nx)); cudaMalloc(&cjzxp,sizeof(cuComplex)*size_NF2FF_total); cudaMalloc(&cjzyp,sizeof(cuComplex)*size_NF2FF_total); cudaMalloc(&cjzxn,sizeof(cuComplex)*size_NF2FF_total); cudaMalloc(&cjzyn,sizeof(cuComplex)*size_NF2FF_total); cudaMalloc(&cmxyp,sizeof(cuComplex)*size_NF2FF_total); cudaMalloc(&cmxyn,sizeof(cuComplex)*size_NF2FF_total); cudaMalloc(&cmyxp,sizeof(cuComplex)*size_NF2FF_total); cudaMalloc(&cmyxn,sizeof(cuComplex)*size_NF2FF_total); cudaMemcpy(dev_freq,&freq,sizeof(float),cudaMemcpyHostToDevice); cudaMalloc(&dev_bex,sizeof(float)*10); cudaMalloc(&dev_bmx,sizeof(float)*10); cudaMalloc(&dev_amx,sizeof(float)*10); cudaMalloc(&dev_aex,sizeof(float)*10); cudaMalloc(&dev_Ceze,sizeof(float)*(nx+1)*(ny+1)); cudaMalloc(&dev_Cezhy,sizeof(float)*(nx+1)*(ny+1)); //cudaMalloc(&dev_Cezj,sizeof(float)*(nx+1)*(ny+1)); if using current source error = cudaGetLastError(); if (error != cudaSuccess) { printf("Error after cuda Mallocs: %s\n",cudaGetErrorString(error)); } Field_reset<<<grid,block>>>(dev_Ez, dev_Hy, dev_Hx, dev_Psi_ezy, dev_Psi_ezx, dev_Psi_hyx, dev_Psi_hxy,cjzyn,cjzxp,cjzyp,cjzxn,cmxyn,cmyxp,cmxyp,cmyxn); error = cudaGetLastError(); if (error != cudaSuccess) { printf("Error after field reset: %s\n",cudaGetErrorString(error)); } //Field_reset is also good for making all these values zero. 
cudaMemcpy(dev_kex,kex,sizeof(float)*10,cudaMemcpyHostToDevice); cudaMemcpy(dev_kmx,kmx,sizeof(float)*10,cudaMemcpyHostToDevice); cudaMemcpy(dev_aex,aex,sizeof(float)*10,cudaMemcpyHostToDevice); cudaMemcpy(dev_bex,bex,sizeof(float)*10,cudaMemcpyHostToDevice); cudaMemcpy(dev_bmx,bmx,sizeof(float)*10,cudaMemcpyHostToDevice); cudaMemcpy(dev_amx,amx,sizeof(float)*10,cudaMemcpyHostToDevice); cudaMemcpy(dev_Ceze,Ceze,sizeof(float)*(nx+1)*(ny+1),cudaMemcpyHostToDevice); cudaMemcpy(dev_Cezhy,Cezhy,sizeof(float)*(nx+1)*(ny+1),cudaMemcpyHostToDevice); error = cudaGetLastError(); if(error != cudaSuccess) { printf("Error after cuda Memcpy: %s\n",cudaGetErrorString(error)); } int*dev_i; cudaMalloc(&dev_i,sizeof(int)); float test_Ez; dim3 gridNF2FF((int)ceil(size_NF2FF_total/512.0)); dim3 blockNF2FF(512); float test_Ez_2; float Phi; //Output_Image.SetBitDepth(16); //for(int x = 0;x<nx+1;x++)// This double loop makes an image of the target. Delete when using this as forward solver. // for(int y = 0; y<ny+1;y++) // { // { // Temp.Green = 0; // if(eps_r_z[getCell(x,y,nx+1)] >15) // { // Temp.Red = 255; // Temp.Blue = 0; // } // else // { // Temp.Blue = 150; // Temp.Red = 0; // } // } // Output_Image.SetPixel(x,y,Temp); // } //Output_Image.WriteToFile("Permittivity_map_measurement.bmp"); /* The calculation part! 
*/ //ofstream measurement_data; //measurement_data.open("measurement_data.txt"); for(int Phi_index = 0; Phi_index<numberofexcitationangles; Phi_index++) { Phi = Phi_index*2*PI/numberofexcitationangles; cudaMemcpy(dev_Phi,&Phi,sizeof(float),cudaMemcpyHostToDevice); for(int i=0;i<number_of_time_steps;i++) { cudaMemcpy(dev_i,&i,sizeof(int),cudaMemcpyHostToDevice); H_field_update<<<grid,block>>>(dev_Hy,dev_Hx,dev_Ez,dev_bmx,dev_Psi_hyx,dev_amx,dev_bmx,dev_amx,dev_Psi_hxy,dev_kmx); E_field_update<<<grid,block>>>(dev_i,dev_Ez,dev_Hy,dev_Hx,dev_Psi_ezx,dev_aex,dev_aex,dev_bex,dev_bex,dev_Psi_ezy,dev_kex,dev_Cezhy,dev_Cezhy,dev_Ceze,dev_Cezeip,dev_Cezeic,dev_Phi); calculate_JandM<<<gridNF2FF,blockNF2FF>>>(dev_freq, dev_i,dev_Ez,dev_Hy,dev_Hx,cjzxp,cjzyp,cjzxn,cjzyn,cmxyp,cmyxp,cmxyn,cmyxn); } cudaMemcpy(hcjzyn,cjzyn,sizeof(cuComplex)*size_cjzy,cudaMemcpyDeviceToHost); cudaMemcpy(hcjzxp,cjzxp,sizeof(cuComplex)*size_cjzx,cudaMemcpyDeviceToHost); cudaMemcpy(hcjzyp,cjzyp,sizeof(cuComplex)*size_cjzy,cudaMemcpyDeviceToHost); cudaMemcpy(hcjzxn,cjzxn,sizeof(cuComplex)*size_cjzx,cudaMemcpyDeviceToHost); cudaMemcpy(hcmxyn,cmxyn,sizeof(cuComplex)*size_cjzy,cudaMemcpyDeviceToHost); cudaMemcpy(hcmyxp,cmyxp,sizeof(cuComplex)*size_cjzx,cudaMemcpyDeviceToHost); cudaMemcpy(hcmxyp,cmxyp,sizeof(cuComplex)*size_cjzy,cudaMemcpyDeviceToHost); cudaMemcpy(hcmyxn,cmyxn,sizeof(cuComplex)*size_cjzy,cudaMemcpyDeviceToHost); CJ_Init(L,size_NF2FF_total); CJ_Init(N,size_NF2FF_total); N2FPostProcess(D + Phi_index*numberofobservationangles, freq,N,L,hcjzxp,hcjzyp,hcjzxn,hcjzyn,hcmxyp,hcmyxp,hcmxyn,hcmyxn); //notice the D + Phi_index*numberofobservationangles. D is in total 4*numberofobservaion angles, so that's how we fill them in sequentially. 
//for(int i = 0;i<numberofobservationangles;i++) // This is for recording simulated measured data //{ //measurement_data<<*(D+Phi_index*numberofobservationangles+i)<<" , "; //cout<<*(D+Phi_index*numberofobservationangles+i)<<endl; //} //measurement_data<<endl; Field_reset<<<grid,block>>>(dev_Ez, dev_Hy, dev_Hx, dev_Psi_ezy, dev_Psi_ezx, dev_Psi_hyx, dev_Psi_hxy,cjzyn,cjzxp,cjzyp,cjzxn,cmxyn,cmyxp,cmxyp,cmyxn); } float measurement[numberofobservationangles*numberofexcitationangles] = {0.544912 , 0.518606 , 0.439233 , 0.330533 , 0.219116 , 0.135115 , 0.0923969 , 0.0774134 , 0.0740459 , 0.0739238 , 0.0660047 , 0.0465372 , 0.0248307 , 0.00913681 , 0.00186162 , 0.0038402 , 0.0130785 , 0.0238094 , 0.0312918 , 0.035705 , 0.0388307 , 0.039513 , 0.0368443 , 0.0338221 , 0.0324815 , 0.0305907 , 0.0270149 , 0.0239178 , 0.0224438 , 0.021849 , 0.0217346 , 0.0222152 , 0.023146 , 0.0245181 , 0.0267161 , 0.0286964 , 0.0276803 , 0.0235098 , 0.0197177 , 0.0183168 , 0.0196998 , 0.0261493 , 0.0375584 , 0.0479223 , 0.0511598 , 0.0461443 , 0.035713 , 0.0249863 , 0.0203708 , 0.0260456 , 0.0395441 , 0.054163 , 0.0660136 , 0.0763823 , 0.0935922 , 0.132053 , 0.201299 , 0.299247 , 0.410792 , 0.504467 , 0.0490085 , 0.0278468 , 0.0123693 , 0.00899709 , 0.0196632 , 0.0401112 , 0.0623734 , 0.0809561 , 0.096057 , 0.113814 , 0.145125 , 0.200388 , 0.283438 , 0.386362 , 0.486139 , 0.549594 , 0.547993 , 0.475775 , 0.358033 , 0.230962 , 0.118935 , 0.039843 , 0.00700227 , 0.0112335 , 0.0300356 , 0.0494414 , 0.0605159 , 0.0585777 , 0.0503323 , 0.045704 , 0.0474064 , 0.0523123 , 0.0558987 , 0.0545722 , 0.0475098 , 0.0366045 , 0.0248037 , 0.0155752 , 0.0115322 , 0.0127167 , 0.0176523 , 0.0243556 , 0.0310764 , 0.037444 , 0.0432292 , 0.0469609 , 0.0471761 , 0.0435653 , 0.0369347 , 0.0293987 , 0.0235478 , 0.0206039 , 0.020754 , 0.0247748 , 0.0336772 , 0.047007 , 0.0618746 , 0.0734482 , 0.0763332 , 0.0674785 , 0.0463129 , 0.0448933 , 0.0398454 , 0.0319834 , 0.0239428 , 0.0174267 , 0.0129155 , 0.0116624 , 
0.0154122 , 0.0247183 , 0.0376821 , 0.0494142 , 0.0552493 , 0.0544909 , 0.0501016 , 0.0466044 , 0.047395 , 0.0522298 , 0.0576919 , 0.0588555 , 0.0504011 , 0.0311956 , 0.0107719 , 0.00755493 , 0.0394798 , 0.116099 , 0.232324 , 0.36478 , 0.478314 , 0.541685 , 0.541186 , 0.484009 , 0.391878 , 0.291105 , 0.204554 , 0.145352 , 0.113254 , 0.0973423 , 0.0835717 , 0.0637299 , 0.0397899 , 0.0189781 , 0.00814281 , 0.0118845 , 0.0291142 , 0.0513172 , 0.0680543 , 0.0744519 , 0.0718442 , 0.0622228 , 0.0473734 , 0.0329352 , 0.0245156 , 0.0212818 , 0.0204027 , 0.0228792 , 0.0298908 , 0.0380399 , 0.0432513 , 0.0455291 , 0.0469428 , 0.049667 , 0.0453111 , 0.0370016 , 0.0278006 , 0.0201062 , 0.0173687 , 0.020228 , 0.0242543 , 0.0264199 , 0.0275476 , 0.027771 , 0.0262174 , 0.0237332 , 0.0219206 , 0.0212424 , 0.0214967 , 0.0226845 , 0.0248514 , 0.0275874 , 0.0300439 , 0.0318892 , 0.0340621 , 0.0369823 , 0.0388068 , 0.0379494 , 0.0350817 , 0.030462 , 0.0230471 , 0.0133404 , 0.00457234 , 0.00152755 , 0.00874873 , 0.0260448 , 0.0463293 , 0.0633742 , 0.0751071 , 0.0775575 , 0.0756597 , 0.0916989 , 0.141021 , 0.22185 , 0.328433 , 0.44207 , 0.524772 , 0.544711 , 0.498668 , 0.407614 , 0.29953 , 0.199594 , 0.128704 , 0.0929922 , 0.0772499 , 0.0654169 , 0.0536587 , 0.0399619 , 0.0255793 , 0.0193488 , 0.0253531 , 0.0373143 , };//I've just hardcoded the measurement values. Maybe later we'll read them from a text file. 
for (int i = 0; i < numberofexcitationangles*numberofobservationangles; i++) { cout << "D[" << i << " ]: " << D[i] << endl; } float fit; fit=fitness(D,numberofobservationangles*numberofexcitationangles, measurement); error = cudaGetLastError(); free(Ceze); free(Cezhy); free(Cezhx); free(Ez); free(eps_r_z); free(sigma_e_z); free(Hy); free(Hx); free(kex); free(aex); free(bex); free(amx); free(bmx); free(alpha_e); free(alpha_m); free(sigma_e_pml); free(sigma_m_pml); free(Psi_ezy); free(Psi_ezx); free(Psi_hyx); free(Psi_hxy); free(kmx); free(D); free(hcjzxp); free(hcjzyp); free(hcjzxn); free(hcjzyn); free(hcmxyp); free(hcmyxp); free(hcmxyn); free(hcmyxn); free(L); free(N); //free(measurement); //float *Cezeic = (float*)malloc((sizeof(float))*(nx+1)*(ny+1)); //float *Cezeip = (float*)malloc((sizeof(float))*(nx+1)*(ny+1)); //float*Ceze,*Cezhy,*Cezhx,*dev_Cezeic,*dev_Cezeip,*Ez,*eps_r_z,*sigma_e_z,*Hy,*Hx, // *kex,*aex,*bex,*amx,*bmx,*alpha_e,*alpha_m,*sigma_e_pml,*sigma_m_pml // ,*Psi_ezy,*Psi_ezx,*Psi_hyx,*Psi_hxy,*kmx;//*Cezj later if using loop current source //float* dev_sigma_e_z,*dev_eps_r_z; //float freq = center_freq; //float *dev_freq,*D,*D_tot; //float* Ezip,*Ezic,*dev_Ezip,*dev_Ezic,*Hy_inc,*Hx_inc,*dev_Hy_inc,*dev_Hx_inc,*dev_Psi_ezy_inc,*dev_Psi_ezx_inc,*dev_Psi_hyx_inc,*dev_Psi_hxy_inc, // *Psi_ezy_inc,*Psi_ezx_inc,*Psi_hyx_inc,*Psi_hxy_inc; // //cuComplex *cjzxp,*cjzyp,*cjzxn,*cjzyn,*cmxyp,*cmyxp,*cmxyn,*cmyxn,*cjzxp_tot,*cjzyp_tot,*cjzxn_tot,*cjzyn_tot,*cmxyp_tot,*cmyxp_tot,*cmxyn_tot,*cmyxn_tot; //cuComplex *hcjzxp,*hcjzyp,*hcjzxn,*hcjzyn,*hcmxyp,*hcmyxp,*hcmxyn,*hcmyxn,*hcjzxp_tot,*hcjzyp_tot,*hcjzxn_tot,*hcjzyn_tot,*hcmxyp_tot,*hcmyxp_tot,*hcmxyn_tot // ,*hcmyxn_tot; cudaFree(dev_Cezeic); cudaFree(dev_Cezeip); cudaFree(dev_sigma_e_z); cudaFree(dev_eps_r_z); cudaFree(dev_freq); cudaFree(cjzxp); cudaFree(cjzyp); cudaFree(cjzxn); cudaFree(cjzyn); cudaFree(cmxyp); cudaFree(cmyxp); cudaFree(cmxyn); cudaFree(cmyxn); cudaFree(dev_Ceze); cudaFree(dev_Cezhy); 
cudaFree(dev_Cezhx); cudaFree(dev_bex); cudaFree(dev_aex); cudaFree(dev_bmx); cudaFree(dev_amx); cudaFree(dev_kex); cudaFree(dev_kmx); cudaFree(dev_Ez); cudaFree(dev_Hy); cudaFree(dev_Hx); cudaFree(dev_Psi_ezy); cudaFree(dev_Psi_ezx); cudaFree(dev_Psi_hyx); cudaFree(dev_Psi_hxy); //float*dev_Ceze,*dev_Cezhy,*dev_Cezhx,*dev_Jz,*dev_bex,*dev_aex,*dev_bmx,*dev_amx,*dev_kex,*dev_kmx;//dev_Cezj if using loop current source //float *dev_Ez,*dev_Hy,*dev_Hx; //float*dev_Psi_ezy,*dev_Psi_ezx,*dev_Psi_hyx,*dev_Psi_hxy; cout << "fitness is: " << fit << endl; return (double)fit; } __global__ void scattered_parameter_init(float*eps_r_z,float*sigma_e_z,float*Cezeic,float*Cezeip) { int x=threadIdx.x+blockDim.x*blockIdx.x; int y=threadIdx.y+blockDim.y*blockIdx.y; if(x<(nx+1)&&y<(ny+1)) { Cezeic[dgetCell(x,y,nx+1)] = (2*(eps0-eps0*eps_r_z[dgetCell(x,y,nx+1)])-sigma_e_z[dgetCell(x,y,nx+1)]*dt)/(2*eps0*eps_r_z[dgetCell(x,y,nx+1)]+sigma_e_z[dgetCell(x,y,nx+1)]*dt); Cezeip[dgetCell(x,y,nx+1)] = -1*(2*(eps0-eps0*eps_r_z[dgetCell(x,y,nx+1)])+sigma_e_z[dgetCell(x,y,nx+1)]*dt)/(2*eps0*eps_r_z[dgetCell(x,y,nx+1)]+sigma_e_z[dgetCell(x,y,nx+1)]*dt); } } int getCell(int x, int y,int size)//size will just be the width in the x dimension of the array. 
{ return x+y*size; } float* Make2DfloatArray(int arraySizeX, int arraySizeY) { float* theArray; theArray = (float*) malloc(arraySizeX*arraySizeY*sizeof(float*)); return theArray; } void waveform_time_init(float*time1) { int size = number_of_time_steps; for(int i = 0;i<size;i++) { time1[i]=(float)i*dt; } } void Jz_waveform(float * time,float*Jz_impressed) { float w = 2*PI*center_freq;//center_freq is the frequency for(int i = 0;i<number_of_time_steps;i++) { Jz_impressed[i]= 10*sin(w*time[i]); //Jz_impressed[i]=exp(-1*((time[i]-2e-10)/5e-11)*(time[i]-2e-10)/(5e-11)); } } void Ceze_init(float * eps_r_z, float* sig_e_z, float* Ceze) { int size = nx+1; for(int j=0;j<ny+1;j++) { for(int i=0;i<size;i++) { Ceze[getCell(i,j,nx+1)] = (2*eps_r_z[getCell(i,j,nx+1)]*eps0-dt*sig_e_z[getCell(i,j,nx+1)])/(2*eps_r_z[getCell(i,j,nx+1)]*eps0+dt*sig_e_z[getCell(i,j,nx+1)]); } } } void Cezhy_init(float*eps_r_z, float* sigma_e_z,float* Cezhy,float*kex) { int size = nx+1; for(int j =0;j<ny+1;j++) { for(int i=0;i<size;i++) { Cezhy[getCell(i,j,size)] = (2*dt/dx)/(2*eps_r_z[getCell(i,j,size)]*eps0+dt*sigma_e_z[getCell(i,j,size)]); } } } void Cezhx_init(float* eps_r_z,float*sigma_e_z,float*Cezhx,float*kex) { int size=nx+1; for(int j=0;j<ny+1;j++) { for(int i =0;i<nx+1;i++) { Cezhx[getCell(i,j,size)]=(2*dt/dy)/(2*eps_r_z[getCell(i,j,size)]*eps0+dt*sigma_e_z[getCell(i,j,size)]); } } } void Cezj_init(float*eps_r_z,float*sigma_e_z,float*Cezj) { int size =nx+1; for(int j=0;j<ny+1;j++) { for(int i=0;i<nx+1;i++) { Cezj[getCell(i,j,size)] = (-2*dt)/(2*eps_r_z[getCell(i,j,size)]*eps0+dt*sigma_e_z[getCell(i,j,size)]); } } } void Ez_init(float*Ez) { int size=nx+1; for(int j = 0;j<ny+1;j++) { for(int i = 0;i<nx+1;i++) { Ez[getCell(i,j,size)] = (float)0; } } } /*void Jz_init(float*Jz) { for(int j =0;j<ny+1;j++) { for(int i = 0;i<nx+1;i++) { Jz[getCell(i,j,nx+1)] = 0; } } }*/ void Chyh_init(float*mu_r_y,float*sigma_m_y,float*Chyh) { int size=nx; for(int i = 0;i<nx;i++) for(int j =0;j<ny;j++) { { 
Chyh[getCell(i,j,size)] = (2*mu_r_y[getCell(i,j,size)]*mu0-dt*sigma_m_y[getCell(i,j,size)])/(2*mu_r_y[getCell(i,j,size)]*mu0+dt*sigma_m_y[getCell(i,j,size)]); } } } void Chxh_init(float*mu_r_x,float*sigma_m_x,float*Chxh) { int size=nx; for(int i = 0;i<nx;i++) for(int j =0;j<ny;j++) { { Chxh[getCell(i,j,size)] = (2*mu_r_x[getCell(i,j,size)]*mu0-dt*sigma_m_x[getCell(i,j,size)])/(2*mu_r_x[getCell(i,j,size)]*mu0+dt*sigma_m_x[getCell(i,j,size)]); } } } void Chyez_init(float*mu_r_y,float*sigma_m_y,float*Chyez) { int size = nx; for(int j =0;j<ny;j++) { for(int i = 0;i<size;i++) { Chyez[getCell(i,j,size)] = (2*dt/dx)/(2*mu_r_y[getCell(i,j,size)]*mu0+dt*sigma_m_y[getCell(i,j,size)]); } } } void Chxez_init(float*mu_r_x,float*sigma_m_x,float*Chxez) { int size = nx; for(int j =0;j<ny;j++) { for(int i = 0;i<size;i++) { Chxez[getCell(i,j,size)] = (2*dt/dy)/(2*mu_r_x[getCell(i,j,size)]*mu0+dt*sigma_m_x[getCell(i,j,size)]); } } } /*void Chym_init(float*mu_r_y,float*sigma_m_y,float*Chym) { int size = nx; for(int j =0;j<ny;j++) { for(int i = 0;i<size;i++) { Chym[getCell(i,j,size)] = (-2*dt)/(2*mu_r_y[getCell(i,j,size)]*mu0+dt*sigma_m_y[getCell(i,j,size)]); } } } void Chxm_init(float*mu_r_x,float*sigma_m_x,float*Chxm) { int size = nx; for(int j =0;j<ny;j++) { for(int i = 0;i<size;i++) { Chxm[getCell(i,j,size)] = (-2*dt)/(2*mu_r_x[getCell(i,j,size)]*mu0+dt*sigma_m_x[getCell(i,j,size)]); } } }*/ void eps_r_z_init(float * eps_r_z,const vector<float> &argument) { int size = nx+1; float radius;//tumor_radius,tumor_radius_2,tumor_radius_3; for(int j =0;j<ny+1;j++) { for(int i = 0;i<nx+1;i++) { eps_r_z[getCell(i,j,size)] = 1; radius = sqrt(pow( ((float)i-nx/2)*dx,2) + pow( ((float)j-ny/2)*dy,2)); // tumor_radius = sqrt(pow( ((float)i - target_x)*dx,2) + pow( ((float)j-target_y)*dy,2)); if(radius<=breast_radius) { eps_r_z[getCell(i,j,size)] = (float)argument.at(getOptimizationCell(i,j)); //This is the line that should be uncommented if using as forward solver //eps_r_z[getCell(i,j,size)] = 
10; //if(tumor_radius <= tumor_size)//delete this if using as forward solver //{ // eps_r_z[getCell(i,j,size)] = 60; //} } } } } void sigma_e_z_init(float * sigma_e_z,float*sigma_e_pml, const vector<float> &argument) { int size = nx+1; float radius;//,tumor_radius; for(int j =0;j<ny+1;j++) { for(int i = 0;i<nx+1;i++) { sigma_e_z[getCell(i,j,size)] = 0; radius = sqrt(pow( ((float)i-nx/2)*dx,2) + pow( ((float)j-ny/2)*dy,2)); //tumor_radius = sqrt(pow( ((float)i - target_x)*dx,2) + pow( ((float)j-target_y)*dy,2)); if(radius<=breast_radius) { sigma_e_z[getCell(i,j,size)] = (float)argument.at(getOptimizationCell(i,j)+9*9); //sigma_e_z[getCell(i,j,size)] = 0.15; //if(tumor_radius <= tumor_size)//delete this if using as forward solver //{ // sigma_e_z[getCell(i,j,size)] = 0.7; //} } } } } void Hy_init(float*Hy) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { Hy[getCell(i,j,size)] = 0; } } } void Hx_init(float*Hx) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { Hx[getCell(i,j,size)] = 0; } } } void My_init(float*My) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { My[getCell(i,j,size)] = 0; } } } void Mx_init(float*Mx) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { Mx[getCell(i,j,size)] = 0; } } } void mu_r_y_init(float*mu_r_y) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { mu_r_y[getCell(i,j,size)] =1.000; } } } void mu_r_x_init(float*mu_r_x) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { mu_r_x[getCell(i,j,size)]=1.000; } } } void sigma_m_y_init(float*sigma_m_y) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { sigma_m_y[getCell(i,j,size)] = 0; } } } void sigma_m_x_init(float*sigma_m_x) { int size = nx; for(int j=0;j<ny;j++) { for(int i = 0;i<size;i++) { sigma_m_x[getCell(i,j,size)] = 0; } } } void C_Psi_ezy_init(float *C_Psi_ezy,float*Cezhx) { int size = 20; for(int j = 0;j<ny;j++) for( int i =0;i<size;i++) { if(i<10) { 
C_Psi_ezy[getCell(i,j,size)]=dy*Cezhx[getCell(i,j,nx)]; } else { C_Psi_ezy[getCell(i,j,size)]=dy*Cezhx[getCell(nx-20+i,j,nx)]; } } } void C_Psi_ezx_init(float* C_Psi_ezx,float*Cezhy) { int size_y=20; for(int j=0;j<size_y;j++) { for(int i=0;i<nx;i++) { if(j<10) { C_Psi_ezx[getCell(i,j,nx)] = dx*Cezhy[getCell(i,j,nx)]; } else { C_Psi_ezx[getCell(i,j,nx)] = dx*Cezhy[getCell(i,ny-20+j,nx)]; } } } } void C_Psi_hyx_init(float*C_Psi_hyx,float*Chyez) { int size_x=20; for(int j=0;j<ny;j++) { for(int i=0;i<size_x;i++) { if(i<10) { C_Psi_hyx[getCell(i,j,size_x)]=dx*Chyez[getCell(i,j,nx)]; } else { C_Psi_hyx[getCell(i,j,size_x)]=dx*Chyez[getCell(nx-20+i,j,nx)]; } } } } void C_psi_hxy_init(float *C_Psi_hxy,float*Chxez) { int size_y=20; for(int j=0;j<size_y;j++) { for(int i=0;i<nx;i++) { if(j<11) { C_Psi_hxy[getCell(i,j,nx)]=dy*Chxez[getCell(i,j,nx)]; } else { C_Psi_hxy[getCell(i,j,nx)]=dy*Chxez[getCell(i,ny-20+j,nx)]; } } } } void aex_init(float*aex,float*sigma_e_pml,float*kex,float*alpha_e_x,float*bex) { int size=ncells; //aex[0]=0.0; //cout<<"aex[0] = "<<aex[0]<<endl; for(int i=0;i<size;i++) { aex[i]=((bex[i]-1)*sigma_e_pml[i])/(dx*(sigma_e_pml[i]*kex[i]+alpha_e_x[i]*kex[i]*kex[i])); //cout<<"aex["<<i<<"] = "<<aex[i]<<endl; } } void bex_init(float*bex ,float*sigma_e_pml,float*kex,float*alpha_e_x) { int size=ncells; for(int i=0;i<size;i++) { bex[i]=exp(-1*(dt/eps0)*(sigma_e_pml[i]/kex[i]+alpha_e_x[i])); //cout<<"bex["<<i<<"] = "<<bex[i]<<endl; } } void aey_init(float*aey,float*sigma_e_pml,float*key,float*alpha_e_y,float*bey) { for(int i=0;i>ncells;i++) { aey[i]=(bey[i]-1)*sigma_e_pml[i]/(dy*(sigma_e_pml[i]*key[i]+alpha_e_y[i]*key[i]*key[i])); } } void bey_init(float*bey,float*sigma_e_pml,float*key,float*alpha_e_y) { int size=ncells; for(int i=0;i<size;i++) { bey[i]=exp(-1*(dt/eps0)*(sigma_e_pml[i]/key[i]+alpha_e_y[i])); } } void amy_init(float*amy,float*sigma_m_pml,float*kmy,float*alpha_m_y,float*bmy) { int size=ncells; for(int i=0;i<size;i++) { 
amy[i]=(bmy[i]-1)*sigma_m_pml[i]/(dx*(sigma_m_pml[i]*kmy[i]+alpha_m_y[i]*kmy[i]*kmy[i])); } } void bmy_init(float*bmy,float*sigma_m_pml,float*kmy,float*alpha_m_y) { int size=ncells; for(int i=0;i<size;i++) { bmy[i]=exp(-1*(dt/mu0)*(sigma_m_pml[i]/kmy[i]+alpha_m_y[i])); } } void amx_init(float*amx,float*sigma_m_pml,float*kmx,float*alpha_m_x,float*bmx) { int size=ncells; //cout<<" amx = "<<amx[0]<<endl; //amx[0]=0.0; //cout<<" amx = "<<amx[0]<<endl; for(int i=0;i<size;i++) { amx[i]=(bmx[i]-1)*sigma_m_pml[i]/(dx*(sigma_m_pml[i]*kmx[i]+alpha_m_x[i]*kmx[i]*kmx[i])); cout<<" amx = "<<amx[i]<<endl; } } void bmx_init(float*bmx,float*sigma_m_pml,float*kmx,float*alpha_m_x) { int size=10; float argument; //float constant; for(int i=0;i<size;i++) { //constant = dt/mu0; //cout<< "dt/mu0 = "<<constant<<endl; argument = -1*(dt/mu0)*((sigma_m_pml[i]/kmx[i])+alpha_m_x[i]); bmx[i]=exp(argument); //cout<<"argument of bmx = "<<argument<<endl; //cout<<"bmx = "<<bmx[i]<<endl; } } void alpha_e_init(float*alpha_e) { float rho; int size=ncells; for(int i=0;i<ncells;i++) { rho = ((float)i+0.25)/ncells; alpha_e[i]=alpha_min+(alpha_max-alpha_min)*rho; //cout<<"alpha_e = "<<alpha_e[i]<<endl; } } void alpha_m_init(float*alpha_e,float*alpha_m) { int size=ncells; float rho; for(int i=0;i<size;i++) { rho = ((float)i+0.75)/ncells; alpha_m[i]=(mu0/eps0)*(alpha_min+(alpha_max-alpha_min)*rho); //cout<<"alpha_m = "<<alpha_m[i]<<endl; } } void k_e_init(float*k) { int size=ncells; float rho; for(int i=0;i<size;i++) { rho = ((float)i+0.25)/ncells; k[i]=pow(rho,npml)*(kmax-1)+1; //cout<<"kex ["<<i<<"]= "<<k[i]<<endl; } } void k_m_init(float*k) { int size=ncells; float rho; for(int i=0;i<size;i++) { rho = ((float)i+0.75)/ncells; k[i]=pow(rho,npml)*(kmax-1)+1; //cout<<"kmx ["<<i<<"]= "<<k[i]<<endl; } } void sigma_e_pml_init(float* sigma_e_pml) { float sigma_max = (npml+1)/(150*PI*dx); int size = 10; float rho; for(int i=0;i<size;i++) { rho = ((float)i+0.25)/ncells; 
sigma_e_pml[i]=sigma_max*sigma_factor*pow(rho,npml); cout<<"sigma_e_pml = "<<sigma_e_pml[i]<<endl; } } void sigma_m_pml_init(float*sigma_m_pml,float*sigma_e_pml) { float rho; int size = 10; float sigma_max = (npml+1)/(150*PI*dx); for(int i=0;i<size;i++) { rho = ((float)i+0.75)/ncells; sigma_m_pml[i]=(mu0/eps0)*sigma_max*sigma_factor*pow(rho,npml); cout<<"sigma_m_pml "<<sigma_m_pml[i]<<endl; } } void Psi_ezy_init(float*Psi_ezy) { int size=nx*20; for(int i=0;i<size;i++) { Psi_ezy[i]=0.0; } } void Psi_ezx_init(float*Psi_ezx) { int size=ny*20; for(int i=0;i<size;i++) { Psi_ezx[i]=0.0; } } void Psi_hyx_init(float*Psi_hyx) { int size=ny*20; for(int i=0;i<size;i++) { Psi_hyx[i]=0.0; } } void Psi_hxy_init(float*Psi_hxy) { int size=nx*20; for(int i=0;i<size;i++) { Psi_hxy[i]=0.0; } } void CJ_Init(cuComplex * cjzyn,int size) { cuComplex nullComplex(0,0); for( int i =0; i<size;i++) { cjzyn[i] = nullComplex; } }
54866964de6d2d8ba63228d87f1c92c27111670a.hip
// !!! This is a file automatically generated by hipify!!!
// includes, system
#include <iostream>
#include <assert.h>

// Here you can set the device ID that was assigned to you
#define MYDEVICE 0

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);

///////////////////////////////////////////////////////////////////////////////
// Program main
//
// Round-trips an 8-element float array host -> device -> device -> host and
// asserts the data survived all three copies unchanged. Exits non-zero (via
// checkCUDAError) on any HIP runtime error.
///////////////////////////////////////////////////////////////////////////////
int main()
{
    hipSetDevice(MYDEVICE);
    // Fail fast with a clear message if MYDEVICE is not a valid device ID;
    // previously an invalid ID only surfaced at the first memcpy check.
    checkCUDAError("hipSetDevice");

    // pointer and dimension for host memory
    int dimA = 8;
    float *h_a;

    // pointers for device memory
    float *d_a, *d_b;

    // allocate and initialize host memory
    // Bonus: try using hipHostMalloc in place of malloc
    // it has the same syntax as hipMalloc, but it enables asynchronous copies
    h_a = (float *) malloc(dimA*sizeof(float));
    for (int i = 0; i<dimA; ++i)
    {
        h_a[i] = i;
    }

    // Part 1 of 5: allocate device memory
    size_t memSize = dimA*sizeof(float);
    hipMalloc(&d_a, memSize );
    hipMalloc(&d_b, memSize );
    // Check the allocations immediately: a failed hipMalloc leaves the device
    // pointers invalid and would otherwise be misreported as a memcpy error.
    checkCUDAError("hipMalloc calls");

    // Part 2 of 5: host to device memory copy
    hipMemcpy(d_a, h_a, memSize, hipMemcpyHostToDevice);

    // Part 3 of 5: device to device memory copy
    hipMemcpy(d_b, d_a, memSize, hipMemcpyDeviceToDevice);

    // clear host memory so the verification below cannot pass by accident
    for (int i=0; i<dimA; ++i )
    {
        h_a[i] = 0.f;
    }

    // Part 4 of 5: device to host copy
    hipMemcpy(h_a, d_b, memSize, hipMemcpyDeviceToHost);

    // Check for any CUDA errors
    checkCUDAError("hipMemcpy calls");

    // verify the data on the host is correct
    for (int i=0; i<dimA; ++i)
    {
        assert(h_a[i] == (float) i);
    }

    // Part 5 of 5: free device memory pointers d_a and d_b
    hipFree(d_a);
    hipFree(d_b);

    // Check for any CUDA errors
    checkCUDAError("hipFree");

    // free host memory pointer h_a
    free(h_a);

    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    std::cout << "Correct!" << std::endl;

    return 0;
}

// Report the most recent HIP runtime error, if any, and exit the process.
// msg: caller-supplied context string identifying which call(s) are checked.
// Note: hipGetLastError also clears the sticky error state.
void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if( hipSuccess != err)
    {
        std::cerr << "Cuda error: " << msg << " " << hipGetErrorString(err) << std::endl;
        exit(-1);
    }
}
54866964de6d2d8ba63228d87f1c92c27111670a.cu
// includes, system
#include <iostream>
#include <assert.h>

// Here you can set the device ID that was assigned to you
#define MYDEVICE 0

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);

///////////////////////////////////////////////////////////////////////////////
// Program main
//
// Round-trips an 8-element float array host -> device -> device -> host and
// asserts the data survived all three copies unchanged. Exits non-zero (via
// checkCUDAError) on any CUDA runtime error.
///////////////////////////////////////////////////////////////////////////////
int main()
{
    cudaSetDevice(MYDEVICE);
    // Fail fast with a clear message if MYDEVICE is not a valid device ID;
    // previously an invalid ID only surfaced at the first memcpy check.
    checkCUDAError("cudaSetDevice");

    // pointer and dimension for host memory
    int dimA = 8;
    float *h_a;

    // pointers for device memory
    float *d_a, *d_b;

    // allocate and initialize host memory
    // Bonus: try using cudaMallocHost in place of malloc
    // it has the same syntax as cudaMalloc, but it enables asynchronous copies
    h_a = (float *) malloc(dimA*sizeof(float));
    for (int i = 0; i<dimA; ++i)
    {
        h_a[i] = i;
    }

    // Part 1 of 5: allocate device memory
    size_t memSize = dimA*sizeof(float);
    cudaMalloc(&d_a, memSize );
    cudaMalloc(&d_b, memSize );
    // Check the allocations immediately: a failed cudaMalloc leaves the device
    // pointers invalid and would otherwise be misreported as a memcpy error.
    checkCUDAError("cudaMalloc calls");

    // Part 2 of 5: host to device memory copy
    cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);

    // Part 3 of 5: device to device memory copy
    cudaMemcpy(d_b, d_a, memSize, cudaMemcpyDeviceToDevice);

    // clear host memory so the verification below cannot pass by accident
    for (int i=0; i<dimA; ++i )
    {
        h_a[i] = 0.f;
    }

    // Part 4 of 5: device to host copy
    cudaMemcpy(h_a, d_b, memSize, cudaMemcpyDeviceToHost);

    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy calls");

    // verify the data on the host is correct
    for (int i=0; i<dimA; ++i)
    {
        assert(h_a[i] == (float) i);
    }

    // Part 5 of 5: free device memory pointers d_a and d_b
    cudaFree(d_a);
    cudaFree(d_b);

    // Check for any CUDA errors
    checkCUDAError("cudaFree");

    // free host memory pointer h_a
    free(h_a);

    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    std::cout << "Correct!" << std::endl;

    return 0;
}

// Report the most recent CUDA runtime error, if any, and exit the process.
// msg: caller-supplied context string identifying which call(s) are checked.
// Note: cudaGetLastError also clears the sticky error state.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        std::cerr << "Cuda error: " << msg << " " << cudaGetErrorString(err) << std::endl;
        exit(-1);
    }
}
f4af68446467c36a74050e23cda99fb7df1334ab.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <kernels/filter-operator-rgb.cuh>

// Clamp a signed intensity sum to the displayable byte range [0, 255].
__device__ unsigned char fixIntensity(int intensity) {
    if (intensity > 255)
        return 255;
    if (intensity < 0)
        return 0;
    return (unsigned char) intensity;
}

// Apply a 3x3 convolution filter to a pitch-allocated interleaved RGB image
// (3 bytes per pixel). One thread processes one interior pixel; the +3/+1
// offsets skip a 1-pixel border, so the launch grid is expected to cover only
// the image interior.
//
// filter    : 9 signed filter coefficients, row-major; each product is
//             divided by 9 during accumulation.
// devSource : source image base pointer (rows are `pitch` bytes apart).
// devDest   : destination image base pointer, same layout.
// width/height : NOTE(review) currently unused — there is no bounds check
//             against the image size; presumably the launch grid exactly
//             matches the interior region. Confirm against the caller.
//
// Shared tile: the static size [16+2][(32+2)*9] implies thread blocks of up
// to 32x16 with a 1-row halo above and below — TODO confirm launch config.
__global__ void filterOperatorRGB(const char *filter,
    unsigned char *devSource,
    unsigned char *devDest,
    size_t pitch,
    int width,
    int height) {
    // Byte offset of this thread's pixel within its row (3 bytes/pixel,
    // +3 skips the left border pixel) and its row index (+1 skips row 0).
    auto offsetX = (blockDim.x * blockIdx.x + threadIdx.x) * 3 + 3;
    auto offsetY = (blockDim.y * blockIdx.y + threadIdx.y) + 1;
    // Each thread owns a disjoint 9-byte-wide column slot in shared memory
    // holding its pixel plus its left and right neighbor pixels.
    auto sharedX = threadIdx.x * 9;
    auto sharedY = threadIdx.y + 1;
    __shared__ unsigned char sharedMemory[16 + 2][(32 + 2) * 9];
    // Stage a 3-row x 9-byte window (the full 3x3 pixel neighborhood) into
    // this thread's slot. Adjacent threads re-read overlapping global bytes
    // but write disjoint shared slots, so there is no write race.
    for (int i = -1; i < 2; i++) {
        memcpy(
                sharedMemory[sharedY + i] + sharedX,
                devSource + (offsetY + i) * pitch + offsetX - 3,
                9);
    }
    // Barrier before reading: rows sharedY-1 / sharedY+1 include bytes
    // written by threads in neighboring y positions of the block.
    __syncthreads();

    int r = 0;
    int g = 0;
    int b = 0;

    // Accumulate the 3x3 weighted sums per channel. Channels are interleaved
    // every 3 bytes within the staged window.
#pragma unroll
    for (int i = 0; i < 3; i++) {
#pragma unroll
        for (int j = 0; j < 3; j++) {
            r += sharedMemory[sharedY + i - 1][sharedX + j * 3] * filter[i * 3 + j] / 9;
            g += sharedMemory[sharedY + i - 1][sharedX + j * 3 + 1] * filter[i * 3 + j] / 9;
            b += sharedMemory[sharedY + i - 1][sharedX + j * 3 + 2] * filter[i * 3 + j] / 9;
        }
    }

    r = fixIntensity(r);
    g = fixIntensity(g);
    b = fixIntensity(b);

    // Store one byte per channel by copying the first byte of each int.
    // NOTE(review): this is the low (clamped) byte only on little-endian
    // targets — verify if this ever runs on a big-endian host path.
    memcpy(devDest + offsetY * pitch + offsetX, &r, 1);
    memcpy(devDest + offsetY * pitch + offsetX + 1, &g, 1);
    memcpy(devDest + offsetY * pitch + offsetX + 2, &b, 1);
}
f4af68446467c36a74050e23cda99fb7df1334ab.cu
#include <kernels/filter-operator-rgb.cuh> __device__ unsigned char fixIntensity(int intensity) { if (intensity > 255) return 255; if (intensity < 0) return 0; return (unsigned char) intensity; } __global__ void filterOperatorRGB(const char *filter, unsigned char *devSource, unsigned char *devDest, size_t pitch, int width, int height) { auto offsetX = (blockDim.x * blockIdx.x + threadIdx.x) * 3 + 3; auto offsetY = (blockDim.y * blockIdx.y + threadIdx.y) + 1; auto sharedX = threadIdx.x * 9; auto sharedY = threadIdx.y + 1; __shared__ unsigned char sharedMemory[16 + 2][(32 + 2) * 9]; for (int i = -1; i < 2; i++) { memcpy( sharedMemory[sharedY + i] + sharedX, devSource + (offsetY + i) * pitch + offsetX - 3, 9); } __syncthreads(); int r = 0; int g = 0; int b = 0; #pragma unroll for (int i = 0; i < 3; i++) { #pragma unroll for (int j = 0; j < 3; j++) { r += sharedMemory[sharedY + i - 1][sharedX + j * 3] * filter[i * 3 + j] / 9; g += sharedMemory[sharedY + i - 1][sharedX + j * 3 + 1] * filter[i * 3 + j] / 9; b += sharedMemory[sharedY + i - 1][sharedX + j * 3 + 2] * filter[i * 3 + j] / 9; } } r = fixIntensity(r); g = fixIntensity(g); b = fixIntensity(b); memcpy(devDest + offsetY * pitch + offsetX, &r, 1); memcpy(devDest + offsetY * pitch + offsetX + 1, &g, 1); memcpy(devDest + offsetY * pitch + offsetX + 2, &b, 1); }
c763e3811204782ea894259751cc458aed2e6901.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "quark/util/math_functions.h" namespace quark { template <typename T> __global__ void EltwiseProduct(int64 n, const T* alpha, const T* a, const T* b, T* c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { c[tid] = (*alpha) * a[tid] * b[tid]; } } template <typename T> void quark_gpu_eltwise_prod(hipStream_t stream, const T* alpha, const GpuTensor<T> &a, const GpuTensor<T>& b, GpuTensor<T>* c) { int64 n = a.size(); int num_thread = QUARK_CUDA_BLOCK_SIZE; int64 num_block = QUARK_GET_NUM_BLOCK(n); hipLaunchKernelGGL(( EltwiseProduct), dim3(num_block), dim3(num_thread), 0, stream, n, alpha, a.data(), b.data(), c->mutable_data()); } template void quark_gpu_eltwise_prod(hipStream_t stream, const float* alpha, const GpuTensor<float> &a, const GpuTensor<float>& b, GpuTensor<float>* c); template void quark_gpu_eltwise_prod(hipStream_t stream, const double* alpha, const GpuTensor<double> &a, const GpuTensor<double>& b, GpuTensor<double>* c); } // namespace quark
c763e3811204782ea894259751cc458aed2e6901.cu
#include <stdio.h> #include "quark/util/math_functions.h" namespace quark { template <typename T> __global__ void EltwiseProduct(int64 n, const T* alpha, const T* a, const T* b, T* c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { c[tid] = (*alpha) * a[tid] * b[tid]; } } template <typename T> void quark_gpu_eltwise_prod(cudaStream_t stream, const T* alpha, const GpuTensor<T> &a, const GpuTensor<T>& b, GpuTensor<T>* c) { int64 n = a.size(); int num_thread = QUARK_CUDA_BLOCK_SIZE; int64 num_block = QUARK_GET_NUM_BLOCK(n); EltwiseProduct<<<num_block, num_thread, 0, stream>>>(n, alpha, a.data(), b.data(), c->mutable_data()); } template void quark_gpu_eltwise_prod(cudaStream_t stream, const float* alpha, const GpuTensor<float> &a, const GpuTensor<float>& b, GpuTensor<float>* c); template void quark_gpu_eltwise_prod(cudaStream_t stream, const double* alpha, const GpuTensor<double> &a, const GpuTensor<double>& b, GpuTensor<double>* c); } // namespace quark
8fe3c34ed03565c5646580e0e2cbfe3bd8e31b71.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 4, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
8fe3c34ed03565c5646580e0e2cbfe3bd8e31b71.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 4, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
7119112d253d5b101250ff79fd118f7d27c0a8b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* MCMPC.cu */ #include<stdio.h> #include "../include/MCMPC.cuh" void weighted_mean(InputVector *In, int numElite, float *Out) { float totalWeight = 0.0f; float temp[HORIZON] = { }; for(int i = 0; i < numElite; i++){ if(isnan(In[i].W)) { totalWeight += 0.0f; }else{ totalWeight += In[i].W; } } for(int i = 0; i < HORIZON; i++){ for(int k = 0; k < numElite; k++){ if(isnan(In[k].W)) { temp[i] += 0.0f; }else{ temp[i] += (In[k].W * In[k].Input[i]) / totalWeight; } } if(isnan(temp[i])) { Out[i] = 0.0f; }else{ Out[i] = temp[i]; } } } void shift_Input_vec( float *inputVector) { float temp[HORIZON]= { }; for(int i = 0; i < HORIZON - 1; i++){ temp[i] = inputVector[i+1]; } temp[HORIZON - 1] = inputVector[HORIZON - 1]; for(int i = 0; i < HORIZON; i++){ inputVector[i] = temp[i]; } } __global__ void init_Input_vector(InputVector *d_I, float init_val) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; for(int tm = 0; tm < HORIZON; tm++) { d_I[id].Input[tm] = init_val; } } __global__ void setup_kernel(hiprandState_t *state,int seed) { unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ hiprand_init(seed, id, 0, &state[id]); } __global__ void callback_elite_sample(InputVector *devOut, InputVector *devIn, int *elite_indices) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; devOut[id].W = devIn[elite_indices[id]].W; devOut[id].L = devIn[elite_indices[id]].L; for(int i = 0; i < HORIZON; i++){ devOut[id].Input[i] = devIn[elite_indices[id]].Input[i]; // devOut[id].dy[i] = devIn[elite_indices[id]].dy[i]; } } __global__ void WeightRecalculation( InputVector *devIn, int *indices) { unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; float lambda = 0.5f; float mlambda = 0.25f; float variable; float variableForWHM; variable = (devIn[id].L - devIn[indices[0]].L) / lambda; variableForWHM = (devIn[id].L - 
devIn[indices[0]].L) / mlambda; devIn[id].W = exp(-variable); devIn[id].WHM = exp(-variableForWHM); // devIn[id].WHM = 1 / devIn[id].L; // devIn[id].WHM = 1.0f; __syncthreads(); } __device__ float gen_u(unsigned int id, hiprandState_t *state, float ave, float vr) { float u; hiprandState_t localState = state[id]; u = hiprand_normal(&localState) * vr + ave; return u; } __device__ float inputGenerator(int t, float mean, float var, float *Cov, float *z) { int index; if( t == 0){ index = t; }else{ // index = t; index = t * HORIZON; } float ans, temp; temp = 0.0f; for(int k = 0; k < HORIZON; k++) { // temp += Cov[ index + k * HORIZON] * z[k]; temp += Cov[ index + k] * z[k]; } ans = mean + var * temp; return ans; } //MCMPC for Simple Nonlinear System __global__ void MCMPC_Simple_NonLinear_Example(float *state, hiprandState_t *randomSeed , float *mean, InputVector *d_data, float var, float *d_param, float *d_constraints, float *d_matrix, float *cost_vec){ unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float qx = 0.0f; float total_cost = 0.0f; float u[HORIZON] = { }; float stateInThisThreads[DIM_OF_STATES] = { }; float dstateInThisThreads[DIM_OF_STATES] = { }; //copy statevector for calculate forward simulation result in each thread. 
for(int i = 0; i < DIM_OF_STATES; i++){ stateInThisThreads[i] = state[i]; } // do simulation in each thread for(int step = 0; step < HORIZON; step++) { u[step] = gen_u(seq, randomSeed, mean[step], var); seq += NUM_OF_SAMPLES; if(isnan(u[step])){ u[step] = d_data[0].Input[step]; } //printf("id==%d u==%f\n", id, u[step]); calc_nonLinear_example(stateInThisThreads, u[step], d_param, dstateInThisThreads); stateInThisThreads[0] = stateInThisThreads[0] + (interval * dstateInThisThreads[0]); stateInThisThreads[1] = stateInThisThreads[1] + (interval * dstateInThisThreads[1]); qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + d_matrix[2] * u[step] * u[step]; total_cost += qx; qx = 0.0f; } //printf("id==%d L==%f %f %f %f %f\n", id, total_cost, u[0], u[1], u[2], u[3]); float KL_COST, S, lambda; lambda = 200;/*HORIZON * DIM_OF_STATES;*/ S = total_cost / lambda; KL_COST = exp(-S); //printf("id==%d L==%f W == %f %f %f %f %f\n", id, total_cost, KL_COST, u[0], u[1], u[2], u[3]); __syncthreads(); d_data[id].W = KL_COST; d_data[id].L = total_cost; cost_vec[id] = total_cost; for(int i = 0; i < HORIZON; i++){ d_data[id].Input[i] = u[i]; } __syncthreads(); } __global__ void MCMPC_Crat_and_SinglePole(float *state, hiprandState_t *randomSeed, float *mean, InputVector *d_data, float var, float *d_param, float *d_constraints, float *d_matrix, float *cost_vec) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float qx = 0.0f; float total_cost = 0.0f; float u[HORIZON] = { }; float stateInThisThreads[DIM_OF_STATES] = { }; float dstateInThisThreads[DIM_OF_STATES] = { }; for(int i = 0; i < DIM_OF_STATES; i++){ stateInThisThreads[i] = state[i]; } for(int t = 0; t < HORIZON; t++){ if(isnan(mean[t])){ //u[t] = d_data[0].Input[t]; if(t < HORIZON -1){ u[t] = gen_u(seq, randomSeed, d_data[0].Input[t+1], var); seq += NUM_OF_SAMPLES; }else{ u[t] = gen_u(seq, randomSeed, 
d_data[0].Input[HORIZON - 1], var); seq += NUM_OF_SAMPLES; } }else{ u[t] = gen_u(seq, randomSeed, mean[t], var); seq += NUM_OF_SAMPLES; } if(u[t] < d_constraints[0]){ u[t] = d_constraints[0]; } if(u[t] > d_constraints[1]){ u[t] = d_constraints[1]; } // 100Hz 40step0.4 // 0MPC /*dstateInThisThreads[0] = stateInThisThreads[2]; dstateInThisThreads[1] = stateInThisThreads[3]; dstateInThisThreads[2] = Cart_type_Pendulum_ddx(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); //ddx dstateInThisThreads[3] = Cart_type_Pendulum_ddtheta(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); stateInThisThreads[2] = stateInThisThreads[2] + (interval * dstateInThisThreads[2]); stateInThisThreads[3] = stateInThisThreads[3] + (interval * dstateInThisThreads[3]); stateInThisThreads[0] = stateInThisThreads[0] + (interval * dstateInThisThreads[0]); stateInThisThreads[1] = stateInThisThreads[1] + (interval * dstateInThisThreads[1]);*/ for(int sec = 0; sec < 1; sec++){ dstateInThisThreads[0] = stateInThisThreads[2]; dstateInThisThreads[1] = stateInThisThreads[3]; dstateInThisThreads[2] = Cart_type_Pendulum_ddx(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); //ddx dstateInThisThreads[3] = Cart_type_Pendulum_ddtheta(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); stateInThisThreads[2] = stateInThisThreads[2] + (interval * dstateInThisThreads[2]); stateInThisThreads[3] = stateInThisThreads[3] + (interval * dstateInThisThreads[3]); stateInThisThreads[0] = stateInThisThreads[0] + (interval * dstateInThisThreads[0]); stateInThisThreads[1] = stateInThisThreads[1] + (interval * dstateInThisThreads[1]); } /*while(stateInThisThreads[1] > M_PI) stateInThisThreads[1] -= (2 * M_PI); while(stateInThisThreads[1] < -M_PI) stateInThisThreads[1] += (2 * M_PI);*/ // upper side: 
MATLAB /* qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + u[t] * u[t] * d_matrix[3]; */ /*qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4];*/ qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4]; if(t == HORIZON -1){ qx += stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3]; // qx += stateInThisThreads[0] * stateInThisThreads[0] * (d_matrix[0] + 2.0f) + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] // + stateInThisThreads[2] * stateInThisThreads[2] * (d_matrix[2] + 0.04f) + stateInThisThreads[3] * stateInThisThreads[3] * (d_matrix[3] + 0.009f); } /*qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + (1 - cosf(stateInThisThreads[1])) * (1 - cosf(stateInThisThreads[1])) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4];*/ // constraints described by Barrier Function Method /*if(stateInThisThreads[0] <= 0){ qx += 1 / (powf(stateInThisThreads[0] - d_constraints[2],2) * invBarrier); if(stateInThisThreads[0] < d_constraints[2]){ qx += 1000000; } }else{ qx += 1 / (powf(d_constraints[3] - stateInThisThreads[0],2) * invBarrier); if(stateInThisThreads[0] > 
d_constraints[3]){ qx += 1000000; } }*/ total_cost += qx; qx = 0.0f; } if(isnan(total_cost)) { total_cost = 1000000 * 5; } float KL_COST, S, lambda, HM_COST, HM; //int NomarizationCost = sizeOfParaboloidElements + addTermForLSM; //LSMparaboloid lambda = 0.5 * HORIZON; HM = total_cost / (0.25*HORIZON); //0.75 S = total_cost / lambda; KL_COST = exp(-S); HM_COST = exp(-HM); __syncthreads(); d_data[id].WHM = HM_COST; d_data[id].W = KL_COST; // d_data[id].L = total_cost / sizeOfParaboloidElements; d_data[id].L = total_cost / HORIZON; cost_vec[id] = total_cost; for(int index = 0; index < HORIZON; index++){ d_data[id].Input[index] = u[index]; } __syncthreads(); } __global__ void CMAMCMPC_Cart_and_SinglePole(float *state, hiprandState_t *r_seed, float *covariance, float *mean, InputVector *d_data, float variance, float *d_param, float *d_constraints, float *d_matrix, float *cost_vec) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; // float qx = 0.0f; //stage_cost float total_cost = 0.0f; //total_cost float u[HORIZON] = { }; // float z[HORIZON] = { }; // float stateInThisThreads[DIM_OF_STATES] = { }; float dstateInThisThreads[DIM_OF_STATES] = { }; for(int i = 0; i < DIM_OF_STATES; i++){ stateInThisThreads[i] = state[i]; } for(int t_u = 0; t_u < HORIZON; t_u++) { z[t_u] = gen_u( id, r_seed, 0.0f, 1.0f); seq += NUM_OF_SAMPLES; // seq += HORIZON; } __syncthreads( ); for(int t = 0; t < HORIZON; t++) { u[t] = inputGenerator(t, mean[t], variance, covariance, z); // if(u[t] < d_constraints[0]){ u[t] = d_constraints[0]; } if(u[t] > d_constraints[1]){ u[t] = d_constraints[1]; } for(int sec = 0; sec < 1; sec++){ dstateInThisThreads[0] = stateInThisThreads[2]; dstateInThisThreads[1] = stateInThisThreads[3]; dstateInThisThreads[2] = Cart_type_Pendulum_ddx(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); //ddx dstateInThisThreads[3] = Cart_type_Pendulum_ddtheta(u[t], stateInThisThreads[0], 
stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); stateInThisThreads[2] = stateInThisThreads[2] + (interval * dstateInThisThreads[2]); stateInThisThreads[3] = stateInThisThreads[3] + (interval * dstateInThisThreads[3]); stateInThisThreads[0] = stateInThisThreads[0] + (interval * dstateInThisThreads[0]); stateInThisThreads[1] = stateInThisThreads[1] + (interval * dstateInThisThreads[1]); } /*while(stateInThisThreads[1] > M_PI) stateInThisThreads[1] -= (2 * M_PI); while(stateInThisThreads[1] < -M_PI) stateInThisThreads[1] += (2 * M_PI);*/ // upper side: MATLAB /* qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + u[t] * u[t] * d_matrix[3]; */ /*qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4];*/ qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4]; if(t == HORIZON -1){ qx += stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3]; // qx += stateInThisThreads[0] * stateInThisThreads[0] * (d_matrix[0] + 2.0f) + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] // + stateInThisThreads[2] * stateInThisThreads[2] * (d_matrix[2] + 0.04f) + stateInThisThreads[3] * stateInThisThreads[3] * (d_matrix[3] + 0.009f); } /*qx = stateInThisThreads[0] * stateInThisThreads[0] * 
d_matrix[0] + (1 - cosf(stateInThisThreads[1])) * (1 - cosf(stateInThisThreads[1])) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4];*/ // constraints described by Barrier Function Method /*if(stateInThisThreads[0] <= 0){ qx += 1 / (powf(stateInThisThreads[0] - d_constraints[2],2) * invBarrier); if(stateInThisThreads[0] < d_constraints[2]){ qx += 1000000; } }else{ qx += 1 / (powf(d_constraints[3] - stateInThisThreads[0],2) * invBarrier); if(stateInThisThreads[0] > d_constraints[3]){ qx += 1000000; } }*/ total_cost += qx; qx = 0.0f; } if(isnan(total_cost)) { total_cost = 1000000 * 5; } float KL_COST, S, lambda, HM_COST, HM; //int NomarizationCost = sizeOfParaboloidElements + addTermForLSM; //LSMparaboloid lambda = 0.5 * HORIZON; HM = total_cost / (0.25*HORIZON); //0.75 S = total_cost / lambda; KL_COST = exp(-S); HM_COST = exp(-HM); __syncthreads(); d_data[id].WHM = HM_COST; d_data[id].W = KL_COST; // d_data[id].L = total_cost / sizeOfParaboloidElements; d_data[id].L = total_cost / HORIZON; cost_vec[id] = total_cost; for(int index = 0; index < HORIZON; index++){ d_data[id].Input[index] = u[index]; } __syncthreads(); }
7119112d253d5b101250ff79fd118f7d27c0a8b4.cu
/* MCMPC.cu */ #include<stdio.h> #include "../include/MCMPC.cuh" void weighted_mean(InputVector *In, int numElite, float *Out) { float totalWeight = 0.0f; float temp[HORIZON] = { }; for(int i = 0; i < numElite; i++){ if(isnan(In[i].W)) { totalWeight += 0.0f; }else{ totalWeight += In[i].W; } } for(int i = 0; i < HORIZON; i++){ for(int k = 0; k < numElite; k++){ if(isnan(In[k].W)) { temp[i] += 0.0f; }else{ temp[i] += (In[k].W * In[k].Input[i]) / totalWeight; } } if(isnan(temp[i])) { Out[i] = 0.0f; }else{ Out[i] = temp[i]; } } } void shift_Input_vec( float *inputVector) { float temp[HORIZON]= { }; for(int i = 0; i < HORIZON - 1; i++){ temp[i] = inputVector[i+1]; } temp[HORIZON - 1] = inputVector[HORIZON - 1]; for(int i = 0; i < HORIZON; i++){ inputVector[i] = temp[i]; } } __global__ void init_Input_vector(InputVector *d_I, float init_val) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; for(int tm = 0; tm < HORIZON; tm++) { d_I[id].Input[tm] = init_val; } } __global__ void setup_kernel(curandState *state,int seed) { unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ curand_init(seed, id, 0, &state[id]); } __global__ void callback_elite_sample(InputVector *devOut, InputVector *devIn, int *elite_indices) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; devOut[id].W = devIn[elite_indices[id]].W; devOut[id].L = devIn[elite_indices[id]].L; for(int i = 0; i < HORIZON; i++){ devOut[id].Input[i] = devIn[elite_indices[id]].Input[i]; // devOut[id].dy[i] = devIn[elite_indices[id]].dy[i]; } } __global__ void WeightRecalculation( InputVector *devIn, int *indices) { unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; float lambda = 0.5f; float mlambda = 0.25f; float variable; float variableForWHM; variable = (devIn[id].L - devIn[indices[0]].L) / lambda; variableForWHM = (devIn[id].L - devIn[indices[0]].L) / mlambda; devIn[id].W = exp(-variable); devIn[id].WHM = 
exp(-variableForWHM); // devIn[id].WHM = 1 / devIn[id].L; // devIn[id].WHM = 1.0f; __syncthreads(); } __device__ float gen_u(unsigned int id, curandState *state, float ave, float vr) { float u; curandState localState = state[id]; u = curand_normal(&localState) * vr + ave; return u; } __device__ float inputGenerator(int t, float mean, float var, float *Cov, float *z) { int index; if( t == 0){ index = t; }else{ // index = t; index = t * HORIZON; } float ans, temp; temp = 0.0f; for(int k = 0; k < HORIZON; k++) { // temp += Cov[ index + k * HORIZON] * z[k]; temp += Cov[ index + k] * z[k]; } ans = mean + var * temp; return ans; } //MCMPC for Simple Nonlinear System __global__ void MCMPC_Simple_NonLinear_Example(float *state, curandState *randomSeed , float *mean, InputVector *d_data, float var, float *d_param, float *d_constraints, float *d_matrix, float *cost_vec){ unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float qx = 0.0f; float total_cost = 0.0f; float u[HORIZON] = { }; float stateInThisThreads[DIM_OF_STATES] = { }; float dstateInThisThreads[DIM_OF_STATES] = { }; //copy statevector for calculate forward simulation result in each thread. 
for(int i = 0; i < DIM_OF_STATES; i++){ stateInThisThreads[i] = state[i]; } // do simulation in each thread for(int step = 0; step < HORIZON; step++) { u[step] = gen_u(seq, randomSeed, mean[step], var); seq += NUM_OF_SAMPLES; if(isnan(u[step])){ u[step] = d_data[0].Input[step]; } //printf("id==%d u==%f\n", id, u[step]); calc_nonLinear_example(stateInThisThreads, u[step], d_param, dstateInThisThreads); stateInThisThreads[0] = stateInThisThreads[0] + (interval * dstateInThisThreads[0]); stateInThisThreads[1] = stateInThisThreads[1] + (interval * dstateInThisThreads[1]); qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + d_matrix[2] * u[step] * u[step]; total_cost += qx; qx = 0.0f; } //printf("id==%d L==%f %f %f %f %f\n", id, total_cost, u[0], u[1], u[2], u[3]); float KL_COST, S, lambda; lambda = 200;/*HORIZON * DIM_OF_STATES;*/ S = total_cost / lambda; KL_COST = exp(-S); //printf("id==%d L==%f W == %f %f %f %f %f\n", id, total_cost, KL_COST, u[0], u[1], u[2], u[3]); __syncthreads(); d_data[id].W = KL_COST; d_data[id].L = total_cost; cost_vec[id] = total_cost; for(int i = 0; i < HORIZON; i++){ d_data[id].Input[i] = u[i]; } __syncthreads(); } __global__ void MCMPC_Crat_and_SinglePole(float *state, curandState *randomSeed, float *mean, InputVector *d_data, float var, float *d_param, float *d_constraints, float *d_matrix, float *cost_vec) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float qx = 0.0f; float total_cost = 0.0f; float u[HORIZON] = { }; float stateInThisThreads[DIM_OF_STATES] = { }; float dstateInThisThreads[DIM_OF_STATES] = { }; for(int i = 0; i < DIM_OF_STATES; i++){ stateInThisThreads[i] = state[i]; } for(int t = 0; t < HORIZON; t++){ if(isnan(mean[t])){ //u[t] = d_data[0].Input[t]; if(t < HORIZON -1){ u[t] = gen_u(seq, randomSeed, d_data[0].Input[t+1], var); seq += NUM_OF_SAMPLES; }else{ u[t] = gen_u(seq, randomSeed, 
d_data[0].Input[HORIZON - 1], var); seq += NUM_OF_SAMPLES; } }else{ u[t] = gen_u(seq, randomSeed, mean[t], var); seq += NUM_OF_SAMPLES; } if(u[t] < d_constraints[0]){ u[t] = d_constraints[0]; } if(u[t] > d_constraints[1]){ u[t] = d_constraints[1]; } // まずは、オイラー積分(100Hz 40stepで倒立できるか) → 0.4秒先まで予測 // 問題が起きたら、0次ホールダーでやってみる、それでもダメならMPCの再設計 /*dstateInThisThreads[0] = stateInThisThreads[2]; dstateInThisThreads[1] = stateInThisThreads[3]; dstateInThisThreads[2] = Cart_type_Pendulum_ddx(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); //ddx dstateInThisThreads[3] = Cart_type_Pendulum_ddtheta(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); stateInThisThreads[2] = stateInThisThreads[2] + (interval * dstateInThisThreads[2]); stateInThisThreads[3] = stateInThisThreads[3] + (interval * dstateInThisThreads[3]); stateInThisThreads[0] = stateInThisThreads[0] + (interval * dstateInThisThreads[0]); stateInThisThreads[1] = stateInThisThreads[1] + (interval * dstateInThisThreads[1]);*/ for(int sec = 0; sec < 1; sec++){ dstateInThisThreads[0] = stateInThisThreads[2]; dstateInThisThreads[1] = stateInThisThreads[3]; dstateInThisThreads[2] = Cart_type_Pendulum_ddx(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); //ddx dstateInThisThreads[3] = Cart_type_Pendulum_ddtheta(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); stateInThisThreads[2] = stateInThisThreads[2] + (interval * dstateInThisThreads[2]); stateInThisThreads[3] = stateInThisThreads[3] + (interval * dstateInThisThreads[3]); stateInThisThreads[0] = stateInThisThreads[0] + (interval * dstateInThisThreads[0]); stateInThisThreads[1] = stateInThisThreads[1] + (interval * dstateInThisThreads[1]); } /*while(stateInThisThreads[1] > M_PI) stateInThisThreads[1] -= (2 * M_PI); while(stateInThisThreads[1] < 
-M_PI) stateInThisThreads[1] += (2 * M_PI);*/ // upper side: MATLAB で使用している評価関数を参考 /* qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + u[t] * u[t] * d_matrix[3]; */ /*qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4];*/ qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4]; if(t == HORIZON -1){ qx += stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3]; // qx += stateInThisThreads[0] * stateInThisThreads[0] * (d_matrix[0] + 2.0f) + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] // + stateInThisThreads[2] * stateInThisThreads[2] * (d_matrix[2] + 0.04f) + stateInThisThreads[3] * stateInThisThreads[3] * (d_matrix[3] + 0.009f); } /*qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + (1 - cosf(stateInThisThreads[1])) * (1 - cosf(stateInThisThreads[1])) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4];*/ // constraints described by Barrier Function Method /*if(stateInThisThreads[0] <= 0){ qx += 1 / (powf(stateInThisThreads[0] - d_constraints[2],2) * invBarrier); if(stateInThisThreads[0] < d_constraints[2]){ qx += 1000000; } }else{ qx += 1 / (powf(d_constraints[3] 
- stateInThisThreads[0],2) * invBarrier); if(stateInThisThreads[0] > d_constraints[3]){ qx += 1000000; } }*/ total_cost += qx; qx = 0.0f; } if(isnan(total_cost)) { total_cost = 1000000 * 5; } float KL_COST, S, lambda, HM_COST, HM; //int NomarizationCost = sizeOfParaboloidElements + addTermForLSM; //LSMでparaboloidをフィッティングする際に行列の lambda = 0.5 * HORIZON; HM = total_cost / (0.25*HORIZON); //0.75 S = total_cost / lambda; KL_COST = exp(-S); HM_COST = exp(-HM); __syncthreads(); d_data[id].WHM = HM_COST; d_data[id].W = KL_COST; // d_data[id].L = total_cost / sizeOfParaboloidElements; d_data[id].L = total_cost / HORIZON; cost_vec[id] = total_cost; for(int index = 0; index < HORIZON; index++){ d_data[id].Input[index] = u[index]; } __syncthreads(); } __global__ void CMAMCMPC_Cart_and_SinglePole(float *state, curandState *r_seed, float *covariance, float *mean, InputVector *d_data, float variance, float *d_param, float *d_constraints, float *d_matrix, float *cost_vec) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; //種のインデックス(これを更新しないと全く同じ乱数が生成される) float qx = 0.0f; //stage_cost float total_cost = 0.0f; //total_cost float u[HORIZON] = { }; //シミュレーションに使用する入力 float z[HORIZON] = { }; //↑の入力から前回の推定値分平行移動した入力 float stateInThisThreads[DIM_OF_STATES] = { }; float dstateInThisThreads[DIM_OF_STATES] = { }; for(int i = 0; i < DIM_OF_STATES; i++){ stateInThisThreads[i] = state[i]; } for(int t_u = 0; t_u < HORIZON; t_u++) { z[t_u] = gen_u( id, r_seed, 0.0f, 1.0f); seq += NUM_OF_SAMPLES; // seq += HORIZON; } __syncthreads( ); for(int t = 0; t < HORIZON; t++) { u[t] = inputGenerator(t, mean[t], variance, covariance, z); // 入力制約の実装 if(u[t] < d_constraints[0]){ u[t] = d_constraints[0]; } if(u[t] > d_constraints[1]){ u[t] = d_constraints[1]; } for(int sec = 0; sec < 1; sec++){ dstateInThisThreads[0] = stateInThisThreads[2]; dstateInThisThreads[1] = stateInThisThreads[3]; dstateInThisThreads[2] = Cart_type_Pendulum_ddx(u[t], stateInThisThreads[0], 
stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); //ddx dstateInThisThreads[3] = Cart_type_Pendulum_ddtheta(u[t], stateInThisThreads[0], stateInThisThreads[1], stateInThisThreads[2], stateInThisThreads[3], d_param); stateInThisThreads[2] = stateInThisThreads[2] + (interval * dstateInThisThreads[2]); stateInThisThreads[3] = stateInThisThreads[3] + (interval * dstateInThisThreads[3]); stateInThisThreads[0] = stateInThisThreads[0] + (interval * dstateInThisThreads[0]); stateInThisThreads[1] = stateInThisThreads[1] + (interval * dstateInThisThreads[1]); } /*while(stateInThisThreads[1] > M_PI) stateInThisThreads[1] -= (2 * M_PI); while(stateInThisThreads[1] < -M_PI) stateInThisThreads[1] += (2 * M_PI);*/ // upper side: MATLAB で使用している評価関数を参考 /* qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + u[t] * u[t] * d_matrix[3]; */ /*qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + stateInThisThreads[1] * stateInThisThreads[1] * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4];*/ qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4]; if(t == HORIZON -1){ qx += stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3]; // qx += stateInThisThreads[0] * stateInThisThreads[0] * (d_matrix[0] + 2.0f) + sinf(stateInThisThreads[1] / 2) * sinf(stateInThisThreads[1]/2) * d_matrix[1] // + stateInThisThreads[2] * 
stateInThisThreads[2] * (d_matrix[2] + 0.04f) + stateInThisThreads[3] * stateInThisThreads[3] * (d_matrix[3] + 0.009f); } /*qx = stateInThisThreads[0] * stateInThisThreads[0] * d_matrix[0] + (1 - cosf(stateInThisThreads[1])) * (1 - cosf(stateInThisThreads[1])) * d_matrix[1] + stateInThisThreads[2] * stateInThisThreads[2] * d_matrix[2] + stateInThisThreads[3] * stateInThisThreads[3] * d_matrix[3] + u[t] * u[t] * d_matrix[4];*/ // constraints described by Barrier Function Method /*if(stateInThisThreads[0] <= 0){ qx += 1 / (powf(stateInThisThreads[0] - d_constraints[2],2) * invBarrier); if(stateInThisThreads[0] < d_constraints[2]){ qx += 1000000; } }else{ qx += 1 / (powf(d_constraints[3] - stateInThisThreads[0],2) * invBarrier); if(stateInThisThreads[0] > d_constraints[3]){ qx += 1000000; } }*/ total_cost += qx; qx = 0.0f; } if(isnan(total_cost)) { total_cost = 1000000 * 5; } float KL_COST, S, lambda, HM_COST, HM; //int NomarizationCost = sizeOfParaboloidElements + addTermForLSM; //LSMでparaboloidをフィッティングする際に行列の lambda = 0.5 * HORIZON; HM = total_cost / (0.25*HORIZON); //0.75 S = total_cost / lambda; KL_COST = exp(-S); HM_COST = exp(-HM); __syncthreads(); d_data[id].WHM = HM_COST; d_data[id].W = KL_COST; // d_data[id].L = total_cost / sizeOfParaboloidElements; d_data[id].L = total_cost / HORIZON; cost_vec[id] = total_cost; for(int index = 0; index < HORIZON; index++){ d_data[id].Input[index] = u[index]; } __syncthreads(); }
953f27c4a019f6721a1d9308bfa0de11a533258f.hip
// !!! This is a file automatically generated by hipify!!! #include "stdafx.h" #include "Recon_Runtime.h" #include <hip/hip_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #include "cudaMatrixOp.cuh" #include <thrust\device_ptr.h> #include <thrust\transform.h> #include <thrust\functional.h> #include <thrust\reduce.h> using namespace cusp; using namespace thrust; Recon_Runtime::Recon_Runtime() { } Recon_Runtime::~Recon_Runtime() { } // struct saxpy_functor { const float a; saxpy_functor(float _a) : a(_a) {}; __host__ __device__ complex<float> operator() (const complex<float> & x, const complex<float> & y) const { return (x * a + y); } }; struct saxmy_functor { const float a; saxmy_functor(float _a) : a(_a) {}; __host__ __device__ complex<float> operator() (const complex<float> & x, const complex<float> & y) const { return x * a - y; } }; struct sparse_energy { const float a; const float b; sparse_energy(float _a, float _b) : a(_a), b(_b) {}; __host__ __device__ float operator() (const complex<float> & x, const complex<float> & y) const { return sqrt(norm(x*a + y) + b); } }; struct sparse_gradient { const float a; sparse_gradient(float _a) : a(_a) {}; __host__ __device__ complex<float> operator() (const complex<float> & x) const { return x / sqrt(norm(x) + a); } }; struct module_functor { __host__ __device__ float operator() (const complex<float> & x) const { return abs(x); } }; // struct conj_multiply { __host__ __device__ complex<float> operator() (const complex<float> & x, const complex<float> & y) const { return conj(x) * y; } }; //Set kernel extern "C" void setConvolutionLowKernel(float *filter) { hipMemcpyToSymbol(l_Kernel, filter, sizeof(float) * KERNEL_LENGTH); } extern "C" void setConvolutionHighKernel(float *filter) { hipMemcpyToSymbol(h_Kernel, filter, sizeof(float) * KERNEL_LENGTH); } extern "C" void Recon_Runtime::SetFilterParams(unsigned int coarse_level) { _coarse_level = coarse_level; } extern "C" void 
Recon_Runtime::GenerateFilter(wchar_t * filter_type_name, unsigned int coarse_level) { _filter.clear(); assert(filter_type_name != NULL); SetFilterParams(coarse_level); std::wifstream filter_file; filter_file.open(filter_type_name); float fdata; if (!filter_file) { return; } if (filter_file.is_open()) { while (filter_file.good() && !filter_file.eof()) { filter_file >> fdata; _filter.push_back(fdata); } } filter_file.close(); } extern "C" unsigned int Recon_Runtime::QuadLength(unsigned int length) { unsigned int k = 1; unsigned int J = 0; while (k < length) { k *= 2; J += 1; } return J; } extern "C" void Fwt2D(complex<float>* d_raw, complex<float>* d_odata, unsigned int pitch, unsigned int p, unsigned int scale, unsigned int J, unsigned int imageW, unsigned int imageH) { const unsigned int nc = imageW; unsigned int mem_shared; dim3 block; dim3 grid; block.x = nc; //256 grid.y = nc; unsigned int length = nc; dim3 dimBlock(imageW, 1, 1); dim3 dimGrid(1, imageH, 1); complex_copy << <dimGrid, dimBlock >> >(d_odata, d_raw, imageW, imageH); device_vector<complex<float>> d_tranp(nc*nc); for (unsigned int jscale = J - 1; jscale >= scale; --jscale) { block.x = length + p; block.y = 1; grid.x = 1; grid.y = length; mem_shared = (length + p)*sizeof(float) * 2; // DownsampleLow << <grid, block, mem_shared >> >(p, d_odata, raw_pointer_cast(d_tranp.data()), length, pitch); DownsampleHigh << <grid, block, mem_shared >> >(p, d_odata, raw_pointer_cast(d_tranp.data()), length, pitch); // DownsampleLow << <grid, block, mem_shared >> >(p, raw_pointer_cast(d_tranp.data()), d_odata, length, pitch); DownsampleHigh << <grid, block, mem_shared >> >(p, raw_pointer_cast(d_tranp.data()), d_odata, length, pitch); length = length >> 1; } } extern "C" void IFwt2D(complex<float>* d_raw, complex<float>* d_odata, unsigned int pitch, unsigned int p, unsigned int scale, unsigned int J, unsigned int imageW, unsigned int imageH, unsigned int np) { unsigned int nc = imageW; unsigned int mem_shared; //copy 
kernel dim3 block; dim3 grid; unsigned int length = np; dim3 dimBlock(imageW, 1, 1); dim3 dimGrid(1, imageH, 1); complex_copy << <dimGrid, dimBlock >> >(d_odata, d_raw, imageW, imageH); device_vector<complex<float>> d_tranp(nc*nc); for (unsigned int jscale = scale; jscale <= J - 1; ++jscale) { mem_shared = (length + p)*sizeof(complex<float>) * 2; block.x = 1; block.y = length + p; grid.x = length; grid.y = 1; //, IdwtDb1D << <grid, block, mem_shared >> >(p, d_odata, raw_pointer_cast(d_tranp.data()), length, pitch); // IdwtDb1D << <grid, block, mem_shared >> >(p, raw_pointer_cast(d_tranp.data()), d_odata, length, pitch); length = length << 1; } } extern "C" void Recon_Runtime::ComputeGradient(complex<float> * d_kspace, device_vector<complex<float>>& d_gradient, ParameterSet& params, float * d_mask, complex<float> * d_undersampled_kspace, hipfftHandle& plan, unsigned int lpitch, unsigned int rpitch, unsigned int p, unsigned int scale, unsigned int J, VariationSet& variationSet) { dim3 dimBlock(_imageW, 1, 1); dim3 dimGrid(1, _imageH, 1); GetFidelityGradient << <dimGrid, dimBlock >> >(raw_pointer_cast(d_gradient.data()), d_kspace, d_mask, d_undersampled_kspace, lpitch, rpitch, _imageW, _imageH); //d_kspace float epsilon = static_cast<float>(::pow(10, -7)); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); Fwt2D(raw_pointer_cast(variationSet.d_image.data()), raw_pointer_cast(variationSet.wavelet_buffer.data()), lpitch, p, scale, J, _imageW, _imageH); hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); FTV(raw_pointer_cast(variationSet.d_image.data()), variationSet.tv_vertical_buffer, variationSet.tv_horizontal_buffer, _imageW, _imageH, lpitch); if (params.tv_weight || params.wavelet_weight) { GetSparseGradient << <dimGrid, dimBlock >> >(raw_pointer_cast(variationSet.tv_vertical_buffer.data()), raw_pointer_cast(variationSet.tv_horizontal_buffer.data()), 
raw_pointer_cast(variationSet.wavelet_buffer.data()), epsilon, lpitch, _imageW, _imageH);// } unsigned int np = static_cast<unsigned int>(::pow(2, scale + 1)); IFwt2D(raw_pointer_cast(variationSet.wavelet_buffer.data()), raw_pointer_cast(variationSet.wavelet_buffer.data()), lpitch, p, scale, J, _imageW, _imageH, np); // bool ft_direction = true; Fft(raw_pointer_cast(variationSet.wavelet_buffer.data()), raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.wavelet_buffer.data()), ft_direction, plan, _imageW, _imageH, lpitch); // IFTV(variationSet.tv_vertical_buffer, variationSet.tv_horizontal_buffer, variationSet.tv_buffer, variationSet.itv_horizontal_buffer, variationSet.itv_vertical_buffer, _imageW, _imageH, lpitch); Fft(raw_pointer_cast(variationSet.tv_buffer.data()), raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.tv_buffer.data()), ft_direction, plan, _imageW, _imageH, lpitch); // GetGradient << <dimGrid, dimBlock >> >(raw_pointer_cast(variationSet.wavelet_buffer.data()), raw_pointer_cast(variationSet.tv_buffer.data()), raw_pointer_cast(d_gradient.data()), params.wavelet_weight, params.tv_weight, _imageW, _imageH, lpitch); } extern "C" float Recon_Runtime::CalculateEnergy(device_vector<complex<float>>& d_recon_kspace, device_vector<complex<float>>& diff_recon_kspace, device_vector<complex<float>>& d_recon_wavelet, device_vector<complex<float>>& diff_recon_wavelet, device_vector<complex<float>>& d_tv_horizontal, device_vector<complex<float>>& d_tv_vertical, device_vector<complex<float>>& diff_recon_tv_vertical, device_vector<complex<float>>& diff_recon_tv_horizontal, complex<float>* d_undersampled_kspace, device_vector<float> & energy_buffer, ParameterSet&params, float step, unsigned int lpitch, unsigned int rpitch) { float sum_energy = 0.0f; dim3 dimGrid(1, _imageH, 1); dim3 dimBlock(_imageW, 1, 1); // device_vector<float> result(_imageH*_imageW); CalculateFidelity << <dimGrid, dimBlock >> 
>(raw_pointer_cast(diff_recon_kspace.data()), raw_pointer_cast(d_recon_kspace.data()), step, d_undersampled_kspace, raw_pointer_cast(result.data()), lpitch, rpitch, _imageW, _imageH); // sum_energy += thrust::reduce(result.begin(), result.end(), 0.0f, thrust::plus<float>()); float epsilon = static_cast<float>(::pow(10, -7)); // transform(diff_recon_wavelet.begin(), diff_recon_wavelet.end(), d_recon_wavelet.begin(), result.begin(), sparse_energy(step, epsilon)); // if (params.wavelet_weight) { sum_energy += thrust::reduce(result.begin(), result.end(), 0.0f, thrust::plus<float>()) * params.wavelet_weight; } // TV if (params.tv_weight) { transform(diff_recon_tv_vertical.begin(), diff_recon_tv_vertical.end(), d_tv_vertical.begin(), result.begin(), sparse_energy(step, epsilon)); // sum_energy += thrust::reduce(result.begin(), result.end(), 0.0f, thrust::plus<float>()) * params.tv_weight; transform(diff_recon_tv_horizontal.begin(), diff_recon_tv_horizontal.end(), d_tv_horizontal.begin(), result.begin(), sparse_energy(step, epsilon)); // sum_energy += thrust::reduce(result.begin(), result.end(), 0.0f, thrust::plus<float>()) * params.tv_weight; } return sum_energy; } extern "C" void Recon_Runtime::GetReconData(std::vector<float> mask, std::vector<std::complex<float>> raw_data, ParameterSet& params, unsigned int width, unsigned int height, float& elapsed_time, std::complex<float> * recon_data) { _imageW = width; _imageH = height; complex<float> *d_kspace, *d_undersampled_kspace, *d_image; float *d_mask; size_t lpitch, rpitch; //lpitch rpitch dim3 dimBlock(_imageW, 1, 1); dim3 dimGrid(1, _imageH, 1); device_vector<float> realPart(_imageH*_imageW); device_vector<float> imagPart(_imageH*_imageW); hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); hipEventRecord(start, 0); //fft plan bool ft_direction = false; //false k; truek hipfftHandle plan; checkCudaErrors(hipfftPlan2d(&plan, _imageH, _imageW, HIPFFT_C2C)); 
checkCudaErrors(hipMallocPitch((void **)&d_kspace, &lpitch, _imageW*sizeof(complex<float>), _imageH)); checkCudaErrors(hipMallocPitch((void **)&d_undersampled_kspace, &lpitch, _imageW*sizeof(complex<float>), _imageH)); checkCudaErrors(hipMallocPitch((void **)&d_mask, &rpitch, _imageW*sizeof(float), _imageH)); checkCudaErrors(hipMallocPitch((void **)&d_image, &lpitch, _imageW*sizeof(complex<float>), _imageH)); checkCudaErrors(hipMemcpy2D(d_kspace, lpitch, raw_data.data(), sizeof(float) * 2 * _imageW, sizeof(float) * 2 * _imageW, _imageH, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy2D(d_mask, rpitch, mask.data(), sizeof(float)*_imageW, sizeof(float)*_imageW, _imageH, hipMemcpyHostToDevice)); complex_copy << <dimGrid, dimBlock >> >(d_undersampled_kspace, d_kspace, _imageW, _imageH); // // std::shared_ptr<CWavelet> wavelet = std::shared_ptr<CWavelet>(new CWavelet()); GenerateFilter(L"daub2.flt", 4); std::vector<float> filter = GetFilter(); unsigned int scale = GetCoarseLevel(); unsigned int J = QuadLength(_imageW); unsigned int p = filter.size(); std::vector<float> float_filter(filter.size()); std::vector<float> mirror_filter(filter.size()); auto mirror_cursor = mirror_filter.data(); auto filter_cursor = filter.data(); auto float_cursor = float_filter.data(); auto end_cursor = filter_cursor + filter.size(); for (unsigned int i = 0; i < filter.size(); ++i) { *float_cursor = static_cast<float>(*filter_cursor); if (i % 2 == 0) *mirror_cursor = *float_cursor; else *mirror_cursor = -(*float_cursor); ++float_cursor; ++filter_cursor; ++mirror_cursor; } // setConvolutionLowKernel(float_filter.data()); setConvolutionHighKernel(mirror_filter.data()); // device_vector<complex<float>> d_gradient(_imageH*_imageW), temp_image(_imageH*_imageW), d_wavelet(_imageH*_imageW), d_tv_vertical(_imageH*_imageW), d_tv_horizontal(_imageH*_imageW), d_new_gradient(_imageH*_imageW), d_recon_image(_imageH*_imageW), diff_recon_image(_imageH*_imageW), d_recon_wavelet(_imageH*_imageW), 
diff_recon_wavelet(_imageH*_imageW), d_recon_tv_horizontal(_imageH*_imageW), d_recon_tv_vertical(_imageH*_imageW), diff_recon_tv_horizontal(_imageH*_imageW), diff_recon_tv_vertical(_imageH*_imageW), d_recon_kspace(_imageH*_imageW), diff_recon_data(_imageH*_imageW), diff_recon_kspace(_imageW*_imageH), d_conj_module(_imageW*_imageH), fft_buffer(_imageW*_imageH), itv_horizontal_buffer(_imageW*_imageH), itv_vertical_buffer(_imageW*_imageH); device_vector<float> diff_recon_data_module(_imageW*_imageH); device_vector<float> g1_norm(_imageH*_imageW); device_vector<float> g0_norm(_imageW*_imageH); device_vector<float> energy_buffer(_imageW*_imageH); device_vector<complex<float>> wavelet_buffer(_imageW*_imageH); device_vector<complex<float>> tv_buffer(_imageW*_imageH); device_vector<complex<float>> tv_vertical_buffer(_imageW*_imageH); device_vector<complex<float>> tv_horizontal_buffer(_imageW*_imageH); VariationSet tmpSet = { d_gradient, temp_image, d_wavelet, d_tv_vertical, d_tv_horizontal, d_new_gradient, d_recon_image, diff_recon_image, d_recon_wavelet, diff_recon_wavelet, d_recon_tv_horizontal, d_recon_tv_vertical, diff_recon_tv_horizontal, diff_recon_tv_vertical, d_recon_kspace, diff_recon_data, diff_recon_kspace, d_conj_module ,diff_recon_data_module, g0_norm, g1_norm, fft_buffer, itv_horizontal_buffer, itv_vertical_buffer, wavelet_buffer, tv_buffer, tv_horizontal_buffer, tv_vertical_buffer, energy_buffer }; // // for (int i = 0; i < 5; ++i) // { Run(d_kspace, d_undersampled_kspace, d_mask, params, lpitch, rpitch, ft_direction, plan, realPart, imagPart, scale, p, J, tmpSet); // } // Fft(d_kspace, raw_pointer_cast(tmpSet.fft_buffer.data()), d_image, ft_direction, plan, _imageW, _imageH, lpitch); checkCudaErrors(hipMemcpy2D(recon_data, sizeof(float)*_imageW * 2, d_kspace, lpitch, sizeof(float)*_imageW * 2, _imageH, hipMemcpyDeviceToHost)); hipEventRecord(stop, 0); hipEventSynchronize(stop); checkCudaErrors(hipEventElapsedTime(&elapsed_time, start, stop)); 
checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); // auto cursor = recon_data; // for (auto iter = raw_data.begin(); iter != raw_data.end(); ++iter) // { // float real = static_cast<float>((*iter).real()); // float imag = static_cast<float>((*iter).imag()); // *cursor = std::complex<float>(real, imag); // ++cursor; // } checkCudaErrors(hipfftDestroy(plan)); checkCudaErrors(hipFree(d_kspace)); checkCudaErrors(hipFree(d_mask)); checkCudaErrors(hipFree(d_undersampled_kspace)); checkCudaErrors(hipFree(d_image)); } extern "C" void Recon_Runtime::Run(complex<float> *d_kspace, complex<float> *d_undersampled_kspace, float *d_mask, ParameterSet& params, unsigned int lpitch, unsigned int rpitch, bool& ft_direction, hipfftHandle& plan, device_vector<float>& realPart, device_vector<float>& imagPart, unsigned int p, unsigned int scale, unsigned int J, VariationSet& variationSet) { _iteration_count = 0; float eps = (float)(2.22044604925031 * ::pow(10, -16)); //kernel dim3 dimBlock(_imageW, 1, 1); dim3 dimGrid(1, _imageH, 1); device_ptr<complex<float>> kspace_ptr(d_kspace); Fft(d_kspace, raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.d_image.data()), ft_direction, plan, _imageW, _imageH, lpitch); //k [] ComputeGradient(d_kspace, variationSet.d_gradient, params, d_mask, d_undersampled_kspace, plan, lpitch, rpitch, p, scale, J, variationSet); //g0 //transform(variationSet.d_gradient.begin(), variationSet.d_gradient.end(), variationSet.diff_recon_data.begin(), opposite_functor()); //diff_recon_data = -g0dx opposite << <dimGrid, dimBlock >> >(raw_pointer_cast(variationSet.d_gradient.data()), raw_pointer_cast(variationSet.diff_recon_data.data()), _imageW, _imageH); float epsilon = ::pow(10, -7); // while (true) { // diff_recon_kspace = -g0 * mask; dot_multiply << <dimGrid, dimBlock >> >(d_kspace, raw_pointer_cast(variationSet.diff_recon_data.data()), d_mask, raw_pointer_cast(variationSet.d_recon_kspace.data()), 
raw_pointer_cast(variationSet.diff_recon_kspace.data()), lpitch, rpitch, _imageW, _imageH); Fft(d_kspace, raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.d_recon_image.data()), ft_direction, plan, _imageW, _imageH, lpitch); // Fft(raw_pointer_cast(variationSet.diff_recon_data.data()), raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.diff_recon_image.data()), ft_direction, plan, _imageW, _imageH, lpitch); // Fwt2D(raw_pointer_cast(variationSet.d_recon_image.data()), raw_pointer_cast(variationSet.d_recon_wavelet.data()), lpitch, p, scale, J, _imageW, _imageH); // Fwt2D(raw_pointer_cast(variationSet.diff_recon_image.data()), raw_pointer_cast(variationSet.diff_recon_wavelet.data()), lpitch, p, scale, J, _imageW, _imageH); // FTV(raw_pointer_cast(variationSet.d_recon_image.data()), variationSet.d_tv_vertical, variationSet.d_tv_horizontal, _imageW, _imageH, lpitch); // FTV(raw_pointer_cast(variationSet.diff_recon_image.data()), variationSet.diff_recon_tv_vertical, variationSet.diff_recon_tv_horizontal, _imageW, _imageH, lpitch); // float initial_step = 0.0f; float initial_energy = 0.0f; initial_energy = CalculateEnergy(variationSet.d_recon_kspace, variationSet.diff_recon_kspace, variationSet.d_recon_wavelet, variationSet.diff_recon_wavelet, variationSet.d_tv_horizontal, variationSet.d_tv_vertical, variationSet.diff_recon_tv_vertical, variationSet.diff_recon_tv_horizontal, d_undersampled_kspace, variationSet.energy_buffer, params, initial_step, lpitch, rpitch); float step = params.initial_line_search_step; float final_energy = 0.0f; final_energy = CalculateEnergy(variationSet.d_recon_kspace, variationSet.diff_recon_kspace, variationSet.d_recon_wavelet, variationSet.diff_recon_wavelet, variationSet.d_tv_horizontal, variationSet.d_tv_vertical, variationSet.diff_recon_tv_vertical, variationSet.diff_recon_tv_horizontal, d_undersampled_kspace, variationSet.energy_buffer, params, step, lpitch, rpitch); unsigned int 
line_search_times = 0; transform(variationSet.d_gradient.begin(), variationSet.d_gradient.end(), variationSet.diff_recon_data.begin(), variationSet.d_conj_module.begin(), conj_multiply()); // depart_functor << <dimGrid, dimBlock >> >(raw_pointer_cast(realPart.data()), raw_pointer_cast(imagPart.data()), raw_pointer_cast(variationSet.d_conj_module.data()), lpitch, rpitch, _imageW, _imageH); float realResult = thrust::reduce(realPart.begin(), realPart.end(), 0.0f, thrust::plus<float>()); float imagResult = thrust::reduce(imagPart.begin(), imagPart.end(), 0.0f, thrust::plus<float>()); float energy_variation_g0 = std::sqrt(realResult*realResult + imagResult*imagResult); // while ((final_energy > initial_energy - params.line_search_alpha * step * energy_variation_g0) && (line_search_times < params.max_line_search_times)) { line_search_times++; step = step * params.line_search_beta; final_energy = CalculateEnergy(variationSet.d_recon_kspace, variationSet.diff_recon_kspace, variationSet.d_recon_wavelet, variationSet.diff_recon_wavelet, variationSet.d_tv_horizontal, variationSet.d_tv_vertical, variationSet.diff_recon_tv_vertical, variationSet.diff_recon_tv_horizontal, d_undersampled_kspace, variationSet.energy_buffer, params, step, lpitch, rpitch); } if (line_search_times == params.max_line_search_times) { assert(0); } if (line_search_times > 2) { params.initial_line_search_step = params.initial_line_search_step * params.line_search_beta; } if (line_search_times < 1) { params.initial_line_search_step = params.initial_line_search_step / params.line_search_beta; } transform(variationSet.diff_recon_data.begin(), variationSet.diff_recon_data.end(), kspace_ptr, kspace_ptr, saxpy_functor(step)); //d_kspace Fft(d_kspace, raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.d_image.data()), ft_direction, plan, _imageW, _imageH, lpitch); //d_kspaced_image ComputeGradient(d_kspace, variationSet.d_new_gradient, params, d_mask, d_undersampled_kspace, plan, 
lpitch, rpitch, p, scale, J, variationSet); ////d_new_gradient, g1 norm_kernel << <dimGrid, dimBlock >> >(raw_pointer_cast(variationSet.d_gradient.data()), raw_pointer_cast(variationSet.d_new_gradient.data()), raw_pointer_cast(variationSet.g0_norm.data()), raw_pointer_cast(variationSet.g1_norm.data()), lpitch, rpitch, _imageW, _imageH); float sum_energy_g0 = thrust::reduce(variationSet.g0_norm.begin(), variationSet.g0_norm.end(), 0.0f, thrust::plus<float>()); float sum_energy_g1 = thrust::reduce(variationSet.g1_norm.begin(), variationSet.g1_norm.end(), 0.0f, thrust::plus<float>()); float ellipse_factor = sum_energy_g1 / (sum_energy_g0 + eps); complex_copy << <dimGrid, dimBlock >> >(raw_pointer_cast(variationSet.d_gradient.data()), raw_pointer_cast(variationSet.d_new_gradient.data()), _imageW, _imageH); //g0 = g1 g0 transform(variationSet.diff_recon_data.begin(), variationSet.diff_recon_data.end(), variationSet.d_new_gradient.begin(), variationSet.diff_recon_data.begin(), saxmy_functor(ellipse_factor)); //diff_recon_data; ++_iteration_count; transform(variationSet.diff_recon_data.begin(), variationSet.diff_recon_data.end(), variationSet.diff_recon_data_module.begin(), module_functor()); float differential_reconstruct_data_sum = thrust::reduce(variationSet.diff_recon_data_module.begin(), variationSet.diff_recon_data_module.end(), 0.0f, thrust::plus<float>()); if ((_iteration_count > params.max_conjugate_gradient_iteration_times) || (differential_reconstruct_data_sum < params.gradient_tollerance)) { break; } } }
953f27c4a019f6721a1d9308bfa0de11a533258f.cu
#include "stdafx.h" #include "Recon_Runtime.h" #include <cuda_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #include "cudaMatrixOp.cuh" #include <thrust\device_ptr.h> #include <thrust\transform.h> #include <thrust\functional.h> #include <thrust\reduce.h> using namespace cusp; using namespace thrust; Recon_Runtime::Recon_Runtime() { } Recon_Runtime::~Recon_Runtime() { } //仿函数定义 struct saxpy_functor { const float a; saxpy_functor(float _a) : a(_a) {}; __host__ __device__ complex<float> operator() (const complex<float> & x, const complex<float> & y) const { return (x * a + y); } }; struct saxmy_functor { const float a; saxmy_functor(float _a) : a(_a) {}; __host__ __device__ complex<float> operator() (const complex<float> & x, const complex<float> & y) const { return x * a - y; } }; struct sparse_energy { const float a; const float b; sparse_energy(float _a, float _b) : a(_a), b(_b) {}; __host__ __device__ float operator() (const complex<float> & x, const complex<float> & y) const { return sqrt(norm(x*a + y) + b); } }; struct sparse_gradient { const float a; sparse_gradient(float _a) : a(_a) {}; __host__ __device__ complex<float> operator() (const complex<float> & x) const { return x / sqrt(norm(x) + a); } }; struct module_functor { __host__ __device__ float operator() (const complex<float> & x) const { return abs(x); } }; //复数矩阵的共轭矩阵乘以复数矩阵 struct conj_multiply { __host__ __device__ complex<float> operator() (const complex<float> & x, const complex<float> & y) const { return conj(x) * y; } }; //Set kernel extern "C" void setConvolutionLowKernel(float *filter) { cudaMemcpyToSymbol(l_Kernel, filter, sizeof(float) * KERNEL_LENGTH); } extern "C" void setConvolutionHighKernel(float *filter) { cudaMemcpyToSymbol(h_Kernel, filter, sizeof(float) * KERNEL_LENGTH); } extern "C" void Recon_Runtime::SetFilterParams(unsigned int coarse_level) { _coarse_level = coarse_level; } extern "C" void Recon_Runtime::GenerateFilter(wchar_t * filter_type_name, unsigned int 
coarse_level) { _filter.clear(); assert(filter_type_name != NULL); SetFilterParams(coarse_level); std::wifstream filter_file; filter_file.open(filter_type_name); float fdata; if (!filter_file) { return; } if (filter_file.is_open()) { while (filter_file.good() && !filter_file.eof()) { filter_file >> fdata; _filter.push_back(fdata); } } filter_file.close(); } extern "C" unsigned int Recon_Runtime::QuadLength(unsigned int length) { unsigned int k = 1; unsigned int J = 0; while (k < length) { k *= 2; J += 1; } return J; } extern "C" void Fwt2D(complex<float>* d_raw, complex<float>* d_odata, unsigned int pitch, unsigned int p, unsigned int scale, unsigned int J, unsigned int imageW, unsigned int imageH) { const unsigned int nc = imageW; unsigned int mem_shared; dim3 block; dim3 grid; block.x = nc; //初始值为256 grid.y = nc; unsigned int length = nc; dim3 dimBlock(imageW, 1, 1); dim3 dimGrid(1, imageH, 1); complex_copy << <dimGrid, dimBlock >> >(d_odata, d_raw, imageW, imageH); device_vector<complex<float>> d_tranp(nc*nc); for (unsigned int jscale = J - 1; jscale >= scale; --jscale) { block.x = length + p; block.y = 1; grid.x = 1; grid.y = length; mem_shared = (length + p)*sizeof(float) * 2; //行方向小波变换 DownsampleLow << <grid, block, mem_shared >> >(p, d_odata, raw_pointer_cast(d_tranp.data()), length, pitch); DownsampleHigh << <grid, block, mem_shared >> >(p, d_odata, raw_pointer_cast(d_tranp.data()), length, pitch); //列方向小波变换 DownsampleLow << <grid, block, mem_shared >> >(p, raw_pointer_cast(d_tranp.data()), d_odata, length, pitch); DownsampleHigh << <grid, block, mem_shared >> >(p, raw_pointer_cast(d_tranp.data()), d_odata, length, pitch); length = length >> 1; } } extern "C" void IFwt2D(complex<float>* d_raw, complex<float>* d_odata, unsigned int pitch, unsigned int p, unsigned int scale, unsigned int J, unsigned int imageW, unsigned int imageH, unsigned int np) { unsigned int nc = imageW; unsigned int mem_shared; //copy kernel 配置 dim3 block; dim3 grid; unsigned int length 
= np; dim3 dimBlock(imageW, 1, 1); dim3 dimGrid(1, imageH, 1); complex_copy << <dimGrid, dimBlock >> >(d_odata, d_raw, imageW, imageH); device_vector<complex<float>> d_tranp(nc*nc); for (unsigned int jscale = scale; jscale <= J - 1; ++jscale) { mem_shared = (length + p)*sizeof(complex<float>) * 2; block.x = 1; block.y = length + p; grid.x = length; grid.y = 1; //上采样,行方向小波变换 IdwtDb1D << <grid, block, mem_shared >> >(p, d_odata, raw_pointer_cast(d_tranp.data()), length, pitch); //上采样,列方向小波变换 IdwtDb1D << <grid, block, mem_shared >> >(p, raw_pointer_cast(d_tranp.data()), d_odata, length, pitch); length = length << 1; } } extern "C" void Recon_Runtime::ComputeGradient(complex<float> * d_kspace, device_vector<complex<float>>& d_gradient, ParameterSet& params, float * d_mask, complex<float> * d_undersampled_kspace, cufftHandle& plan, unsigned int lpitch, unsigned int rpitch, unsigned int p, unsigned int scale, unsigned int J, VariationSet& variationSet) { dim3 dimBlock(_imageW, 1, 1); dim3 dimGrid(1, _imageH, 1); GetFidelityGradient << <dimGrid, dimBlock >> >(raw_pointer_cast(d_gradient.data()), d_kspace, d_mask, d_undersampled_kspace, lpitch, rpitch, _imageW, _imageH); //原位运算,初始值为d_kspace float epsilon = static_cast<float>(std::pow(10, -7)); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); Fwt2D(raw_pointer_cast(variationSet.d_image.data()), raw_pointer_cast(variationSet.wavelet_buffer.data()), lpitch, p, scale, J, _imageW, _imageH); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); FTV(raw_pointer_cast(variationSet.d_image.data()), variationSet.tv_vertical_buffer, variationSet.tv_horizontal_buffer, _imageW, _imageH, lpitch); if (params.tv_weight || params.wavelet_weight) { GetSparseGradient << <dimGrid, dimBlock >> >(raw_pointer_cast(variationSet.tv_vertical_buffer.data()), raw_pointer_cast(variationSet.tv_horizontal_buffer.data()), 
raw_pointer_cast(variationSet.wavelet_buffer.data()), epsilon, lpitch, _imageW, _imageH);//原位运算 } unsigned int np = static_cast<unsigned int>(std::pow(2, scale + 1)); IFwt2D(raw_pointer_cast(variationSet.wavelet_buffer.data()), raw_pointer_cast(variationSet.wavelet_buffer.data()), lpitch, p, scale, J, _imageW, _imageH, np); //原位运算 bool ft_direction = true; Fft(raw_pointer_cast(variationSet.wavelet_buffer.data()), raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.wavelet_buffer.data()), ft_direction, plan, _imageW, _imageH, lpitch); //原位运算 IFTV(variationSet.tv_vertical_buffer, variationSet.tv_horizontal_buffer, variationSet.tv_buffer, variationSet.itv_horizontal_buffer, variationSet.itv_vertical_buffer, _imageW, _imageH, lpitch); Fft(raw_pointer_cast(variationSet.tv_buffer.data()), raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.tv_buffer.data()), ft_direction, plan, _imageW, _imageH, lpitch); //原位运算 GetGradient << <dimGrid, dimBlock >> >(raw_pointer_cast(variationSet.wavelet_buffer.data()), raw_pointer_cast(variationSet.tv_buffer.data()), raw_pointer_cast(d_gradient.data()), params.wavelet_weight, params.tv_weight, _imageW, _imageH, lpitch); } extern "C" float Recon_Runtime::CalculateEnergy(device_vector<complex<float>>& d_recon_kspace, device_vector<complex<float>>& diff_recon_kspace, device_vector<complex<float>>& d_recon_wavelet, device_vector<complex<float>>& diff_recon_wavelet, device_vector<complex<float>>& d_tv_horizontal, device_vector<complex<float>>& d_tv_vertical, device_vector<complex<float>>& diff_recon_tv_vertical, device_vector<complex<float>>& diff_recon_tv_horizontal, complex<float>* d_undersampled_kspace, device_vector<float> & energy_buffer, ParameterSet&params, float step, unsigned int lpitch, unsigned int rpitch) { float sum_energy = 0.0f; dim3 dimGrid(1, _imageH, 1); dim3 dimBlock(_imageW, 1, 1); //分配空间 device_vector<float> result(_imageH*_imageW); CalculateFidelity << <dimGrid, 
dimBlock >> >(raw_pointer_cast(diff_recon_kspace.data()), raw_pointer_cast(d_recon_kspace.data()), step, d_undersampled_kspace, raw_pointer_cast(result.data()), lpitch, rpitch, _imageW, _imageH); //三元运算 sum_energy += thrust::reduce(result.begin(), result.end(), 0.0f, thrust::plus<float>()); float epsilon = static_cast<float>(std::pow(10, -7)); //计算小波项 transform(diff_recon_wavelet.begin(), diff_recon_wavelet.end(), d_recon_wavelet.begin(), result.begin(), sparse_energy(step, epsilon)); //二元运算 if (params.wavelet_weight) { sum_energy += thrust::reduce(result.begin(), result.end(), 0.0f, thrust::plus<float>()) * params.wavelet_weight; } // 计算TV项能量值 if (params.tv_weight) { transform(diff_recon_tv_vertical.begin(), diff_recon_tv_vertical.end(), d_tv_vertical.begin(), result.begin(), sparse_energy(step, epsilon)); //二元运算 sum_energy += thrust::reduce(result.begin(), result.end(), 0.0f, thrust::plus<float>()) * params.tv_weight; transform(diff_recon_tv_horizontal.begin(), diff_recon_tv_horizontal.end(), d_tv_horizontal.begin(), result.begin(), sparse_energy(step, epsilon)); //二元运算 sum_energy += thrust::reduce(result.begin(), result.end(), 0.0f, thrust::plus<float>()) * params.tv_weight; } return sum_energy; } extern "C" void Recon_Runtime::GetReconData(std::vector<float> mask, std::vector<std::complex<float>> raw_data, ParameterSet& params, unsigned int width, unsigned int height, float& elapsed_time, std::complex<float> * recon_data) { _imageW = width; _imageH = height; complex<float> *d_kspace, *d_undersampled_kspace, *d_image; float *d_mask; size_t lpitch, rpitch; //lpitch代表元素为复数类的对齐参数, rpitch代表元素为实数类的对齐参数 dim3 dimBlock(_imageW, 1, 1); dim3 dimGrid(1, _imageH, 1); device_vector<float> realPart(_imageH*_imageW); device_vector<float> imagPart(_imageH*_imageW); cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); cudaEventRecord(start, 0); //制定fft plan和方向 bool ft_direction = false; //false为傅立叶逆变换, 从k空间到图像域; 
true为傅立叶正变换,从图像域到k空间 cufftHandle plan; checkCudaErrors(cufftPlan2d(&plan, _imageH, _imageW, CUFFT_C2C)); checkCudaErrors(cudaMallocPitch((void **)&d_kspace, &lpitch, _imageW*sizeof(complex<float>), _imageH)); checkCudaErrors(cudaMallocPitch((void **)&d_undersampled_kspace, &lpitch, _imageW*sizeof(complex<float>), _imageH)); checkCudaErrors(cudaMallocPitch((void **)&d_mask, &rpitch, _imageW*sizeof(float), _imageH)); checkCudaErrors(cudaMallocPitch((void **)&d_image, &lpitch, _imageW*sizeof(complex<float>), _imageH)); checkCudaErrors(cudaMemcpy2D(d_kspace, lpitch, raw_data.data(), sizeof(float) * 2 * _imageW, sizeof(float) * 2 * _imageW, _imageH, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy2D(d_mask, rpitch, mask.data(), sizeof(float)*_imageW, sizeof(float)*_imageW, _imageH, cudaMemcpyHostToDevice)); complex_copy << <dimGrid, dimBlock >> >(d_undersampled_kspace, d_kspace, _imageW, _imageH); //小波变换的掩模 // std::shared_ptr<CWavelet> wavelet = std::shared_ptr<CWavelet>(new CWavelet()); GenerateFilter(L"daub2.flt", 4); std::vector<float> filter = GetFilter(); unsigned int scale = GetCoarseLevel(); unsigned int J = QuadLength(_imageW); unsigned int p = filter.size(); std::vector<float> float_filter(filter.size()); std::vector<float> mirror_filter(filter.size()); auto mirror_cursor = mirror_filter.data(); auto filter_cursor = filter.data(); auto float_cursor = float_filter.data(); auto end_cursor = filter_cursor + filter.size(); for (unsigned int i = 0; i < filter.size(); ++i) { *float_cursor = static_cast<float>(*filter_cursor); if (i % 2 == 0) *mirror_cursor = *float_cursor; else *mirror_cursor = -(*float_cursor); ++float_cursor; ++filter_cursor; ++mirror_cursor; } //将小波变换基拷贝到设备端 setConvolutionLowKernel(float_filter.data()); setConvolutionHighKernel(mirror_filter.data()); //定义临时变量 device_vector<complex<float>> d_gradient(_imageH*_imageW), temp_image(_imageH*_imageW), d_wavelet(_imageH*_imageW), d_tv_vertical(_imageH*_imageW), d_tv_horizontal(_imageH*_imageW), 
d_new_gradient(_imageH*_imageW), d_recon_image(_imageH*_imageW), diff_recon_image(_imageH*_imageW), d_recon_wavelet(_imageH*_imageW), diff_recon_wavelet(_imageH*_imageW), d_recon_tv_horizontal(_imageH*_imageW), d_recon_tv_vertical(_imageH*_imageW), diff_recon_tv_horizontal(_imageH*_imageW), diff_recon_tv_vertical(_imageH*_imageW), d_recon_kspace(_imageH*_imageW), diff_recon_data(_imageH*_imageW), diff_recon_kspace(_imageW*_imageH), d_conj_module(_imageW*_imageH), fft_buffer(_imageW*_imageH), itv_horizontal_buffer(_imageW*_imageH), itv_vertical_buffer(_imageW*_imageH); device_vector<float> diff_recon_data_module(_imageW*_imageH); device_vector<float> g1_norm(_imageH*_imageW); device_vector<float> g0_norm(_imageW*_imageH); device_vector<float> energy_buffer(_imageW*_imageH); device_vector<complex<float>> wavelet_buffer(_imageW*_imageH); device_vector<complex<float>> tv_buffer(_imageW*_imageH); device_vector<complex<float>> tv_vertical_buffer(_imageW*_imageH); device_vector<complex<float>> tv_horizontal_buffer(_imageW*_imageH); VariationSet tmpSet = { d_gradient, temp_image, d_wavelet, d_tv_vertical, d_tv_horizontal, d_new_gradient, d_recon_image, diff_recon_image, d_recon_wavelet, diff_recon_wavelet, d_recon_tv_horizontal, d_recon_tv_vertical, diff_recon_tv_horizontal, diff_recon_tv_vertical, d_recon_kspace, diff_recon_data, diff_recon_kspace, d_conj_module ,diff_recon_data_module, g0_norm, g1_norm, fft_buffer, itv_horizontal_buffer, itv_vertical_buffer, wavelet_buffer, tv_buffer, tv_horizontal_buffer, tv_vertical_buffer, energy_buffer }; //重建迭代 // for (int i = 0; i < 5; ++i) // { Run(d_kspace, d_undersampled_kspace, d_mask, params, lpitch, rpitch, ft_direction, plan, realPart, imagPart, scale, p, J, tmpSet); // } // Fft(d_kspace, raw_pointer_cast(tmpSet.fft_buffer.data()), d_image, ft_direction, plan, _imageW, _imageH, lpitch); checkCudaErrors(cudaMemcpy2D(recon_data, sizeof(float)*_imageW * 2, d_kspace, lpitch, sizeof(float)*_imageW * 2, _imageH, 
cudaMemcpyDeviceToHost)); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); checkCudaErrors(cudaEventElapsedTime(&elapsed_time, start, stop)); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); // auto cursor = recon_data; // for (auto iter = raw_data.begin(); iter != raw_data.end(); ++iter) // { // float real = static_cast<float>((*iter).real()); // float imag = static_cast<float>((*iter).imag()); // *cursor = std::complex<float>(real, imag); // ++cursor; // } checkCudaErrors(cufftDestroy(plan)); checkCudaErrors(cudaFree(d_kspace)); checkCudaErrors(cudaFree(d_mask)); checkCudaErrors(cudaFree(d_undersampled_kspace)); checkCudaErrors(cudaFree(d_image)); } extern "C" void Recon_Runtime::Run(complex<float> *d_kspace, complex<float> *d_undersampled_kspace, float *d_mask, ParameterSet& params, unsigned int lpitch, unsigned int rpitch, bool& ft_direction, cufftHandle& plan, device_vector<float>& realPart, device_vector<float>& imagPart, unsigned int p, unsigned int scale, unsigned int J, VariationSet& variationSet) { _iteration_count = 0; float eps = (float)(2.22044604925031 * std::pow(10, -16)); //kernel配置 dim3 dimBlock(_imageW, 1, 1); dim3 dimGrid(1, _imageH, 1); device_ptr<complex<float>> kspace_ptr(d_kspace); Fft(d_kspace, raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.d_image.data()), ft_direction, plan, _imageW, _imageH, lpitch); //由k空间数据得到初始图像 [非原位运算] ComputeGradient(d_kspace, variationSet.d_gradient, params, d_mask, d_undersampled_kspace, plan, lpitch, rpitch, p, scale, J, variationSet); //求得g0 //transform(variationSet.d_gradient.begin(), variationSet.d_gradient.end(), variationSet.diff_recon_data.begin(), opposite_functor()); //diff_recon_data = -g0,得到dx的初始值 opposite << <dimGrid, dimBlock >> >(raw_pointer_cast(variationSet.d_gradient.data()), raw_pointer_cast(variationSet.diff_recon_data.data()), _imageW, _imageH); float epsilon = std::pow(10, -7); //重建过程 while (true) { // diff_recon_kspace = 
-g0 * mask; dot_multiply << <dimGrid, dimBlock >> >(d_kspace, raw_pointer_cast(variationSet.diff_recon_data.data()), d_mask, raw_pointer_cast(variationSet.d_recon_kspace.data()), raw_pointer_cast(variationSet.diff_recon_kspace.data()), lpitch, rpitch, _imageW, _imageH); Fft(d_kspace, raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.d_recon_image.data()), ft_direction, plan, _imageW, _imageH, lpitch); //临时变量 Fft(raw_pointer_cast(variationSet.diff_recon_data.data()), raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.diff_recon_image.data()), ft_direction, plan, _imageW, _imageH, lpitch); //临时变量 Fwt2D(raw_pointer_cast(variationSet.d_recon_image.data()), raw_pointer_cast(variationSet.d_recon_wavelet.data()), lpitch, p, scale, J, _imageW, _imageH); //临时变量 Fwt2D(raw_pointer_cast(variationSet.diff_recon_image.data()), raw_pointer_cast(variationSet.diff_recon_wavelet.data()), lpitch, p, scale, J, _imageW, _imageH); //临时变量 FTV(raw_pointer_cast(variationSet.d_recon_image.data()), variationSet.d_tv_vertical, variationSet.d_tv_horizontal, _imageW, _imageH, lpitch); //无临时变量 FTV(raw_pointer_cast(variationSet.diff_recon_image.data()), variationSet.diff_recon_tv_vertical, variationSet.diff_recon_tv_horizontal, _imageW, _imageH, lpitch); //无临时变量 float initial_step = 0.0f; float initial_energy = 0.0f; initial_energy = CalculateEnergy(variationSet.d_recon_kspace, variationSet.diff_recon_kspace, variationSet.d_recon_wavelet, variationSet.diff_recon_wavelet, variationSet.d_tv_horizontal, variationSet.d_tv_vertical, variationSet.diff_recon_tv_vertical, variationSet.diff_recon_tv_horizontal, d_undersampled_kspace, variationSet.energy_buffer, params, initial_step, lpitch, rpitch); float step = params.initial_line_search_step; float final_energy = 0.0f; final_energy = CalculateEnergy(variationSet.d_recon_kspace, variationSet.diff_recon_kspace, variationSet.d_recon_wavelet, variationSet.diff_recon_wavelet, 
variationSet.d_tv_horizontal, variationSet.d_tv_vertical, variationSet.diff_recon_tv_vertical, variationSet.diff_recon_tv_horizontal, d_undersampled_kspace, variationSet.energy_buffer, params, step, lpitch, rpitch); unsigned int line_search_times = 0; transform(variationSet.d_gradient.begin(), variationSet.d_gradient.end(), variationSet.diff_recon_data.begin(), variationSet.d_conj_module.begin(), conj_multiply()); //求搜索最优的梯度方向 depart_functor << <dimGrid, dimBlock >> >(raw_pointer_cast(realPart.data()), raw_pointer_cast(imagPart.data()), raw_pointer_cast(variationSet.d_conj_module.data()), lpitch, rpitch, _imageW, _imageH); float realResult = thrust::reduce(realPart.begin(), realPart.end(), 0.0f, thrust::plus<float>()); float imagResult = thrust::reduce(imagPart.begin(), imagPart.end(), 0.0f, thrust::plus<float>()); float energy_variation_g0 = std::sqrt(realResult*realResult + imagResult*imagResult); //线搜索 while ((final_energy > initial_energy - params.line_search_alpha * step * energy_variation_g0) && (line_search_times < params.max_line_search_times)) { line_search_times++; step = step * params.line_search_beta; final_energy = CalculateEnergy(variationSet.d_recon_kspace, variationSet.diff_recon_kspace, variationSet.d_recon_wavelet, variationSet.diff_recon_wavelet, variationSet.d_tv_horizontal, variationSet.d_tv_vertical, variationSet.diff_recon_tv_vertical, variationSet.diff_recon_tv_horizontal, d_undersampled_kspace, variationSet.energy_buffer, params, step, lpitch, rpitch); } if (line_search_times == params.max_line_search_times) { assert(0); } if (line_search_times > 2) { params.initial_line_search_step = params.initial_line_search_step * params.line_search_beta; } if (line_search_times < 1) { params.initial_line_search_step = params.initial_line_search_step / params.line_search_beta; } transform(variationSet.diff_recon_data.begin(), variationSet.diff_recon_data.end(), kspace_ptr, kspace_ptr, saxpy_functor(step)); //更新d_kspace Fft(d_kspace, 
raw_pointer_cast(variationSet.fft_buffer.data()), raw_pointer_cast(variationSet.d_image.data()), ft_direction, plan, _imageW, _imageH, lpitch); //d_kspace更新,d_image更新 ComputeGradient(d_kspace, variationSet.d_new_gradient, params, d_mask, d_undersampled_kspace, plan, lpitch, rpitch, p, scale, J, variationSet); ////更新d_new_gradient, g1 norm_kernel << <dimGrid, dimBlock >> >(raw_pointer_cast(variationSet.d_gradient.data()), raw_pointer_cast(variationSet.d_new_gradient.data()), raw_pointer_cast(variationSet.g0_norm.data()), raw_pointer_cast(variationSet.g1_norm.data()), lpitch, rpitch, _imageW, _imageH); float sum_energy_g0 = thrust::reduce(variationSet.g0_norm.begin(), variationSet.g0_norm.end(), 0.0f, thrust::plus<float>()); float sum_energy_g1 = thrust::reduce(variationSet.g1_norm.begin(), variationSet.g1_norm.end(), 0.0f, thrust::plus<float>()); float ellipse_factor = sum_energy_g1 / (sum_energy_g0 + eps); complex_copy << <dimGrid, dimBlock >> >(raw_pointer_cast(variationSet.d_gradient.data()), raw_pointer_cast(variationSet.d_new_gradient.data()), _imageW, _imageH); //g0 = g1 更新g0 transform(variationSet.diff_recon_data.begin(), variationSet.diff_recon_data.end(), variationSet.d_new_gradient.begin(), variationSet.diff_recon_data.begin(), saxmy_functor(ellipse_factor)); //更新diff_recon_data; ++_iteration_count; transform(variationSet.diff_recon_data.begin(), variationSet.diff_recon_data.end(), variationSet.diff_recon_data_module.begin(), module_functor()); float differential_reconstruct_data_sum = thrust::reduce(variationSet.diff_recon_data_module.begin(), variationSet.diff_recon_data_module.end(), 0.0f, thrust::plus<float>()); if ((_iteration_count > params.max_conjugate_gradient_iteration_times) || (differential_reconstruct_data_sum < params.gradient_tollerance)) { break; } } }
df7a67ef25e5dd2b658fc16b185bdb7c66ff1d37.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "backward_sam_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in_w_h_c_delta = NULL; hipMalloc(&in_w_h_c_delta, XSIZE*YSIZE); int size = XSIZE*YSIZE; int channel_size = XSIZE*YSIZE; float *in_scales_c = NULL; hipMalloc(&in_scales_c, XSIZE*YSIZE); float *out_from_delta = NULL; hipMalloc(&out_from_delta, XSIZE*YSIZE); float *in_from_output = NULL; hipMalloc(&in_from_output, XSIZE*YSIZE); float *out_state_delta = NULL; hipMalloc(&out_state_delta, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( backward_sam_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, in_w_h_c_delta,size,channel_size,in_scales_c,out_from_delta,in_from_output,out_state_delta); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( backward_sam_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, 
in_w_h_c_delta,size,channel_size,in_scales_c,out_from_delta,in_from_output,out_state_delta); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( backward_sam_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, in_w_h_c_delta,size,channel_size,in_scales_c,out_from_delta,in_from_output,out_state_delta); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
df7a67ef25e5dd2b658fc16b185bdb7c66ff1d37.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "backward_sam_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in_w_h_c_delta = NULL; cudaMalloc(&in_w_h_c_delta, XSIZE*YSIZE); int size = XSIZE*YSIZE; int channel_size = XSIZE*YSIZE; float *in_scales_c = NULL; cudaMalloc(&in_scales_c, XSIZE*YSIZE); float *out_from_delta = NULL; cudaMalloc(&out_from_delta, XSIZE*YSIZE); float *in_from_output = NULL; cudaMalloc(&in_from_output, XSIZE*YSIZE); float *out_state_delta = NULL; cudaMalloc(&out_state_delta, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); backward_sam_kernel<<<gridBlock,threadBlock>>>(in_w_h_c_delta,size,channel_size,in_scales_c,out_from_delta,in_from_output,out_state_delta); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { backward_sam_kernel<<<gridBlock,threadBlock>>>(in_w_h_c_delta,size,channel_size,in_scales_c,out_from_delta,in_from_output,out_state_delta); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 
1000; loop_counter++) { backward_sam_kernel<<<gridBlock,threadBlock>>>(in_w_h_c_delta,size,channel_size,in_scales_c,out_from_delta,in_from_output,out_state_delta); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b4f4dbac375ebeacfdda14c6bf9283acac9f7fa6.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_mtgp32_host.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime.h> #include <vector> #include <math.h> #include <rocrand/rocrand_mtgp32_11213.h> #include "philox_random.h" #include "philox_pytorch.h" #include <stdio.h> #include <stdlib.h> #include <time.h> using namespace std; static uint64_t offset=0; // float holdy1=pow(2.0,-10.0); // float holdy2=pow(2.0,-24.0); __device__ const float twoten=0.0009765625; __device__ const float twominustwentyfour=0.000000059604644775390625; template<typename T> __device__ __forceinline__ T maybe_upcast(__half x){ return T(__half2float(x)); } template<> __device__ __forceinline__ __half maybe_upcast<__half>(__half x){ return x; } __device__ __forceinline__ float get_delta_fp16(float x){ int e_actual; frexpf(x, &e_actual); e_actual-=1; // int e_actual=e_stored-127; if(e_actual>=-14){ return twoten*pow(2,e_actual); } else{ return twominustwentyfour; } } template <typename scalar_t> __device__ __forceinline__ scalar_t natalia_magic(float x,hiprandStatePhilox4_32_10_t state){ if(x==0.0){ return scalar_t(0.0); } float delta=get_delta_fp16(x); float randy=hiprand_uniform(&state); float val; if(x<0.0){ val=x-randy*delta; } else{ val=x+randy*delta; } // To guarantee representability, route through a guaranteed FP16 cast. 
return maybe_upcast<scalar_t>(__float2half_rz(val)); } template <typename scalar_t> __global__ void stochround(float* mtx,scalar_t* new_mtx, int n, uint64_t seed, uint64_t offset){ int threadnum=blockDim.x*blockIdx.x+threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init(seed,threadnum,offset,&state); for(int i = threadnum; i <n ; i +=blockDim.x*gridDim.x ){ float mtx_holder=static_cast<float>(mtx[i]); new_mtx[i]=natalia_magic<scalar_t>(mtx_holder,state); } } torch::Tensor stochroundfortensor(torch::Tensor mtx,torch::Tensor half_mtx){ torch::IntArrayRef sizes=mtx.sizes(); int dims=sizes.size(); size_t n = 1; for(int county=0;county<dims;county++){ n=n*sizes[county]; } uint64_t seed= 12345ul; const int threads = 256.0; // printf("%d \n \n \n \n ",offset); float sm_max=72.0; float numthreads_per_sm=1024.0; const dim3 blocks(ceil(sm_max*numthreads_per_sm/threads),1,1); AT_DISPATCH_FLOATING_TYPES_AND_HALF(half_mtx.scalar_type(),"stochastic_tensor_round",([&] hipLaunchKernelGGL(({stochround<scalar_t>), dim3(blocks), dim3(threads), 0, 0, mtx.data<float>(),half_mtx.data<scalar_t>(),n,seed,offset);})); offset = offset + (n + blocks.x*threads - 1)/(blocks.x*threads); // printf("%d \n \n \n \n ",offset); return half_mtx; }
b4f4dbac375ebeacfdda14c6bf9283acac9f7fa6.cu
#include <torch/extension.h> #include <curand_kernel.h> #include <curand.h> #include <curand_mtgp32_host.h> #include <cuda.h> #include <cuda_fp16.h> #include <cuda_runtime.h> #include <vector> #include <math.h> #include <curand_mtgp32dc_p_11213.h> #include "philox_random.h" #include "philox_pytorch.h" #include <stdio.h> #include <stdlib.h> #include <time.h> using namespace std; static uint64_t offset=0; // float holdy1=pow(2.0,-10.0); // float holdy2=pow(2.0,-24.0); __device__ const float twoten=0.0009765625; __device__ const float twominustwentyfour=0.000000059604644775390625; template<typename T> __device__ __forceinline__ T maybe_upcast(__half x){ return T(__half2float(x)); } template<> __device__ __forceinline__ __half maybe_upcast<__half>(__half x){ return x; } __device__ __forceinline__ float get_delta_fp16(float x){ int e_actual; frexpf(x, &e_actual); e_actual-=1; // int e_actual=e_stored-127; if(e_actual>=-14){ return twoten*pow(2,e_actual); } else{ return twominustwentyfour; } } template <typename scalar_t> __device__ __forceinline__ scalar_t natalia_magic(float x,curandStatePhilox4_32_10_t state){ if(x==0.0){ return scalar_t(0.0); } float delta=get_delta_fp16(x); float randy=curand_uniform(&state); float val; if(x<0.0){ val=x-randy*delta; } else{ val=x+randy*delta; } // To guarantee representability, route through a guaranteed FP16 cast. 
return maybe_upcast<scalar_t>(__float2half_rz(val)); } template <typename scalar_t> __global__ void stochround(float* mtx,scalar_t* new_mtx, int n, uint64_t seed, uint64_t offset){ int threadnum=blockDim.x*blockIdx.x+threadIdx.x; curandStatePhilox4_32_10_t state; curand_init(seed,threadnum,offset,&state); for(int i = threadnum; i <n ; i +=blockDim.x*gridDim.x ){ float mtx_holder=static_cast<float>(mtx[i]); new_mtx[i]=natalia_magic<scalar_t>(mtx_holder,state); } } torch::Tensor stochroundfortensor(torch::Tensor mtx,torch::Tensor half_mtx){ torch::IntArrayRef sizes=mtx.sizes(); int dims=sizes.size(); size_t n = 1; for(int county=0;county<dims;county++){ n=n*sizes[county]; } uint64_t seed= 12345ul; const int threads = 256.0; // printf("%d \n \n \n \n ",offset); float sm_max=72.0; float numthreads_per_sm=1024.0; const dim3 blocks(ceil(sm_max*numthreads_per_sm/threads),1,1); AT_DISPATCH_FLOATING_TYPES_AND_HALF(half_mtx.scalar_type(),"stochastic_tensor_round",([&] {stochround<scalar_t><<<blocks, threads>>>(mtx.data<float>(),half_mtx.data<scalar_t>(),n,seed,offset);})); offset = offset + (n + blocks.x*threads - 1)/(blocks.x*threads); // printf("%d \n \n \n \n ",offset); return half_mtx; }
f5734a73bc50977e463d8ab4c59a3084dec621e5.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

// Enumerate HIP devices and print name, compute capability, and global memory.
int main(int argc, char ** argv) {
    int deviceCount;
    hipGetDeviceCount(&deviceCount);
    printf("Device count %d\n", deviceCount);
    int deviceIndex;
    for (deviceIndex = 0; deviceIndex < deviceCount; deviceIndex++){
#ifdef __cplusplus
        hipDeviceProp_t deviceProperties;
#else
        struct hipDeviceProp_t deviceProperties;
#endif
        hipGetDeviceProperties(&deviceProperties, deviceIndex);
        if (deviceIndex == 0) {
            // 9999.9999 is the sentinel capability reported when no real GPU exists.
            if (deviceProperties.major == 9999 && deviceProperties.minor == 9999) {
                printf("No CUDA GPU has been detected");
            } else if (deviceCount == 1) {
                printf("There is 1 device supporting CUDA");
            } else {
                printf("There are %d devices supporting CUDA", deviceCount);
            }
        }
        printf("Device %d name %s\n", deviceIndex, deviceProperties.name);
        // Bug fix: the original printed deviceIndex twice here instead of the
        // device's actual compute capability.
        printf("Computational Capabilities: %d.%d\n", deviceProperties.major, deviceProperties.minor);
        printf("Maximum global memory size: %zu\n", deviceProperties.totalGlobalMem);
    }
    return 0;
}
f5734a73bc50977e463d8ab4c59a3084dec621e5.cu
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Enumerate CUDA devices and print name, compute capability, and global memory.
int main(int argc, char ** argv) {
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    printf("Device count %d\n", deviceCount);
    int deviceIndex;
    for (deviceIndex = 0; deviceIndex < deviceCount; deviceIndex++){
#ifdef __cplusplus
        cudaDeviceProp deviceProperties;
#else
        struct cudaDeviceProp deviceProperties;
#endif
        cudaGetDeviceProperties(&deviceProperties, deviceIndex);
        if (deviceIndex == 0) {
            // 9999.9999 is the sentinel capability reported when no real GPU exists.
            if (deviceProperties.major == 9999 && deviceProperties.minor == 9999) {
                printf("No CUDA GPU has been detected");
            } else if (deviceCount == 1) {
                printf("There is 1 device supporting CUDA");
            } else {
                printf("There are %d devices supporting CUDA", deviceCount);
            }
        }
        printf("Device %d name %s\n", deviceIndex, deviceProperties.name);
        // Bug fix: the original printed deviceIndex twice here instead of the
        // device's actual compute capability.
        printf("Computational Capabilities: %d.%d\n", deviceProperties.major, deviceProperties.minor);
        printf("Maximum global memory size: %zu\n", deviceProperties.totalGlobalMem);
    }
    return 0;
}
b0984d95f4e0382a8506c85f9aaf377534805b41.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>

/* One 5-dimensional sample: three color channels plus two extra features. */
typedef struct {
    double _r;
    double _g;
    double _b;
    double _m;
    double _n;
} Point;

/* Abort with file/line info when a HIP runtime call fails. */
#define CUDA_CALL(x) {if((x) != hipSuccess){ \
    printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
    printf(" %s\n", hipGetErrorString(hipGetLastError())); \
    exit(EXIT_FAILURE);}}

/* Read the input header: cluster count K and image dimensions a x b. */
void readImageSize(FILE *ifp, int* K, int* a, int* b)
{
    fscanf(ifp,"%d\n",K);
    printf("%d\n",*K);
    fscanf(ifp,"%d\n",a);
    printf("%d\n",*a);
    fscanf(ifp,"%d\n",b);
    printf("%d\n",*b);
}

/* Read num_points comma-separated 5-tuples from the file into points. */
void readPoints(FILE* ifp, Point *points, int num_points)
{
    int i;
    for(i=0;i<num_points;i++)
    {
        fscanf(ifp,"%lf,%lf,%lf,%lf,%lf",
               &points[i]._r, &points[i]._g, &points[i]._b,
               &points[i]._m, &points[i]._n);
    }
}

/* Seed the K initial means with the samples at indices n/2, n/3, n/4, ... */
void initialize(Point* mean, int K, int num_points, Point* points)
{
    int i, a, p=2;
    srand(time(NULL));  /* kept from the original; rand() itself is unused */
    for(i=0;i<K;i++)
    {
        a = num_points/p;
        mean[i]._r = points[a]._r;
        mean[i]._g = points[a]._g;
        mean[i]._b = points[a]._b;
        mean[i]._m = points[a]._m;
        mean[i]._n = points[a]._n;
        p++;
    }
}

/* Mark every point as belonging to no cluster yet. */
void IntClusterMem(int *cluster, int num_points)
{
    int i;
    for(i=0;i<num_points;i++)
    {
        cluster[i]=-1;
    }
}

/* Kernel: assign each pixel to the nearest (Euclidean, 5-D) of the K means.
 * Launched on a 2-D grid covering the x-by-y image. */
__global__ void pointsCluster(int* after_cluster_d, Point* point_d, Point* Dmean, int K, int x, int y)
{
    int j = blockIdx.x*blockDim.x + threadIdx.x;
    int k = blockIdx.y*blockDim.y + threadIdx.y;
    /* Bug fix: the grid is rounded up, so guard threads past the image edge
     * to avoid out-of-bounds reads/writes. */
    if (j >= x || k >= y) return;
    int t = k*x + j;
    int parent = 0;
    double minDist = sqrt(pow((point_d[t]._r-Dmean[0]._r),2)+pow((point_d[t]._g-Dmean[0]._g),2)+pow((point_d[t]._b-Dmean[0]._b),2)+pow((point_d[t]._m-Dmean[0]._m),2)+pow((point_d[t]._n-Dmean[0]._n),2));
    for (int i=1;i<K;i++)
    {
        double dist = sqrt(pow((point_d[t]._r-Dmean[i]._r),2)+pow((point_d[t]._g-Dmean[i]._g),2)+pow((point_d[t]._b-Dmean[i]._b),2)+pow((point_d[t]._m-Dmean[i]._m),2)+pow((point_d[t]._n-Dmean[i]._n),2));
        if (minDist >= dist)
        {
            parent = i;
            minDist = dist;
        }
    }
    after_cluster_d[t] = parent;
}

/* Recompute each cluster's mean from its assigned points (host side).
 * Empty clusters collapse to the zero vector, as in the original. */
void calcNewMean(Point* points, int* cluster, Point* mean, int K, int num_points)
{
    Point* newMean=(Point*)malloc(sizeof(Point)*K);
    int* members=(int*)malloc(sizeof(int)*(K));
    int i;
    for(i=0;i<K;i++)
    {
        members[i]=0;
        newMean[i]._r=0; newMean[i]._g=0; newMean[i]._b=0;
        newMean[i]._m=0; newMean[i]._n=0;
    }
    for(i=0;i<num_points;i++)
    {
        members[cluster[i]]++;
        newMean[cluster[i]]._r+=points[i]._r;
        newMean[cluster[i]]._g+=points[i]._g;
        newMean[cluster[i]]._b+=points[i]._b;
        newMean[cluster[i]]._m+=points[i]._m;
        newMean[cluster[i]]._n+=points[i]._n;
    }
    for(i=0;i<K;i++)
    {
        if(members[i]!=0)
        {
            newMean[i]._r/=members[i];
            newMean[i]._g/=members[i];
            newMean[i]._b/=members[i];
            newMean[i]._m/=members[i];
            newMean[i]._n/=members[i];
        }
        else
        {
            newMean[i]._r=0; newMean[i]._g=0; newMean[i]._b=0;
            newMean[i]._m=0; newMean[i]._n=0;
        }
    }
    for(i=0;i<K;i++)
    {
        mean[i]._r=newMean[i]._r;
        mean[i]._g=newMean[i]._g;
        mean[i]._b=newMean[i]._b;
        mean[i]._m=newMean[i]._m;
        mean[i]._n=newMean[i]._n;
    }
    /* Bug fix: these were leaked on every k-means iteration. */
    free(newMean);
    free(members);
}

/* Convergence test: did any point change cluster by more than tol?
 * NOTE(review): currently unused by main (which stops on iteration count). */
int chkConvrg(int *before_clusters, int *after_cluster, int num_points, float tol)
{
    int i;
    tol = num_points*tol;
    for(i=0;i<num_points;i++)
    {
        if(abs(before_clusters[i]-after_cluster[i])>tol)
        {
            return -1;
        }
    }
    return 0;
}

/* Usage: prog input output tol blockX blockY K [iterations]
 * Runs GPU k-means on a 5-channel image and writes means + labels. */
int main(int argc, char* argv[])
{
    /* host data */
    int K;
    int num_points;
    int *before_clusters;
    int i;
    int job_done = 0;
    int x, y, iter = 0, iterations;
    Point *mean;
    Point *points;
    int *after_cluster;
    float tol;

    /* device data */
    Point *points_d;
    Point *mean_d;
    int *after_cluster_d;
    int *before_cluster_d;

    hipEvent_t startinit, endinit, startmean, endmean, startcal, endcal, startindex, endindex;
    hipEvent_t start1, end1;
    float timeinit, timemean, timecal, timeindex;
    float time1;

    tol = atof(argv[3]);
    printf("Tolerance = %.10f\n",tol);

    /* Bug fix: `iterations` was read below but never assigned (undefined
     * behavior).  Take it from an optional 7th argument, defaulting to 50. */
    iterations = (argc > 7) ? atoi(argv[7]) : 50;

    hipEventCreate(&start1);
    hipEventCreate(&end1);
    hipEventRecord(start1, 0);

    /* read the input image */
    FILE *ifp = fopen(argv[1],"r");
    readImageSize(ifp,&K,&x,&y);
    K = atoi(argv[6]);          /* command-line K overrides the file header */
    num_points = x*y;
    int blockX = atoi(argv[4]);
    int blockY = atoi(argv[5]);

    points=(Point*)malloc(sizeof(Point)*num_points);
    readPoints(ifp,points,num_points);
    fclose(ifp);

    before_clusters=(int*)malloc(sizeof(int)*num_points);
    after_cluster=(int*)malloc(sizeof(int)*num_points);
    mean=(Point*)malloc(sizeof(Point)*K);

    initialize(mean,K, num_points, points);
    IntClusterMem(before_clusters,num_points);
    IntClusterMem(after_cluster,num_points);

    /* allocate GPU memory */
    CUDA_CALL(hipMalloc((void**) &after_cluster_d, sizeof(int)*num_points));
    CUDA_CALL(hipMalloc((void**) &before_cluster_d, sizeof(int)*num_points));
    CUDA_CALL(hipMalloc((void**) &points_d, sizeof(Point)*num_points));
    CUDA_CALL(hipMalloc((void**) &mean_d, sizeof(Point)*K));

    /* copy data points to device (timed) */
    hipEventCreate(&startinit);
    hipEventCreate(&endinit);
    hipEventRecord(startinit, 0);
    CUDA_CALL(hipMemcpy(points_d, points, sizeof(Point)*num_points, hipMemcpyHostToDevice));
    CUDA_CALL(hipMemcpy(after_cluster_d, after_cluster, sizeof(int)*num_points, hipMemcpyHostToDevice));
    hipEventRecord(endinit, 0);
    hipEventSynchronize(endinit);
    hipEventElapsedTime(&timeinit, startinit, endinit);

    while(1)
    {
        iter++;

        /* copy current centroids to device (timed) */
        hipEventCreate(&startmean);
        hipEventCreate(&endmean);
        hipEventRecord(startmean, 0);
        CUDA_CALL(hipMemcpy(mean_d, mean, sizeof(Point)*K, hipMemcpyHostToDevice));
        hipEventRecord(endmean, 0);
        hipEventSynchronize(endmean);
        hipEventElapsedTime(&timemean, startmean, endmean);

        /* assign points to clusters on the GPU (timed) */
        hipEventCreate(&startcal);
        hipEventCreate(&endcal);
        hipEventRecord(startcal, 0);
        dim3 block(blockX, blockY);
        dim3 grid((x+blockX-1)/blockX, (y+blockY-1)/blockY);
        hipLaunchKernelGGL(( pointsCluster), dim3(grid),dim3(block), 0, 0, after_cluster_d, points_d,mean_d,K,x,y);
        hipDeviceSynchronize();
        hipEventRecord(endcal, 0);
        hipEventSynchronize(endcal);
        hipEventElapsedTime(&timecal, startcal, endcal);

        /* copy assignments back (timed) */
        hipEventCreate(&startindex);
        hipEventCreate(&endindex);
        hipEventRecord(startindex, 0);
        CUDA_CALL(hipMemcpy(after_cluster, after_cluster_d, sizeof(int)*num_points, hipMemcpyDeviceToHost));
        hipEventRecord(endindex, 0);
        hipEventSynchronize(endindex);
        hipEventElapsedTime(&timeindex, startindex, endindex);

        calcNewMean(points,after_cluster,mean,K,num_points);

        if(iter>iterations)
        {
            printf("K-mean algorithm Converged with iterations = %d!\n",iter);
            job_done=1;
        }
        else
        {
            for(i=0;i<num_points;i++)
            {
                before_clusters[i]=after_cluster[i];
            }
        }
        if(job_done==1)
            break;
    }

    /* write results: header, final means, then per-point label (1-based) */
    FILE* ofp=fopen(argv[2],"w");
    fprintf(ofp,"%d\n",K);
    fprintf(ofp,"%d\n",x);
    fprintf(ofp,"%d\n",y);
    for(i=0;i<K;i++)
        fprintf(ofp,"%lf,%lf,%lf,%lf,%lf\n",mean[i]._r,mean[i]._g,mean[i]._b,mean[i]._m,mean[i]._n);
    for(i=0;i<num_points;i++)
        fprintf(ofp,"%lf,%lf,%lf,%lf,%lf,%d\n",points[i]._r,points[i]._g,points[i]._b,points[i]._m,points[i]._n,after_cluster[i]+1);
    fclose(ofp);

    hipEventRecord(end1, 0);
    hipEventSynchronize(end1);
    hipEventElapsedTime(&time1, start1, end1);

    printf("Time for sending initial data from host to device : %f\t sec\n",timeinit/1000);
    printf("Time for sending new means from host to device : %f\t sec\n",timemean/1000);
    printf("Time for calculation : %f\t sec\n",timecal/1000);
    printf("Time for sending new index from device to host : %f\t sec\n",timeindex/1000);
    printf("Total Time : %f\t sec\n",time1/1000);

    CUDA_CALL(hipFree(after_cluster_d));
    CUDA_CALL(hipFree(before_cluster_d));   /* bug fix: was never freed */
    CUDA_CALL(hipFree(mean_d));
    CUDA_CALL(hipFree(points_d));
    free(before_clusters);
    free(mean);
    free(points);
    free(after_cluster);
    return 0;
}
b0984d95f4e0382a8506c85f9aaf377534805b41.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>

/* One 5-dimensional sample: three color channels plus two extra features. */
typedef struct {
    double _r;
    double _g;
    double _b;
    double _m;
    double _n;
} Point;

/* Abort with file/line info when a CUDA runtime call fails. */
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
    printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
    printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
    exit(EXIT_FAILURE);}}

/* Read the input header: cluster count K and image dimensions a x b. */
void readImageSize(FILE *ifp, int* K, int* a, int* b)
{
    fscanf(ifp,"%d\n",K);
    printf("%d\n",*K);
    fscanf(ifp,"%d\n",a);
    printf("%d\n",*a);
    fscanf(ifp,"%d\n",b);
    printf("%d\n",*b);
}

/* Read num_points comma-separated 5-tuples from the file into points. */
void readPoints(FILE* ifp, Point *points, int num_points)
{
    int i;
    for(i=0;i<num_points;i++)
    {
        fscanf(ifp,"%lf,%lf,%lf,%lf,%lf",
               &points[i]._r, &points[i]._g, &points[i]._b,
               &points[i]._m, &points[i]._n);
    }
}

/* Seed the K initial means with the samples at indices n/2, n/3, n/4, ... */
void initialize(Point* mean, int K, int num_points, Point* points)
{
    int i, a, p=2;
    srand(time(NULL));  /* kept from the original; rand() itself is unused */
    for(i=0;i<K;i++)
    {
        a = num_points/p;
        mean[i]._r = points[a]._r;
        mean[i]._g = points[a]._g;
        mean[i]._b = points[a]._b;
        mean[i]._m = points[a]._m;
        mean[i]._n = points[a]._n;
        p++;
    }
}

/* Mark every point as belonging to no cluster yet. */
void IntClusterMem(int *cluster, int num_points)
{
    int i;
    for(i=0;i<num_points;i++)
    {
        cluster[i]=-1;
    }
}

/* Kernel: assign each pixel to the nearest (Euclidean, 5-D) of the K means.
 * Launched on a 2-D grid covering the x-by-y image. */
__global__ void pointsCluster(int* after_cluster_d, Point* point_d, Point* Dmean, int K, int x, int y)
{
    int j = blockIdx.x*blockDim.x + threadIdx.x;
    int k = blockIdx.y*blockDim.y + threadIdx.y;
    /* Bug fix: the grid is rounded up, so guard threads past the image edge
     * to avoid out-of-bounds reads/writes. */
    if (j >= x || k >= y) return;
    int t = k*x + j;
    int parent = 0;
    double minDist = sqrt(pow((point_d[t]._r-Dmean[0]._r),2)+pow((point_d[t]._g-Dmean[0]._g),2)+pow((point_d[t]._b-Dmean[0]._b),2)+pow((point_d[t]._m-Dmean[0]._m),2)+pow((point_d[t]._n-Dmean[0]._n),2));
    for (int i=1;i<K;i++)
    {
        double dist = sqrt(pow((point_d[t]._r-Dmean[i]._r),2)+pow((point_d[t]._g-Dmean[i]._g),2)+pow((point_d[t]._b-Dmean[i]._b),2)+pow((point_d[t]._m-Dmean[i]._m),2)+pow((point_d[t]._n-Dmean[i]._n),2));
        if (minDist >= dist)
        {
            parent = i;
            minDist = dist;
        }
    }
    after_cluster_d[t] = parent;
}

/* Recompute each cluster's mean from its assigned points (host side).
 * Empty clusters collapse to the zero vector, as in the original. */
void calcNewMean(Point* points, int* cluster, Point* mean, int K, int num_points)
{
    Point* newMean=(Point*)malloc(sizeof(Point)*K);
    int* members=(int*)malloc(sizeof(int)*(K));
    int i;
    for(i=0;i<K;i++)
    {
        members[i]=0;
        newMean[i]._r=0; newMean[i]._g=0; newMean[i]._b=0;
        newMean[i]._m=0; newMean[i]._n=0;
    }
    for(i=0;i<num_points;i++)
    {
        members[cluster[i]]++;
        newMean[cluster[i]]._r+=points[i]._r;
        newMean[cluster[i]]._g+=points[i]._g;
        newMean[cluster[i]]._b+=points[i]._b;
        newMean[cluster[i]]._m+=points[i]._m;
        newMean[cluster[i]]._n+=points[i]._n;
    }
    for(i=0;i<K;i++)
    {
        if(members[i]!=0)
        {
            newMean[i]._r/=members[i];
            newMean[i]._g/=members[i];
            newMean[i]._b/=members[i];
            newMean[i]._m/=members[i];
            newMean[i]._n/=members[i];
        }
        else
        {
            newMean[i]._r=0; newMean[i]._g=0; newMean[i]._b=0;
            newMean[i]._m=0; newMean[i]._n=0;
        }
    }
    for(i=0;i<K;i++)
    {
        mean[i]._r=newMean[i]._r;
        mean[i]._g=newMean[i]._g;
        mean[i]._b=newMean[i]._b;
        mean[i]._m=newMean[i]._m;
        mean[i]._n=newMean[i]._n;
    }
    /* Bug fix: these were leaked on every k-means iteration. */
    free(newMean);
    free(members);
}

/* Convergence test: did any point change cluster by more than tol?
 * NOTE(review): currently unused by main (which stops on iteration count). */
int chkConvrg(int *before_clusters, int *after_cluster, int num_points, float tol)
{
    int i;
    tol = num_points*tol;
    for(i=0;i<num_points;i++)
    {
        if(abs(before_clusters[i]-after_cluster[i])>tol)
        {
            return -1;
        }
    }
    return 0;
}

/* Usage: prog input output tol blockX blockY K [iterations]
 * Runs GPU k-means on a 5-channel image and writes means + labels. */
int main(int argc, char* argv[])
{
    /* host data */
    int K;
    int num_points;
    int *before_clusters;
    int i;
    int job_done = 0;
    int x, y, iter = 0, iterations;
    Point *mean;
    Point *points;
    int *after_cluster;
    float tol;

    /* device data */
    Point *points_d;
    Point *mean_d;
    int *after_cluster_d;
    int *before_cluster_d;

    cudaEvent_t startinit, endinit, startmean, endmean, startcal, endcal, startindex, endindex;
    cudaEvent_t start1, end1;
    float timeinit, timemean, timecal, timeindex;
    float time1;

    tol = atof(argv[3]);
    printf("Tolerance = %.10f\n",tol);

    /* Bug fix: `iterations` was read below but never assigned (undefined
     * behavior).  Take it from an optional 7th argument, defaulting to 50. */
    iterations = (argc > 7) ? atoi(argv[7]) : 50;

    cudaEventCreate(&start1);
    cudaEventCreate(&end1);
    cudaEventRecord(start1, 0);

    /* read the input image */
    FILE *ifp = fopen(argv[1],"r");
    readImageSize(ifp,&K,&x,&y);
    K = atoi(argv[6]);          /* command-line K overrides the file header */
    num_points = x*y;
    int blockX = atoi(argv[4]);
    int blockY = atoi(argv[5]);

    points=(Point*)malloc(sizeof(Point)*num_points);
    readPoints(ifp,points,num_points);
    fclose(ifp);

    before_clusters=(int*)malloc(sizeof(int)*num_points);
    after_cluster=(int*)malloc(sizeof(int)*num_points);
    mean=(Point*)malloc(sizeof(Point)*K);

    initialize(mean,K, num_points, points);
    IntClusterMem(before_clusters,num_points);
    IntClusterMem(after_cluster,num_points);

    /* allocate GPU memory */
    CUDA_CALL(cudaMalloc((void**) &after_cluster_d, sizeof(int)*num_points));
    CUDA_CALL(cudaMalloc((void**) &before_cluster_d, sizeof(int)*num_points));
    CUDA_CALL(cudaMalloc((void**) &points_d, sizeof(Point)*num_points));
    CUDA_CALL(cudaMalloc((void**) &mean_d, sizeof(Point)*K));

    /* copy data points to device (timed) */
    cudaEventCreate(&startinit);
    cudaEventCreate(&endinit);
    cudaEventRecord(startinit, 0);
    CUDA_CALL(cudaMemcpy(points_d, points, sizeof(Point)*num_points, cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(after_cluster_d, after_cluster, sizeof(int)*num_points, cudaMemcpyHostToDevice));
    cudaEventRecord(endinit, 0);
    cudaEventSynchronize(endinit);
    cudaEventElapsedTime(&timeinit, startinit, endinit);

    while(1)
    {
        iter++;

        /* copy current centroids to device (timed) */
        cudaEventCreate(&startmean);
        cudaEventCreate(&endmean);
        cudaEventRecord(startmean, 0);
        CUDA_CALL(cudaMemcpy(mean_d, mean, sizeof(Point)*K, cudaMemcpyHostToDevice));
        cudaEventRecord(endmean, 0);
        cudaEventSynchronize(endmean);
        cudaEventElapsedTime(&timemean, startmean, endmean);

        /* assign points to clusters on the GPU (timed) */
        cudaEventCreate(&startcal);
        cudaEventCreate(&endcal);
        cudaEventRecord(startcal, 0);
        dim3 block(blockX, blockY);
        dim3 grid((x+blockX-1)/blockX, (y+blockY-1)/blockY);
        pointsCluster<<<grid,block>>>(after_cluster_d, points_d,mean_d,K,x,y);
        cudaDeviceSynchronize();
        cudaEventRecord(endcal, 0);
        cudaEventSynchronize(endcal);
        cudaEventElapsedTime(&timecal, startcal, endcal);

        /* copy assignments back (timed) */
        cudaEventCreate(&startindex);
        cudaEventCreate(&endindex);
        cudaEventRecord(startindex, 0);
        CUDA_CALL(cudaMemcpy(after_cluster, after_cluster_d, sizeof(int)*num_points, cudaMemcpyDeviceToHost));
        cudaEventRecord(endindex, 0);
        cudaEventSynchronize(endindex);
        cudaEventElapsedTime(&timeindex, startindex, endindex);

        calcNewMean(points,after_cluster,mean,K,num_points);

        if(iter>iterations)
        {
            printf("K-mean algorithm Converged with iterations = %d!\n",iter);
            job_done=1;
        }
        else
        {
            for(i=0;i<num_points;i++)
            {
                before_clusters[i]=after_cluster[i];
            }
        }
        if(job_done==1)
            break;
    }

    /* write results: header, final means, then per-point label (1-based) */
    FILE* ofp=fopen(argv[2],"w");
    fprintf(ofp,"%d\n",K);
    fprintf(ofp,"%d\n",x);
    fprintf(ofp,"%d\n",y);
    for(i=0;i<K;i++)
        fprintf(ofp,"%lf,%lf,%lf,%lf,%lf\n",mean[i]._r,mean[i]._g,mean[i]._b,mean[i]._m,mean[i]._n);
    for(i=0;i<num_points;i++)
        fprintf(ofp,"%lf,%lf,%lf,%lf,%lf,%d\n",points[i]._r,points[i]._g,points[i]._b,points[i]._m,points[i]._n,after_cluster[i]+1);
    fclose(ofp);

    cudaEventRecord(end1, 0);
    cudaEventSynchronize(end1);
    cudaEventElapsedTime(&time1, start1, end1);

    printf("Time for sending initial data from host to device : %f\t sec\n",timeinit/1000);
    printf("Time for sending new means from host to device : %f\t sec\n",timemean/1000);
    printf("Time for calculation : %f\t sec\n",timecal/1000);
    printf("Time for sending new index from device to host : %f\t sec\n",timeindex/1000);
    printf("Total Time : %f\t sec\n",time1/1000);

    CUDA_CALL(cudaFree(after_cluster_d));
    CUDA_CALL(cudaFree(before_cluster_d));   /* bug fix: was never freed */
    CUDA_CALL(cudaFree(mean_d));
    CUDA_CALL(cudaFree(points_d));
    free(before_clusters);
    free(mean);
    free(points);
    free(after_cluster);
    return 0;
}
9c3534713465ecd8ee9f463757f840b326039d34.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 2.1.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date August 2016

       @generated from magmablas/zlaswp_sym.cu, normal z -> d, Tue Aug 30 09:38:33 2016

       @author Stan Tomov
       @author Mathieu Faverge
       @author Ichitaro Yamazaki
       @author Mark Gates
*/
#include "magma_internal.h"

// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64

// Kernel argument bundle, passed by value at launch: matrix pointer and
// geometry plus up to MAX_PIVOTS zero-based pivot targets for this batch.
typedef struct
{
    double *dA;
    int n, lda, j0, npivots;
    int ipiv[MAX_PIVOTS];
} dlaswp_sym_params_t;


// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
//
// Symmetric (two-sided) interchange of rows/columns i1 and i2 on the
// lower-triangular storage: each thread owns the tid-th element of the
// swapped pair, and the five-way case split below picks the stored
// (lower-triangle) address for that element on each side of the swap.
__global__ void dlaswp_sym_kernel( dlaswp_sym_params_t params )
{
    unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
    if ( tid < params.n ) {
        // Pivots are applied sequentially; i2 >= i1 is assumed (LAPACK ipiv).
        for( int ii = params.j0; ii < params.npivots; ++ii ) {
            int i1 = ii;
            int i2 = params.ipiv[ii];

            // swap: i1 <-> i2
            // this thread is responsible for the tid-th element
            double *A1 = NULL, *A2 = NULL;
            if (tid < i1) {
                // row swap: (i1,tid) <-> (i2,tid)
                A1 = params.dA + tid*params.lda + i1;
                A2 = params.dA + tid*params.lda + i2;
            }
            else if (tid == i1) {
                // diagonal swap: (i1,i1) <-> (i2,i2)
                A1 = params.dA + i1*params.lda + i1;
                A2 = params.dA + i2*params.lda + i2;
            }
            else if (tid < i2) {
                // row-col swap: (tid,i1) <-> (i2,tid)
                A1 = params.dA + i1*params.lda + tid;
                A2 = params.dA + tid*params.lda + i2;
            }
            else if (tid == i2) {
                // diagonal swap: done by i1-th thread
            }
            else if (tid > i2) {
                // column swap: (tid,i1) <-> (tid,i2)
                A1 = params.dA + i1*params.lda + tid;
                A2 = params.dA + i2*params.lda + tid;
            }
            if ( A1 != NULL && A2 != NULL) {
                double temp = *A1;
                *A1   = *A2;
                *A2   = temp;
            }
        }
    }
}


// Launch dlaswp_sym kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
// Runs on the stream owned by `queue`; params is copied by value to the device.
extern "C" void dlaswp_sym( dlaswp_sym_params_t &params, magma_queue_t queue )
{
    int blocks = magma_ceildiv(params.n, NTHREADS);
    hipLaunchKernelGGL(( dlaswp_sym_kernel), dim3(blocks), dim3(NTHREADS), 0, queue->cuda_stream() , params );
}


/***************************************************************************//**
    Purpose:
    =============
    DLASWP_SYM applies a series of symmetric pivoting on a symmetric matrix A.
    Currently, it is only implemented for the lower-triangular part of the matrix.

    Arguments:
    ==========
    @param[in]
    n        INTEGER
             The number of columns of the matrix A.

    @param[in,out]
    dA       DOUBLE PRECISION array on GPU, dimension (*,*)
             On entry, the matrix of column dimension N to which the row
             interchanges will be applied.
             On exit, the permuted matrix.

    @param[in]
    lda      INTEGER
             Stride between elements in same column.

    @param[in]
    k1       INTEGER
             The first element of IPIV for which a row interchange will
             be done. (One based index.)

    @param[in]
    k2       INTEGER
             The last element of IPIV for which a row interchange will
             be done. (One based index.)

    @param[in]
    ipiv     INTEGER array, on CPU, dimension (K2*abs(INCI))
             The vector of pivot indices.  Only the elements in positions
             K1 through K2 of IPIV are accessed.
             IPIV(K) = L implies rows K and L are to be interchanged.

    @param[in]
    inci     INTEGER
             The increment between successive values of IPIV.
             Currently, IPIV > 0.
             TODO: If IPIV is negative, the pivots are applied in reverse order.

    @param[in]
    queue    magma_queue_t
             Queue to execute in.

    @ingroup magma_laswp_sym
*******************************************************************************/
extern "C" void
magmablas_dlaswp_sym_q(
    magma_int_t n, double *dA, magma_int_t lda,
    magma_int_t k1, magma_int_t k2,
    const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue )
{
    // Validate arguments (LAPACK-style negative info codes).
    magma_int_t info = 0;
    if ( n < 0 )
        info = -1;
    else if ( k1 < 0 )
        info = -4;
    else if ( k2 < 0 || k2 < k1 )
        info = -5;
    else if ( inci <= 0 )
        info = -7;

    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;  //info;
    }

    // Apply pivots in batches of MAX_PIVOTS; ipiv lives on the CPU, so each
    // batch is copied into the by-value params struct (converted to 0-based).
    for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
        int npivots = min( MAX_PIVOTS, k2-k );
        // fields are:                  dA      n       lda      j0      npivots
        dlaswp_sym_params_t params = { dA, int(n), int(lda), int(k), int(k+npivots) };
        for( int j = 0; j < npivots; ++j ) {
            params.ipiv[j] = ipiv[(k+j)*inci] - 1;
        }
        dlaswp_sym( params, queue );
    }
}
9c3534713465ecd8ee9f463757f840b326039d34.cu
/*
    -- MAGMA (version 2.1.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date August 2016

       @generated from magmablas/zlaswp_sym.cu, normal z -> d, Tue Aug 30 09:38:33 2016

       @author Stan Tomov
       @author Mathieu Faverge
       @author Ichitaro Yamazaki
       @author Mark Gates
*/
#include "magma_internal.h"

// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64

// Kernel argument bundle, passed by value at launch: matrix pointer and
// geometry plus up to MAX_PIVOTS zero-based pivot targets for this batch.
typedef struct
{
    double *dA;
    int n, lda, j0, npivots;
    int ipiv[MAX_PIVOTS];
} dlaswp_sym_params_t;


// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
//
// Symmetric (two-sided) interchange of rows/columns i1 and i2 on the
// lower-triangular storage: each thread owns the tid-th element of the
// swapped pair, and the five-way case split below picks the stored
// (lower-triangle) address for that element on each side of the swap.
__global__ void dlaswp_sym_kernel( dlaswp_sym_params_t params )
{
    unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
    if ( tid < params.n ) {
        // Pivots are applied sequentially; i2 >= i1 is assumed (LAPACK ipiv).
        for( int ii = params.j0; ii < params.npivots; ++ii ) {
            int i1 = ii;
            int i2 = params.ipiv[ii];

            // swap: i1 <-> i2
            // this thread is responsible for the tid-th element
            double *A1 = NULL, *A2 = NULL;
            if (tid < i1) {
                // row swap: (i1,tid) <-> (i2,tid)
                A1 = params.dA + tid*params.lda + i1;
                A2 = params.dA + tid*params.lda + i2;
            }
            else if (tid == i1) {
                // diagonal swap: (i1,i1) <-> (i2,i2)
                A1 = params.dA + i1*params.lda + i1;
                A2 = params.dA + i2*params.lda + i2;
            }
            else if (tid < i2) {
                // row-col swap: (tid,i1) <-> (i2,tid)
                A1 = params.dA + i1*params.lda + tid;
                A2 = params.dA + tid*params.lda + i2;
            }
            else if (tid == i2) {
                // diagonal swap: done by i1-th thread
            }
            else if (tid > i2) {
                // column swap: (tid,i1) <-> (tid,i2)
                A1 = params.dA + i1*params.lda + tid;
                A2 = params.dA + i2*params.lda + tid;
            }
            if ( A1 != NULL && A2 != NULL) {
                double temp = *A1;
                *A1   = *A2;
                *A2   = temp;
            }
        }
    }
}


// Launch dlaswp_sym kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
// Runs on the stream owned by `queue`; params is copied by value to the device.
extern "C" void dlaswp_sym( dlaswp_sym_params_t &params, magma_queue_t queue )
{
    int blocks = magma_ceildiv(params.n, NTHREADS);
    dlaswp_sym_kernel<<< blocks, NTHREADS, 0, queue->cuda_stream() >>>( params );
}


/***************************************************************************//**
    Purpose:
    =============
    DLASWP_SYM applies a series of symmetric pivoting on a symmetric matrix A.
    Currently, it is only implemented for the lower-triangular part of the matrix.

    Arguments:
    ==========
    @param[in]
    n        INTEGER
             The number of columns of the matrix A.

    @param[in,out]
    dA       DOUBLE PRECISION array on GPU, dimension (*,*)
             On entry, the matrix of column dimension N to which the row
             interchanges will be applied.
             On exit, the permuted matrix.

    @param[in]
    lda      INTEGER
             Stride between elements in same column.

    @param[in]
    k1       INTEGER
             The first element of IPIV for which a row interchange will
             be done. (One based index.)

    @param[in]
    k2       INTEGER
             The last element of IPIV for which a row interchange will
             be done. (One based index.)

    @param[in]
    ipiv     INTEGER array, on CPU, dimension (K2*abs(INCI))
             The vector of pivot indices.  Only the elements in positions
             K1 through K2 of IPIV are accessed.
             IPIV(K) = L implies rows K and L are to be interchanged.

    @param[in]
    inci     INTEGER
             The increment between successive values of IPIV.
             Currently, IPIV > 0.
             TODO: If IPIV is negative, the pivots are applied in reverse order.

    @param[in]
    queue    magma_queue_t
             Queue to execute in.

    @ingroup magma_laswp_sym
*******************************************************************************/
extern "C" void
magmablas_dlaswp_sym_q(
    magma_int_t n, double *dA, magma_int_t lda,
    magma_int_t k1, magma_int_t k2,
    const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue )
{
    // Validate arguments (LAPACK-style negative info codes).
    magma_int_t info = 0;
    if ( n < 0 )
        info = -1;
    else if ( k1 < 0 )
        info = -4;
    else if ( k2 < 0 || k2 < k1 )
        info = -5;
    else if ( inci <= 0 )
        info = -7;

    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;  //info;
    }

    // Apply pivots in batches of MAX_PIVOTS; ipiv lives on the CPU, so each
    // batch is copied into the by-value params struct (converted to 0-based).
    for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
        int npivots = min( MAX_PIVOTS, k2-k );
        // fields are:                  dA      n       lda      j0      npivots
        dlaswp_sym_params_t params = { dA, int(n), int(lda), int(k), int(k+npivots) };
        for( int j = 0; j < npivots; ++j ) {
            params.ipiv[j] = ipiv[(k+j)*inci] - 1;
        }
        dlaswp_sym( params, queue );
    }
}
09d8c96524405aca0ad9a45d6731f562ee6c7cdd.hip
// !!! This is a file automatically generated by hipify!!! #define CUDA #include "isoplotter.h" #include <hip/hip_runtime.h> #include <iostream> #define xcuda(stmt) { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ std::cerr << __FILE__ << ":" << __LINE__ << ": Failed to run " << #stmt << ". Reason: " << hipGetErrorString(err) << std::endl; \ exit(1); \ } \ } static const uint Threads_Per_Block = 512; __device__ __host__ double entropy(double gcmean, double atmean) { return -(gcmean * log2(gcmean) + atmean * log2(atmean)); } __device__ __host__ double entropy(double gcmean) { return entropy(gcmean, 1 - gcmean); } __global__ void find_best_split_kernel(gc_sum_t gc_sum, double segment_entropy, double *Djs, uint64_t *is) { double best_Djs = 0.0; uint64_t best_i = 0; uint64_t n = gc_sum.length(); for(uint64_t i = 2 + (blockDim.x * blockIdx.x) + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { uint64_t len_left = i; uint64_t len_right = n - i; double entropy_left = entropy( gc_sum.get(i-1) / len_left ); double entropy_right = entropy( gc_sum.get_reverse(i) / len_right ); double weighted_entropy_left = double(len_left) / n * entropy_left; double weighted_entropy_right = double(len_right) / n * entropy_right; double candidateDjs = segment_entropy - (weighted_entropy_left + weighted_entropy_right); if(best_i == 0 || candidateDjs > best_Djs) { best_Djs = candidateDjs; best_i = i; } } __shared__ double Djs_shared[Threads_Per_Block]; __shared__ uint64_t is_shared[Threads_Per_Block]; int tid = threadIdx.x; Djs_shared[tid] = best_Djs; is_shared[tid] = best_i; for(int stride = Threads_Per_Block / 2; stride > 0; stride >>= 1) { __syncthreads(); if(tid < stride) { if(Djs_shared[tid + stride] > Djs_shared[tid]) { Djs_shared[tid] = Djs_shared[tid + stride]; is_shared[tid] = is_shared[tid + stride]; } } } if(tid == 0) { Djs[blockIdx.x] = Djs_shared[0]; is[blockIdx.x] = is_shared[0]; } } uint64_t find_best_split_cuda(segment_t segment) { uint64_t n = segment.len() - 2; 
uint64_t nblocks = (n - 1) / Threads_Per_Block + 1; if(nblocks > 2048) nblocks = 2048; double *Djs = new double[nblocks]; double *gpu_Djs; uint64_t *is = new uint64_t[nblocks]; uint64_t *gpu_is; xcuda( hipMalloc((void**)&gpu_Djs, nblocks*sizeof(double)) ); xcuda( hipMalloc((void**)&gpu_is, nblocks*sizeof(uint64_t)) ); hipLaunchKernelGGL(( find_best_split_kernel), dim3(nblocks), dim3(Threads_Per_Block), 0, 0, segment.gc_sum.gpu_copy(), segment.entropy, gpu_Djs, gpu_is); xcuda( hipPeekAtLastError() ); xcuda( hipDeviceSynchronize() ); xcuda( hipMemcpy(Djs, gpu_Djs, nblocks * sizeof(double), hipMemcpyDeviceToHost) ); xcuda( hipMemcpy(is, gpu_is, nblocks * sizeof(uint64_t), hipMemcpyDeviceToHost) ); double best_Djs = Djs[0]; uint64_t best_i = is[0]; for(uint64_t i = 1; i < nblocks; i++) { double d = Djs[i]; if(d > best_Djs) { best_Djs = d; best_i = is[i]; } } delete [] Djs; delete [] is; xcuda( hipFree(gpu_Djs) ); xcuda( hipFree(gpu_is) ); return best_i; }
09d8c96524405aca0ad9a45d6731f562ee6c7cdd.cu
#define CUDA #include "isoplotter.h" #include <cuda.h> #include <iostream> #define xcuda(stmt) { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ std::cerr << __FILE__ << ":" << __LINE__ << ": Failed to run " << #stmt << ". Reason: " << cudaGetErrorString(err) << std::endl; \ exit(1); \ } \ } static const uint Threads_Per_Block = 512; __device__ __host__ double entropy(double gcmean, double atmean) { return -(gcmean * log2(gcmean) + atmean * log2(atmean)); } __device__ __host__ double entropy(double gcmean) { return entropy(gcmean, 1 - gcmean); } __global__ void find_best_split_kernel(gc_sum_t gc_sum, double segment_entropy, double *Djs, uint64_t *is) { double best_Djs = 0.0; uint64_t best_i = 0; uint64_t n = gc_sum.length(); for(uint64_t i = 2 + (blockDim.x * blockIdx.x) + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { uint64_t len_left = i; uint64_t len_right = n - i; double entropy_left = entropy( gc_sum.get(i-1) / len_left ); double entropy_right = entropy( gc_sum.get_reverse(i) / len_right ); double weighted_entropy_left = double(len_left) / n * entropy_left; double weighted_entropy_right = double(len_right) / n * entropy_right; double candidateDjs = segment_entropy - (weighted_entropy_left + weighted_entropy_right); if(best_i == 0 || candidateDjs > best_Djs) { best_Djs = candidateDjs; best_i = i; } } __shared__ double Djs_shared[Threads_Per_Block]; __shared__ uint64_t is_shared[Threads_Per_Block]; int tid = threadIdx.x; Djs_shared[tid] = best_Djs; is_shared[tid] = best_i; for(int stride = Threads_Per_Block / 2; stride > 0; stride >>= 1) { __syncthreads(); if(tid < stride) { if(Djs_shared[tid + stride] > Djs_shared[tid]) { Djs_shared[tid] = Djs_shared[tid + stride]; is_shared[tid] = is_shared[tid + stride]; } } } if(tid == 0) { Djs[blockIdx.x] = Djs_shared[0]; is[blockIdx.x] = is_shared[0]; } } uint64_t find_best_split_cuda(segment_t segment) { uint64_t n = segment.len() - 2; uint64_t nblocks = (n - 1) / Threads_Per_Block + 1; if(nblocks > 2048) 
nblocks = 2048; double *Djs = new double[nblocks]; double *gpu_Djs; uint64_t *is = new uint64_t[nblocks]; uint64_t *gpu_is; xcuda( cudaMalloc((void**)&gpu_Djs, nblocks*sizeof(double)) ); xcuda( cudaMalloc((void**)&gpu_is, nblocks*sizeof(uint64_t)) ); find_best_split_kernel<<<nblocks, Threads_Per_Block>>>(segment.gc_sum.gpu_copy(), segment.entropy, gpu_Djs, gpu_is); xcuda( cudaPeekAtLastError() ); xcuda( cudaThreadSynchronize() ); xcuda( cudaMemcpy(Djs, gpu_Djs, nblocks * sizeof(double), cudaMemcpyDeviceToHost) ); xcuda( cudaMemcpy(is, gpu_is, nblocks * sizeof(uint64_t), cudaMemcpyDeviceToHost) ); double best_Djs = Djs[0]; uint64_t best_i = is[0]; for(uint64_t i = 1; i < nblocks; i++) { double d = Djs[i]; if(d > best_Djs) { best_Djs = d; best_i = is[i]; } } delete [] Djs; delete [] is; xcuda( cudaFree(gpu_Djs) ); xcuda( cudaFree(gpu_is) ); return best_i; }
20d8716a8650ec047c0dbd17aee21aecea19fec5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/util/math_functions.hpp" namespace caffe { /***************************************************************** *Function: NesterovUpdate() *Description: *Calls: *Called By: nesterov_update_gpu() *Input: *Output: *Return: *Others: .NesterovMomentum,Momentum On the importance of initialization and momentum in deep learning *****************************************************************/ template <typename Dtype> __global__ void NesterovUpdate(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate) { CUDA_KERNEL_LOOP(i, N) { float hi = h[i]; float hi_new = h[i] = momentum * hi + local_rate * g[i]; g[i] = (1+momentum) * hi_new - momentum * hi; } } /***************************************************************** *Function: nesterov_update_gpu() *Description: *Calls: NesterovUpdate() *Called By: *Input: *Output: *Return: *Others: GPU *****************************************************************/ template <typename Dtype> void nesterov_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate) { NesterovUpdate<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, g, h, momentum, local_rate); CUDA_POST_KERNEL_CHECK; } template void nesterov_update_gpu<float>(int, float*, float*, float, float); template void nesterov_update_gpu<double>(int, double*, double*, double, double); } // namespace caffe
20d8716a8650ec047c0dbd17aee21aecea19fec5.cu
#include "caffe/util/math_functions.hpp" namespace caffe { /***************************************************************** *Function: NesterovUpdate() *Description: 计算更新值 *Calls: *Called By: nesterov_update_gpu() *Input: *Output: *Return: *Others: .Nesterov是Momentum的变种,相当于添加了矫正因子的Momentum 參考文獻:On the importance of initialization and momentum in deep learning *****************************************************************/ template <typename Dtype> __global__ void NesterovUpdate(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate) { CUDA_KERNEL_LOOP(i, N) { float hi = h[i]; float hi_new = h[i] = momentum * hi + local_rate * g[i]; g[i] = (1+momentum) * hi_new - momentum * hi; } } /***************************************************************** *Function: nesterov_update_gpu() *Description: 计算更新值 *Calls: NesterovUpdate() *Called By: *Input: *Output: *Return: *Others: GPU版本 *****************************************************************/ template <typename Dtype> void nesterov_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate) { NesterovUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, g, h, momentum, local_rate); CUDA_POST_KERNEL_CHECK; } template void nesterov_update_gpu<float>(int, float*, float*, float, float); template void nesterov_update_gpu<double>(int, double*, double*, double, double); } // namespace caffe
b8280c6d45c6a398288a1c4e4d849fcc4c807b8f.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2018 Saman Ashkiani * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing permissions and * limitations under the License. */ #include "slab_alloc_hip.cuh"
b8280c6d45c6a398288a1c4e4d849fcc4c807b8f.cu
/* * Copyright 2018 Saman Ashkiani * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing permissions and * limitations under the License. */ #include "slab_alloc.cuh"
4b16f7bb2ecb6fe9757ec25e61f51f48619e02e3.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/sort.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "Buffer.h" #include "Message.h" namespace gpucbt { void Buffer::GPUSort(uint32_t num) { // initialize host vector thrust::device_vector<Message> d(messages_, messages_ + num); try { thrust::sort(d.begin(), d.end()); } catch(std::bad_alloc &e) { fprintf(stderr, "Ran out of memory while sorting\n"); exit(-1); } thrust::copy(d.begin(), d.end(), messages_); } bool Buffer::GPUAggregate() { return false; } }
4b16f7bb2ecb6fe9757ec25e61f51f48619e02e3.cu
#include <thrust/sort.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cuda.h> #include <cuda_runtime.h> #include "Buffer.h" #include "Message.h" namespace gpucbt { void Buffer::GPUSort(uint32_t num) { // initialize host vector thrust::device_vector<Message> d(messages_, messages_ + num); try { thrust::sort(d.begin(), d.end()); } catch(std::bad_alloc &e) { fprintf(stderr, "Ran out of memory while sorting\n"); exit(-1); } thrust::copy(d.begin(), d.end(), messages_); } bool Buffer::GPUAggregate() { return false; } }
1f1711e1645919c08445857a316f38e27ab92620.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * CUDA accelerated coulombic potential grid test code * John E. Stone <johns@ks.uiuc.edu> * http://www.ks.uiuc.edu/~johns/ * * Coulombic potential grid calculation microbenchmark based on the time * consuming portions of the 'cionize' ion placement tool. */ #ifdef __MCUDA__ #include <mcuda.h> #else #include <hip/hip_runtime.h> #endif #include <parboil.h> #include <stdio.h> #include <stdlib.h> #include "cuenergy.h" /* initatoms() * Store a pseudorandom arrangement of point charges in *atombuf. */ static int initatoms(float **atombuf, int count, dim3 volsize, float gridspacing) { dim3 size; int i; float *atoms; srand(54321); // Ensure that atom placement is repeatable atoms = (float *) malloc(count * 4 * sizeof(float)); *atombuf = atoms; // compute grid dimensions in angstroms size.x = gridspacing * volsize.x; size.y = gridspacing * volsize.y; size.z = gridspacing * volsize.z; for (i=0; i<count; i++) { int addr = i * 4; atoms[addr ] = (rand() / (float) RAND_MAX) * size.x; atoms[addr + 1] = (rand() / (float) RAND_MAX) * size.y; atoms[addr + 2] = (rand() / (float) RAND_MAX) * size.z; atoms[addr + 3] = ((rand() / (float) RAND_MAX) * 2.0) - 1.0; // charge } return 0; } /* writeenergy() * Write part of the energy array to an output file for verification. 
*/ static int writeenergy(char *filename, float *energy, dim3 volsize) { FILE *outfile; int x, y; outfile = fopen(filename, "w"); if (outfile == NULL) { fputs("Cannot open output file\n", stderr); return -1; } /* Print the execution parameters */ fprintf(outfile, "%d %d %d %d\n", volsize.x, volsize.y, volsize.z, ATOMCOUNT); /* Print a checksum */ { double sum = 0.0; for (y = 0; y < volsize.y; y++) { for (x = 0; x < volsize.x; x++) { double t = energy[y*volsize.x+x]; t = fmax(-20.0, fmin(20.0, t)); sum += t; } } fprintf(outfile, "%.4g\n", sum); } /* Print several rows of the computed data */ //for (y = 0; y < 17; y++) { for (y = 0; y < volsize.y; y++) { //need to print all for (x = 0; x < volsize.x; x++) { int addr = y * volsize.x + x; fprintf(outfile, "%.4g ", energy[addr]); } fprintf(outfile, "\n"); } fclose(outfile); return 0; } int main(int argc, char** argv) { struct pb_TimerSet timers; struct pb_Parameters *parameters; float *energy = NULL; // Output of device calculation float *atoms = NULL; dim3 volsize, Gsz, Bsz; // int final_iteration_count; // number of atoms to simulate int atomcount = ATOMCOUNT; // voxel spacing const float gridspacing = 0.1; // Size of buffer on GPU int volmemsz; printf("CUDA accelerated coulombic potential microbenchmark\n"); printf("Original version by John E. 
Stone <johns@ks.uiuc.edu>\n"); printf("This version maintained by Chris Rodrigues\n"); parameters = pb_ReadParameters(&argc, argv); if (!parameters) return -1; if (parameters->inpFiles[0]) { fputs("No input files expected\n", stderr); return -1; } pb_InitializeTimerSet(&timers); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); // setup energy grid size volsize.x = VOLSIZEX; volsize.y = VOLSIZEY; volsize.z = 1; // setup CUDA grid and block sizes Bsz.x = BLOCKSIZEX; // each thread does multiple Xs Bsz.y = BLOCKSIZEY; Bsz.z = 1; Gsz.x = volsize.x / (Bsz.x * UNROLLX); // each thread does multiple Xs Gsz.y = volsize.y / Bsz.y; Gsz.z = volsize.z / Bsz.z; #if 1 printf("Grid size: %d x %d x %d\n", volsize.x, volsize.y, volsize.z); printf("Running kernel(atoms:%d, gridspacing %g, z %d)\n", atomcount, gridspacing, 0); #endif // allocate and initialize atom coordinates and charges if (initatoms(&atoms, atomcount, volsize, gridspacing)) return -1; // allocate and initialize the GPU output array volmemsz = sizeof(float) * volsize.x * volsize.y * volsize.z; // Main computation { float *d_output = NULL; // Output on device int iterations=0; int atomstart; pb_SwitchToTimer(&timers, pb_TimerID_COPY); hipMalloc((void**)&d_output, volmemsz); CUERR // check and clear any existing errors hipMemset(d_output, 0, volmemsz); CUERR // check and clear any existing errors pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); for (atomstart=0; atomstart<atomcount; atomstart+=MAXATOMS) { int atomsremaining = atomcount - atomstart; int runatoms = (atomsremaining > MAXATOMS) ? MAXATOMS : atomsremaining; iterations++; // copy the atoms to the GPU pb_SwitchToTimer(&timers, pb_TimerID_COPY); if (copyatomstoconstbuf(atoms + 4*atomstart, runatoms, 0*gridspacing)) return -1; pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); // RUN the kernel... 
hipLaunchKernelGGL(( cenergy), dim3(Gsz), dim3(Bsz), 0, 0, runatoms, 0.1, d_output); CUERR // check and clear any existing errors pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); // final_iteration_count = iterations; } #if 0 printf("Done\n"); #endif // Copy the GPU output data back to the host and use/store it.. energy = (float *) malloc(volmemsz); pb_SwitchToTimer(&timers, pb_TimerID_COPY); hipMemcpy(energy, d_output, volmemsz, hipMemcpyDeviceToHost); CUERR // check and clear any existing errors hipFree(d_output); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); } /* Print a subset of the results to a file */ if (parameters->outFile) { pb_SwitchToTimer(&timers, pb_TimerID_IO); if (writeenergy(parameters->outFile, energy, volsize) == -1) return -1; pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); } free(atoms); free(energy); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(parameters); return 0; }
1f1711e1645919c08445857a316f38e27ab92620.cu
/*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * CUDA accelerated coulombic potential grid test code * John E. Stone <johns@ks.uiuc.edu> * http://www.ks.uiuc.edu/~johns/ * * Coulombic potential grid calculation microbenchmark based on the time * consuming portions of the 'cionize' ion placement tool. */ #ifdef __MCUDA__ #include <mcuda.h> #else #include <cuda.h> #endif #include <parboil.h> #include <stdio.h> #include <stdlib.h> #include "cuenergy.h" /* initatoms() * Store a pseudorandom arrangement of point charges in *atombuf. */ static int initatoms(float **atombuf, int count, dim3 volsize, float gridspacing) { dim3 size; int i; float *atoms; srand(54321); // Ensure that atom placement is repeatable atoms = (float *) malloc(count * 4 * sizeof(float)); *atombuf = atoms; // compute grid dimensions in angstroms size.x = gridspacing * volsize.x; size.y = gridspacing * volsize.y; size.z = gridspacing * volsize.z; for (i=0; i<count; i++) { int addr = i * 4; atoms[addr ] = (rand() / (float) RAND_MAX) * size.x; atoms[addr + 1] = (rand() / (float) RAND_MAX) * size.y; atoms[addr + 2] = (rand() / (float) RAND_MAX) * size.z; atoms[addr + 3] = ((rand() / (float) RAND_MAX) * 2.0) - 1.0; // charge } return 0; } /* writeenergy() * Write part of the energy array to an output file for verification. 
*/ static int writeenergy(char *filename, float *energy, dim3 volsize) { FILE *outfile; int x, y; outfile = fopen(filename, "w"); if (outfile == NULL) { fputs("Cannot open output file\n", stderr); return -1; } /* Print the execution parameters */ fprintf(outfile, "%d %d %d %d\n", volsize.x, volsize.y, volsize.z, ATOMCOUNT); /* Print a checksum */ { double sum = 0.0; for (y = 0; y < volsize.y; y++) { for (x = 0; x < volsize.x; x++) { double t = energy[y*volsize.x+x]; t = fmax(-20.0, fmin(20.0, t)); sum += t; } } fprintf(outfile, "%.4g\n", sum); } /* Print several rows of the computed data */ //for (y = 0; y < 17; y++) { for (y = 0; y < volsize.y; y++) { //need to print all for (x = 0; x < volsize.x; x++) { int addr = y * volsize.x + x; fprintf(outfile, "%.4g ", energy[addr]); } fprintf(outfile, "\n"); } fclose(outfile); return 0; } int main(int argc, char** argv) { struct pb_TimerSet timers; struct pb_Parameters *parameters; float *energy = NULL; // Output of device calculation float *atoms = NULL; dim3 volsize, Gsz, Bsz; // int final_iteration_count; // number of atoms to simulate int atomcount = ATOMCOUNT; // voxel spacing const float gridspacing = 0.1; // Size of buffer on GPU int volmemsz; printf("CUDA accelerated coulombic potential microbenchmark\n"); printf("Original version by John E. 
Stone <johns@ks.uiuc.edu>\n"); printf("This version maintained by Chris Rodrigues\n"); parameters = pb_ReadParameters(&argc, argv); if (!parameters) return -1; if (parameters->inpFiles[0]) { fputs("No input files expected\n", stderr); return -1; } pb_InitializeTimerSet(&timers); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); // setup energy grid size volsize.x = VOLSIZEX; volsize.y = VOLSIZEY; volsize.z = 1; // setup CUDA grid and block sizes Bsz.x = BLOCKSIZEX; // each thread does multiple Xs Bsz.y = BLOCKSIZEY; Bsz.z = 1; Gsz.x = volsize.x / (Bsz.x * UNROLLX); // each thread does multiple Xs Gsz.y = volsize.y / Bsz.y; Gsz.z = volsize.z / Bsz.z; #if 1 printf("Grid size: %d x %d x %d\n", volsize.x, volsize.y, volsize.z); printf("Running kernel(atoms:%d, gridspacing %g, z %d)\n", atomcount, gridspacing, 0); #endif // allocate and initialize atom coordinates and charges if (initatoms(&atoms, atomcount, volsize, gridspacing)) return -1; // allocate and initialize the GPU output array volmemsz = sizeof(float) * volsize.x * volsize.y * volsize.z; // Main computation { float *d_output = NULL; // Output on device int iterations=0; int atomstart; pb_SwitchToTimer(&timers, pb_TimerID_COPY); cudaMalloc((void**)&d_output, volmemsz); CUERR // check and clear any existing errors cudaMemset(d_output, 0, volmemsz); CUERR // check and clear any existing errors pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); for (atomstart=0; atomstart<atomcount; atomstart+=MAXATOMS) { int atomsremaining = atomcount - atomstart; int runatoms = (atomsremaining > MAXATOMS) ? MAXATOMS : atomsremaining; iterations++; // copy the atoms to the GPU pb_SwitchToTimer(&timers, pb_TimerID_COPY); if (copyatomstoconstbuf(atoms + 4*atomstart, runatoms, 0*gridspacing)) return -1; pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); // RUN the kernel... 
cenergy<<<Gsz, Bsz, 0>>>(runatoms, 0.1, d_output); CUERR // check and clear any existing errors pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); // final_iteration_count = iterations; } #if 0 printf("Done\n"); #endif // Copy the GPU output data back to the host and use/store it.. energy = (float *) malloc(volmemsz); pb_SwitchToTimer(&timers, pb_TimerID_COPY); cudaMemcpy(energy, d_output, volmemsz, cudaMemcpyDeviceToHost); CUERR // check and clear any existing errors cudaFree(d_output); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); } /* Print a subset of the results to a file */ if (parameters->outFile) { pb_SwitchToTimer(&timers, pb_TimerID_IO); if (writeenergy(parameters->outFile, energy, volsize) == -1) return -1; pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); } free(atoms); free(energy); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(parameters); return 0; }
5b8f03f867589a2b6789ad1c47fbb6113eaf7b47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <algorithm> #include <stdio.h> #include "../CommonFiles/commonFunction.h" #include "../CommonFiles/TestFunction.h" #define ARRAY_SIZE 3 #include<cmath> using namespace std; __global__ void calculateSimilarity(float* c, float *a, const int NA, const int NB, const int NMax); hipError_t calculateSimilarityWithCuda(float* c, const float *a, const int NA, const int NB, const int NMax, string fileName); /** int main() { const int NA = 7; const int NB = 7; const int NMax = 1; hipError_t cudaStatus; float A[NA*NB*NMax] = {4}; float C[NMax]; cudaStatus = calculateSimilarityWithCuda(C, A,NA,NB, NMax, "calculateSimilarity1.txt"); //print out C for correctness checking printf("C[] array is %.2f\n", C[0]); testFunction(A, NA, NB, NMax, 1000,10000,1000,"calculateSimilarity1.txt", &calculateSimilarityWithCuda); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. 
cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } **/ /** Algorithm: (1) Sort the elements of the atom match matri into order of decreasing similiarity (not necessary because we will need to find max anyway) (2) Scan the atom match matrix to find the remaining pair of atoms, one from A and one from B, that has the largest calculated value for S(i,j) (3) Store the rsulting equivalences as a tuple of the form [A(i) <-> B(j); S9i,j)] (4) Remove A(i) and B(j) from further consideration (5) Return to step 2 if it is possible to map further atoms in A to atoms in B The itermolecular similarity is sum[S(i,j) from 1 to NA]/NA input: array of float c: containing result of total of NA max_element over NA array of float a: containing coordinates NA*NB*NMax elements to find max_element const int NA: number of atoms in molecule A const int NB: number of atoms in each molecule in B const int NMax: number of molecules in B output: void **/ __global__ void calculateSimilarity(float* c, float *a, const int NA, const int NB, const int NMax){ float total; int position,start; int tid= blockIdx.x*blockDim.x+threadIdx.x; if (tid<NMax) { //start is the first element every thread to check start = tid*NA*NB; // Initialised each thread's total to 0 total = 0; //loop through NA atoms of molecule A for (int k =0;k<NA; k++) { /** Step 2: Scan the atom match matrix to find the remaining pair of atoms, one from A and one from B, that has the largest calculated value for S(i,j) **/ // Find the max_element and position of max_element in the array of NA*NB float position = 0; float max = a[start]; for (int t = 0; t<NA*NB; t++) { if ( a[start + t] > max) { max = a[start + t]; position=t; } } /** Step 3: Store the rsulting equivalences as a tuple of the form [A(i) <-> B(j); S9i,j)] **/ // Sum the max into total total = total + max; // Get the position of max_element in 2D array int a1 = position/NB; //y axis int b1 = 
position%NB; // x axis /** Step 4: Remove A(i) and B(j) from further consideration **/ // Set all the elements in the same row and column of max_element to 0 // set all elements in the same y axis of max = 0 for (int i =0; i<NB; i++ ) a[start + a1*NB+i] =0; // set all elements in the same x axis of max = 0 for (int j =0; j<NA; j++) a[start + j*NB+b1] =0; } //The similiarity score is total/NA c[tid] = total /NA; } } // Helper function for using CUDA to add vectors in parallel. hipError_t calculateSimilarityWithCuda(float* c, const float *a, const int NA, const int NB, const int NMax, string fileName) { float *dev_a = 0; float *dev_c = 0; hipError_t cudaStatus; hipEvent_t start, stop; float milliseconds; cudaStatus = hipEventCreate(&start); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipEventCreate(& start) failed! in scanWithCuda\n"); goto Error; } cudaStatus = hipEventCreate(&stop); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipEventCreate(& stop) failed! in scanWithCuda\n"); goto Error; } //Start recording time cudaStatus = hipEventRecord(start); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipEventRecord(start) failed! in scanWithCuda\n"); goto Error; } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, NMax*sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed! for dev_c in scanWithCuda\n"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, NA*NB*NMax * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed! for dev_a in scanWithCuda\n"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = hipMemcpy(dev_a, a, NA * NB *NMax* sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed for dev_a! in scanWithCuda"); goto Error; } for (int i = 0; i <100; i++) { // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( calculateSimilarity), dim3(NMax/1024 +1), dim3(1024), 0, 0, dev_c, dev_a, NA, NB, NMax); } // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s in scanWithCuda\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel! in scanWithCuda\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, NMax*sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed for dev_c! in scanWithCuda`\n"); goto Error; } cudaStatus = hipEventRecord(stop); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipEventRecord(start) failed! in scanWithCuda\n"); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel! in scanWithCuda\n", cudaStatus); goto Error; } cudaStatus = hipEventElapsedTime(&milliseconds, start, stop); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipEventElapsedTime failed! 
in scanWithCuda\n"); goto Error; } //printf("NA =%d, NB=%d, NMax=%d time = %.4f\n", NA, NB, NMax, milliseconds); //printf("elapsed time of scanning matrix of NA = %d, NB = %d, NMax = %d is %.4f milliseconds \n", NA,NB,NMax, milliseconds); writeResult2File (NA, NB, NMax, milliseconds/100.0, "milliseconds", fileName); Error: hipFree(dev_c); hipFree(dev_a); return cudaStatus; }
5b8f03f867589a2b6789ad1c47fbb6113eaf7b47.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <algorithm> #include <stdio.h> #include "../CommonFiles/commonFunction.h" #include "../CommonFiles/TestFunction.h" #define ARRAY_SIZE 3 #include<cmath> using namespace std; __global__ void calculateSimilarity(float* c, float *a, const int NA, const int NB, const int NMax); cudaError_t calculateSimilarityWithCuda(float* c, const float *a, const int NA, const int NB, const int NMax, string fileName); /** int main() { const int NA = 7; const int NB = 7; const int NMax = 1; cudaError_t cudaStatus; float A[NA*NB*NMax] = {4}; float C[NMax]; cudaStatus = calculateSimilarityWithCuda(C, A,NA,NB, NMax, "calculateSimilarity1.txt"); //print out C for correctness checking printf("C[] array is %.2f\n", C[0]); testFunction(A, NA, NB, NMax, 1000,10000,1000,"calculateSimilarity1.txt", &calculateSimilarityWithCuda); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. 
cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } **/ /** Algorithm: (1) Sort the elements of the atom match matri into order of decreasing similiarity (not necessary because we will need to find max anyway) (2) Scan the atom match matrix to find the remaining pair of atoms, one from A and one from B, that has the largest calculated value for S(i,j) (3) Store the rsulting equivalences as a tuple of the form [A(i) <-> B(j); S9i,j)] (4) Remove A(i) and B(j) from further consideration (5) Return to step 2 if it is possible to map further atoms in A to atoms in B The itermolecular similarity is sum[S(i,j) from 1 to NA]/NA input: array of float c: containing result of total of NA max_element over NA array of float a: containing coordinates NA*NB*NMax elements to find max_element const int NA: number of atoms in molecule A const int NB: number of atoms in each molecule in B const int NMax: number of molecules in B output: void **/ __global__ void calculateSimilarity(float* c, float *a, const int NA, const int NB, const int NMax){ float total; int position,start; int tid= blockIdx.x*blockDim.x+threadIdx.x; if (tid<NMax) { //start is the first element every thread to check start = tid*NA*NB; // Initialised each thread's total to 0 total = 0; //loop through NA atoms of molecule A for (int k =0;k<NA; k++) { /** Step 2: Scan the atom match matrix to find the remaining pair of atoms, one from A and one from B, that has the largest calculated value for S(i,j) **/ // Find the max_element and position of max_element in the array of NA*NB float position = 0; float max = a[start]; for (int t = 0; t<NA*NB; t++) { if ( a[start + t] > max) { max = a[start + t]; position=t; } } /** Step 3: Store the rsulting equivalences as a tuple of the form [A(i) <-> B(j); S9i,j)] **/ // Sum the max into total total = total + max; // Get the position of max_element in 2D array int a1 = position/NB; //y axis int b1 = 
position%NB; // x axis /** Step 4: Remove A(i) and B(j) from further consideration **/ // Set all the elements in the same row and column of max_element to 0 // set all elements in the same y axis of max = 0 for (int i =0; i<NB; i++ ) a[start + a1*NB+i] =0; // set all elements in the same x axis of max = 0 for (int j =0; j<NA; j++) a[start + j*NB+b1] =0; } //The similiarity score is total/NA c[tid] = total /NA; } } // Helper function for using CUDA to add vectors in parallel. cudaError_t calculateSimilarityWithCuda(float* c, const float *a, const int NA, const int NB, const int NMax, string fileName) { float *dev_a = 0; float *dev_c = 0; cudaError_t cudaStatus; cudaEvent_t start, stop; float milliseconds; cudaStatus = cudaEventCreate(&start); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaEventCreate(& start) failed! in scanWithCuda\n"); goto Error; } cudaStatus = cudaEventCreate(&stop); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaEventCreate(& stop) failed! in scanWithCuda\n"); goto Error; } //Start recording time cudaStatus = cudaEventRecord(start); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaEventRecord(start) failed! in scanWithCuda\n"); goto Error; } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, NMax*sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! for dev_c in scanWithCuda\n"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, NA*NB*NMax * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! for dev_a in scanWithCuda\n"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(dev_a, a, NA * NB *NMax* sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed for dev_a! in scanWithCuda"); goto Error; } for (int i = 0; i <100; i++) { // Launch a kernel on the GPU with one thread for each element. calculateSimilarity<<<NMax/1024 +1, 1024>>>(dev_c, dev_a, NA, NB, NMax); } // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s in scanWithCuda\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel! in scanWithCuda\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, NMax*sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed for dev_c! in scanWithCuda`\n"); goto Error; } cudaStatus = cudaEventRecord(stop); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaEventRecord(start) failed! in scanWithCuda\n"); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel! in scanWithCuda\n", cudaStatus); goto Error; } cudaStatus = cudaEventElapsedTime(&milliseconds, start, stop); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaEventElapsedTime failed! 
in scanWithCuda\n"); goto Error; } //printf("NA =%d, NB=%d, NMax=%d time = %.4f\n", NA, NB, NMax, milliseconds); //printf("elapsed time of scanning matrix of NA = %d, NB = %d, NMax = %d is %.4f milliseconds \n", NA,NB,NMax, milliseconds); writeResult2File (NA, NB, NMax, milliseconds/100.0, "milliseconds", fileName); Error: cudaFree(dev_c); cudaFree(dev_a); return cudaStatus; }
0bcf40bee267ed354b30a748df4133629b7e75cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void update_e( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) { int idx = blockIdx.x*blockDim.x + threadIdx.x; int Nyz = Ny*Nz; int fidx = idx + idx/(Nz-1) + idx/( (Ny-1)*(Nz-1) )*Nz + Nyz + Nz + 1; if ( fidx < (Nx-1)*(Ny-1)*(Nz-1) ) { Ex[fidx] += CEx[fidx]*( Hz[fidx+Nz] - Hz[fidx] - Hy[fidx+1] + Hy[fidx] ); Ey[fidx] += CEy[fidx]*( Hx[fidx+1] - Hx[fidx] - Hz[fidx+Nyz] + Hz[fidx] ); Ez[fidx] += CEz[fidx]*( Hy[fidx+Nyz] - Hy[fidx] - Hx[fidx+Nz] + Hx[fidx] ); } } __global__ void update_h( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) { int idx = blockIdx.x*blockDim.x + threadIdx.x; int Nyz = Ny*Nz; int fidx = idx + idx/(Nz-1) + idx/( (Ny-1)*(Nz-1) )*Nz + Nyz + Nz + 1; if ( fidx < (Nx-1)*(Ny-1)*(Nz-1) ) { Hx[fidx] -= 0.5*( Ez[fidx] - Ez[fidx-Nz] - Ey[fidx] + Ey[fidx-1] ); Hy[fidx] -= 0.5*( Ex[fidx] - Ex[fidx-1] - Ez[fidx] + Ez[fidx-Nyz] ); Hz[fidx] -= 0.5*( Ey[fidx] - Ey[fidx-Nyz] - Ex[fidx] + Ex[fidx-Nz] ); } }
0bcf40bee267ed354b30a748df4133629b7e75cb.cu
__global__ void update_e( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) { int idx = blockIdx.x*blockDim.x + threadIdx.x; int Nyz = Ny*Nz; int fidx = idx + idx/(Nz-1) + idx/( (Ny-1)*(Nz-1) )*Nz + Nyz + Nz + 1; if ( fidx < (Nx-1)*(Ny-1)*(Nz-1) ) { Ex[fidx] += CEx[fidx]*( Hz[fidx+Nz] - Hz[fidx] - Hy[fidx+1] + Hy[fidx] ); Ey[fidx] += CEy[fidx]*( Hx[fidx+1] - Hx[fidx] - Hz[fidx+Nyz] + Hz[fidx] ); Ez[fidx] += CEz[fidx]*( Hy[fidx+Nyz] - Hy[fidx] - Hx[fidx+Nz] + Hx[fidx] ); } } __global__ void update_h( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) { int idx = blockIdx.x*blockDim.x + threadIdx.x; int Nyz = Ny*Nz; int fidx = idx + idx/(Nz-1) + idx/( (Ny-1)*(Nz-1) )*Nz + Nyz + Nz + 1; if ( fidx < (Nx-1)*(Ny-1)*(Nz-1) ) { Hx[fidx] -= 0.5*( Ez[fidx] - Ez[fidx-Nz] - Ey[fidx] + Ey[fidx-1] ); Hy[fidx] -= 0.5*( Ex[fidx] - Ex[fidx-1] - Ez[fidx] + Ez[fidx-Nyz] ); Hz[fidx] -= 0.5*( Ey[fidx] - Ey[fidx-Nyz] - Ex[fidx] + Ex[fidx-Nz] ); } }
36e7acab023df4fb3e3afe19e2bf1c23c83e1df1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THCS_GENERIC_FILE #define THCS_GENERIC_FILE "generic/THCSTensorMath.cu" #else #include "THHThrustAllocator.cuh" #include "THHNumerics.cuh" #include <thrust/device_ptr.h> #include <thrust/sequence.h> #define ROW_PTR2(t, r) (THCTensor_(data)(THCState *state, t) + (r) * (t)->stride[0]) #define COL_PTR2(t, c) (THCTensor_(data)(THCState *state, t) + (c) * (t)->stride[1]) #define I_INFO(tensor) getTensorInfo<THCIndexTensor, uint64_t>(state, tensor) #define V_INFO(tensor) getTensorInfo<THCTensor, uint64_t>(state, tensor) /* THCudaIntTensor *THCSTensor_(toCSR)(THCState *state, THCIndexTensor *rowIndices, int64_t dim, int64_t nnz) { THCudaIntTensor *csr = THCudaIntTensor_newWithSize1d(state, dim + 1); THCudaIntTensor *rowIndicesInt = THCudaIntTensor_newWithSize1d(state, rowIndices->size[0]); THCudaIntTensor_copyCudaLong(state, rowIndicesInt, rowIndices); THCudaSparse_Xcoo2csr( state, THCudaIntTensor_data(state, rowIndicesInt), nnz, dim, THCudaIntTensor_data(state, csr)); THCudaIntTensor_free(state, rowIndicesInt); return csr; } */ void THCSTensor_(zero)(THCState *state, THCSTensor *self) { if (self->indices->nDimension) { THCIndexTensor_(resizeNd)(state, self->indices, 0, NULL, NULL); } if (self->values->nDimension) { THCTensor_(resizeNd)(state, self->values, 0, NULL, NULL); } self->nnz = 0; } void THCSTensor_(zeros)(THCState *state, THCSTensor *r_, THLongStorage *size) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 1, r_)); THCSTensor_(resize)(state, r_, size); THCSTensor_(zero)(state, r_); } void THCSTensor_(zerosLike)(THCState *state, THCSTensor *r_, THCSTensor *input) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 2, r_, input)); THCSTensor_(resizeAs)(state, r_, input); THCSTensor_(zero)(state, r_); } void THCTensor_(spaddcmul)(THCState *state, THCTensor *r_, THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) { THError("WARNING: Sparse Cuda Tensor op 
spaddcmul is not implemented"); } void THCTensor_(spaddcdiv)(THCState *state, THCTensor *r_, THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) { THError("WARNING: Sparse Cuda Tensor op spaddcdiv is not implemented"); } void THCSTensor_(spaddmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCSTensor *sparse_, THCTensor *dense) { #if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE) THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 4, sparse_, r_, t, dense)); THCIndexTensor *indices; THCTensor *values, *r__, *dense_; THArgCheck(sparse_->nDimensionI == 2, 2, "matrices expected, got %dD tensor", sparse_->nDimensionI); THArgCheck(sparse_->nDimensionV == 0, 2, "scalar values expected, got %dD values", sparse_->nDimensionV); THArgCheck(dense->nDimension == 2, 2, "matrices expected, got %dD tensor", dense->nDimension); int64_t m = THCSTensor_(size)(state, sparse_, 0); int64_t k = THCSTensor_(size)(state, sparse_, 1); int64_t n = THCTensor_(size)(state, dense, 1); THCTensor_(resize2d)(state, r_, m, n); THArgCheck(THCTensor_(size)(state, t, 0) == m, 1, "Expected dim 0 size %d, got %d", m, THCTensor_(size)(state, t, 0)); THArgCheck(THCTensor_(size)(state, t, 1) == n, 1, "Expected dim 1 size %d, got %d", n, THCTensor_(size)(state, t, 1)); THArgCheck(THCTensor_(size)(state, dense, 0) == k, 3, "Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0)); THCSTensor *sparse = THCSTensor_(newWithCSR)(state, sparse_); int64_t nnz = THCSTensor_(nnz)(state, sparse); indices = THCSTensor_(newIndices)(state, sparse); values = THCSTensor_(newValues)(state, sparse); THCIndexTensor *rowIndices = THCIndexTensor_(newSelect)(state, indices, 0, 0); THCIndexTensor *colIndices = THCIndexTensor_(newSelect)(state, indices, 0, 1); THCudaIntTensor *colIndicesInt = THCudaIntTensor_newWithSize1d(state, colIndices->size[0]); THCudaIntTensor_copyCudaLong(state, colIndicesInt, colIndices); THCIndexTensor *csr = THCSTensor_(newCSR)(state, sparse); 
THCudaIntTensor *csrInt = THCudaIntTensor_newWithSize1d(state, csr->size[0]); THCudaIntTensor_copyCudaLong(state, csrInt, csr); char transpose_dense; if (beta == 0) { THCTensor_(zero)(state, r_); } else if (beta == ScalarConvert<int, real>::to(1)) { if (t != r_) { THCTensor_(copy)(state, r_, t); } } else { THCTensor_(mul)(state, r_, t, beta); } /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] == r_->size[0]) { r__ = r_; THCTensor_(retain)(state, r__); } else { THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* dense */ if(dense->stride[0] == 1 && dense->stride[1] == dense->size[0]) { transpose_dense = 'n'; dense_ = dense; THCTensor_(retain)(state, dense_); } else if(dense->stride[1] == 1 && dense->stride[0] != dense->size[1]) { transpose_dense = 't'; dense_ = dense; THCTensor_(retain)(state, dense_); } else { transpose_dense = 't'; dense_ = THCTensor_(newContiguous)(state, dense); } #if defined(THCS_REAL_IS_FLOAT) THCudaSparse_Scsrmm2( #elif defined(THCS_REAL_IS_DOUBLE) THCudaSparse_Dcsrmm2( #endif state, 'n', transpose_dense, m, n, k, nnz, alpha, THCTensor_(data)(state, values), THCudaIntTensor_data(state, csrInt), THCudaIntTensor_data(state, colIndicesInt), THCTensor_(data)(state, dense_), (transpose_dense == 'n' ? 
dense_->stride[1] : dense_->stride[0]), beta, THCTensor_(data)(state, r__), r__->stride[1]); /* free intermediate variables */ THCTensor_(free)(state, dense_); THCTensor_(freeCopyTo)(state, r__, r_); THCudaIntTensor_free(state, colIndicesInt); THCudaIntTensor_free(state, csrInt); THCIndexTensor_(free)(state, csr); THCIndexTensor_(free)(state, indices); THCIndexTensor_(free)(state, rowIndices); THCIndexTensor_(free)(state, colIndices); THCTensor_(free)(state, values); THCSTensor_(free)(state, sparse); #else THError("unimplemented data type"); #endif } void THCSTensor_(sspaddmm)(THCState *state, THCSTensor *r_, real beta, THCSTensor *t, real alpha, THCSTensor *sparse, THCTensor *dense) { THError("WARNING: Sparse Cuda Tensor op sspaddmm is not implemented"); // TODO Write some kernels } void THCSTensor_(hspmm)(THCState *state, THCSTensor *r_, real alpha, THCSTensor *sparse_, THCTensor *dense) { #if TORCH_HIP_VERSION >= 7000 THCThrustAllocator thrustAlloc(state); #define THRUST_EXEC(fn, ...) fn(thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), ##__VA_ARGS__) #else #define THRUST_EXEC(fn, ...) 
fn(##__VA_ARGS__) #endif THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 3, r_, sparse_, dense)); THArgCheck(sparse_->nDimensionI == 2, 3, "matrices expected, got %dD tensor", sparse_->nDimensionI); THArgCheck(sparse_->nDimensionV == 0, 3, "scalar values expected, got %dD values", sparse_->nDimensionV); THArgCheck(dense->nDimension == 2, 4, "matrices expected, got %dD tensor", dense->nDimension); int64_t m = THCSTensor_(size)(state, sparse_, 0); int64_t k = THCSTensor_(size)(state, sparse_, 1); int64_t n = THCTensor_(size)(state, dense, 1); THArgCheck(THCTensor_(size)(state, dense, 0) == k, 4, "Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0)); int64_t size[2] = {m, n}; THCSTensor_(rawResize)(state, r_, 1, 1, size); THCSTensor *sparse = THCSTensor_(newCoalesce)(state, sparse_); int64_t nnz = THCSTensor_(nnz)(state, sparse); THCIndexTensor *indices = THCIndexTensor_(newWithSize2d)(state, 1, nnz); // create values in column-major format to avoid copying in spaddmm THCTensor *values = THCTensor_(newWithSize2d)(state, n, nnz); THCTensor_(transpose)(state, values, NULL, 0, 1); // why does sparse need to be cloned? If this is really necessary maybe we // need to fuse this with newCoalesce THCSTensor *newSparse = THCSTensor_(newClone)(state, sparse); THCIndexTensor *spIndices = THCSTensor_(newIndices)(state, newSparse); THCIndexTensor *dstIndices = THCIndexTensor_(newSelect)(state, spIndices, 0, 0); // Save destination indices to output hybrid tensor THCIndexTensor_(copy)(state, indices, dstIndices); // Replace destination indices with 0, 1, 2, 3, ... 
and compute output values // tensor with sparse * dense multiplication thrust::device_ptr<indexT> indicesIter(THCIndexTensor_(data)(state, dstIndices)); THRUST_EXEC(thrust::sequence, indicesIter, indicesIter + nnz); newSparse->size[0] = nnz; THCSTensor_(spaddmm)(state, values, ScalarConvert<int, real>::to(0), values, alpha, newSparse, dense); THCSTensor_(_move)(state, r_, indices, values); THCSTensor_(free)(state, newSparse); THCIndexTensor_(free)(state, spIndices); THCIndexTensor_(free)(state, dstIndices); THCSTensor_(free)(state, sparse); #undef THRUST_EXEC } void THCSTensor_(spcadd)(THCState *state, THCTensor *r_, THCTensor *dense, real value, THCSTensor *sparse) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 3, sparse, r_, dense)); const ptrdiff_t nnz = THCSTensor_(nnz)(state, sparse); if (nnz == 0) { THCTensor_(resizeAs)(state, r_, dense); THCTensor_(copy)(state, r_, dense); return; } THCTensor *r = r_; if (r != dense) { THCTensor_(retain)(state, r); THCTensor_(resizeAs)(state, r, dense); THCTensor_(copy)(state, r, dense); } else { if (!THCTensor_(isContiguous)(state, r_)) { THError("CUDA sparse spcadd: known bug"); } r = THCTensor_(newContiguous)(state, r_); } THCIndexTensor *indices = THCSTensor_(newIndices)(state, sparse); THCTensor *values = THCSTensor_(newValues)(state, sparse); int64_t nDim = THCTensor_(nDimension)(state, dense); int64_t nDimI = THCSTensor_(nDimensionI)(state, sparse); if (THCSTensor_(isCoalesced)(state, sparse)) { // TODO benchmark to decide whether to remove this special case const dim3 block = getApplyBlock(); dim3 grid; if (sparse->nDimensionV == 0) { THArgCheck(getApplyGrid(state, nnz, grid), 1, CUTORCH_DIM_WARNING); hipLaunchKernelGGL(( THCSTensor_sparseElementwiseKernelScalar<TensorCAddOp<real>, uint64_t, real>) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), TensorCAddOp<real>(value), V_INFO(r_), I_INFO(indices), V_INFO(values), (uint64_t) nnz); } else { THArgCheck(getApplyGrid(state, nnz * block.x, grid), 1, 
CUTORCH_DIM_WARNING); hipLaunchKernelGGL(( THCSTensor_sparseElementwiseKernel<TensorCAddOp<real>, uint64_t, real>) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), TensorCAddOp<real>(value), V_INFO(r_), I_INFO(indices), V_INFO(values), (uint64_t) nnz); } } else { THCIndexTensor *indices1D = THCSTensor_(newFlattenedIndices)(state, sparse, 0); THCIndexTensor_(resize1d)(state, indices1D, nnz); if (value != ScalarConvert<int, real>::to(1)) { // FIXME: at some point we can wrap the scale into indexAdd THCTensor *scaled = THCTensor_(new)(state); THCTensor_(mul)(state, scaled, values, value); THCTensor_(free)(state, values); values = scaled; } int64_t view_rows = 1; int64_t view_columns = 1; THLongStorage *r_size = THCTensor_(newSizeOf)(state, r); for (int i = 0; i < nDimI; i++) view_rows *= r_size->data[i]; for (int i = nDimI; i < nDim; i++) view_columns *= r_size->data[i]; THLongStorage *r_view_size = THLongStorage_newWithSize2(view_rows, view_columns); THCTensor *r_view = THCTensor_(newView)(state, r, r_view_size); THCTensor_(resize2d)(state, values, nnz, view_columns); THCTensor_(indexAdd)(state, r_view, 0, indices1D, values); THCIndexTensor_(free)(state, indices1D); THLongStorage_free(r_size); THLongStorage_free(r_view_size); THCTensor_(free)(state, r_view); } THCudaCheck(hipGetLastError()); THCIndexTensor_(free)(state, indices); THCTensor_(free)(state, values); THCTensor_(free)(state, r); } void THCSTensor_(mul)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) { if (r_ == t) { THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCTensor_(mul)(state, r_values_, r_values_, value); THCTensor_(free)(state, r_values_); } else { THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, 
r_indices_, t_indices_); THCIndexTensor_(copy)(state, r_indices_, t_indices_); THCTensor_(mul)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); } } void THCSTensor_(div)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) { if (r_ == t) { THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCTensor_(div)(state, r_values_, r_values_, value); THCTensor_(free)(state, r_values_); } else { THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_); THCIndexTensor_(copy)(state, r_indices_, t_indices_); THCTensor_(div)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); } } void THCSTensor_(cadd)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t, src)); if(!THCSTensor_(isSameSizeAs)(state, t, src)) { THError("cadd operands have incompatible sizes or dimension types"); } if (src->nnz == 0) { THCSTensor_(copy)(state, r_, t); return; } if (t->nnz == 0) { THCSTensor_(mul)(state, r_, src, value); return; } // We deliberately choose to simply concat the indices and values tensors // rather than merging them. This removes the need to synchronously fetch nnz // at the end of the operation, at the cost of having a non-coalesced result. // This trade-off is preferable for the common use-case of gradient accumulation. 
THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src); THCTensor *s_values_ = THCSTensor_(newValues)(state, src); if (value != ScalarConvert<int, real>::to(1)) { THCTensor *s_values_orig = s_values_; s_values_ = THCTensor_(new)(state); THCTensor_(mul)(state, s_values_, s_values_orig, value); THCTensor_(free)(state, s_values_orig); } THCIndexTensor *r_indices_ = THCIndexTensor_(new)(state); THCTensor *r_values_ = THCTensor_(new)(state); THCIndexTensor_(cat)(state, r_indices_, t_indices_, s_indices_, 1); THCTensor_(cat)(state, r_values_, t_values_, s_values_, 0); THCSTensor_(resizeAs)(state, r_, src); THCSTensor_(_move)(state, r_, r_indices_, r_values_); THCSTensor_(uncoalesce)(state, r_); // FIXME: add some heuristic about when to call coalesce() here, so that // tensors don't totally blow up in size by concatenation; e.g. // r->minUnique = max(a->minUnique + b->minUnique); // if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) { // THCSTensor_(contiguous)(r); // r->minUnique = r->nnz; // } THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCIndexTensor_(free)(state, s_indices_); THCTensor_(free)(state, s_values_); } void THCSTensor_(csub)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) { THCSTensor_(cadd)(state, r_, t, ScalarNegate<real>::to(value), src); } void THCSTensor_(cmul)(THCState *state, THCSTensor *r_, THCSTensor *t_, THCSTensor *src_) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t_, src_)); if(!THCSTensor_(isSameSizeAs)(state, t_, src_)) { THError("cmul operands have incompatible sizes or dimension types"); } THCSTensor *t = THCSTensor_(newCoalesce)(state, t_); THCSTensor *src = THCSTensor_(newCoalesce)(state, src_); if (t->nnz == 0 || src->nnz == 0) { THCSTensor_(zero)(state, r_); return; } // saving those because they can be overwritten when doing 
in-place operations ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz; ptrdiff_t max_nnz = t_nnz < s_nnz ? t_nnz : s_nnz; int64_t nDimI = src->nDimensionI; THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src); THCTensor *s_values_ = THCSTensor_(newValues)(state, src); THCIndexTensor *r_indices_ = THCIndexTensor_(newWithSize2d)(state, nDimI, max_nnz); THCTensor *r_values_ = THCSTensor_(newValuesWithSizeOf)(state, s_values_, max_nnz); THCTensor_(zero)(state, r_values_); THCSTensor_(resizeAs)(state, r_, src); THCSTensor_(_move)(state, r_, r_indices_, r_values_); int64_t valueSize = t_values_->stride[0]; const dim3 block = dim3(min((int64_t) getApplyBlock().x, valueSize)); dim3 grid; THArgCheck(getApplyGrid(state, valueSize, grid), 1, CUTORCH_DIM_WARNING); hipLaunchKernelGGL(( THCSTensor_valueSparseIntersectionKernel<TensorMulOp<real>, uint64_t, real>) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), TensorMulOp<real>(), I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_), (uint64_t)t_nnz, (uint64_t)s_nnz); THCudaCheck(hipGetLastError()); THCudaLongStorage *resultNnz = THCudaLongStorage_newWithSize(state, 1); hipLaunchKernelGGL(( THCSTensor_indexSparseIntersectionKernel<uint64_t, real>) , dim3(1), dim3(1), 0, THCState_getCurrentStream(state), I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), (uint64_t)t_nnz, (uint64_t)s_nnz, (uint64_t*)resultNnz->data); THCudaCheck(hipGetLastError()); r_->nnz = THCudaLongStorage_get(state, resultNnz, 0); THCudaLongStorage_free(state, resultNnz); r_->coalesced = 1; THCSTensor_(invalidateCSR)(state, r_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCIndexTensor_(free)(state, s_indices_); THCTensor_(free)(state, s_values_); THCSTensor_(free)(state, t); THCSTensor_(free)(state, src); } void 
THCSTensor_(pow)(THCState *state, THCSTensor *r_, THCSTensor *t_, real value) { if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(0))) { THError("cannot raise to zeroth power on sparse tensor"); } THCSTensor *t = THCSTensor_(newCoalesce)(state, t_); THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_); THCIndexTensor_(copy)(state, r_indices_, t_indices_); THCTensor_(pow)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCSTensor_(free)(state, t); } #if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE) || defined(THCS_REAL_IS_HALF) accreal THCSTensor_(normall)(THCState *state, THCSTensor *self, real value) { THCSTensor* self_coalesced = THCSTensor_(newCoalesce)(state, self); accreal result = THCTensor_(normall)(state, self_coalesced->values, value); THCSTensor_(free)(state, self_coalesced); return result; } #endif #undef ROW_PTR2 #undef COL_PTR2 #endif
36e7acab023df4fb3e3afe19e2bf1c23c83e1df1.cu
#ifndef THCS_GENERIC_FILE #define THCS_GENERIC_FILE "generic/THCSTensorMath.cu" #else #include "THCThrustAllocator.cuh" #include "THCNumerics.cuh" #include <thrust/device_ptr.h> #include <thrust/sequence.h> #define ROW_PTR2(t, r) (THCTensor_(data)(THCState *state, t) + (r) * (t)->stride[0]) #define COL_PTR2(t, c) (THCTensor_(data)(THCState *state, t) + (c) * (t)->stride[1]) #define I_INFO(tensor) getTensorInfo<THCIndexTensor, uint64_t>(state, tensor) #define V_INFO(tensor) getTensorInfo<THCTensor, uint64_t>(state, tensor) /* THCudaIntTensor *THCSTensor_(toCSR)(THCState *state, THCIndexTensor *rowIndices, int64_t dim, int64_t nnz) { THCudaIntTensor *csr = THCudaIntTensor_newWithSize1d(state, dim + 1); THCudaIntTensor *rowIndicesInt = THCudaIntTensor_newWithSize1d(state, rowIndices->size[0]); THCudaIntTensor_copyCudaLong(state, rowIndicesInt, rowIndices); THCudaSparse_Xcoo2csr( state, THCudaIntTensor_data(state, rowIndicesInt), nnz, dim, THCudaIntTensor_data(state, csr)); THCudaIntTensor_free(state, rowIndicesInt); return csr; } */ void THCSTensor_(zero)(THCState *state, THCSTensor *self) { if (self->indices->nDimension) { THCIndexTensor_(resizeNd)(state, self->indices, 0, NULL, NULL); } if (self->values->nDimension) { THCTensor_(resizeNd)(state, self->values, 0, NULL, NULL); } self->nnz = 0; } void THCSTensor_(zeros)(THCState *state, THCSTensor *r_, THLongStorage *size) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 1, r_)); THCSTensor_(resize)(state, r_, size); THCSTensor_(zero)(state, r_); } void THCSTensor_(zerosLike)(THCState *state, THCSTensor *r_, THCSTensor *input) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 2, r_, input)); THCSTensor_(resizeAs)(state, r_, input); THCSTensor_(zero)(state, r_); } void THCTensor_(spaddcmul)(THCState *state, THCTensor *r_, THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) { THError("WARNING: Sparse Cuda Tensor op spaddcmul is not implemented"); } void THCTensor_(spaddcdiv)(THCState *state, THCTensor *r_, 
THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) { THError("WARNING: Sparse Cuda Tensor op spaddcdiv is not implemented"); } void THCSTensor_(spaddmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCSTensor *sparse_, THCTensor *dense) { #if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE) THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 4, sparse_, r_, t, dense)); THCIndexTensor *indices; THCTensor *values, *r__, *dense_; THArgCheck(sparse_->nDimensionI == 2, 2, "matrices expected, got %dD tensor", sparse_->nDimensionI); THArgCheck(sparse_->nDimensionV == 0, 2, "scalar values expected, got %dD values", sparse_->nDimensionV); THArgCheck(dense->nDimension == 2, 2, "matrices expected, got %dD tensor", dense->nDimension); int64_t m = THCSTensor_(size)(state, sparse_, 0); int64_t k = THCSTensor_(size)(state, sparse_, 1); int64_t n = THCTensor_(size)(state, dense, 1); THCTensor_(resize2d)(state, r_, m, n); THArgCheck(THCTensor_(size)(state, t, 0) == m, 1, "Expected dim 0 size %d, got %d", m, THCTensor_(size)(state, t, 0)); THArgCheck(THCTensor_(size)(state, t, 1) == n, 1, "Expected dim 1 size %d, got %d", n, THCTensor_(size)(state, t, 1)); THArgCheck(THCTensor_(size)(state, dense, 0) == k, 3, "Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0)); THCSTensor *sparse = THCSTensor_(newWithCSR)(state, sparse_); int64_t nnz = THCSTensor_(nnz)(state, sparse); indices = THCSTensor_(newIndices)(state, sparse); values = THCSTensor_(newValues)(state, sparse); THCIndexTensor *rowIndices = THCIndexTensor_(newSelect)(state, indices, 0, 0); THCIndexTensor *colIndices = THCIndexTensor_(newSelect)(state, indices, 0, 1); THCudaIntTensor *colIndicesInt = THCudaIntTensor_newWithSize1d(state, colIndices->size[0]); THCudaIntTensor_copyCudaLong(state, colIndicesInt, colIndices); THCIndexTensor *csr = THCSTensor_(newCSR)(state, sparse); THCudaIntTensor *csrInt = THCudaIntTensor_newWithSize1d(state, csr->size[0]); 
THCudaIntTensor_copyCudaLong(state, csrInt, csr); char transpose_dense; if (beta == 0) { THCTensor_(zero)(state, r_); } else if (beta == ScalarConvert<int, real>::to(1)) { if (t != r_) { THCTensor_(copy)(state, r_, t); } } else { THCTensor_(mul)(state, r_, t, beta); } /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] == r_->size[0]) { r__ = r_; THCTensor_(retain)(state, r__); } else { THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* dense */ if(dense->stride[0] == 1 && dense->stride[1] == dense->size[0]) { transpose_dense = 'n'; dense_ = dense; THCTensor_(retain)(state, dense_); } else if(dense->stride[1] == 1 && dense->stride[0] != dense->size[1]) { transpose_dense = 't'; dense_ = dense; THCTensor_(retain)(state, dense_); } else { transpose_dense = 't'; dense_ = THCTensor_(newContiguous)(state, dense); } #if defined(THCS_REAL_IS_FLOAT) THCudaSparse_Scsrmm2( #elif defined(THCS_REAL_IS_DOUBLE) THCudaSparse_Dcsrmm2( #endif state, 'n', transpose_dense, m, n, k, nnz, alpha, THCTensor_(data)(state, values), THCudaIntTensor_data(state, csrInt), THCudaIntTensor_data(state, colIndicesInt), THCTensor_(data)(state, dense_), (transpose_dense == 'n' ? 
dense_->stride[1] : dense_->stride[0]), beta, THCTensor_(data)(state, r__), r__->stride[1]); /* free intermediate variables */ THCTensor_(free)(state, dense_); THCTensor_(freeCopyTo)(state, r__, r_); THCudaIntTensor_free(state, colIndicesInt); THCudaIntTensor_free(state, csrInt); THCIndexTensor_(free)(state, csr); THCIndexTensor_(free)(state, indices); THCIndexTensor_(free)(state, rowIndices); THCIndexTensor_(free)(state, colIndices); THCTensor_(free)(state, values); THCSTensor_(free)(state, sparse); #else THError("unimplemented data type"); #endif } void THCSTensor_(sspaddmm)(THCState *state, THCSTensor *r_, real beta, THCSTensor *t, real alpha, THCSTensor *sparse, THCTensor *dense) { THError("WARNING: Sparse Cuda Tensor op sspaddmm is not implemented"); // TODO Write some kernels } void THCSTensor_(hspmm)(THCState *state, THCSTensor *r_, real alpha, THCSTensor *sparse_, THCTensor *dense) { #if CUDA_VERSION >= 7000 THCThrustAllocator thrustAlloc(state); #define THRUST_EXEC(fn, ...) fn(thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), ##__VA_ARGS__) #else #define THRUST_EXEC(fn, ...) 
fn(##__VA_ARGS__) #endif THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 3, r_, sparse_, dense)); THArgCheck(sparse_->nDimensionI == 2, 3, "matrices expected, got %dD tensor", sparse_->nDimensionI); THArgCheck(sparse_->nDimensionV == 0, 3, "scalar values expected, got %dD values", sparse_->nDimensionV); THArgCheck(dense->nDimension == 2, 4, "matrices expected, got %dD tensor", dense->nDimension); int64_t m = THCSTensor_(size)(state, sparse_, 0); int64_t k = THCSTensor_(size)(state, sparse_, 1); int64_t n = THCTensor_(size)(state, dense, 1); THArgCheck(THCTensor_(size)(state, dense, 0) == k, 4, "Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0)); int64_t size[2] = {m, n}; THCSTensor_(rawResize)(state, r_, 1, 1, size); THCSTensor *sparse = THCSTensor_(newCoalesce)(state, sparse_); int64_t nnz = THCSTensor_(nnz)(state, sparse); THCIndexTensor *indices = THCIndexTensor_(newWithSize2d)(state, 1, nnz); // create values in column-major format to avoid copying in spaddmm THCTensor *values = THCTensor_(newWithSize2d)(state, n, nnz); THCTensor_(transpose)(state, values, NULL, 0, 1); // why does sparse need to be cloned? If this is really necessary maybe we // need to fuse this with newCoalesce THCSTensor *newSparse = THCSTensor_(newClone)(state, sparse); THCIndexTensor *spIndices = THCSTensor_(newIndices)(state, newSparse); THCIndexTensor *dstIndices = THCIndexTensor_(newSelect)(state, spIndices, 0, 0); // Save destination indices to output hybrid tensor THCIndexTensor_(copy)(state, indices, dstIndices); // Replace destination indices with 0, 1, 2, 3, ... 
and compute output values // tensor with sparse * dense multiplication thrust::device_ptr<indexT> indicesIter(THCIndexTensor_(data)(state, dstIndices)); THRUST_EXEC(thrust::sequence, indicesIter, indicesIter + nnz); newSparse->size[0] = nnz; THCSTensor_(spaddmm)(state, values, ScalarConvert<int, real>::to(0), values, alpha, newSparse, dense); THCSTensor_(_move)(state, r_, indices, values); THCSTensor_(free)(state, newSparse); THCIndexTensor_(free)(state, spIndices); THCIndexTensor_(free)(state, dstIndices); THCSTensor_(free)(state, sparse); #undef THRUST_EXEC } void THCSTensor_(spcadd)(THCState *state, THCTensor *r_, THCTensor *dense, real value, THCSTensor *sparse) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 3, sparse, r_, dense)); const ptrdiff_t nnz = THCSTensor_(nnz)(state, sparse); if (nnz == 0) { THCTensor_(resizeAs)(state, r_, dense); THCTensor_(copy)(state, r_, dense); return; } THCTensor *r = r_; if (r != dense) { THCTensor_(retain)(state, r); THCTensor_(resizeAs)(state, r, dense); THCTensor_(copy)(state, r, dense); } else { if (!THCTensor_(isContiguous)(state, r_)) { THError("CUDA sparse spcadd: known bug"); } r = THCTensor_(newContiguous)(state, r_); } THCIndexTensor *indices = THCSTensor_(newIndices)(state, sparse); THCTensor *values = THCSTensor_(newValues)(state, sparse); int64_t nDim = THCTensor_(nDimension)(state, dense); int64_t nDimI = THCSTensor_(nDimensionI)(state, sparse); if (THCSTensor_(isCoalesced)(state, sparse)) { // TODO benchmark to decide whether to remove this special case const dim3 block = getApplyBlock(); dim3 grid; if (sparse->nDimensionV == 0) { THArgCheck(getApplyGrid(state, nnz, grid), 1, CUTORCH_DIM_WARNING); THCSTensor_sparseElementwiseKernelScalar<TensorCAddOp<real>, uint64_t, real> <<<grid, block, 0, THCState_getCurrentStream(state)>>>( TensorCAddOp<real>(value), V_INFO(r_), I_INFO(indices), V_INFO(values), (uint64_t) nnz); } else { THArgCheck(getApplyGrid(state, nnz * block.x, grid), 1, CUTORCH_DIM_WARNING); 
THCSTensor_sparseElementwiseKernel<TensorCAddOp<real>, uint64_t, real> <<<grid, block, 0, THCState_getCurrentStream(state)>>>( TensorCAddOp<real>(value), V_INFO(r_), I_INFO(indices), V_INFO(values), (uint64_t) nnz); } } else { THCIndexTensor *indices1D = THCSTensor_(newFlattenedIndices)(state, sparse, 0); THCIndexTensor_(resize1d)(state, indices1D, nnz); if (value != ScalarConvert<int, real>::to(1)) { // FIXME: at some point we can wrap the scale into indexAdd THCTensor *scaled = THCTensor_(new)(state); THCTensor_(mul)(state, scaled, values, value); THCTensor_(free)(state, values); values = scaled; } int64_t view_rows = 1; int64_t view_columns = 1; THLongStorage *r_size = THCTensor_(newSizeOf)(state, r); for (int i = 0; i < nDimI; i++) view_rows *= r_size->data[i]; for (int i = nDimI; i < nDim; i++) view_columns *= r_size->data[i]; THLongStorage *r_view_size = THLongStorage_newWithSize2(view_rows, view_columns); THCTensor *r_view = THCTensor_(newView)(state, r, r_view_size); THCTensor_(resize2d)(state, values, nnz, view_columns); THCTensor_(indexAdd)(state, r_view, 0, indices1D, values); THCIndexTensor_(free)(state, indices1D); THLongStorage_free(r_size); THLongStorage_free(r_view_size); THCTensor_(free)(state, r_view); } THCudaCheck(cudaGetLastError()); THCIndexTensor_(free)(state, indices); THCTensor_(free)(state, values); THCTensor_(free)(state, r); } void THCSTensor_(mul)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) { if (r_ == t) { THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCTensor_(mul)(state, r_values_, r_values_, value); THCTensor_(free)(state, r_values_); } else { THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_); THCIndexTensor_(copy)(state, 
r_indices_, t_indices_); THCTensor_(mul)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); } } void THCSTensor_(div)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) { if (r_ == t) { THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCTensor_(div)(state, r_values_, r_values_, value); THCTensor_(free)(state, r_values_); } else { THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_); THCIndexTensor_(copy)(state, r_indices_, t_indices_); THCTensor_(div)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); } } void THCSTensor_(cadd)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t, src)); if(!THCSTensor_(isSameSizeAs)(state, t, src)) { THError("cadd operands have incompatible sizes or dimension types"); } if (src->nnz == 0) { THCSTensor_(copy)(state, r_, t); return; } if (t->nnz == 0) { THCSTensor_(mul)(state, r_, src, value); return; } // We deliberately choose to simply concat the indices and values tensors // rather than merging them. This removes the need to synchronously fetch nnz // at the end of the operation, at the cost of having a non-coalesced result. // This trade-off is preferable for the common use-case of gradient accumulation. 
THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src); THCTensor *s_values_ = THCSTensor_(newValues)(state, src); if (value != ScalarConvert<int, real>::to(1)) { THCTensor *s_values_orig = s_values_; s_values_ = THCTensor_(new)(state); THCTensor_(mul)(state, s_values_, s_values_orig, value); THCTensor_(free)(state, s_values_orig); } THCIndexTensor *r_indices_ = THCIndexTensor_(new)(state); THCTensor *r_values_ = THCTensor_(new)(state); THCIndexTensor_(cat)(state, r_indices_, t_indices_, s_indices_, 1); THCTensor_(cat)(state, r_values_, t_values_, s_values_, 0); THCSTensor_(resizeAs)(state, r_, src); THCSTensor_(_move)(state, r_, r_indices_, r_values_); THCSTensor_(uncoalesce)(state, r_); // FIXME: add some heuristic about when to call coalesce() here, so that // tensors don't totally blow up in size by concatenation; e.g. // r->minUnique = max(a->minUnique + b->minUnique); // if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) { // THCSTensor_(contiguous)(r); // r->minUnique = r->nnz; // } THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCIndexTensor_(free)(state, s_indices_); THCTensor_(free)(state, s_values_); } void THCSTensor_(csub)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) { THCSTensor_(cadd)(state, r_, t, ScalarNegate<real>::to(value), src); } void THCSTensor_(cmul)(THCState *state, THCSTensor *r_, THCSTensor *t_, THCSTensor *src_) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t_, src_)); if(!THCSTensor_(isSameSizeAs)(state, t_, src_)) { THError("cmul operands have incompatible sizes or dimension types"); } THCSTensor *t = THCSTensor_(newCoalesce)(state, t_); THCSTensor *src = THCSTensor_(newCoalesce)(state, src_); if (t->nnz == 0 || src->nnz == 0) { THCSTensor_(zero)(state, r_); return; } // saving those because they can be overwritten when doing 
in-place operations ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz; ptrdiff_t max_nnz = t_nnz < s_nnz ? t_nnz : s_nnz; int64_t nDimI = src->nDimensionI; THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src); THCTensor *s_values_ = THCSTensor_(newValues)(state, src); THCIndexTensor *r_indices_ = THCIndexTensor_(newWithSize2d)(state, nDimI, max_nnz); THCTensor *r_values_ = THCSTensor_(newValuesWithSizeOf)(state, s_values_, max_nnz); THCTensor_(zero)(state, r_values_); THCSTensor_(resizeAs)(state, r_, src); THCSTensor_(_move)(state, r_, r_indices_, r_values_); int64_t valueSize = t_values_->stride[0]; const dim3 block = dim3(min((int64_t) getApplyBlock().x, valueSize)); dim3 grid; THArgCheck(getApplyGrid(state, valueSize, grid), 1, CUTORCH_DIM_WARNING); THCSTensor_valueSparseIntersectionKernel<TensorMulOp<real>, uint64_t, real> <<<grid, block, 0, THCState_getCurrentStream(state)>>>( TensorMulOp<real>(), I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_), (uint64_t)t_nnz, (uint64_t)s_nnz); THCudaCheck(cudaGetLastError()); THCudaLongStorage *resultNnz = THCudaLongStorage_newWithSize(state, 1); THCSTensor_indexSparseIntersectionKernel<uint64_t, real> <<<1, 1, 0, THCState_getCurrentStream(state)>>>( I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), (uint64_t)t_nnz, (uint64_t)s_nnz, (uint64_t*)resultNnz->data); THCudaCheck(cudaGetLastError()); r_->nnz = THCudaLongStorage_get(state, resultNnz, 0); THCudaLongStorage_free(state, resultNnz); r_->coalesced = 1; THCSTensor_(invalidateCSR)(state, r_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCIndexTensor_(free)(state, s_indices_); THCTensor_(free)(state, s_values_); THCSTensor_(free)(state, t); THCSTensor_(free)(state, src); } void THCSTensor_(pow)(THCState *state, THCSTensor *r_, THCSTensor 
*t_, real value) { if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(0))) { THError("cannot raise to zeroth power on sparse tensor"); } THCSTensor *t = THCSTensor_(newCoalesce)(state, t_); THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_); THCIndexTensor_(copy)(state, r_indices_, t_indices_); THCTensor_(pow)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCSTensor_(free)(state, t); } #if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE) || defined(THCS_REAL_IS_HALF) accreal THCSTensor_(normall)(THCState *state, THCSTensor *self, real value) { THCSTensor* self_coalesced = THCSTensor_(newCoalesce)(state, self); accreal result = THCTensor_(normall)(state, self_coalesced->values, value); THCSTensor_(free)(state, self_coalesced); return result; } #endif #undef ROW_PTR2 #undef COL_PTR2 #endif
ecef9e4ab7af4327a263751d7d521d707c230705.hip
// !!! This is a file automatically generated by hipify!!! // nvcc mcpi.cu -o mcpi -lcuda #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <math.h> #include <time.h> #include <hiprand/hiprand_kernel.h> #define TRIALS_PER_THREAD 4096 #define BLOCKS 256 #define THREADS 256 #define PI 3.1415926535 // known value of pi // hiprandState_t = etat du generateur de nombres aleatoires __global__ void gpu_monte_carlo(float *estimate, hiprandState_t *states) { unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x; int points_in_circle = 0; float x, y; hiprand_init(1234, tid, 0, &states[tid]); //Initialize CURAND for (int i = 0; i < TRIALS_PER_THREAD; ++i) { x = hiprand_uniform(&states[tid]); y = hiprand_uniform(&states[tid]); points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle. } estimate[tid] = 4.0f * points_in_circle / (float) TRIALS_PER_THREAD; //estimate of pi } float host_monte_carlo(long trials) { float x, y; long points_in_circle; for (long i = 0; i < trials; i++) { x = rand() / (float)RAND_MAX; y = rand() / (float)RAND_MAX; points_in_circle += (x*x + y*y <= 1.0f); } return 4.0f * points_in_circle / trials; } int main (int argc, char *argv[]) { clock_t start, stop; float host[BLOCKS * THREADS]; float *dev; hiprandState_t *devStates; printf("# of trials per thread = %d, # of blocks = %d, # of threads/block = %d.\n", TRIALS_PER_THREAD, BLOCKS, THREADS); start = clock(); hipMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float)); hipMalloc( (void **)&devStates, THREADS * BLOCKS * sizeof(hiprandState_t) ); hipLaunchKernelGGL(( gpu_monte_carlo), dim3(BLOCKS), dim3(THREADS), 0, 0, dev, devStates); hipMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), hipMemcpyDeviceToHost); float pi_gpu; for (int i = 0; i < BLOCKS * THREADS; ++i) { pi_gpu += host[i]; } pi_gpu /= (BLOCKS * THREADS); stop = clock(); printf("GPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC); start = clock(); float pi_cpu = 
host_monte_carlo(BLOCKS * THREADS * TRIALS_PER_THREAD); stop = clock(); printf("CPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC); printf("CUDA estimate of PI = %f [error of %f]\n", pi_gpu, pi_gpu - PI); printf("CPU estimate of PI = %f [error of %f]\n", pi_cpu, pi_cpu - PI); return 0; }
ecef9e4ab7af4327a263751d7d521d707c230705.cu
// nvcc mcpi.cu -o mcpi -lcuda #include <stdlib.h> #include <stdio.h> #include <cuda.h> #include <math.h> #include <time.h> #include <curand_kernel.h> #define TRIALS_PER_THREAD 4096 #define BLOCKS 256 #define THREADS 256 #define PI 3.1415926535 // known value of pi // curandState = etat du generateur de nombres aleatoires __global__ void gpu_monte_carlo(float *estimate, curandState *states) { unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x; int points_in_circle = 0; float x, y; curand_init(1234, tid, 0, &states[tid]); //Initialize CURAND for (int i = 0; i < TRIALS_PER_THREAD; ++i) { x = curand_uniform(&states[tid]); y = curand_uniform(&states[tid]); points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle. } estimate[tid] = 4.0f * points_in_circle / (float) TRIALS_PER_THREAD; //estimate of pi } float host_monte_carlo(long trials) { float x, y; long points_in_circle; for (long i = 0; i < trials; i++) { x = rand() / (float)RAND_MAX; y = rand() / (float)RAND_MAX; points_in_circle += (x*x + y*y <= 1.0f); } return 4.0f * points_in_circle / trials; } int main (int argc, char *argv[]) { clock_t start, stop; float host[BLOCKS * THREADS]; float *dev; curandState *devStates; printf("# of trials per thread = %d, # of blocks = %d, # of threads/block = %d.\n", TRIALS_PER_THREAD, BLOCKS, THREADS); start = clock(); cudaMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float)); cudaMalloc( (void **)&devStates, THREADS * BLOCKS * sizeof(curandState) ); gpu_monte_carlo<<<BLOCKS, THREADS>>>(dev, devStates); cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), cudaMemcpyDeviceToHost); float pi_gpu; for (int i = 0; i < BLOCKS * THREADS; ++i) { pi_gpu += host[i]; } pi_gpu /= (BLOCKS * THREADS); stop = clock(); printf("GPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC); start = clock(); float pi_cpu = host_monte_carlo(BLOCKS * THREADS * TRIALS_PER_THREAD); stop = clock(); printf("CPU pi calculated in %f s.\n", 
(stop-start)/(float)CLOCKS_PER_SEC); printf("CUDA estimate of PI = %f [error of %f]\n", pi_gpu, pi_gpu - PI); printf("CPU estimate of PI = %f [error of %f]\n", pi_cpu, pi_cpu - PI); return 0; }
ae641dd129244a6c0a40186b22f9fbaebba13776.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stereotgv.h" /// scalar field to upscale texture<float, hipTextureType2D, hipReadModeElementType> tgvTexCoarse; texture<float2, hipTextureType2D, hipReadModeElementType> tgvTtexCoarseFloat2; __global__ void TgvUpscaleKernel(int width, int height, int stride, float scale, float *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if (ix >= width || iy >= height) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution out[ix + iy * stride] = tex2D(tgvTexCoarse, x, y) * scale; } void StereoTgv::Upscale(const float *src, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range tgvTexCoarse.addressMode[0] = hipAddressModeMirror; tgvTexCoarse.addressMode[1] = hipAddressModeMirror; tgvTexCoarse.filterMode = hipFilterModeLinear; tgvTexCoarse.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); hipBindTexture2D(0, tgvTexCoarse, src, width, height, stride * sizeof(float)); TgvUpscaleKernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out); } //****************************** // Upscaling for Float2 //****************************** __global__ void TgvUpscaleFloat2Kernel(int width, int height, int stride, float scale, float2 *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if (ix >= width || iy >= height) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale 
interpolated vector to match next pyramid level resolution float2 src = tex2D(tgvTtexCoarseFloat2, x, y); out[ix + iy * stride].x = src.x * scale; out[ix + iy * stride].y = src.y * scale; } void StereoTgv::Upscale(const float2 *src, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float2 *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range tgvTtexCoarseFloat2.addressMode[0] = hipAddressModeMirror; tgvTtexCoarseFloat2.addressMode[1] = hipAddressModeMirror; tgvTtexCoarseFloat2.filterMode = hipFilterModeLinear; tgvTtexCoarseFloat2.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc<float2>(); hipBindTexture2D(0, tgvTtexCoarseFloat2, src, width, height, stride * sizeof(float2)); TgvUpscaleFloat2Kernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out); } // ******************************** // MASKED // ******************************** __global__ void TgvUpscaleMaskedKernel(float * mask, int width, int height, int stride, float scale, float *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if ((iy >= height) && (ix >= width)) return; int pos = ix + iy * stride; //if (mask[pos] == 0.0f) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution out[pos] = tex2D(tgvTexCoarse, x, y) * scale; //if (ix >= width || iy >= height) return; //// exploit hardware interpolation //// and scale interpolated vector to match next pyramid level resolution //out[ix + iy * stride] = tex2D(texCoarse, x, y) * scale; } void StereoTgv::UpscaleMasked(const float *src, float* mask, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float *out) { dim3 
threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range tgvTexCoarse.addressMode[0] = hipAddressModeMirror; tgvTexCoarse.addressMode[1] = hipAddressModeMirror; tgvTexCoarse.filterMode = hipFilterModeLinear; tgvTexCoarse.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); hipBindTexture2D(0, tgvTexCoarse, src, width, height, stride * sizeof(float)); TgvUpscaleMaskedKernel << < blocks, threads >> > (mask, newWidth, newHeight, newStride, scale, out); } //****************************** // Upscaling for Float2 //****************************** __global__ void TgvUpscaleFloat2MaskedKernel(float * mask, int width, int height, int stride, float scale, float2 *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if ((iy >= height) && (ix >= width)) return; int pos = ix + iy * stride; if (mask[pos] == 0.0f) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution float2 src = tex2D(tgvTtexCoarseFloat2, x, y); out[pos].x = src.x * scale; out[pos].y = src.y * scale; } void StereoTgv::UpscaleMasked(const float2 *src, float * mask, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float2 *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range tgvTtexCoarseFloat2.addressMode[0] = hipAddressModeMirror; tgvTtexCoarseFloat2.addressMode[1] = hipAddressModeMirror; tgvTtexCoarseFloat2.filterMode = hipFilterModeLinear; tgvTtexCoarseFloat2.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc<float2>(); hipBindTexture2D(0, tgvTtexCoarseFloat2, src, width, height, stride * sizeof(float2)); 
TgvUpscaleFloat2MaskedKernel << < blocks, threads >> > (mask, newWidth, newHeight, newStride, scale, out); }
ae641dd129244a6c0a40186b22f9fbaebba13776.cu
#include "stereotgv.h" /// scalar field to upscale texture<float, cudaTextureType2D, cudaReadModeElementType> tgvTexCoarse; texture<float2, cudaTextureType2D, cudaReadModeElementType> tgvTtexCoarseFloat2; __global__ void TgvUpscaleKernel(int width, int height, int stride, float scale, float *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if (ix >= width || iy >= height) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution out[ix + iy * stride] = tex2D(tgvTexCoarse, x, y) * scale; } void StereoTgv::Upscale(const float *src, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range tgvTexCoarse.addressMode[0] = cudaAddressModeMirror; tgvTexCoarse.addressMode[1] = cudaAddressModeMirror; tgvTexCoarse.filterMode = cudaFilterModeLinear; tgvTexCoarse.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, tgvTexCoarse, src, width, height, stride * sizeof(float)); TgvUpscaleKernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out); } //****************************** // Upscaling for Float2 //****************************** __global__ void TgvUpscaleFloat2Kernel(int width, int height, int stride, float scale, float2 *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if (ix >= width || iy >= height) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution float2 src = 
tex2D(tgvTtexCoarseFloat2, x, y); out[ix + iy * stride].x = src.x * scale; out[ix + iy * stride].y = src.y * scale; } void StereoTgv::Upscale(const float2 *src, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float2 *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range tgvTtexCoarseFloat2.addressMode[0] = cudaAddressModeMirror; tgvTtexCoarseFloat2.addressMode[1] = cudaAddressModeMirror; tgvTtexCoarseFloat2.filterMode = cudaFilterModeLinear; tgvTtexCoarseFloat2.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float2>(); cudaBindTexture2D(0, tgvTtexCoarseFloat2, src, width, height, stride * sizeof(float2)); TgvUpscaleFloat2Kernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out); } // ******************************** // MASKED // ******************************** __global__ void TgvUpscaleMaskedKernel(float * mask, int width, int height, int stride, float scale, float *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if ((iy >= height) && (ix >= width)) return; int pos = ix + iy * stride; //if (mask[pos] == 0.0f) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution out[pos] = tex2D(tgvTexCoarse, x, y) * scale; //if (ix >= width || iy >= height) return; //// exploit hardware interpolation //// and scale interpolated vector to match next pyramid level resolution //out[ix + iy * stride] = tex2D(texCoarse, x, y) * scale; } void StereoTgv::UpscaleMasked(const float *src, float* mask, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, 
threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range tgvTexCoarse.addressMode[0] = cudaAddressModeMirror; tgvTexCoarse.addressMode[1] = cudaAddressModeMirror; tgvTexCoarse.filterMode = cudaFilterModeLinear; tgvTexCoarse.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, tgvTexCoarse, src, width, height, stride * sizeof(float)); TgvUpscaleMaskedKernel << < blocks, threads >> > (mask, newWidth, newHeight, newStride, scale, out); } //****************************** // Upscaling for Float2 //****************************** __global__ void TgvUpscaleFloat2MaskedKernel(float * mask, int width, int height, int stride, float scale, float2 *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if ((iy >= height) && (ix >= width)) return; int pos = ix + iy * stride; if (mask[pos] == 0.0f) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution float2 src = tex2D(tgvTtexCoarseFloat2, x, y); out[pos].x = src.x * scale; out[pos].y = src.y * scale; } void StereoTgv::UpscaleMasked(const float2 *src, float * mask, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float2 *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range tgvTtexCoarseFloat2.addressMode[0] = cudaAddressModeMirror; tgvTtexCoarseFloat2.addressMode[1] = cudaAddressModeMirror; tgvTtexCoarseFloat2.filterMode = cudaFilterModeLinear; tgvTtexCoarseFloat2.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float2>(); cudaBindTexture2D(0, tgvTtexCoarseFloat2, src, width, height, stride * sizeof(float2)); TgvUpscaleFloat2MaskedKernel << < blocks, threads >> > 
(mask, newWidth, newHeight, newStride, scale, out); }
4c6ac5e24533332d2472b7e1dc2d4f0de630f337.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Functions.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/hip/CachingHostAllocator.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPEvent.h> #include <ATen/hip/PeerToPeerAccess.h> #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h> #include <ATen/native/Copy.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> namespace at { namespace native { void neg_kernel_cuda(TensorIteratorBase &iter); void conj_kernel_cuda(TensorIteratorBase &iter); namespace { void direct_copy_kernel_cuda(TensorIteratorBase &iter) { ScalarType dtype = iter.dtype(0); if (isQIntType(dtype)) { AT_DISPATCH_QINT_TYPES(dtype, "copy_", [&] { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; }); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kHalf, kBool, kBFloat16, dtype, "copy_", [&] { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; }); }); } } void neg_conj_kernel_cuda(TensorIteratorBase &iter) { AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "neg_conj_cuda", [&] { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return -std::conj(x); }); }); } } // namespace (anonymous) using namespace at::cuda; // device-to-device copy, does type conversion void copy_device_to_device(TensorIterator& iter, bool non_blocking) { int64_t numel = iter.numel(); // We can memcpy the memory if both tensors have the same type AND both // tensors are contiguous after dimension coalescing and reordering. 
bool same_type = iter.dtype(0) == iter.dtype(1); bool same_conj = iter.tensor(0).is_conj() == iter.tensor(1).is_conj(); bool same_neg = iter.tensor(0).is_neg() == iter.tensor(1).is_neg(); bool memcpy_eligible = same_type && same_conj && same_neg && iter.is_contiguous(); Device dst_device = iter.device(0); Device src_device = iter.device(1); HIPGuardMasqueradingAsCUDA device_guard(src_device); // We always perform the copy on the source device, using the current stream // on the source device, and we fully synchronize on both src and dst's // current streams for completion of the copy. We have to explicitly do this // for non-contig copies. This mimics the behavior of cross-device // hipMemcpyAsync on the default stream. HIPStreamMasqueradingAsCUDA copy_stream = getCurrentHIPStreamMasqueradingAsCUDA(src_device.index()); if (src_device != dst_device) { // This is a cross-device copy on the src current stream and dst current // stream. We perform a two-way barrier between both devices' streams // before the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are handled, so // that no one is operating on the dst memory when we perform the copy. 
// src waits on dst barrier (src already waits on src) CUDAEvent dst_ready; device_guard.set_device(dst_device); dst_ready.record(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index())); device_guard.set_device(src_device); dst_ready.block(copy_stream); } if (memcpy_eligible) { void *dst = iter.data_ptr(0); void *src = iter.data_ptr(1); size_t size = numel * iter.element_size(0); if (src != dst || src_device != dst_device) { // Perform the copy AT_CUDA_CHECK(hipMemcpyAsync( dst, src, size, hipMemcpyDeviceToDevice, copy_stream)); } } else { if (same_neg) { if (!same_conj) { conj_kernel_cuda(iter); } else { direct_copy_kernel_cuda(iter); } } else { if (!same_conj) { neg_conj_kernel_cuda(iter); } else { neg_kernel_cuda(iter); } } } if (src_device != dst_device) { // dst waits on src barrier (dst already waits on dst). We cannot // operate on dst's copy until the copy is complete. // Still on src_device, record stream event CUDAEvent src_ready; src_ready.record(copy_stream); device_guard.set_device(dst_device); src_ready.block(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index())); } AT_CUDA_CHECK(hipGetLastError()); } static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) { Device dst_device = iter.device(0); Device src_device = iter.device(1); if (dst_device == src_device) { // We never require temporaries for copies on the same GPU. TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda()); return false; } bool same_dtype = iter.dtype(0) == iter.dtype(1); if (same_dtype && iter.is_contiguous()) { // Contiguous same-dtype copies can always use hipMemcpyAsync return false; } else if (dst_device.is_cuda() && src_device.is_cuda()) { // Copies between GPUs can use the copy kernel if P2P is supported return !p2p_enabled; } else { // The remaining cases require temporaries. For example, this includes // non-contiguous copies between CPU and GPU. 
return true; } } static bool maybe_enable_p2p_access(Device dst_device, Device src_device) { if (dst_device.is_cpu() || src_device.is_cpu()) { return false; } return at::cuda::get_p2p_access(src_device.index(), dst_device.index()); } static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) { AT_ASSERT(iter.ntensors() == 2); Device dst_device = iter.device(0); Device src_device = iter.device(1); // Enable p2p access between devices. (No-op if it involves the CPU) bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device); if (copy_requires_temporaries(iter, p2p_enabled)) { // NB: this involves recursive calls to copy. Be careful that those copies // don't require temporaries or you will cause an infinite recursion! auto& dst = iter.tensor(0); Tensor dst_contig; Tensor src_contig; // Type conversions are performed on the CPU for CPU-GPU copies and on // the src device for GPU-GPU copies. if (iter.device_type(0) == kCUDA) { dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous(); } else { bool same_type = iter.dtype(0) == iter.dtype(1); dst_contig = (dst.is_contiguous() && same_type) ? 
dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).expand_as(dst).contiguous(); } // propagate the correct conjugate bit dst_contig._set_conj(dst.is_conj()); src_contig._set_conj(iter.tensor(1).is_conj()); dst_contig._set_neg(dst.is_neg()); src_contig._set_neg(iter.tensor(1).is_neg()); // perform a same-dtype copy on contiguous tensors TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes())); TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type()); dst_contig.copy_(src_contig, non_blocking); // if necessary, copy back into dst if (!dst_contig.is_same(dst)) { TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device()); dst.copy_(dst_contig, non_blocking); } return; } // Copy on GPU (or between GPUs) if (dst_device.is_cuda() && src_device.is_cuda()) { copy_device_to_device(iter, non_blocking); return; } // Copy between CPU and GPU hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; hipMemcpyKind kind; if (dst_device.is_cuda() && src_device.is_cpu()) { device_guard.set_device(dst_device); kind = hipMemcpyHostToDevice; } else if (dst_device.is_cpu() && src_device.is_cuda()) { device_guard.set_device(src_device); kind = hipMemcpyDeviceToHost; } else { TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()"); } void* dst = iter.data_ptr(0); void* src = iter.data_ptr(1); int64_t nbytes = iter.numel() * iter.element_size(0); HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA(); if (non_blocking) { AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream)); void* ptr = (dst_device == kCPU ? 
dst : src); AT_CUDA_CHECK(CachingHostAllocator_recordEvent(ptr, stream)); } else { at::cuda::memcpy_and_sync(dst, src, nbytes, kind, stream); } if (iter.tensor(0).is_conj() != iter.tensor(1).is_conj()) { iter.tensor(0).conj_physical_(); } if (iter.tensor(0).is_neg() != iter.tensor(1).is_neg()) { iter.tensor(0).neg_(); } } REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda); } // namespace native } // namespace at
4c6ac5e24533332d2472b7e1dc2d4f0de630f337.cu
#include <ATen/Functions.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CachingHostAllocator.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAEvent.h> #include <ATen/cuda/PeerToPeerAccess.h> #include <c10/cuda/CUDAStream.h> #include <ATen/native/Copy.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> namespace at { namespace native { void neg_kernel_cuda(TensorIteratorBase &iter); void conj_kernel_cuda(TensorIteratorBase &iter); namespace { void direct_copy_kernel_cuda(TensorIteratorBase &iter) { ScalarType dtype = iter.dtype(0); if (isQIntType(dtype)) { AT_DISPATCH_QINT_TYPES(dtype, "copy_", [&] { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; }); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kHalf, kBool, kBFloat16, dtype, "copy_", [&] { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; }); }); } } void neg_conj_kernel_cuda(TensorIteratorBase &iter) { AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "neg_conj_cuda", [&] { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return -std::conj(x); }); }); } } // namespace (anonymous) using namespace at::cuda; // device-to-device copy, does type conversion void copy_device_to_device(TensorIterator& iter, bool non_blocking) { int64_t numel = iter.numel(); // We can memcpy the memory if both tensors have the same type AND both // tensors are contiguous after dimension coalescing and reordering. 
bool same_type = iter.dtype(0) == iter.dtype(1); bool same_conj = iter.tensor(0).is_conj() == iter.tensor(1).is_conj(); bool same_neg = iter.tensor(0).is_neg() == iter.tensor(1).is_neg(); bool memcpy_eligible = same_type && same_conj && same_neg && iter.is_contiguous(); Device dst_device = iter.device(0); Device src_device = iter.device(1); CUDAGuard device_guard(src_device); // We always perform the copy on the source device, using the current stream // on the source device, and we fully synchronize on both src and dst's // current streams for completion of the copy. We have to explicitly do this // for non-contig copies. This mimics the behavior of cross-device // cudaMemcpyAsync on the default stream. CUDAStream copy_stream = getCurrentCUDAStream(src_device.index()); if (src_device != dst_device) { // This is a cross-device copy on the src current stream and dst current // stream. We perform a two-way barrier between both devices' streams // before the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are handled, so // that no one is operating on the dst memory when we perform the copy. // src waits on dst barrier (src already waits on src) CUDAEvent dst_ready; device_guard.set_device(dst_device); dst_ready.record(getCurrentCUDAStream(dst_device.index())); device_guard.set_device(src_device); dst_ready.block(copy_stream); } if (memcpy_eligible) { void *dst = iter.data_ptr(0); void *src = iter.data_ptr(1); size_t size = numel * iter.element_size(0); if (src != dst || src_device != dst_device) { // Perform the copy AT_CUDA_CHECK(cudaMemcpyAsync( dst, src, size, cudaMemcpyDeviceToDevice, copy_stream)); } } else { if (same_neg) { if (!same_conj) { conj_kernel_cuda(iter); } else { direct_copy_kernel_cuda(iter); } } else { if (!same_conj) { neg_conj_kernel_cuda(iter); } else { neg_kernel_cuda(iter); } } } if (src_device != dst_device) { // dst waits on src barrier (dst already waits on dst). 
We cannot // operate on dst's copy until the copy is complete. // Still on src_device, record stream event CUDAEvent src_ready; src_ready.record(copy_stream); device_guard.set_device(dst_device); src_ready.block(getCurrentCUDAStream(dst_device.index())); } AT_CUDA_CHECK(cudaGetLastError()); } static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) { Device dst_device = iter.device(0); Device src_device = iter.device(1); if (dst_device == src_device) { // We never require temporaries for copies on the same GPU. TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda()); return false; } bool same_dtype = iter.dtype(0) == iter.dtype(1); if (same_dtype && iter.is_contiguous()) { // Contiguous same-dtype copies can always use cudaMemcpyAsync return false; } else if (dst_device.is_cuda() && src_device.is_cuda()) { // Copies between GPUs can use the copy kernel if P2P is supported return !p2p_enabled; } else { // The remaining cases require temporaries. For example, this includes // non-contiguous copies between CPU and GPU. return true; } } static bool maybe_enable_p2p_access(Device dst_device, Device src_device) { if (dst_device.is_cpu() || src_device.is_cpu()) { return false; } return at::cuda::get_p2p_access(src_device.index(), dst_device.index()); } static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) { AT_ASSERT(iter.ntensors() == 2); Device dst_device = iter.device(0); Device src_device = iter.device(1); // Enable p2p access between devices. (No-op if it involves the CPU) bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device); if (copy_requires_temporaries(iter, p2p_enabled)) { // NB: this involves recursive calls to copy. Be careful that those copies // don't require temporaries or you will cause an infinite recursion! auto& dst = iter.tensor(0); Tensor dst_contig; Tensor src_contig; // Type conversions are performed on the CPU for CPU-GPU copies and on // the src device for GPU-GPU copies. 
if (iter.device_type(0) == kCUDA) { dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous(); } else { bool same_type = iter.dtype(0) == iter.dtype(1); dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).expand_as(dst).contiguous(); } // propagate the correct conjugate bit dst_contig._set_conj(dst.is_conj()); src_contig._set_conj(iter.tensor(1).is_conj()); dst_contig._set_neg(dst.is_neg()); src_contig._set_neg(iter.tensor(1).is_neg()); // perform a same-dtype copy on contiguous tensors TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes())); TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type()); dst_contig.copy_(src_contig, non_blocking); // if necessary, copy back into dst if (!dst_contig.is_same(dst)) { TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device()); dst.copy_(dst_contig, non_blocking); } return; } // Copy on GPU (or between GPUs) if (dst_device.is_cuda() && src_device.is_cuda()) { copy_device_to_device(iter, non_blocking); return; } // Copy between CPU and GPU cuda::OptionalCUDAGuard device_guard; cudaMemcpyKind kind; if (dst_device.is_cuda() && src_device.is_cpu()) { device_guard.set_device(dst_device); kind = cudaMemcpyHostToDevice; } else if (dst_device.is_cpu() && src_device.is_cuda()) { device_guard.set_device(src_device); kind = cudaMemcpyDeviceToHost; } else { TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()"); } void* dst = iter.data_ptr(0); void* src = iter.data_ptr(1); int64_t nbytes = iter.numel() * iter.element_size(0); CUDAStream stream = getCurrentCUDAStream(); if (non_blocking) { AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream)); void* ptr = (dst_device == kCPU ? 
dst : src); AT_CUDA_CHECK(CachingHostAllocator_recordEvent(ptr, stream)); } else { at::cuda::memcpy_and_sync(dst, src, nbytes, kind, stream); } if (iter.tensor(0).is_conj() != iter.tensor(1).is_conj()) { iter.tensor(0).conj_physical_(); } if (iter.tensor(0).is_neg() != iter.tensor(1).is_neg()) { iter.tensor(0).neg_(); } } REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda); } // namespace native } // namespace at
db1750b0034c89a36612480d5767858d338cde6a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "GammasRGB.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float3 __restrict__ *inOutImg = NULL; hipMalloc(&inOutImg, XSIZE*YSIZE); int imgWidth = XSIZE; int imgHeight = YSIZE; int imgPitch = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( GammasRGB), dim3(gridBlock),dim3(threadBlock), 0, 0, inOutImg,imgWidth,imgHeight,imgPitch); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( GammasRGB), dim3(gridBlock),dim3(threadBlock), 0, 0, inOutImg,imgWidth,imgHeight,imgPitch); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( GammasRGB), dim3(gridBlock),dim3(threadBlock), 0, 0, inOutImg,imgWidth,imgHeight,imgPitch); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
db1750b0034c89a36612480d5767858d338cde6a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "GammasRGB.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float3 __restrict__ *inOutImg = NULL; cudaMalloc(&inOutImg, XSIZE*YSIZE); int imgWidth = XSIZE; int imgHeight = YSIZE; int imgPitch = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); GammasRGB<<<gridBlock,threadBlock>>>(inOutImg,imgWidth,imgHeight,imgPitch); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { GammasRGB<<<gridBlock,threadBlock>>>(inOutImg,imgWidth,imgHeight,imgPitch); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { GammasRGB<<<gridBlock,threadBlock>>>(inOutImg,imgWidth,imgHeight,imgPitch); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
eb8f830e5ffaae3c838bafb58bf392bf77735c5b.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SparseLinear.cu" #else static bool THNN_(checkInput)(THCTensor* t) { return !t->is_empty() && t->_dim() == 2 && t->size(1) == 3; } static bool THNN_(checkSize2D)(THCTensor* t, int64_t size0, int64_t size1) { return !t->is_empty() && t->_dim() == 2 && t->size(0) == size0 && t->size(1) == size1; } static bool THNN_(checkSize1D)(THCTensor* t, int64_t size0) { return !t->is_empty() && t->_dim() == 1 && t->size(0) == size0; } static inline void THNN_(copyCudaFloatingType)(THCState *state, THCudaIntTensor *buf, THCTensor *t) { #ifdef THC_REAL_IS_FLOAT THCudaIntTensor_copyCudaFloat(state, buf, t); #elif defined(THC_REAL_IS_DOUBLE) THCudaIntTensor_copyCudaDouble(state, buf, t); #elif defined(THC_REAL_IS_HALF) THCudaIntTensor_copyCudaHalf(state, buf, t); #endif } void THNN_(SparseLinear_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *weight, THCTensor *bias) { THAssert(THCTensor_(checkGPU)(state, 4, input, output, weight, bias)); int64_t h; int64_t outDim = THCTensor_(size)(state, weight, 0); int64_t inDim = THCTensor_(size)(state, weight, 1); THArgCheck(THNN_(checkInput)(input), 2, "input size must be nnz x 3"); AT_CHECK(!output->is_empty() && THCTensor_(nDimension)(state, output) == 2, "output must be batchsize x outputsize, got size: ", output->sizes()); THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong"); weight = THCTensor_(newContiguous)(state, weight); int64_t batchnum = THCTensor_(size)(state, output, 0); int64_t nnz = THCTensor_(size)(state, input, 0); THCTensor *buffer = THCTensor_(new)(state); THCTensor *sel = THCTensor_(new)(state); THCTensor *values = THCTensor_(new)(state); THCudaIntTensor *rowbuf = THCudaIntTensor_new(state); THCudaIntTensor *csrPtrs = THCudaIntTensor_new(state); THCudaIntTensor *colInds = THCudaIntTensor_new(state); THCTensor_(resize1d)(state, values, nnz); 
THCudaIntTensor_resize1d(state, rowbuf, nnz); THCudaIntTensor_resize1d(state, colInds, nnz); THCudaIntTensor_resize1d(state, csrPtrs, batchnum+1); // Get data ready for cusparse, need CudaInt buffers // We do not need to sort, since rows are already in order // If rows might get out of order in future implementations, or if cusparse // complains with an illegal memory access, sort like we do in AccGradParameters THCTensor_(select)(state, sel, input, 1, 0); THNN_(copyCudaFloatingType)(state, rowbuf, sel); THCTensor_(select)(state, sel, input, 1, 1); THNN_(copyCudaFloatingType)(state, colInds, sel); THCTensor_(select)(state, sel, input, 1, 2); THCTensor_(copyCuda)(state, values, sel); init_cusparse(); hipsparseXcoo2csr(cusparse_handle, THCudaIntTensor_data(state, rowbuf), nnz, batchnum, THCudaIntTensor_data(state, csrPtrs), HIPSPARSE_INDEX_BASE_ONE); // output = bias THCTensor_(resize2d)(state, buffer, outDim, batchnum); THCTensor_(zero)(state, buffer); for (h=0; h<batchnum; h++) { THCTensor_(select)(state, sel, buffer, 1, h); THCTensor_(copy)(state, sel, bias); } // output = W * x real one = ScalarConvert<int, real>::to(1); hipsparseMatDescr_t descr = 0; hipsparseCreateMatDescr(&descr); hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ONE); #ifdef THC_REAL_IS_FLOAT hipsparseScsrmm(cusparse_handle, #elif defined(THC_REAL_IS_DOUBLE) hipsparseDcsrmm(cusparse_handle, #endif HIPSPARSE_OPERATION_NON_TRANSPOSE, batchnum, outDim, inDim, nnz, &one, descr, THCTensor_(data)(state, values), THCudaIntTensor_data(state, csrPtrs), THCudaIntTensor_data(state, colInds), THCTensor_(data)(state, weight), inDim, &one, THCTensor_(data)(state, buffer), batchnum ); THCTensor_(transpose)(state, buffer, NULL, 0, 1); // We do work in the buffer to keep the output contiguous THCTensor_(copy)(state, output, buffer); hipsparseDestroyMatDescr(descr); descr = 0; THCTensor_(free)(state, buffer); THCTensor_(free)(state, sel); 
THCTensor_(free)(state, values); THCTensor_(free)(state, weight); THCudaIntTensor_free(state, rowbuf); THCudaIntTensor_free(state, colInds); THCudaIntTensor_free(state, csrPtrs); } void THNN_(SparseLinear_accGradParameters)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradWeight, THCTensor *gradBias, THCTensor *weight, THCTensor *bias, accreal weightDecay, accreal scale) { int64_t outDim = THCTensor_(size)(state, weight, 0); int64_t inDim = THCTensor_(size)(state, weight, 1); THArgCheck(THNN_(checkInput)(input), 2, "input size must be batchsize x nnz x 2"); THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4, "gradWeight size wrong"); THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong"); weight = THCTensor_(newContiguous)(state, weight); int64_t nnz = THCTensor_(size)(state, input, 0); int64_t batchnum = THCTensor_(size)(state, gradOutput, 0); THCTensor *buf = THCTensor_(new)(state); THCTensor *cols = THCTensor_(new)(state); THCTensor *sel = THCTensor_(new)(state); THCudaLongTensor *inds = THCudaLongTensor_new(state); THCTensor *values = THCTensor_(new)(state); THCudaIntTensor *colbuf = THCudaIntTensor_new(state); THCudaIntTensor *colPtrs = THCudaIntTensor_new(state); THCudaIntTensor *rowInds = THCudaIntTensor_new(state); THCTensor_(select)(state, sel, input, 1, 0); // rowInds THCTensor_(select)(state, cols, input, 1, 1); // colInds THCTensor_(cadd)(state, buf, sel, batchnum, cols); // colInds * buatchdim + rowInds THCTensor_(sort)(state, buf, inds, buf, 0, 0); // Indices are now in ind THCTensor_(indexSelect)(state, buf, input, 0, inds); THCTensor_(resize1d)(state, values, nnz); THCudaIntTensor_resize1d(state, colbuf, nnz); THCudaIntTensor_resize1d(state, rowInds, nnz); THCudaIntTensor_resize1d(state, colPtrs, inDim+1); // Get data ready for cusparse, need CudaInt buffers THCTensor_(select)(state, sel, buf, 1, 0); THNN_(copyCudaFloatingType)(state, rowInds, sel); THCTensor_(select)(state, sel, buf, 1, 1); 
THNN_(copyCudaFloatingType)(state, colbuf, sel); THCTensor_(select)(state, sel, buf, 1, 2); THCTensor_(copyCuda)(state, values, sel); init_cusparse(); // Secretly coo2csc hipsparseXcoo2csr(cusparse_handle, THCudaIntTensor_data(state, colbuf), nnz, inDim, THCudaIntTensor_data(state, colPtrs), HIPSPARSE_INDEX_BASE_ONE); // FORTRAN expects contiguous col-major matricies THCTensor *tgradOutput = THCTensor_(new)(state); THCTensor_(transpose)(state, tgradOutput, gradOutput, 0, 1); THCTensor_(resize2d)(state, buf, batchnum, outDim); THCTensor_(copy)(state, buf, tgradOutput); THCTensor_(free)(state, tgradOutput); real one = ScalarConvert<int, real>::to(1); hipsparseMatDescr_t descr = 0; hipsparseCreateMatDescr(&descr); hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ONE); #ifdef THC_REAL_IS_FLOAT hipsparseScsrmm(cusparse_handle, #elif defined(THC_REAL_IS_DOUBLE) hipsparseDcsrmm(cusparse_handle, #endif HIPSPARSE_OPERATION_NON_TRANSPOSE, inDim, outDim, batchnum, nnz, &one, descr, THCTensor_(data)(state, values), THCudaIntTensor_data(state, colPtrs), THCudaIntTensor_data(state, rowInds), THCTensor_(data)(state, buf), batchnum, &one, THCTensor_(data)(state, gradWeight), inDim ); THCTensor_(sum)(state, buf, gradOutput, 0, 1); THCTensor_(resize1d)(state, buf, outDim); THCTensor_(cadd)(state, gradBias, gradBias, scale, buf); if (weightDecay != 0) { THCTensor_(cadd)(state, gradWeight, gradWeight, weightDecay, weight); THCTensor_(cadd)(state, gradBias, gradBias, weightDecay, bias); } THCTensor_(free)(state, weight); THCTensor_(free)(state, buf); THCTensor_(free)(state, sel); THCTensor_(free)(state, cols); THCudaLongTensor_free(state, inds); THCTensor_(free)(state, values); THCudaIntTensor_free(state, colbuf); THCudaIntTensor_free(state, rowInds); THCudaIntTensor_free(state, colPtrs); } void THNN_(SparseLinear_legacyUpdateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *weight, THCTensor *bias) { 
THError("CUDA does not support legacy input format, please use a table of nnz x 2 vectors"); } void THNN_(SparseLinear_legacyAccGradParameters)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradWeight, THCTensor *gradBias, THCTensor *weight, THCTensor *bias, accreal weightDecay, accreal scale) { THError("CUDA does not support legacy input format, please use a table of nnz x 2 vectors"); } // Dense updates are pretty fast on the GPU void THNN_(SparseLinear_zeroGradParameters)( THCState *state, THCTensor *gradWeight, THCTensor *gradBias, THCTensor *lastInput) { THCTensor_(zero)(state, gradWeight); THCTensor_(zero)(state, gradBias); } void THNN_(SparseLinear_updateParameters)( THCState *state, THCTensor *weight, THCTensor *bias, THCTensor *gradWeight, THCTensor *gradBias, THCTensor *lastInput, accreal learningRate) { THCTensor_(cadd)(state, weight, weight, -learningRate, gradWeight); THCTensor_(cadd)(state, bias, bias, -learningRate, gradBias); } #endif
eb8f830e5ffaae3c838bafb58bf392bf77735c5b.cu
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SparseLinear.cu"
#else

// Input is expected as an nnz x 3 tensor of (row, col, value) triples.
static bool THNN_(checkInput)(THCTensor* t)
{
  return !t->is_empty() && t->_dim() == 2 && t->size(1) == 3;
}

static bool THNN_(checkSize2D)(THCTensor* t, int64_t size0, int64_t size1)
{
  return !t->is_empty() && t->_dim() == 2 && t->size(0) == size0 && t->size(1) == size1;
}

static bool THNN_(checkSize1D)(THCTensor* t, int64_t size0)
{
  return !t->is_empty() && t->_dim() == 1 && t->size(0) == size0;
}

// Copy a float/double/half CUDA tensor into a CudaInt tensor
// (cusparse index arrays must be 32-bit ints).
static inline void THNN_(copyCudaFloatingType)(THCState *state,
                                               THCudaIntTensor *buf,
                                               THCTensor *t) {
#ifdef THC_REAL_IS_FLOAT
  THCudaIntTensor_copyCudaFloat(state, buf, t);
#elif defined(THC_REAL_IS_DOUBLE)
  THCudaIntTensor_copyCudaDouble(state, buf, t);
#elif defined(THC_REAL_IS_HALF)
  THCudaIntTensor_copyCudaHalf(state, buf, t);
#endif
}

// output = W * x + bias, where x is given sparsely as (row, col, value)
// triples and multiplied via cusparse csrmm.
void THNN_(SparseLinear_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           THCTensor *weight,
           THCTensor *bias)
{
  THAssert(THCTensor_(checkGPU)(state, 4, input, output, weight, bias));

  int64_t h;
  int64_t outDim = THCTensor_(size)(state, weight, 0);
  int64_t inDim = THCTensor_(size)(state, weight, 1);

  THArgCheck(THNN_(checkInput)(input), 2, "input size must be nnz x 3");
  AT_CHECK(!output->is_empty() && THCTensor_(nDimension)(state, output) == 2,
           "output must be batchsize x outputsize, got size: ", output->sizes());
  THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong");

  weight = THCTensor_(newContiguous)(state, weight);

  int64_t batchnum = THCTensor_(size)(state, output, 0);
  int64_t nnz = THCTensor_(size)(state, input, 0);

  THCTensor *buffer = THCTensor_(new)(state);
  THCTensor *sel = THCTensor_(new)(state);
  THCTensor *values = THCTensor_(new)(state);
  THCudaIntTensor *rowbuf = THCudaIntTensor_new(state);
  THCudaIntTensor *csrPtrs = THCudaIntTensor_new(state);
  THCudaIntTensor *colInds = THCudaIntTensor_new(state);

  THCTensor_(resize1d)(state, values, nnz);
  THCudaIntTensor_resize1d(state, rowbuf, nnz);
  THCudaIntTensor_resize1d(state, colInds, nnz);
  THCudaIntTensor_resize1d(state, csrPtrs, batchnum + 1);

  // Get data ready for cusparse, need CudaInt buffers.
  // We do not need to sort, since rows are already in order.
  // If rows might get out of order in future implementations, or if cusparse
  // complains with an illegal memory access, sort like we do in
  // AccGradParameters.
  THCTensor_(select)(state, sel, input, 1, 0);
  THNN_(copyCudaFloatingType)(state, rowbuf, sel);
  THCTensor_(select)(state, sel, input, 1, 1);
  THNN_(copyCudaFloatingType)(state, colInds, sel);
  THCTensor_(select)(state, sel, input, 1, 2);
  THCTensor_(copyCuda)(state, values, sel);

  init_cusparse();
  cusparseXcoo2csr(cusparse_handle,
      THCudaIntTensor_data(state, rowbuf), nnz, batchnum,
      THCudaIntTensor_data(state, csrPtrs), CUSPARSE_INDEX_BASE_ONE);

  // output = bias (broadcast the bias into every column of the
  // outDim x batchnum work buffer)
  THCTensor_(resize2d)(state, buffer, outDim, batchnum);
  THCTensor_(zero)(state, buffer);
  for (h = 0; h < batchnum; h++) {
    THCTensor_(select)(state, sel, buffer, 1, h);
    THCTensor_(copy)(state, sel, bias);
  }

  // output = W * x
  real one = ScalarConvert<int, real>::to(1);
  cusparseMatDescr_t descr = 0;
  cusparseCreateMatDescr(&descr);
  cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
  cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ONE);
#ifdef THC_REAL_IS_FLOAT
  cusparseScsrmm(cusparse_handle,
#elif defined(THC_REAL_IS_DOUBLE)
  cusparseDcsrmm(cusparse_handle,
#endif
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      batchnum, outDim, inDim, nnz,
      &one,
      descr,
      THCTensor_(data)(state, values),
      THCudaIntTensor_data(state, csrPtrs),
      THCudaIntTensor_data(state, colInds),
      THCTensor_(data)(state, weight), inDim,
      &one, THCTensor_(data)(state, buffer), batchnum
  );
  THCTensor_(transpose)(state, buffer, NULL, 0, 1);

  // We do work in the buffer to keep the output contiguous
  THCTensor_(copy)(state, output, buffer);

  cusparseDestroyMatDescr(descr);
  descr = 0;
  THCTensor_(free)(state, buffer);
  THCTensor_(free)(state, sel);
  THCTensor_(free)(state, values);
  THCTensor_(free)(state, weight);
  THCudaIntTensor_free(state, rowbuf);
  THCudaIntTensor_free(state, colInds);
  THCudaIntTensor_free(state, csrPtrs);
}

// Accumulates gradWeight += scale * gradOutput^T * x (x sparse, via a
// csc-style csrmm) and gradBias += scale * sum(gradOutput), plus optional
// weight decay.
void THNN_(SparseLinear_accGradParameters)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradWeight,
           THCTensor *gradBias,
           THCTensor *weight,
           THCTensor *bias,
           accreal weightDecay,
           accreal scale)
{
  int64_t outDim = THCTensor_(size)(state, weight, 0);
  int64_t inDim = THCTensor_(size)(state, weight, 1);

  THArgCheck(THNN_(checkInput)(input), 2,
             "input size must be batchsize x nnz x 2");
  THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4,
             "gradWeight size wrong");
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong");

  weight = THCTensor_(newContiguous)(state, weight);

  int64_t nnz = THCTensor_(size)(state, input, 0);
  int64_t batchnum = THCTensor_(size)(state, gradOutput, 0);

  THCTensor *buf = THCTensor_(new)(state);
  THCTensor *cols = THCTensor_(new)(state);
  THCTensor *sel = THCTensor_(new)(state);
  THCudaLongTensor *inds = THCudaLongTensor_new(state);
  THCTensor *values = THCTensor_(new)(state);
  THCudaIntTensor *colbuf = THCudaIntTensor_new(state);
  THCudaIntTensor *colPtrs = THCudaIntTensor_new(state);
  THCudaIntTensor *rowInds = THCudaIntTensor_new(state);

  THCTensor_(select)(state, sel, input, 1, 0);  // rowInds
  THCTensor_(select)(state, cols, input, 1, 1); // colInds
  // Sort key: colInds * batchnum + rowInds, so entries end up grouped by
  // column (csc order) with rows ascending inside each column.
  THCTensor_(cadd)(state, buf, sel, batchnum, cols);
  THCTensor_(sort)(state, buf, inds, buf, 0, 0); // Indices are now in inds
  THCTensor_(indexSelect)(state, buf, input, 0, inds);

  THCTensor_(resize1d)(state, values, nnz);
  THCudaIntTensor_resize1d(state, colbuf, nnz);
  THCudaIntTensor_resize1d(state, rowInds, nnz);
  THCudaIntTensor_resize1d(state, colPtrs, inDim + 1);

  // Get data ready for cusparse, need CudaInt buffers
  THCTensor_(select)(state, sel, buf, 1, 0);
  THNN_(copyCudaFloatingType)(state, rowInds, sel);
  THCTensor_(select)(state, sel, buf, 1, 1);
  THNN_(copyCudaFloatingType)(state, colbuf, sel);
  THCTensor_(select)(state, sel, buf, 1, 2);
  THCTensor_(copyCuda)(state, values, sel);

  init_cusparse();
  // Secretly coo2csc: entries are sorted by column, so compressing the
  // column indices yields csc pointers.
  cusparseXcoo2csr(cusparse_handle,
      THCudaIntTensor_data(state, colbuf), nnz, inDim,
      THCudaIntTensor_data(state, colPtrs), CUSPARSE_INDEX_BASE_ONE);

  // FORTRAN expects contiguous col-major matrices
  THCTensor *tgradOutput = THCTensor_(new)(state);
  THCTensor_(transpose)(state, tgradOutput, gradOutput, 0, 1);
  THCTensor_(resize2d)(state, buf, batchnum, outDim);
  THCTensor_(copy)(state, buf, tgradOutput);
  THCTensor_(free)(state, tgradOutput);

  real one = ScalarConvert<int, real>::to(1);
  cusparseMatDescr_t descr = 0;
  cusparseCreateMatDescr(&descr);
  cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
  cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ONE);
#ifdef THC_REAL_IS_FLOAT
  cusparseScsrmm(cusparse_handle,
#elif defined(THC_REAL_IS_DOUBLE)
  cusparseDcsrmm(cusparse_handle,
#endif
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      inDim, outDim, batchnum, nnz,
      &one,
      descr,
      THCTensor_(data)(state, values),
      THCudaIntTensor_data(state, colPtrs),
      THCudaIntTensor_data(state, rowInds),
      THCTensor_(data)(state, buf), batchnum,
      &one, THCTensor_(data)(state, gradWeight), inDim
  );
  // FIX: the descriptor was previously leaked here; updateOutput already
  // destroys its descriptor, so do the same for consistency.
  cusparseDestroyMatDescr(descr);
  descr = 0;

  THCTensor_(sum)(state, buf, gradOutput, 0, 1);
  THCTensor_(resize1d)(state, buf, outDim);
  THCTensor_(cadd)(state, gradBias, gradBias, scale, buf);

  if (weightDecay != 0) {
    THCTensor_(cadd)(state, gradWeight, gradWeight, weightDecay, weight);
    THCTensor_(cadd)(state, gradBias, gradBias, weightDecay, bias);
  }

  THCTensor_(free)(state, weight);
  THCTensor_(free)(state, buf);
  THCTensor_(free)(state, sel);
  THCTensor_(free)(state, cols);
  THCudaLongTensor_free(state, inds);
  THCTensor_(free)(state, values);
  THCudaIntTensor_free(state, colbuf);
  THCudaIntTensor_free(state, rowInds);
  THCudaIntTensor_free(state, colPtrs);
}

void THNN_(SparseLinear_legacyUpdateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           THCTensor *weight,
           THCTensor *bias) {
  THError("CUDA does not support legacy input format, please use a table of nnz x 2 vectors");
}

void THNN_(SparseLinear_legacyAccGradParameters)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradWeight,
           THCTensor *gradBias,
           THCTensor *weight,
           THCTensor *bias,
           accreal weightDecay,
           accreal scale) {
  THError("CUDA does not support legacy input format, please use a table of nnz x 2 vectors");
}

// Dense updates are pretty fast on the GPU
void THNN_(SparseLinear_zeroGradParameters)(
           THCState *state,
           THCTensor *gradWeight,
           THCTensor *gradBias,
           THCTensor *lastInput) {
  THCTensor_(zero)(state, gradWeight);
  THCTensor_(zero)(state, gradBias);
}

// Plain SGD step: p -= learningRate * gradP for both weight and bias.
void THNN_(SparseLinear_updateParameters)(
           THCState *state,
           THCTensor *weight,
           THCTensor *bias,
           THCTensor *gradWeight,
           THCTensor *gradBias,
           THCTensor *lastInput,
           accreal learningRate) {
  THCTensor_(cadd)(state, weight, weight, -learningRate, gradWeight);
  THCTensor_(cadd)(state, bias, bias, -learningRate, gradBias);
}

#endif
487896f02a9ef23928aa53065cdc54135108b2a9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>

// Thread block size (also the tile edge length)
#define BLOCK_SIZE 32

// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
    int width;
    int height;
    int stride;
    float* elements;
} Matrix;

// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col)
{
    return A.elements[row * A.stride + col];
}

// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col, float value)
{
    A.elements[row * A.stride + col] = value;
}

// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix Asub;
    Asub.width = BLOCK_SIZE;
    Asub.height = BLOCK_SIZE;
    Asub.stride = A.stride;
    Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
    return Asub;
}

// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = d_A.stride = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    hipMalloc(&d_A.elements, size);
    hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = d_B.stride = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    hipMalloc(&d_B.elements, size);
    hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);

    // Allocate C in device memory
    Matrix d_C;
    d_C.width = d_C.stride = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    hipMalloc(&d_C.elements, size);

    // Invoke kernel: one thread per output element, one block per tile
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);

    // Read C from device memory (blocking copy also synchronizes the kernel)
    hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);

    // Free device memory
    hipFree(d_A.elements);
    hipFree(d_B.elements);
    hipFree(d_C.elements);
}

// Matrix multiplication kernel called by MatMul().
// Classic shared-memory tiling: each block computes one BLOCK_SIZE^2 tile
// of C by sweeping matching tiles of A and B through shared memory.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // Block row and column
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;

    // Each thread block computes one sub-matrix Csub of C
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);

    // Each thread computes one element of Csub
    // by accumulating results into Cvalue
    float Cvalue = 0;

    // Thread row and column within Csub
    int row = threadIdx.y;
    int col = threadIdx.x;

    // Loop over all the sub-matrices of A and B that are
    // required to compute Csub
    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
        // Get sub-matrix Asub of A
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        // Get sub-matrix Bsub of B
        Matrix Bsub = GetSubMatrix(B, m, blockCol);

        // Shared memory used to store Asub and Bsub respectively
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

        // Load Asub and Bsub from device memory to shared memory
        // Each thread loads one element of each sub-matrix
        As[row][col] = GetElement(Asub, row, col);
        Bs[row][col] = GetElement(Bsub, row, col);

        // Synchronize to make sure the sub-matrices are loaded
        // before starting the computation
        __syncthreads();

        // Multiply Asub and Bsub together.
        // FIX: loop bound was `e <= BLOCK_SIZE`, which read one element past
        // each shared tile and added garbage to Cvalue.
        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[row][e] * Bs[e][col];

        // FIX: synchronize before the next iteration overwrites As/Bs,
        // otherwise fast threads race ahead and clobber tiles that slower
        // threads are still reading.
        __syncthreads();
    }

    // Write Csub to device memory
    // Each thread writes one element
    SetElement(Csub, row, col, Cvalue);
}

int main(){
    const int num_m = 3;       // we need 3 matrices
    const int side_dim = 128;  // side dimension of square matrix
    Matrix *m = new Matrix[num_m];  // allocate matrix storage part 1
    for (int i = 0; i < num_m; i++){
        m[i].width = m[i].height = m[i].stride = side_dim;  // set matrix params
        m[i].elements = new float[side_dim*side_dim];       // allocate matrix storage part 2
        if (i < 2)  // initialize first two matrices
            for (int j = 0; j < side_dim*side_dim; j++)
                m[i].elements[j] = 1.0f;
    }
    MatMul(m[0], m[1], m[2]);  // perform matrix-multiply
    std::cout << hipGetErrorString(hipGetLastError()) << std::endl;
    // All-ones inputs: every C element must equal side_dim exactly.
    for (int i = 0; i < side_dim*side_dim; i++)
        if (m[2].elements[i] != (float)side_dim) {
            std::cout << "Mismatch at index: " << i
                      << " expected: " << (float)side_dim
                      << " got: " << m[2].elements[i] << std::endl;
            return 0;
        }
    std::cout << "Success!" << std::endl;
    for (int i = 0; i < num_m; i++) delete[] m[i].elements;
    delete[] m;
    return 0;
}
487896f02a9ef23928aa53065cdc54135108b2a9.cu
#include <iostream>

// Thread block size (also the tile edge length)
#define BLOCK_SIZE 32

// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
    int width;
    int height;
    int stride;
    float* elements;
} Matrix;

// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col)
{
    return A.elements[row * A.stride + col];
}

// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col, float value)
{
    A.elements[row * A.stride + col] = value;
}

// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix Asub;
    Asub.width = BLOCK_SIZE;
    Asub.height = BLOCK_SIZE;
    Asub.stride = A.stride;
    Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
    return Asub;
}

// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = d_A.stride = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = d_B.stride = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

    // Allocate C in device memory
    Matrix d_C;
    d_C.width = d_C.stride = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);

    // Invoke kernel: one thread per output element, one block per tile
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);

    // Read C from device memory (blocking copy also synchronizes the kernel)
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// Matrix multiplication kernel called by MatMul().
// Classic shared-memory tiling: each block computes one BLOCK_SIZE^2 tile
// of C by sweeping matching tiles of A and B through shared memory.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // Block row and column
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;

    // Each thread block computes one sub-matrix Csub of C
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);

    // Each thread computes one element of Csub
    // by accumulating results into Cvalue
    float Cvalue = 0;

    // Thread row and column within Csub
    int row = threadIdx.y;
    int col = threadIdx.x;

    // Loop over all the sub-matrices of A and B that are
    // required to compute Csub
    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
        // Get sub-matrix Asub of A
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        // Get sub-matrix Bsub of B
        Matrix Bsub = GetSubMatrix(B, m, blockCol);

        // Shared memory used to store Asub and Bsub respectively
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

        // Load Asub and Bsub from device memory to shared memory
        // Each thread loads one element of each sub-matrix
        As[row][col] = GetElement(Asub, row, col);
        Bs[row][col] = GetElement(Bsub, row, col);

        // Synchronize to make sure the sub-matrices are loaded
        // before starting the computation
        __syncthreads();

        // Multiply Asub and Bsub together.
        // FIX: loop bound was `e <= BLOCK_SIZE`, which read one element past
        // each shared tile and added garbage to Cvalue.
        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[row][e] * Bs[e][col];

        // FIX: synchronize before the next iteration overwrites As/Bs,
        // otherwise fast threads race ahead and clobber tiles that slower
        // threads are still reading.
        __syncthreads();
    }

    // Write Csub to device memory
    // Each thread writes one element
    SetElement(Csub, row, col, Cvalue);
}

int main(){
    const int num_m = 3;       // we need 3 matrices
    const int side_dim = 128;  // side dimension of square matrix
    Matrix *m = new Matrix[num_m];  // allocate matrix storage part 1
    for (int i = 0; i < num_m; i++){
        m[i].width = m[i].height = m[i].stride = side_dim;  // set matrix params
        m[i].elements = new float[side_dim*side_dim];       // allocate matrix storage part 2
        if (i < 2)  // initialize first two matrices
            for (int j = 0; j < side_dim*side_dim; j++)
                m[i].elements[j] = 1.0f;
    }
    MatMul(m[0], m[1], m[2]);  // perform matrix-multiply
    std::cout << cudaGetErrorString(cudaGetLastError()) << std::endl;
    // All-ones inputs: every C element must equal side_dim exactly.
    for (int i = 0; i < side_dim*side_dim; i++)
        if (m[2].elements[i] != (float)side_dim) {
            std::cout << "Mismatch at index: " << i
                      << " expected: " << (float)side_dim
                      << " got: " << m[2].elements[i] << std::endl;
            return 0;
        }
    std::cout << "Success!" << std::endl;
    for (int i = 0; i < num_m; i++) delete[] m[i].elements;
    delete[] m;
    return 0;
}
28f3bbf728a9a1329e949815c1d1522c66910cab.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDA_funcs.cuh"
#include "CPU_funcs.h"

// Map log-transformed coordinate x (plus vol coordinate y) back to price P.
__device__ double cuda_x2P(double x, double y, double rho, double vol) {
    return exp(x + (2 * rho * exp(0.5 * y)) / vol);
}

// Inverse of cuda_x2P: map price P to the log-transformed coordinate x.
__device__ double cuda_P2x(double P, double y, double rho, double vol) {
    return log(P) - 2 * rho * exp(0.5 * y) / vol;
}

// Explicit FD sub-step 1 (diffusion in y) for one x-slice; one thread per y index.
__global__ void cuda_explicit_step1(double* this_mat, double* last_mat, size_t x_i, size_t n_i,
        size_t M, double r, constants::OptionType op_type, double dt, double dy, double Y_MIN,
        double rho, double vol, double K) {
    size_t y_i = blockDim.x * blockIdx.x + threadIdx.x;
    if (y_i >= M) { return; }
    // notice that P is a monotonic increasing function of x and y, so we set
    // boundary condition according to the option value, which is a function of P.
    double dsc = 1 / (1 + r * dt), P = 0.0;
    if (y_i == M - 1) {
        if (op_type == constants::Call) {
            // NOTE(review): x_i here is the grid *index*, while cuda_early_exercise
            // passes the coordinate X_MIN + dx * x_i — confirm which is intended.
            P = cuda_x2P(x_i, y_i * dy + Y_MIN, rho, vol) - K * pow(dsc, (double)n_i);
            MATRIX_GET(this_mat, x_i, y_i, M) = P;
        }
        else {
            MATRIX_GET(this_mat, x_i, y_i, M) = 0.0; // put upper bound
        }
    }
    else if (y_i == 0) {
        if (op_type == constants::Call) {
            MATRIX_GET(this_mat, x_i, y_i, M) = 0.0; // call lower bound
        }
        else {
            P = K * pow(dsc, (double)n_i) - cuda_x2P(x_i, y_i * dy + Y_MIN, rho, vol);
            MATRIX_GET(this_mat, x_i, y_i, M) = P;
        }
    }
    else {
        // Interior: central-difference diffusion term in y.
        double vol_dt_dy = vol * vol * dt / dy / dy;
        MATRIX_GET(this_mat, x_i, y_i, M) =
            0.5 * (MATRIX_GET(last_mat, x_i, y_i + 1, M) + MATRIX_GET(last_mat, x_i, y_i - 1, M)) * vol_dt_dy
            + MATRIX_GET(last_mat, x_i, y_i, M) * (1 - vol_dt_dy);
    }
}

// Explicit FD sub-step 2 (upwinded drift in y) for one x-slice; one thread per y index.
__global__ void cuda_explicit_step2(double* this_mat, double* last_mat, size_t x_i, size_t n_i,
        size_t M, double r, constants::OptionType op_type, double dt, double dy, double Y_MIN,
        double kappa, double theta, double rho, double vol, double K) {
    size_t y_i = blockDim.x * blockIdx.x + threadIdx.x;
    if (y_i >= M) { return; }
    // notice that P is a monotonic increasing function of x and y, so we set
    // boundary condition according to the option value, which is a function of P.
    double dsc = 1 / (1 + r * dt), P = 0.0;
    if (y_i == M - 1) {
        if (op_type == constants::Call) {
            P = cuda_x2P(x_i, y_i * dy + Y_MIN, rho, vol) - K * pow(dsc, (double)n_i);
            MATRIX_GET(this_mat, x_i, y_i, M) = P;
        }
        else {
            MATRIX_GET(this_mat, x_i, y_i, M) = 0.0; // put upper bound
        }
    }
    else if (y_i == 0) {
        if (op_type == constants::Call) {
            MATRIX_GET(this_mat, x_i, y_i, M) = 0.0; // call lower bound
        }
        else {
            P = K * pow(dsc, (double)n_i) - cuda_x2P(x_i, y_i * dy + Y_MIN, rho, vol);
            MATRIX_GET(this_mat, x_i, y_i, M) = P;
        }
    }
    else {
        // Interior: one-sided (upwind) difference chosen by the sign of the drift.
        double k_theta = kappa * (theta - Y_MIN - dy * y_i), dt_dy = dt / dy;
        MATRIX_GET(this_mat, x_i, y_i, M) = MATRIX_GET(last_mat, x_i, y_i, M)
            + MAX(0.0, k_theta) * dt_dy * (MATRIX_GET(last_mat, x_i, y_i + 1, M) - MATRIX_GET(last_mat, x_i, y_i, M))
            - MAX(0.0, -k_theta) * dt_dy * (MATRIX_GET(last_mat, x_i, y_i, M) - MATRIX_GET(last_mat, x_i, y_i - 1, M));
    }
}

// Implicit FD step in x: each thread solves one tridiagonal system (Thomas
// algorithm) along the x-direction for its y-slice. c_vec provides M doubles
// of scratch per thread (indexed by y_i * M).
__global__ void cuda_implicit_step(double* this_mat, double* last_mat, double* c_vec, size_t n_i,
        size_t M, double r, constants::OptionType op_type, double dt, double dx, double dy,
        double Y_MIN, double rho, double kappa, double theta, double vol, double K) {
    size_t y_i = blockDim.x * blockIdx.x + threadIdx.x;
    if (y_i >= M) { return; }
    double dsc = 1 / (1 + r * dt), P = 0.0;
    c_vec += y_i * M;
    double y = Y_MIN + y_i * dy, dt_dx = dt / dx / 2,
           ey_rho_dt_dx = exp(y) * (1 - rho * rho) * dt / dx / dx;
    double rho_term = rho * exp(0.5 * y) * (kappa * (theta - y) + 0.25 * vol * vol) / vol;
    // the matrix is a constant matrix
    double a_i = -0.5 * ey_rho_dt_dx - (r - 0.5 * exp(y) - rho_term) * dt_dx;
    double b_i = 1 + r * dt + ey_rho_dt_dx;
    double c_i = -0.5 * ey_rho_dt_dx + (r - 0.5 * exp(y) - rho_term) * dt_dx;
    // solve for this column: forward sweep...
    c_vec[0] = c_i / b_i;
    FOR_LOOP(i, 1, M - 1) {
        c_vec[i] = c_i / (b_i - a_i * c_vec[i - 1]);
    }
    MATRIX_GET(last_mat, 0, y_i, M) = MATRIX_GET(last_mat, 0, y_i, M) / b_i;
    FOR_LOOP(i, 1, M) {
        MATRIX_GET(last_mat, i, y_i, M) =
            (MATRIX_GET(last_mat, i, y_i, M) - a_i * MATRIX_GET(last_mat, i - 1, y_i, M))
            / (b_i - a_i * c_vec[i - 1]);
    }
    // ...then back substitution.
    MATRIX_GET(this_mat, M - 1, y_i, M) = MATRIX_GET(last_mat, M - 1, y_i, M);
    for (long int i = M - 2; i >= 0; --i) {
        MATRIX_GET(this_mat, i, y_i, M) =
            MATRIX_GET(last_mat, i, y_i, M) - c_vec[i] * MATRIX_GET(this_mat, i + 1, y_i, M);
    }
}

// American-exercise projection: value = max(continuation, intrinsic) for one
// x-slice; one thread per y index.
__global__ void cuda_early_exercise(double* this_mat, size_t x_i, size_t M,
        constants::OptionType op_type, double dx, double X_MIN, double dy, double Y_MIN,
        double rho, double vol, double K) {
    size_t y_i = blockDim.x * blockIdx.x + threadIdx.x;
    if (y_i >= M) { return; }
    double P = cuda_x2P(X_MIN + dx * x_i, Y_MIN + dy * y_i, rho, vol),
           itsc = op_type == constants::Call ? P - K : K - P;
    MATRIX_GET(this_mat, x_i, y_i, M) = MAX(MATRIX_GET(this_mat, x_i, y_i, M), itsc);
}

// Price an American option under stochastic volatility by backward induction
// on an M x M (x, y) grid over N time steps, using an explicit/implicit
// operator-splitting scheme on the GPU. Returns the grid value at the node
// whose price is closest to P0.
double cuda_get_price_stochastic_vol(double P0, double y0, double K, double r, double vol,
        double tao, double theta, double rho, double kappa, constants::OptionType op_type,
        size_t M, size_t N, double P_MIN, double P_MAX, double Y_MIN, double Y_MAX) {
    hipSetDevice(0);
    double *d_value_mat0 = nullptr, *d_value_mat1 = nullptr, *d_value_mat2 = nullptr;
    // FIX: these calls previously passed (void**)d_value_matX — the pointer's
    // *value* (nullptr) instead of its address — so the device buffers were
    // never allocated and the pointers stayed null.
    hipMalloc((void**)&d_value_mat0, sizeof(double) * M * M);
    hipMalloc((void**)&d_value_mat1, sizeof(double) * M * M);
    hipMalloc((void**)&d_value_mat2, sizeof(double) * M * M);
    double *value_mat2 = new double[M * M];
    double *mat_lst[] = { d_value_mat0, d_value_mat1, d_value_mat2 };

    // initialize parameters
    double dy = (Y_MAX - Y_MIN) / M;
    double dt = tao / N;
    double X_MIN = P2x(P_MAX, Y_MIN, rho, vol), X_MAX = P2x(P_MIN, Y_MAX, rho, vol),
           dx = (X_MAX - X_MIN) / M;

    // Terminal payoff at expiry, computed on the host.
    double this_x = 0.0, P = 0.0;
    FOR_LOOP(x_i, 0, M) {
        this_x = X_MIN + dx * x_i;
        if (op_type == constants::Call) {
            FOR_LOOP(y_i, 0, M) {
                P = x2P(this_x, Y_MIN + dy * y_i, rho, vol) - K;
                MATRIX_GET(value_mat2, x_i, y_i, M) = MAX(P, 0.0);
            }
        }
        else {
            FOR_LOOP(y_i, 0, M) {
                P = K - x2P(this_x, Y_MIN + dy * y_i, rho, vol);
                MATRIX_GET(value_mat2, x_i, y_i, M) = MAX(P, 0.0);
            }
        }
    }
    hipMemcpy(d_value_mat2, value_mat2, sizeof(double) * M * M, hipMemcpyHostToDevice);

    // use parallel computing for backward deduction
    const size_t block_count = M / parameters::GPU_THREADS + 1;
    double* c_vec = nullptr;
    hipMalloc((void**)&c_vec, sizeof(double) * M * M);
    for (size_t j = N - 1; j > 0; --j) {
        FOR_LOOP(x_i, 0, M) {
            hipLaunchKernelGGL(( cuda_explicit_step1) , dim3(block_count), dim3(parameters::GPU_THREADS), 0, 0,
                mat_lst[1], mat_lst[2], x_i, N - j, M, r, op_type, dt, dy, Y_MIN, rho, vol, K);
            hipDeviceSynchronize();
        }
        FOR_LOOP(x_i, 0, M) {
            hipLaunchKernelGGL(( cuda_explicit_step2) , dim3(block_count), dim3(parameters::GPU_THREADS), 0, 0,
                mat_lst[0], mat_lst[1], x_i, N - j, M, r, op_type, dt, dy, Y_MIN, kappa, theta, rho, vol, K);
            hipDeviceSynchronize();
        }
        hipLaunchKernelGGL(( cuda_implicit_step) , dim3(block_count), dim3(parameters::GPU_THREADS), 0, 0,
            mat_lst[2], mat_lst[0], c_vec, N - j, M, r, op_type, dt, dx, dy, Y_MIN, rho, kappa, theta, vol, K);
        hipDeviceSynchronize();
        FOR_LOOP(x_i, 0, M) {
            hipLaunchKernelGGL(( cuda_early_exercise) , dim3(block_count), dim3(parameters::GPU_THREADS), 0, 0,
                mat_lst[2], x_i, M, op_type, dx, X_MIN, dy, Y_MIN, rho, vol, K);
            hipDeviceSynchronize();
        }
    }
    hipFree(c_vec);
    hipFree(d_value_mat0);
    hipFree(d_value_mat1);
    hipMemcpy(value_mat2, d_value_mat2, sizeof(double) * M * M, hipMemcpyDeviceToHost);
    hipFree(d_value_mat2);

    // find the best fit of start: grid node whose price is closest to P0.
    // FIX: the sentinel was `double min_diff = NULL` (comparing a double to
    // NULL); use a negative sentinel instead — diffs are absolute values, so
    // min_diff < 0 uniquely marks "not yet set".
    double this_diff = 0.0, min_diff = -1.0, rst = 0.0;
    this_x = 0.0;
    FOR_LOOP(x_i, 0, M) {
        this_x = X_MIN + x_i * dx;
        FOR_LOOP(y_i, 0, M) {
            this_diff = abs(x2P(this_x, Y_MIN + y_i * dy, rho, vol) - P0);
            if (min_diff < 0.0 || min_diff > this_diff) {
                rst = MATRIX_GET(value_mat2, x_i, y_i, M);
                min_diff = this_diff;
            }
        }
    }
    delete[] value_mat2;
    hipDeviceReset();
    return rst;
}
28f3bbf728a9a1329e949815c1d1522c66910cab.cu
#include "CUDA_funcs.cuh" #include "CPU_funcs.h" __device__ double cuda_x2P(double x, double y, double rho, double vol) { return exp(x + (2 * rho * exp(0.5 * y)) / vol); } __device__ double cuda_P2x(double P, double y, double rho, double vol) { return log(P) - 2 * rho * exp(0.5 * y) / vol; } __global__ void cuda_explicit_step1(double* this_mat, double* last_mat, size_t x_i, size_t n_i, size_t M, double r, constants::OptionType op_type, double dt, double dy, double Y_MIN, double rho, double vol, double K) { size_t y_i = blockDim.x * blockIdx.x + threadIdx.x; if (y_i >= M) { return; } // notice that P is a monotonic increasing function of x and y, so we set boundary condition according to the option value, which is a function of P. double dsc = 1 / (1 + r * dt), P = 0.0; if (y_i == M - 1) { if (op_type == constants::Call) { P = cuda_x2P(x_i, y_i * dy + Y_MIN, rho, vol) - K * pow(dsc, (double)n_i); MATRIX_GET(this_mat, x_i, y_i, M) = P; } else { MATRIX_GET(this_mat, x_i, y_i, M) = 0.0; // put upper bound } } else if (y_i == 0) { if (op_type == constants::Call) { MATRIX_GET(this_mat, x_i, y_i, M) = 0.0; // call lower bound } else { P = K * pow(dsc, (double)n_i) - cuda_x2P(x_i, y_i * dy + Y_MIN, rho, vol); MATRIX_GET(this_mat, x_i, y_i, M) = P; } } else { double vol_dt_dy = vol * vol * dt / dy / dy; MATRIX_GET(this_mat, x_i, y_i, M) = 0.5 * (MATRIX_GET(last_mat, x_i, y_i + 1, M) + MATRIX_GET(last_mat, x_i, y_i - 1, M)) * vol_dt_dy + MATRIX_GET(last_mat, x_i, y_i, M) * (1 - vol_dt_dy); } } __global__ void cuda_explicit_step2(double* this_mat, double* last_mat, size_t x_i, size_t n_i, size_t M, double r, constants::OptionType op_type, double dt, double dy, double Y_MIN, double kappa, double theta, double rho, double vol, double K) { size_t y_i = blockDim.x * blockIdx.x + threadIdx.x; if (y_i >= M) { return; } // notice that P is a monotonic increasing function of x and y, so we set boundary condition according to the option value, which is a function of P. 
double dsc = 1 / (1 + r * dt), P = 0.0; if (y_i == M - 1) { if (op_type == constants::Call) { P = cuda_x2P(x_i, y_i * dy + Y_MIN, rho, vol) - K * pow(dsc, (double)n_i); MATRIX_GET(this_mat, x_i, y_i, M) = P; } else { MATRIX_GET(this_mat, x_i, y_i, M) = 0.0; // put upper bound } } else if (y_i == 0) { if (op_type == constants::Call) { MATRIX_GET(this_mat, x_i, y_i, M) = 0.0; // call lower bound } else { P = K * pow(dsc, (double)n_i) - cuda_x2P(x_i, y_i * dy + Y_MIN, rho, vol); MATRIX_GET(this_mat, x_i, y_i, M) = P; } } else { double k_theta = kappa * (theta - Y_MIN - dy * y_i), dt_dy = dt / dy; MATRIX_GET(this_mat, x_i, y_i, M) = MATRIX_GET(last_mat, x_i, y_i, M) + MAX(0.0, k_theta) * dt_dy * (MATRIX_GET(last_mat, x_i, y_i + 1, M) - MATRIX_GET(last_mat, x_i, y_i, M)) - MAX(0.0, -k_theta) * dt_dy * (MATRIX_GET(last_mat, x_i, y_i, M) - MATRIX_GET(last_mat, x_i, y_i - 1, M)); } } __global__ void cuda_implicit_step(double* this_mat, double* last_mat, double* c_vec, size_t n_i, size_t M, double r, constants::OptionType op_type, double dt, double dx, double dy, double Y_MIN, double rho, double kappa, double theta, double vol, double K) { size_t y_i = blockDim.x * blockIdx.x + threadIdx.x; if (y_i >= M) { return; } double dsc = 1 / (1 + r * dt), P = 0.0; c_vec += y_i * M; double y = Y_MIN + y_i * dy, dt_dx = dt / dx / 2, ey_rho_dt_dx = exp(y) * (1 - rho * rho) * dt / dx / dx; double rho_term = rho * exp(0.5 * y) * (kappa * (theta - y) + 0.25 * vol * vol) / vol; //the matrix is a constant matrix double a_i = -0.5 * ey_rho_dt_dx - (r - 0.5 * exp(y) - rho_term) * dt_dx; double b_i = 1 + r * dt + ey_rho_dt_dx; double c_i = -0.5 * ey_rho_dt_dx + (r - 0.5 * exp(y) - rho_term) * dt_dx; // solve for this column c_vec[0] = c_i / b_i; FOR_LOOP(i, 1, M - 1) { c_vec[i] = c_i / (b_i - a_i * c_vec[i - 1]); } MATRIX_GET(last_mat, 0, y_i, M) = MATRIX_GET(last_mat, 0, y_i, M) / b_i; FOR_LOOP(i, 1, M) { MATRIX_GET(last_mat, i, y_i, M) = (MATRIX_GET(last_mat, i, y_i, M) - a_i * 
MATRIX_GET(last_mat, i - 1, y_i, M)) / (b_i - a_i * c_vec[i - 1]); } MATRIX_GET(this_mat, M - 1, y_i, M) = MATRIX_GET(last_mat, M - 1, y_i, M); for (long int i = M - 2; i >= 0; --i) { MATRIX_GET(this_mat, i, y_i, M) = MATRIX_GET(last_mat, i, y_i, M) - c_vec[i] * MATRIX_GET(this_mat, i + 1, y_i, M); } } __global__ void cuda_early_exercise(double* this_mat, size_t x_i, size_t M, constants::OptionType op_type, double dx, double X_MIN, double dy, double Y_MIN, double rho, double vol, double K) { size_t y_i = blockDim.x * blockIdx.x + threadIdx.x; if (y_i >= M) { return; } double P = cuda_x2P(X_MIN + dx * x_i, Y_MIN + dy * y_i, rho, vol), itsc = op_type == constants::Call ? P - K : K - P; MATRIX_GET(this_mat, x_i, y_i, M) = MAX(MATRIX_GET(this_mat, x_i, y_i, M), itsc); } double cuda_get_price_stochastic_vol(double P0, double y0, double K, double r, double vol, double tao, double theta, double rho, double kappa, constants::OptionType op_type, size_t M, size_t N, double P_MIN, double P_MAX, double Y_MIN, double Y_MAX) { cudaSetDevice(0); double *d_value_mat0 = nullptr, *d_value_mat1 = nullptr, *d_value_mat2 = nullptr; cudaMalloc((void**)d_value_mat0, sizeof(double) * M * M); cudaMalloc((void**)d_value_mat1, sizeof(double) * M * M); cudaMalloc((void**)d_value_mat2, sizeof(double) * M * M); double *value_mat2 = new double[M * M]; double *mat_lst[] = { d_value_mat0, d_value_mat1, d_value_mat2 }; // initialize parameters double dy = (Y_MAX - Y_MIN) / M; double dt = tao / N; double X_MIN = P2x(P_MAX, Y_MIN, rho, vol), X_MAX = P2x(P_MIN, Y_MAX, rho, vol), dx = (X_MAX - X_MIN) / M; double this_x = 0.0, P = 0.0; FOR_LOOP(x_i, 0, M) { this_x = X_MIN + dx * x_i; if (op_type == constants::Call) { FOR_LOOP(y_i, 0, M) { P = x2P(this_x, Y_MIN + dy * y_i, rho, vol) - K; MATRIX_GET(value_mat2, x_i, y_i, M) = MAX(P, 0.0); } } else { FOR_LOOP(y_i, 0, M) { P = K - x2P(this_x, Y_MIN + dy * y_i, rho, vol); MATRIX_GET(value_mat2, x_i, y_i, M) = MAX(P, 0.0); } } } cudaMemcpy(d_value_mat2, 
value_mat2, sizeof(double) * M * M, cudaMemcpyHostToDevice); // use parallel computing for backward deduction const size_t block_count = M / parameters::GPU_THREADS + 1; double* c_vec = nullptr; cudaMalloc((void**)&c_vec, sizeof(double) * M * M); for (size_t j = N - 1; j > 0; --j) { FOR_LOOP(x_i, 0, M) { //multi_map(parameters::CPU_THREAD_COUNT, M, explicit_step1, mat_lst[1], mat_lst[2], x_i, N - j, M, r, op_type, dt, dy, Y_MIN, rho, vol, K); cuda_explicit_step1 <<<block_count, parameters::GPU_THREADS>>> (mat_lst[1], mat_lst[2], x_i, N - j, M, r, op_type, dt, dy, Y_MIN, rho, vol, K); cudaDeviceSynchronize(); } FOR_LOOP(x_i, 0, M) { //multi_map(parameters::CPU_THREAD_COUNT, M, explicit_step2, mat_lst[0], mat_lst[1], x_i, N - j, M, r, op_type, dt, dy, Y_MIN, kappa, theta, rho, vol, K); cuda_explicit_step2 <<<block_count, parameters::GPU_THREADS>>> (mat_lst[0], mat_lst[1], x_i, N - j, M, r, op_type, dt, dy, Y_MIN, kappa, theta, rho, vol, K); cudaDeviceSynchronize(); } cuda_implicit_step <<<block_count, parameters::GPU_THREADS>>> (mat_lst[2], mat_lst[0], c_vec, N - j, M, r, op_type, dt, dx, dy, Y_MIN, rho, kappa, theta, vol, K); cudaDeviceSynchronize(); //multi_map(parameters::CPU_THREAD_COUNT, M, implicit_step, mat_lst[2], mat_lst[0], c_vec, N - j, M, r, op_type, dt, dx, dy, Y_MIN, rho, kappa, theta, vol, K); FOR_LOOP(x_i, 0, M) { cuda_early_exercise <<<block_count, parameters::GPU_THREADS>>> (mat_lst[2], x_i, M, op_type, dx, X_MIN, dy, Y_MIN, rho, vol, K); //multi_map(parameters::CPU_THREAD_COUNT, M, early_exercise, mat_lst[2], x_i, M, op_type, dx, X_MIN, dy, Y_MIN, rho, vol, K); cudaDeviceSynchronize(); } } cudaFree(c_vec); cudaFree(d_value_mat0); cudaFree(d_value_mat1); cudaMemcpy(value_mat2, d_value_mat2, sizeof(double) * M * M, cudaMemcpyDeviceToHost); cudaFree(d_value_mat2); //find the best fit of start double this_diff = 0.0, min_diff = NULL, rst = 0.0; this_x = 0.0; FOR_LOOP(x_i, 0, M) { this_x = X_MIN + x_i * dx; FOR_LOOP(y_i, 0, M) { this_diff = 
abs(x2P(this_x, Y_MIN + y_i * dy, rho, vol) - P0); if (min_diff == NULL || min_diff > this_diff) { rst = MATRIX_GET(value_mat2, x_i, y_i, M); min_diff = this_diff; } } } delete[] value_mat2; cudaDeviceReset(); return rst; }
04692fc23c10d2e20d2a786364dd24c0f8966672.hip
// !!! This is a file automatically generated by hipify!!! #include <primitiv/config.h> #include <random> #include <primitiv/core/error.h> #include <primitiv/devices/cuda16/device.h> #include <primitiv/internal/cuda/utils.h> namespace { // Helper to obtain a single integer representing the compute capability. constexpr std::uint32_t get_capability( std::uint32_t major, std::uint32_t minor) { return 10000 * major + minor; } } // namespace namespace primitiv { namespace devices { std::uint32_t CUDA16::num_devices() { int ret; CUDA_CALL(::hipGetDeviceCount(&ret)); return ret; } void CUDA16::assert_support(std::uint32_t device_id) { if (device_id >= num_devices()) { PRIMITIV_THROW_ERROR("Invalid device ID: " << device_id); } ::hipDeviceProp_t prop; CUDA_CALL(::hipGetDeviceProperties(&prop, device_id)); // Checks compute capability // NOTE(odashi): // At least following compute capabilities are required: // float <-> half conversion ............ 5.0 // Full support of typical operations ... 5.3 constexpr std::uint32_t MIN_CC_MAJOR = 5; constexpr std::uint32_t MIN_CC_MINOR = 0; constexpr std::uint32_t MIN_CC = ::get_capability(MIN_CC_MAJOR, MIN_CC_MINOR); const std::uint32_t dev_cc = ::get_capability(prop.major, prop.minor); if (dev_cc < MIN_CC) { PRIMITIV_THROW_ERROR( "CUDA Device " << device_id << " does not satisfy the " "minimum requirement of the compute capability: " << prop.major << '.' << prop.minor << " < " << MIN_CC_MAJOR << '.' << MIN_CC_MINOR << " (required by primitiv::devices::CUDA16)"); } // Checks other minimum requirements. #define CHECK_REQUIREMENT(name, value) \ { \ if (prop.name < (value)) { \ PRIMITIV_THROW_ERROR( \ "CUDA Device " << device_id \ << " does not satisfy the minimum requirement by primitiv. 
" \ << "property: " << #name << ", " \ << "value: " << prop.name << ", " \ << "required at least: " << (value)); \ } \ } #define CHECK_REQUIREMENT_VECTOR(name, index, value) \ { \ if (prop.name[index] < (value)) { \ PRIMITIV_THROW_ERROR( \ "CUDA16 Device " << device_id \ << " does not satisfy the minimum requirement by primitiv. " \ << "property: " << #name << "[" << #index << "], " \ << "value: " << prop.name[index] << ", " \ << "required at least: " << (value)); \ } \ } CHECK_REQUIREMENT(totalGlobalMem, 1ull * (1ull << 30)); CHECK_REQUIREMENT(sharedMemPerBlock, 16ull * (1ull << 10)); CHECK_REQUIREMENT(maxThreadsPerBlock, 256); CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 0, 256); CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 1, 16); CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 2, 1); CHECK_REQUIREMENT_VECTOR(maxGridSize, 0, 32767); CHECK_REQUIREMENT_VECTOR(maxGridSize, 1, 32767); CHECK_REQUIREMENT_VECTOR(maxGridSize, 2, 32767); #undef CHECK_REQUIREMENT #undef CHECK_REQUIREMENT_VECTOR } void CUDA16::initialize() { assert_support(dev_id_); // Retrieves device properties. ::hipDeviceProp_t prop; CUDA_CALL(::hipGetDeviceProperties(&prop, dev_id_)); // Calculates size of dims to be used in CUDA kernels. dim1_x_ = 1; while (dim1_x_ < 1024 && dim1_x_ < static_cast<std::uint32_t>(prop.maxThreadsPerBlock)) { dim1_x_ <<= 1; } dim2_y_ = dim1_x_; dim2_x_ = 1; while (dim2_x_ < dim2_y_) { dim2_x_ <<= 1; dim2_y_ >>= 1; } max_batch_ = prop.maxGridSize[1]; // Initializes additional libraries state_.reset(new cuda::InternalState(dev_id_, rng_seed_)); state_->prop = prop; // Initializes the device pointer for integer IDs. ids_ptr_ = state_->pool.allocate(sizeof(std::uint32_t) * max_batch_); // Check half operation support. 
support_half_ops_ = ::get_capability(prop.major, prop.minor) >= 50003; } CUDA16::CUDA16(std::uint32_t device_id, std::uint32_t rng_seed) : dev_id_(device_id) , rng_seed_(rng_seed) { initialize(); } CUDA16::CUDA16(std::uint32_t device_id) : CUDA16(device_id, std::random_device()()) {} CUDA16::~CUDA16() { // Nothing to do for now. } } // namespace devices } // namespace primitiv
04692fc23c10d2e20d2a786364dd24c0f8966672.cu
#include <primitiv/config.h> #include <random> #include <primitiv/core/error.h> #include <primitiv/devices/cuda16/device.h> #include <primitiv/internal/cuda/utils.h> namespace { // Helper to obtain a single integer representing the compute capability. constexpr std::uint32_t get_capability( std::uint32_t major, std::uint32_t minor) { return 10000 * major + minor; } } // namespace namespace primitiv { namespace devices { std::uint32_t CUDA16::num_devices() { int ret; CUDA_CALL(::cudaGetDeviceCount(&ret)); return ret; } void CUDA16::assert_support(std::uint32_t device_id) { if (device_id >= num_devices()) { PRIMITIV_THROW_ERROR("Invalid device ID: " << device_id); } ::cudaDeviceProp prop; CUDA_CALL(::cudaGetDeviceProperties(&prop, device_id)); // Checks compute capability // NOTE(odashi): // At least following compute capabilities are required: // float <-> half conversion ............ 5.0 // Full support of typical operations ... 5.3 constexpr std::uint32_t MIN_CC_MAJOR = 5; constexpr std::uint32_t MIN_CC_MINOR = 0; constexpr std::uint32_t MIN_CC = ::get_capability(MIN_CC_MAJOR, MIN_CC_MINOR); const std::uint32_t dev_cc = ::get_capability(prop.major, prop.minor); if (dev_cc < MIN_CC) { PRIMITIV_THROW_ERROR( "CUDA Device " << device_id << " does not satisfy the " "minimum requirement of the compute capability: " << prop.major << '.' << prop.minor << " < " << MIN_CC_MAJOR << '.' << MIN_CC_MINOR << " (required by primitiv::devices::CUDA16)"); } // Checks other minimum requirements. #define CHECK_REQUIREMENT(name, value) \ { \ if (prop.name < (value)) { \ PRIMITIV_THROW_ERROR( \ "CUDA Device " << device_id \ << " does not satisfy the minimum requirement by primitiv. 
" \ << "property: " << #name << ", " \ << "value: " << prop.name << ", " \ << "required at least: " << (value)); \ } \ } #define CHECK_REQUIREMENT_VECTOR(name, index, value) \ { \ if (prop.name[index] < (value)) { \ PRIMITIV_THROW_ERROR( \ "CUDA16 Device " << device_id \ << " does not satisfy the minimum requirement by primitiv. " \ << "property: " << #name << "[" << #index << "], " \ << "value: " << prop.name[index] << ", " \ << "required at least: " << (value)); \ } \ } CHECK_REQUIREMENT(totalGlobalMem, 1ull * (1ull << 30)); CHECK_REQUIREMENT(sharedMemPerBlock, 16ull * (1ull << 10)); CHECK_REQUIREMENT(maxThreadsPerBlock, 256); CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 0, 256); CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 1, 16); CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 2, 1); CHECK_REQUIREMENT_VECTOR(maxGridSize, 0, 32767); CHECK_REQUIREMENT_VECTOR(maxGridSize, 1, 32767); CHECK_REQUIREMENT_VECTOR(maxGridSize, 2, 32767); #undef CHECK_REQUIREMENT #undef CHECK_REQUIREMENT_VECTOR } void CUDA16::initialize() { assert_support(dev_id_); // Retrieves device properties. ::cudaDeviceProp prop; CUDA_CALL(::cudaGetDeviceProperties(&prop, dev_id_)); // Calculates size of dims to be used in CUDA kernels. dim1_x_ = 1; while (dim1_x_ < 1024 && dim1_x_ < static_cast<std::uint32_t>(prop.maxThreadsPerBlock)) { dim1_x_ <<= 1; } dim2_y_ = dim1_x_; dim2_x_ = 1; while (dim2_x_ < dim2_y_) { dim2_x_ <<= 1; dim2_y_ >>= 1; } max_batch_ = prop.maxGridSize[1]; // Initializes additional libraries state_.reset(new cuda::InternalState(dev_id_, rng_seed_)); state_->prop = prop; // Initializes the device pointer for integer IDs. ids_ptr_ = state_->pool.allocate(sizeof(std::uint32_t) * max_batch_); // Check half operation support. 
support_half_ops_ = ::get_capability(prop.major, prop.minor) >= 50003; } CUDA16::CUDA16(std::uint32_t device_id, std::uint32_t rng_seed) : dev_id_(device_id) , rng_seed_(rng_seed) { initialize(); } CUDA16::CUDA16(std::uint32_t device_id) : CUDA16(device_id, std::random_device()()) {} CUDA16::~CUDA16() { // Nothing to do for now. } } // namespace devices } // namespace primitiv
017da8cce582b62ac7ff8d7bdb4ea12e3de52c6b.hip
// !!! This is a file automatically generated by hipify!!! #include "host_device_vector.h" #include "bitfield.h" template <typename T> class HostDeviceVectorImpl { public: HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) { if (device >= 0) { gpu_access_ = GPUAccess::kWrite; SetDevice(); data_d_->resize(size, v); } else { data_h_.resize(size, v); } } // Initializer can be std::vector<T> or std::initializer_list<T> template <class Initializer> HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) { if (device >= 0) { gpu_access_ = GPUAccess::kWrite; LazyResizeDevice(init.size()); Copy(init); } else { data_h_ = init; } } HostDeviceVectorImpl(HostDeviceVectorImpl<T>&& that) : device_{ that.device_ }, data_h_{ std::move(that.data_h_) }, data_d_{ std::move(that.data_d_) }, gpu_access_{ that.gpu_access_ } {} ~HostDeviceVectorImpl() { if (device_ >= 0) { SetDevice(); } } size_t Size() const { return HostCanRead() ? data_h_.size() : data_d_ ? data_d_->size() : 0; } int DeviceIdx() const { return device_; } T* DevicePointer() { LazySyncDevice(GPUAccess::kWrite); return data_d_->data().get(); } const T* ConstDevicePointer() { LazySyncDevice(GPUAccess::kRead); return data_d_->data().get(); } common::Span<T> DeviceSpan() { LazySyncDevice(GPUAccess::kWrite); return { data_d_->data().get(), Size() }; } common::Span<const T> ConstDeviceSpan() { LazySyncDevice(GPUAccess::kRead); return { data_d_->data().get(), Size() }; } void Fill(T v) { // NOLINT if (HostCanWrite()) { std::fill(data_h_.begin(), data_h_.end(), v); } else { gpu_access_ = GPUAccess::kWrite; SetDevice(); auto s_data = common::ToSpan(*data_d_); common::LaunchN(data_d_->size(), [=] XGBOOST_DEVICE(size_t i) { s_data[i] = v; }); } } void Copy(HostDeviceVectorImpl<T>* other) { //CHECK_EQ(Size(), other->Size()); SetDevice(other->device_); // Data is on host. 
if (HostCanWrite() && other->HostCanWrite()) { std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); return; } SetDevice(); CopyToDevice(other); } void Copy(const std::vector<T>& other) { //CHECK_EQ(Size(), other.size()); if (HostCanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { CopyToDevice(other.data()); } } void Copy(std::initializer_list<T> other) { //CHECK_EQ(Size(), other.size()); if (HostCanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { CopyToDevice(other.begin()); } } void Extend(HostDeviceVectorImpl* other) { auto ori_size = this->Size(); this->Resize(ori_size + other->Size(), T()); if (HostCanWrite() && other->HostCanRead()) { auto& h_vec = this->HostVector(); auto& other_vec = other->HostVector(); //CHECK_EQ(h_vec.size(), ori_size + other->Size()); std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size); } else { auto ptr = other->ConstDevicePointer(); SetDevice(); //CHECK_EQ(this->DeviceIdx(), other->DeviceIdx()); cuda_handler(hipMemcpyAsync(this->DevicePointer() + ori_size, ptr, other->Size() * sizeof(T), hipMemcpyDeviceToDevice)); } } std::vector<T>& HostVector() { LazySyncHost(GPUAccess::kNone); return data_h_; } const std::vector<T>& ConstHostVector() { LazySyncHost(GPUAccess::kRead); return data_h_; } void SetDevice(int device) { if (device_ == device) { return; } if (device_ >= 0) { LazySyncHost(GPUAccess::kNone); } device_ = device; if (device_ >= 0) { LazyResizeDevice(data_h_.size()); } } void Resize(size_t new_size, T v) { if (new_size == Size()) { return; } if ((Size() == 0 && device_ >= 0) || (DeviceCanWrite() && device_ >= 0)) { // fast on-device resize gpu_access_ = GPUAccess::kWrite; SetDevice(); data_d_->resize(new_size, v); } else { // resize on host LazySyncHost(GPUAccess::kNone); data_h_.resize(new_size, v); } } void LazySyncHost(GPUAccess access) { if (HostCanAccess(access)) { return; } if (HostCanRead()) { // data is present, just need to 
deny access to the device gpu_access_ = access; return; } gpu_access_ = access; if (data_h_.size() != data_d_->size()) { data_h_.resize(data_d_->size()); } SetDevice(); cuda_handler(hipMemcpy(data_h_.data(), data_d_->data().get(), data_d_->size() * sizeof(T), hipMemcpyDeviceToHost)); } void LazySyncDevice(GPUAccess access) { if (DeviceCanAccess(access)) { return; } if (DeviceCanRead()) { // deny read to the host gpu_access_ = access; return; } // data is on the host LazyResizeDevice(data_h_.size()); SetDevice(); cuda_handler(hipMemcpyAsync(data_d_->data().get(), data_h_.data(), data_d_->size() * sizeof(T), hipMemcpyHostToDevice)); gpu_access_ = access; } bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; } bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); } bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); } bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; } bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); } bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); } GPUAccess Access() const { return gpu_access_; } private: int device_{ -1 }; std::vector<T> data_h_{}; std::unique_ptr<thrust::device_vector<T>> data_d_{}; GPUAccess gpu_access_{ GPUAccess::kNone }; void CopyToDevice(HostDeviceVectorImpl* other) { if (other->HostCanWrite()) { CopyToDevice(other->data_h_.data()); } else { LazyResizeDevice(Size()); gpu_access_ = GPUAccess::kWrite; SetDevice(); cuda_handler(hipMemcpyAsync(data_d_->data().get(), other->data_d_->data().get(), data_d_->size() * sizeof(T), hipMemcpyDefault)); } } void CopyToDevice(const T* begin) { LazyResizeDevice(Size()); gpu_access_ = GPUAccess::kWrite; SetDevice(); cuda_handler(hipMemcpyAsync(data_d_->data().get(), begin, data_d_->size() * sizeof(T), hipMemcpyDefault)); } void LazyResizeDevice(size_t new_size) { if (data_d_ && new_size == data_d_->size()) { return; } SetDevice(); data_d_->resize(new_size); } void 
SetDevice() { //CHECK_GE(device_, 0); if (cudaSetDeviceHandler == nullptr) { //std::cout << device_ << std::endl; //cuda_handler(hipSetDevice(device_)); cuda_handler(hipSetDevice(0)); } else { (*cudaSetDeviceHandler)(device_); } if (!data_d_) { data_d_.reset(new thrust::device_vector<T>); } } }; template<typename T> HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device) : impl_(new HostDeviceVectorImpl<T>(size, v, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device) : impl_(new HostDeviceVectorImpl<T>(init, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device) : impl_(new HostDeviceVectorImpl<T>(init, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(HostDeviceVector<T>&& other) : impl_(new HostDeviceVectorImpl<T>(std::move(*other.impl_))) {} template <typename T> HostDeviceVector<T>& HostDeviceVector<T>::operator=(HostDeviceVector<T>&& other) { if (this == &other) { return *this; } std::unique_ptr<HostDeviceVectorImpl<T>> new_impl( new HostDeviceVectorImpl<T>(std::move(*other.impl_))); delete impl_; impl_ = new_impl.release(); return *this; } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { delete impl_; impl_ = nullptr; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); } template <typename T> T* HostDeviceVector<T>::DevicePointer() { return impl_->DevicePointer(); } template <typename T> const T* HostDeviceVector<T>::ConstDevicePointer() const { return impl_->ConstDevicePointer(); } template <typename T> common::Span<T> HostDeviceVector<T>::DeviceSpan() { return impl_->DeviceSpan(); } template <typename T> common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const { return impl_->ConstDeviceSpan(); } template <typename T> void HostDeviceVector<T>::Fill(T 
v) { impl_->Fill(v); } template <typename T> void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) { impl_->Copy(other.impl_); } template <typename T> void HostDeviceVector<T>::Copy(const std::vector<T>& other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Copy(std::initializer_list<T> other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Extend(HostDeviceVector const& other) { impl_->Extend(other.impl_); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const { return impl_->ConstHostVector(); } template <typename T> bool HostDeviceVector<T>::HostCanRead() const { return impl_->HostCanRead(); } template <typename T> bool HostDeviceVector<T>::HostCanWrite() const { return impl_->HostCanWrite(); } template <typename T> bool HostDeviceVector<T>::DeviceCanRead() const { return impl_->DeviceCanRead(); } template <typename T> bool HostDeviceVector<T>::DeviceCanWrite() const { return impl_->DeviceCanWrite(); } template <typename T> GPUAccess HostDeviceVector<T>::DeviceAccess() const { return impl_->Access(); } template <typename T> void HostDeviceVector<T>::SetDevice(int device) const { impl_->SetDevice(device); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v) { impl_->Resize(new_size, v); } // explicit instantiations are required, as HostDeviceVector isn't header-only //template class HostDeviceVector<bst_float>; //template class HostDeviceVector<GradientPair>; template class HostDeviceVector<int32_t>; // bst_node_t template class HostDeviceVector<uint8_t>; //template class HostDeviceVector<FeatureType>; //template class HostDeviceVector<Entry>; template class HostDeviceVector<uint64_t>; // bst_row_t template class HostDeviceVector<uint32_t>; // bst_feature_t //template class HostDeviceVector<RegTree::Node>; //template class 
HostDeviceVector<RegTree::Segment>; //template class HostDeviceVector<RTreeNodeStat>; template class HostDeviceVector<LBitField64>; template class HostDeviceVector<float>;
017da8cce582b62ac7ff8d7bdb4ea12e3de52c6b.cu
#include "host_device_vector.h" #include "bitfield.h" template <typename T> class HostDeviceVectorImpl { public: HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) { if (device >= 0) { gpu_access_ = GPUAccess::kWrite; SetDevice(); data_d_->resize(size, v); } else { data_h_.resize(size, v); } } // Initializer can be std::vector<T> or std::initializer_list<T> template <class Initializer> HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) { if (device >= 0) { gpu_access_ = GPUAccess::kWrite; LazyResizeDevice(init.size()); Copy(init); } else { data_h_ = init; } } HostDeviceVectorImpl(HostDeviceVectorImpl<T>&& that) : device_{ that.device_ }, data_h_{ std::move(that.data_h_) }, data_d_{ std::move(that.data_d_) }, gpu_access_{ that.gpu_access_ } {} ~HostDeviceVectorImpl() { if (device_ >= 0) { SetDevice(); } } size_t Size() const { return HostCanRead() ? data_h_.size() : data_d_ ? data_d_->size() : 0; } int DeviceIdx() const { return device_; } T* DevicePointer() { LazySyncDevice(GPUAccess::kWrite); return data_d_->data().get(); } const T* ConstDevicePointer() { LazySyncDevice(GPUAccess::kRead); return data_d_->data().get(); } common::Span<T> DeviceSpan() { LazySyncDevice(GPUAccess::kWrite); return { data_d_->data().get(), Size() }; } common::Span<const T> ConstDeviceSpan() { LazySyncDevice(GPUAccess::kRead); return { data_d_->data().get(), Size() }; } void Fill(T v) { // NOLINT if (HostCanWrite()) { std::fill(data_h_.begin(), data_h_.end(), v); } else { gpu_access_ = GPUAccess::kWrite; SetDevice(); auto s_data = common::ToSpan(*data_d_); common::LaunchN(data_d_->size(), [=] XGBOOST_DEVICE(size_t i) { s_data[i] = v; }); } } void Copy(HostDeviceVectorImpl<T>* other) { //CHECK_EQ(Size(), other->Size()); SetDevice(other->device_); // Data is on host. 
if (HostCanWrite() && other->HostCanWrite()) { std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); return; } SetDevice(); CopyToDevice(other); } void Copy(const std::vector<T>& other) { //CHECK_EQ(Size(), other.size()); if (HostCanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { CopyToDevice(other.data()); } } void Copy(std::initializer_list<T> other) { //CHECK_EQ(Size(), other.size()); if (HostCanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { CopyToDevice(other.begin()); } } void Extend(HostDeviceVectorImpl* other) { auto ori_size = this->Size(); this->Resize(ori_size + other->Size(), T()); if (HostCanWrite() && other->HostCanRead()) { auto& h_vec = this->HostVector(); auto& other_vec = other->HostVector(); //CHECK_EQ(h_vec.size(), ori_size + other->Size()); std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size); } else { auto ptr = other->ConstDevicePointer(); SetDevice(); //CHECK_EQ(this->DeviceIdx(), other->DeviceIdx()); cuda_handler(cudaMemcpyAsync(this->DevicePointer() + ori_size, ptr, other->Size() * sizeof(T), cudaMemcpyDeviceToDevice)); } } std::vector<T>& HostVector() { LazySyncHost(GPUAccess::kNone); return data_h_; } const std::vector<T>& ConstHostVector() { LazySyncHost(GPUAccess::kRead); return data_h_; } void SetDevice(int device) { if (device_ == device) { return; } if (device_ >= 0) { LazySyncHost(GPUAccess::kNone); } device_ = device; if (device_ >= 0) { LazyResizeDevice(data_h_.size()); } } void Resize(size_t new_size, T v) { if (new_size == Size()) { return; } if ((Size() == 0 && device_ >= 0) || (DeviceCanWrite() && device_ >= 0)) { // fast on-device resize gpu_access_ = GPUAccess::kWrite; SetDevice(); data_d_->resize(new_size, v); } else { // resize on host LazySyncHost(GPUAccess::kNone); data_h_.resize(new_size, v); } } void LazySyncHost(GPUAccess access) { if (HostCanAccess(access)) { return; } if (HostCanRead()) { // data is present, just need 
to deny access to the device gpu_access_ = access; return; } gpu_access_ = access; if (data_h_.size() != data_d_->size()) { data_h_.resize(data_d_->size()); } SetDevice(); cuda_handler(cudaMemcpy(data_h_.data(), data_d_->data().get(), data_d_->size() * sizeof(T), cudaMemcpyDeviceToHost)); } void LazySyncDevice(GPUAccess access) { if (DeviceCanAccess(access)) { return; } if (DeviceCanRead()) { // deny read to the host gpu_access_ = access; return; } // data is on the host LazyResizeDevice(data_h_.size()); SetDevice(); cuda_handler(cudaMemcpyAsync(data_d_->data().get(), data_h_.data(), data_d_->size() * sizeof(T), cudaMemcpyHostToDevice)); gpu_access_ = access; } bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; } bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); } bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); } bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; } bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); } bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); } GPUAccess Access() const { return gpu_access_; } private: int device_{ -1 }; std::vector<T> data_h_{}; std::unique_ptr<thrust::device_vector<T>> data_d_{}; GPUAccess gpu_access_{ GPUAccess::kNone }; void CopyToDevice(HostDeviceVectorImpl* other) { if (other->HostCanWrite()) { CopyToDevice(other->data_h_.data()); } else { LazyResizeDevice(Size()); gpu_access_ = GPUAccess::kWrite; SetDevice(); cuda_handler(cudaMemcpyAsync(data_d_->data().get(), other->data_d_->data().get(), data_d_->size() * sizeof(T), cudaMemcpyDefault)); } } void CopyToDevice(const T* begin) { LazyResizeDevice(Size()); gpu_access_ = GPUAccess::kWrite; SetDevice(); cuda_handler(cudaMemcpyAsync(data_d_->data().get(), begin, data_d_->size() * sizeof(T), cudaMemcpyDefault)); } void LazyResizeDevice(size_t new_size) { if (data_d_ && new_size == data_d_->size()) { return; } SetDevice(); data_d_->resize(new_size); 
} void SetDevice() { //CHECK_GE(device_, 0); if (cudaSetDeviceHandler == nullptr) { //std::cout << device_ << std::endl; //cuda_handler(cudaSetDevice(device_)); cuda_handler(cudaSetDevice(0)); } else { (*cudaSetDeviceHandler)(device_); } if (!data_d_) { data_d_.reset(new thrust::device_vector<T>); } } }; template<typename T> HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device) : impl_(new HostDeviceVectorImpl<T>(size, v, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device) : impl_(new HostDeviceVectorImpl<T>(init, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device) : impl_(new HostDeviceVectorImpl<T>(init, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(HostDeviceVector<T>&& other) : impl_(new HostDeviceVectorImpl<T>(std::move(*other.impl_))) {} template <typename T> HostDeviceVector<T>& HostDeviceVector<T>::operator=(HostDeviceVector<T>&& other) { if (this == &other) { return *this; } std::unique_ptr<HostDeviceVectorImpl<T>> new_impl( new HostDeviceVectorImpl<T>(std::move(*other.impl_))); delete impl_; impl_ = new_impl.release(); return *this; } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { delete impl_; impl_ = nullptr; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); } template <typename T> T* HostDeviceVector<T>::DevicePointer() { return impl_->DevicePointer(); } template <typename T> const T* HostDeviceVector<T>::ConstDevicePointer() const { return impl_->ConstDevicePointer(); } template <typename T> common::Span<T> HostDeviceVector<T>::DeviceSpan() { return impl_->DeviceSpan(); } template <typename T> common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const { return impl_->ConstDeviceSpan(); } template <typename T> void 
HostDeviceVector<T>::Fill(T v) { impl_->Fill(v); } template <typename T> void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) { impl_->Copy(other.impl_); } template <typename T> void HostDeviceVector<T>::Copy(const std::vector<T>& other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Copy(std::initializer_list<T> other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Extend(HostDeviceVector const& other) { impl_->Extend(other.impl_); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const { return impl_->ConstHostVector(); } template <typename T> bool HostDeviceVector<T>::HostCanRead() const { return impl_->HostCanRead(); } template <typename T> bool HostDeviceVector<T>::HostCanWrite() const { return impl_->HostCanWrite(); } template <typename T> bool HostDeviceVector<T>::DeviceCanRead() const { return impl_->DeviceCanRead(); } template <typename T> bool HostDeviceVector<T>::DeviceCanWrite() const { return impl_->DeviceCanWrite(); } template <typename T> GPUAccess HostDeviceVector<T>::DeviceAccess() const { return impl_->Access(); } template <typename T> void HostDeviceVector<T>::SetDevice(int device) const { impl_->SetDevice(device); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v) { impl_->Resize(new_size, v); } // explicit instantiations are required, as HostDeviceVector isn't header-only //template class HostDeviceVector<bst_float>; //template class HostDeviceVector<GradientPair>; template class HostDeviceVector<int32_t>; // bst_node_t template class HostDeviceVector<uint8_t>; //template class HostDeviceVector<FeatureType>; //template class HostDeviceVector<Entry>; template class HostDeviceVector<uint64_t>; // bst_row_t template class HostDeviceVector<uint32_t>; // bst_feature_t //template class HostDeviceVector<RegTree::Node>; 
//template class HostDeviceVector<RegTree::Segment>; //template class HostDeviceVector<RTreeNodeStat>; template class HostDeviceVector<LBitField64>; template class HostDeviceVector<float>;
00b68acc6c48e79955e6506835c98dbde413a676.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> void checkErrors(hipError_t err, const char *msg) { if (err != hipSuccess) { fprintf(stderr, msg); fprintf(stderr, " [Erro CUDA: %s]\n", hipGetErrorString(err)); exit(-1); } } void compareResults(float *C1, float *C2, int numElements) { float epsilon = 0.00001; for(int i = 0; i < numElements; i ++) { if (abs(C1[i] - C2[i]) > epsilon) { printf("Comparao de resultados falhou\n"); exit(-1); } } printf("Comparao de resultados passou\n"); } void vecAddCPU(float *A, float *B, float *C, int numElements) { for(int i = 0; i < numElements; i ++) { C[i] = A[i] + B[i]; } } __global__ void vecAdd(float *A, float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } int main(int argc, char **argv) { int maxBlockSize = 1024; int numElements = 50000; int size = numElements * sizeof(float); printf("Alocando vetores no host\n"); float *h_A = (float *) malloc(size); float *h_B = (float *) malloc(size); float *h_C = (float *) malloc(size); if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Falha em alocar vetores no host\n"); exit(-1); } printf("Inicializando vetores no host\n"); for (int i = 0; i < numElements; ++i) { h_A[i] = rand() / (float) (RAND_MAX); h_B[i] = rand() / (float) (RAND_MAX); } float *d_A; float *d_B; float *d_C; printf("Alocando vetores no device\n"); hipMalloc(&d_A, size); hipMalloc(&d_B, size); hipMalloc(&d_C, size); checkErrors(hipGetLastError(), "Malloc nos vetores do device"); printf("Copiando memria do host para o device\n"); hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); checkErrors(hipGetLastError(), "Cpia para o device"); /* Precisamos de um nmero inteiro de blocks, mesmo se "numElements" no for divisvel por "maxBlockSize" */ int numBlocks = (numElements + maxBlockSize - 1) / 
maxBlockSize; printf("Lanando um kernel com %d threads, com %d blocks de tamanho %d\n", numBlocks * maxBlockSize, numBlocks, maxBlockSize); hipLaunchKernelGGL(( vecAdd), dim3(numBlocks), dim3(maxBlockSize), 0, 0, d_A, d_B, d_C, numElements); checkErrors(hipGetLastError(), "Lanamento do kernel"); printf("Copiando memria do device para o host\n"); hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); checkErrors(hipGetLastError(), "Cpia para o host"); printf("Alocando vetor de teste no host\n"); float *h_D = (float *) malloc(size); if (h_D == NULL) { fprintf(stderr, "Falha em alocar vetores no host\n"); exit(-1); } printf("Lanando clculo na CPU\n"); vecAddCPU(h_A, h_B, h_D, numElements); printf("Comparando resultados na CPU e na GPU\n"); compareResults(h_C, h_D, numElements); printf("Liberando memria\n"); hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(h_C); free(h_D); printf("Fim\n"); return 0; }
00b68acc6c48e79955e6506835c98dbde413a676.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> void checkErrors(cudaError_t err, const char *msg) { if (err != cudaSuccess) { fprintf(stderr, msg); fprintf(stderr, " [Erro CUDA: %s]\n", cudaGetErrorString(err)); exit(-1); } } void compareResults(float *C1, float *C2, int numElements) { float epsilon = 0.00001; for(int i = 0; i < numElements; i ++) { if (abs(C1[i] - C2[i]) > epsilon) { printf("Comparação de resultados falhou\n"); exit(-1); } } printf("Comparação de resultados passou\n"); } void vecAddCPU(float *A, float *B, float *C, int numElements) { for(int i = 0; i < numElements; i ++) { C[i] = A[i] + B[i]; } } __global__ void vecAdd(float *A, float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } int main(int argc, char **argv) { int maxBlockSize = 1024; int numElements = 50000; int size = numElements * sizeof(float); printf("Alocando vetores no host\n"); float *h_A = (float *) malloc(size); float *h_B = (float *) malloc(size); float *h_C = (float *) malloc(size); if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Falha em alocar vetores no host\n"); exit(-1); } printf("Inicializando vetores no host\n"); for (int i = 0; i < numElements; ++i) { h_A[i] = rand() / (float) (RAND_MAX); h_B[i] = rand() / (float) (RAND_MAX); } float *d_A; float *d_B; float *d_C; printf("Alocando vetores no device\n"); cudaMalloc(&d_A, size); cudaMalloc(&d_B, size); cudaMalloc(&d_C, size); checkErrors(cudaGetLastError(), "Malloc nos vetores do device"); printf("Copiando memória do host para o device\n"); cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); checkErrors(cudaGetLastError(), "Cópia para o device"); /* Precisamos de um número inteiro de blocks, mesmo se "numElements" não for divisível por "maxBlockSize" */ int numBlocks = (numElements + maxBlockSize - 1) / maxBlockSize; printf("Lançando um kernel com %d threads, com 
%d blocks de tamanho %d\n", numBlocks * maxBlockSize, numBlocks, maxBlockSize); vecAdd<<<numBlocks, maxBlockSize>>>(d_A, d_B, d_C, numElements); checkErrors(cudaGetLastError(), "Lançamento do kernel"); printf("Copiando memória do device para o host\n"); cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); checkErrors(cudaGetLastError(), "Cópia para o host"); printf("Alocando vetor de teste no host\n"); float *h_D = (float *) malloc(size); if (h_D == NULL) { fprintf(stderr, "Falha em alocar vetores no host\n"); exit(-1); } printf("Lançando cálculo na CPU\n"); vecAddCPU(h_A, h_B, h_D, numElements); printf("Comparando resultados na CPU e na GPU\n"); compareResults(h_C, h_D, numElements); printf("Liberando memória\n"); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(h_C); free(h_D); printf("Fim\n"); return 0; }
822e827e464424f6ed5105eca69ad701c242cdfb.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2019 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <unistd.h> #include <algorithm> #include <complex> #include <limits> #include <string> #include <custatevec.h> #include "../lib/circuit_qsim_parser.h" #include "../lib/formux.h" #include "../lib/fuser_mqubit.h" #include "../lib/gates_qsim.h" #include "../lib/io_file.h" #include "../lib/run_qsim.h" #include "../lib/simulator_custatevec.h" #include "../lib/util_custatevec.h" struct Options { std::string circuit_file; unsigned maxtime = std::numeric_limits<unsigned>::max(); unsigned seed = 1; unsigned max_fused_size = 2; unsigned verbosity = 0; }; Options GetOptions(int argc, char* argv[]) { constexpr char usage[] = "usage:\n ./qsim_base -c circuit -d maxtime " "-s seed -f max_fused_size -v verbosity\n"; Options opt; int k; while ((k = getopt(argc, argv, "c:d:s:f:v:")) != -1) { switch (k) { case 'c': opt.circuit_file = optarg; break; case 'd': opt.maxtime = std::atoi(optarg); break; case 's': opt.seed = std::atoi(optarg); break; case 'f': opt.max_fused_size = std::atoi(optarg); break; case 'v': opt.verbosity = std::atoi(optarg); break; default: qsim::IO::errorf(usage); exit(1); } } return opt; } bool ValidateOptions(const Options& opt) { if (opt.circuit_file.empty()) { qsim::IO::errorf("circuit file is not provided.\n"); return false; } return true; } template <typename StateSpace, typename State> void 
PrintAmplitudes( unsigned num_qubits, const StateSpace& state_space, const State& state) { static constexpr char const* bits[8] = { "000", "001", "010", "011", "100", "101", "110", "111", }; uint64_t size = ::min(uint64_t{8}, uint64_t{1} << num_qubits); unsigned s = 3 - ::min(unsigned{3}, num_qubits); for (uint64_t i = 0; i < size; ++i) { auto a = state_space.GetAmpl(state, i); qsim::IO::messagef("%s:%16.8g%16.8g%16.8g\n", bits[i] + s, std::real(a), std::imag(a), std::norm(a)); } } int main(int argc, char* argv[]) { using namespace qsim; auto opt = GetOptions(argc, argv); if (!ValidateOptions(opt)) { return 1; } using fp_type = float; Circuit<GateQSim<fp_type>> circuit; if (!CircuitQsimParser<IOFile>::FromFile(opt.maxtime, opt.circuit_file, circuit)) { return 1; } struct Factory { using Simulator = qsim::SimulatorCuStateVec<fp_type>; using StateSpace = Simulator::StateSpace; Factory() { ErrorCheck(hipblasCreate(&cublas_handle)); ErrorCheck(custatevecCreate(&custatevec_handle)); } ~Factory() { ErrorCheck(hipblasDestroy(cublas_handle)); ErrorCheck(custatevecDestroy(custatevec_handle)); } StateSpace CreateStateSpace() const { return StateSpace(cublas_handle, custatevec_handle); } Simulator CreateSimulator() const { return Simulator(cublas_handle, custatevec_handle); } hipblasHandle_t cublas_handle; custatevecHandle_t custatevec_handle; }; using Simulator = Factory::Simulator; using StateSpace = Simulator::StateSpace; using State = StateSpace::State; using Fuser = MultiQubitGateFuser<IO, GateQSim<fp_type>>; using Runner = QSimRunner<IO, Fuser, Factory>; Factory factory; StateSpace state_space = factory.CreateStateSpace(); State state = state_space.Create(circuit.num_qubits); if (state_space.IsNull(state)) { IO::errorf("not enough memory: is the number of qubits too large?\n"); return 1; } state_space.SetStateZero(state); Runner::Parameter param; param.max_fused_size = opt.max_fused_size; param.seed = opt.seed; param.verbosity = opt.verbosity; if (Runner::Run(param, 
factory, circuit, state)) { PrintAmplitudes(circuit.num_qubits, state_space, state); } return 0; }
822e827e464424f6ed5105eca69ad701c242cdfb.cu
// Copyright 2019 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <unistd.h> #include <algorithm> #include <complex> #include <limits> #include <string> #include <custatevec.h> #include "../lib/circuit_qsim_parser.h" #include "../lib/formux.h" #include "../lib/fuser_mqubit.h" #include "../lib/gates_qsim.h" #include "../lib/io_file.h" #include "../lib/run_qsim.h" #include "../lib/simulator_custatevec.h" #include "../lib/util_custatevec.h" struct Options { std::string circuit_file; unsigned maxtime = std::numeric_limits<unsigned>::max(); unsigned seed = 1; unsigned max_fused_size = 2; unsigned verbosity = 0; }; Options GetOptions(int argc, char* argv[]) { constexpr char usage[] = "usage:\n ./qsim_base -c circuit -d maxtime " "-s seed -f max_fused_size -v verbosity\n"; Options opt; int k; while ((k = getopt(argc, argv, "c:d:s:f:v:")) != -1) { switch (k) { case 'c': opt.circuit_file = optarg; break; case 'd': opt.maxtime = std::atoi(optarg); break; case 's': opt.seed = std::atoi(optarg); break; case 'f': opt.max_fused_size = std::atoi(optarg); break; case 'v': opt.verbosity = std::atoi(optarg); break; default: qsim::IO::errorf(usage); exit(1); } } return opt; } bool ValidateOptions(const Options& opt) { if (opt.circuit_file.empty()) { qsim::IO::errorf("circuit file is not provided.\n"); return false; } return true; } template <typename StateSpace, typename State> void PrintAmplitudes( unsigned num_qubits, const StateSpace& 
state_space, const State& state) { static constexpr char const* bits[8] = { "000", "001", "010", "011", "100", "101", "110", "111", }; uint64_t size = std::min(uint64_t{8}, uint64_t{1} << num_qubits); unsigned s = 3 - std::min(unsigned{3}, num_qubits); for (uint64_t i = 0; i < size; ++i) { auto a = state_space.GetAmpl(state, i); qsim::IO::messagef("%s:%16.8g%16.8g%16.8g\n", bits[i] + s, std::real(a), std::imag(a), std::norm(a)); } } int main(int argc, char* argv[]) { using namespace qsim; auto opt = GetOptions(argc, argv); if (!ValidateOptions(opt)) { return 1; } using fp_type = float; Circuit<GateQSim<fp_type>> circuit; if (!CircuitQsimParser<IOFile>::FromFile(opt.maxtime, opt.circuit_file, circuit)) { return 1; } struct Factory { using Simulator = qsim::SimulatorCuStateVec<fp_type>; using StateSpace = Simulator::StateSpace; Factory() { ErrorCheck(cublasCreate(&cublas_handle)); ErrorCheck(custatevecCreate(&custatevec_handle)); } ~Factory() { ErrorCheck(cublasDestroy(cublas_handle)); ErrorCheck(custatevecDestroy(custatevec_handle)); } StateSpace CreateStateSpace() const { return StateSpace(cublas_handle, custatevec_handle); } Simulator CreateSimulator() const { return Simulator(cublas_handle, custatevec_handle); } cublasHandle_t cublas_handle; custatevecHandle_t custatevec_handle; }; using Simulator = Factory::Simulator; using StateSpace = Simulator::StateSpace; using State = StateSpace::State; using Fuser = MultiQubitGateFuser<IO, GateQSim<fp_type>>; using Runner = QSimRunner<IO, Fuser, Factory>; Factory factory; StateSpace state_space = factory.CreateStateSpace(); State state = state_space.Create(circuit.num_qubits); if (state_space.IsNull(state)) { IO::errorf("not enough memory: is the number of qubits too large?\n"); return 1; } state_space.SetStateZero(state); Runner::Parameter param; param.max_fused_size = opt.max_fused_size; param.seed = opt.seed; param.verbosity = opt.verbosity; if (Runner::Run(param, factory, circuit, state)) { 
PrintAmplitudes(circuit.num_qubits, state_space, state); } return 0; }
63a0b62b992b359104eef0894f7df78b635c5e94.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe9 { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { 
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> 
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), 
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), 
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void 
caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe9
63a0b62b992b359104eef0894f7df78b635c5e94.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe9 { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { 
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // 
NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template 
<typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // 
NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void 
caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe9
b99816c0a02489b5b3ee4e5db1d544222ab97238.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <math.h> #include <hip/hip_runtime.h> // function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; // 1M elements float *x = new float[N]; float *y = new float[N]; hipMallocManaged(&x,N*sizeof(float)); hipMallocManaged(&y,N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the CPU hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); // Free memory return 0; }
b99816c0a02489b5b3ee4e5db1d544222ab97238.cu
#include <iostream> #include <math.h> #include <cuda.h> // function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; // 1M elements float *x = new float[N]; float *y = new float[N]; cudaMallocManaged(&x,N*sizeof(float)); cudaMallocManaged(&y,N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the CPU add<<<1,1>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); // Free memory return 0; }
f80d5728b0946d2086229c97d181bf8107ef785b.hip
// !!! This is a file automatically generated by hipify!!! #include "../inc/kernel.h" #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <cmath> #include <stdio.h> /* * This code is configured for a GTX 1060M with 1024 CUDA cores */ #define HEIGHT 1080 #define WIDTH 1920 #define SIZE (3 * WIDTH * HEIGHT) unsigned char *gaussian; unsigned char *finished; __device__ __forceinline__ int index(int x, int y) { if (x >= WIDTH || y >= HEIGHT || x < 0 || y < 0) return -1; return x + y * WIDTH; } __device__ __forceinline__ int wrap(int val, int limit) { if (val < 0) return limit + val; return val % limit; } __global__ void greyScale(unsigned char * frame, unsigned char*greyBuffer) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= WIDTH || y >= HEIGHT) return; int pindex = 3*index(x,y); int sum = (0.2126f * ((float)frame[pindex])) + (0.7152f * ((float)frame[pindex+1])) + (0.0722f * ((float)frame[pindex+2])); greyBuffer[pindex] = sum; greyBuffer[pindex+1] = sum; greyBuffer[pindex+2] = sum; } __global__ void sobelOp(unsigned char * frame, unsigned char * sobel) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= WIDTH || y >= HEIGHT) return; char GX[9] = { 1, 0, -1, 2, 0, -2, 1, 0, -1 }; char GY[9] = { 1, 2, 1, 0, 0, 0, -1,-2,-1 }; for(int p = 0; p < 3; p++){ int xDir = 0; int yDir = 0; for (int r = -1; r < 2; r++) { int row = y + r; row = wrap(row, HEIGHT); for (int c = -1; c < 2; c++) { int col = x + c; col = wrap(col, WIDTH); int pindex = 3*index(col, row); xDir += frame[pindex+p] * GX[(1 - c) + (1 - r) * 3]; yDir += frame[pindex+p] * GY[(1 - c) + (1 - r) * 3]; } } int pindex = 3*index(x, y); int mag = (int)sqrt((double) xDir * xDir + yDir * yDir); mag = max(0, min(255, mag)); sobel[pindex + p] = mag; } } __global__ void gaussian_filter(const unsigned char *gaussian_input, unsigned 
char *gaussian_output) { const unsigned int col = threadIdx.x + blockIdx.x * blockDim.x; const unsigned int row = threadIdx.y + blockIdx.y * blockDim.y; if (col >= WIDTH || row >= HEIGHT) return; char gaussian_kernel[25] = { 1, 4, 6, 4, 1, 4, 16, 24, 16, 4, 6, 24, 36, 24, 6, 4, 16, 24, 16, 4, 1, 4, 6, 4, 1 }; for(int p = 0; p < 3; p++){ int blur = 0; for(int i = -2; i < 3; i++) { for(int j = -2; j < 3; j++) { const unsigned int y = wrap(row+i, HEIGHT); const unsigned int x = wrap(col+j, WIDTH); char w = gaussian_kernel[(2-j) + (2-i) * 5]; int pindex = 3*index(x,y); blur += w * gaussian_input[pindex+p]; } } blur = min(255, blur/256); int pindex = 3*index(col,row); gaussian_output[pindex+p] = blur; } } void filter(unsigned char* frame, FilterType filtertype) { dim3 thread(32, 32); dim3 block((WIDTH+31)/32, (HEIGHT+31)/32); hipMemcpy(gaussian, frame, SIZE, hipMemcpyHostToDevice); switch(filtertype){ case GREY:{ hipLaunchKernelGGL(( greyScale), dim3(block), dim3(thread), 0, 0, gaussian, finished); break; } case SOBEL:{ hipLaunchKernelGGL(( sobelOp), dim3(block), dim3(thread), 0, 0, gaussian, finished); break; } case GAUSSIAN:{ for(int i = 0; i < 9; i++){ hipLaunchKernelGGL(( gaussian_filter), dim3(block), dim3(thread), 0, 0, gaussian, finished); hipMemcpy(gaussian, finished, SIZE, hipMemcpyDeviceToDevice); } hipLaunchKernelGGL(( gaussian_filter), dim3(block), dim3(thread), 0, 0, gaussian, finished); break; } } hipMemcpy(frame, finished, SIZE, hipMemcpyDeviceToHost); } #include <stdio.h> void initCuda() { hipMalloc(&gaussian, SIZE); hipMalloc(&finished, SIZE); } void freeCuda() { hipFree(gaussian); hipFree(finished); }
f80d5728b0946d2086229c97d181bf8107ef785b.cu
#include "../inc/kernel.h" #include <cuda.h> #include <device_launch_parameters.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <cmath> #include <stdio.h> /* * This code is configured for a GTX 1060M with 1024 CUDA cores */ #define HEIGHT 1080 #define WIDTH 1920 #define SIZE (3 * WIDTH * HEIGHT) unsigned char *gaussian; unsigned char *finished; __device__ __forceinline__ int index(int x, int y) { if (x >= WIDTH || y >= HEIGHT || x < 0 || y < 0) return -1; return x + y * WIDTH; } __device__ __forceinline__ int wrap(int val, int limit) { if (val < 0) return limit + val; return val % limit; } __global__ void greyScale(unsigned char * frame, unsigned char*greyBuffer) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= WIDTH || y >= HEIGHT) return; int pindex = 3*index(x,y); int sum = (0.2126f * ((float)frame[pindex])) + (0.7152f * ((float)frame[pindex+1])) + (0.0722f * ((float)frame[pindex+2])); greyBuffer[pindex] = sum; greyBuffer[pindex+1] = sum; greyBuffer[pindex+2] = sum; } __global__ void sobelOp(unsigned char * frame, unsigned char * sobel) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= WIDTH || y >= HEIGHT) return; char GX[9] = { 1, 0, -1, 2, 0, -2, 1, 0, -1 }; char GY[9] = { 1, 2, 1, 0, 0, 0, -1,-2,-1 }; for(int p = 0; p < 3; p++){ int xDir = 0; int yDir = 0; for (int r = -1; r < 2; r++) { int row = y + r; row = wrap(row, HEIGHT); for (int c = -1; c < 2; c++) { int col = x + c; col = wrap(col, WIDTH); int pindex = 3*index(col, row); xDir += frame[pindex+p] * GX[(1 - c) + (1 - r) * 3]; yDir += frame[pindex+p] * GY[(1 - c) + (1 - r) * 3]; } } int pindex = 3*index(x, y); int mag = (int)sqrt((double) xDir * xDir + yDir * yDir); mag = max(0, min(255, mag)); sobel[pindex + p] = mag; } } __global__ void gaussian_filter(const unsigned char *gaussian_input, unsigned char *gaussian_output) { const unsigned int col = threadIdx.x + blockIdx.x * 
blockDim.x; const unsigned int row = threadIdx.y + blockIdx.y * blockDim.y; if (col >= WIDTH || row >= HEIGHT) return; char gaussian_kernel[25] = { 1, 4, 6, 4, 1, 4, 16, 24, 16, 4, 6, 24, 36, 24, 6, 4, 16, 24, 16, 4, 1, 4, 6, 4, 1 }; for(int p = 0; p < 3; p++){ int blur = 0; for(int i = -2; i < 3; i++) { for(int j = -2; j < 3; j++) { const unsigned int y = wrap(row+i, HEIGHT); const unsigned int x = wrap(col+j, WIDTH); char w = gaussian_kernel[(2-j) + (2-i) * 5]; int pindex = 3*index(x,y); blur += w * gaussian_input[pindex+p]; } } blur = min(255, blur/256); int pindex = 3*index(col,row); gaussian_output[pindex+p] = blur; } } void filter(unsigned char* frame, FilterType filtertype) { dim3 thread(32, 32); dim3 block((WIDTH+31)/32, (HEIGHT+31)/32); cudaMemcpy(gaussian, frame, SIZE, cudaMemcpyHostToDevice); switch(filtertype){ case GREY:{ greyScale<<<block, thread>>>(gaussian, finished); break; } case SOBEL:{ sobelOp<<<block, thread>>>(gaussian, finished); break; } case GAUSSIAN:{ for(int i = 0; i < 9; i++){ gaussian_filter<<<block, thread>>>(gaussian, finished); cudaMemcpy(gaussian, finished, SIZE, cudaMemcpyDeviceToDevice); } gaussian_filter<<<block, thread>>>(gaussian, finished); break; } } cudaMemcpy(frame, finished, SIZE, cudaMemcpyDeviceToHost); } #include <stdio.h> void initCuda() { cudaMalloc(&gaussian, SIZE); cudaMalloc(&finished, SIZE); } void freeCuda() { cudaFree(gaussian); cudaFree(finished); }
e0596c9750fbfae9222591806bfdcca2bbfca1c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kSqrt(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = sqrt(mat[i]); } }
e0596c9750fbfae9222591806bfdcca2bbfca1c5.cu
#include "includes.h" __global__ void kSqrt(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = sqrt(mat[i]); } }
922c50e27e5e4eb7b060c8fe4e484d2f3a39d0f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! \file ppmp_cuda.cu * \brief Definitions of the piecewise parabolic reconstruction (Fryxell 2000) functions with limiting in the primative variables. */ #ifdef CUDA #ifdef PPMP #include<cuda.h> #include<math.h> #include"global.h" #include"global_cuda.h" #include"ppmp_cuda.h" #define STEEPENING #define FLATTENING /*! \fn __global__ void PPMP_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real gamma, int dir, int n_fields) * \brief When passed a stencil of conserved variables, returns the left and right boundary values for the interface calculated using ppm with limiting in the primative variables. */ __global__ void PPMP_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields) { int n_cells = nx*ny*nz; int o1, o2, o3; if (dir == 0) { o1 = 1; o2 = 2; o3 = 3; } if (dir == 1) { o1 = 2; o2 = 3; o3 = 1; } if (dir == 2) { o1 = 3; o2 = 1; o3 = 2; } // declare primative variables in the stencil Real d_i, vx_i, vy_i, vz_i, p_i; Real d_imo, vx_imo, vy_imo, vz_imo, p_imo; Real d_ipo, vx_ipo, vy_ipo, vz_ipo, p_ipo; Real d_imt, vx_imt, vy_imt, vz_imt, p_imt; Real d_ipt, vx_ipt, vy_ipt, vz_ipt, p_ipt; #ifdef FLATTENING Real p_imth, p_ipth; #endif // declare left and right interface values Real d_L, vx_L, vy_L, vz_L, p_L; Real d_R, vx_R, vy_R, vz_R, p_R; // declare other variables Real del_q_imo, del_q_i, del_q_ipo; #ifdef CTU Real cs, cl, cr; // sound speed in cell i, and at left and right boundaries Real del_d, del_vx, del_vy, del_vz, del_p; // "slope" accross cell i Real d_6, vx_6, vy_6, vz_6, p_6; Real beta_m, beta_0, beta_p; Real alpha_m, alpha_0, alpha_p; Real lambda_m, lambda_0, lambda_p; // speed of characteristics Real dL_m, vxL_m, pL_m; Real dL_0, vyL_0, vzL_0, pL_0; Real vxL_p, pL_p; Real vxR_m, pR_m; Real dR_0, vyR_0, vzR_0, 
pR_0; Real dR_p, vxR_p, pR_p; Real chi_L_m, chi_L_0, chi_L_p; Real chi_R_m, chi_R_0, chi_R_p; #endif #ifdef DE Real ge_i, ge_imo, ge_ipo, ge_imt, ge_ipt, ge_L, ge_R; #ifdef CTU Real del_ge, ge_6, geL_0, geR_0; #endif #endif #ifdef SCALAR Real scalar_i[NSCALARS], scalar_imo[NSCALARS], scalar_ipo[NSCALARS], scalar_imt[NSCALARS], scalar_ipt[NSCALARS]; Real scalar_L[NSCALARS], scalar_R[NSCALARS]; #ifdef CTU Real del_scalar[NSCALARS], scalar_6[NSCALARS], scalarL_0[NSCALARS], scalarR_0[NSCALARS]; #endif #endif // get a thread ID int blockId = blockIdx.x + blockIdx.y*gridDim.x; int tid = threadIdx.x + blockId*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; int xs, xe, ys, ye, zs, ze; if (dir == 0) { xs = 3; xe = nx-4; ys = 0; ye = ny; zs = 0; ze = nz; } if (dir == 1) { xs = 0; xe = nx; ys = 3; ye = ny-4; zs = 0; ze = nz; } if (dir == 2) { xs = 0; xe = nx; ys = 0; ye = ny; zs = 3; ze = nz-4; } if (xid >= xs && xid < xe && yid >= ys && yid < ye && zid >= zs && zid < ze) { // load the 5-cell stencil into registers // cell i id = xid + yid*nx + zid*nx*ny; d_i = dev_conserved[ id]; vx_i = dev_conserved[o1*n_cells + id] / d_i; vy_i = dev_conserved[o2*n_cells + id] / d_i; vz_i = dev_conserved[o3*n_cells + id] / d_i; p_i = (dev_conserved[4*n_cells + id] - 0.5*d_i*(vx_i*vx_i + vy_i*vy_i + vz_i*vz_i)) * (gamma - 1.0); p_i = fmax(p_i, (Real) TINY_NUMBER); #ifdef DE ge_i = dev_conserved[(n_fields-1)*n_cells + id] / d_i; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_i[i] = dev_conserved[(5+i)*n_cells + id] / d_i; } #endif // cell i-1 if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny; d_imo = dev_conserved[ id]; vx_imo = dev_conserved[o1*n_cells + id] / d_imo; vy_imo = dev_conserved[o2*n_cells + id] / d_imo; vz_imo = dev_conserved[o3*n_cells + id] / d_imo; p_imo = (dev_conserved[4*n_cells + id] - 
0.5*d_imo*(vx_imo*vx_imo + vy_imo*vy_imo + vz_imo*vz_imo)) * (gamma - 1.0); p_imo = fmax(p_imo, (Real) TINY_NUMBER); #ifdef DE ge_imo = dev_conserved[(n_fields-1)*n_cells + id] / d_imo; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_imo[i] = dev_conserved[(5+i)*n_cells + id] / d_imo; } #endif // cell i+1 if (dir == 0) id = xid+1 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid+1)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid+1)*nx*ny; d_ipo = dev_conserved[ id]; vx_ipo = dev_conserved[o1*n_cells + id] / d_ipo; vy_ipo = dev_conserved[o2*n_cells + id] / d_ipo; vz_ipo = dev_conserved[o3*n_cells + id] / d_ipo; p_ipo = (dev_conserved[4*n_cells + id] - 0.5*d_ipo*(vx_ipo*vx_ipo + vy_ipo*vy_ipo + vz_ipo*vz_ipo)) * (gamma - 1.0); p_ipo = fmax(p_ipo, (Real) TINY_NUMBER); #ifdef DE ge_ipo = dev_conserved[(n_fields-1)*n_cells + id] / d_ipo; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_ipo[i] = dev_conserved[(5+i)*n_cells + id] / d_ipo; } #endif // cell i-2 if (dir == 0) id = xid-2 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid-2)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid-2)*nx*ny; d_imt = dev_conserved[ id]; vx_imt = dev_conserved[o1*n_cells + id] / d_imt; vy_imt = dev_conserved[o2*n_cells + id] / d_imt; vz_imt = dev_conserved[o3*n_cells + id] / d_imt; p_imt = (dev_conserved[4*n_cells + id] - 0.5*d_imt*(vx_imt*vx_imt + vy_imt*vy_imt + vz_imt*vz_imt)) * (gamma - 1.0); p_imt = fmax(p_imt, (Real) TINY_NUMBER); #ifdef DE ge_imt = dev_conserved[(n_fields-1)*n_cells + id] / d_imt; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_imt[i] = dev_conserved[(5+i)*n_cells + id] / d_imt; } #endif // cell i+2 if (dir == 0) id = xid+2 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid+2)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid+2)*nx*ny; d_ipt = dev_conserved[ id]; vx_ipt = dev_conserved[o1*n_cells + id] / d_ipt; vy_ipt = dev_conserved[o2*n_cells + id] / d_ipt; vz_ipt = dev_conserved[o3*n_cells + id] / 
d_ipt; p_ipt = (dev_conserved[4*n_cells + id] - 0.5*d_ipt*(vx_ipt*vx_ipt + vy_ipt*vy_ipt + vz_ipt*vz_ipt)) * (gamma - 1.0); p_ipt = fmax(p_ipt, (Real) TINY_NUMBER); #ifdef DE ge_ipt = dev_conserved[(n_fields-1)*n_cells + id] / d_ipt; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_ipt[i] = dev_conserved[(5+i)*n_cells + id] / d_ipt; } #endif #ifdef FLATTENING // cell i-3 if (dir == 0) id = xid-3 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid-3)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid-3)*nx*ny; p_imth = (dev_conserved[4*n_cells + id] - 0.5* (dev_conserved[o1*n_cells + id]*dev_conserved[o1*n_cells + id] + dev_conserved[o2*n_cells + id]*dev_conserved[o2*n_cells + id] + dev_conserved[o3*n_cells + id]*dev_conserved[o3*n_cells + id]) / dev_conserved[id]) * (gamma - 1.0); p_imth = fmax(p_imth, (Real) TINY_NUMBER); // cell i+3 if (dir == 0) id = xid+3 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid+3)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid+3)*nx*ny; p_ipth = (dev_conserved[4*n_cells + id] - 0.5* (dev_conserved[o1*n_cells + id]*dev_conserved[o1*n_cells + id] + dev_conserved[o2*n_cells + id]*dev_conserved[o2*n_cells + id] + dev_conserved[o3*n_cells + id]*dev_conserved[o3*n_cells + id]) / dev_conserved[id]) * (gamma - 1.0); p_ipth = fmax(p_imth, (Real) TINY_NUMBER); #endif //FLATTENING //use ppm routines to set cell boundary values (see Fryxell Sec. 
3.1.1) // Calculate the monotonized slopes for cells imo, i, ipo (density) del_q_imo = Calculate_Slope(d_imt, d_imo, d_i); del_q_i = Calculate_Slope(d_imo, d_i, d_ipo); del_q_ipo = Calculate_Slope(d_i, d_ipo, d_ipt); // Calculate the interface values for density Interface_Values_PPM(d_imo, d_i, d_ipo, del_q_imo, del_q_i, del_q_ipo, &d_L, &d_R); // Calculate the monotonized slopes for cells imo, i, ipo (x-velocity) del_q_imo = Calculate_Slope(vx_imt, vx_imo, vx_i); del_q_i = Calculate_Slope(vx_imo, vx_i, vx_ipo); del_q_ipo = Calculate_Slope(vx_i, vx_ipo, vx_ipt); // Calculate the interface values for x-velocity Interface_Values_PPM(vx_imo, vx_i, vx_ipo, del_q_imo, del_q_i, del_q_ipo, &vx_L, &vx_R); // Calculate the monotonized slopes for cells imo, i, ipo (y-velocity) del_q_imo = Calculate_Slope(vy_imt, vy_imo, vy_i); del_q_i = Calculate_Slope(vy_imo, vy_i, vy_ipo); del_q_ipo = Calculate_Slope(vy_i, vy_ipo, vy_ipt); // Calculate the interface values for y-velocity Interface_Values_PPM(vy_imo, vy_i, vy_ipo, del_q_imo, del_q_i, del_q_ipo, &vy_L, &vy_R); // Calculate the monotonized slopes for cells imo, i, ipo (z-velocity) del_q_imo = Calculate_Slope(vz_imt, vz_imo, vz_i); del_q_i = Calculate_Slope(vz_imo, vz_i, vz_ipo); del_q_ipo = Calculate_Slope(vz_i, vz_ipo, vz_ipt); // Calculate the interface values for z-velocity Interface_Values_PPM(vz_imo, vz_i, vz_ipo, del_q_imo, del_q_i, del_q_ipo, &vz_L, &vz_R); // Calculate the monotonized slopes for cells imo, i, ipo (pressure) del_q_imo = Calculate_Slope(p_imt, p_imo, p_i); del_q_i = Calculate_Slope(p_imo, p_i, p_ipo); del_q_ipo = Calculate_Slope(p_i, p_ipo, p_ipt); // Calculate the interface values for pressure Interface_Values_PPM(p_imo, p_i, p_ipo, del_q_imo, del_q_i, del_q_ipo, &p_L, &p_R); #ifdef DE // Calculate the monotonized slopes for cells imo, i, ipo (internal energy) del_q_imo = Calculate_Slope(ge_imt, ge_imo, ge_i); del_q_i = Calculate_Slope(ge_imo, ge_i, ge_ipo); del_q_ipo = Calculate_Slope(ge_i, ge_ipo, 
ge_ipt); // Calculate the interface values for internal energy Interface_Values_PPM(ge_imo, ge_i, ge_ipo, del_q_imo, del_q_i, del_q_ipo, &ge_L, &ge_R); #endif #ifdef SCALAR // Calculate the monotonized slopes for cells imo, i, ipo (passive scalars) for (int i=0; i<NSCALARS; i++) { del_q_imo = Calculate_Slope(scalar_imt[i], scalar_imo[i], scalar_i[i]); del_q_i = Calculate_Slope(scalar_imo[i], scalar_i[i], scalar_ipo[i]); del_q_ipo = Calculate_Slope(scalar_i[i], scalar_ipo[i], scalar_ipt[i]); // Calculate the interface values for the passive scalars Interface_Values_PPM(scalar_imo[i], scalar_i[i], scalar_ipo[i], del_q_imo, del_q_i, del_q_ipo, &scalar_L[i], &scalar_R[i]); } #endif #ifdef STEEPENING Real d2_rho_imo, d2_rho_ipo, eta_i; //check for contact discontinuities & steepen if necessary (see Fryxell Sec 3.1.2) //if condition 4 (Fryxell Eqn 37) (Colella Eqn 1.16.5) is true, check further conditions, otherwise do nothing if ((fabs(d_ipo - d_imo) / fmin(d_ipo, d_imo)) > 0.01) { //calculate the second derivative of the density in the imo and ipo cells d2_rho_imo = calc_d2_rho(d_imt, d_imo, d_i, dx); d2_rho_ipo = calc_d2_rho(d_i, d_ipo, d_ipt, dx); //if condition 1 (Fryxell Eqn 38) (Colella Eqn 1.16.5) is true, check further conditions, otherwise do nothing if ((d2_rho_imo * d2_rho_ipo) < 0) { //calculate condition 5, pressure vs density jumps (Fryxell Eqn 39) (Colella Eqn 3.2) //if c5 is true, set value of eta for discontinuity steepening if ((fabs(p_ipo - p_imo) / fmin(p_ipo, p_imo)) < 0.1 * gamma * (fabs(d_ipo - d_imo) / fmin(d_ipo, d_imo))) { //calculate first eta value (Fryxell Eqn 36) (Colella Eqn 1.16.5) eta_i = calc_eta(d2_rho_imo, d2_rho_ipo, dx, d_imo, d_ipo); //calculate steepening coefficient (Fryxell Eqn 40) (Colella Eqn 1.16) eta_i = fmax(0, fmin(20*(eta_i-0.05), 1) ); //calculate new left and right interface variables using monotonized slopes del_q_imo = Calculate_Slope(d_imt, d_imo, d_i); del_q_ipo = Calculate_Slope(d_i, d_ipo, d_ipt); //replace left 
and right interface values of density (Colella Eqn 1.14, 1.15) d_L = d_L*(1-eta_i) + (d_imo + 0.5 * del_q_imo) * eta_i; d_R = d_R*(1-eta_i) + (d_ipo - 0.5 * del_q_ipo) * eta_i; } } } #endif #ifdef FLATTENING Real F_imo, F_i, F_ipo; //flatten shock fronts that are too narrow (see Fryxell Sec 3.1.3) //calculate the shock steepness parameters (Fryxell Eqn 43) //calculate the dimensionless flattening coefficients (Fryxell Eqn 45) F_imo = fmax( 0, fmin(1, 10*(( (p_i - p_imt) / (p_ipo - p_imth)) - 0.75)) ); F_i = fmax( 0, fmin(1, 10*(( (p_ipo - p_imo) / (p_ipt - p_imt)) - 0.75)) ); F_ipo = fmax( 0, fmin(1, 10*(( (p_ipt - p_i) / (p_ipth - p_imo)) - 0.75)) ); //ensure that we are encountering a shock (Fryxell Eqns 46 & 47) if (fabs(p_i - p_imt) / fmin(p_i, p_imt) < 1./3.) {F_imo = 0;} if (fabs(p_ipo - p_imo) / fmin(p_ipo, p_imo) < 1./3.) {F_i = 0;} if (fabs(p_ipt - p_i) / fmin(p_ipt, p_i) < 1./3.) {F_ipo = 0;} if (vx_i - vx_imt > 0) {F_imo = 0;} if (vx_ipo - vx_imo > 0) {F_i = 0;} if (vx_ipt - vx_i > 0) {F_ipo = 0;} //set the flattening coefficient (Fryxell Eqn 48) if (p_ipo - p_imo < 0) {F_i = fmax(F_i, F_ipo);} else {F_i = fmax(F_i, F_imo);} //modify the interface values d_L = F_i * d_i + (1 - F_i) * d_L; vx_L = F_i * vx_i + (1 - F_i) * vx_L; vy_L = F_i * vy_i + (1 - F_i) * vy_L; vz_L = F_i * vz_i + (1 - F_i) * vz_L; p_L = F_i * p_i + (1 - F_i) * p_L; #ifdef DE ge_L = F_i * ge_i + (1 - F_i) * ge_L; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_L[i] = F_i * scalar_i[i] + (1 - F_i) * scalar_L[i]; } #endif d_R = F_i * d_i + (1 - F_i) * d_R; vx_R = F_i * vx_i + (1 - F_i) * vx_R; vy_R = F_i * vy_i + (1 - F_i) * vy_R; vz_R = F_i * vz_i + (1 - F_i) * vz_R; p_R = F_i * p_i + (1 - F_i) * p_R; #ifdef DE ge_R = F_i * ge_i + (1 - F_i) * ge_R; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_R[i] = F_i * scalar_i[i] + (1 - F_i) * scalar_R[i]; } #endif #endif #ifdef CTU // compute sound speed in cell i cs = sqrt(gamma * p_i / d_i); // compute a first guess 
at the left and right states by taking the average // under the characteristic on each side that has the largest speed // recompute slope across cell for each variable Fryxell Eqn 29 del_d = d_R - d_L; del_vx = vx_R - vx_L; del_vy = vy_R - vy_L; del_vz = vz_R - vz_L; del_p = p_R - p_L; #ifdef DE del_ge = ge_R - ge_L; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { del_scalar[i] = scalar_R[i] - scalar_L[i]; } #endif d_6 = 6.0 * (d_i - 0.5*(d_L + d_R)); // Fryxell Eqn 30 vx_6 = 6.0 * (vx_i - 0.5*(vx_L + vx_R)); // Fryxell Eqn 30 vy_6 = 6.0 * (vy_i - 0.5*(vy_L + vy_R)); // Fryxell Eqn 30 vz_6 = 6.0 * (vz_i - 0.5*(vz_L + vz_R)); // Fryxell Eqn 30 p_6 = 6.0 * (p_i - 0.5*(p_L + p_R)); // Fryxell Eqn 30 #ifdef DE ge_6 = 6.0 * (ge_i - 0.5*(ge_L + ge_R)); // Fryxell Eqn 30 #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_6[i] = 6.0 * (scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i])); // Fryxell Eqn 30 } #endif // set speed of characteristics (v-c, v, v+c) using average values of v and c lambda_m = vx_i - cs; lambda_0 = vx_i; lambda_p = vx_i + cs; // calculate betas (for left state guesses) beta_m = fmax( (lambda_m * dt / dx) , 0 ); // Fryxell Eqn 59 beta_0 = fmax( (lambda_0 * dt / dx) , 0 ); // Fryxell Eqn 59 beta_p = fmax( (lambda_p * dt / dx) , 0 ); // Fryxell Eqn 59 //calculate alphas (for right state guesses) alpha_m = fmax( (-lambda_m * dt / dx), 0); // Fryxell Eqn 61 alpha_0 = fmax( (-lambda_0 * dt / dx), 0); // Fryxell Eqn 61 alpha_p = fmax( (-lambda_p * dt / dx), 0); // Fryxell Eqn 61 // average values under characteristics for left interface (Fryxell Eqn 60) dL_m = d_L + 0.5 * alpha_m * (del_d + d_6 * (1 - (2./3.) * alpha_m)); vxL_m = vx_L + 0.5 * alpha_m * (del_vx + vx_6 * (1 - (2./3.) * alpha_m)); pL_m = p_L + 0.5 * alpha_m * (del_p + p_6 * (1 - (2./3.) * alpha_m)); dL_0 = d_L + 0.5 * alpha_0 * (del_d + d_6 * (1 - (2./3.) * alpha_0)); vyL_0 = vy_L + 0.5 * alpha_0 * (del_vy + vy_6 * (1 - (2./3.) 
* alpha_0)); vzL_0 = vz_L + 0.5 * alpha_0 * (del_vz + vz_6 * (1 - (2./3.) * alpha_0)); #ifdef DE geL_0 = ge_L + 0.5 * alpha_0 * (del_ge + ge_6 * (1 - (2./3.) * alpha_0)); #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalarL_0[i] = scalar_L[i] + 0.5 * alpha_0 * (del_scalar[i] + scalar_6[i] * (1 - (2./3.) * alpha_0)); } #endif pL_0 = p_L + 0.5 * alpha_0 * (del_p + p_6 * (1 - (2./3.) * alpha_0)); vxL_p = vx_L + 0.5 * alpha_p * (del_vx + vx_6 * (1 - (2./3.) * alpha_p)); pL_p = p_L + 0.5 * alpha_p * (del_p + p_6 * (1 - (2./3.) * alpha_p)); // average values under characteristics for right interface (Fryxell Eqn 58) vxR_m = vx_R - 0.5 * beta_m * (del_vx - vx_6 * (1 - (2./3.) * beta_m)); pR_m = p_R - 0.5 * beta_m * (del_p - p_6 * (1 - (2./3.) * beta_m)); dR_0 = d_R - 0.5 * beta_0 * (del_d - d_6 * (1 - (2./3.) * beta_0)); vyR_0 = vy_R - 0.5 * beta_0 * (del_vy - vy_6 * (1 - (2./3.) * beta_0)); vzR_0 = vz_R - 0.5 * beta_0 * (del_vz - vz_6 * (1 - (2./3.) * beta_0)); #ifdef DE geR_0 = ge_R - 0.5 * beta_0 * (del_ge - ge_6 * (1 - (2./3.) * beta_0)); #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalarR_0[i] = scalar_R[i] - 0.5 * beta_0 * (del_scalar[i] - scalar_6[i] * (1 - (2./3.) * beta_0)); } #endif pR_0 = p_R - 0.5 * beta_0 * (del_p - p_6 * (1 - (2./3.) * beta_0)); dR_p = d_R - 0.5 * beta_p * (del_d - d_6 * (1 - (2./3.) * beta_p)); vxR_p = vx_R - 0.5 * beta_p * (del_vx - vx_6 * (1 - (2./3.) * beta_p)); pR_p = p_R - 0.5 * beta_p * (del_p - p_6 * (1 - (2./3.) 
* beta_p)); // as a first guess, use characteristics with the largest speeds // for transverse velocities, use the 0 characteristic // left d_L = dL_m; vx_L = vxL_m; vy_L = vyL_0; vz_L = vzL_0; p_L = pL_m; #ifdef DE ge_L = geL_0; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_L[i] = scalarL_0[i]; } #endif // right d_R = dR_p; vx_R = vxR_p; vy_R = vyR_0; vz_R = vzR_0; p_R = pR_p; #ifdef DE ge_R = geR_0; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_R[i] = scalarR_0[i]; } #endif // correct these initial guesses by taking into account the number of // characteristics on each side of the interface // calculate the 'guess' sound speeds cl = sqrt(gamma * p_L / d_L); cr = sqrt(gamma * p_R / d_L); // calculate the chi values (Fryxell Eqns 62 & 63) chi_L_m = 1./(2*d_L*cl) * (vx_L - vxL_m - (p_L - pL_m)/(d_L*cl)); chi_L_p = -1./(2*d_L*cl) * (vx_L - vxL_p + (p_L - pL_p)/(d_L*cl)); chi_L_0 = (p_L - pL_0)/(d_L*d_L*cl*cl) + 1./d_L - 1./dL_0; chi_R_m = 1./(2*d_R*cr) * (vx_R - vxR_m - (p_R - pR_m)/(d_R*cr)); chi_R_p = -1./(2*d_R*cr) * (vx_R - vxR_p + (p_R - pR_p)/(d_R*cr)); chi_R_0 = (p_R - pR_0)/(d_R*d_R*cr*cr) + 1./d_R - 1./dR_0; // set chi to 0 if characteristic velocity has the wrong sign (Fryxell Eqn 64) if (lambda_m >= 0) { chi_L_m = 0; } if (lambda_0 >= 0) { chi_L_0 = 0; } if (lambda_p >= 0) { chi_L_p = 0; } if (lambda_m <= 0) { chi_R_m = 0; } if (lambda_0 <= 0) { chi_R_0 = 0; } if (lambda_p <= 0) { chi_R_p = 0; } // use the chi values to correct the initial guesses and calculate final input states p_L = p_L + (d_L*d_L*cl*cl) * (chi_L_p + chi_L_m); vx_L = vx_L + d_L*cl * (chi_L_p - chi_L_m); d_L = pow( ((1.0/d_L) - (chi_L_m + chi_L_0 + chi_L_p)) , -1); p_R = p_L + (d_R*d_R*cr*cr) * (chi_R_p + chi_R_m); vx_R = vx_R + d_R*cr * (chi_R_p - chi_R_m); d_R = pow( ((1.0/d_R) - (chi_R_m + chi_R_0 + chi_R_p)) , -1); #endif //CTU // Apply mimimum constraints d_L = fmax(d_L, (Real) TINY_NUMBER); d_R = fmax(d_R, (Real) TINY_NUMBER); p_L = fmax(p_L, (Real) 
TINY_NUMBER); p_R = fmax(p_R, (Real) TINY_NUMBER); // Convert the left and right states in the primitive to the conserved variables // send final values back from kernel // bounds_R refers to the right side of the i-1/2 interface if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny; dev_bounds_R[ id] = d_L; dev_bounds_R[o1*n_cells + id] = d_L*vx_L; dev_bounds_R[o2*n_cells + id] = d_L*vy_L; dev_bounds_R[o3*n_cells + id] = d_L*vz_L; dev_bounds_R[4*n_cells + id] = p_L/(gamma-1.0) + 0.5*d_L*(vx_L*vx_L + vy_L*vy_L + vz_L*vz_L); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_bounds_R[(5+i)*n_cells + id] = d_L*scalar_L[i]; } #endif #ifdef DE dev_bounds_R[(n_fields-1)*n_cells + id] = d_L*ge_L; #endif // bounds_L refers to the left side of the i+1/2 interface id = xid + yid*nx + zid*nx*ny; dev_bounds_L[ id] = d_R; dev_bounds_L[o1*n_cells + id] = d_R*vx_R; dev_bounds_L[o2*n_cells + id] = d_R*vy_R; dev_bounds_L[o3*n_cells + id] = d_R*vz_R; dev_bounds_L[4*n_cells + id] = p_R/(gamma-1.0) + 0.5*d_R*(vx_R*vx_R + vy_R*vy_R + vz_R*vz_R); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_bounds_L[(5+i)*n_cells + id] = d_R*scalar_R[i]; } #endif #ifdef DE dev_bounds_L[(n_fields-1)*n_cells + id] = d_R*ge_R; #endif } } /*! 
\fn __device__ Real Calculate_Slope(Real q_imo, Real q_i, Real q_ipo) * \brief Calculates the limited slope across a cell.*/ __device__ Real Calculate_Slope(Real q_imo, Real q_i, Real q_ipo) { Real del_q_L, del_q_R, del_q_C, del_q_G; Real lim_slope_a, lim_slope_b, del_q_m; // Compute the left, right, and centered differences of the primative variables // Note that here L and R refer to locations relative to the cell center // left del_q_L = q_i - q_imo; // right del_q_R = q_ipo - q_i; // centered del_q_C = 0.5*(q_ipo - q_imo); // Van Leer if (del_q_L*del_q_R > 0.0) { del_q_G = 2.0*del_q_L*del_q_R / (del_q_L+del_q_R); } else { del_q_G = 0.0; } // Monotonize the differences lim_slope_a = fmin(fabs(del_q_L), fabs(del_q_R)); lim_slope_b = fmin(fabs(del_q_C), fabs(del_q_G)); // Minmod limiter //del_q_m = sgn_CUDA(del_q_C)*fmin(2.0*lim_slope_a, fabs(del_q_C)); // Van Leer limiter del_q_m = sgn_CUDA(del_q_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b); return del_q_m; } /*! \fn __device__ void Interface_Values_PPM(Real q_imo, Real q_i, Real q_ipo, Real del_q_imo, Real del_q_i, Real del_q_ipo, Real *q_L, Real *q_R) * \brief Calculates the left and right interface values for a cell using parabolic reconstruction in the primitive variables with limited slopes provided. 
Applies further monotonicity constraints.*/ __device__ void Interface_Values_PPM(Real q_imo, Real q_i, Real q_ipo, Real del_q_imo, Real del_q_i, Real del_q_ipo, Real *q_L, Real *q_R) { // Calculate the left and right interface values using the limited slopes *q_L = 0.5*(q_i + q_imo) - (1.0/6.0)*(del_q_i - del_q_imo); *q_R = 0.5*(q_ipo + q_i) - (1.0/6.0)*(del_q_ipo - del_q_i); // Apply further monotonicity constraints to ensure interface values lie between // neighboring cell-centered values // local maximum or minimum criterion (Fryxell Eqn 52, Fig 11) if ((*q_R - q_i)*(q_i - *q_L) <= 0) *q_L = *q_R = q_i; // steep gradient criterion (Fryxell Eqn 53, Fig 12) if (6.0*(*q_R - *q_L)*(q_i - 0.5*(*q_L + *q_R)) > (*q_R - *q_L)*(*q_R - *q_L)) *q_L = 3.0*q_i - 2.0*(*q_R); if (6.0*(*q_R - *q_L)*(q_i - 0.5*(*q_L + *q_R)) < -(*q_R - *q_L)*(*q_R - *q_L)) *q_R = 3.0*q_i - 2.0*(*q_L); *q_L = fmax( fmin(q_i, q_imo), *q_L ); *q_L = fmin( fmax(q_i, q_imo), *q_L ); *q_R = fmax( fmin(q_i, q_ipo), *q_R ); *q_R = fmin( fmax(q_i, q_ipo), *q_R ); } /*! \fn calc_d2_rho * \brief Returns the second derivative of rho across zone i. (Fryxell Eqn 35) */ __device__ Real calc_d2_rho(Real rho_imo, Real rho_i, Real rho_ipo, Real dx) { return (1. / (6*dx*dx)) * (rho_ipo - 2*rho_i + rho_imo); } /*! \fn calc_eta * \brief Returns a dimensionless quantity relating the 1st and 3rd derivatives See Fryxell Eqn 36. */ __device__ Real calc_eta(Real d2rho_imo, Real d2rho_ipo, Real dx, Real rho_imo, Real rho_ipo) { Real A, B; A = (d2rho_ipo - d2rho_imo)*dx*dx; B = 1.0 / (rho_ipo - rho_imo); return -A * B; } #endif //PPMP #endif //CUDA
922c50e27e5e4eb7b060c8fe4e484d2f3a39d0f2.cu
/*! \file ppmp_cuda.cu * \brief Definitions of the piecewise parabolic reconstruction (Fryxell 2000) functions with limiting in the primative variables. */ #ifdef CUDA #ifdef PPMP #include<cuda.h> #include<math.h> #include"global.h" #include"global_cuda.h" #include"ppmp_cuda.h" #define STEEPENING #define FLATTENING /*! \fn __global__ void PPMP_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real gamma, int dir, int n_fields) * \brief When passed a stencil of conserved variables, returns the left and right boundary values for the interface calculated using ppm with limiting in the primative variables. */ __global__ void PPMP_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields) { int n_cells = nx*ny*nz; int o1, o2, o3; if (dir == 0) { o1 = 1; o2 = 2; o3 = 3; } if (dir == 1) { o1 = 2; o2 = 3; o3 = 1; } if (dir == 2) { o1 = 3; o2 = 1; o3 = 2; } // declare primative variables in the stencil Real d_i, vx_i, vy_i, vz_i, p_i; Real d_imo, vx_imo, vy_imo, vz_imo, p_imo; Real d_ipo, vx_ipo, vy_ipo, vz_ipo, p_ipo; Real d_imt, vx_imt, vy_imt, vz_imt, p_imt; Real d_ipt, vx_ipt, vy_ipt, vz_ipt, p_ipt; #ifdef FLATTENING Real p_imth, p_ipth; #endif // declare left and right interface values Real d_L, vx_L, vy_L, vz_L, p_L; Real d_R, vx_R, vy_R, vz_R, p_R; // declare other variables Real del_q_imo, del_q_i, del_q_ipo; #ifdef CTU Real cs, cl, cr; // sound speed in cell i, and at left and right boundaries Real del_d, del_vx, del_vy, del_vz, del_p; // "slope" accross cell i Real d_6, vx_6, vy_6, vz_6, p_6; Real beta_m, beta_0, beta_p; Real alpha_m, alpha_0, alpha_p; Real lambda_m, lambda_0, lambda_p; // speed of characteristics Real dL_m, vxL_m, pL_m; Real dL_0, vyL_0, vzL_0, pL_0; Real vxL_p, pL_p; Real vxR_m, pR_m; Real dR_0, vyR_0, vzR_0, pR_0; Real dR_p, vxR_p, pR_p; Real chi_L_m, chi_L_0, chi_L_p; Real chi_R_m, chi_R_0, 
chi_R_p; #endif #ifdef DE Real ge_i, ge_imo, ge_ipo, ge_imt, ge_ipt, ge_L, ge_R; #ifdef CTU Real del_ge, ge_6, geL_0, geR_0; #endif #endif #ifdef SCALAR Real scalar_i[NSCALARS], scalar_imo[NSCALARS], scalar_ipo[NSCALARS], scalar_imt[NSCALARS], scalar_ipt[NSCALARS]; Real scalar_L[NSCALARS], scalar_R[NSCALARS]; #ifdef CTU Real del_scalar[NSCALARS], scalar_6[NSCALARS], scalarL_0[NSCALARS], scalarR_0[NSCALARS]; #endif #endif // get a thread ID int blockId = blockIdx.x + blockIdx.y*gridDim.x; int tid = threadIdx.x + blockId*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; int xs, xe, ys, ye, zs, ze; if (dir == 0) { xs = 3; xe = nx-4; ys = 0; ye = ny; zs = 0; ze = nz; } if (dir == 1) { xs = 0; xe = nx; ys = 3; ye = ny-4; zs = 0; ze = nz; } if (dir == 2) { xs = 0; xe = nx; ys = 0; ye = ny; zs = 3; ze = nz-4; } if (xid >= xs && xid < xe && yid >= ys && yid < ye && zid >= zs && zid < ze) { // load the 5-cell stencil into registers // cell i id = xid + yid*nx + zid*nx*ny; d_i = dev_conserved[ id]; vx_i = dev_conserved[o1*n_cells + id] / d_i; vy_i = dev_conserved[o2*n_cells + id] / d_i; vz_i = dev_conserved[o3*n_cells + id] / d_i; p_i = (dev_conserved[4*n_cells + id] - 0.5*d_i*(vx_i*vx_i + vy_i*vy_i + vz_i*vz_i)) * (gamma - 1.0); p_i = fmax(p_i, (Real) TINY_NUMBER); #ifdef DE ge_i = dev_conserved[(n_fields-1)*n_cells + id] / d_i; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_i[i] = dev_conserved[(5+i)*n_cells + id] / d_i; } #endif // cell i-1 if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny; d_imo = dev_conserved[ id]; vx_imo = dev_conserved[o1*n_cells + id] / d_imo; vy_imo = dev_conserved[o2*n_cells + id] / d_imo; vz_imo = dev_conserved[o3*n_cells + id] / d_imo; p_imo = (dev_conserved[4*n_cells + id] - 0.5*d_imo*(vx_imo*vx_imo + vy_imo*vy_imo + vz_imo*vz_imo)) * (gamma - 1.0); p_imo = fmax(p_imo, 
(Real) TINY_NUMBER); #ifdef DE ge_imo = dev_conserved[(n_fields-1)*n_cells + id] / d_imo; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_imo[i] = dev_conserved[(5+i)*n_cells + id] / d_imo; } #endif // cell i+1 if (dir == 0) id = xid+1 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid+1)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid+1)*nx*ny; d_ipo = dev_conserved[ id]; vx_ipo = dev_conserved[o1*n_cells + id] / d_ipo; vy_ipo = dev_conserved[o2*n_cells + id] / d_ipo; vz_ipo = dev_conserved[o3*n_cells + id] / d_ipo; p_ipo = (dev_conserved[4*n_cells + id] - 0.5*d_ipo*(vx_ipo*vx_ipo + vy_ipo*vy_ipo + vz_ipo*vz_ipo)) * (gamma - 1.0); p_ipo = fmax(p_ipo, (Real) TINY_NUMBER); #ifdef DE ge_ipo = dev_conserved[(n_fields-1)*n_cells + id] / d_ipo; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_ipo[i] = dev_conserved[(5+i)*n_cells + id] / d_ipo; } #endif // cell i-2 if (dir == 0) id = xid-2 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid-2)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid-2)*nx*ny; d_imt = dev_conserved[ id]; vx_imt = dev_conserved[o1*n_cells + id] / d_imt; vy_imt = dev_conserved[o2*n_cells + id] / d_imt; vz_imt = dev_conserved[o3*n_cells + id] / d_imt; p_imt = (dev_conserved[4*n_cells + id] - 0.5*d_imt*(vx_imt*vx_imt + vy_imt*vy_imt + vz_imt*vz_imt)) * (gamma - 1.0); p_imt = fmax(p_imt, (Real) TINY_NUMBER); #ifdef DE ge_imt = dev_conserved[(n_fields-1)*n_cells + id] / d_imt; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_imt[i] = dev_conserved[(5+i)*n_cells + id] / d_imt; } #endif // cell i+2 if (dir == 0) id = xid+2 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid+2)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid+2)*nx*ny; d_ipt = dev_conserved[ id]; vx_ipt = dev_conserved[o1*n_cells + id] / d_ipt; vy_ipt = dev_conserved[o2*n_cells + id] / d_ipt; vz_ipt = dev_conserved[o3*n_cells + id] / d_ipt; p_ipt = (dev_conserved[4*n_cells + id] - 0.5*d_ipt*(vx_ipt*vx_ipt + vy_ipt*vy_ipt + 
vz_ipt*vz_ipt)) * (gamma - 1.0); p_ipt = fmax(p_ipt, (Real) TINY_NUMBER); #ifdef DE ge_ipt = dev_conserved[(n_fields-1)*n_cells + id] / d_ipt; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_ipt[i] = dev_conserved[(5+i)*n_cells + id] / d_ipt; } #endif #ifdef FLATTENING // cell i-3 if (dir == 0) id = xid-3 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid-3)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid-3)*nx*ny; p_imth = (dev_conserved[4*n_cells + id] - 0.5* (dev_conserved[o1*n_cells + id]*dev_conserved[o1*n_cells + id] + dev_conserved[o2*n_cells + id]*dev_conserved[o2*n_cells + id] + dev_conserved[o3*n_cells + id]*dev_conserved[o3*n_cells + id]) / dev_conserved[id]) * (gamma - 1.0); p_imth = fmax(p_imth, (Real) TINY_NUMBER); // cell i+3 if (dir == 0) id = xid+3 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid+3)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid+3)*nx*ny; p_ipth = (dev_conserved[4*n_cells + id] - 0.5* (dev_conserved[o1*n_cells + id]*dev_conserved[o1*n_cells + id] + dev_conserved[o2*n_cells + id]*dev_conserved[o2*n_cells + id] + dev_conserved[o3*n_cells + id]*dev_conserved[o3*n_cells + id]) / dev_conserved[id]) * (gamma - 1.0); p_ipth = fmax(p_imth, (Real) TINY_NUMBER); #endif //FLATTENING //use ppm routines to set cell boundary values (see Fryxell Sec. 
3.1.1) // Calculate the monotonized slopes for cells imo, i, ipo (density) del_q_imo = Calculate_Slope(d_imt, d_imo, d_i); del_q_i = Calculate_Slope(d_imo, d_i, d_ipo); del_q_ipo = Calculate_Slope(d_i, d_ipo, d_ipt); // Calculate the interface values for density Interface_Values_PPM(d_imo, d_i, d_ipo, del_q_imo, del_q_i, del_q_ipo, &d_L, &d_R); // Calculate the monotonized slopes for cells imo, i, ipo (x-velocity) del_q_imo = Calculate_Slope(vx_imt, vx_imo, vx_i); del_q_i = Calculate_Slope(vx_imo, vx_i, vx_ipo); del_q_ipo = Calculate_Slope(vx_i, vx_ipo, vx_ipt); // Calculate the interface values for x-velocity Interface_Values_PPM(vx_imo, vx_i, vx_ipo, del_q_imo, del_q_i, del_q_ipo, &vx_L, &vx_R); // Calculate the monotonized slopes for cells imo, i, ipo (y-velocity) del_q_imo = Calculate_Slope(vy_imt, vy_imo, vy_i); del_q_i = Calculate_Slope(vy_imo, vy_i, vy_ipo); del_q_ipo = Calculate_Slope(vy_i, vy_ipo, vy_ipt); // Calculate the interface values for y-velocity Interface_Values_PPM(vy_imo, vy_i, vy_ipo, del_q_imo, del_q_i, del_q_ipo, &vy_L, &vy_R); // Calculate the monotonized slopes for cells imo, i, ipo (z-velocity) del_q_imo = Calculate_Slope(vz_imt, vz_imo, vz_i); del_q_i = Calculate_Slope(vz_imo, vz_i, vz_ipo); del_q_ipo = Calculate_Slope(vz_i, vz_ipo, vz_ipt); // Calculate the interface values for z-velocity Interface_Values_PPM(vz_imo, vz_i, vz_ipo, del_q_imo, del_q_i, del_q_ipo, &vz_L, &vz_R); // Calculate the monotonized slopes for cells imo, i, ipo (pressure) del_q_imo = Calculate_Slope(p_imt, p_imo, p_i); del_q_i = Calculate_Slope(p_imo, p_i, p_ipo); del_q_ipo = Calculate_Slope(p_i, p_ipo, p_ipt); // Calculate the interface values for pressure Interface_Values_PPM(p_imo, p_i, p_ipo, del_q_imo, del_q_i, del_q_ipo, &p_L, &p_R); #ifdef DE // Calculate the monotonized slopes for cells imo, i, ipo (internal energy) del_q_imo = Calculate_Slope(ge_imt, ge_imo, ge_i); del_q_i = Calculate_Slope(ge_imo, ge_i, ge_ipo); del_q_ipo = Calculate_Slope(ge_i, ge_ipo, 
ge_ipt); // Calculate the interface values for internal energy Interface_Values_PPM(ge_imo, ge_i, ge_ipo, del_q_imo, del_q_i, del_q_ipo, &ge_L, &ge_R); #endif #ifdef SCALAR // Calculate the monotonized slopes for cells imo, i, ipo (passive scalars) for (int i=0; i<NSCALARS; i++) { del_q_imo = Calculate_Slope(scalar_imt[i], scalar_imo[i], scalar_i[i]); del_q_i = Calculate_Slope(scalar_imo[i], scalar_i[i], scalar_ipo[i]); del_q_ipo = Calculate_Slope(scalar_i[i], scalar_ipo[i], scalar_ipt[i]); // Calculate the interface values for the passive scalars Interface_Values_PPM(scalar_imo[i], scalar_i[i], scalar_ipo[i], del_q_imo, del_q_i, del_q_ipo, &scalar_L[i], &scalar_R[i]); } #endif #ifdef STEEPENING Real d2_rho_imo, d2_rho_ipo, eta_i; //check for contact discontinuities & steepen if necessary (see Fryxell Sec 3.1.2) //if condition 4 (Fryxell Eqn 37) (Colella Eqn 1.16.5) is true, check further conditions, otherwise do nothing if ((fabs(d_ipo - d_imo) / fmin(d_ipo, d_imo)) > 0.01) { //calculate the second derivative of the density in the imo and ipo cells d2_rho_imo = calc_d2_rho(d_imt, d_imo, d_i, dx); d2_rho_ipo = calc_d2_rho(d_i, d_ipo, d_ipt, dx); //if condition 1 (Fryxell Eqn 38) (Colella Eqn 1.16.5) is true, check further conditions, otherwise do nothing if ((d2_rho_imo * d2_rho_ipo) < 0) { //calculate condition 5, pressure vs density jumps (Fryxell Eqn 39) (Colella Eqn 3.2) //if c5 is true, set value of eta for discontinuity steepening if ((fabs(p_ipo - p_imo) / fmin(p_ipo, p_imo)) < 0.1 * gamma * (fabs(d_ipo - d_imo) / fmin(d_ipo, d_imo))) { //calculate first eta value (Fryxell Eqn 36) (Colella Eqn 1.16.5) eta_i = calc_eta(d2_rho_imo, d2_rho_ipo, dx, d_imo, d_ipo); //calculate steepening coefficient (Fryxell Eqn 40) (Colella Eqn 1.16) eta_i = fmax(0, fmin(20*(eta_i-0.05), 1) ); //calculate new left and right interface variables using monotonized slopes del_q_imo = Calculate_Slope(d_imt, d_imo, d_i); del_q_ipo = Calculate_Slope(d_i, d_ipo, d_ipt); //replace left 
and right interface values of density (Colella Eqn 1.14, 1.15) d_L = d_L*(1-eta_i) + (d_imo + 0.5 * del_q_imo) * eta_i; d_R = d_R*(1-eta_i) + (d_ipo - 0.5 * del_q_ipo) * eta_i; } } } #endif #ifdef FLATTENING Real F_imo, F_i, F_ipo; //flatten shock fronts that are too narrow (see Fryxell Sec 3.1.3) //calculate the shock steepness parameters (Fryxell Eqn 43) //calculate the dimensionless flattening coefficients (Fryxell Eqn 45) F_imo = fmax( 0, fmin(1, 10*(( (p_i - p_imt) / (p_ipo - p_imth)) - 0.75)) ); F_i = fmax( 0, fmin(1, 10*(( (p_ipo - p_imo) / (p_ipt - p_imt)) - 0.75)) ); F_ipo = fmax( 0, fmin(1, 10*(( (p_ipt - p_i) / (p_ipth - p_imo)) - 0.75)) ); //ensure that we are encountering a shock (Fryxell Eqns 46 & 47) if (fabs(p_i - p_imt) / fmin(p_i, p_imt) < 1./3.) {F_imo = 0;} if (fabs(p_ipo - p_imo) / fmin(p_ipo, p_imo) < 1./3.) {F_i = 0;} if (fabs(p_ipt - p_i) / fmin(p_ipt, p_i) < 1./3.) {F_ipo = 0;} if (vx_i - vx_imt > 0) {F_imo = 0;} if (vx_ipo - vx_imo > 0) {F_i = 0;} if (vx_ipt - vx_i > 0) {F_ipo = 0;} //set the flattening coefficient (Fryxell Eqn 48) if (p_ipo - p_imo < 0) {F_i = fmax(F_i, F_ipo);} else {F_i = fmax(F_i, F_imo);} //modify the interface values d_L = F_i * d_i + (1 - F_i) * d_L; vx_L = F_i * vx_i + (1 - F_i) * vx_L; vy_L = F_i * vy_i + (1 - F_i) * vy_L; vz_L = F_i * vz_i + (1 - F_i) * vz_L; p_L = F_i * p_i + (1 - F_i) * p_L; #ifdef DE ge_L = F_i * ge_i + (1 - F_i) * ge_L; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_L[i] = F_i * scalar_i[i] + (1 - F_i) * scalar_L[i]; } #endif d_R = F_i * d_i + (1 - F_i) * d_R; vx_R = F_i * vx_i + (1 - F_i) * vx_R; vy_R = F_i * vy_i + (1 - F_i) * vy_R; vz_R = F_i * vz_i + (1 - F_i) * vz_R; p_R = F_i * p_i + (1 - F_i) * p_R; #ifdef DE ge_R = F_i * ge_i + (1 - F_i) * ge_R; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_R[i] = F_i * scalar_i[i] + (1 - F_i) * scalar_R[i]; } #endif #endif #ifdef CTU // compute sound speed in cell i cs = sqrt(gamma * p_i / d_i); // compute a first guess 
at the left and right states by taking the average // under the characteristic on each side that has the largest speed // recompute slope across cell for each variable Fryxell Eqn 29 del_d = d_R - d_L; del_vx = vx_R - vx_L; del_vy = vy_R - vy_L; del_vz = vz_R - vz_L; del_p = p_R - p_L; #ifdef DE del_ge = ge_R - ge_L; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { del_scalar[i] = scalar_R[i] - scalar_L[i]; } #endif d_6 = 6.0 * (d_i - 0.5*(d_L + d_R)); // Fryxell Eqn 30 vx_6 = 6.0 * (vx_i - 0.5*(vx_L + vx_R)); // Fryxell Eqn 30 vy_6 = 6.0 * (vy_i - 0.5*(vy_L + vy_R)); // Fryxell Eqn 30 vz_6 = 6.0 * (vz_i - 0.5*(vz_L + vz_R)); // Fryxell Eqn 30 p_6 = 6.0 * (p_i - 0.5*(p_L + p_R)); // Fryxell Eqn 30 #ifdef DE ge_6 = 6.0 * (ge_i - 0.5*(ge_L + ge_R)); // Fryxell Eqn 30 #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_6[i] = 6.0 * (scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i])); // Fryxell Eqn 30 } #endif // set speed of characteristics (v-c, v, v+c) using average values of v and c lambda_m = vx_i - cs; lambda_0 = vx_i; lambda_p = vx_i + cs; // calculate betas (for left state guesses) beta_m = fmax( (lambda_m * dt / dx) , 0 ); // Fryxell Eqn 59 beta_0 = fmax( (lambda_0 * dt / dx) , 0 ); // Fryxell Eqn 59 beta_p = fmax( (lambda_p * dt / dx) , 0 ); // Fryxell Eqn 59 //calculate alphas (for right state guesses) alpha_m = fmax( (-lambda_m * dt / dx), 0); // Fryxell Eqn 61 alpha_0 = fmax( (-lambda_0 * dt / dx), 0); // Fryxell Eqn 61 alpha_p = fmax( (-lambda_p * dt / dx), 0); // Fryxell Eqn 61 // average values under characteristics for left interface (Fryxell Eqn 60) dL_m = d_L + 0.5 * alpha_m * (del_d + d_6 * (1 - (2./3.) * alpha_m)); vxL_m = vx_L + 0.5 * alpha_m * (del_vx + vx_6 * (1 - (2./3.) * alpha_m)); pL_m = p_L + 0.5 * alpha_m * (del_p + p_6 * (1 - (2./3.) * alpha_m)); dL_0 = d_L + 0.5 * alpha_0 * (del_d + d_6 * (1 - (2./3.) * alpha_0)); vyL_0 = vy_L + 0.5 * alpha_0 * (del_vy + vy_6 * (1 - (2./3.) 
* alpha_0)); vzL_0 = vz_L + 0.5 * alpha_0 * (del_vz + vz_6 * (1 - (2./3.) * alpha_0)); #ifdef DE geL_0 = ge_L + 0.5 * alpha_0 * (del_ge + ge_6 * (1 - (2./3.) * alpha_0)); #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalarL_0[i] = scalar_L[i] + 0.5 * alpha_0 * (del_scalar[i] + scalar_6[i] * (1 - (2./3.) * alpha_0)); } #endif pL_0 = p_L + 0.5 * alpha_0 * (del_p + p_6 * (1 - (2./3.) * alpha_0)); vxL_p = vx_L + 0.5 * alpha_p * (del_vx + vx_6 * (1 - (2./3.) * alpha_p)); pL_p = p_L + 0.5 * alpha_p * (del_p + p_6 * (1 - (2./3.) * alpha_p)); // average values under characteristics for right interface (Fryxell Eqn 58) vxR_m = vx_R - 0.5 * beta_m * (del_vx - vx_6 * (1 - (2./3.) * beta_m)); pR_m = p_R - 0.5 * beta_m * (del_p - p_6 * (1 - (2./3.) * beta_m)); dR_0 = d_R - 0.5 * beta_0 * (del_d - d_6 * (1 - (2./3.) * beta_0)); vyR_0 = vy_R - 0.5 * beta_0 * (del_vy - vy_6 * (1 - (2./3.) * beta_0)); vzR_0 = vz_R - 0.5 * beta_0 * (del_vz - vz_6 * (1 - (2./3.) * beta_0)); #ifdef DE geR_0 = ge_R - 0.5 * beta_0 * (del_ge - ge_6 * (1 - (2./3.) * beta_0)); #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalarR_0[i] = scalar_R[i] - 0.5 * beta_0 * (del_scalar[i] - scalar_6[i] * (1 - (2./3.) * beta_0)); } #endif pR_0 = p_R - 0.5 * beta_0 * (del_p - p_6 * (1 - (2./3.) * beta_0)); dR_p = d_R - 0.5 * beta_p * (del_d - d_6 * (1 - (2./3.) * beta_p)); vxR_p = vx_R - 0.5 * beta_p * (del_vx - vx_6 * (1 - (2./3.) * beta_p)); pR_p = p_R - 0.5 * beta_p * (del_p - p_6 * (1 - (2./3.) 
* beta_p)); // as a first guess, use characteristics with the largest speeds // for transverse velocities, use the 0 characteristic // left d_L = dL_m; vx_L = vxL_m; vy_L = vyL_0; vz_L = vzL_0; p_L = pL_m; #ifdef DE ge_L = geL_0; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_L[i] = scalarL_0[i]; } #endif // right d_R = dR_p; vx_R = vxR_p; vy_R = vyR_0; vz_R = vzR_0; p_R = pR_p; #ifdef DE ge_R = geR_0; #endif #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { scalar_R[i] = scalarR_0[i]; } #endif // correct these initial guesses by taking into account the number of // characteristics on each side of the interface // calculate the 'guess' sound speeds cl = sqrt(gamma * p_L / d_L); cr = sqrt(gamma * p_R / d_L); // calculate the chi values (Fryxell Eqns 62 & 63) chi_L_m = 1./(2*d_L*cl) * (vx_L - vxL_m - (p_L - pL_m)/(d_L*cl)); chi_L_p = -1./(2*d_L*cl) * (vx_L - vxL_p + (p_L - pL_p)/(d_L*cl)); chi_L_0 = (p_L - pL_0)/(d_L*d_L*cl*cl) + 1./d_L - 1./dL_0; chi_R_m = 1./(2*d_R*cr) * (vx_R - vxR_m - (p_R - pR_m)/(d_R*cr)); chi_R_p = -1./(2*d_R*cr) * (vx_R - vxR_p + (p_R - pR_p)/(d_R*cr)); chi_R_0 = (p_R - pR_0)/(d_R*d_R*cr*cr) + 1./d_R - 1./dR_0; // set chi to 0 if characteristic velocity has the wrong sign (Fryxell Eqn 64) if (lambda_m >= 0) { chi_L_m = 0; } if (lambda_0 >= 0) { chi_L_0 = 0; } if (lambda_p >= 0) { chi_L_p = 0; } if (lambda_m <= 0) { chi_R_m = 0; } if (lambda_0 <= 0) { chi_R_0 = 0; } if (lambda_p <= 0) { chi_R_p = 0; } // use the chi values to correct the initial guesses and calculate final input states p_L = p_L + (d_L*d_L*cl*cl) * (chi_L_p + chi_L_m); vx_L = vx_L + d_L*cl * (chi_L_p - chi_L_m); d_L = pow( ((1.0/d_L) - (chi_L_m + chi_L_0 + chi_L_p)) , -1); p_R = p_L + (d_R*d_R*cr*cr) * (chi_R_p + chi_R_m); vx_R = vx_R + d_R*cr * (chi_R_p - chi_R_m); d_R = pow( ((1.0/d_R) - (chi_R_m + chi_R_0 + chi_R_p)) , -1); #endif //CTU // Apply mimimum constraints d_L = fmax(d_L, (Real) TINY_NUMBER); d_R = fmax(d_R, (Real) TINY_NUMBER); p_L = fmax(p_L, (Real) 
TINY_NUMBER); p_R = fmax(p_R, (Real) TINY_NUMBER); // Convert the left and right states in the primitive to the conserved variables // send final values back from kernel // bounds_R refers to the right side of the i-1/2 interface if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny; if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny; if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny; dev_bounds_R[ id] = d_L; dev_bounds_R[o1*n_cells + id] = d_L*vx_L; dev_bounds_R[o2*n_cells + id] = d_L*vy_L; dev_bounds_R[o3*n_cells + id] = d_L*vz_L; dev_bounds_R[4*n_cells + id] = p_L/(gamma-1.0) + 0.5*d_L*(vx_L*vx_L + vy_L*vy_L + vz_L*vz_L); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_bounds_R[(5+i)*n_cells + id] = d_L*scalar_L[i]; } #endif #ifdef DE dev_bounds_R[(n_fields-1)*n_cells + id] = d_L*ge_L; #endif // bounds_L refers to the left side of the i+1/2 interface id = xid + yid*nx + zid*nx*ny; dev_bounds_L[ id] = d_R; dev_bounds_L[o1*n_cells + id] = d_R*vx_R; dev_bounds_L[o2*n_cells + id] = d_R*vy_R; dev_bounds_L[o3*n_cells + id] = d_R*vz_R; dev_bounds_L[4*n_cells + id] = p_R/(gamma-1.0) + 0.5*d_R*(vx_R*vx_R + vy_R*vy_R + vz_R*vz_R); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_bounds_L[(5+i)*n_cells + id] = d_R*scalar_R[i]; } #endif #ifdef DE dev_bounds_L[(n_fields-1)*n_cells + id] = d_R*ge_R; #endif } } /*! 
\fn __device__ Real Calculate_Slope(Real q_imo, Real q_i, Real q_ipo) * \brief Calculates the limited slope across a cell.*/ __device__ Real Calculate_Slope(Real q_imo, Real q_i, Real q_ipo) { Real del_q_L, del_q_R, del_q_C, del_q_G; Real lim_slope_a, lim_slope_b, del_q_m; // Compute the left, right, and centered differences of the primative variables // Note that here L and R refer to locations relative to the cell center // left del_q_L = q_i - q_imo; // right del_q_R = q_ipo - q_i; // centered del_q_C = 0.5*(q_ipo - q_imo); // Van Leer if (del_q_L*del_q_R > 0.0) { del_q_G = 2.0*del_q_L*del_q_R / (del_q_L+del_q_R); } else { del_q_G = 0.0; } // Monotonize the differences lim_slope_a = fmin(fabs(del_q_L), fabs(del_q_R)); lim_slope_b = fmin(fabs(del_q_C), fabs(del_q_G)); // Minmod limiter //del_q_m = sgn_CUDA(del_q_C)*fmin(2.0*lim_slope_a, fabs(del_q_C)); // Van Leer limiter del_q_m = sgn_CUDA(del_q_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b); return del_q_m; } /*! \fn __device__ void Interface_Values_PPM(Real q_imo, Real q_i, Real q_ipo, Real del_q_imo, Real del_q_i, Real del_q_ipo, Real *q_L, Real *q_R) * \brief Calculates the left and right interface values for a cell using parabolic reconstruction in the primitive variables with limited slopes provided. 
Applies further monotonicity constraints.*/ __device__ void Interface_Values_PPM(Real q_imo, Real q_i, Real q_ipo, Real del_q_imo, Real del_q_i, Real del_q_ipo, Real *q_L, Real *q_R) { // Calculate the left and right interface values using the limited slopes *q_L = 0.5*(q_i + q_imo) - (1.0/6.0)*(del_q_i - del_q_imo); *q_R = 0.5*(q_ipo + q_i) - (1.0/6.0)*(del_q_ipo - del_q_i); // Apply further monotonicity constraints to ensure interface values lie between // neighboring cell-centered values // local maximum or minimum criterion (Fryxell Eqn 52, Fig 11) if ((*q_R - q_i)*(q_i - *q_L) <= 0) *q_L = *q_R = q_i; // steep gradient criterion (Fryxell Eqn 53, Fig 12) if (6.0*(*q_R - *q_L)*(q_i - 0.5*(*q_L + *q_R)) > (*q_R - *q_L)*(*q_R - *q_L)) *q_L = 3.0*q_i - 2.0*(*q_R); if (6.0*(*q_R - *q_L)*(q_i - 0.5*(*q_L + *q_R)) < -(*q_R - *q_L)*(*q_R - *q_L)) *q_R = 3.0*q_i - 2.0*(*q_L); *q_L = fmax( fmin(q_i, q_imo), *q_L ); *q_L = fmin( fmax(q_i, q_imo), *q_L ); *q_R = fmax( fmin(q_i, q_ipo), *q_R ); *q_R = fmin( fmax(q_i, q_ipo), *q_R ); } /*! \fn calc_d2_rho * \brief Returns the second derivative of rho across zone i. (Fryxell Eqn 35) */ __device__ Real calc_d2_rho(Real rho_imo, Real rho_i, Real rho_ipo, Real dx) { return (1. / (6*dx*dx)) * (rho_ipo - 2*rho_i + rho_imo); } /*! \fn calc_eta * \brief Returns a dimensionless quantity relating the 1st and 3rd derivatives See Fryxell Eqn 36. */ __device__ Real calc_eta(Real d2rho_imo, Real d2rho_ipo, Real dx, Real rho_imo, Real rho_ipo) { Real A, B; A = (d2rho_ipo - d2rho_imo)*dx*dx; B = 1.0 / (rho_ipo - rho_imo); return -A * B; } #endif //PPMP #endif //CUDA
2c3320f4a9a217ce211036fc8f1e591cca2e97d1.hip
// !!! This is a file automatically generated by hipify!!! /* * CUDA hotplate implementation */ #include "hotplate.h" #include <stdio.h> #include <unistd.h> #include <math.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <string.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> /* calculation kernel */ __global__ void runCalc(float *old_d, float *new_d) { /* get our thread's coordinates */ int y = (blockIdx.y*blockDim.y) + threadIdx.y; int x = (blockIdx.x*blockDim.x) + threadIdx.x; /*printf("thread (%d,%d) calculating in...\n", x,y);*/ /* bail if we're on an edge... */ if((x == 0) || (x == PLATE_SIZE - 1) || (y == 0) || (y == PLATE_SIZE - 1)) { return; } /*PRINT_LINE;*/ /* calculate my spot and bail */ if(!IS_FIXED(x,y)) { new_d[LOC_H(x,y)] = (float)(old_d[LEFT_LOC_H(x,y)] + old_d[RIGHT_LOC_H(x,y)] + old_d[LOWER_LOC_H(x,y)] + old_d[UPPER_LOC_H(x,y)] + 4 * old_d[LOC_H(x,y)] ) / 8; } } /* check to see if we're in a "steady" state */ __global__ void runCheck(float *old_d, float *new_d, abool_t *allSteady_d) { /* get our coordinates */ int y = (blockIdx.y*blockDim.y) + threadIdx.y; int x = (blockIdx.x*blockDim.x) + threadIdx.x; /*printf("thread (%d,%d) checking...\n", x,y);*/ /* bail if we're on an edge... 
*/ if((x == 0) || (x == PLATE_SIZE - 1) || (y == 0) || (y == PLATE_SIZE - 1)) { return; } /* check my spot; if not steady, set the allSteady to false */ if((*allSteady_d == TRUE) && (!IS_FIXED(x,y))) { float me = new_d[LOC_H(x,y)]; float neighborAvg = (new_d[LEFT_LOC_H(x,y)] + new_d[RIGHT_LOC_H(x,y)] + new_d[LOWER_LOC_H(x,y)] + new_d[UPPER_LOC_H(x,y)]) / 4; if(fabsf(me - neighborAvg) >= STEADY_THRESHOLD) { *allSteady_d = FALSE; /*printf("(%d,%d) set allSteady_d to: %d\n", x,y,*allSteady_d);*/ } } /* END if not steady and not fixed */ } /* check for steady kernel */ int main(int argc, char *argv[]) { /* stuff we'll need */ double start; double end; double et; float *oldPlate_d; float *newPlate_d; float *oldPlate_h; float *newPlate_h; float *tmpPlate_d; abool_t *allSteady_h; abool_t *allSteady_d; int iteration = 0; oldPlate_h = (float*) calloc(PLATE_AREA, sizeof(float)); newPlate_h = (float*) calloc(PLATE_AREA, sizeof(float)); allSteady_h = (abool_t*) calloc(1,sizeof(abool_t)); hipMalloc((void**) &oldPlate_d, PLATE_AREA * sizeof(float)); hipMalloc((void**) &newPlate_d, PLATE_AREA * sizeof(float)); hipMalloc((void**) &allSteady_d, sizeof(abool_t)); /* initialize plates */ int x, y; /*printf("main at %d...\n", __LINE__);*/ for(y = 1; y < PLATE_SIZE - 1; y++) { for(x = 1; x < PLATE_SIZE - 1; x++) { oldPlate_h[LOC_H(x,y)] = WARM_START; newPlate_h[LOC_H(x,y)] = WARM_START; } } /*printf("main at %d...\n", __LINE__);*/ /* initialize the edges */ for(x = 0; x < PLATE_SIZE; x++) { /* do the bottom edge */ oldPlate_h[LOC_H(x,0)] = HOT_START; newPlate_h[LOC_H(x,0)] = HOT_START; /*printf("Column %d in row 0\n", LOC(x,0));*/ /* do the left edge */ oldPlate_h[LOC_H(0,x)] = COLD_START; newPlate_h[LOC_H(0,x)] = COLD_START; /*printf("Row %d in column 0\n", LOC(x,0));*/ /* do the right edge */ oldPlate_h[LOC_H(PLATE_SIZE-1,x)] = COLD_START; newPlate_h[LOC_H(PLATE_SIZE-1,x)] = COLD_START; /*printf("Row %d in column %d\n", LOC(x,0),PLATE_SIZE-1);*/ } /*printf("main at %d...\n", 
__LINE__);*/ /* initialize our hot row */ for(x = 0; x < FIXED_ROW_COL; x++) { oldPlate_h[LOC_H(x,FIXED_ROW)] = HOT_START; newPlate_h[LOC_H(x,FIXED_ROW)] = HOT_START; } /*printf("main at %d...\n", __LINE__);*/ /* initialize our lonely hot dot */ oldPlate_h[LOC_H(DOT_X,DOT_Y)] = HOT_START; newPlate_h[LOC_H(DOT_X,DOT_Y)] = HOT_START; /*printf("main at %d...\n", __LINE__);*/ hipMemcpy((void*)oldPlate_d, (void*) oldPlate_h, PLATE_AREA * sizeof(float), hipMemcpyHostToDevice); /*printf("main at %d...\n", __LINE__);*/ hipMemcpy((void*)newPlate_d, (void*) newPlate_h, PLATE_AREA * sizeof(float), hipMemcpyHostToDevice); /* get our grids/blocks all ready... */ dim3 calcGrid; calcGrid.x = BLOCKS_X; calcGrid.y = BLOCKS_Y; dim3 calcBlock; calcBlock.x = THREADS_X; calcBlock.y = THREADS_Y; dim3 checkGrid; checkGrid.x = BLOCKS_X; checkGrid.y = BLOCKS_Y; dim3 checkBlock; checkBlock.x = THREADS_X; checkBlock.y = THREADS_Y; start = getTime(); while((*allSteady_h != TRUE) && (iteration < MAX_ITERATION)) { /*printf("main at %d...\n", __LINE__);*/ /* run calculation kernel */ /*printf("main at %d...\n", __LINE__);*/ hipLaunchKernelGGL(( runCalc), dim3(calcGrid),dim3(calcBlock), 0, 0, oldPlate_d, newPlate_d); /* synchronize */ /* synchronize and run check kernel every other iteration */ if(iteration ^ 1) { /* XOR faster than mod... */ *allSteady_h = TRUE; /*printf("main set allSteady_h to %d\n", *allSteady_h);*/ hipMemcpy(allSteady_d, allSteady_h, sizeof(abool_t), hipMemcpyHostToDevice); hipDeviceSynchronize(); hipLaunchKernelGGL(( runCheck), dim3(checkGrid),dim3(checkBlock), 0, 0, oldPlate_d, newPlate_d, allSteady_d); } hipDeviceSynchronize(); /* swap plate pointers on the device... 
*/ tmpPlate_d = oldPlate_d; oldPlate_d = newPlate_d; newPlate_d = tmpPlate_d; /* increment iteration count */ iteration++; hipMemcpy(allSteady_h, allSteady_d, sizeof(abool_t), hipMemcpyDeviceToHost); } end = getTime(); et = end - start; printf("%d iterations in %0.4f seconds...\n", iteration, et); free(oldPlate_h); free(newPlate_h); hipFree(oldPlate_d); hipFree(newPlate_d); return 0; } /* * get a high-precision representation of the current time */ double getTime() { struct timeval tp; gettimeofday(&tp, NULL); return (double)tp.tv_sec + (double)tp.tv_usec * 1e-6; }
2c3320f4a9a217ce211036fc8f1e591cca2e97d1.cu
/* * CUDA hotplate implementation */ #include "hotplate.h" #include <stdio.h> #include <unistd.h> #include <math.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <string.h> #include <cuda.h> #include <cuda_runtime.h> /* calculation kernel */ __global__ void runCalc(float *old_d, float *new_d) { /* get our thread's coordinates */ int y = (blockIdx.y*blockDim.y) + threadIdx.y; int x = (blockIdx.x*blockDim.x) + threadIdx.x; /*printf("thread (%d,%d) calculating in...\n", x,y);*/ /* bail if we're on an edge... */ if((x == 0) || (x == PLATE_SIZE - 1) || (y == 0) || (y == PLATE_SIZE - 1)) { return; } /*PRINT_LINE;*/ /* calculate my spot and bail */ if(!IS_FIXED(x,y)) { new_d[LOC_H(x,y)] = (float)(old_d[LEFT_LOC_H(x,y)] + old_d[RIGHT_LOC_H(x,y)] + old_d[LOWER_LOC_H(x,y)] + old_d[UPPER_LOC_H(x,y)] + 4 * old_d[LOC_H(x,y)] ) / 8; } } /* check to see if we're in a "steady" state */ __global__ void runCheck(float *old_d, float *new_d, abool_t *allSteady_d) { /* get our coordinates */ int y = (blockIdx.y*blockDim.y) + threadIdx.y; int x = (blockIdx.x*blockDim.x) + threadIdx.x; /*printf("thread (%d,%d) checking...\n", x,y);*/ /* bail if we're on an edge... 
*/ if((x == 0) || (x == PLATE_SIZE - 1) || (y == 0) || (y == PLATE_SIZE - 1)) { return; } /* check my spot; if not steady, set the allSteady to false */ if((*allSteady_d == TRUE) && (!IS_FIXED(x,y))) { float me = new_d[LOC_H(x,y)]; float neighborAvg = (new_d[LEFT_LOC_H(x,y)] + new_d[RIGHT_LOC_H(x,y)] + new_d[LOWER_LOC_H(x,y)] + new_d[UPPER_LOC_H(x,y)]) / 4; if(fabsf(me - neighborAvg) >= STEADY_THRESHOLD) { *allSteady_d = FALSE; /*printf("(%d,%d) set allSteady_d to: %d\n", x,y,*allSteady_d);*/ } } /* END if not steady and not fixed */ } /* check for steady kernel */ int main(int argc, char *argv[]) { /* stuff we'll need */ double start; double end; double et; float *oldPlate_d; float *newPlate_d; float *oldPlate_h; float *newPlate_h; float *tmpPlate_d; abool_t *allSteady_h; abool_t *allSteady_d; int iteration = 0; oldPlate_h = (float*) calloc(PLATE_AREA, sizeof(float)); newPlate_h = (float*) calloc(PLATE_AREA, sizeof(float)); allSteady_h = (abool_t*) calloc(1,sizeof(abool_t)); cudaMalloc((void**) &oldPlate_d, PLATE_AREA * sizeof(float)); cudaMalloc((void**) &newPlate_d, PLATE_AREA * sizeof(float)); cudaMalloc((void**) &allSteady_d, sizeof(abool_t)); /* initialize plates */ int x, y; /*printf("main at %d...\n", __LINE__);*/ for(y = 1; y < PLATE_SIZE - 1; y++) { for(x = 1; x < PLATE_SIZE - 1; x++) { oldPlate_h[LOC_H(x,y)] = WARM_START; newPlate_h[LOC_H(x,y)] = WARM_START; } } /*printf("main at %d...\n", __LINE__);*/ /* initialize the edges */ for(x = 0; x < PLATE_SIZE; x++) { /* do the bottom edge */ oldPlate_h[LOC_H(x,0)] = HOT_START; newPlate_h[LOC_H(x,0)] = HOT_START; /*printf("Column %d in row 0\n", LOC(x,0));*/ /* do the left edge */ oldPlate_h[LOC_H(0,x)] = COLD_START; newPlate_h[LOC_H(0,x)] = COLD_START; /*printf("Row %d in column 0\n", LOC(x,0));*/ /* do the right edge */ oldPlate_h[LOC_H(PLATE_SIZE-1,x)] = COLD_START; newPlate_h[LOC_H(PLATE_SIZE-1,x)] = COLD_START; /*printf("Row %d in column %d\n", LOC(x,0),PLATE_SIZE-1);*/ } /*printf("main at %d...\n", 
__LINE__);*/ /* initialize our hot row */ for(x = 0; x < FIXED_ROW_COL; x++) { oldPlate_h[LOC_H(x,FIXED_ROW)] = HOT_START; newPlate_h[LOC_H(x,FIXED_ROW)] = HOT_START; } /*printf("main at %d...\n", __LINE__);*/ /* initialize our lonely hot dot */ oldPlate_h[LOC_H(DOT_X,DOT_Y)] = HOT_START; newPlate_h[LOC_H(DOT_X,DOT_Y)] = HOT_START; /*printf("main at %d...\n", __LINE__);*/ cudaMemcpy((void*)oldPlate_d, (void*) oldPlate_h, PLATE_AREA * sizeof(float), cudaMemcpyHostToDevice); /*printf("main at %d...\n", __LINE__);*/ cudaMemcpy((void*)newPlate_d, (void*) newPlate_h, PLATE_AREA * sizeof(float), cudaMemcpyHostToDevice); /* get our grids/blocks all ready... */ dim3 calcGrid; calcGrid.x = BLOCKS_X; calcGrid.y = BLOCKS_Y; dim3 calcBlock; calcBlock.x = THREADS_X; calcBlock.y = THREADS_Y; dim3 checkGrid; checkGrid.x = BLOCKS_X; checkGrid.y = BLOCKS_Y; dim3 checkBlock; checkBlock.x = THREADS_X; checkBlock.y = THREADS_Y; start = getTime(); while((*allSteady_h != TRUE) && (iteration < MAX_ITERATION)) { /*printf("main at %d...\n", __LINE__);*/ /* run calculation kernel */ /*printf("main at %d...\n", __LINE__);*/ runCalc<<<calcGrid,calcBlock>>>(oldPlate_d, newPlate_d); /* synchronize */ /* synchronize and run check kernel every other iteration */ if(iteration ^ 1) { /* XOR faster than mod... */ *allSteady_h = TRUE; /*printf("main set allSteady_h to %d\n", *allSteady_h);*/ cudaMemcpy(allSteady_d, allSteady_h, sizeof(abool_t), cudaMemcpyHostToDevice); cudaThreadSynchronize(); runCheck<<<checkGrid,checkBlock>>>(oldPlate_d, newPlate_d, allSteady_d); } cudaThreadSynchronize(); /* swap plate pointers on the device... 
*/ tmpPlate_d = oldPlate_d; oldPlate_d = newPlate_d; newPlate_d = tmpPlate_d; /* increment iteration count */ iteration++; cudaMemcpy(allSteady_h, allSteady_d, sizeof(abool_t), cudaMemcpyDeviceToHost); } end = getTime(); et = end - start; printf("%d iterations in %0.4f seconds...\n", iteration, et); free(oldPlate_h); free(newPlate_h); cudaFree(oldPlate_d); cudaFree(newPlate_d); return 0; } /* * get a high-precision representation of the current time */ double getTime() { struct timeval tp; gettimeofday(&tp, NULL); return (double)tp.tv_sec + (double)tp.tv_usec * 1e-6; }
ee23e090474e7efed871d7088d416d1e021f538b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int row = threadIdx.y + blockIdx.y*blockDim.y; int col = threadIdx.x + blockIdx.x*blockDim.x; if ( row<numRows && col<numCols) { int index = numCols * row + col; uchar4 color = rgbaImage[index]; unsigned char grey = (unsigned char) (0.299f*color.x+ 0.587f*color.y + 0.114f*color.z); greyImage[index] = grey; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int blockWidth = 32; int blockHeight = 32; const dim3 blockSize(blockWidth, blockHeight, 1); //TODO int blocksX = numCols/blockWidth + 1; int blocksY = numRows/blockHeight + 1; const dim3 gridSize( blocksX, blocksY, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
ee23e090474e7efed871d7088d416d1e021f538b.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int row = threadIdx.y + blockIdx.y*blockDim.y; int col = threadIdx.x + blockIdx.x*blockDim.x; if ( row<numRows && col<numCols) { int index = numCols * row + col; uchar4 color = rgbaImage[index]; unsigned char grey = (unsigned char) (0.299f*color.x+ 0.587f*color.y + 0.114f*color.z); greyImage[index] = grey; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int blockWidth = 32; int blockHeight = 32; const dim3 blockSize(blockWidth, blockHeight, 1); //TODO int blocksX = numCols/blockWidth + 1; int blocksY = numRows/blockHeight + 1; const dim3 gridSize( blocksX, blocksY, 1); //TODO rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
fe684f50c9a7472ad035fac1b3c72430cc8d5d59.hip
// !!! This is a file automatically generated by hipify!!! nclude <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "hip/hip_runtime.h" #include "functions.c" __device__ unsigned int modprodC(unsigned int a, unsigned int b, unsigned int p) { unsigned int za = a; unsigned int ab = 0; while (b > 0) { if (b%2 == 1) ab = (ab + za) % p; za = (2 * za) % p; b /= 2; } return ab; } //compute a^b mod p safely __device__ unsigned int modExpC(unsigned int a, unsigned int b, unsigned int p) { unsigned int z = a; unsigned int aExpb = 1; while (b > 0) { if (b%2 == 1) aExpb = modprodC(aExpb, z, p); z = modprodC(z, z, p); b /= 2; } return aExpb; } __global__ void findX(unsigned int p, unsigned int g, unsigned int h, unsigned int *x) { //unsigned int block = blockIdx.x; //unsigned int blocksize = blockDim.x; //unsigned int thread = threadIdx.x; //unsigned int id=thread + block*blocksize; if (*x==0 || modExpC(g,*x,p)!=h) { printf("Finding the secret key...\n"); double startTime = clock(); for (unsigned int i=0;i<p-1;i++) { if (modExpC(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i+1); *x=i+1; } } double endTime = clock(); double totalTime = (endTime-startTime)/CLOCKS_PER_SEC; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); } } int main (int argc, char **argv) { /* Part 2. Start this program by first copying the contents of the main function from your completed decrypt.c main function. */ /* Q4 Make the search for the secret key parallel on the GPU using CUDA. */ //declare storage for an ElGamal cryptosytem unsigned int n, p, g, h, x; unsigned int Nints; //get the secret key from the user printf("Enter the secret key (0 if unknown): "); fflush(stdout); char stat = scanf("%u",&x); printf("Reading file.\n"); /* Q3 Complete this function. 
Read in the public key data from public_key.txt and the cyphertexts from messages.txt. */ FILE *pub_key = fopen("public_key.txt","r"); FILE *cyperT = fopen("message.txt","r"); fscanf(pub_key,"%u\n%u\n%u\n%u",&n,&p,&g,&h); fclose(pub_key); fscanf(cyperT,"%u\n",&Nints); unsigned int *a=(unsigned int *) malloc(Nints*sizeof(unsigned int)); unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); for(unsigned int i=0;i<Nints;i++) { fscanf(cyperT,"%u %u\n",&Zmessage[i],&a[i]); } fclose(cyperT); // find the secret key unsigned int Nthreads = Nints; unsigned int Nblocks = (n+Nthreads-1)/Nthreads; hipMalloc((void**)&x,1*sizeof(unsigned int)); printf("%u\n",x);hipLaunchKernelGGL(( findX), dim3(Nthreads),dim3(Nblocks) , 0, 0, p,g,h,&x); //hipDeviceSynchronize(); printf("x:%u\n",x); unsigned int foundx; /* Q3 After finding the secret key, decrypt the message */ hipMemcpy(&x,&foundx,1*sizeof(unsigned int),hipMemcpyHostToDevice); printf("secret key:%u\n",foundx); ElGamalDecrypt(Zmessage,a,Nints,p,foundx); unsigned char *message = (unsigned char *) malloc(Nints*sizeof(unsigned char)); unsigned int charsPerInt = (n-1)/8; unsigned int Nchars = Nints*charsPerInt; convertZToString(Zmessage,Nints,message,Nchars); printf("Decrypted Message = \"%s\"\n",message); hipFree(&x); return 0; }
fe684f50c9a7472ad035fac1b3c72430cc8d5d59.cu
nclude <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "cuda.h" #include "functions.c" __device__ unsigned int modprodC(unsigned int a, unsigned int b, unsigned int p) { unsigned int za = a; unsigned int ab = 0; while (b > 0) { if (b%2 == 1) ab = (ab + za) % p; za = (2 * za) % p; b /= 2; } return ab; } //compute a^b mod p safely __device__ unsigned int modExpC(unsigned int a, unsigned int b, unsigned int p) { unsigned int z = a; unsigned int aExpb = 1; while (b > 0) { if (b%2 == 1) aExpb = modprodC(aExpb, z, p); z = modprodC(z, z, p); b /= 2; } return aExpb; } __global__ void findX(unsigned int p, unsigned int g, unsigned int h, unsigned int *x) { //unsigned int block = blockIdx.x; //unsigned int blocksize = blockDim.x; //unsigned int thread = threadIdx.x; //unsigned int id=thread + block*blocksize; if (*x==0 || modExpC(g,*x,p)!=h) { printf("Finding the secret key...\n"); double startTime = clock(); for (unsigned int i=0;i<p-1;i++) { if (modExpC(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i+1); *x=i+1; } } double endTime = clock(); double totalTime = (endTime-startTime)/CLOCKS_PER_SEC; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); } } int main (int argc, char **argv) { /* Part 2. Start this program by first copying the contents of the main function from your completed decrypt.c main function. */ /* Q4 Make the search for the secret key parallel on the GPU using CUDA. */ //declare storage for an ElGamal cryptosytem unsigned int n, p, g, h, x; unsigned int Nints; //get the secret key from the user printf("Enter the secret key (0 if unknown): "); fflush(stdout); char stat = scanf("%u",&x); printf("Reading file.\n"); /* Q3 Complete this function. Read in the public key data from public_key.txt and the cyphertexts from messages.txt. 
*/ FILE *pub_key = fopen("public_key.txt","r"); FILE *cyperT = fopen("message.txt","r"); fscanf(pub_key,"%u\n%u\n%u\n%u",&n,&p,&g,&h); fclose(pub_key); fscanf(cyperT,"%u\n",&Nints); unsigned int *a=(unsigned int *) malloc(Nints*sizeof(unsigned int)); unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); for(unsigned int i=0;i<Nints;i++) { fscanf(cyperT,"%u %u\n",&Zmessage[i],&a[i]); } fclose(cyperT); // find the secret key unsigned int Nthreads = Nints; unsigned int Nblocks = (n+Nthreads-1)/Nthreads; cudaMalloc((void**)&x,1*sizeof(unsigned int)); printf("%u\n",x); findX<<< Nthreads,Nblocks >>>(p,g,h,&x); //cudaDeviceSynchronize(); printf("x:%u\n",x); unsigned int foundx; /* Q3 After finding the secret key, decrypt the message */ cudaMemcpy(&x,&foundx,1*sizeof(unsigned int),cudaMemcpyHostToDevice); printf("secret key:%u\n",foundx); ElGamalDecrypt(Zmessage,a,Nints,p,foundx); unsigned char *message = (unsigned char *) malloc(Nints*sizeof(unsigned char)); unsigned int charsPerInt = (n-1)/8; unsigned int Nchars = Nints*charsPerInt; convertZToString(Zmessage,Nints,message,Nchars); printf("Decrypted Message = \"%s\"\n",message); cudaFree(&x); return 0; }
5a6cdc67213e1a6e83a31c040bde26f6654af2fc.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathPointwise.cu" #else #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(real* out, real* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(real* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); \ if (self_ == src) { \ if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(hipGetLastError()); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<real>::log, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<real>::log1p, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<real>::exp, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<real>::cos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<real>::sin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<real>::sqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<real>::rsqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<real>::ceil, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<real>::floor, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<real>::trunc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<real>::neg, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<real>::acos, 
Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<real>::cosh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<real>::asin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<real>::sinh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<real>::tan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<real>::atan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<real>::tanh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(round, THCNumerics<real>::round, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<real>::frac, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<real>::cinv, Real) #endif IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<real>::abs, Real) #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorSignOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorSignOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, real min_value, real max_value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorClampOp<real>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorClampOp<real>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(cross)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THAssert(THCTensor_(checkGPU)(state, 3, self, x, y)); int i; long nd = 
THCTensor_(nDimension)(state, x); ptrdiff_t nelem = THCTensor_(nElement)(state, x); THArgCheck(nd == THCTensor_(nDimension)(state, y), 1, "tensors must have same number of dimensions"); for (i = 0; i < nd; i++) { THArgCheck(THCTensor_(size)(state, x, i) == THCTensor_(size)(state, y, i), 1, "dimension %i of x and y does not match", i); if (dimension < 0 && THCTensor_(size)(state, x, i) == 3) { dimension = i; } } THArgCheck(dimension >= 0 && dimension < nd, 3, "dimension %d out of range", dimension+1); THArgCheck(THCTensor_(size)(state, x, dimension) == 3, 3, "dimension %d does not have size 3", dimension+1); THCTensor_(resizeAs)(state, self, x); long sx = THCTensor_(stride)(state, x, dimension); long sy = THCTensor_(stride)(state, y, dimension); long so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3(state, nself, nx, ny, TensorCrossOp<real>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, 
TensorPowOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } void THCTensor_(tpow)(THCState *state, THCTensor *self_, real value, THCTensor *src) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorTPowOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorTPowOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, real w) { THAssert(THCTensor_(checkGPU)(state, 3, result, a, b)); THArgCheck(THCTensor_(nElement)(state, a) == THCTensor_(nElement)(state, b), 3, "sizes do not match"); THCTensor_(resizeAs)(state, result, a); if (!THC_pointwiseApply3(state, result, a, b, TensorLerpOp<real>(w))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } #endif THC_API void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { if (value == ScalarConvert<int, real>::to(1)) { // self += src2 if (!THC_pointwiseApply2(state, self_, src2, TensorAddOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self += value * src2 if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src1); if (value == ScalarConvert<int, real>::to(1)) { // self = src1 + src2 if 
(!THC_pointwiseApply3(state, self_, src1, src2, TensorAddOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self = src1 + value * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { if (value == ScalarConvert<int, real>::to(1)) { // self -= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorSubOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self += -value * src2 if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>( ScalarNegate<real>::to(value)))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src1); if (value == ScalarConvert<int, real>::to(1)) { // self = src1 - src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorSubOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self = src1 - value * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>( ScalarNegate<real>::to(value)))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self *= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorMulOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, 
TensorMulOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self = pow(self, src2) if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = pow(src1, src2) if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self *= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorDivOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorDivOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorMaxOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorMaxOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } 
THC_API void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorMinOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorMinOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1(state, self, TensorMaxValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2(state, self, src, TensorMaxValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1(state, self, TensorMinValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2(state, self, src, TensorMinValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2)); if(self_ != t) { THCTensor_(resizeAs)(state, self_, t); THCTensor_(copy)(state, self_, t); } else { THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1), 1, "sizes do not match"); } THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes 
do not match"); if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2)); if(self_ != t) { THCTensor_(resizeAs)(state, self_, t); THCTensor_(copy)(state, self_, t); } else { THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1), 1, "sizes do not match"); } THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } #endif
5a6cdc67213e1a6e83a31c040bde26f6654af2fc.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathPointwise.cu" #else #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(real* out, real* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(real* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); \ if (self_ == src) { \ if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(cudaGetLastError()); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<real>::log, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<real>::log1p, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<real>::exp, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<real>::cos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<real>::sin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<real>::sqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<real>::rsqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<real>::ceil, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<real>::floor, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<real>::trunc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<real>::neg, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<real>::acos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, 
THCNumerics<real>::cosh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<real>::asin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<real>::sinh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<real>::tan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<real>::atan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<real>::tanh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(round, THCNumerics<real>::round, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<real>::frac, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<real>::cinv, Real) #endif IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<real>::abs, Real) #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorSignOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorSignOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, real min_value, real max_value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorClampOp<real>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorClampOp<real>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(cross)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THAssert(THCTensor_(checkGPU)(state, 3, self, x, y)); int i; long nd = THCTensor_(nDimension)(state, x); ptrdiff_t nelem = 
THCTensor_(nElement)(state, x); THArgCheck(nd == THCTensor_(nDimension)(state, y), 1, "tensors must have same number of dimensions"); for (i = 0; i < nd; i++) { THArgCheck(THCTensor_(size)(state, x, i) == THCTensor_(size)(state, y, i), 1, "dimension %i of x and y does not match", i); if (dimension < 0 && THCTensor_(size)(state, x, i) == 3) { dimension = i; } } THArgCheck(dimension >= 0 && dimension < nd, 3, "dimension %d out of range", dimension+1); THArgCheck(THCTensor_(size)(state, x, dimension) == 3, 3, "dimension %d does not have size 3", dimension+1); THCTensor_(resizeAs)(state, self, x); long sx = THCTensor_(stride)(state, x, dimension); long sy = THCTensor_(stride)(state, y, dimension); long so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3(state, nself, nx, ny, TensorCrossOp<real>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorPowOp<real>(value))) { THArgCheck(false, 2, 
CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(tpow)(THCState *state, THCTensor *self_, real value, THCTensor *src) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorTPowOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorTPowOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, real w) { THAssert(THCTensor_(checkGPU)(state, 3, result, a, b)); THArgCheck(THCTensor_(nElement)(state, a) == THCTensor_(nElement)(state, b), 3, "sizes do not match"); THCTensor_(resizeAs)(state, result, a); if (!THC_pointwiseApply3(state, result, a, b, TensorLerpOp<real>(w))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } #endif THC_API void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { if (value == ScalarConvert<int, real>::to(1)) { // self += src2 if (!THC_pointwiseApply2(state, self_, src2, TensorAddOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self += value * src2 if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src1); if (value == ScalarConvert<int, real>::to(1)) { // self = src1 + src2 if (!THC_pointwiseApply3(state, self_, src1, src2, 
TensorAddOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self = src1 + value * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { if (value == ScalarConvert<int, real>::to(1)) { // self -= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorSubOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self += -value * src2 if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>( ScalarNegate<real>::to(value)))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src1); if (value == ScalarConvert<int, real>::to(1)) { // self = src1 - src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorSubOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self = src1 - value * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>( ScalarNegate<real>::to(value)))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self *= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorMulOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorMulOp<real>())) { THArgCheck(false, 2, 
CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self = pow(self, src2) if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = pow(src1, src2) if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self *= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorDivOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorDivOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorMaxOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorMaxOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cmin)(THCState 
*state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorMinOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorMinOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1(state, self, TensorMaxValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2(state, self, src, TensorMaxValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1(state, self, TensorMinValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2(state, self, src, TensorMinValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2)); if(self_ != t) { THCTensor_(resizeAs)(state, self_, t); THCTensor_(copy)(state, self_, t); } else { THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1), 1, "sizes do not match"); } THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if 
(!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2) { THAssert(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2)); if(self_ != t) { THCTensor_(resizeAs)(state, self_, t); THCTensor_(copy)(state, self_, t); } else { THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1), 1, "sizes do not match"); } THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } #endif
8c6b7c60c40378f316ec56324d181d93a742d0a6.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <hipfft.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "knn_hip.cuh" //#include "gpu_data.h" #define gpu_errchk(ans) { gpu_assert((ans), __FILE__, __LINE__); } inline void gpu_assert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "gpu_assert: %s %s %d\n", hipGetErrorString(code), file, line); exit(code); } } #define BW 1024 template<typename T> void cudaMemsetType(T *dev_ptr, T val, int n_vals) { thrust::device_ptr<T> thrust_dev_ptr(dev_ptr); thrust::fill(thrust_dev_ptr, thrust_dev_ptr + n_vals, val); } // Uses reduction to quickly add things. __global__ void correlationKernel(float* cij, float*sum, int total_size) { extern __shared__ float shmem[]; // atomically add the accumulated loss per block into the global accumulator uint s_thread_index = threadIdx.x; uint thread_index = blockIdx.x * blockDim.x + threadIdx.x; while(thread_index < total_size) { shmem[s_thread_index] = cij[thread_index]; thread_index += blockDim.x * gridDim.x; } __syncthreads(); for(int stride = blockDim.x/2; stride > 0; stride /= 2) { if(s_thread_index < stride) { shmem[s_thread_index] += shmem[s_thread_index + stride]; } __syncthreads(); } if (threadIdx.x == 0){ atomicAdd(sum, shmem[0]); } } // Called to get the sum from correlationKernel. // cij is a lot larger than total_size, but we only want that much of it. 
float correlationKernelSum(float* cij, int total_size) { // Inialize loss on the device to be zero float sum, *d_sum; gpu_errchk( hipMalloc(&d_sum, sizeof(float)) ); cudaMemsetType<float>(d_sum, 0.0, 1); float *gpu_cij; gpu_errchk( hipMalloc(&gpu_cij, total_size * sizeof(float)) ); gpu_errchk(hipMemcpy(gpu_cij, cij, total_size * sizeof(float), hipMemcpyHostToDevice)); // Accumulate the total loss on the device by invoking a kernel int n_blocks = ::min(65535, (total_size + BW - 1) / BW); hipLaunchKernelGGL(( correlationKernel) , dim3(n_blocks), dim3(BW), BW * sizeof(float), 0, gpu_cij, d_sum, total_size); gpu_errchk( hipMemcpy(&sum, d_sum, sizeof(float), hipMemcpyDeviceToHost) ); gpu_errchk( hipFree(d_sum) ); gpu_errchk( hipFree(gpu_cij) ); // Return the sum return sum; } // This using threads to help us sort in parallel. __global__ void mergeSortKernel( float *src, float*dst, float * followsrc, float * followdst, int section, int num_section, int total_size) { uint thread_index = blockIdx.x * blockDim.x + threadIdx.x; int low = thread_index * section * num_section; int mid, hi; int slice = 0; // Now we do stuff for each section. while(slice < num_section && low < total_size) { mid = min(low + section/2, total_size); hi = min(low + section, total_size); merge(src, dst, followsrc, followdst,low, mid, hi); low += section; slice ++; } } /* * This function merges 2 lists [low to mid), [mid to hi) not in places. 
*/ __device__ void merge(float *src, float *dst, float* followsrc, float* followdst, int low, int mid, int hi) { int a_counter = low; int b_counter = mid; for (int i = low; i < hi; i++) { if (a_counter < mid && (b_counter >= hi || src[a_counter] > src[b_counter])) { dst[i] = src[a_counter]; followdst[i] = followsrc[a_counter]; a_counter ++; } else { if (src[a_counter] == src[b_counter]) { if (followsrc[a_counter] > followsrc[b_counter]) { dst[i] = src[a_counter]; followdst[i] = followsrc[a_counter]; a_counter ++; } else { dst[i] = src[b_counter]; followdst[i] = followsrc[b_counter]; b_counter ++; } } else { dst[i] = src[b_counter]; followdst[i] = followsrc[b_counter]; b_counter ++; } } } } /* * This function allocates memory for the device objects, and calls the merge kernel. * Here, we start with small sections and sort them, and doubling the section size each time. */ void callMergeKernel(const unsigned int blocks, const unsigned int threadsPerBlock, float * cij, float * cijr, int total_size) { //Allocate GPU... 
float *gpu_src; gpu_errchk(hipMalloc((void **) &gpu_src, total_size * sizeof(float))); gpu_errchk(hipMemcpy(gpu_src, cij, total_size * sizeof(float), hipMemcpyHostToDevice)); float *gpu_dst; gpu_errchk(hipMalloc((void **) &gpu_dst, total_size * sizeof(float))); gpu_errchk(hipMemset(gpu_dst, 0, total_size * sizeof(float))); float *gpu_fsrc; gpu_errchk(hipMalloc((void **) &gpu_fsrc, total_size * sizeof(float))); gpu_errchk(hipMemcpy(gpu_fsrc, cijr, total_size * sizeof(float), hipMemcpyHostToDevice)); float *gpu_fdst; gpu_errchk(hipMalloc((void **) &gpu_fdst, total_size * sizeof(float))); gpu_errchk(hipMemset(gpu_fdst, 0, total_size * sizeof(float))); int total_threads = blocks * threadsPerBlock; for (int section = 2; section< total_size *2; section <<= 1) { int num_section = total_size / ((total_threads) * section) + 1; hipLaunchKernelGGL(( mergeSortKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, gpu_src, gpu_dst, gpu_fsrc, gpu_fdst, section, num_section, total_size); float *temp = gpu_dst; gpu_dst = gpu_src; gpu_src = temp; temp = gpu_fdst; gpu_fdst = gpu_fsrc; gpu_fsrc = temp; } gpu_errchk(hipMemcpy(cij, gpu_src, total_size * sizeof(float), hipMemcpyDeviceToHost)); gpu_errchk(hipMemcpy(cijr, gpu_fsrc, total_size * sizeof(float), hipMemcpyDeviceToHost)); gpu_errchk( hipFree(gpu_fsrc) ); gpu_errchk( hipFree(gpu_src) ); gpu_errchk( hipFree(gpu_dst) ); gpu_errchk( hipFree(gpu_fdst) ); }
8c6b7c60c40378f316ec56324d181d93a742d0a6.cu
#include <cstdio> #include <cuda_runtime.h> #include <cufft.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "knn.cuh" //#include "gpu_data.h" #define gpu_errchk(ans) { gpu_assert((ans), __FILE__, __LINE__); } inline void gpu_assert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "gpu_assert: %s %s %d\n", cudaGetErrorString(code), file, line); exit(code); } } #define BW 1024 template<typename T> void cudaMemsetType(T *dev_ptr, T val, int n_vals) { thrust::device_ptr<T> thrust_dev_ptr(dev_ptr); thrust::fill(thrust_dev_ptr, thrust_dev_ptr + n_vals, val); } // Uses reduction to quickly add things. __global__ void correlationKernel(float* cij, float*sum, int total_size) { extern __shared__ float shmem[]; // atomically add the accumulated loss per block into the global accumulator uint s_thread_index = threadIdx.x; uint thread_index = blockIdx.x * blockDim.x + threadIdx.x; while(thread_index < total_size) { shmem[s_thread_index] = cij[thread_index]; thread_index += blockDim.x * gridDim.x; } __syncthreads(); for(int stride = blockDim.x/2; stride > 0; stride /= 2) { if(s_thread_index < stride) { shmem[s_thread_index] += shmem[s_thread_index + stride]; } __syncthreads(); } if (threadIdx.x == 0){ atomicAdd(sum, shmem[0]); } } // Called to get the sum from correlationKernel. // cij is a lot larger than total_size, but we only want that much of it. 
float correlationKernelSum(float* cij, int total_size) { // Inialize loss on the device to be zero float sum, *d_sum; gpu_errchk( cudaMalloc(&d_sum, sizeof(float)) ); cudaMemsetType<float>(d_sum, 0.0, 1); float *gpu_cij; gpu_errchk( cudaMalloc(&gpu_cij, total_size * sizeof(float)) ); gpu_errchk(cudaMemcpy(gpu_cij, cij, total_size * sizeof(float), cudaMemcpyHostToDevice)); // Accumulate the total loss on the device by invoking a kernel int n_blocks = std::min(65535, (total_size + BW - 1) / BW); correlationKernel <<<n_blocks, BW, BW * sizeof(float)>>>(gpu_cij, d_sum, total_size); gpu_errchk( cudaMemcpy(&sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost) ); gpu_errchk( cudaFree(d_sum) ); gpu_errchk( cudaFree(gpu_cij) ); // Return the sum return sum; } // This using threads to help us sort in parallel. __global__ void mergeSortKernel( float *src, float*dst, float * followsrc, float * followdst, int section, int num_section, int total_size) { uint thread_index = blockIdx.x * blockDim.x + threadIdx.x; int low = thread_index * section * num_section; int mid, hi; int slice = 0; // Now we do stuff for each section. while(slice < num_section && low < total_size) { mid = min(low + section/2, total_size); hi = min(low + section, total_size); merge(src, dst, followsrc, followdst,low, mid, hi); low += section; slice ++; } } /* * This function merges 2 lists [low to mid), [mid to hi) not in places. 
*/ __device__ void merge(float *src, float *dst, float* followsrc, float* followdst, int low, int mid, int hi) { int a_counter = low; int b_counter = mid; for (int i = low; i < hi; i++) { if (a_counter < mid && (b_counter >= hi || src[a_counter] > src[b_counter])) { dst[i] = src[a_counter]; followdst[i] = followsrc[a_counter]; a_counter ++; } else { if (src[a_counter] == src[b_counter]) { if (followsrc[a_counter] > followsrc[b_counter]) { dst[i] = src[a_counter]; followdst[i] = followsrc[a_counter]; a_counter ++; } else { dst[i] = src[b_counter]; followdst[i] = followsrc[b_counter]; b_counter ++; } } else { dst[i] = src[b_counter]; followdst[i] = followsrc[b_counter]; b_counter ++; } } } } /* * This function allocates memory for the device objects, and calls the merge kernel. * Here, we start with small sections and sort them, and doubling the section size each time. */ void callMergeKernel(const unsigned int blocks, const unsigned int threadsPerBlock, float * cij, float * cijr, int total_size) { //Allocate GPU... 
float *gpu_src; gpu_errchk(cudaMalloc((void **) &gpu_src, total_size * sizeof(float))); gpu_errchk(cudaMemcpy(gpu_src, cij, total_size * sizeof(float), cudaMemcpyHostToDevice)); float *gpu_dst; gpu_errchk(cudaMalloc((void **) &gpu_dst, total_size * sizeof(float))); gpu_errchk(cudaMemset(gpu_dst, 0, total_size * sizeof(float))); float *gpu_fsrc; gpu_errchk(cudaMalloc((void **) &gpu_fsrc, total_size * sizeof(float))); gpu_errchk(cudaMemcpy(gpu_fsrc, cijr, total_size * sizeof(float), cudaMemcpyHostToDevice)); float *gpu_fdst; gpu_errchk(cudaMalloc((void **) &gpu_fdst, total_size * sizeof(float))); gpu_errchk(cudaMemset(gpu_fdst, 0, total_size * sizeof(float))); int total_threads = blocks * threadsPerBlock; for (int section = 2; section< total_size *2; section <<= 1) { int num_section = total_size / ((total_threads) * section) + 1; mergeSortKernel<<<blocks, threadsPerBlock>>>(gpu_src, gpu_dst, gpu_fsrc, gpu_fdst, section, num_section, total_size); float *temp = gpu_dst; gpu_dst = gpu_src; gpu_src = temp; temp = gpu_fdst; gpu_fdst = gpu_fsrc; gpu_fsrc = temp; } gpu_errchk(cudaMemcpy(cij, gpu_src, total_size * sizeof(float), cudaMemcpyDeviceToHost)); gpu_errchk(cudaMemcpy(cijr, gpu_fsrc, total_size * sizeof(float), cudaMemcpyDeviceToHost)); gpu_errchk( cudaFree(gpu_fsrc) ); gpu_errchk( cudaFree(gpu_src) ); gpu_errchk( cudaFree(gpu_dst) ); gpu_errchk( cudaFree(gpu_fdst) ); }
2fc6e4f7dd2b3d1183432461364a7f4a8b9eae16.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/HIPGraphsUtils.cuh> #include <c10/macros/Macros.h> #include <hiprand/hiprand_kernel.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/MemoryAccess.cuh> namespace at{ namespace native{ namespace { // philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4 // for all members of float4 to be consumed UNROLL has to be 4. Don't change! // Note: VEC <= 4 (and in most real-world cases will be 4), so same logic applies. const int UNROLL = 4; template < typename scalar_t, typename accscalar_t, typename IndexType, int ADims, int VEC, typename mask_t> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(256, 4) #endif __global__ void fused_dropout_kernel_vec(at::cuda::detail::TensorInfo<scalar_t, IndexType> a, at::cuda::detail::TensorInfo<scalar_t, IndexType> b, at::cuda::detail::TensorInfo<mask_t, IndexType> c, IndexType totalElements, accscalar_t p, PhiloxCudaState philox_args) { // make sure we don't break assumption that we can't have > 4 elements / thread static_assert(VEC <= 4, "Value of VEC must be in [2, 4]"); using LoadT = memory::aligned_vector<scalar_t, VEC>; using MaskLoadT = memory::aligned_vector<mask_t, VEC>; auto seeds = at::cuda::philox::unpack(philox_args); IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init(std::get<0>(seeds), idx, std::get<1>(seeds), &state); // Helps align the total number of times hiprand_uniform4 is called by each thread for the same totalElements // in the vec=2 and vec=4 cases. 
bool gridxvec_loop_state = 0; accscalar_t scale = 1.0 / p; float4 rand; // Note: Vectorized loads means we'll stride each thread by an additional VEC factor, as we'll load VEC elements at a time for (IndexType linearIndex = idx * VEC; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x * VEC) { // local storage scalar_t src[VEC]; // We'll use this to actually cause vectorized loads later LoadT *value = reinterpret_cast<LoadT*>(&src); //hiprand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything // Note: need a new set of random values per 4 elements -- we'll handle VEC elements in this thread, so need ceil(VEC / 4) // sets of rand. if ((VEC == 4) || (gridxvec_loop_state == 0)) { rand = hiprand_uniform4(&state); } else { // sets up the last two values we generated last iteration to be used this iteration. rand.x = rand.z; rand.y = rand.w; gridxvec_loop_state ^= 1; } rand.x = rand.x < p; rand.y = rand.y < p; if (VEC == 4) { rand.z = rand.z < p; rand.w = rand.w < p; } // Note: We explicitly check for is_contiguous() before launching the vectorized kernel // and replace IndexToOffset call with linearIndex to allow vectorization of NHWC (or other) // ordering. 
// Single vectorized load *value = *reinterpret_cast<LoadT*>(&a.data[linearIndex]); scalar_t r[VEC]; mask_t mask[VEC]; // Perform the actual computation #pragma unroll for (int ii = 0; ii < VEC; ii++) { r[ii] = src[ii]*(&rand.x)[ii]*scale; mask[ii] = (mask_t)(&rand.x)[ii]; } // Vectorized writes for both mask & result *(reinterpret_cast<LoadT*>(&b.data[linearIndex])) = *reinterpret_cast<LoadT*>(&r[0]); *(reinterpret_cast<MaskLoadT*>(&c.data[linearIndex])) = *reinterpret_cast<MaskLoadT*>(&mask[0]); __syncthreads(); } } template < typename scalar_t, typename accscalar_t, typename IndexType, int ADims, int BDims = ADims, typename mask_t> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(256, 4) #endif __global__ void fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a, cuda::detail::TensorInfo<scalar_t, IndexType> b, cuda::detail::TensorInfo<mask_t, IndexType> c, IndexType totalElements, accscalar_t p, PhiloxCudaState philox_args) { auto seeds = at::cuda::philox::unpack(philox_args); IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init(std::get<0>(seeds), idx, std::get<1>(seeds), &state); accscalar_t scale = 1.0 / p; IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) * blockDim.x * gridDim.x * UNROLL; for (IndexType linearIndex = idx; linearIndex < rounded_size; linearIndex += gridDim.x * blockDim.x*UNROLL) { //hiprand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything float4 rand = hiprand_uniform4(&state); scalar_t src[UNROLL]; rand.x = rand.x < p; rand.y = rand.y < p; rand.z = rand.z < p; rand.w = rand.w < p; for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // Convert `linearIndex` into an offset of `a` const IndexType aOffset = cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a); src[ii] 
= a.data[aOffset]; } } for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = cuda::detail::IndexToOffset<scalar_t, IndexType, BDims>::get(li, b); b.data[bOffset] = src[ii]*(&rand.x)[ii]*scale; c.data[bOffset] = (mask_t)(&rand.x)[ii]; } } __syncthreads(); } } template<typename mask_t, typename scalar_t, typename accscalar_t> void masked_scale_kernel(at::Tensor& ret, const at::Tensor& src, const at::Tensor& mask, accscalar_t scale){ auto iter = at::TensorIteratorConfig() .check_all_same_dtype(false) .add_output(ret) .add_input(src) .add_input(mask) .build(); at::native::gpu_kernel( iter, [=]GPU_LAMBDA(const scalar_t src_val, const mask_t mask_val) -> scalar_t { return (float)mask_val * src_val * scale; }); } template <typename scalar_t> int get_vector_size(at::Tensor self, at::Tensor ret, at::Tensor mask) { int vec_size = 4; // get the vector size if (!self.is_non_overlapping_and_dense() || !ret.is_non_overlapping_and_dense() || !mask.is_non_overlapping_and_dense()) { vec_size = 1; } else { vec_size = memory::can_vectorize_up_to<scalar_t>((char*)self.data_ptr()); } // check that we'd have no remainders - prefer a smaller vector size with no remainders over a larger vector and remainder. bool can_vectorize = true; do { can_vectorize = self.numel() % vec_size == 0 && ret.numel() % vec_size == 0 && mask.numel() % vec_size == 0; if (!can_vectorize) vec_size /= 2; } while (vec_size > 1 && !can_vectorize); return can_vectorize ? 
vec_size : 1; } template <typename index_type, typename mask_t> inline void launcher( const Tensor& self, Tensor& ret, Tensor& mask, double p, const int64_t nelem, const PhiloxCudaState rng_engine_inputs, dim3 grid, dim3 dim_block) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "fused_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(p); auto self_info = cuda::detail::getTensorInfo<scalar_t, index_type>(self); auto ret_info = cuda::detail::getTensorInfo<scalar_t, index_type>(ret); auto mask_info = cuda::detail::getTensorInfo<mask_t, index_type>(mask); self_info.collapseDims(); ret_info.collapseDims(); mask_info.collapseDims(); // ret and mask are collapsed to 1d // contiguous tensor int vec_size = get_vector_size<scalar_t>(self, ret, mask); if (vec_size > 1) { switch (vec_size) { case 4: hipLaunchKernelGGL(( fused_dropout_kernel_vec< scalar_t, accscalar_t, index_type, 1, 4>) , dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); C10_HIP_KERNEL_LAUNCH_CHECK(); break; case 2: hipLaunchKernelGGL(( fused_dropout_kernel_vec< scalar_t, accscalar_t, index_type, 1, 2>) , dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); C10_HIP_KERNEL_LAUNCH_CHECK(); break; } } else { switch (self_info.dims) { case 1: hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, index_type, 1>) , dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); C10_HIP_KERNEL_LAUNCH_CHECK(); break; default: if (!self.is_contiguous() && ret.is_contiguous() && mask.is_contiguous()) { hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, index_type, -1, 1>) , dim3(grid), dim3(dim_block), 0, 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, index_type, -1>) , dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } }); } } //anonymous namespace template <typename mask_t> std::tuple<Tensor,Tensor> dropout_cuda(CUDAGeneratorImpl* gen, const Tensor& self, double p){ Tensor mask = at::empty_like(self, self.options().dtype(c10::CppTypeToScalarType<mask_t>::value)); const int64_t nelem = self.numel(); // empty tensors should not get here, but just in case, avoid FPE // non-training shot-cut if (nelem==0) return std::tuple<Tensor,Tensor>(self.clone(), mask); Tensor ret = at::empty_like(self); const int64_t block_size = 256; unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size; dim3 dim_block(block_size); dim3 grid((nelem + block_size -1)/block_size); grid.x = ::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x); //number of times random will be generated per thread, to offset philox counter in thc random state int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL; PhiloxCudaState rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_cuda_state(counter_offset); } if (cuda::detail::canUse32BitIndexMath(self)){ launcher<unsigned int, mask_t>( self, ret, mask, p, nelem, rng_engine_inputs, grid, dim_block); } else { launcher<uint64_t, mask_t>( self, ret, mask, p, nelem, rng_engine_inputs, grid, dim_block); } return std::tuple<Tensor,Tensor>(ret, mask); } std::tuple<Tensor,Tensor> native_dropout_cuda(const Tensor& self, double p, c10::optional<bool> 
train){ // short-cut for train == false if (train.has_value() && !train.value()) { return std::make_tuple(self.clone(), at::ones_like(self, self.options().dtype(c10::CppTypeToScalarType<bool>::value))); } // short-cut if (p == 1) { // native_dropout_cuda is in derivatives.yaml, so we don't need to add data // dependency from output to input for autograd auto ret = at::zeros_like(self); auto mask = at::zeros_like(self, self.options().dtype(c10::CppTypeToScalarType<bool>::value)); return std::tuple<Tensor,Tensor>(ret, mask); } auto gen = get_generator_or_default<CUDAGeneratorImpl>(c10::nullopt, cuda::detail::getDefaultCUDAGenerator()); double p1m = 1. - p; return dropout_cuda<bool>(gen, self, p1m); } // TODO: _fused_dropout_cuda is to be removed, see PR #63937 std::tuple<Tensor,Tensor> fused_dropout_cuda(const Tensor& self, double p, c10::optional<Generator> gen_){ auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); return dropout_cuda<uint8_t>(gen, self, p); } template <typename mask_t> Tensor dropout_backward_cuda(const Tensor& grad, const Tensor& mask, double scale){ Tensor ret = at::empty_like(grad, grad.suggest_memory_format()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "masked_scale", [&] { using accscalar_t = acc_type<scalar_t, true>; masked_scale_kernel<mask_t, scalar_t>(ret, grad, mask, (accscalar_t)scale); }); return ret; } Tensor native_dropout_backward_cuda(const Tensor& grad, const Tensor& mask, double scale){ TORCH_CHECK(mask.scalar_type() == at::ScalarType::Bool, "Mask should be Bool Scalar Type", mask.scalar_type()); return dropout_backward_cuda<bool>(grad, mask, scale); } // TODO: masked_scale_cuda is to be removed, see PR #63937 Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){ TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype"); return dropout_backward_cuda<uint8_t>(self, 
mask, scale); } } }
2fc6e4f7dd2b3d1183432461364a7f4a8b9eae16.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/CUDAGraphsUtils.cuh> #include <c10/macros/Macros.h> #include <curand_kernel.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/MemoryAccess.cuh> namespace at{ namespace native{ namespace { // philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4 // for all members of float4 to be consumed UNROLL has to be 4. Don't change! // Note: VEC <= 4 (and in most real-world cases will be 4), so same logic applies. const int UNROLL = 4; template < typename scalar_t, typename accscalar_t, typename IndexType, int ADims, int VEC, typename mask_t> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(256, 4) #endif __global__ void fused_dropout_kernel_vec(at::cuda::detail::TensorInfo<scalar_t, IndexType> a, at::cuda::detail::TensorInfo<scalar_t, IndexType> b, at::cuda::detail::TensorInfo<mask_t, IndexType> c, IndexType totalElements, accscalar_t p, PhiloxCudaState philox_args) { // make sure we don't break assumption that we can't have > 4 elements / thread static_assert(VEC <= 4, "Value of VEC must be in [2, 4]"); using LoadT = memory::aligned_vector<scalar_t, VEC>; using MaskLoadT = memory::aligned_vector<mask_t, VEC>; auto seeds = at::cuda::philox::unpack(philox_args); IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init(std::get<0>(seeds), idx, std::get<1>(seeds), &state); // Helps align the total number of times curand_uniform4 is called by each thread for the same totalElements // in the vec=2 and vec=4 cases. 
bool gridxvec_loop_state = 0; accscalar_t scale = 1.0 / p; float4 rand; // Note: Vectorized loads means we'll stride each thread by an additional VEC factor, as we'll load VEC elements at a time for (IndexType linearIndex = idx * VEC; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x * VEC) { // local storage scalar_t src[VEC]; // We'll use this to actually cause vectorized loads later LoadT *value = reinterpret_cast<LoadT*>(&src); //curand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything // Note: need a new set of random values per 4 elements -- we'll handle VEC elements in this thread, so need ceil(VEC / 4) // sets of rand. if ((VEC == 4) || (gridxvec_loop_state == 0)) { rand = curand_uniform4(&state); } else { // sets up the last two values we generated last iteration to be used this iteration. rand.x = rand.z; rand.y = rand.w; gridxvec_loop_state ^= 1; } rand.x = rand.x < p; rand.y = rand.y < p; if (VEC == 4) { rand.z = rand.z < p; rand.w = rand.w < p; } // Note: We explicitly check for is_contiguous() before launching the vectorized kernel // and replace IndexToOffset call with linearIndex to allow vectorization of NHWC (or other) // ordering. 
// Single vectorized load *value = *reinterpret_cast<LoadT*>(&a.data[linearIndex]); scalar_t r[VEC]; mask_t mask[VEC]; // Perform the actual computation #pragma unroll for (int ii = 0; ii < VEC; ii++) { r[ii] = src[ii]*(&rand.x)[ii]*scale; mask[ii] = (mask_t)(&rand.x)[ii]; } // Vectorized writes for both mask & result *(reinterpret_cast<LoadT*>(&b.data[linearIndex])) = *reinterpret_cast<LoadT*>(&r[0]); *(reinterpret_cast<MaskLoadT*>(&c.data[linearIndex])) = *reinterpret_cast<MaskLoadT*>(&mask[0]); __syncthreads(); } } template < typename scalar_t, typename accscalar_t, typename IndexType, int ADims, int BDims = ADims, typename mask_t> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(256, 4) #endif __global__ void fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a, cuda::detail::TensorInfo<scalar_t, IndexType> b, cuda::detail::TensorInfo<mask_t, IndexType> c, IndexType totalElements, accscalar_t p, PhiloxCudaState philox_args) { auto seeds = at::cuda::philox::unpack(philox_args); IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init(std::get<0>(seeds), idx, std::get<1>(seeds), &state); accscalar_t scale = 1.0 / p; IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) * blockDim.x * gridDim.x * UNROLL; for (IndexType linearIndex = idx; linearIndex < rounded_size; linearIndex += gridDim.x * blockDim.x*UNROLL) { //curand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything float4 rand = curand_uniform4(&state); scalar_t src[UNROLL]; rand.x = rand.x < p; rand.y = rand.y < p; rand.z = rand.z < p; rand.w = rand.w < p; for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // Convert `linearIndex` into an offset of `a` const IndexType aOffset = cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a); src[ii] = 
a.data[aOffset]; } } for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = cuda::detail::IndexToOffset<scalar_t, IndexType, BDims>::get(li, b); b.data[bOffset] = src[ii]*(&rand.x)[ii]*scale; c.data[bOffset] = (mask_t)(&rand.x)[ii]; } } __syncthreads(); } } template<typename mask_t, typename scalar_t, typename accscalar_t> void masked_scale_kernel(at::Tensor& ret, const at::Tensor& src, const at::Tensor& mask, accscalar_t scale){ auto iter = at::TensorIteratorConfig() .check_all_same_dtype(false) .add_output(ret) .add_input(src) .add_input(mask) .build(); at::native::gpu_kernel( iter, [=]GPU_LAMBDA(const scalar_t src_val, const mask_t mask_val) -> scalar_t { return (float)mask_val * src_val * scale; }); } template <typename scalar_t> int get_vector_size(at::Tensor self, at::Tensor ret, at::Tensor mask) { int vec_size = 4; // get the vector size if (!self.is_non_overlapping_and_dense() || !ret.is_non_overlapping_and_dense() || !mask.is_non_overlapping_and_dense()) { vec_size = 1; } else { vec_size = memory::can_vectorize_up_to<scalar_t>((char*)self.data_ptr()); } // check that we'd have no remainders - prefer a smaller vector size with no remainders over a larger vector and remainder. bool can_vectorize = true; do { can_vectorize = self.numel() % vec_size == 0 && ret.numel() % vec_size == 0 && mask.numel() % vec_size == 0; if (!can_vectorize) vec_size /= 2; } while (vec_size > 1 && !can_vectorize); return can_vectorize ? 
vec_size : 1; } template <typename index_type, typename mask_t> inline void launcher( const Tensor& self, Tensor& ret, Tensor& mask, double p, const int64_t nelem, const PhiloxCudaState rng_engine_inputs, dim3 grid, dim3 dim_block) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "fused_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(p); auto self_info = cuda::detail::getTensorInfo<scalar_t, index_type>(self); auto ret_info = cuda::detail::getTensorInfo<scalar_t, index_type>(ret); auto mask_info = cuda::detail::getTensorInfo<mask_t, index_type>(mask); self_info.collapseDims(); ret_info.collapseDims(); mask_info.collapseDims(); // ret and mask are collapsed to 1d // contiguous tensor int vec_size = get_vector_size<scalar_t>(self, ret, mask); if (vec_size > 1) { switch (vec_size) { case 4: fused_dropout_kernel_vec< scalar_t, accscalar_t, index_type, 1, 4> <<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); C10_CUDA_KERNEL_LAUNCH_CHECK(); break; case 2: fused_dropout_kernel_vec< scalar_t, accscalar_t, index_type, 1, 2> <<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); C10_CUDA_KERNEL_LAUNCH_CHECK(); break; } } else { switch (self_info.dims) { case 1: fused_dropout_kernel<scalar_t, accscalar_t, index_type, 1> <<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); C10_CUDA_KERNEL_LAUNCH_CHECK(); break; default: if (!self.is_contiguous() && ret.is_contiguous() && mask.is_contiguous()) { fused_dropout_kernel<scalar_t, accscalar_t, index_type, -1, 1> <<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { fused_dropout_kernel<scalar_t, accscalar_t, index_type, 
-1> <<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } }); } } //anonymous namespace template <typename mask_t> std::tuple<Tensor,Tensor> dropout_cuda(CUDAGeneratorImpl* gen, const Tensor& self, double p){ Tensor mask = at::empty_like(self, self.options().dtype(c10::CppTypeToScalarType<mask_t>::value)); const int64_t nelem = self.numel(); // empty tensors should not get here, but just in case, avoid FPE // non-training shot-cut if (nelem==0) return std::tuple<Tensor,Tensor>(self.clone(), mask); Tensor ret = at::empty_like(self); const int64_t block_size = 256; unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size; dim3 dim_block(block_size); dim3 grid((nelem + block_size -1)/block_size); grid.x = std::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x); //number of times random will be generated per thread, to offset philox counter in thc random state int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL; PhiloxCudaState rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_cuda_state(counter_offset); } if (cuda::detail::canUse32BitIndexMath(self)){ launcher<unsigned int, mask_t>( self, ret, mask, p, nelem, rng_engine_inputs, grid, dim_block); } else { launcher<uint64_t, mask_t>( self, ret, mask, p, nelem, rng_engine_inputs, grid, dim_block); } return std::tuple<Tensor,Tensor>(ret, mask); } std::tuple<Tensor,Tensor> native_dropout_cuda(const Tensor& self, double p, c10::optional<bool> train){ // short-cut for train == false if (train.has_value() && !train.value()) { return std::make_tuple(self.clone(), at::ones_like(self, self.options().dtype(c10::CppTypeToScalarType<bool>::value))); } // short-cut if (p == 1) { // 
native_dropout_cuda is in derivatives.yaml, so we don't need to add data // dependency from output to input for autograd auto ret = at::zeros_like(self); auto mask = at::zeros_like(self, self.options().dtype(c10::CppTypeToScalarType<bool>::value)); return std::tuple<Tensor,Tensor>(ret, mask); } auto gen = get_generator_or_default<CUDAGeneratorImpl>(c10::nullopt, cuda::detail::getDefaultCUDAGenerator()); double p1m = 1. - p; return dropout_cuda<bool>(gen, self, p1m); } // TODO: _fused_dropout_cuda is to be removed, see PR #63937 std::tuple<Tensor,Tensor> fused_dropout_cuda(const Tensor& self, double p, c10::optional<Generator> gen_){ auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); return dropout_cuda<uint8_t>(gen, self, p); } template <typename mask_t> Tensor dropout_backward_cuda(const Tensor& grad, const Tensor& mask, double scale){ Tensor ret = at::empty_like(grad, grad.suggest_memory_format()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "masked_scale", [&] { using accscalar_t = acc_type<scalar_t, true>; masked_scale_kernel<mask_t, scalar_t>(ret, grad, mask, (accscalar_t)scale); }); return ret; } Tensor native_dropout_backward_cuda(const Tensor& grad, const Tensor& mask, double scale){ TORCH_CHECK(mask.scalar_type() == at::ScalarType::Bool, "Mask should be Bool Scalar Type", mask.scalar_type()); return dropout_backward_cuda<bool>(grad, mask, scale); } // TODO: masked_scale_cuda is to be removed, see PR #63937 Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){ TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype"); return dropout_backward_cuda<uint8_t>(self, mask, scale); } } }
f0c409922c76f3948b4fd2b7a3271b5ec15d0c6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_ROCM #include "dragon/core/context_cuda.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { template <typename T> __global__ void _Gather( const int NxKxS, const int S, const int C, const int K, const int64_t* index, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, NxKxS) { const int j = yi % S; const int i = yi / S / K; int pos = __ldg(index + yi / S % K); pos = (pos >= 0 ? pos : pos + C); y[yi] = x[(i * C + pos) * S + j]; } } template <typename T> __global__ void _GatherGrad( const int NxKxS, const int S, const int C, const int K, const int64_t* index, const T* dy, float* dx) { CUDA_1D_KERNEL_LOOP(yi, NxKxS) { const int j = yi % S; const int i = yi / S / K; int pos = __ldg(index + yi / S % K); pos = (pos >= 0 ? pos : pos + C); math::utils::AtomicAdd( dx + (i * C + pos) * S + j, convert::To<float>(dy[yi])); } } template <typename T, int D> __global__ void _GatherElements( const int N, const int axis, const int num_dims, const SimpleArray<int, D> X_strides, const SimpleArray<int, D> Y_dims, const int64_t* index, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, N) { int xi = 0, tmp = yi; for (int d = num_dims - 1; d >= 0; --d) { int r; FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], tmp, &tmp, &r); xi += (d == axis ? 
index[yi] : r) * X_strides.data[d]; } y[yi] = x[xi]; } } } // namespace /* ------------------- Launcher Separator ------------------- */ #define DEFINE_KERNEL_LAUNCHER(name, InputT, OutputT) \ template <> \ void name<InputT, CUDAContext>( \ const int N, \ const int S, \ const int C, \ const int K, \ const int64_t* index, \ const InputT* x, \ OutputT* y, \ CUDAContext* ctx) { \ const int NxKxS = N * K * S; \ hipLaunchKernelGGL(( _##name), dim3(CUDA_BLOCKS(NxKxS)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \ NxKxS, \ S, \ C, \ K, \ index, \ reinterpret_cast<const math::ScalarType<InputT>::type*>(x), \ reinterpret_cast<math::ScalarType<OutputT>::type*>(y)); \ } DEFINE_KERNEL_LAUNCHER(Gather, bool, bool); DEFINE_KERNEL_LAUNCHER(Gather, uint8_t, uint8_t); DEFINE_KERNEL_LAUNCHER(Gather, int8_t, int8_t); DEFINE_KERNEL_LAUNCHER(Gather, int, int); DEFINE_KERNEL_LAUNCHER(Gather, int64_t, int64_t); DEFINE_KERNEL_LAUNCHER(Gather, float16, float16); DEFINE_KERNEL_LAUNCHER(Gather, float, float); DEFINE_KERNEL_LAUNCHER(Gather, double, double); DEFINE_KERNEL_LAUNCHER(GatherGrad, float16, float); // GatherGrad DEFINE_KERNEL_LAUNCHER(GatherGrad, float, float); // GatherGrad DEFINE_KERNEL_LAUNCHER(GatherGrad, double, float); // GatherGrad #undef DEFINE_KERNEL_LAUNCHER #define DEFINE_KERNEL_LAUNCHER(name, T) \ template <> \ void name<T, CUDAContext>( \ const int axis, \ const int num_dims, \ const int64_t* x_strides, \ const int64_t* y_dims, \ const int64_t* index, \ const T* x, \ T* y, \ CUDAContext* ctx) { \ CUDA_TENSOR_DIMS_CHECK(num_dims); \ SimpleArray<int, CUDA_TENSOR_MAX_DIMS> X_strides; \ SimpleArray<int, CUDA_TENSOR_MAX_DIMS> Y_dims; \ const auto N = std::accumulate( \ y_dims, y_dims + num_dims, 1, std::multiplies<int64_t>()); \ for (int i = 0; i < num_dims; ++i) { \ X_strides.data[i] = x_strides[i]; \ Y_dims.data[i] = y_dims[i]; \ } \ hipLaunchKernelGGL(( _##name), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \ N, axis, num_dims, X_strides, Y_dims, 
index, x, y); \ } DEFINE_KERNEL_LAUNCHER(GatherElements, bool); DEFINE_KERNEL_LAUNCHER(GatherElements, uint8_t); DEFINE_KERNEL_LAUNCHER(GatherElements, int8_t); DEFINE_KERNEL_LAUNCHER(GatherElements, int); DEFINE_KERNEL_LAUNCHER(GatherElements, int64_t); DEFINE_KERNEL_LAUNCHER(GatherElements, float16); DEFINE_KERNEL_LAUNCHER(GatherElements, float); DEFINE_KERNEL_LAUNCHER(GatherElements, double); #undef DEFINE_KERNEL_LAUNCHER } // namespace kernels } // namespace dragon #endif // USE_ROCM
f0c409922c76f3948b4fd2b7a3271b5ec15d0c6e.cu
#ifdef USE_CUDA #include "dragon/core/context_cuda.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { template <typename T> __global__ void _Gather( const int NxKxS, const int S, const int C, const int K, const int64_t* index, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, NxKxS) { const int j = yi % S; const int i = yi / S / K; int pos = __ldg(index + yi / S % K); pos = (pos >= 0 ? pos : pos + C); y[yi] = x[(i * C + pos) * S + j]; } } template <typename T> __global__ void _GatherGrad( const int NxKxS, const int S, const int C, const int K, const int64_t* index, const T* dy, float* dx) { CUDA_1D_KERNEL_LOOP(yi, NxKxS) { const int j = yi % S; const int i = yi / S / K; int pos = __ldg(index + yi / S % K); pos = (pos >= 0 ? pos : pos + C); math::utils::AtomicAdd( dx + (i * C + pos) * S + j, convert::To<float>(dy[yi])); } } template <typename T, int D> __global__ void _GatherElements( const int N, const int axis, const int num_dims, const SimpleArray<int, D> X_strides, const SimpleArray<int, D> Y_dims, const int64_t* index, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, N) { int xi = 0, tmp = yi; for (int d = num_dims - 1; d >= 0; --d) { int r; FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], tmp, &tmp, &r); xi += (d == axis ? 
index[yi] : r) * X_strides.data[d]; } y[yi] = x[xi]; } } } // namespace /* ------------------- Launcher Separator ------------------- */ #define DEFINE_KERNEL_LAUNCHER(name, InputT, OutputT) \ template <> \ void name<InputT, CUDAContext>( \ const int N, \ const int S, \ const int C, \ const int K, \ const int64_t* index, \ const InputT* x, \ OutputT* y, \ CUDAContext* ctx) { \ const int NxKxS = N * K * S; \ _##name<<<CUDA_BLOCKS(NxKxS), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \ NxKxS, \ S, \ C, \ K, \ index, \ reinterpret_cast<const math::ScalarType<InputT>::type*>(x), \ reinterpret_cast<math::ScalarType<OutputT>::type*>(y)); \ } DEFINE_KERNEL_LAUNCHER(Gather, bool, bool); DEFINE_KERNEL_LAUNCHER(Gather, uint8_t, uint8_t); DEFINE_KERNEL_LAUNCHER(Gather, int8_t, int8_t); DEFINE_KERNEL_LAUNCHER(Gather, int, int); DEFINE_KERNEL_LAUNCHER(Gather, int64_t, int64_t); DEFINE_KERNEL_LAUNCHER(Gather, float16, float16); DEFINE_KERNEL_LAUNCHER(Gather, float, float); DEFINE_KERNEL_LAUNCHER(Gather, double, double); DEFINE_KERNEL_LAUNCHER(GatherGrad, float16, float); // GatherGrad DEFINE_KERNEL_LAUNCHER(GatherGrad, float, float); // GatherGrad DEFINE_KERNEL_LAUNCHER(GatherGrad, double, float); // GatherGrad #undef DEFINE_KERNEL_LAUNCHER #define DEFINE_KERNEL_LAUNCHER(name, T) \ template <> \ void name<T, CUDAContext>( \ const int axis, \ const int num_dims, \ const int64_t* x_strides, \ const int64_t* y_dims, \ const int64_t* index, \ const T* x, \ T* y, \ CUDAContext* ctx) { \ CUDA_TENSOR_DIMS_CHECK(num_dims); \ SimpleArray<int, CUDA_TENSOR_MAX_DIMS> X_strides; \ SimpleArray<int, CUDA_TENSOR_MAX_DIMS> Y_dims; \ const auto N = std::accumulate( \ y_dims, y_dims + num_dims, 1, std::multiplies<int64_t>()); \ for (int i = 0; i < num_dims; ++i) { \ X_strides.data[i] = x_strides[i]; \ Y_dims.data[i] = y_dims[i]; \ } \ _##name<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \ N, axis, num_dims, X_strides, Y_dims, index, x, y); \ } DEFINE_KERNEL_LAUNCHER(GatherElements, bool); 
DEFINE_KERNEL_LAUNCHER(GatherElements, uint8_t); DEFINE_KERNEL_LAUNCHER(GatherElements, int8_t); DEFINE_KERNEL_LAUNCHER(GatherElements, int); DEFINE_KERNEL_LAUNCHER(GatherElements, int64_t); DEFINE_KERNEL_LAUNCHER(GatherElements, float16); DEFINE_KERNEL_LAUNCHER(GatherElements, float); DEFINE_KERNEL_LAUNCHER(GatherElements, double); #undef DEFINE_KERNEL_LAUNCHER } // namespace kernels } // namespace dragon #endif // USE_CUDA
57d43e5ffd8cd0d3db5508c9d997e7f1930e1258.hip
// !!! This is a file automatically generated by hipify!!! /* * Just run sh compileRun.sh * Use config.h in order to adjust problem size */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include "config.h" void printSparseMatrix(double* A) { for (int i = 0; i < N; i++) { int x = i % D; int y = i / D; for (int j = 0; j < N; j++) { double value = 0; if (j == i) { value = A[5 * i]; } else if (j == i - 1 && x > 0) { value = A[5 * i + 1]; } else if (j == i + 1 && x < D - 1) { value = A[5 * i + 2]; } else if (j == i - D && y > 0) { value = A[5 * i + 3]; } else if (j == i + D && y < D - 1) { value = A[5 * i + 4]; } printf("%10.6f", value); } printf("\n"); } } __global__ void iterateILU(double* srcU, double* dstU, char* smallError) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = index % 3; int i = index / 3; int x = i % D; int y = i / D; if (i < N && (offset == 0 || x < D - 1 && offset == 1 || y < D - 1 && offset == 2)) { double value = 0; if (offset == 0) { if (x > 0) value += srcU[5 * (i - 1) + 2] * srcU[5 * (i - 1) + 2]; if (y > 0) value += srcU[5 * (i - D) + 4] * srcU[5 * (i - D) + 4]; value = sqrt(4 - value); } else { value = -1 / srcU[5 * i]; } dstU[5 * i + 2 * offset] = value; if (fabs(value - srcU[5 * i + 2 * offset]) >= EPSILON_ILU) { *smallError = 0; } } } __global__ void initMatrix(double* A) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < 5 * N) { A[i] = (i % 5 == 0); } } __global__ void transpose(double* A, double* B) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < 5 * N) { int offset = i % 5; if (offset == 0) { A[i] = B[i]; } else { int j = i / 5 - D * (offset == 3) + D * (offset == 4) - (offset == 1) + (offset == 2); A[i] = B[5 * j + 2 - (offset - 1) % 2 + 2 * ((offset - 1) / 2)]; } } } __device__ double func(double x, double y) { return 8 * M_PI * M_PI * sin(2 * M_PI * x) * sin(2 * M_PI * y); } __global__ void initBase(double *base, double h) { int i = blockIdx.x * blockDim.x + 
threadIdx.x; if (i < N) { int x = i % D; int y = i / D; double f = func(h * x + h, h * y + h); base[i] = h * h * f; } } __global__ void calculateGSV(double* A, double *u, double *base, char *smallError, int sourceTime, int time, int lastTime, int offset, int k) { int i = 2 * (blockIdx.x * blockDim.x + threadIdx.x) + offset; if (i < N) { int x = i % D; int y = i / D; int diagIdx = (x + y) / 2; if (diagIdx < k) { double sum = base[i]; if (y > 0) sum -= A[5 * i + 3] * u[i - D]; if (y < D - 1) sum -= A[5 * i + 4] * u[i + D]; if (x > 0) sum -= A[5 * i + 1] * u[i - 1]; if (x < D - 1) sum -= A[5 * i + 2] * u[i + 1]; sum /= A[5 * i]; if (fabsf(sum - u[i]) >= EPSILON_GSV) { smallError[(k - diagIdx + D) % D] = 0; } u[i] = sum; } } } __global__ void fetchU(double *uHistory, double *u, int k) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int x = i % D; int y = i / D; int diagIdx = (x + y) / 2; u[i] = uHistory[i + ((k + 1 + diagIdx) % D) * N]; } } __global__ void initX(double* x, double value) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { x[i] = value; } } __global__ void initR0(double* d_r, double* d_b, double* d_x) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int ix = i % D; double residuum = d_b[i]; if (ix - 1 >= 0) residuum += d_x[i - 1]; if (ix + 1 < D) residuum += d_x[i + 1]; if (i - D >= 0) residuum += d_x[i - D]; if (i + D < N) residuum += d_x[i + D]; residuum -= 4 * d_x[i]; d_r[i] = residuum; } } __global__ void calculateAx(double* d_x, double* d_r) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int ix = i % D; double residuum = 0; if (ix - 1 >= 0) residuum -= d_x[i - 1]; if (ix + 1 < D) residuum -= d_x[i + 1]; if (i - D >= 0) residuum -= d_x[i - D]; if (i + D < N) residuum -= d_x[i + D]; residuum += 4 * d_x[i]; d_r[i] = residuum; } } __global__ void addfv(double* a, double* b, double f, double* c) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { a[i] = b[i] + f * c[i]; } } void 
decompose(double* d_U_, double* d_L, char* d_smallError, int* iterations, int blockSize) { *iterations = 0; int gridSize3N = (3 * N + blockSize - 1) / blockSize; int gridSize5N = (5 * N + blockSize - 1) / blockSize; double* d_U[2]; d_U[0] = d_U_; hipMalloc((void**) &d_U[1], 5 * N * sizeof(double)); hipMemset(d_smallError, 0, 1); // Initialize matrices with identity hipLaunchKernelGGL(( initMatrix), dim3(gridSize5N), dim3(blockSize), 0, 0, d_U[0]); hipLaunchKernelGGL(( initMatrix), dim3(gridSize5N), dim3(blockSize), 0, 0, d_U[1]); for (int m = 0;; m++) { hipMemset(d_smallError, 1, 1); hipLaunchKernelGGL(( iterateILU), dim3(gridSize3N),dim3(blockSize), 0, 0, d_U[m % 2 == 0], d_U[m % 2], d_smallError); (*iterations)++; char smallError; hipMemcpy(&smallError, d_smallError, 1, hipMemcpyDeviceToHost); if (smallError && *iterations % 2 == 0) break; } hipLaunchKernelGGL(( transpose), dim3(gridSize5N), dim3(blockSize), 0, 0, d_L, d_U[(*iterations) % 2 == 0]); hipFree(d_U[1]); } void solveGSV(double* d_A, double* d_u, double* d_b, char* d_smallError, int *iterations, int blockSize) { *iterations = 0; int halfN = (N + 1) / 2; int gridSizeHalfN = (halfN + blockSize - 1) / blockSize; hipMemset(d_smallError, 0, D); // Calculate u for (int k = 1; ; k++) { int time = (k % D) * N; int lastTime = ((k - 1 + D) % D) * N; hipMemset(d_smallError + (k % D), 1, 1); // Black fields hipLaunchKernelGGL(( calculateGSV), dim3(gridSizeHalfN), dim3(blockSize), 0, 0, d_A, d_u, d_b, d_smallError, lastTime, time, lastTime, 0, k); // White fields hipLaunchKernelGGL(( calculateGSV), dim3(gridSizeHalfN), dim3(blockSize), 0, 0, d_A, d_u, d_b, d_smallError, time, time, lastTime, 1, k); (*iterations)++; if (k >= D) { char smallError; hipMemcpy(&smallError, d_smallError + ((k + 1) % D), 1, hipMemcpyDeviceToHost); if (smallError) break; } } // Fetch result //fetchU<<<gridSizeN, blockSize>>>(d_uHistory, d_u, *iterations); } void solveBr(double* d_L, double* d_U, double* d_r, double* d_p, double* d_tmp, 
char* d_smallError, int blockSize) { int it; solveGSV(d_L, d_tmp, d_r, d_smallError, &it, blockSize); //printf("%d Ly=r iterations\n", it); solveGSV(d_U, d_p, d_tmp, d_smallError, &it, blockSize); //printf("%d Up=y iterations\n", it); } __global__ void dotProduct(double* t, double* a, double* b) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < N) { t[i] = a[i] * b[i]; } } __global__ void sumReduction(double* a, int n) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < n / 2) { a[i] += a[(n + 1) / 2 + i]; } } double reductionScalarProduct(double* d_a, double* d_b, double* d_tmp, int blockSize) { int size = N; int gridSizeN = (N + blockSize - 1) / blockSize; hipLaunchKernelGGL(( dotProduct), dim3(gridSizeN),dim3(blockSize), 0, 0, d_tmp, d_a, d_b); while (size > 1) { int gridSize = ((size + 1) / 2 + blockSize - 1) / blockSize; hipLaunchKernelGGL(( sumReduction), dim3(gridSize),dim3(blockSize), 0, 0, d_tmp, size); size = (size + 1) / 2; } double result; hipMemcpy(&result, d_tmp, sizeof(double), hipMemcpyDeviceToHost); return result; } void solve(double *u, int *iterations, int blockSize) { *iterations = 0; int gridSizeN = (N + blockSize - 1) / blockSize; // Allocate memory double *d_base; hipMalloc((void**) &d_base, N * sizeof(double)); hipLaunchKernelGGL(( initBase), dim3(gridSizeN), dim3(blockSize), 0, 0, d_base, H); char *d_smallError; hipMalloc((void**) &d_smallError, D); hipMemset(d_smallError, 0, D); double* d_U; hipMalloc((void**) &d_U, 5 * N * sizeof(double)); double* d_L; hipMalloc((void**) &d_L, 5 * N * sizeof(double)); int it; decompose(d_U, d_L, d_smallError, &it, blockSize); printf("%d iterations for ILU decomposition\n", it); double* d_x; hipMalloc((void**) &d_x, N * sizeof(double)); hipLaunchKernelGGL(( initX), dim3(gridSizeN),dim3(blockSize), 0, 0, d_x, 1); double* d_r; hipMalloc((void**) &d_r, N * sizeof(double)); double* d_p; hipMalloc((void**) &d_p, N * sizeof(double)); double* d_tmp0; hipMalloc((void**) &d_tmp0, N * sizeof(double)); 
double* d_tmp1; hipMalloc((void**) &d_tmp1, N * sizeof(double)); double* d_delta; hipMalloc((void**) &d_delta, 2 * sizeof(double)); double delta = 0, newDelta, deltaHat; hipLaunchKernelGGL(( initR0), dim3(gridSizeN),dim3(blockSize), 0, 0, d_r, d_base, d_x); solveBr(d_L, d_U, d_r, d_p, d_tmp0, d_smallError, blockSize); //hipMemset(d_delta, 0, 2 * sizeof(double)); //scalarProduct<<<gridSizeN,blockSize>>>(d_r, d_p, d_delta); //hipMemcpy(&delta, d_delta, sizeof(double), hipMemcpyDeviceToHost); delta = reductionScalarProduct(d_r, d_p, d_tmp0, blockSize); while (delta >= EPSILON * EPSILON) { //hipMemset(d_delta, 0, 2 * sizeof(double)); hipLaunchKernelGGL(( calculateAx), dim3(gridSizeN),dim3(blockSize), 0, 0, d_p, d_tmp0); deltaHat = reductionScalarProduct(d_p, d_tmp0, d_tmp1, blockSize); //scalarProduct<<<gridSizeN,blockSize>>>(d_p, d_tmp0, d_delta + 1); //hipMemcpy(&deltaHat, d_delta + 1, sizeof(double), hipMemcpyDeviceToHost); deltaHat = delta / deltaHat; hipLaunchKernelGGL(( addfv), dim3(gridSizeN),dim3(blockSize), 0, 0, d_x, d_x, deltaHat, d_p); hipLaunchKernelGGL(( addfv), dim3(gridSizeN),dim3(blockSize), 0, 0, d_r, d_r, -deltaHat, d_tmp0); solveBr(d_L, d_U, d_r, d_tmp1, d_tmp0, d_smallError, blockSize); newDelta = reductionScalarProduct(d_r, d_tmp1, d_tmp0, blockSize); //scalarProduct<<<gridSizeN,blockSize>>>(d_r, d_tmp1, d_delta); //hipMemcpy(&newDelta, d_delta, sizeof(double), hipMemcpyDeviceToHost); hipLaunchKernelGGL(( addfv), dim3(gridSizeN),dim3(blockSize), 0, 0, d_p, d_tmp1, newDelta / delta, d_p); delta = newDelta; (*iterations)++; } hipMemcpy(u, d_x, N * sizeof(double), hipMemcpyDeviceToHost); // Release memory hipFree(d_base); hipFree(d_smallError); hipFree(d_U); hipFree(d_L); hipFree(d_x); hipFree(d_r); hipFree(d_p); hipFree(d_tmp0); hipFree(d_tmp1); hipFree(d_delta); } double analyticU(double x, double y) { return sin(2 * M_PI * x) * sin(2 * M_PI * y); } int main(void) { int i, j; double* u = (double*) malloc(N * sizeof(double));; 
hipSetDevice(CUDA_DEVICE); int device; hipGetDevice(&device); struct hipDeviceProp_t prop; hipGetDeviceProperties(& prop, device); int blockSize = 8 * prop.warpSize; printf("Run on %s (device %d) with blocksize %d\n", prop.name, device, blockSize); printf("l = %d\nd = %d\nn = %d\n\n", L, D, N); int it; solve(u, &it, blockSize); if (SHOW_RESULTS) { printf("\nResult:\n"); for (i = 0; i < D; i++) { for (j = 0; j < D; j++) { printf("%8.4f", u[j + D * i]); } printf("\n"); } printf("\nAnalytic:\n"); for (i = 0; i < D; i++) { for (j = 0; j < D; j++) { printf("%8.4f", analyticU(j * H + H, i * H + H)); } printf("\n"); } printf("\n"); } double maxError = 0.0; for (i = 0; i < D; i++) { for (j = 0; j < D; j++) { double error = fabs(analyticU(j * H + H, i * H + H) - u[j + D * i]); maxError = fmax(error, maxError); } } printf("Max error: %4.8f\n", maxError); printf("Iterations: %d\n", it); free(u); return 0; }
57d43e5ffd8cd0d3db5508c9d997e7f1930e1258.cu
/* * Just run sh compileRun.sh * Use config.h in order to adjust problem size */ #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include "config.h" void printSparseMatrix(double* A) { for (int i = 0; i < N; i++) { int x = i % D; int y = i / D; for (int j = 0; j < N; j++) { double value = 0; if (j == i) { value = A[5 * i]; } else if (j == i - 1 && x > 0) { value = A[5 * i + 1]; } else if (j == i + 1 && x < D - 1) { value = A[5 * i + 2]; } else if (j == i - D && y > 0) { value = A[5 * i + 3]; } else if (j == i + D && y < D - 1) { value = A[5 * i + 4]; } printf("%10.6f", value); } printf("\n"); } } __global__ void iterateILU(double* srcU, double* dstU, char* smallError) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = index % 3; int i = index / 3; int x = i % D; int y = i / D; if (i < N && (offset == 0 || x < D - 1 && offset == 1 || y < D - 1 && offset == 2)) { double value = 0; if (offset == 0) { if (x > 0) value += srcU[5 * (i - 1) + 2] * srcU[5 * (i - 1) + 2]; if (y > 0) value += srcU[5 * (i - D) + 4] * srcU[5 * (i - D) + 4]; value = sqrt(4 - value); } else { value = -1 / srcU[5 * i]; } dstU[5 * i + 2 * offset] = value; if (fabs(value - srcU[5 * i + 2 * offset]) >= EPSILON_ILU) { *smallError = 0; } } } __global__ void initMatrix(double* A) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < 5 * N) { A[i] = (i % 5 == 0); } } __global__ void transpose(double* A, double* B) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < 5 * N) { int offset = i % 5; if (offset == 0) { A[i] = B[i]; } else { int j = i / 5 - D * (offset == 3) + D * (offset == 4) - (offset == 1) + (offset == 2); A[i] = B[5 * j + 2 - (offset - 1) % 2 + 2 * ((offset - 1) / 2)]; } } } __device__ double func(double x, double y) { return 8 * M_PI * M_PI * sin(2 * M_PI * x) * sin(2 * M_PI * y); } __global__ void initBase(double *base, double h) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int x = i % D; int y = i / D; double f = func(h 
* x + h, h * y + h); base[i] = h * h * f; } } __global__ void calculateGSV(double* A, double *u, double *base, char *smallError, int sourceTime, int time, int lastTime, int offset, int k) { int i = 2 * (blockIdx.x * blockDim.x + threadIdx.x) + offset; if (i < N) { int x = i % D; int y = i / D; int diagIdx = (x + y) / 2; if (diagIdx < k) { double sum = base[i]; if (y > 0) sum -= A[5 * i + 3] * u[i - D]; if (y < D - 1) sum -= A[5 * i + 4] * u[i + D]; if (x > 0) sum -= A[5 * i + 1] * u[i - 1]; if (x < D - 1) sum -= A[5 * i + 2] * u[i + 1]; sum /= A[5 * i]; if (fabsf(sum - u[i]) >= EPSILON_GSV) { smallError[(k - diagIdx + D) % D] = 0; } u[i] = sum; } } } __global__ void fetchU(double *uHistory, double *u, int k) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int x = i % D; int y = i / D; int diagIdx = (x + y) / 2; u[i] = uHistory[i + ((k + 1 + diagIdx) % D) * N]; } } __global__ void initX(double* x, double value) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { x[i] = value; } } __global__ void initR0(double* d_r, double* d_b, double* d_x) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int ix = i % D; double residuum = d_b[i]; if (ix - 1 >= 0) residuum += d_x[i - 1]; if (ix + 1 < D) residuum += d_x[i + 1]; if (i - D >= 0) residuum += d_x[i - D]; if (i + D < N) residuum += d_x[i + D]; residuum -= 4 * d_x[i]; d_r[i] = residuum; } } __global__ void calculateAx(double* d_x, double* d_r) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int ix = i % D; double residuum = 0; if (ix - 1 >= 0) residuum -= d_x[i - 1]; if (ix + 1 < D) residuum -= d_x[i + 1]; if (i - D >= 0) residuum -= d_x[i - D]; if (i + D < N) residuum -= d_x[i + D]; residuum += 4 * d_x[i]; d_r[i] = residuum; } } __global__ void addfv(double* a, double* b, double f, double* c) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { a[i] = b[i] + f * c[i]; } } void decompose(double* d_U_, double* d_L, char* d_smallError, int* iterations, int blockSize) 
{ *iterations = 0; int gridSize3N = (3 * N + blockSize - 1) / blockSize; int gridSize5N = (5 * N + blockSize - 1) / blockSize; double* d_U[2]; d_U[0] = d_U_; cudaMalloc((void**) &d_U[1], 5 * N * sizeof(double)); cudaMemset(d_smallError, 0, 1); // Initialize matrices with identity initMatrix<<<gridSize5N, blockSize>>>(d_U[0]); initMatrix<<<gridSize5N, blockSize>>>(d_U[1]); for (int m = 0;; m++) { cudaMemset(d_smallError, 1, 1); iterateILU<<<gridSize3N,blockSize>>>(d_U[m % 2 == 0], d_U[m % 2], d_smallError); (*iterations)++; char smallError; cudaMemcpy(&smallError, d_smallError, 1, cudaMemcpyDeviceToHost); if (smallError && *iterations % 2 == 0) break; } transpose<<<gridSize5N, blockSize>>>(d_L, d_U[(*iterations) % 2 == 0]); cudaFree(d_U[1]); } void solveGSV(double* d_A, double* d_u, double* d_b, char* d_smallError, int *iterations, int blockSize) { *iterations = 0; int halfN = (N + 1) / 2; int gridSizeHalfN = (halfN + blockSize - 1) / blockSize; cudaMemset(d_smallError, 0, D); // Calculate u for (int k = 1; ; k++) { int time = (k % D) * N; int lastTime = ((k - 1 + D) % D) * N; cudaMemset(d_smallError + (k % D), 1, 1); // Black fields calculateGSV<<<gridSizeHalfN, blockSize>>>(d_A, d_u, d_b, d_smallError, lastTime, time, lastTime, 0, k); // White fields calculateGSV<<<gridSizeHalfN, blockSize>>>(d_A, d_u, d_b, d_smallError, time, time, lastTime, 1, k); (*iterations)++; if (k >= D) { char smallError; cudaMemcpy(&smallError, d_smallError + ((k + 1) % D), 1, cudaMemcpyDeviceToHost); if (smallError) break; } } // Fetch result //fetchU<<<gridSizeN, blockSize>>>(d_uHistory, d_u, *iterations); } void solveBr(double* d_L, double* d_U, double* d_r, double* d_p, double* d_tmp, char* d_smallError, int blockSize) { int it; solveGSV(d_L, d_tmp, d_r, d_smallError, &it, blockSize); //printf("%d Ly=r iterations\n", it); solveGSV(d_U, d_p, d_tmp, d_smallError, &it, blockSize); //printf("%d Up=y iterations\n", it); } __global__ void dotProduct(double* t, double* a, double* b) { int i 
= threadIdx.x + blockIdx.x * blockDim.x; if (i < N) { t[i] = a[i] * b[i]; } } __global__ void sumReduction(double* a, int n) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < n / 2) { a[i] += a[(n + 1) / 2 + i]; } } double reductionScalarProduct(double* d_a, double* d_b, double* d_tmp, int blockSize) { int size = N; int gridSizeN = (N + blockSize - 1) / blockSize; dotProduct<<<gridSizeN,blockSize>>>(d_tmp, d_a, d_b); while (size > 1) { int gridSize = ((size + 1) / 2 + blockSize - 1) / blockSize; sumReduction<<<gridSize,blockSize>>>(d_tmp, size); size = (size + 1) / 2; } double result; cudaMemcpy(&result, d_tmp, sizeof(double), cudaMemcpyDeviceToHost); return result; } void solve(double *u, int *iterations, int blockSize) { *iterations = 0; int gridSizeN = (N + blockSize - 1) / blockSize; // Allocate memory double *d_base; cudaMalloc((void**) &d_base, N * sizeof(double)); initBase<<<gridSizeN, blockSize>>>(d_base, H); char *d_smallError; cudaMalloc((void**) &d_smallError, D); cudaMemset(d_smallError, 0, D); double* d_U; cudaMalloc((void**) &d_U, 5 * N * sizeof(double)); double* d_L; cudaMalloc((void**) &d_L, 5 * N * sizeof(double)); int it; decompose(d_U, d_L, d_smallError, &it, blockSize); printf("%d iterations for ILU decomposition\n", it); double* d_x; cudaMalloc((void**) &d_x, N * sizeof(double)); initX<<<gridSizeN,blockSize>>>(d_x, 1); double* d_r; cudaMalloc((void**) &d_r, N * sizeof(double)); double* d_p; cudaMalloc((void**) &d_p, N * sizeof(double)); double* d_tmp0; cudaMalloc((void**) &d_tmp0, N * sizeof(double)); double* d_tmp1; cudaMalloc((void**) &d_tmp1, N * sizeof(double)); double* d_delta; cudaMalloc((void**) &d_delta, 2 * sizeof(double)); double delta = 0, newDelta, deltaHat; initR0<<<gridSizeN,blockSize>>>(d_r, d_base, d_x); solveBr(d_L, d_U, d_r, d_p, d_tmp0, d_smallError, blockSize); //cudaMemset(d_delta, 0, 2 * sizeof(double)); //scalarProduct<<<gridSizeN,blockSize>>>(d_r, d_p, d_delta); //cudaMemcpy(&delta, d_delta, sizeof(double), 
cudaMemcpyDeviceToHost); delta = reductionScalarProduct(d_r, d_p, d_tmp0, blockSize); while (delta >= EPSILON * EPSILON) { //cudaMemset(d_delta, 0, 2 * sizeof(double)); calculateAx<<<gridSizeN,blockSize>>>(d_p, d_tmp0); deltaHat = reductionScalarProduct(d_p, d_tmp0, d_tmp1, blockSize); //scalarProduct<<<gridSizeN,blockSize>>>(d_p, d_tmp0, d_delta + 1); //cudaMemcpy(&deltaHat, d_delta + 1, sizeof(double), cudaMemcpyDeviceToHost); deltaHat = delta / deltaHat; addfv<<<gridSizeN,blockSize>>>(d_x, d_x, deltaHat, d_p); addfv<<<gridSizeN,blockSize>>>(d_r, d_r, -deltaHat, d_tmp0); solveBr(d_L, d_U, d_r, d_tmp1, d_tmp0, d_smallError, blockSize); newDelta = reductionScalarProduct(d_r, d_tmp1, d_tmp0, blockSize); //scalarProduct<<<gridSizeN,blockSize>>>(d_r, d_tmp1, d_delta); //cudaMemcpy(&newDelta, d_delta, sizeof(double), cudaMemcpyDeviceToHost); addfv<<<gridSizeN,blockSize>>>(d_p, d_tmp1, newDelta / delta, d_p); delta = newDelta; (*iterations)++; } cudaMemcpy(u, d_x, N * sizeof(double), cudaMemcpyDeviceToHost); // Release memory cudaFree(d_base); cudaFree(d_smallError); cudaFree(d_U); cudaFree(d_L); cudaFree(d_x); cudaFree(d_r); cudaFree(d_p); cudaFree(d_tmp0); cudaFree(d_tmp1); cudaFree(d_delta); } double analyticU(double x, double y) { return sin(2 * M_PI * x) * sin(2 * M_PI * y); } int main(void) { int i, j; double* u = (double*) malloc(N * sizeof(double));; cudaSetDevice(CUDA_DEVICE); int device; cudaGetDevice(&device); struct cudaDeviceProp prop; cudaGetDeviceProperties(& prop, device); int blockSize = 8 * prop.warpSize; printf("Run on %s (device %d) with blocksize %d\n", prop.name, device, blockSize); printf("l = %d\nd = %d\nn = %d\n\n", L, D, N); int it; solve(u, &it, blockSize); if (SHOW_RESULTS) { printf("\nResult:\n"); for (i = 0; i < D; i++) { for (j = 0; j < D; j++) { printf("%8.4f", u[j + D * i]); } printf("\n"); } printf("\nAnalytic:\n"); for (i = 0; i < D; i++) { for (j = 0; j < D; j++) { printf("%8.4f", analyticU(j * H + H, i * H + H)); } printf("\n"); } 
printf("\n"); } double maxError = 0.0; for (i = 0; i < D; i++) { for (j = 0; j < D; j++) { double error = fabs(analyticU(j * H + H, i * H + H) - u[j + D * i]); maxError = fmax(error, maxError); } } printf("Max error: %4.8f\n", maxError); printf("Iterations: %d\n", it); free(u); return 0; }
b4f654572e0b85cae109c47b9e5027c8efa1f565.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/optimizers/lars_momentum_op.h" #include "paddle/fluid/platform/fast_divmod.h" #include "paddle/phi/kernels/funcs/math_cuda_utils.h" #if TORCH_HIP_VERSION >= 11000 #include <hip/hip_cooperative_groups.h> #endif #ifdef __HIPCC__ #define LARS_BLOCK_SIZE 256 #else #define LARS_BLOCK_SIZE 512 #endif #define LARS_MAX_MERGED_OPS 60 namespace paddle { namespace operators { template <typename T> using MultiPrecisionType = typename details::MPTypeTrait<T>::Type; __device__ __forceinline__ float Sqrt(float x) { return sqrtf(x); } __device__ __forceinline__ double Sqrt(double x) { return sqrt(x); } __device__ __forceinline__ float Fma(float x, float y, float z) { return fmaf(x, y, z); } __device__ __forceinline__ double Fma(double x, double y, double z) { return fma(x, y, z); } template <typename T> class LarsThreadConfig { public: int grid_for_norm; int grid_for_lars; #if TORCH_HIP_VERSION >= 11000 private: int grid_stride; public: explicit LarsThreadConfig(int64_t numel, int sm_num, int num_blocks_per_sm) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_lars = ::min(::min(sm_num * num_blocks_per_sm, grid), LARS_BLOCK_SIZE); grid_stride = LARS_BLOCK_SIZE * 
grid_for_lars; } int GetRepeatTimes(int64_t numel) { return (numel + grid_stride - 1) / grid_stride - 1; } #else int repeat_times; explicit LarsThreadConfig(const int64_t numel) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_norm = ::min(grid, LARS_BLOCK_SIZE); const int grid_stride = grid_for_norm * LARS_BLOCK_SIZE; repeat_times = (numel + grid_stride - 1) / grid_stride - 1; // Determine to read 4 fp16 or float data once, but 2 double data once. grid_for_lars = std::is_same<double, T>::value ? (numel + (LARS_BLOCK_SIZE << 1) - 1) / (LARS_BLOCK_SIZE << 1) : (numel + (LARS_BLOCK_SIZE << 2) - 1) / (LARS_BLOCK_SIZE << 2); } #endif }; template <typename T, typename MT, int VecSize, bool IsAmp = false> __device__ inline void VectorizeLarsUpdate( const T* __restrict__ grad, const MT* param, const MT* velocity, T* param_out, MT* velocity_out, const MT mu, MT local_lr, const MT lars_weight_decay, const MT rescale_grad, const int tid, const int grid_stride, const int numel, MT* master_param_out = nullptr) { using VecType = phi::AlignedVector<T, VecSize>; using VecMType = phi::AlignedVector<MT, VecSize>; int main = numel >> (VecSize >> 1); int tail_offset = main * VecSize; const VecType* grad_vec = reinterpret_cast<const VecType*>(grad); const VecMType* param_vec = reinterpret_cast<const VecMType*>(param); const VecMType* velocity_vec = reinterpret_cast<const VecMType*>(velocity); VecType* param_out_vec = reinterpret_cast<VecType*>(param_out); VecMType* velocity_out_vec = reinterpret_cast<VecMType*>(velocity_out); VecMType* master_param_out_vec; if (IsAmp) { master_param_out_vec = reinterpret_cast<VecMType*>(master_param_out); } for (int i = tid; i < main; i += grid_stride) { VecType param_out_tmp; VecMType velocity_tmp, param_tmp; VecType grad_data = grad_vec[i]; VecMType param_data = param_vec[i]; VecMType velocity_data = velocity_vec[i]; #pragma unroll for (int j = 0; j < VecSize; ++j) { MT grad_val = static_cast<MT>(grad_data[j]) * rescale_grad; 
velocity_tmp[j] = Fma(velocity_data[j], mu, local_lr * Fma(lars_weight_decay, param_data[j], grad_val)); param_tmp[j] = param_data[j] - velocity_tmp[j]; param_out_tmp[j] = static_cast<T>(param_tmp[j]); } param_out_vec[i] = param_out_tmp; velocity_out_vec[i] = velocity_tmp; if (IsAmp) { master_param_out_vec[i] = param_tmp; } } for (int i = tid + tail_offset; i < numel; i += grid_stride) { MT grad_val = static_cast<MT>(grad[i]) * rescale_grad; MT param_val = param[i]; MT velocity_tmp = Fma(velocity[i], mu, local_lr * Fma(lars_weight_decay, param_val, grad_val)); MT param_tmp = param_val - velocity_tmp; param_out[i] = static_cast<T>(param_tmp); velocity_out[i] = velocity_tmp; if (IsAmp) { master_param_out[i] = param_tmp; } } } #if TORCH_HIP_VERSION >= 11000 /* Once TORCH_HIP_VERSION is beyond 11, cooperative_groups can be involved in without --rdc=true compile flag, then L2_norm kernel can be set with __device__ and cooperative_groups::grid_group also can be involved. Otherwise, adding this flag may affect much, L2_norm kernel shall be set with __global__.*/ // TODO(limingshu): declaration of cooperative_groups wapper is invalid in host. 
template <typename T, typename MT> __forceinline__ __device__ void L2NormKernel( const cooperative_groups::grid_group* cg, #else template <typename T, typename MT> __global__ void L2NormKernel( #endif const T* p_data, const T* __restrict__ g_data, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int64_t numel, const int repeat_times, const MT rescale_grad, const int thresh = 0, MT* __restrict__ p_n = nullptr, MT* __restrict__ g_n = nullptr) { __shared__ MT s_buffer[2]; int tid = threadIdx.x + blockDim.x * blockIdx.x; int grid_stride = LARS_BLOCK_SIZE * gridDim.x; MT p_tmp = static_cast<MT>(0); MT g_tmp = static_cast<MT>(0); while (tid < numel) { MT tmp0 = static_cast<MT>(p_data[tid]); MT tmp1 = static_cast<MT>(g_data[tid]); p_tmp += (tmp0 * tmp0); g_tmp += (tmp1 * tmp1); tid += grid_stride; } p_tmp = phi::funcs::blockReduceSum<MT>(p_tmp, FINAL_MASK); g_tmp = phi::funcs::blockReduceSum<MT>(g_tmp, FINAL_MASK); if (threadIdx.x == 0) { p_buffer[blockIdx.x] = p_tmp; g_buffer[blockIdx.x] = g_tmp; } #if TORCH_HIP_VERSION >= 11000 cg->sync(); // Grid sync for writring partial result to gloabl memory MT p_part_sum = threadIdx.x < gridDim.x ? p_buffer[threadIdx.x] : 0; MT g_part_sum = threadIdx.x < gridDim.x ? 
g_buffer[threadIdx.x] : 0; MT tmp0 = phi::funcs::blockReduceSum<MT>(p_part_sum, FINAL_MASK); MT tmp1 = phi::funcs::blockReduceSum<MT>(g_part_sum, FINAL_MASK); if (threadIdx.x == 0) { s_buffer[0] = tmp0; s_buffer[1] = tmp1; } __syncthreads(); *p_n = Sqrt(s_buffer[0]); *g_n = rescale_grad * Sqrt(s_buffer[1]); #endif } template <typename T, typename MT> __forceinline__ __device__ void MomentumUpdate( const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, const MT mu, const MT lars_weight_decay, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const MT param_norm, const MT grad_norm, const int tid, const int grid_stride, const int64_t numel, const bool is_amp) { const MT lr = learning_rate[0]; MT local_lr = lr; if (param_norm > static_cast<MT>(0) && grad_norm > static_cast<MT>(0)) { local_lr = lr * lars_coeff * param_norm / (fma(lars_weight_decay, param_norm, grad_norm) + epsilon); } if (is_amp) { VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/true>( grad, master_param, velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel, master_param_out); } else { if (std::is_same<T, float>::value || std::is_same<T, paddle::platform::float16>::value) { /* TODO(limingshu): pointer cast may damage memory accessing for fp16 */ VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } else { VectorizeLarsUpdate<T, MT, /*VecSize=*/2, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } } } #if TORCH_HIP_VERSION >= 11000 template <typename T, typename MT> struct LarsParamWarpper { int64_t numel_arr[LARS_MAX_MERGED_OPS]; int 
repeat_arr[LARS_MAX_MERGED_OPS]; const T* __restrict__ g_arr[LARS_MAX_MERGED_OPS]; const MT* __restrict__ lr_arr[LARS_MAX_MERGED_OPS]; T* __restrict__ p_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ v_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ master_p_out_arr[LARS_MAX_MERGED_OPS]; MT weight_decay_arr[LARS_MAX_MERGED_OPS]; }; template <typename T, typename MT> __global__ void MergedMomentumLarsKernel(LarsParamWarpper<T, MT> lars_warpper, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int op_num, const MT mu, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const bool is_amp) { int grid_stride = gridDim.x * LARS_BLOCK_SIZE; int tid = threadIdx.x + blockIdx.x * blockDim.x; const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); for (int i = 0; i < op_num; ++i) { int numel = lars_warpper.numel_arr[i]; MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], p_buffer, g_buffer, numel, lars_warpper.repeat_arr[i], rescale_grad, 0, &param_norm, &grad_norm); MomentumUpdate<T, MT>( lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], lars_warpper.v_out_arr[i], lars_warpper.p_out_arr[i], lars_warpper.v_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.lr_arr[i], mu, lars_warpper.weight_decay_arr[i], lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } } #endif template <typename T, typename MT> __global__ void MomentumLarsKernel( const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const MT mu, const MT lars_coeff, const MT lars_weight_decay, const MT epsilon, const MT rescale_grad, const int repeat_times, const int thresh, const int64_t numel, const bool is_amp) { int tid = 
threadIdx.x + blockIdx.x * blockDim.x; int grid_stride = gridDim.x * LARS_BLOCK_SIZE; #if TORCH_HIP_VERSION >= 11000 const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, param, grad, p_buffer, g_buffer, numel, repeat_times, rescale_grad, gridDim.x, &param_norm, &grad_norm); #else const MT rescale_grad_pow = rescale_grad * rescale_grad; MT param_part_norm = threadIdx.x < thresh ? p_buffer[threadIdx.x] : 0; MT grad_part_norm = threadIdx.x < thresh ? g_buffer[threadIdx.x] : 0; __syncthreads(); MT param_norm = Sqrt(phi::funcs::blockReduceSum<MT>(param_part_norm, FINAL_MASK)); MT grad_norm = Sqrt(rescale_grad_pow * phi::funcs::blockReduceSum<MT>( grad_part_norm, FINAL_MASK)); #endif MomentumUpdate<T, MT>(param, grad, velocity, param_out, velocity_out, master_param, master_param_out, learning_rate, mu, lars_weight_decay, lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } template <typename T, typename MT> inline void SeparatedLarsMomentumOpCUDAKernel( const platform::CUDADeviceContext& cuda_ctx, const T* param_data, T* param_out_data, const MT* velocity_data, MT* velocity_out_data, const T* grad_data, const MT* lr, MT* p_buffer, MT* g_buffer, const MT mu, const MT lars_coeff, const MT weight_decay, const MT epsilon, const MT rescale_grad, const int64_t numel, const MT* master_param_data, MT* master_out_data, const bool is_amp) { LarsThreadConfig<T> lars_thread_config(numel); hipLaunchKernelGGL(( L2NormKernel<T, MT>), dim3(lars_thread_config.grid_for_norm), dim3(LARS_BLOCK_SIZE), 0, cuda_ctx.stream(), param_data, grad_data, p_buffer, g_buffer, numel, lars_thread_config.repeat_times, rescale_grad); hipLaunchKernelGGL(( MomentumLarsKernel<T, MT>), dim3(lars_thread_config.grid_for_lars), dim3(LARS_BLOCK_SIZE), 0, cuda_ctx.stream(), param_data, grad_data, velocity_data, param_out_data, velocity_out_data, 
master_param_data, master_out_data, lr, p_buffer, g_buffer, mu, lars_coeff, weight_decay, epsilon, rescale_grad, 0, lars_thread_config.grid_for_norm, numel, is_amp); } template <typename DeviceContext, typename T> class LarsMomentumOpCUDAKernel : public framework::OpKernel<T> { using MT = MultiPrecisionType<T>; public: void Compute(const framework::ExecutionContext& ctx) const override { int num_blocks_per_sm = 0; bool multi_precision = ctx.Attr<bool>("multi_precision"); auto& cuda_ctx = ctx.template device_context<platform::CUDADeviceContext>(); int sm_num = cuda_ctx.GetSMCount(); framework::Tensor tmp_buffer_t = ctx.AllocateTmpTensor<MT, platform::CUDADeviceContext>( {LARS_BLOCK_SIZE << 1}, cuda_ctx); auto* p_buffer = tmp_buffer_t.mutable_data<MT>(ctx.GetPlace()); auto* g_buffer = p_buffer + LARS_BLOCK_SIZE; MT mu = static_cast<MT>(ctx.Attr<float>("mu")); MT lars_coeff = static_cast<MT>(ctx.Attr<float>("lars_coeff")); MT epsilon = static_cast<MT>(ctx.Attr<float>("epsilon")); MT rescale_grad = static_cast<MT>(ctx.Attr<float>("rescale_grad")); auto weight_decay_arr = ctx.Attr<std::vector<float>>("lars_weight_decay"); auto grad = ctx.MultiInput<framework::LoDTensor>("Grad"); auto param = ctx.MultiInput<framework::LoDTensor>("Param"); auto velocity = ctx.MultiInput<framework::LoDTensor>("Velocity"); auto param_out = ctx.MultiOutput<framework::LoDTensor>("ParamOut"); auto velocity_out = ctx.MultiOutput<framework::LoDTensor>("VelocityOut"); auto learning_rate = ctx.MultiInput<framework::LoDTensor>("LearningRate"); auto master_param = ctx.MultiInput<framework::LoDTensor>("MasterParam"); auto master_param_out = ctx.MultiOutput<framework::LoDTensor>("MasterParamOut"); int op_num = grad.size(); #if TORCH_HIP_VERSION >= 11000 if (op_num > 1) { LarsParamWarpper<T, MT> lars_warpper; PADDLE_ENFORCE_LT( op_num, LARS_MAX_MERGED_OPS, platform::errors::InvalidArgument( "The maximum number of merged-ops supported is (%d), but" "lars op required for trainning this model is (%d)\n", 
LARS_MAX_MERGED_OPS, op_num)); /* Implementation of lars optimizer consists of following two steps: 1. Figure out the L2 norm statistic result of grad data and param data. 2. Update param and velocity with usage of L2 norm statistic result. Step1 and step2 can be merged with api provided by nvida hipLaunchCooperativeKernel: - The thread quantity shall less than pyhsical SM limited threads - Launche as thread-block can synchronizlly execute. */ hipOccupancyMaxActiveBlocksPerMultiprocessor( &num_blocks_per_sm, MergedMomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); size_t total_numel = 0; for (int i = 0; i < op_num; ++i) { size_t temp_numel = param[i]->numel(); total_numel += temp_numel; lars_warpper.numel_arr[i] = temp_numel; lars_warpper.g_arr[i] = grad[i]->data<T>(); lars_warpper.lr_arr[i] = learning_rate[i]->data<MT>(); lars_warpper.p_out_arr[i] = param_out[i]->mutable_data<T>(ctx.GetPlace()); lars_warpper.v_out_arr[i] = velocity_out[i]->mutable_data<MT>(ctx.GetPlace()); lars_warpper.weight_decay_arr[i] = static_cast<MT>(weight_decay_arr[i]); PADDLE_ENFORCE_EQ( param[i]->data<T>(), lars_warpper.p_out_arr[i], platform::errors::InvalidArgument( "Input(Param) and Output(ParamOut) must be the same Tensors.")); PADDLE_ENFORCE_EQ(velocity[i]->data<MT>(), lars_warpper.v_out_arr[i], platform::errors::InvalidArgument( "Input(Velocity) and Output(VelocityOut) must be " "the same Tensors.")); } int64_t avg_numel = total_numel / op_num; LarsThreadConfig<float> lars_thread_config(avg_numel, sm_num, num_blocks_per_sm); for (int i = 0; i < op_num; ++i) { lars_warpper.repeat_arr[i] = lars_thread_config.GetRepeatTimes(lars_warpper.numel_arr[i]); } if (multi_precision) { for (int i = 0; i < op_num; ++i) { lars_warpper.master_p_out_arr[i] = master_param_out[i]->mutable_data<MT>(ctx.GetPlace()); PADDLE_ENFORCE_EQ(master_param[i]->data<MT>(), lars_warpper.master_p_out_arr[i], platform::errors::InvalidArgument( "Input(MasterParam) and Output(MasterParamOut) " "must be the 
same Tensors.")); } } void* cuda_param[] = {reinterpret_cast<void*>(&lars_warpper), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&op_num), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads, and thead of each block synchronizedly cooperate. hipLaunchCooperativeKernel( reinterpret_cast<void*>(MergedMomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } else { auto* param_data = param[0]->data<T>(); auto* grad_data = grad[0]->data<T>(); auto* velocity_data = velocity[0]->data<MT>(); auto* lr = learning_rate[0]->data<MT>(); auto* param_out_data = param_out[0]->mutable_data<T>(ctx.GetPlace()); auto* velocity_out_data = velocity_out[0]->mutable_data<MT>(ctx.GetPlace()); const MT* master_param_data = multi_precision ? master_param[0]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? master_param_out[0]->mutable_data<MT>(ctx.GetPlace()) : nullptr; int64_t numel = param[0]->numel(); MT lars_weight_decay = weight_decay_arr[0]; // Figure out how many blocks can be active in each sm. 
hipOccupancyMaxActiveBlocksPerMultiprocessor( &num_blocks_per_sm, MomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); LarsThreadConfig<float> lars_thread_config(numel, sm_num, num_blocks_per_sm); int repeat_times = lars_thread_config.GetRepeatTimes(numel); int thresh = 0; void* cuda_param[] = { reinterpret_cast<void*>(&param_data), reinterpret_cast<void*>(&grad_data), reinterpret_cast<void*>(&velocity_data), reinterpret_cast<void*>(&param_out_data), reinterpret_cast<void*>(&velocity_out_data), reinterpret_cast<void*>(&master_param_data), reinterpret_cast<void*>(&master_param_out_data), reinterpret_cast<void*>(&lr), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&lars_weight_decay), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&repeat_times), reinterpret_cast<void*>(&thresh), // Just a placeholder reinterpret_cast<void*>(&numel), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads. hipLaunchCooperativeKernel( reinterpret_cast<void*>(MomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } #else for (int i = 0; i < op_num; ++i) { const MT* master_param_data = multi_precision ? master_param[i]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? 
master_param_out[i]->mutable_data<MT>(ctx.GetPlace()) : nullptr; SeparatedLarsMomentumOpCUDAKernel<T, MT>( cuda_ctx, param[i]->data<T>(), param_out[i]->mutable_data<T>(ctx.GetPlace()), velocity[i]->data<MT>(), velocity_out[i]->mutable_data<MT>(ctx.GetPlace()), grad[i]->data<T>(), learning_rate[i]->data<MT>(), p_buffer, g_buffer, mu, lars_coeff, weight_decay_arr[i], epsilon, rescale_grad, param[i]->numel(), master_param_data, master_param_out_data, multi_precision); } #endif } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( lars_momentum, ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, float>, ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, double>, ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>);
b4f654572e0b85cae109c47b9e5027c8efa1f565.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/optimizers/lars_momentum_op.h" #include "paddle/fluid/platform/fast_divmod.h" #include "paddle/phi/kernels/funcs/math_cuda_utils.h" #if CUDA_VERSION >= 11000 #include <cooperative_groups.h> #endif #ifdef __HIPCC__ #define LARS_BLOCK_SIZE 256 #else #define LARS_BLOCK_SIZE 512 #endif #define LARS_MAX_MERGED_OPS 60 namespace paddle { namespace operators { template <typename T> using MultiPrecisionType = typename details::MPTypeTrait<T>::Type; __device__ __forceinline__ float Sqrt(float x) { return sqrtf(x); } __device__ __forceinline__ double Sqrt(double x) { return sqrt(x); } __device__ __forceinline__ float Fma(float x, float y, float z) { return fmaf(x, y, z); } __device__ __forceinline__ double Fma(double x, double y, double z) { return fma(x, y, z); } template <typename T> class LarsThreadConfig { public: int grid_for_norm; int grid_for_lars; #if CUDA_VERSION >= 11000 private: int grid_stride; public: explicit LarsThreadConfig(int64_t numel, int sm_num, int num_blocks_per_sm) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_lars = std::min(std::min(sm_num * num_blocks_per_sm, grid), LARS_BLOCK_SIZE); grid_stride = LARS_BLOCK_SIZE * grid_for_lars; } int GetRepeatTimes(int64_t numel) { return (numel + grid_stride - 1) / grid_stride - 
1; } #else int repeat_times; explicit LarsThreadConfig(const int64_t numel) { int grid = (numel + LARS_BLOCK_SIZE - 1) / LARS_BLOCK_SIZE; grid_for_norm = std::min(grid, LARS_BLOCK_SIZE); const int grid_stride = grid_for_norm * LARS_BLOCK_SIZE; repeat_times = (numel + grid_stride - 1) / grid_stride - 1; // Determine to read 4 fp16 or float data once, but 2 double data once. grid_for_lars = std::is_same<double, T>::value ? (numel + (LARS_BLOCK_SIZE << 1) - 1) / (LARS_BLOCK_SIZE << 1) : (numel + (LARS_BLOCK_SIZE << 2) - 1) / (LARS_BLOCK_SIZE << 2); } #endif }; template <typename T, typename MT, int VecSize, bool IsAmp = false> __device__ inline void VectorizeLarsUpdate( const T* __restrict__ grad, const MT* param, const MT* velocity, T* param_out, MT* velocity_out, const MT mu, MT local_lr, const MT lars_weight_decay, const MT rescale_grad, const int tid, const int grid_stride, const int numel, MT* master_param_out = nullptr) { using VecType = phi::AlignedVector<T, VecSize>; using VecMType = phi::AlignedVector<MT, VecSize>; int main = numel >> (VecSize >> 1); int tail_offset = main * VecSize; const VecType* grad_vec = reinterpret_cast<const VecType*>(grad); const VecMType* param_vec = reinterpret_cast<const VecMType*>(param); const VecMType* velocity_vec = reinterpret_cast<const VecMType*>(velocity); VecType* param_out_vec = reinterpret_cast<VecType*>(param_out); VecMType* velocity_out_vec = reinterpret_cast<VecMType*>(velocity_out); VecMType* master_param_out_vec; if (IsAmp) { master_param_out_vec = reinterpret_cast<VecMType*>(master_param_out); } for (int i = tid; i < main; i += grid_stride) { VecType param_out_tmp; VecMType velocity_tmp, param_tmp; VecType grad_data = grad_vec[i]; VecMType param_data = param_vec[i]; VecMType velocity_data = velocity_vec[i]; #pragma unroll for (int j = 0; j < VecSize; ++j) { MT grad_val = static_cast<MT>(grad_data[j]) * rescale_grad; velocity_tmp[j] = Fma(velocity_data[j], mu, local_lr * Fma(lars_weight_decay, param_data[j], 
grad_val)); param_tmp[j] = param_data[j] - velocity_tmp[j]; param_out_tmp[j] = static_cast<T>(param_tmp[j]); } param_out_vec[i] = param_out_tmp; velocity_out_vec[i] = velocity_tmp; if (IsAmp) { master_param_out_vec[i] = param_tmp; } } for (int i = tid + tail_offset; i < numel; i += grid_stride) { MT grad_val = static_cast<MT>(grad[i]) * rescale_grad; MT param_val = param[i]; MT velocity_tmp = Fma(velocity[i], mu, local_lr * Fma(lars_weight_decay, param_val, grad_val)); MT param_tmp = param_val - velocity_tmp; param_out[i] = static_cast<T>(param_tmp); velocity_out[i] = velocity_tmp; if (IsAmp) { master_param_out[i] = param_tmp; } } } #if CUDA_VERSION >= 11000 /* Once CUDA_VERSION is beyond 11, cooperative_groups can be involved in without --rdc=true compile flag, then L2_norm kernel can be set with __device__ and cooperative_groups::grid_group also can be involved. Otherwise, adding this flag may affect much, L2_norm kernel shall be set with __global__.*/ // TODO(limingshu): declaration of cooperative_groups wapper is invalid in host. 
template <typename T, typename MT> __forceinline__ __device__ void L2NormKernel( const cooperative_groups::grid_group* cg, #else template <typename T, typename MT> __global__ void L2NormKernel( #endif const T* p_data, const T* __restrict__ g_data, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int64_t numel, const int repeat_times, const MT rescale_grad, const int thresh = 0, MT* __restrict__ p_n = nullptr, MT* __restrict__ g_n = nullptr) { __shared__ MT s_buffer[2]; int tid = threadIdx.x + blockDim.x * blockIdx.x; int grid_stride = LARS_BLOCK_SIZE * gridDim.x; MT p_tmp = static_cast<MT>(0); MT g_tmp = static_cast<MT>(0); while (tid < numel) { MT tmp0 = static_cast<MT>(p_data[tid]); MT tmp1 = static_cast<MT>(g_data[tid]); p_tmp += (tmp0 * tmp0); g_tmp += (tmp1 * tmp1); tid += grid_stride; } p_tmp = phi::funcs::blockReduceSum<MT>(p_tmp, FINAL_MASK); g_tmp = phi::funcs::blockReduceSum<MT>(g_tmp, FINAL_MASK); if (threadIdx.x == 0) { p_buffer[blockIdx.x] = p_tmp; g_buffer[blockIdx.x] = g_tmp; } #if CUDA_VERSION >= 11000 cg->sync(); // Grid sync for writring partial result to gloabl memory MT p_part_sum = threadIdx.x < gridDim.x ? p_buffer[threadIdx.x] : 0; MT g_part_sum = threadIdx.x < gridDim.x ? 
g_buffer[threadIdx.x] : 0; MT tmp0 = phi::funcs::blockReduceSum<MT>(p_part_sum, FINAL_MASK); MT tmp1 = phi::funcs::blockReduceSum<MT>(g_part_sum, FINAL_MASK); if (threadIdx.x == 0) { s_buffer[0] = tmp0; s_buffer[1] = tmp1; } __syncthreads(); *p_n = Sqrt(s_buffer[0]); *g_n = rescale_grad * Sqrt(s_buffer[1]); #endif } template <typename T, typename MT> __forceinline__ __device__ void MomentumUpdate( const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, const MT mu, const MT lars_weight_decay, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const MT param_norm, const MT grad_norm, const int tid, const int grid_stride, const int64_t numel, const bool is_amp) { const MT lr = learning_rate[0]; MT local_lr = lr; if (param_norm > static_cast<MT>(0) && grad_norm > static_cast<MT>(0)) { local_lr = lr * lars_coeff * param_norm / (fma(lars_weight_decay, param_norm, grad_norm) + epsilon); } if (is_amp) { VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/true>( grad, master_param, velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel, master_param_out); } else { if (std::is_same<T, float>::value || std::is_same<T, paddle::platform::float16>::value) { /* TODO(limingshu): pointer cast may damage memory accessing for fp16 */ VectorizeLarsUpdate<T, MT, /*VecSize=*/4, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } else { VectorizeLarsUpdate<T, MT, /*VecSize=*/2, /*IsAmp=*/false>( grad, reinterpret_cast<const MT*>(param), velocity, param_out, velocity_out, mu, local_lr, lars_weight_decay, rescale_grad, tid, grid_stride, numel); } } } #if CUDA_VERSION >= 11000 template <typename T, typename MT> struct LarsParamWarpper { int64_t numel_arr[LARS_MAX_MERGED_OPS]; int 
repeat_arr[LARS_MAX_MERGED_OPS]; const T* __restrict__ g_arr[LARS_MAX_MERGED_OPS]; const MT* __restrict__ lr_arr[LARS_MAX_MERGED_OPS]; T* __restrict__ p_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ v_out_arr[LARS_MAX_MERGED_OPS]; MT* __restrict__ master_p_out_arr[LARS_MAX_MERGED_OPS]; MT weight_decay_arr[LARS_MAX_MERGED_OPS]; }; template <typename T, typename MT> __global__ void MergedMomentumLarsKernel(LarsParamWarpper<T, MT> lars_warpper, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const int op_num, const MT mu, const MT lars_coeff, const MT epsilon, const MT rescale_grad, const bool is_amp) { int grid_stride = gridDim.x * LARS_BLOCK_SIZE; int tid = threadIdx.x + blockIdx.x * blockDim.x; const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); for (int i = 0; i < op_num; ++i) { int numel = lars_warpper.numel_arr[i]; MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], p_buffer, g_buffer, numel, lars_warpper.repeat_arr[i], rescale_grad, 0, &param_norm, &grad_norm); MomentumUpdate<T, MT>( lars_warpper.p_out_arr[i], lars_warpper.g_arr[i], lars_warpper.v_out_arr[i], lars_warpper.p_out_arr[i], lars_warpper.v_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.master_p_out_arr[i], lars_warpper.lr_arr[i], mu, lars_warpper.weight_decay_arr[i], lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } } #endif template <typename T, typename MT> __global__ void MomentumLarsKernel( const T* param, const T* __restrict__ grad, const MT* velocity, T* param_out, MT* velocity_out, const MT* master_param, MT* master_param_out, const MT* __restrict__ learning_rate, MT* __restrict__ p_buffer, MT* __restrict__ g_buffer, const MT mu, const MT lars_coeff, const MT lars_weight_decay, const MT epsilon, const MT rescale_grad, const int repeat_times, const int thresh, const int64_t numel, const bool is_amp) { int tid = 
threadIdx.x + blockIdx.x * blockDim.x; int grid_stride = gridDim.x * LARS_BLOCK_SIZE; #if CUDA_VERSION >= 11000 const cooperative_groups::grid_group cg = cooperative_groups::this_grid(); MT param_norm = static_cast<MT>(0); MT grad_norm = static_cast<MT>(0); L2NormKernel<T, MT>(&cg, param, grad, p_buffer, g_buffer, numel, repeat_times, rescale_grad, gridDim.x, &param_norm, &grad_norm); #else const MT rescale_grad_pow = rescale_grad * rescale_grad; MT param_part_norm = threadIdx.x < thresh ? p_buffer[threadIdx.x] : 0; MT grad_part_norm = threadIdx.x < thresh ? g_buffer[threadIdx.x] : 0; __syncthreads(); MT param_norm = Sqrt(phi::funcs::blockReduceSum<MT>(param_part_norm, FINAL_MASK)); MT grad_norm = Sqrt(rescale_grad_pow * phi::funcs::blockReduceSum<MT>( grad_part_norm, FINAL_MASK)); #endif MomentumUpdate<T, MT>(param, grad, velocity, param_out, velocity_out, master_param, master_param_out, learning_rate, mu, lars_weight_decay, lars_coeff, epsilon, rescale_grad, param_norm, grad_norm, tid, grid_stride, numel, is_amp); } template <typename T, typename MT> inline void SeparatedLarsMomentumOpCUDAKernel( const platform::CUDADeviceContext& cuda_ctx, const T* param_data, T* param_out_data, const MT* velocity_data, MT* velocity_out_data, const T* grad_data, const MT* lr, MT* p_buffer, MT* g_buffer, const MT mu, const MT lars_coeff, const MT weight_decay, const MT epsilon, const MT rescale_grad, const int64_t numel, const MT* master_param_data, MT* master_out_data, const bool is_amp) { LarsThreadConfig<T> lars_thread_config(numel); L2NormKernel<T, MT><<<lars_thread_config.grid_for_norm, LARS_BLOCK_SIZE, 0, cuda_ctx.stream()>>>( param_data, grad_data, p_buffer, g_buffer, numel, lars_thread_config.repeat_times, rescale_grad); MomentumLarsKernel<T, MT><<<lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, 0, cuda_ctx.stream()>>>( param_data, grad_data, velocity_data, param_out_data, velocity_out_data, master_param_data, master_out_data, lr, p_buffer, g_buffer, mu, lars_coeff, 
weight_decay, epsilon, rescale_grad, 0, lars_thread_config.grid_for_norm, numel, is_amp); } template <typename DeviceContext, typename T> class LarsMomentumOpCUDAKernel : public framework::OpKernel<T> { using MT = MultiPrecisionType<T>; public: void Compute(const framework::ExecutionContext& ctx) const override { int num_blocks_per_sm = 0; bool multi_precision = ctx.Attr<bool>("multi_precision"); auto& cuda_ctx = ctx.template device_context<platform::CUDADeviceContext>(); int sm_num = cuda_ctx.GetSMCount(); framework::Tensor tmp_buffer_t = ctx.AllocateTmpTensor<MT, platform::CUDADeviceContext>( {LARS_BLOCK_SIZE << 1}, cuda_ctx); auto* p_buffer = tmp_buffer_t.mutable_data<MT>(ctx.GetPlace()); auto* g_buffer = p_buffer + LARS_BLOCK_SIZE; MT mu = static_cast<MT>(ctx.Attr<float>("mu")); MT lars_coeff = static_cast<MT>(ctx.Attr<float>("lars_coeff")); MT epsilon = static_cast<MT>(ctx.Attr<float>("epsilon")); MT rescale_grad = static_cast<MT>(ctx.Attr<float>("rescale_grad")); auto weight_decay_arr = ctx.Attr<std::vector<float>>("lars_weight_decay"); auto grad = ctx.MultiInput<framework::LoDTensor>("Grad"); auto param = ctx.MultiInput<framework::LoDTensor>("Param"); auto velocity = ctx.MultiInput<framework::LoDTensor>("Velocity"); auto param_out = ctx.MultiOutput<framework::LoDTensor>("ParamOut"); auto velocity_out = ctx.MultiOutput<framework::LoDTensor>("VelocityOut"); auto learning_rate = ctx.MultiInput<framework::LoDTensor>("LearningRate"); auto master_param = ctx.MultiInput<framework::LoDTensor>("MasterParam"); auto master_param_out = ctx.MultiOutput<framework::LoDTensor>("MasterParamOut"); int op_num = grad.size(); #if CUDA_VERSION >= 11000 if (op_num > 1) { LarsParamWarpper<T, MT> lars_warpper; PADDLE_ENFORCE_LT( op_num, LARS_MAX_MERGED_OPS, platform::errors::InvalidArgument( "The maximum number of merged-ops supported is (%d), but" "lars op required for trainning this model is (%d)\n", LARS_MAX_MERGED_OPS, op_num)); /* Implementation of lars optimizer consists of 
following two steps: 1. Figure out the L2 norm statistic result of grad data and param data. 2. Update param and velocity with usage of L2 norm statistic result. Step1 and step2 can be merged with api provided by nvida cudaLaunchCooperativeKernel: - The thread quantity shall less than pyhsical SM limited threads - Launche as thread-block can synchronizlly execute. */ cudaOccupancyMaxActiveBlocksPerMultiprocessor( &num_blocks_per_sm, MergedMomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); size_t total_numel = 0; for (int i = 0; i < op_num; ++i) { size_t temp_numel = param[i]->numel(); total_numel += temp_numel; lars_warpper.numel_arr[i] = temp_numel; lars_warpper.g_arr[i] = grad[i]->data<T>(); lars_warpper.lr_arr[i] = learning_rate[i]->data<MT>(); lars_warpper.p_out_arr[i] = param_out[i]->mutable_data<T>(ctx.GetPlace()); lars_warpper.v_out_arr[i] = velocity_out[i]->mutable_data<MT>(ctx.GetPlace()); lars_warpper.weight_decay_arr[i] = static_cast<MT>(weight_decay_arr[i]); PADDLE_ENFORCE_EQ( param[i]->data<T>(), lars_warpper.p_out_arr[i], platform::errors::InvalidArgument( "Input(Param) and Output(ParamOut) must be the same Tensors.")); PADDLE_ENFORCE_EQ(velocity[i]->data<MT>(), lars_warpper.v_out_arr[i], platform::errors::InvalidArgument( "Input(Velocity) and Output(VelocityOut) must be " "the same Tensors.")); } int64_t avg_numel = total_numel / op_num; LarsThreadConfig<float> lars_thread_config(avg_numel, sm_num, num_blocks_per_sm); for (int i = 0; i < op_num; ++i) { lars_warpper.repeat_arr[i] = lars_thread_config.GetRepeatTimes(lars_warpper.numel_arr[i]); } if (multi_precision) { for (int i = 0; i < op_num; ++i) { lars_warpper.master_p_out_arr[i] = master_param_out[i]->mutable_data<MT>(ctx.GetPlace()); PADDLE_ENFORCE_EQ(master_param[i]->data<MT>(), lars_warpper.master_p_out_arr[i], platform::errors::InvalidArgument( "Input(MasterParam) and Output(MasterParamOut) " "must be the same Tensors.")); } } void* cuda_param[] = 
{reinterpret_cast<void*>(&lars_warpper), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&op_num), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads, and thead of each block synchronizedly cooperate. cudaLaunchCooperativeKernel( reinterpret_cast<void*>(MergedMomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } else { auto* param_data = param[0]->data<T>(); auto* grad_data = grad[0]->data<T>(); auto* velocity_data = velocity[0]->data<MT>(); auto* lr = learning_rate[0]->data<MT>(); auto* param_out_data = param_out[0]->mutable_data<T>(ctx.GetPlace()); auto* velocity_out_data = velocity_out[0]->mutable_data<MT>(ctx.GetPlace()); const MT* master_param_data = multi_precision ? master_param[0]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? master_param_out[0]->mutable_data<MT>(ctx.GetPlace()) : nullptr; int64_t numel = param[0]->numel(); MT lars_weight_decay = weight_decay_arr[0]; // Figure out how many blocks can be active in each sm. 
cudaOccupancyMaxActiveBlocksPerMultiprocessor( &num_blocks_per_sm, MomentumLarsKernel<T, MT>, LARS_BLOCK_SIZE, sizeof(MT) << 1); LarsThreadConfig<float> lars_thread_config(numel, sm_num, num_blocks_per_sm); int repeat_times = lars_thread_config.GetRepeatTimes(numel); int thresh = 0; void* cuda_param[] = { reinterpret_cast<void*>(&param_data), reinterpret_cast<void*>(&grad_data), reinterpret_cast<void*>(&velocity_data), reinterpret_cast<void*>(&param_out_data), reinterpret_cast<void*>(&velocity_out_data), reinterpret_cast<void*>(&master_param_data), reinterpret_cast<void*>(&master_param_out_data), reinterpret_cast<void*>(&lr), reinterpret_cast<void*>(&p_buffer), reinterpret_cast<void*>(&g_buffer), reinterpret_cast<void*>(&mu), reinterpret_cast<void*>(&lars_coeff), reinterpret_cast<void*>(&lars_weight_decay), reinterpret_cast<void*>(&epsilon), reinterpret_cast<void*>(&rescale_grad), reinterpret_cast<void*>(&repeat_times), reinterpret_cast<void*>(&thresh), // Just a placeholder reinterpret_cast<void*>(&numel), reinterpret_cast<void*>(&multi_precision)}; // Lanuch all sm theads. cudaLaunchCooperativeKernel( reinterpret_cast<void*>(MomentumLarsKernel<T, MT>), lars_thread_config.grid_for_lars, LARS_BLOCK_SIZE, cuda_param, 0, cuda_ctx.stream()); } #else for (int i = 0; i < op_num; ++i) { const MT* master_param_data = multi_precision ? master_param[i]->data<MT>() : nullptr; MT* master_param_out_data = multi_precision ? 
master_param_out[i]->mutable_data<MT>(ctx.GetPlace()) : nullptr; SeparatedLarsMomentumOpCUDAKernel<T, MT>( cuda_ctx, param[i]->data<T>(), param_out[i]->mutable_data<T>(ctx.GetPlace()), velocity[i]->data<MT>(), velocity_out[i]->mutable_data<MT>(ctx.GetPlace()), grad[i]->data<T>(), learning_rate[i]->data<MT>(), p_buffer, g_buffer, mu, lars_coeff, weight_decay_arr[i], epsilon, rescale_grad, param[i]->numel(), master_param_data, master_param_out_data, multi_precision); } #endif } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( lars_momentum, ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, float>, ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, double>, ops::LarsMomentumOpCUDAKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>);
2d4c7191b923e901c6ac0e5c4a896025c065ba28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" { //Device code __device__ __inline__ float dot(const float2 a, const float2 b) { return (a.x * b.x) + (a.y * b.y); } __device__ float2 calculatePosition(int x, int y, float width, float height) { float2 fragSize = make_float2(2 / width, 2 / height); return make_float2(fragSize.x * x + fragSize.y / 2 - 1, fragSize.y * y + fragSize.y / 2 - 1); } __global__ void baryKernel(const float2 *v0, \ const float2 *v1, \ const float2 *v2, \ const unsigned int dCount, \ const unsigned int primitivesCount, \ const float *da, \ const float *db, \ const float *dc, \ float *dOut, \ int *dOut_valid_frament, \ int *dOut_valid_pixel, \ const int width, \ const int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int z = blockIdx.z * blockDim.z + threadIdx.z; if (x < width && y < height && z < primitivesCount) { float2 pos = calculatePosition(x, y, width, height); float2 t0 = make_float2(v2[z].x, v2[z].y); float2 t1 = make_float2(v0[z].x, v0[z].y); float2 t2 = make_float2(v1[z].x, v1[z].y); float2 v0 = make_float2(t1.x - t0.x, t1.y - t0.y); float2 v1 = make_float2(t2.x - t0.x, t2.y - t0.y); float2 v2 = make_float2(pos.x - t0.x, pos.y - t0.y); float d00 = dot(v0, v0); float d01 = dot(v0, v1); float d11 = dot(v1, v1); float d20 = dot(v2, v0); float d21 = dot(v2, v1); float denom = d00 * d11 - d01 * d01; float baryX = (d11 * d20 - d01 * d21) / denom; float baryY = (d00 * d21 - d01 * d20) / denom; float baryZ = 1 - baryX - baryY; int rowSize = width; int gridSize = rowSize * height; int triangleBlockSize = gridSize * dCount; int outDataBaseIndex = x + y * rowSize + z * triangleBlockSize; int validIndex = x + y * rowSize + z * gridSize; if (baryX > 0 && baryY > 0 && baryZ > 0) { int inDataBaseIndex = z * dCount; for (int i = 0; i < dCount; i++) { int idx = inDataBaseIndex + i; dOut[outDataBaseIndex + i * 
gridSize] = da[idx] * baryX + db[idx] * baryY + dc[idx] * baryZ; } dOut_valid_frament[validIndex] = 1; dOut_valid_pixel[x + y * rowSize] += 1; } else { for (int i = 0; i < dCount; i++) { dOut[outDataBaseIndex + i * gridSize] = 0; } dOut_valid_frament[validIndex] = 0; } } } }
2d4c7191b923e901c6ac0e5c4a896025c065ba28.cu
extern "C" { //Device code __device__ __inline__ float dot(const float2 a, const float2 b) { return (a.x * b.x) + (a.y * b.y); } __device__ float2 calculatePosition(int x, int y, float width, float height) { float2 fragSize = make_float2(2 / width, 2 / height); return make_float2(fragSize.x * x + fragSize.y / 2 - 1, fragSize.y * y + fragSize.y / 2 - 1); } __global__ void baryKernel(const float2 *v0, \ const float2 *v1, \ const float2 *v2, \ const unsigned int dCount, \ const unsigned int primitivesCount, \ const float *da, \ const float *db, \ const float *dc, \ float *dOut, \ int *dOut_valid_frament, \ int *dOut_valid_pixel, \ const int width, \ const int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int z = blockIdx.z * blockDim.z + threadIdx.z; if (x < width && y < height && z < primitivesCount) { float2 pos = calculatePosition(x, y, width, height); float2 t0 = make_float2(v2[z].x, v2[z].y); float2 t1 = make_float2(v0[z].x, v0[z].y); float2 t2 = make_float2(v1[z].x, v1[z].y); float2 v0 = make_float2(t1.x - t0.x, t1.y - t0.y); float2 v1 = make_float2(t2.x - t0.x, t2.y - t0.y); float2 v2 = make_float2(pos.x - t0.x, pos.y - t0.y); float d00 = dot(v0, v0); float d01 = dot(v0, v1); float d11 = dot(v1, v1); float d20 = dot(v2, v0); float d21 = dot(v2, v1); float denom = d00 * d11 - d01 * d01; float baryX = (d11 * d20 - d01 * d21) / denom; float baryY = (d00 * d21 - d01 * d20) / denom; float baryZ = 1 - baryX - baryY; int rowSize = width; int gridSize = rowSize * height; int triangleBlockSize = gridSize * dCount; int outDataBaseIndex = x + y * rowSize + z * triangleBlockSize; int validIndex = x + y * rowSize + z * gridSize; if (baryX > 0 && baryY > 0 && baryZ > 0) { int inDataBaseIndex = z * dCount; for (int i = 0; i < dCount; i++) { int idx = inDataBaseIndex + i; dOut[outDataBaseIndex + i * gridSize] = da[idx] * baryX + db[idx] * baryY + dc[idx] * baryZ; } 
dOut_valid_frament[validIndex] = 1; dOut_valid_pixel[x + y * rowSize] += 1; } else { for (int i = 0; i < dCount; i++) { dOut[outDataBaseIndex + i * gridSize] = 0; } dOut_valid_frament[validIndex] = 0; } } } }
3ab35ee0cbaee238c39b266dbf8f9d6b3c21dab9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef WRAD #ifdef WCHEM #include <math.h> #include <stdio.h> #include <stdlib.h> #include "prototypes.h" #include "oct.h" #include <string.h> #include <mpi.h> //#include "atomic_data/Atomic.h" #include "gpu_type.h" #define FRAC_VAR (0.1) #define idloc 0 //================================================================================ __device__ void dE2T(struct Rtype *R, REAL aexp,struct RUNPARAMS *param){ REAL tloc; REAL eint=R->eint; REAL nH=R->nh; REAL x=R->nhplus/R->nh; REAL pstar=param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); nH=nH/POW(aexp,3)*param->unit.unit_N; eint=eint/POW(aexp,5)*pstar; tloc=eint/(1.5*nH*KBOLTZ*(1.+x)); R->temp=tloc; } // ============================================================================ __device__ REAL dcucompute_alpha_b(REAL temp, REAL unit_number, REAL aexp) { // CASE B recombination rate m**3 s*-1 // temperature should be given in Kelvin REAL alpha_b,lambda; lambda=2e0*157807e0/temp; alpha_b=2.753e-14*POW(lambda,1.5)/POW(1e0+POW(lambda/2.740,0.407),2.242); //cm3/s #ifdef TESTCOSMO alpha_b=alpha_b*1e-6*unit_number;///(aexp*aexp*aexp); //m3/s #else alpha_b=alpha_b*1e-6*unit_number; //m3/s #endif return alpha_b; } //========================================================= //========================================================= __device__ REAL dcucompute_alpha_a(REAL temp, REAL unit_number, REAL aexp) { // CASE A recombination rate m**3 s*-1 // temperature should be given in Kelvin REAL alpha_a,lambda; lambda=2e0*157807e0/temp; alpha_a=1.269e-13*POW(lambda,1.503)/POW(1e0+POW(lambda/0.522,0.470),1.923); //cm3/s #ifdef TESTCOSMO alpha_a=alpha_a*1e-6*unit_number;///(aexp*aexp*aexp); //m3/s #else alpha_a=alpha_a*1e-6*unit_number; //m3/s #endif return alpha_a; } //========================================================= //========================================================= __device__ REAL dcucompute_beta(REAL temp, 
REAL unit_number, REAL aexp) { // Collizional ionization rate m**3 s*-1 // temperature in Kelvin REAL beta,T5; T5=temp/1e5; beta=5.85e-11*SQRT(temp)/(1+SQRT(T5))*EXP(-(157809e0/temp)); //cm3/s #ifdef TESTCOSMO beta=beta*1e-6*unit_number;///(aexp*aexp*aexp); // !m3/s #else beta=beta*1e-6*unit_number; // !m3/s #endif return beta; } //********************************************************************************** //********************************************************************************** __device__ void dcuCompCooling(REAL temp, REAL x, REAL nH, REAL *lambda, REAL *tcool, REAL aexp,REAL CLUMPF) { REAL c1,c2,c3,c4,c5,c6; REAL unsurtc; REAL nh2; nh2=nH*1e-6;// ! m-3 ==> cm-3 // Collisional Ionization Cooling c1=EXP(-157809.1e0/temp)*1.27e-21*SQRT(temp)/(1.+SQRT(temp/1e5))*x*(1.-x)*nh2*nh2*CLUMPF; // Case A Recombination Cooling c2=1.778e-29*temp*POW(2e0*157807e0/temp,1.965e0)/POW(1.+POW(2e0*157807e0/temp/0.541e0,0.502e0),2.697e0)*x*x*nh2*nh2*CLUMPF; // Case B Recombination Cooling c6=3.435e-30*temp*POW(2e0*157807e0/temp,1.970e0)/POW(1.+(POW(2e0*157807e0/temp/2.250e0,0.376e0)),3.720e0)*x*x*nh2*nh2*CLUMPF; c6=0.; // Collisional excitation cooling c3=EXP(-118348e0/temp)*7.5e-19/(1+SQRT(temp/1e5))*x*(1.-x)*nh2*nh2*CLUMPF; // Bremmsstrahlung c4=1.42e-27*1.5e0*SQRT(temp)*x*x*nh2*nh2*CLUMPF; // Compton Cooling /* c5=1.017e-37*POW(2.727/aexp,4)*(temp-2.727/aexp)*nh2*x; */ c5=0.; #ifndef WRADTEST c5=5.406e-24*(temp-2.727/aexp)/POW(aexp/0.001,4)*x*nh2; REAL Ta=2.727/aexp; c5=5.406e-36*(temp-Ta)/(aexp*aexp*aexp*aexp)*x*nh2; #endif // Overall Cooling *lambda=c1+c2+c3+c4+c5+c6;// ! erg*cm-3*s-1 // Unit Conversion *lambda=(*lambda)*1e-7*1e6;// ! 
J*m-3*s-1 // cooling times unsurtc=FMAX(c1,c2); unsurtc=FMAX(unsurtc,c3); unsurtc=FMAX(unsurtc,c4); unsurtc=FMAX(unsurtc,FABS(c5)); unsurtc=FMAX(unsurtc,c6)*1e-7;// ==> J/cm3/s *tcool=1.5e0*nh2*(1.+x)*KBOLTZ*temp/unsurtc; //Myr } // =========================================================================================================================== __global__ void dchemrad(struct RGRID *stencil, int nread, int stride, struct CPUINFO *cpu, REAL dxcur, REAL dtnew, struct RUNPARAMS *param, REAL aexporg, int chemonly) { int i,icell,igrp; //int idloc=0; int nitcool=0; REAL hnu0=13.6*1.6022e-19, Cool, tcool, dtcool, tcool1, currentcool_t=0., alpha, alphab, beta, tloc, xt, eintt, ai_tmp1=0., et[NGRP], p[NGRP]; REAL aexp; REAL ebkg[NGRP]; REAL z=1./aexporg-1.; REAL c=param->clightorg*LIGHT_SPEED_IN_M_PER_S; // switch back to physical velocity m/s REAL hnu[NGRP]; REAL alphae[NGRP]; REAL alphai[NGRP]; REAL factgrp[NGRP]; for(igrp=0;igrp<NGRP;igrp++) { hnu[igrp]=param->atomic.hnu[igrp]; alphae[igrp]=param->atomic.alphae[igrp]; alphai[igrp]=param->atomic.alphai[igrp]; factgrp[igrp]=param->atomic.factgrp[igrp]; } #ifdef S_X REAL E0overI[NGRP]; REAL N2[NGRP]; REAL F2[NGRP]; #endif #define BLOCKCOOL 1 // KEPT FROM CUDATON FOR SIMPLICITY #define idloc3 0 // KEPT FROM CUDATON FOR SIMPLICITY REAL egyloc[BLOCKCOOL*NGRP], floc[3*BLOCKCOOL*NGRP], srcloc[BLOCKCOOL*NGRP], x0[BLOCKCOOL], nH[BLOCKCOOL], eint[BLOCKCOOL]; REAL dt=dtnew*param->unit.unit_t*POW(aexporg,2); REAL emin; struct Rtype R; REAL fudgecool=param->fudgecool; int ncvgcool=param->ncvgcool; REAL E0; #ifdef SCHAYE REAL navg=(param->cosmo->ob/param->cosmo->om)/(PROTON_MASS*MOLECULAR_MU)*param->unit.unit_d; #endif REAL xorg; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells if(stencil[i].oct[6].cell[icell].split) continue; // we dont treat split cells memcpy(&R,&stencil[i].New.cell[icell].rfieldnew,sizeof(struct Rtype));// We get 
the local physical quantities after transport update #ifdef HOMOSOURCE // we override the value with the homogeneous source density R.src=param->bkg; #endif //if(eint[idloc]!=E0) printf("1!\n"); /// ==================== UV Background #ifdef UVBKG if(NGRP>1) printf("WARNING BAD BEHAVIOR FOR BKG with NGRP>1 !\n"); //for(igrp=0;igrp<NGRP;igrp++) ebkg[igrp]=3.6*(z<3?1.:4./(1+z)) ; // Katz simple model // Poor FIT to Haardt & MAdau 2012 /* for(igrp=0;igrp<NGRP;igrp++){ REAL amp=1.2e-16,sig=1.,zavg=2,mz=1e-18,pz=1.2e-17; ebkg[igrp]=amp/(sig*SQRT(2*M_PI))*exp(-POW((z-zavg),2)/(2.*POW(sig,2)))+mz*z+pz; // comoving photons/s/m3 } */ #else for(igrp=0;igrp<NGRP;igrp++) ebkg[igrp]=0.; #endif // switch to physical units, chemistry remains unchanged with and without cosmo for (igrp=0;igrp<NGRP;igrp++) { egyloc[idloc+igrp*BLOCKCOOL] =R.e[igrp]/(aexporg*aexporg*aexporg)*param->unit.unit_N;//+ebkg[igrp]; floc[0+idloc3+igrp*BLOCKCOOL*3]=R.fx[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; floc[1+idloc3+igrp*BLOCKCOOL*3]=R.fy[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; floc[2+idloc3+igrp*BLOCKCOOL*3]=R.fz[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; } x0[idloc]=R.nhplus/R.nh; xorg= x0[idloc]; nH[idloc]=R.nh/(aexporg*aexporg*aexporg)*param->unit.unit_N; eint[idloc]=R.eint/POW(aexporg,5)*param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); emin=PMIN/(GAMMA-1.)/POW(aexporg,5)*param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); // physical minimal pressure for (igrp=0;igrp<NGRP;igrp++){ srcloc[idloc+igrp*BLOCKCOOL]=(R.src[igrp]*param->unit.unit_N/param->unit.unit_t/(aexporg*aexporg))/POW(aexporg,3); //phot/s/dv (physique) } // R.src phot/unit_t/unit_dv (comobile) REAL eorg=eint[idloc]; REAL etorg=egyloc[idloc]; REAL torg=eint[idloc]/(1.5*nH[idloc]*KBOLTZ*(1.+x0[idloc])); //if(srcloc[0]>0) printf("nh=%e %e %e %e\n",R.nh,R.e[0],eint[idloc],3[idloc]); // at this stage 
we are ready to do the calculations // DEALING WITH CLUMPING ---------------------- #ifdef WCLUMP REAL CLUMPF2=FMIN(FMAX(POW(nH[idloc]/6.,0.7),1.),40.); REAL CLUMPI=1.; #else REAL CLUMPF2=1.; REAL CLUMPI=1.; #endif for(igrp=0;igrp<NGRP;igrp++) { alphai[igrp] *= CLUMPI; alphae[igrp] *= CLUMPI; } // ------------------------------------------------- /// local cooling loop ------------------------------- aexp=aexporg; fudgecool=param->fudgecool; currentcool_t=0.; nitcool=0.; REAL da; //printf("cpu=%d fudge=%e ncv=%d currentcool_t=%e dt=%e\n",cpu->rank,param->fudgecool,ncvgcool,currentcool_t,dt); // local cooling loop ------------------------------- while(currentcool_t<dt) { /// Cosmological Adiabatic expansion effects ============== #ifdef TESTCOSMO REAL hubblet=param->cosmo->H0*SQRT(param->cosmo->om/aexp+param->cosmo->ov*(aexp*aexp))/aexp*(1e3/(1e6*PARSEC)); // s-1 // SOMETHING TO CHECK HERE #else REAL hubblet=0.; #endif //if(eint[idloc]!=E0) printf("2!\n"); tloc=eint[idloc]/(1.5*nH[idloc]*KBOLTZ*(1.+x0[idloc])); //== Getting a timestep dcuCompCooling(tloc,x0[idloc],nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); ai_tmp1=0.; //if(eint[idloc]!=E0) printf("3!\n"); if(fudgecool<1e-20){ printf("eint=%e(%e<%e) nH=%e x0=%e(%e) T=%e(%e) N=%e(%e)\n",eint[idloc],eorg,emin,nH[idloc],x0[idloc],xorg,tloc,torg,et[0],etorg); //if(fudgecool<1e-20) abort(); } for (igrp=0;igrp<NGRP;igrp++) ai_tmp1 += ((alphae[igrp])*hnu[igrp]-(alphai[igrp])*hnu0)*egyloc[idloc+igrp*BLOCKCOOL]; tcool=FABS(eint[idloc]/(nH[idloc]*(1.0-x0[idloc])*ai_tmp1*(!chemonly)-Cool)); ai_tmp1=0.; dtcool=FMIN(fudgecool*tcool,dt-currentcool_t); alpha=dcucompute_alpha_a(tloc,1.,1.)*CLUMPF2; alphab=dcucompute_alpha_b(tloc,1.,1.)*CLUMPF2; beta=dcucompute_beta(tloc,1.,1.)*CLUMPF2; //== Update // ABSORPTION int test = 0; REAL factotsa[NGRP]; for(igrp=0;igrp<NGRP;igrp++) { #ifdef OTSA factotsa[igrp]=0; alpha=alphab; // recombination is limited to non ground state levels #else factotsa[igrp]=(igrp==0); #endif ai_tmp1 = alphai[igrp]; 
if(chemonly){ et[igrp]=egyloc[idloc+igrp*BLOCKCOOL]; } else{ et[igrp]=((alpha-alphab)*x0[idloc]*x0[idloc]*nH[idloc]*nH[idloc]*dtcool*factotsa[igrp]+egyloc[idloc+igrp*BLOCKCOOL]+srcloc[idloc+igrp*BLOCKCOOL]*dtcool*factgrp[igrp])/(1.+dtcool*(ai_tmp1*(1.-x0[idloc])*nH[idloc])); } if((et[igrp]<0)||(isnan(et[igrp]))){ test=1; //printf("eint=%e nH=%e x0=%e T=%e N=%e\n",eint[idloc],nH[idloc],x0[idloc],tloc,et[0]); } p[igrp]=(1.+(alphai[igrp]*nH[idloc]*(1-x0[idloc]))*dtcool); } ai_tmp1=0.; if(test) { fudgecool=fudgecool/10.; continue; } // IONISATION #ifndef S_X #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*et[igrp]*(!chemonly);} #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*egyloc[idloc+igrp*BLOCKCOOL]*(!chemonly);} #endif #else N2[0]=1.0; REAL pp=(1.-POW(x0[idloc],0.4092)); if(pp<0.) pp=0.; for(igrp=1;igrp<NGRP;igrp++){ N2[igrp]=1.0+0.3908*POW(pp,1.7592)*E0overI[igrp]; if(N2[igrp]<1.0) N2[igrp]=1.0; } #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*et[igrp]*N2[igrp]*(!chemonly);} #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*egyloc[idloc+igrp*BLOCKCOOL]*N2[igrp]*(!chemonly);} #endif #endif xt=1.-(alpha*x0[idloc]*x0[idloc]*nH[idloc]*dtcool+(1. -x0[idloc]))/(1.+dtcool*(beta*x0[idloc]*nH[idloc]+ai_tmp1)); ai_tmp1=0.; if(((xt>1.)||(xt<0.))||(isnan(xt))) { //printf("XION ERR eintt=%e xt=%e et=%e\n",eintt,xt,et[0]); fudgecool/=10.; continue; } #ifdef SEMI_IMPLICIT dcuCompCooling(tloc,xt,nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); #else dcuCompCooling(tloc,x0[idloc],nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); #endif #ifdef COOLING // HEATING + COOLING int compcool=1; // do we need to compute the cooling ? 
#ifdef SCHAYE if((nH[idloc]>1e6)&&(R.nh>(param->stars->overdensity_cond*navg))){ REAL tlocs; tlocs=eintt/(1.5*nH[idloc]*KBOLTZ*(1.+xt)); if(tlocs<1e5){ eintt=(1.08e9*KBOLTZ)*POW(nH[idloc]/1e5,4./3.)/(GAMMA-1)/FSCHAYE; // polytropic EOS compcool=0.; // cancel cooling calculation fudgecool=FMIN(fudgecool*1.5,param->fudgecool); } } #endif // SCHAYE if(compcool){ REAL SN = 0; #ifdef SUPERNOVAE SN = R.snfb; if (R.snfb) Cool = 0; // Stop the cooling if supernovae if (R.snfb) printf("dE\t%e\tE0\t%e\tdtcool\t%e\t",R.snfb*dtcool,eintt, dtcool); #endif #ifndef S_X #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += et[igrp]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*(!chemonly);} eintt=(eint[idloc]+ dtcool*(nH[idloc]*(1.-xt)*(ai_tmp1)-Cool+SN)); // if (R.snfb) printf("E0\t%e\n",eintt); #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += egyloc[idloc+igrp*BLOCKCOOL]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-x0[idloc])*(ai_tmp1)-Cool+SN)); #endif //SEMI #else //===================================== X RAYS ============================== REAL pp2; F2[0]=1.0; //if(eint[idloc]!=E0) printf("7!\n"); #ifdef SEMI_IMPLICIT pp2=1.0-POW(xt,0.2663); #else pp2=1.0-POW(x0[idloc],0.2663); #endif if(pp2<0.) pp2=0.; for(igrp=1;igrp<NGRP;igrp++){ F2[igrp]=1.0; F2[igrp]=0.9971*(1.0-POW(pp2,1.3163)); if(F2[igrp]>1.0) F2[igrp]=1.0; if(F2[igrp]<0.0) F2[igrp]=0.0; } #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += et[igrp]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*F2[igrp]*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-xt)*(ai_tmp1)-Cool+SN)); #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += egyloc[idloc+igrp*BLOCKCOOL]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*F2[igrp]*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-x0[idloc])*(ai_tmp1)-Cool+SN)); #endif //================================================================================ #endif //S_X if(eintt<0.) 
{ //printf("E NEG eintt=%e xt=%e et=%e\n",eintt,xt,et[0]); fudgecool=fudgecool/10.; continue; } if(FABS(eintt-eint[idloc])>FRAC_VAR*eint[idloc]) { // if(srcloc[idloc]==0.){ //printf("DELTA E eintt=%e xt=%e et=%e\n",eintt,xt,et[0]); fudgecool=fudgecool/10.; continue; //} } else{ fudgecool=FMIN(fudgecool*1.5,param->fudgecool); } ai_tmp1=0; eintt=FMAX(emin,eintt); } #else eintt=eint[idloc]; #endif // inner update REAL aold=aexp; #ifdef TESTCOSMO da=hubblet*dtcool*aexp; aexp+=da; #endif for(igrp =0;igrp<NGRP;igrp++) { egyloc[idloc+igrp*BLOCKCOOL]=et[igrp]*POW(aold/aexp,3); if(!chemonly){ floc[0+idloc3+igrp*BLOCKCOOL*3]=floc[0+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); floc[1+idloc3+igrp*BLOCKCOOL*3]=floc[1+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); floc[2+idloc3+igrp*BLOCKCOOL*3]=floc[2+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); } } x0[idloc]=xt; //printf("xt=%e\n",xt); #ifdef COOLING eint[idloc]=eintt*POW(aold/aexp,5); #endif currentcool_t+=dtcool; fudgecool=param->fudgecool; nitcool++; if((nitcool==ncvgcool)&&(ncvgcool!=0)) break; } /// ====================== End of the cooling loop //aexp=aexporg; // FIlling the rad structure to send it back if(!chemonly){ for(igrp=0;igrp<NGRP;igrp++) { R.e[igrp]=FMAX(egyloc[idloc+igrp*BLOCKCOOL]*POW(aexp,3),EMIN*factgrp[igrp])/param->unit.unit_N; R.fx[igrp]=floc[0+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; R.fy[igrp]=floc[1+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; R.fz[igrp]=floc[2+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; } } R.nhplus=x0[idloc]*R.nh; R.eint=eint[idloc]*POW(aexp,5)/param->unit.unit_n/param->unit.unit_d/POW(param->unit.unit_v,2); dE2T(&R,aexp,param); memcpy(&stencil[i].New.cell[icell].rfieldnew,&R,sizeof(struct Rtype)); } } } #if 0 __global__ void dchemrad(struct RGRID *stencil, int nread, int stride, struct CPUINFO *cpu, REAL dxcur, 
REAL dtnew, struct RUNPARAMS *param, REAL aexporg, int chemonly) { int i,icell,igrp; //int idloc; int nitcool=0; REAL hnu0=13.6*1.6022e-19, Cool, tcool, dtcool, tcool1, currentcool_t=0., alpha, alphab, beta, tloc, xt, eintt, ai_tmp1=0., et[NGRP], p[NGRP]; REAL aexp; REAL ebkg[NGRP]; REAL z=1./aexporg-1.; REAL c=param->clightorg*LIGHT_SPEED_IN_M_PER_S; // switch back to physical velocity m/s REAL hnu[NGRP]; REAL alphae[NGRP]; REAL alphai[NGRP]; REAL factgrp[NGRP]; for(igrp=0;igrp<NGRP;igrp++) { hnu[igrp]=param->atomic.hnu[igrp]; alphae[igrp]=param->atomic.alphae[igrp]; alphai[igrp]=param->atomic.alphai[igrp]; factgrp[igrp]=param->atomic.factgrp[igrp]; } #ifdef S_X REAL E0overI[NGRP]; REAL N2[NGRP]; REAL F2[NGRP]; #endif #define BLOCKCOOL 1 // KEPT FROM CUDATON FOR SIMPLICITY #define idloc 0 // KEPT FROM CUDATON FOR SIMPLICITY #define idloc3 0 // KEPT FROM CUDATON FOR SIMPLICITY REAL egyloc[BLOCKCOOL*NGRP], floc[3*BLOCKCOOL*NGRP], x0[BLOCKCOOL], nH[BLOCKCOOL], eint[BLOCKCOOL], srcloc[BLOCKCOOL]; REAL dt=dtnew*param->unit.unit_t*POW(aexporg,2); REAL emin; struct Rtype R; REAL fudgecool=param->fudgecool; int ncvgcool=param->ncvgcool; REAL E0; #ifdef SCHAYE REAL navg=(param->cosmo->ob/param->cosmo->om)/(PROTON_MASS*MOLECULAR_MU)*param->unit.unit_d; #endif REAL xorg; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells if(stencil[i].oct[6].cell[icell].split) continue; // we dont treat split cells memcpy(&R,&stencil[i].New.cell[icell].rfieldnew,sizeof(struct Rtype));// We get the local physical quantities after transport update #ifdef HOMOSOURCE // we override the value with the homogeneous source density R.src=param->bkg; #endif // switch to physical units, chemistry remains unchanged with and without cosmo for (igrp=0;igrp<NGRP;igrp++) { egyloc[idloc+igrp*BLOCKCOOL] =R.e[igrp]/(aexporg*aexporg*aexporg)*param->unit.unit_N;//+ebkg[igrp]; 
floc[0+idloc3+igrp*BLOCKCOOL*3]=R.fx[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; floc[1+idloc3+igrp*BLOCKCOOL*3]=R.fy[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; floc[2+idloc3+igrp*BLOCKCOOL*3]=R.fz[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; } x0[idloc]=R.nhplus/R.nh; xorg= x0[idloc]; nH[idloc]=R.nh/(aexporg*aexporg*aexporg)*param->unit.unit_N; eint[idloc]=R.eint/POW(aexporg,5)*param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); emin=PMIN/(GAMMA-1.)/POW(aexporg,5)*param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); // physical minimal pressure //srcloc[idloc]=(R.src*param->unit.unit_N/param->unit.unit_t/(aexporg*aexporg)+ebkg[0])/POW(aexporg,3); for (igrp=0;igrp<NGRP;igrp++){ srcloc[idloc+igrp*BLOCKCOOL]=(R.src[igrp]*param->unit.unit_N/param->unit.unit_t/(aexporg*aexporg))/POW(aexporg,3); //phot/s/dv (physique) } /// BELOW THE FULL EXPRESSION OF E in natural units //emin=PMIN/(GAMMA-1.)/POW(aexporg,5)/POW(param->unit.unit_l,3)*param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); // physical minimal pressure /* if(((isnan(eint[idloc]))||(isnan(x0[idloc])))||(eint[idloc]==0.)){ */ /* printf("start with nans or ZErO egy %e\n",eint[idloc]); */ /* abort(); */ /* } */ // at this stage we are ready to do the calculations // DEALING WITH CLUMPING ---------------------- #ifdef WCLUMP REAL CLUMPF2=FMIN(FMAX(POW(nH[idloc]/6.,0.7),1.),40.); REAL CLUMPI=1.; #else REAL CLUMPF2=1.; REAL CLUMPI=1.; #endif for(igrp=0;igrp<NGRP;igrp++) { alphai[igrp] *= CLUMPI; alphae[igrp] *= CLUMPI; } // ------------------------------------------------- // local cooling loop ------------------------------- aexp=aexporg; fudgecool=param->fudgecool; currentcool_t=0.; nitcool=0.; REAL da; //printf("fudge=%e ncv=%d currentcool_t=%e dt=%e\n",fudgecool,ncvgcool,currentcool_t,dt); #if 1 while(currentcool_t<dt) { // Cosmological Adiabatic expansion effects 
============== #ifdef TESTCOSMO REAL hubblet=param->cosmo->H0*SQRT(param->cosmo->om/aexp+param->cosmo->ov*(aexp*aexp))/aexp*(1e3/(1e6*PARSEC)); // s-1 // SOMETHING TO CHECK HERE #else REAL hubblet=0.; #endif tloc=eint[idloc]/(1.5*nH[idloc]*KBOLTZ*(1.+x0[idloc])); //== Getting a timestep dcuCompCooling(tloc,x0[idloc],nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); ai_tmp1=0.; for (igrp=0;igrp<NGRP;igrp++) ai_tmp1 += ((alphae[igrp])*hnu[igrp]-(alphai[igrp])*hnu0)*egyloc[idloc+igrp*BLOCKCOOL]; tcool=FABS(eint[idloc]/(nH[idloc]*(1.0-x0[idloc])*ai_tmp1-Cool)); ai_tmp1=0.; dtcool=FMIN(fudgecool*tcool,dt-currentcool_t); alpha=dcucompute_alpha_a(tloc,1.,1.)*CLUMPF2; alphab=dcucompute_alpha_b(tloc,1.,1.)*CLUMPF2; beta=dcucompute_beta(tloc,1.,1.)*CLUMPF2; //== Update // ABSORPTION int test = 0; REAL factotsa[NGRP]; for(igrp=0;igrp<NGRP;igrp++) { #ifdef OTSA factotsa[igrp]=0; alpha=alphab; // recombination is limited to non ground state levels #else factotsa[igrp]=(igrp==0); #endif ai_tmp1 = alphai[igrp]; if(chemonly){ et[igrp]=egyloc[idloc+igrp*BLOCKCOOL]; } else{ et[igrp]=((alpha-alphab)*x0[idloc]*x0[idloc]*nH[idloc]*nH[idloc]*dtcool*factotsa[igrp]+egyloc[idloc+igrp*BLOCKCOOL]+srcloc[idloc+igrp*BLOCKCOOL]*dtcool*factgrp[igrp])/(1.+dtcool*(ai_tmp1*(1.-x0[idloc])*nH[idloc])); } if((et[igrp]<0)||(isnan(et[igrp]))){ test=1; } p[igrp]=(1.+(alphai[igrp]*nH[idloc]*(1-x0[idloc]))*dtcool); } ai_tmp1=0.; if(test) { fudgecool=fudgecool/10.; continue; } // IONISATION #ifndef S_X #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*et[igrp]*(!chemonly);} #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*egyloc[idloc+igrp*BLOCKCOOL]*(!chemonly);} #endif #else N2[0]=1.0; REAL pp=(1.-POW(x0[idloc],0.4092)); if(pp<0.) 
pp=0.; for(igrp=1;igrp<NGRP;igrp++){ N2[igrp]=1.0+0.3908*POW(pp,1.7592)*E0overI[igrp]; if(N2[igrp]<1.0) N2[igrp]=1.0; } #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*et[igrp]*N2[igrp]*(!chemonly);} #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*egyloc[idloc+igrp*BLOCKCOOL]*N2[igrp]*(!chemonly);} #endif #endif xt=1.-(alpha*x0[idloc]*x0[idloc]*nH[idloc]*dtcool+(1. -x0[idloc]))/(1.+dtcool*(beta*x0[idloc]*nH[idloc]+ai_tmp1)); ai_tmp1=0.; if(((xt>1.)||(xt<0.))||(isnan(xt))) { fudgecool/=10.; continue; } #ifdef SEMI_IMPLICIT dcuCompCooling(tloc,xt,nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); #else dcuCompCooling(tloc,x0[idloc],nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); #endif #ifdef COOLING // HEATING int compcool=1; // do we need to compute the cooling ? #ifdef SCHAYE if((nH[idloc]>1e5)&&(R.nh>(57.7*navg))){ REAL tlocs; tlocs=eintt/(1.5*nH[idloc]*KBOLTZ*(1.+xt)); if(tlocs<1e5){ eintt=(1.08e9*KBOLTZ)*POW(nH[idloc]/1e5,4./3.)/(GAMMA-1); // polytropic EOS compcool=0.; // cancel cooling calculation fudgecool=FMIN(fudgecool*1.5,param->fudgecool); } } #endif if(compcool){ REAL SN=0.; #ifndef S_X #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += et[igrp]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-xt)*(ai_tmp1)-Cool)); #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += egyloc[idloc+igrp*BLOCKCOOL]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-x0[idloc])*(ai_tmp1)-Cool)); #endif #else //===================================== X RAYS ============================== REAL pp2; F2[0]=1.0; #ifdef SEMI_IMPLICIT pp2=1.0-POW(xt,0.2663); #else pp2=1.0-POW(x0[idloc],0.2663); #endif if(pp2<0.) 
pp2=0.; for(igrp=1;igrp<NGRP;igrp++){ F2[igrp]=1.0; F2[igrp]=0.9971*(1.0-POW(pp2,1.3163)); if(F2[igrp]>1.0) F2[igrp]=1.0; if(F2[igrp]<0.0) F2[igrp]=0.0; } #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += et[igrp]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*F2[igrp]*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-xt)*(ai_tmp1)-Cool)); #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += egyloc[idloc+igrp*BLOCKCOOL]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*F2[igrp]*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-x0[idloc])*(ai_tmp1)-Cool)); #endif #endif if(eintt<0.) { fudgecool=fudgecool/10.; continue; } if(FABS(eintt-eint[idloc])>FRAC_VAR*eint[idloc]) { fudgecool=fudgecool/10.; continue; } else{ fudgecool=FMIN(fudgecool*1.5,param->fudgecool); } ai_tmp1=0; eintt=FMAX(emin,eintt); } #else eintt=eint[idloc]; #endif // inner update REAL aold=aexp; #ifdef TESTCOSMO REAL da=hubblet*dtcool*aexp; aexp+=da; #endif for(igrp =0;igrp<NGRP;igrp++) { egyloc[idloc+igrp*BLOCKCOOL]=et[igrp]*POW(aold/aexp,3); if(!chemonly){ floc[0+idloc3+igrp*BLOCKCOOL*3]=floc[0+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); floc[1+idloc3+igrp*BLOCKCOOL*3]=floc[1+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); floc[2+idloc3+igrp*BLOCKCOOL*3]=floc[2+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); } } x0[idloc]=xt; #ifdef COOLING eint[idloc]=eintt*POW(aold/aexp,5); #endif currentcool_t+=dtcool; fudgecool=param->fudgecool; nitcool++; if((nitcool==ncvgcool)&&(ncvgcool!=0)) break; } #endif // ====================== End of the cooling loop // FIlling the rad structure to send it back if(!chemonly){ for(igrp=0;igrp<NGRP;igrp++) { R.e[igrp]=FMAX(egyloc[idloc+igrp*BLOCKCOOL]*aexp*aexp*aexp,EMIN*factgrp[igrp])/param->unit.unit_N; R.fx[igrp]=floc[0+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; R.fy[igrp]=floc[1+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; 
R.fz[igrp]=floc[2+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; } } R.nhplus=x0[idloc]*R.nh; R.eint=eint[idloc]*POW(aexp,5)/param->unit.unit_n/param->unit.unit_d/POW(param->unit.unit_v,2); dE2T(&R,aexp,param); memcpy(&stencil[i].New.cell[icell].rfieldnew,&R,sizeof(struct Rtype)); } } } #endif #endif #endif
3ab35ee0cbaee238c39b266dbf8f9d6b3c21dab9.cu
#ifdef WRAD #ifdef WCHEM #include <math.h> #include <stdio.h> #include <stdlib.h> #include "prototypes.h" #include "oct.h" #include <string.h> #include <mpi.h> //#include "atomic_data/Atomic.h" #include "gpu_type.h" #define FRAC_VAR (0.1) #define idloc 0 //================================================================================ __device__ void dE2T(struct Rtype *R, REAL aexp,struct RUNPARAMS *param){ REAL tloc; REAL eint=R->eint; REAL nH=R->nh; REAL x=R->nhplus/R->nh; REAL pstar=param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); nH=nH/POW(aexp,3)*param->unit.unit_N; eint=eint/POW(aexp,5)*pstar; tloc=eint/(1.5*nH*KBOLTZ*(1.+x)); R->temp=tloc; } // ============================================================================ __device__ REAL dcucompute_alpha_b(REAL temp, REAL unit_number, REAL aexp) { // CASE B recombination rate m**3 s*-1 // temperature should be given in Kelvin REAL alpha_b,lambda; lambda=2e0*157807e0/temp; alpha_b=2.753e-14*POW(lambda,1.5)/POW(1e0+POW(lambda/2.740,0.407),2.242); //cm3/s #ifdef TESTCOSMO alpha_b=alpha_b*1e-6*unit_number;///(aexp*aexp*aexp); //m3/s #else alpha_b=alpha_b*1e-6*unit_number; //m3/s #endif return alpha_b; } //========================================================= //========================================================= __device__ REAL dcucompute_alpha_a(REAL temp, REAL unit_number, REAL aexp) { // CASE A recombination rate m**3 s*-1 // temperature should be given in Kelvin REAL alpha_a,lambda; lambda=2e0*157807e0/temp; alpha_a=1.269e-13*POW(lambda,1.503)/POW(1e0+POW(lambda/0.522,0.470),1.923); //cm3/s #ifdef TESTCOSMO alpha_a=alpha_a*1e-6*unit_number;///(aexp*aexp*aexp); //m3/s #else alpha_a=alpha_a*1e-6*unit_number; //m3/s #endif return alpha_a; } //========================================================= //========================================================= __device__ REAL dcucompute_beta(REAL temp, REAL unit_number, REAL aexp) { // Collizional ionization rate m**3 s*-1 // temperature 
in Kelvin REAL beta,T5; T5=temp/1e5; beta=5.85e-11*SQRT(temp)/(1+SQRT(T5))*EXP(-(157809e0/temp)); //cm3/s #ifdef TESTCOSMO beta=beta*1e-6*unit_number;///(aexp*aexp*aexp); // !m3/s #else beta=beta*1e-6*unit_number; // !m3/s #endif return beta; } //********************************************************************************** //********************************************************************************** __device__ void dcuCompCooling(REAL temp, REAL x, REAL nH, REAL *lambda, REAL *tcool, REAL aexp,REAL CLUMPF) { REAL c1,c2,c3,c4,c5,c6; REAL unsurtc; REAL nh2; nh2=nH*1e-6;// ! m-3 ==> cm-3 // Collisional Ionization Cooling c1=EXP(-157809.1e0/temp)*1.27e-21*SQRT(temp)/(1.+SQRT(temp/1e5))*x*(1.-x)*nh2*nh2*CLUMPF; // Case A Recombination Cooling c2=1.778e-29*temp*POW(2e0*157807e0/temp,1.965e0)/POW(1.+POW(2e0*157807e0/temp/0.541e0,0.502e0),2.697e0)*x*x*nh2*nh2*CLUMPF; // Case B Recombination Cooling c6=3.435e-30*temp*POW(2e0*157807e0/temp,1.970e0)/POW(1.+(POW(2e0*157807e0/temp/2.250e0,0.376e0)),3.720e0)*x*x*nh2*nh2*CLUMPF; c6=0.; // Collisional excitation cooling c3=EXP(-118348e0/temp)*7.5e-19/(1+SQRT(temp/1e5))*x*(1.-x)*nh2*nh2*CLUMPF; // Bremmsstrahlung c4=1.42e-27*1.5e0*SQRT(temp)*x*x*nh2*nh2*CLUMPF; // Compton Cooling /* c5=1.017e-37*POW(2.727/aexp,4)*(temp-2.727/aexp)*nh2*x; */ c5=0.; #ifndef WRADTEST c5=5.406e-24*(temp-2.727/aexp)/POW(aexp/0.001,4)*x*nh2; REAL Ta=2.727/aexp; c5=5.406e-36*(temp-Ta)/(aexp*aexp*aexp*aexp)*x*nh2; #endif // Overall Cooling *lambda=c1+c2+c3+c4+c5+c6;// ! erg*cm-3*s-1 // Unit Conversion *lambda=(*lambda)*1e-7*1e6;// ! 
J*m-3*s-1 // cooling times unsurtc=FMAX(c1,c2); unsurtc=FMAX(unsurtc,c3); unsurtc=FMAX(unsurtc,c4); unsurtc=FMAX(unsurtc,FABS(c5)); unsurtc=FMAX(unsurtc,c6)*1e-7;// ==> J/cm3/s *tcool=1.5e0*nh2*(1.+x)*KBOLTZ*temp/unsurtc; //Myr } // =========================================================================================================================== __global__ void dchemrad(struct RGRID *stencil, int nread, int stride, struct CPUINFO *cpu, REAL dxcur, REAL dtnew, struct RUNPARAMS *param, REAL aexporg, int chemonly) { int i,icell,igrp; //int idloc=0; int nitcool=0; REAL hnu0=13.6*1.6022e-19, Cool, tcool, dtcool, tcool1, currentcool_t=0., alpha, alphab, beta, tloc, xt, eintt, ai_tmp1=0., et[NGRP], p[NGRP]; REAL aexp; REAL ebkg[NGRP]; REAL z=1./aexporg-1.; REAL c=param->clightorg*LIGHT_SPEED_IN_M_PER_S; // switch back to physical velocity m/s REAL hnu[NGRP]; REAL alphae[NGRP]; REAL alphai[NGRP]; REAL factgrp[NGRP]; for(igrp=0;igrp<NGRP;igrp++) { hnu[igrp]=param->atomic.hnu[igrp]; alphae[igrp]=param->atomic.alphae[igrp]; alphai[igrp]=param->atomic.alphai[igrp]; factgrp[igrp]=param->atomic.factgrp[igrp]; } #ifdef S_X REAL E0overI[NGRP]; REAL N2[NGRP]; REAL F2[NGRP]; #endif #define BLOCKCOOL 1 // KEPT FROM CUDATON FOR SIMPLICITY #define idloc3 0 // KEPT FROM CUDATON FOR SIMPLICITY REAL egyloc[BLOCKCOOL*NGRP], floc[3*BLOCKCOOL*NGRP], srcloc[BLOCKCOOL*NGRP], x0[BLOCKCOOL], nH[BLOCKCOOL], eint[BLOCKCOOL]; REAL dt=dtnew*param->unit.unit_t*POW(aexporg,2); REAL emin; struct Rtype R; REAL fudgecool=param->fudgecool; int ncvgcool=param->ncvgcool; REAL E0; #ifdef SCHAYE REAL navg=(param->cosmo->ob/param->cosmo->om)/(PROTON_MASS*MOLECULAR_MU)*param->unit.unit_d; #endif REAL xorg; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells if(stencil[i].oct[6].cell[icell].split) continue; // we dont treat split cells memcpy(&R,&stencil[i].New.cell[icell].rfieldnew,sizeof(struct Rtype));// We get 
the local physical quantities after transport update #ifdef HOMOSOURCE // we override the value with the homogeneous source density R.src=param->bkg; #endif //if(eint[idloc]!=E0) printf("1!\n"); /// ==================== UV Background #ifdef UVBKG if(NGRP>1) printf("WARNING BAD BEHAVIOR FOR BKG with NGRP>1 !\n"); //for(igrp=0;igrp<NGRP;igrp++) ebkg[igrp]=3.6*(z<3?1.:4./(1+z)) ; // Katz simple model // Poor FIT to Haardt & MAdau 2012 /* for(igrp=0;igrp<NGRP;igrp++){ REAL amp=1.2e-16,sig=1.,zavg=2,mz=1e-18,pz=1.2e-17; ebkg[igrp]=amp/(sig*SQRT(2*M_PI))*exp(-POW((z-zavg),2)/(2.*POW(sig,2)))+mz*z+pz; // comoving photons/s/m3 } */ #else for(igrp=0;igrp<NGRP;igrp++) ebkg[igrp]=0.; #endif // switch to physical units, chemistry remains unchanged with and without cosmo for (igrp=0;igrp<NGRP;igrp++) { egyloc[idloc+igrp*BLOCKCOOL] =R.e[igrp]/(aexporg*aexporg*aexporg)*param->unit.unit_N;//+ebkg[igrp]; floc[0+idloc3+igrp*BLOCKCOOL*3]=R.fx[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; floc[1+idloc3+igrp*BLOCKCOOL*3]=R.fy[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; floc[2+idloc3+igrp*BLOCKCOOL*3]=R.fz[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; } x0[idloc]=R.nhplus/R.nh; xorg= x0[idloc]; nH[idloc]=R.nh/(aexporg*aexporg*aexporg)*param->unit.unit_N; eint[idloc]=R.eint/POW(aexporg,5)*param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); emin=PMIN/(GAMMA-1.)/POW(aexporg,5)*param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); // physical minimal pressure for (igrp=0;igrp<NGRP;igrp++){ srcloc[idloc+igrp*BLOCKCOOL]=(R.src[igrp]*param->unit.unit_N/param->unit.unit_t/(aexporg*aexporg))/POW(aexporg,3); //phot/s/dv (physique) } // R.src phot/unit_t/unit_dv (comobile) REAL eorg=eint[idloc]; REAL etorg=egyloc[idloc]; REAL torg=eint[idloc]/(1.5*nH[idloc]*KBOLTZ*(1.+x0[idloc])); //if(srcloc[0]>0) printf("nh=%e %e %e %e\n",R.nh,R.e[0],eint[idloc],3[idloc]); // at this stage 
we are ready to do the calculations // DEALING WITH CLUMPING ---------------------- #ifdef WCLUMP REAL CLUMPF2=FMIN(FMAX(POW(nH[idloc]/6.,0.7),1.),40.); REAL CLUMPI=1.; #else REAL CLUMPF2=1.; REAL CLUMPI=1.; #endif for(igrp=0;igrp<NGRP;igrp++) { alphai[igrp] *= CLUMPI; alphae[igrp] *= CLUMPI; } // ------------------------------------------------- /// local cooling loop ------------------------------- aexp=aexporg; fudgecool=param->fudgecool; currentcool_t=0.; nitcool=0.; REAL da; //printf("cpu=%d fudge=%e ncv=%d currentcool_t=%e dt=%e\n",cpu->rank,param->fudgecool,ncvgcool,currentcool_t,dt); // local cooling loop ------------------------------- while(currentcool_t<dt) { /// Cosmological Adiabatic expansion effects ============== #ifdef TESTCOSMO REAL hubblet=param->cosmo->H0*SQRT(param->cosmo->om/aexp+param->cosmo->ov*(aexp*aexp))/aexp*(1e3/(1e6*PARSEC)); // s-1 // SOMETHING TO CHECK HERE #else REAL hubblet=0.; #endif //if(eint[idloc]!=E0) printf("2!\n"); tloc=eint[idloc]/(1.5*nH[idloc]*KBOLTZ*(1.+x0[idloc])); //== Getting a timestep dcuCompCooling(tloc,x0[idloc],nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); ai_tmp1=0.; //if(eint[idloc]!=E0) printf("3!\n"); if(fudgecool<1e-20){ printf("eint=%e(%e<%e) nH=%e x0=%e(%e) T=%e(%e) N=%e(%e)\n",eint[idloc],eorg,emin,nH[idloc],x0[idloc],xorg,tloc,torg,et[0],etorg); //if(fudgecool<1e-20) abort(); } for (igrp=0;igrp<NGRP;igrp++) ai_tmp1 += ((alphae[igrp])*hnu[igrp]-(alphai[igrp])*hnu0)*egyloc[idloc+igrp*BLOCKCOOL]; tcool=FABS(eint[idloc]/(nH[idloc]*(1.0-x0[idloc])*ai_tmp1*(!chemonly)-Cool)); ai_tmp1=0.; dtcool=FMIN(fudgecool*tcool,dt-currentcool_t); alpha=dcucompute_alpha_a(tloc,1.,1.)*CLUMPF2; alphab=dcucompute_alpha_b(tloc,1.,1.)*CLUMPF2; beta=dcucompute_beta(tloc,1.,1.)*CLUMPF2; //== Update // ABSORPTION int test = 0; REAL factotsa[NGRP]; for(igrp=0;igrp<NGRP;igrp++) { #ifdef OTSA factotsa[igrp]=0; alpha=alphab; // recombination is limited to non ground state levels #else factotsa[igrp]=(igrp==0); #endif ai_tmp1 = alphai[igrp]; 
if(chemonly){ et[igrp]=egyloc[idloc+igrp*BLOCKCOOL]; } else{ et[igrp]=((alpha-alphab)*x0[idloc]*x0[idloc]*nH[idloc]*nH[idloc]*dtcool*factotsa[igrp]+egyloc[idloc+igrp*BLOCKCOOL]+srcloc[idloc+igrp*BLOCKCOOL]*dtcool*factgrp[igrp])/(1.+dtcool*(ai_tmp1*(1.-x0[idloc])*nH[idloc])); } if((et[igrp]<0)||(isnan(et[igrp]))){ test=1; //printf("eint=%e nH=%e x0=%e T=%e N=%e\n",eint[idloc],nH[idloc],x0[idloc],tloc,et[0]); } p[igrp]=(1.+(alphai[igrp]*nH[idloc]*(1-x0[idloc]))*dtcool); } ai_tmp1=0.; if(test) { fudgecool=fudgecool/10.; continue; } // IONISATION #ifndef S_X #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*et[igrp]*(!chemonly);} #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*egyloc[idloc+igrp*BLOCKCOOL]*(!chemonly);} #endif #else N2[0]=1.0; REAL pp=(1.-POW(x0[idloc],0.4092)); if(pp<0.) pp=0.; for(igrp=1;igrp<NGRP;igrp++){ N2[igrp]=1.0+0.3908*POW(pp,1.7592)*E0overI[igrp]; if(N2[igrp]<1.0) N2[igrp]=1.0; } #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*et[igrp]*N2[igrp]*(!chemonly);} #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*egyloc[idloc+igrp*BLOCKCOOL]*N2[igrp]*(!chemonly);} #endif #endif xt=1.-(alpha*x0[idloc]*x0[idloc]*nH[idloc]*dtcool+(1. -x0[idloc]))/(1.+dtcool*(beta*x0[idloc]*nH[idloc]+ai_tmp1)); ai_tmp1=0.; if(((xt>1.)||(xt<0.))||(isnan(xt))) { //printf("XION ERR eintt=%e xt=%e et=%e\n",eintt,xt,et[0]); fudgecool/=10.; continue; } #ifdef SEMI_IMPLICIT dcuCompCooling(tloc,xt,nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); #else dcuCompCooling(tloc,x0[idloc],nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); #endif #ifdef COOLING // HEATING + COOLING int compcool=1; // do we need to compute the cooling ? 
#ifdef SCHAYE if((nH[idloc]>1e6)&&(R.nh>(param->stars->overdensity_cond*navg))){ REAL tlocs; tlocs=eintt/(1.5*nH[idloc]*KBOLTZ*(1.+xt)); if(tlocs<1e5){ eintt=(1.08e9*KBOLTZ)*POW(nH[idloc]/1e5,4./3.)/(GAMMA-1)/FSCHAYE; // polytropic EOS compcool=0.; // cancel cooling calculation fudgecool=FMIN(fudgecool*1.5,param->fudgecool); } } #endif // SCHAYE if(compcool){ REAL SN = 0; #ifdef SUPERNOVAE SN = R.snfb; if (R.snfb) Cool = 0; // Stop the cooling if supernovae if (R.snfb) printf("dE\t%e\tE0\t%e\tdtcool\t%e\t",R.snfb*dtcool,eintt, dtcool); #endif #ifndef S_X #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += et[igrp]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*(!chemonly);} eintt=(eint[idloc]+ dtcool*(nH[idloc]*(1.-xt)*(ai_tmp1)-Cool+SN)); // if (R.snfb) printf("E0\t%e\n",eintt); #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += egyloc[idloc+igrp*BLOCKCOOL]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-x0[idloc])*(ai_tmp1)-Cool+SN)); #endif //SEMI #else //===================================== X RAYS ============================== REAL pp2; F2[0]=1.0; //if(eint[idloc]!=E0) printf("7!\n"); #ifdef SEMI_IMPLICIT pp2=1.0-POW(xt,0.2663); #else pp2=1.0-POW(x0[idloc],0.2663); #endif if(pp2<0.) pp2=0.; for(igrp=1;igrp<NGRP;igrp++){ F2[igrp]=1.0; F2[igrp]=0.9971*(1.0-POW(pp2,1.3163)); if(F2[igrp]>1.0) F2[igrp]=1.0; if(F2[igrp]<0.0) F2[igrp]=0.0; } #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += et[igrp]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*F2[igrp]*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-xt)*(ai_tmp1)-Cool+SN)); #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += egyloc[idloc+igrp*BLOCKCOOL]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*F2[igrp]*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-x0[idloc])*(ai_tmp1)-Cool+SN)); #endif //================================================================================ #endif //S_X if(eintt<0.) 
{ //printf("E NEG eintt=%e xt=%e et=%e\n",eintt,xt,et[0]); fudgecool=fudgecool/10.; continue; } if(FABS(eintt-eint[idloc])>FRAC_VAR*eint[idloc]) { // if(srcloc[idloc]==0.){ //printf("DELTA E eintt=%e xt=%e et=%e\n",eintt,xt,et[0]); fudgecool=fudgecool/10.; continue; //} } else{ fudgecool=FMIN(fudgecool*1.5,param->fudgecool); } ai_tmp1=0; eintt=FMAX(emin,eintt); } #else eintt=eint[idloc]; #endif // inner update REAL aold=aexp; #ifdef TESTCOSMO da=hubblet*dtcool*aexp; aexp+=da; #endif for(igrp =0;igrp<NGRP;igrp++) { egyloc[idloc+igrp*BLOCKCOOL]=et[igrp]*POW(aold/aexp,3); if(!chemonly){ floc[0+idloc3+igrp*BLOCKCOOL*3]=floc[0+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); floc[1+idloc3+igrp*BLOCKCOOL*3]=floc[1+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); floc[2+idloc3+igrp*BLOCKCOOL*3]=floc[2+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); } } x0[idloc]=xt; //printf("xt=%e\n",xt); #ifdef COOLING eint[idloc]=eintt*POW(aold/aexp,5); #endif currentcool_t+=dtcool; fudgecool=param->fudgecool; nitcool++; if((nitcool==ncvgcool)&&(ncvgcool!=0)) break; } /// ====================== End of the cooling loop //aexp=aexporg; // FIlling the rad structure to send it back if(!chemonly){ for(igrp=0;igrp<NGRP;igrp++) { R.e[igrp]=FMAX(egyloc[idloc+igrp*BLOCKCOOL]*POW(aexp,3),EMIN*factgrp[igrp])/param->unit.unit_N; R.fx[igrp]=floc[0+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; R.fy[igrp]=floc[1+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; R.fz[igrp]=floc[2+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; } } R.nhplus=x0[idloc]*R.nh; R.eint=eint[idloc]*POW(aexp,5)/param->unit.unit_n/param->unit.unit_d/POW(param->unit.unit_v,2); dE2T(&R,aexp,param); memcpy(&stencil[i].New.cell[icell].rfieldnew,&R,sizeof(struct Rtype)); } } } #if 0 __global__ void dchemrad(struct RGRID *stencil, int nread, int stride, struct CPUINFO *cpu, REAL dxcur, 
REAL dtnew, struct RUNPARAMS *param, REAL aexporg, int chemonly) { int i,icell,igrp; //int idloc; int nitcool=0; REAL hnu0=13.6*1.6022e-19, Cool, tcool, dtcool, tcool1, currentcool_t=0., alpha, alphab, beta, tloc, xt, eintt, ai_tmp1=0., et[NGRP], p[NGRP]; REAL aexp; REAL ebkg[NGRP]; REAL z=1./aexporg-1.; REAL c=param->clightorg*LIGHT_SPEED_IN_M_PER_S; // switch back to physical velocity m/s REAL hnu[NGRP]; REAL alphae[NGRP]; REAL alphai[NGRP]; REAL factgrp[NGRP]; for(igrp=0;igrp<NGRP;igrp++) { hnu[igrp]=param->atomic.hnu[igrp]; alphae[igrp]=param->atomic.alphae[igrp]; alphai[igrp]=param->atomic.alphai[igrp]; factgrp[igrp]=param->atomic.factgrp[igrp]; } #ifdef S_X REAL E0overI[NGRP]; REAL N2[NGRP]; REAL F2[NGRP]; #endif #define BLOCKCOOL 1 // KEPT FROM CUDATON FOR SIMPLICITY #define idloc 0 // KEPT FROM CUDATON FOR SIMPLICITY #define idloc3 0 // KEPT FROM CUDATON FOR SIMPLICITY REAL egyloc[BLOCKCOOL*NGRP], floc[3*BLOCKCOOL*NGRP], x0[BLOCKCOOL], nH[BLOCKCOOL], eint[BLOCKCOOL], srcloc[BLOCKCOOL]; REAL dt=dtnew*param->unit.unit_t*POW(aexporg,2); REAL emin; struct Rtype R; REAL fudgecool=param->fudgecool; int ncvgcool=param->ncvgcool; REAL E0; #ifdef SCHAYE REAL navg=(param->cosmo->ob/param->cosmo->om)/(PROTON_MASS*MOLECULAR_MU)*param->unit.unit_d; #endif REAL xorg; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells if(stencil[i].oct[6].cell[icell].split) continue; // we dont treat split cells memcpy(&R,&stencil[i].New.cell[icell].rfieldnew,sizeof(struct Rtype));// We get the local physical quantities after transport update #ifdef HOMOSOURCE // we override the value with the homogeneous source density R.src=param->bkg; #endif // switch to physical units, chemistry remains unchanged with and without cosmo for (igrp=0;igrp<NGRP;igrp++) { egyloc[idloc+igrp*BLOCKCOOL] =R.e[igrp]/(aexporg*aexporg*aexporg)*param->unit.unit_N;//+ebkg[igrp]; 
floc[0+idloc3+igrp*BLOCKCOOL*3]=R.fx[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; floc[1+idloc3+igrp*BLOCKCOOL*3]=R.fy[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; floc[2+idloc3+igrp*BLOCKCOOL*3]=R.fz[igrp]/POW(aexporg,4)*param->unit.unit_l/param->unit.unit_t*param->unit.unit_N; } x0[idloc]=R.nhplus/R.nh; xorg= x0[idloc]; nH[idloc]=R.nh/(aexporg*aexporg*aexporg)*param->unit.unit_N; eint[idloc]=R.eint/POW(aexporg,5)*param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); emin=PMIN/(GAMMA-1.)/POW(aexporg,5)*param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); // physical minimal pressure //srcloc[idloc]=(R.src*param->unit.unit_N/param->unit.unit_t/(aexporg*aexporg)+ebkg[0])/POW(aexporg,3); for (igrp=0;igrp<NGRP;igrp++){ srcloc[idloc+igrp*BLOCKCOOL]=(R.src[igrp]*param->unit.unit_N/param->unit.unit_t/(aexporg*aexporg))/POW(aexporg,3); //phot/s/dv (physique) } /// BELOW THE FULL EXPRESSION OF E in natural units //emin=PMIN/(GAMMA-1.)/POW(aexporg,5)/POW(param->unit.unit_l,3)*param->unit.unit_n*param->unit.unit_d*POW(param->unit.unit_v,2); // physical minimal pressure /* if(((isnan(eint[idloc]))||(isnan(x0[idloc])))||(eint[idloc]==0.)){ */ /* printf("start with nans or ZErO egy %e\n",eint[idloc]); */ /* abort(); */ /* } */ // at this stage we are ready to do the calculations // DEALING WITH CLUMPING ---------------------- #ifdef WCLUMP REAL CLUMPF2=FMIN(FMAX(POW(nH[idloc]/6.,0.7),1.),40.); REAL CLUMPI=1.; #else REAL CLUMPF2=1.; REAL CLUMPI=1.; #endif for(igrp=0;igrp<NGRP;igrp++) { alphai[igrp] *= CLUMPI; alphae[igrp] *= CLUMPI; } // ------------------------------------------------- // local cooling loop ------------------------------- aexp=aexporg; fudgecool=param->fudgecool; currentcool_t=0.; nitcool=0.; REAL da; //printf("fudge=%e ncv=%d currentcool_t=%e dt=%e\n",fudgecool,ncvgcool,currentcool_t,dt); #if 1 while(currentcool_t<dt) { // Cosmological Adiabatic expansion effects 
============== #ifdef TESTCOSMO REAL hubblet=param->cosmo->H0*SQRT(param->cosmo->om/aexp+param->cosmo->ov*(aexp*aexp))/aexp*(1e3/(1e6*PARSEC)); // s-1 // SOMETHING TO CHECK HERE #else REAL hubblet=0.; #endif tloc=eint[idloc]/(1.5*nH[idloc]*KBOLTZ*(1.+x0[idloc])); //== Getting a timestep dcuCompCooling(tloc,x0[idloc],nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); ai_tmp1=0.; for (igrp=0;igrp<NGRP;igrp++) ai_tmp1 += ((alphae[igrp])*hnu[igrp]-(alphai[igrp])*hnu0)*egyloc[idloc+igrp*BLOCKCOOL]; tcool=FABS(eint[idloc]/(nH[idloc]*(1.0-x0[idloc])*ai_tmp1-Cool)); ai_tmp1=0.; dtcool=FMIN(fudgecool*tcool,dt-currentcool_t); alpha=dcucompute_alpha_a(tloc,1.,1.)*CLUMPF2; alphab=dcucompute_alpha_b(tloc,1.,1.)*CLUMPF2; beta=dcucompute_beta(tloc,1.,1.)*CLUMPF2; //== Update // ABSORPTION int test = 0; REAL factotsa[NGRP]; for(igrp=0;igrp<NGRP;igrp++) { #ifdef OTSA factotsa[igrp]=0; alpha=alphab; // recombination is limited to non ground state levels #else factotsa[igrp]=(igrp==0); #endif ai_tmp1 = alphai[igrp]; if(chemonly){ et[igrp]=egyloc[idloc+igrp*BLOCKCOOL]; } else{ et[igrp]=((alpha-alphab)*x0[idloc]*x0[idloc]*nH[idloc]*nH[idloc]*dtcool*factotsa[igrp]+egyloc[idloc+igrp*BLOCKCOOL]+srcloc[idloc+igrp*BLOCKCOOL]*dtcool*factgrp[igrp])/(1.+dtcool*(ai_tmp1*(1.-x0[idloc])*nH[idloc])); } if((et[igrp]<0)||(isnan(et[igrp]))){ test=1; } p[igrp]=(1.+(alphai[igrp]*nH[idloc]*(1-x0[idloc]))*dtcool); } ai_tmp1=0.; if(test) { fudgecool=fudgecool/10.; continue; } // IONISATION #ifndef S_X #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*et[igrp]*(!chemonly);} #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*egyloc[idloc+igrp*BLOCKCOOL]*(!chemonly);} #endif #else N2[0]=1.0; REAL pp=(1.-POW(x0[idloc],0.4092)); if(pp<0.) 
pp=0.; for(igrp=1;igrp<NGRP;igrp++){ N2[igrp]=1.0+0.3908*POW(pp,1.7592)*E0overI[igrp]; if(N2[igrp]<1.0) N2[igrp]=1.0; } #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*et[igrp]*N2[igrp]*(!chemonly);} #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += alphai[igrp]*egyloc[idloc+igrp*BLOCKCOOL]*N2[igrp]*(!chemonly);} #endif #endif xt=1.-(alpha*x0[idloc]*x0[idloc]*nH[idloc]*dtcool+(1. -x0[idloc]))/(1.+dtcool*(beta*x0[idloc]*nH[idloc]+ai_tmp1)); ai_tmp1=0.; if(((xt>1.)||(xt<0.))||(isnan(xt))) { fudgecool/=10.; continue; } #ifdef SEMI_IMPLICIT dcuCompCooling(tloc,xt,nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); #else dcuCompCooling(tloc,x0[idloc],nH[idloc],&Cool,&tcool1,aexp,CLUMPF2); #endif #ifdef COOLING // HEATING int compcool=1; // do we need to compute the cooling ? #ifdef SCHAYE if((nH[idloc]>1e5)&&(R.nh>(57.7*navg))){ REAL tlocs; tlocs=eintt/(1.5*nH[idloc]*KBOLTZ*(1.+xt)); if(tlocs<1e5){ eintt=(1.08e9*KBOLTZ)*POW(nH[idloc]/1e5,4./3.)/(GAMMA-1); // polytropic EOS compcool=0.; // cancel cooling calculation fudgecool=FMIN(fudgecool*1.5,param->fudgecool); } } #endif if(compcool){ REAL SN=0.; #ifndef S_X #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += et[igrp]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-xt)*(ai_tmp1)-Cool)); #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += egyloc[idloc+igrp*BLOCKCOOL]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-x0[idloc])*(ai_tmp1)-Cool)); #endif #else //===================================== X RAYS ============================== REAL pp2; F2[0]=1.0; #ifdef SEMI_IMPLICIT pp2=1.0-POW(xt,0.2663); #else pp2=1.0-POW(x0[idloc],0.2663); #endif if(pp2<0.) 
pp2=0.; for(igrp=1;igrp<NGRP;igrp++){ F2[igrp]=1.0; F2[igrp]=0.9971*(1.0-POW(pp2,1.3163)); if(F2[igrp]>1.0) F2[igrp]=1.0; if(F2[igrp]<0.0) F2[igrp]=0.0; } #ifdef SEMI_IMPLICIT for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += et[igrp]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*F2[igrp]*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-xt)*(ai_tmp1)-Cool)); #else for(igrp=0;igrp<NGRP;igrp++) {ai_tmp1 += egyloc[idloc+igrp*BLOCKCOOL]*(alphae[igrp]*hnu[igrp]-(alphai[igrp]*hnu0))*F2[igrp]*(!chemonly);} eintt=(eint[idloc]+dtcool*(nH[idloc]*(1.-x0[idloc])*(ai_tmp1)-Cool)); #endif #endif if(eintt<0.) { fudgecool=fudgecool/10.; continue; } if(FABS(eintt-eint[idloc])>FRAC_VAR*eint[idloc]) { fudgecool=fudgecool/10.; continue; } else{ fudgecool=FMIN(fudgecool*1.5,param->fudgecool); } ai_tmp1=0; eintt=FMAX(emin,eintt); } #else eintt=eint[idloc]; #endif // inner update REAL aold=aexp; #ifdef TESTCOSMO REAL da=hubblet*dtcool*aexp; aexp+=da; #endif for(igrp =0;igrp<NGRP;igrp++) { egyloc[idloc+igrp*BLOCKCOOL]=et[igrp]*POW(aold/aexp,3); if(!chemonly){ floc[0+idloc3+igrp*BLOCKCOOL*3]=floc[0+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); floc[1+idloc3+igrp*BLOCKCOOL*3]=floc[1+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); floc[2+idloc3+igrp*BLOCKCOOL*3]=floc[2+idloc3+igrp*BLOCKCOOL*3]/p[igrp]*POW(aold/aexp,4); } } x0[idloc]=xt; #ifdef COOLING eint[idloc]=eintt*POW(aold/aexp,5); #endif currentcool_t+=dtcool; fudgecool=param->fudgecool; nitcool++; if((nitcool==ncvgcool)&&(ncvgcool!=0)) break; } #endif // ====================== End of the cooling loop // FIlling the rad structure to send it back if(!chemonly){ for(igrp=0;igrp<NGRP;igrp++) { R.e[igrp]=FMAX(egyloc[idloc+igrp*BLOCKCOOL]*aexp*aexp*aexp,EMIN*factgrp[igrp])/param->unit.unit_N; R.fx[igrp]=floc[0+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; R.fy[igrp]=floc[1+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; 
R.fz[igrp]=floc[2+idloc3+igrp*BLOCKCOOL*3]*POW(aexp,4)/param->unit.unit_l*param->unit.unit_t/param->unit.unit_N; } } R.nhplus=x0[idloc]*R.nh; R.eint=eint[idloc]*POW(aexp,5)/param->unit.unit_n/param->unit.unit_d/POW(param->unit.unit_v,2); dE2T(&R,aexp,param); memcpy(&stencil[i].New.cell[icell].rfieldnew,&R,sizeof(struct Rtype)); } } } #endif #endif #endif
2307c397f802f66c74edb8e8c93a10ca1098c005.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) 2008-2011 Yung-Yu Chen <yyc@solvcon.net>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "cueuler.h" __device__ void cuda_calc_jaco(exedata *exd, int icl, double fcn[NEQ][NDIM], double jacos[NEQ][NEQ][NDIM]) { // pointers. double *psol; // scalars. double ga, ga1, ga3, ga1h; double u1, u2, u3, u4; #if NDIM == 3 double u5; #endif // accelerating variables. double rho2, ke2, g1ke2, vs, gretot, getot, pr, v1o2, v2o2, v1, v2; #if NDIM == 3 double v3o2, v3; #endif // initialize values. ga = exd->amsca[icl*NSCA]; ga1 = ga-1; ga3 = ga-3; ga1h = ga1/2; psol = exd->sol + icl*NEQ; u1 = psol[0] + SOLVCESE_TINY; u2 = psol[1]; u3 = psol[2]; u4 = psol[3]; #if NDIM == 3 u5 = psol[4]; #endif // accelerating variables. rho2 = u1*u1; v1 = u2/u1; v1o2 = v1*v1; v2 = u3/u1; v2o2 = v2*v2; #if NDIM == 3 v3 = u4/u1; v3o2 = v3*v3; #endif ke2 = (u2*u2 + u3*u3 #if NDIM == 3 + u4*u4 #endif )/u1; g1ke2 = ga1*ke2; vs = ke2/u1; gretot = ga * #if NDIM == 3 u5 #else u4 #endif ; getot = gretot/u1; pr = ga1* #if NDIM == 3 u5 #else u4 #endif - ga1h * ke2; // flux function. 
#if NDIM == 3 fcn[0][0] = u2; fcn[0][1] = u3; fcn[0][2] = u4; fcn[1][0] = pr + u2*v1; fcn[1][1] = u2*v2; fcn[1][2] = u2*v3; fcn[2][0] = u3*v1; fcn[2][1] = pr + u3*v2; fcn[2][2] = u3*v3; fcn[3][0] = u4*v1; fcn[3][1] = u4*v2; fcn[3][2] = pr + u4*v3; fcn[4][0] = (pr + u5)*v1; fcn[4][1] = (pr + u5)*v2; fcn[4][2] = (pr + u5)*v3; #else fcn[0][0] = u2; fcn[0][1] = u3; fcn[1][0] = pr + u2*v1; fcn[1][1] = u2*v2; fcn[2][0] = u3*v1; fcn[2][1] = pr + u3*v2; fcn[3][0] = (pr + u4)*v1; fcn[3][1] = (pr + u4)*v2; #endif // Jacobian matrices. #if NDIM == 3 jacos[0][0][0] = 0; jacos[0][0][1] = 0; jacos[0][0][2] = 0; jacos[0][1][0] = 1; jacos[0][1][1] = 0; jacos[0][1][2] = 0; jacos[0][2][0] = 0; jacos[0][2][1] = 1; jacos[0][2][2] = 0; jacos[0][3][0] = 0; jacos[0][3][1] = 0; jacos[0][3][2] = 1; jacos[0][4][0] = 0; jacos[0][4][1] = 0; jacos[0][4][2] = 0; jacos[1][0][0] = -v1o2 + ga1h*vs; jacos[1][0][1] = -v1*v2; jacos[1][0][2] = -v1*v3; jacos[1][1][0] = -ga3*v1; jacos[1][1][1] = v2; jacos[1][1][2] = v3; jacos[1][2][0] = -ga1*v2; jacos[1][2][1] = v1; jacos[1][2][2] = 0; jacos[1][3][0] = -ga1*v3; jacos[1][3][1] = 0; jacos[1][3][2] = v1; jacos[1][4][0] = ga1; jacos[1][4][1] = 0; jacos[1][4][2] = 0; jacos[2][0][0] = -v2*v1; jacos[2][0][1] = -v2o2 + ga1h*vs; jacos[2][0][2] = -v2*v3; jacos[2][1][0] = v2; jacos[2][1][1] = -ga1*v1; jacos[2][1][2] = 0; jacos[2][2][0] = v1; jacos[2][2][1] = -ga3*v2; jacos[2][2][2] = v3; jacos[2][3][0] = 0; jacos[2][3][1] = -ga1*v3; jacos[2][3][2] = v2; jacos[2][4][0] = 0; jacos[2][4][1] = ga1; jacos[2][4][2] = 0; jacos[3][0][0] = -v3*v1; jacos[3][0][1] = -v3*v2; jacos[3][0][2] = -v3o2 + ga1h*vs; jacos[3][1][0] = v3; jacos[3][1][1] = 0; jacos[3][1][2] = -ga1*v1; jacos[3][2][0] = 0; jacos[3][2][1] = v3; jacos[3][2][2] = -ga1*v2; jacos[3][3][0] = v1; jacos[3][3][1] = v2; jacos[3][3][2] = -ga3*v3; jacos[3][4][0] = 0; jacos[3][4][1] = 0; jacos[3][4][2] = ga1; jacos[4][0][0] = (-gretot + g1ke2)*u2/rho2; jacos[4][0][1] = (-gretot + g1ke2)*u3/rho2; jacos[4][0][2] = 
(-gretot + g1ke2)*u4/rho2; jacos[4][1][0] = getot - ga1h*(vs + 2*v1o2); jacos[4][1][1] = -ga1*v1*v2; jacos[4][1][2] = -ga1*v1*v3; jacos[4][2][0] = -ga1*v2*v1; jacos[4][2][1] = getot - ga1h*(vs + 2*v2o2); jacos[4][2][2] = -ga1*v2*v3; jacos[4][3][0] = -ga1*v3*v1; jacos[4][3][1] = -ga1*v3*v2; jacos[4][3][2] = getot - ga1h*(vs + 2*v3o2); jacos[4][4][0] = ga*v1; jacos[4][4][1] = ga*v2; jacos[4][4][2] = ga*v3; #else jacos[0][0][0] = 0; jacos[0][0][1] = 0; jacos[0][1][0] = 1; jacos[0][1][1] = 0; jacos[0][2][0] = 0; jacos[0][2][1] = 1; jacos[0][3][0] = 0; jacos[0][3][1] = 0; jacos[1][0][0] = -v1o2 + ga1h*vs; jacos[1][0][1] = -v1*v2; jacos[1][1][0] = -ga3*v1; jacos[1][1][1] = v2; jacos[1][2][0] = -ga1*v2; jacos[1][2][1] = v1; jacos[1][3][0] = ga1; jacos[1][3][1] = 0; jacos[2][0][0] = -v2*v1; jacos[2][0][1] = -v2o2 + ga1h*vs; jacos[2][1][0] = v2; jacos[2][1][1] = -ga1*v1; jacos[2][2][0] = v1; jacos[2][2][1] = -ga3*v2; jacos[2][3][0] = 0; jacos[2][3][1] = ga1; jacos[3][0][0] = (-gretot + g1ke2)*u2/rho2; jacos[3][0][1] = (-gretot + g1ke2)*u3/rho2; jacos[3][1][0] = getot - ga1h*(vs + 2*v1o2); jacos[3][1][1] = -ga1*v1*v2; jacos[3][2][0] = -ga1*v2*v1; jacos[3][2][1] = getot - ga1h*(vs + 2*v2o2); jacos[3][3][0] = ga*v1; jacos[3][3][1] = ga*v2; #endif return; }; __global__ void cuda_calc_solt(exedata *exd) { // pointers. double *psolt, *pidsol, *pdsol; // scalars. double val; // arrays. double jacos[NEQ][NEQ][NDIM]; double fcn[NEQ][NDIM]; // interators. int icl, ieq, jeq, idm; #ifdef __HIPCC__ // CUDA thread control. 
int istart = -exd->ngstcell + blockDim.x * blockIdx.x + threadIdx.x; #endif psolt = exd->solt + istart*NEQ; pidsol = exd->dsol + istart*NEQ*NDIM; #ifndef __HIPCC__ for (icl=istart; icl<iend; icl++) { #else icl = istart; if (icl < exd->ncell) { #endif cuda_calc_jaco(exd, icl, fcn, jacos); for (ieq=0; ieq<NEQ; ieq++) { psolt[ieq] = 0.0; for (idm=0; idm<NDIM; idm++) { val = 0.0; pdsol = pidsol; for (jeq=0; jeq<NEQ; jeq++) { val += jacos[ieq][jeq][idm]*pdsol[idm]; pdsol += NDIM; }; psolt[ieq] -= val; }; }; #ifndef __HIPCC__ // advance pointers. psolt += NEQ; pidsol += NEQ*NDIM; #endif }; }; #ifdef __HIPCC__ extern "C" int calc_solt(int nthread, exedata *exc, void *gexc) { int nblock = (exc->ngstcell + exc->ncell + nthread-1) / nthread; hipLaunchKernelGGL(( cuda_calc_solt), dim3(nblock), dim3(nthread), 0, 0, (exedata *)gexc); hipDeviceSynchronize(); return 0; }; #endif #ifdef __HIPCC__ __global__ void cuda_calc_soln(exedata *exd) { int istart = blockDim.x * blockIdx.x + threadIdx.x; #else int calc_soln(exedata *exd, int istart, int iend) { struct tms timm0, timm1; int cputicks; times(&timm0); #ifdef SOLVCESE_FE feenableexcept(SOLVCESE_FE); #endif #endif int clnfc, fcnnd; // partial pointers. int *pclfcs, *pfcnds, *pfccls; double *pndcrd, *pfccnd, *pclcnd; double *pjcecnd, *pcecnd, *pcevol; double *pjsol, *pdsol, *pjsolt, *psoln; // scalars. double hdt, qdt, voe; #if NDIM == 3 double disu0, disu1, disu2; double disv0, disv1, disv2; #endif // arrays. double crd[FCMND+1][NDIM]; double cnde[NDIM]; double sfnml[FCMND][NDIM]; double sfcnd[FCMND][NDIM]; double futo[NEQ]; double fusp[NEQ]; double futm[NEQ]; double jacos[NEQ][NEQ][NDIM]; double usfc[NEQ]; double fcn[NEQ][NDIM]; double dfcn[NEQ][NDIM]; // interators. int icl, ifl, inf, ifc, jcl, ieq, jeq; qdt = exd->time_increment * 0.25; hdt = exd->time_increment * 0.5; #ifndef __HIPCC__ for (icl=istart; icl<iend; icl++) { #else icl = istart; if (icl < exd->ncell) { #endif // initialize fluxes. 
for (ieq=0; ieq<NEQ; ieq++) { futo[ieq] = 0.0; }; pclfcs = exd->clfcs + icl*(CLMFC+1); clnfc = pclfcs[0]; for (ifl=1; ifl<=clnfc; ifl++) { ifc = pclfcs[ifl]; // face node coordinates. pfcnds = exd->fcnds + ifc*(FCMND+1); fcnnd = pfcnds[0]; for (inf=0; inf<fcnnd; inf++) { pndcrd = exd->ndcrd + pfcnds[inf+1]*NDIM; crd[inf][0] = pndcrd[0]; crd[inf][1] = pndcrd[1]; #if NDIM == 3 crd[inf][2] = pndcrd[2]; #endif }; crd[fcnnd][0] = crd[0][0]; crd[fcnnd][1] = crd[0][1]; #if NDIM == 3 crd[fcnnd][2] = crd[0][2]; #endif // neighboring cell center. pfccls = exd->fccls + ifc*FCREL; jcl = pfccls[0] + pfccls[1] - icl; pclcnd = exd->clcnd + jcl*NDIM; cnde[0] = pclcnd[0]; cnde[1] = pclcnd[1]; #if NDIM == 3 cnde[2] = pclcnd[2]; #endif // calculate geometric center of the bounding sub-face. for (inf=0; inf<fcnnd; inf++) { sfcnd[inf][0] = cnde[0] + crd[inf][0]; #if NDIM == 3 sfcnd[inf][0] += crd[inf+1][0]; #endif sfcnd[inf][0] /= NDIM; sfcnd[inf][1] = cnde[1] + crd[inf][1]; #if NDIM == 3 sfcnd[inf][1] += crd[inf+1][1]; #endif sfcnd[inf][1] /= NDIM; #if NDIM == 3 sfcnd[inf][2] = cnde[2] + crd[inf][2] + crd[inf+1][2]; sfcnd[inf][2] /= NDIM; #endif }; // calculate outward area vector of the bounding sub-face. 
#if NDIM == 3 voe = (pfccls[0] - icl) + SOLVCESE_ALMOST_ZERO; voe /= (icl - pfccls[0]) + SOLVCESE_ALMOST_ZERO; voe *= 0.5; pfccnd = exd->fccnd + ifc*NDIM; for (inf=0; inf<fcnnd; inf++) { disu0 = crd[inf ][0] - cnde[0]; disu1 = crd[inf ][1] - cnde[1]; disu2 = crd[inf ][2] - cnde[2]; disv0 = crd[inf+1][0] - cnde[0]; disv1 = crd[inf+1][1] - cnde[1]; disv2 = crd[inf+1][2] - cnde[2]; sfnml[inf][0] = (disu1*disv2 - disu2*disv1) * voe; sfnml[inf][1] = (disu2*disv0 - disu0*disv2) * voe; sfnml[inf][2] = (disu0*disv1 - disu1*disv0) * voe; }; #else voe = (crd[0][0]-cnde[0])*(crd[1][1]-cnde[1]) - (crd[0][1]-cnde[1])*(crd[1][0]-cnde[0]); voe /= fabs(voe); sfnml[0][0] = -(cnde[1]-crd[0][1]) * voe; sfnml[0][1] = (cnde[0]-crd[0][0]) * voe; sfnml[1][0] = (cnde[1]-crd[1][1]) * voe; sfnml[1][1] = -(cnde[0]-crd[1][0]) * voe; #endif // spatial flux (given time). pjcecnd = exd->cecnd + jcl*(CLMFC+1)*NDIM; pcecnd = exd->cecnd + (icl*(CLMFC+1)+ifl)*NDIM; pjsol = exd->sol + jcl*NEQ; pdsol = exd->dsol + jcl*NEQ*NDIM; for (ieq=0; ieq<NEQ; ieq++) { fusp[ieq] = pjsol[ieq]; fusp[ieq] += (pcecnd[0]-pjcecnd[0]) * pdsol[0]; fusp[ieq] += (pcecnd[1]-pjcecnd[1]) * pdsol[1]; #if NDIM == 3 fusp[ieq] += (pcecnd[2]-pjcecnd[2]) * pdsol[2]; #endif pdsol += NDIM; }; pcevol = exd->cevol + icl*(CLMFC+1)+ifl; for (ieq=0; ieq<NEQ; ieq++) { fusp[ieq] *= pcevol[0]; }; // temporal flux (give space). #ifndef __HIPCC__ exd->jacofunc(exd, jcl, (double *)fcn, (double *)jacos); #else cuda_calc_jaco(exd, jcl, fcn, jacos); #endif pjsolt = exd->solt + jcl*NEQ; for (ieq=0; ieq<NEQ; ieq++) futm[ieq] = 0.0; for (inf=0; inf<fcnnd; inf++) { // solution at sub-face center. pdsol = exd->dsol + jcl*NEQ*NDIM; for (ieq=0; ieq<NEQ; ieq++) { usfc[ieq] = qdt * pjsolt[ieq]; usfc[ieq] += (sfcnd[inf][0]-pjcecnd[0]) * pdsol[0]; usfc[ieq] += (sfcnd[inf][1]-pjcecnd[1]) * pdsol[1]; #if NDIM == 3 usfc[ieq] += (sfcnd[inf][2]-pjcecnd[2]) * pdsol[2]; #endif pdsol += NDIM; }; // spatial derivatives. 
for (ieq=0; ieq<NEQ; ieq++) { dfcn[ieq][0] = fcn[ieq][0]; dfcn[ieq][1] = fcn[ieq][1]; #if NDIM == 3 dfcn[ieq][2] = fcn[ieq][2]; #endif for (jeq=0; jeq<NEQ; jeq++) { dfcn[ieq][0] += jacos[ieq][jeq][0] * usfc[jeq]; dfcn[ieq][1] += jacos[ieq][jeq][1] * usfc[jeq]; #if NDIM == 3 dfcn[ieq][2] += jacos[ieq][jeq][2] * usfc[jeq]; #endif }; }; // temporal flux. for (ieq=0; ieq<NEQ; ieq++) { futm[ieq] += dfcn[ieq][0] * sfnml[inf][0]; futm[ieq] += dfcn[ieq][1] * sfnml[inf][1]; #if NDIM == 3 futm[ieq] += dfcn[ieq][2] * sfnml[inf][2]; #endif }; }; // sum fluxes. for (ieq=0; ieq<NEQ; ieq++) { futo[ieq] += fusp[ieq] - hdt*futm[ieq]; }; }; // update solutions. psoln = exd->soln + icl*NEQ; pcevol = exd->cevol + icl*(CLMFC+1); for (ieq=0; ieq<NEQ; ieq++) { psoln[ieq] = futo[ieq] / pcevol[0]; }; }; #ifndef __HIPCC__ times(&timm1); cputicks = (int)((timm1.tms_utime+timm1.tms_stime) - (timm0.tms_utime+timm0.tms_stime)); return cputicks; }; #else }; extern "C" int calc_soln(int nthread, exedata *exc, void *gexc) { int nblock = (exc->ncell + nthread-1) / nthread; hipLaunchKernelGGL(( cuda_calc_soln), dim3(nblock), dim3(nthread), 0, 0, (exedata *)gexc); hipDeviceSynchronize(); return 0; }; #endif // vim: set ts=4 et:
2307c397f802f66c74edb8e8c93a10ca1098c005.cu
/* * Copyright (C) 2008-2011 Yung-Yu Chen <yyc@solvcon.net>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "cueuler.h" __device__ void cuda_calc_jaco(exedata *exd, int icl, double fcn[NEQ][NDIM], double jacos[NEQ][NEQ][NDIM]) { // pointers. double *psol; // scalars. double ga, ga1, ga3, ga1h; double u1, u2, u3, u4; #if NDIM == 3 double u5; #endif // accelerating variables. double rho2, ke2, g1ke2, vs, gretot, getot, pr, v1o2, v2o2, v1, v2; #if NDIM == 3 double v3o2, v3; #endif // initialize values. ga = exd->amsca[icl*NSCA]; ga1 = ga-1; ga3 = ga-3; ga1h = ga1/2; psol = exd->sol + icl*NEQ; u1 = psol[0] + SOLVCESE_TINY; u2 = psol[1]; u3 = psol[2]; u4 = psol[3]; #if NDIM == 3 u5 = psol[4]; #endif // accelerating variables. rho2 = u1*u1; v1 = u2/u1; v1o2 = v1*v1; v2 = u3/u1; v2o2 = v2*v2; #if NDIM == 3 v3 = u4/u1; v3o2 = v3*v3; #endif ke2 = (u2*u2 + u3*u3 #if NDIM == 3 + u4*u4 #endif )/u1; g1ke2 = ga1*ke2; vs = ke2/u1; gretot = ga * #if NDIM == 3 u5 #else u4 #endif ; getot = gretot/u1; pr = ga1* #if NDIM == 3 u5 #else u4 #endif - ga1h * ke2; // flux function. 
#if NDIM == 3 fcn[0][0] = u2; fcn[0][1] = u3; fcn[0][2] = u4; fcn[1][0] = pr + u2*v1; fcn[1][1] = u2*v2; fcn[1][2] = u2*v3; fcn[2][0] = u3*v1; fcn[2][1] = pr + u3*v2; fcn[2][2] = u3*v3; fcn[3][0] = u4*v1; fcn[3][1] = u4*v2; fcn[3][2] = pr + u4*v3; fcn[4][0] = (pr + u5)*v1; fcn[4][1] = (pr + u5)*v2; fcn[4][2] = (pr + u5)*v3; #else fcn[0][0] = u2; fcn[0][1] = u3; fcn[1][0] = pr + u2*v1; fcn[1][1] = u2*v2; fcn[2][0] = u3*v1; fcn[2][1] = pr + u3*v2; fcn[3][0] = (pr + u4)*v1; fcn[3][1] = (pr + u4)*v2; #endif // Jacobian matrices. #if NDIM == 3 jacos[0][0][0] = 0; jacos[0][0][1] = 0; jacos[0][0][2] = 0; jacos[0][1][0] = 1; jacos[0][1][1] = 0; jacos[0][1][2] = 0; jacos[0][2][0] = 0; jacos[0][2][1] = 1; jacos[0][2][2] = 0; jacos[0][3][0] = 0; jacos[0][3][1] = 0; jacos[0][3][2] = 1; jacos[0][4][0] = 0; jacos[0][4][1] = 0; jacos[0][4][2] = 0; jacos[1][0][0] = -v1o2 + ga1h*vs; jacos[1][0][1] = -v1*v2; jacos[1][0][2] = -v1*v3; jacos[1][1][0] = -ga3*v1; jacos[1][1][1] = v2; jacos[1][1][2] = v3; jacos[1][2][0] = -ga1*v2; jacos[1][2][1] = v1; jacos[1][2][2] = 0; jacos[1][3][0] = -ga1*v3; jacos[1][3][1] = 0; jacos[1][3][2] = v1; jacos[1][4][0] = ga1; jacos[1][4][1] = 0; jacos[1][4][2] = 0; jacos[2][0][0] = -v2*v1; jacos[2][0][1] = -v2o2 + ga1h*vs; jacos[2][0][2] = -v2*v3; jacos[2][1][0] = v2; jacos[2][1][1] = -ga1*v1; jacos[2][1][2] = 0; jacos[2][2][0] = v1; jacos[2][2][1] = -ga3*v2; jacos[2][2][2] = v3; jacos[2][3][0] = 0; jacos[2][3][1] = -ga1*v3; jacos[2][3][2] = v2; jacos[2][4][0] = 0; jacos[2][4][1] = ga1; jacos[2][4][2] = 0; jacos[3][0][0] = -v3*v1; jacos[3][0][1] = -v3*v2; jacos[3][0][2] = -v3o2 + ga1h*vs; jacos[3][1][0] = v3; jacos[3][1][1] = 0; jacos[3][1][2] = -ga1*v1; jacos[3][2][0] = 0; jacos[3][2][1] = v3; jacos[3][2][2] = -ga1*v2; jacos[3][3][0] = v1; jacos[3][3][1] = v2; jacos[3][3][2] = -ga3*v3; jacos[3][4][0] = 0; jacos[3][4][1] = 0; jacos[3][4][2] = ga1; jacos[4][0][0] = (-gretot + g1ke2)*u2/rho2; jacos[4][0][1] = (-gretot + g1ke2)*u3/rho2; jacos[4][0][2] = 
(-gretot + g1ke2)*u4/rho2; jacos[4][1][0] = getot - ga1h*(vs + 2*v1o2); jacos[4][1][1] = -ga1*v1*v2; jacos[4][1][2] = -ga1*v1*v3; jacos[4][2][0] = -ga1*v2*v1; jacos[4][2][1] = getot - ga1h*(vs + 2*v2o2); jacos[4][2][2] = -ga1*v2*v3; jacos[4][3][0] = -ga1*v3*v1; jacos[4][3][1] = -ga1*v3*v2; jacos[4][3][2] = getot - ga1h*(vs + 2*v3o2); jacos[4][4][0] = ga*v1; jacos[4][4][1] = ga*v2; jacos[4][4][2] = ga*v3; #else jacos[0][0][0] = 0; jacos[0][0][1] = 0; jacos[0][1][0] = 1; jacos[0][1][1] = 0; jacos[0][2][0] = 0; jacos[0][2][1] = 1; jacos[0][3][0] = 0; jacos[0][3][1] = 0; jacos[1][0][0] = -v1o2 + ga1h*vs; jacos[1][0][1] = -v1*v2; jacos[1][1][0] = -ga3*v1; jacos[1][1][1] = v2; jacos[1][2][0] = -ga1*v2; jacos[1][2][1] = v1; jacos[1][3][0] = ga1; jacos[1][3][1] = 0; jacos[2][0][0] = -v2*v1; jacos[2][0][1] = -v2o2 + ga1h*vs; jacos[2][1][0] = v2; jacos[2][1][1] = -ga1*v1; jacos[2][2][0] = v1; jacos[2][2][1] = -ga3*v2; jacos[2][3][0] = 0; jacos[2][3][1] = ga1; jacos[3][0][0] = (-gretot + g1ke2)*u2/rho2; jacos[3][0][1] = (-gretot + g1ke2)*u3/rho2; jacos[3][1][0] = getot - ga1h*(vs + 2*v1o2); jacos[3][1][1] = -ga1*v1*v2; jacos[3][2][0] = -ga1*v2*v1; jacos[3][2][1] = getot - ga1h*(vs + 2*v2o2); jacos[3][3][0] = ga*v1; jacos[3][3][1] = ga*v2; #endif return; }; __global__ void cuda_calc_solt(exedata *exd) { // pointers. double *psolt, *pidsol, *pdsol; // scalars. double val; // arrays. double jacos[NEQ][NEQ][NDIM]; double fcn[NEQ][NDIM]; // interators. int icl, ieq, jeq, idm; #ifdef __CUDACC__ // CUDA thread control. 
int istart = -exd->ngstcell + blockDim.x * blockIdx.x + threadIdx.x; #endif psolt = exd->solt + istart*NEQ; pidsol = exd->dsol + istart*NEQ*NDIM; #ifndef __CUDACC__ for (icl=istart; icl<iend; icl++) { #else icl = istart; if (icl < exd->ncell) { #endif cuda_calc_jaco(exd, icl, fcn, jacos); for (ieq=0; ieq<NEQ; ieq++) { psolt[ieq] = 0.0; for (idm=0; idm<NDIM; idm++) { val = 0.0; pdsol = pidsol; for (jeq=0; jeq<NEQ; jeq++) { val += jacos[ieq][jeq][idm]*pdsol[idm]; pdsol += NDIM; }; psolt[ieq] -= val; }; }; #ifndef __CUDACC__ // advance pointers. psolt += NEQ; pidsol += NEQ*NDIM; #endif }; }; #ifdef __CUDACC__ extern "C" int calc_solt(int nthread, exedata *exc, void *gexc) { int nblock = (exc->ngstcell + exc->ncell + nthread-1) / nthread; cuda_calc_solt<<<nblock, nthread>>>((exedata *)gexc); cudaThreadSynchronize(); return 0; }; #endif #ifdef __CUDACC__ __global__ void cuda_calc_soln(exedata *exd) { int istart = blockDim.x * blockIdx.x + threadIdx.x; #else int calc_soln(exedata *exd, int istart, int iend) { struct tms timm0, timm1; int cputicks; times(&timm0); #ifdef SOLVCESE_FE feenableexcept(SOLVCESE_FE); #endif #endif int clnfc, fcnnd; // partial pointers. int *pclfcs, *pfcnds, *pfccls; double *pndcrd, *pfccnd, *pclcnd; double *pjcecnd, *pcecnd, *pcevol; double *pjsol, *pdsol, *pjsolt, *psoln; // scalars. double hdt, qdt, voe; #if NDIM == 3 double disu0, disu1, disu2; double disv0, disv1, disv2; #endif // arrays. double crd[FCMND+1][NDIM]; double cnde[NDIM]; double sfnml[FCMND][NDIM]; double sfcnd[FCMND][NDIM]; double futo[NEQ]; double fusp[NEQ]; double futm[NEQ]; double jacos[NEQ][NEQ][NDIM]; double usfc[NEQ]; double fcn[NEQ][NDIM]; double dfcn[NEQ][NDIM]; // interators. int icl, ifl, inf, ifc, jcl, ieq, jeq; qdt = exd->time_increment * 0.25; hdt = exd->time_increment * 0.5; #ifndef __CUDACC__ for (icl=istart; icl<iend; icl++) { #else icl = istart; if (icl < exd->ncell) { #endif // initialize fluxes. 
for (ieq=0; ieq<NEQ; ieq++) { futo[ieq] = 0.0; }; pclfcs = exd->clfcs + icl*(CLMFC+1); clnfc = pclfcs[0]; for (ifl=1; ifl<=clnfc; ifl++) { ifc = pclfcs[ifl]; // face node coordinates. pfcnds = exd->fcnds + ifc*(FCMND+1); fcnnd = pfcnds[0]; for (inf=0; inf<fcnnd; inf++) { pndcrd = exd->ndcrd + pfcnds[inf+1]*NDIM; crd[inf][0] = pndcrd[0]; crd[inf][1] = pndcrd[1]; #if NDIM == 3 crd[inf][2] = pndcrd[2]; #endif }; crd[fcnnd][0] = crd[0][0]; crd[fcnnd][1] = crd[0][1]; #if NDIM == 3 crd[fcnnd][2] = crd[0][2]; #endif // neighboring cell center. pfccls = exd->fccls + ifc*FCREL; jcl = pfccls[0] + pfccls[1] - icl; pclcnd = exd->clcnd + jcl*NDIM; cnde[0] = pclcnd[0]; cnde[1] = pclcnd[1]; #if NDIM == 3 cnde[2] = pclcnd[2]; #endif // calculate geometric center of the bounding sub-face. for (inf=0; inf<fcnnd; inf++) { sfcnd[inf][0] = cnde[0] + crd[inf][0]; #if NDIM == 3 sfcnd[inf][0] += crd[inf+1][0]; #endif sfcnd[inf][0] /= NDIM; sfcnd[inf][1] = cnde[1] + crd[inf][1]; #if NDIM == 3 sfcnd[inf][1] += crd[inf+1][1]; #endif sfcnd[inf][1] /= NDIM; #if NDIM == 3 sfcnd[inf][2] = cnde[2] + crd[inf][2] + crd[inf+1][2]; sfcnd[inf][2] /= NDIM; #endif }; // calculate outward area vector of the bounding sub-face. 
#if NDIM == 3 voe = (pfccls[0] - icl) + SOLVCESE_ALMOST_ZERO; voe /= (icl - pfccls[0]) + SOLVCESE_ALMOST_ZERO; voe *= 0.5; pfccnd = exd->fccnd + ifc*NDIM; for (inf=0; inf<fcnnd; inf++) { disu0 = crd[inf ][0] - cnde[0]; disu1 = crd[inf ][1] - cnde[1]; disu2 = crd[inf ][2] - cnde[2]; disv0 = crd[inf+1][0] - cnde[0]; disv1 = crd[inf+1][1] - cnde[1]; disv2 = crd[inf+1][2] - cnde[2]; sfnml[inf][0] = (disu1*disv2 - disu2*disv1) * voe; sfnml[inf][1] = (disu2*disv0 - disu0*disv2) * voe; sfnml[inf][2] = (disu0*disv1 - disu1*disv0) * voe; }; #else voe = (crd[0][0]-cnde[0])*(crd[1][1]-cnde[1]) - (crd[0][1]-cnde[1])*(crd[1][0]-cnde[0]); voe /= fabs(voe); sfnml[0][0] = -(cnde[1]-crd[0][1]) * voe; sfnml[0][1] = (cnde[0]-crd[0][0]) * voe; sfnml[1][0] = (cnde[1]-crd[1][1]) * voe; sfnml[1][1] = -(cnde[0]-crd[1][0]) * voe; #endif // spatial flux (given time). pjcecnd = exd->cecnd + jcl*(CLMFC+1)*NDIM; pcecnd = exd->cecnd + (icl*(CLMFC+1)+ifl)*NDIM; pjsol = exd->sol + jcl*NEQ; pdsol = exd->dsol + jcl*NEQ*NDIM; for (ieq=0; ieq<NEQ; ieq++) { fusp[ieq] = pjsol[ieq]; fusp[ieq] += (pcecnd[0]-pjcecnd[0]) * pdsol[0]; fusp[ieq] += (pcecnd[1]-pjcecnd[1]) * pdsol[1]; #if NDIM == 3 fusp[ieq] += (pcecnd[2]-pjcecnd[2]) * pdsol[2]; #endif pdsol += NDIM; }; pcevol = exd->cevol + icl*(CLMFC+1)+ifl; for (ieq=0; ieq<NEQ; ieq++) { fusp[ieq] *= pcevol[0]; }; // temporal flux (give space). #ifndef __CUDACC__ exd->jacofunc(exd, jcl, (double *)fcn, (double *)jacos); #else cuda_calc_jaco(exd, jcl, fcn, jacos); #endif pjsolt = exd->solt + jcl*NEQ; for (ieq=0; ieq<NEQ; ieq++) futm[ieq] = 0.0; for (inf=0; inf<fcnnd; inf++) { // solution at sub-face center. pdsol = exd->dsol + jcl*NEQ*NDIM; for (ieq=0; ieq<NEQ; ieq++) { usfc[ieq] = qdt * pjsolt[ieq]; usfc[ieq] += (sfcnd[inf][0]-pjcecnd[0]) * pdsol[0]; usfc[ieq] += (sfcnd[inf][1]-pjcecnd[1]) * pdsol[1]; #if NDIM == 3 usfc[ieq] += (sfcnd[inf][2]-pjcecnd[2]) * pdsol[2]; #endif pdsol += NDIM; }; // spatial derivatives. 
for (ieq=0; ieq<NEQ; ieq++) { dfcn[ieq][0] = fcn[ieq][0]; dfcn[ieq][1] = fcn[ieq][1]; #if NDIM == 3 dfcn[ieq][2] = fcn[ieq][2]; #endif for (jeq=0; jeq<NEQ; jeq++) { dfcn[ieq][0] += jacos[ieq][jeq][0] * usfc[jeq]; dfcn[ieq][1] += jacos[ieq][jeq][1] * usfc[jeq]; #if NDIM == 3 dfcn[ieq][2] += jacos[ieq][jeq][2] * usfc[jeq]; #endif }; }; // temporal flux. for (ieq=0; ieq<NEQ; ieq++) { futm[ieq] += dfcn[ieq][0] * sfnml[inf][0]; futm[ieq] += dfcn[ieq][1] * sfnml[inf][1]; #if NDIM == 3 futm[ieq] += dfcn[ieq][2] * sfnml[inf][2]; #endif }; }; // sum fluxes. for (ieq=0; ieq<NEQ; ieq++) { futo[ieq] += fusp[ieq] - hdt*futm[ieq]; }; }; // update solutions. psoln = exd->soln + icl*NEQ; pcevol = exd->cevol + icl*(CLMFC+1); for (ieq=0; ieq<NEQ; ieq++) { psoln[ieq] = futo[ieq] / pcevol[0]; }; }; #ifndef __CUDACC__ times(&timm1); cputicks = (int)((timm1.tms_utime+timm1.tms_stime) - (timm0.tms_utime+timm0.tms_stime)); return cputicks; }; #else }; extern "C" int calc_soln(int nthread, exedata *exc, void *gexc) { int nblock = (exc->ncell + nthread-1) / nthread; cuda_calc_soln<<<nblock, nthread>>>((exedata *)gexc); cudaThreadSynchronize(); return 0; }; #endif // vim: set ts=4 et:
312ac48da95673a84067a0c5e8714143d769af83.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Academic License - for use in teaching, academic research, and meeting * course requirements at degree granting institutions only. Not for * government, commercial, or other organizational use. * * stereoDisparity.cu * * Code generation for function 'stereoDisparity' * */ /* Include files */ #include "rt_nonfinite.h" #include "stereoDisparity.h" /* Function Declarations */ static __global__ void stereoDisparity_kernel1(int16_T *out_disp, int32_T *min_cost); static __global__ void stereoDisparity_kernel2(const uint8_T *img1, const uint8_T *img0, int32_T d, int32_T *diff_img); static __global__ void stereoDisparity_kernel3(int32_T *diff_img, int32_T *a); static __global__ void stereoDisparity_kernel4(int32_T *a, real_T *cost_v); static __global__ void stereoDisparity_kernel5(real_T *cost_v, real_T *cost); static __global__ void stereoDisparity_kernel6(int32_T d, real_T *cost, int16_T * out_disp, int32_T *min_cost); /* Function Definitions */ static __global__ __launch_bounds__(512, 1) void stereoDisparity_kernel1(int16_T *out_disp, int32_T *min_cost) { int32_T temp_cost; ; ; temp_cost = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) + blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) + threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x) + threadIdx.x); if (!(temp_cost >= 145408)) { /* modified algorithm for stereo disparity block matching */ /* In this implementation instead of finding shifted image ,indices are mapped accordingly */ /* to save memory and some processing RGBA column major packed data is used as input for */ /* Compatibility with CUDA intrinsics Convolution is performed using separable filters (Horizontal and then Vertical) */ /* gpu code generation pragma */ /* Stereo disparity Parameters */ /* WIN_RAD is the radius of the window to be operated,min_disparity is the minimum disparity level */ /* the search 
continues max_disparity is the maximun disparity level the search continues */ /* Image dimensions for loop control */ /* The number of channels packed are 4 (RGBA) so as nChannels are 4 */ /* To store the raw differences */ /* To store the minimum cost */ /* Store the final disparity */ min_cost[temp_cost] = 99999999; out_disp[temp_cost] = 0; } } static __global__ __launch_bounds__(512, 1) void stereoDisparity_kernel2(const uint8_T *img1, const uint8_T *img0, int32_T d, int32_T *diff_img) { uint32_T threadId; int32_T ind_h; int32_T rowIdx; int32_T ind_w1; int32_T colIdx; int32_T ind_w2; int32_T tDiff; int32_T kk; int32_T temp_cost; ; ; threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) + blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) + threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x) + threadIdx.x; colIdx = (int32_T)(threadId / 300U); rowIdx = (int32_T)(threadId - (uint32_T)colIdx * 300U); if ((!(rowIdx >= 300)) && (!(colIdx >= 528))) { /* Row index calculation */ ind_h = rowIdx - 7; /* Column indices calculation for left image */ ind_w1 = colIdx - 7; /* Row indices calculation for right image */ ind_w2 = (colIdx + d) - 23; /* Border clamping for row Indices */ if (rowIdx - 7 <= 0) { ind_h = 1; } if (ind_h > 284) { ind_h = 284; } /* Border clamping for column indices for left image */ if (colIdx - 7 <= 0) { ind_w1 = 1; } if (ind_w1 > 512) { ind_w1 = 512; } /* Border clamping for column indices for right image */ if (ind_w2 <= 0) { ind_w2 = 1; } if (ind_w2 > 512) { ind_w2 = 512; } /* In this step, Sum of absolute Differences is performed */ /* across tour channels. 
*/ tDiff = 0; for (kk = 0; kk < 4; kk++) { temp_cost = (int32_T)img0[(((ind_h - 1) << 2) + kk) + 1136 * (ind_w1 - 1)] - (int32_T)img1[(((ind_h - 1) << 2) + kk) + 1136 * (ind_w2 - 1)]; if (temp_cost < 0) { temp_cost = -temp_cost; } if ((tDiff < 0) && (temp_cost < MIN_int32_T - tDiff)) { tDiff = MIN_int32_T; } else if ((tDiff > 0) && (temp_cost > MAX_int32_T - tDiff)) { tDiff = MAX_int32_T; } else { tDiff += temp_cost; } } /* Store the SAD cost into a matrix */ diff_img[rowIdx + 300 * colIdx] = tDiff; } } static __global__ __launch_bounds__(512, 1) void stereoDisparity_kernel3(int32_T *diff_img, int32_T *a) { int32_T temp_cost; ; ; temp_cost = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) + blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) + threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x) + threadIdx.x); if (!(temp_cost >= 158400)) { /* Aggregating the differences using separable convolution. Expect this to generate two Kernel */ /* using shared memory.The first kernel is the convolution with the horizontal kernel and second */ /* kernel operates on its output the column wise convolution. 
*/ a[temp_cost] = diff_img[temp_cost]; } } static __global__ __launch_bounds__(1024, 1) void stereoDisparity_kernel4 (int32_T *a, real_T *cost_v) { real_T cv; int32_T temp_cost; int32_T threadIdY; int32_T threadIdX; __shared__ int32_T a_shared[1536]; int32_T baseR; int32_T srow; int32_T strideRow; int32_T scol; int32_T strideCol; int32_T y_idx; int32_T baseC; int32_T x_idx; ; ; threadIdY = (int32_T)(blockDim.y * blockIdx.y + threadIdx.y); threadIdX = (int32_T)(blockDim.x * blockIdx.x + threadIdx.x); baseR = threadIdX; srow = (int32_T)threadIdx.x; strideRow = (int32_T)blockDim.x; scol = (int32_T)threadIdx.y; strideCol = (int32_T)blockDim.y; for (y_idx = srow; y_idx <= 31; y_idx += strideRow) { baseC = threadIdY; for (x_idx = scol; x_idx <= 47; x_idx += strideCol) { if ((baseR >= 0) && (baseR < 300) && ((baseC >= 0) && (baseC < 528))) { a_shared[y_idx + 32 * x_idx] = (int32_T)a[300 * baseC + baseR]; } else { a_shared[y_idx + 32 * x_idx] = 0; } baseC += strideCol; } baseR += strideRow; } __syncthreads(); if ((!(threadIdX >= 300)) && (!(threadIdY >= 512))) { cv = 0.0; for (temp_cost = 0; temp_cost < 17; temp_cost++) { cv += (real_T)a_shared[(int32_T)threadIdx.x + 32 * ((int32_T)threadIdx.y + ((temp_cost + threadIdY) - threadIdY))]; } cost_v[threadIdX + 300 * threadIdY] = cv; } } static __global__ __launch_bounds__(1024, 1) void stereoDisparity_kernel5(real_T *cost_v, real_T *cost) { real_T cv; int32_T temp_cost; int32_T threadIdY; int32_T threadIdX; __shared__ real_T cost_v_shared[1536]; int32_T baseR; int32_T srow; int32_T strideRow; int32_T scol; int32_T strideCol; int32_T y_idx; int32_T baseC; int32_T x_idx; ; ; threadIdY = (int32_T)(blockDim.y * blockIdx.y + threadIdx.y); threadIdX = (int32_T)(blockDim.x * blockIdx.x + threadIdx.x); baseR = threadIdX; srow = (int32_T)threadIdx.x; strideRow = (int32_T)blockDim.x; scol = (int32_T)threadIdx.y; strideCol = (int32_T)blockDim.y; for (y_idx = srow; y_idx <= 47; y_idx += strideRow) { baseC = threadIdY; for (x_idx = scol; 
x_idx <= 31; x_idx += strideCol) { if ((baseR >= 0) && (baseR < 300) && ((baseC >= 0) && (baseC < 512))) { cost_v_shared[y_idx + 48 * x_idx] = (real_T)cost_v[300 * baseC + baseR]; } else { cost_v_shared[y_idx + 48 * x_idx] = 0.0; } baseC += strideCol; } baseR += strideRow; } __syncthreads(); if ((!(threadIdX >= 284)) && (!(threadIdY >= 512))) { cv = 0.0; for (temp_cost = 0; temp_cost < 17; temp_cost++) { cv += cost_v_shared[((int32_T)threadIdx.x + ((temp_cost + threadIdX) - threadIdX)) + 48 * (int32_T)threadIdx.y]; } cost[threadIdX + 284 * threadIdY] = cv; } } static __global__ __launch_bounds__(512, 1) void stereoDisparity_kernel6(int32_T d, real_T *cost, int16_T *out_disp, int32_T *min_cost) { uint32_T threadId; real_T cv; int32_T kk; int32_T colIdx; int32_T temp_cost; ; ; threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) + blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) + threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x) + threadIdx.x; colIdx = (int32_T)(threadId / 284U); kk = (int32_T)(threadId - (uint32_T)colIdx * 284U); if ((!(kk >= 284)) && (!(colIdx >= 512))) { /* load the cost */ cv = cost[kk + 284 * colIdx]; if (cv < 2.147483648E+9) { if (cv >= -2.147483648E+9) { temp_cost = (int32_T)cv; } else { temp_cost = MIN_int32_T; } } else if (cv >= 2.147483648E+9) { temp_cost = MAX_int32_T; } else { temp_cost = 0; } /* compare against the minimum cost available and store the */ /* disparity value */ if (min_cost[kk + 284 * colIdx] > temp_cost) { min_cost[kk + 284 * colIdx] = temp_cost; out_disp[kk + 284 * colIdx] = (int16_T)((int32_T)fabs(-16.0 + (real_T)d) + 8); } } } void stereoDisparity(const uint8_T img0[581632], const uint8_T img1[581632], int16_T out_disp[145408]) { int32_T d; int16_T *gpu_out_disp; int32_T *gpu_min_cost; uint8_T *gpu_img1; uint8_T *gpu_img0; int32_T *gpu_diff_img; int32_T *gpu_a; real_T *gpu_cost_v; real_T *gpu_cost; boolean_T img1_dirtyOnCpu; boolean_T img0_dirtyOnCpu; 
hipMalloc(&gpu_min_cost, 581632ULL); hipMalloc(&gpu_out_disp, 290816ULL); hipMalloc(&gpu_cost, 1163264ULL); hipMalloc(&gpu_cost_v, 1228800ULL); hipMalloc(&gpu_a, 633600ULL); hipMalloc(&gpu_diff_img, 633600ULL); hipMalloc(&gpu_img0, 581632ULL); hipMalloc(&gpu_img1, 581632ULL); img1_dirtyOnCpu = true; img0_dirtyOnCpu = true; /* modified algorithm for stereo disparity block matching */ /* In this implementation instead of finding shifted image ,indices are mapped accordingly */ /* to save memory and some processing RGBA column major packed data is used as input for */ /* Compatibility with CUDA intrinsics Convolution is performed using separable filters (Horizontal and then Vertical) */ /* gpu code generation pragma */ /* Stereo disparity Parameters */ /* WIN_RAD is the radius of the window to be operated,min_disparity is the minimum disparity level */ /* the search continues max_disparity is the maximun disparity level the search continues */ /* Image dimensions for loop control */ /* The number of channels packed are 4 (RGBA) so as nChannels are 4 */ /* To store the raw differences */ /* To store the minimum cost */ /* Store the final disparity */ hipLaunchKernelGGL(( stereoDisparity_kernel1), dim3(dim3(284U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0, gpu_out_disp, gpu_min_cost); /* Filters for aggregating the differences */ /* filter_h is the horizontal filter used in separable convolution */ /* filter_v is the vertical filter used in separable convolution which */ /* operates on the output of the row convolution */ /* Main Loop that runs for all the disparity levels. This loop is */ /* expected to run on CPU. */ for (d = 0; d < 17; d++) { /* Find the difference matrix for the current disparity level. Expect */ /* this to generate a Kernel function. 
*/ if (img1_dirtyOnCpu) { hipMemcpy((void *)gpu_img1, (void *)&img1[0], 581632ULL, hipMemcpyHostToDevice); img1_dirtyOnCpu = false; } if (img0_dirtyOnCpu) { hipMemcpy((void *)gpu_img0, (void *)&img0[0], 581632ULL, hipMemcpyHostToDevice); img0_dirtyOnCpu = false; } hipLaunchKernelGGL(( stereoDisparity_kernel2), dim3(dim3(310U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0, gpu_img1, gpu_img0, d, gpu_diff_img); /* Aggregating the differences using separable convolution. Expect this to generate two Kernel */ /* using shared memory.The first kernel is the convolution with the horizontal kernel and second */ /* kernel operates on its output the column wise convolution. */ hipLaunchKernelGGL(( stereoDisparity_kernel3), dim3(dim3(310U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0, gpu_diff_img, gpu_a); hipLaunchKernelGGL(( stereoDisparity_kernel4), dim3(dim3(10U, 16U, 1U)), dim3(dim3(32U, 32U, 1U)), 0, 0, gpu_a, gpu_cost_v); hipLaunchKernelGGL(( stereoDisparity_kernel5), dim3(dim3(9U, 16U, 1U)), dim3(dim3(32U, 32U, 1U)), 0, 0, gpu_cost_v, gpu_cost); /* This part updates the min_cost matrix with by comparing the values */ /* with current disparity level. */ hipLaunchKernelGGL(( stereoDisparity_kernel6), dim3(dim3(284U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0, d, gpu_cost, gpu_out_disp, gpu_min_cost); } hipMemcpy((void *)&out_disp[0], (void *)gpu_out_disp, 290816ULL, hipMemcpyDeviceToHost); hipFree(gpu_img1); hipFree(gpu_img0); hipFree(gpu_diff_img); hipFree(gpu_a); hipFree(gpu_cost_v); hipFree(gpu_cost); hipFree(gpu_out_disp); hipFree(gpu_min_cost); } /* End of code generation (stereoDisparity.cu) */
312ac48da95673a84067a0c5e8714143d769af83.cu
/* * Academic License - for use in teaching, academic research, and meeting * course requirements at degree granting institutions only. Not for * government, commercial, or other organizational use. * * stereoDisparity.cu * * Code generation for function 'stereoDisparity' * */ /* Include files */ #include "rt_nonfinite.h" #include "stereoDisparity.h" /* Function Declarations */ static __global__ void stereoDisparity_kernel1(int16_T *out_disp, int32_T *min_cost); static __global__ void stereoDisparity_kernel2(const uint8_T *img1, const uint8_T *img0, int32_T d, int32_T *diff_img); static __global__ void stereoDisparity_kernel3(int32_T *diff_img, int32_T *a); static __global__ void stereoDisparity_kernel4(int32_T *a, real_T *cost_v); static __global__ void stereoDisparity_kernel5(real_T *cost_v, real_T *cost); static __global__ void stereoDisparity_kernel6(int32_T d, real_T *cost, int16_T * out_disp, int32_T *min_cost); /* Function Definitions */ static __global__ __launch_bounds__(512, 1) void stereoDisparity_kernel1(int16_T *out_disp, int32_T *min_cost) { int32_T temp_cost; ; ; temp_cost = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) + blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) + threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x) + threadIdx.x); if (!(temp_cost >= 145408)) { /* modified algorithm for stereo disparity block matching */ /* In this implementation instead of finding shifted image ,indices are mapped accordingly */ /* to save memory and some processing RGBA column major packed data is used as input for */ /* Compatibility with CUDA intrinsics Convolution is performed using separable filters (Horizontal and then Vertical) */ /* gpu code generation pragma */ /* Stereo disparity Parameters */ /* WIN_RAD is the radius of the window to be operated,min_disparity is the minimum disparity level */ /* the search continues max_disparity is the maximun disparity level the search continues */ /* Image 
dimensions for loop control */ /* The number of channels packed are 4 (RGBA) so as nChannels are 4 */ /* To store the raw differences */ /* To store the minimum cost */ /* Store the final disparity */ min_cost[temp_cost] = 99999999; out_disp[temp_cost] = 0; } } static __global__ __launch_bounds__(512, 1) void stereoDisparity_kernel2(const uint8_T *img1, const uint8_T *img0, int32_T d, int32_T *diff_img) { uint32_T threadId; int32_T ind_h; int32_T rowIdx; int32_T ind_w1; int32_T colIdx; int32_T ind_w2; int32_T tDiff; int32_T kk; int32_T temp_cost; ; ; threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) + blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) + threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x) + threadIdx.x; colIdx = (int32_T)(threadId / 300U); rowIdx = (int32_T)(threadId - (uint32_T)colIdx * 300U); if ((!(rowIdx >= 300)) && (!(colIdx >= 528))) { /* Row index calculation */ ind_h = rowIdx - 7; /* Column indices calculation for left image */ ind_w1 = colIdx - 7; /* Row indices calculation for right image */ ind_w2 = (colIdx + d) - 23; /* Border clamping for row Indices */ if (rowIdx - 7 <= 0) { ind_h = 1; } if (ind_h > 284) { ind_h = 284; } /* Border clamping for column indices for left image */ if (colIdx - 7 <= 0) { ind_w1 = 1; } if (ind_w1 > 512) { ind_w1 = 512; } /* Border clamping for column indices for right image */ if (ind_w2 <= 0) { ind_w2 = 1; } if (ind_w2 > 512) { ind_w2 = 512; } /* In this step, Sum of absolute Differences is performed */ /* across tour channels. 
*/ tDiff = 0; for (kk = 0; kk < 4; kk++) { temp_cost = (int32_T)img0[(((ind_h - 1) << 2) + kk) + 1136 * (ind_w1 - 1)] - (int32_T)img1[(((ind_h - 1) << 2) + kk) + 1136 * (ind_w2 - 1)]; if (temp_cost < 0) { temp_cost = -temp_cost; } if ((tDiff < 0) && (temp_cost < MIN_int32_T - tDiff)) { tDiff = MIN_int32_T; } else if ((tDiff > 0) && (temp_cost > MAX_int32_T - tDiff)) { tDiff = MAX_int32_T; } else { tDiff += temp_cost; } } /* Store the SAD cost into a matrix */ diff_img[rowIdx + 300 * colIdx] = tDiff; } } static __global__ __launch_bounds__(512, 1) void stereoDisparity_kernel3(int32_T *diff_img, int32_T *a) { int32_T temp_cost; ; ; temp_cost = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) + blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) + threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x) + threadIdx.x); if (!(temp_cost >= 158400)) { /* Aggregating the differences using separable convolution. Expect this to generate two Kernel */ /* using shared memory.The first kernel is the convolution with the horizontal kernel and second */ /* kernel operates on its output the column wise convolution. 
*/ a[temp_cost] = diff_img[temp_cost]; } } static __global__ __launch_bounds__(1024, 1) void stereoDisparity_kernel4 (int32_T *a, real_T *cost_v) { real_T cv; int32_T temp_cost; int32_T threadIdY; int32_T threadIdX; __shared__ int32_T a_shared[1536]; int32_T baseR; int32_T srow; int32_T strideRow; int32_T scol; int32_T strideCol; int32_T y_idx; int32_T baseC; int32_T x_idx; ; ; threadIdY = (int32_T)(blockDim.y * blockIdx.y + threadIdx.y); threadIdX = (int32_T)(blockDim.x * blockIdx.x + threadIdx.x); baseR = threadIdX; srow = (int32_T)threadIdx.x; strideRow = (int32_T)blockDim.x; scol = (int32_T)threadIdx.y; strideCol = (int32_T)blockDim.y; for (y_idx = srow; y_idx <= 31; y_idx += strideRow) { baseC = threadIdY; for (x_idx = scol; x_idx <= 47; x_idx += strideCol) { if ((baseR >= 0) && (baseR < 300) && ((baseC >= 0) && (baseC < 528))) { a_shared[y_idx + 32 * x_idx] = (int32_T)a[300 * baseC + baseR]; } else { a_shared[y_idx + 32 * x_idx] = 0; } baseC += strideCol; } baseR += strideRow; } __syncthreads(); if ((!(threadIdX >= 300)) && (!(threadIdY >= 512))) { cv = 0.0; for (temp_cost = 0; temp_cost < 17; temp_cost++) { cv += (real_T)a_shared[(int32_T)threadIdx.x + 32 * ((int32_T)threadIdx.y + ((temp_cost + threadIdY) - threadIdY))]; } cost_v[threadIdX + 300 * threadIdY] = cv; } } static __global__ __launch_bounds__(1024, 1) void stereoDisparity_kernel5(real_T *cost_v, real_T *cost) { real_T cv; int32_T temp_cost; int32_T threadIdY; int32_T threadIdX; __shared__ real_T cost_v_shared[1536]; int32_T baseR; int32_T srow; int32_T strideRow; int32_T scol; int32_T strideCol; int32_T y_idx; int32_T baseC; int32_T x_idx; ; ; threadIdY = (int32_T)(blockDim.y * blockIdx.y + threadIdx.y); threadIdX = (int32_T)(blockDim.x * blockIdx.x + threadIdx.x); baseR = threadIdX; srow = (int32_T)threadIdx.x; strideRow = (int32_T)blockDim.x; scol = (int32_T)threadIdx.y; strideCol = (int32_T)blockDim.y; for (y_idx = srow; y_idx <= 47; y_idx += strideRow) { baseC = threadIdY; for (x_idx = scol; 
x_idx <= 31; x_idx += strideCol) { if ((baseR >= 0) && (baseR < 300) && ((baseC >= 0) && (baseC < 512))) { cost_v_shared[y_idx + 48 * x_idx] = (real_T)cost_v[300 * baseC + baseR]; } else { cost_v_shared[y_idx + 48 * x_idx] = 0.0; } baseC += strideCol; } baseR += strideRow; } __syncthreads(); if ((!(threadIdX >= 284)) && (!(threadIdY >= 512))) { cv = 0.0; for (temp_cost = 0; temp_cost < 17; temp_cost++) { cv += cost_v_shared[((int32_T)threadIdx.x + ((temp_cost + threadIdX) - threadIdX)) + 48 * (int32_T)threadIdx.y]; } cost[threadIdX + 284 * threadIdY] = cv; } } static __global__ __launch_bounds__(512, 1) void stereoDisparity_kernel6(int32_T d, real_T *cost, int16_T *out_disp, int32_T *min_cost) { uint32_T threadId; real_T cv; int32_T kk; int32_T colIdx; int32_T temp_cost; ; ; threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) + blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) + threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x) + threadIdx.x; colIdx = (int32_T)(threadId / 284U); kk = (int32_T)(threadId - (uint32_T)colIdx * 284U); if ((!(kk >= 284)) && (!(colIdx >= 512))) { /* load the cost */ cv = cost[kk + 284 * colIdx]; if (cv < 2.147483648E+9) { if (cv >= -2.147483648E+9) { temp_cost = (int32_T)cv; } else { temp_cost = MIN_int32_T; } } else if (cv >= 2.147483648E+9) { temp_cost = MAX_int32_T; } else { temp_cost = 0; } /* compare against the minimum cost available and store the */ /* disparity value */ if (min_cost[kk + 284 * colIdx] > temp_cost) { min_cost[kk + 284 * colIdx] = temp_cost; out_disp[kk + 284 * colIdx] = (int16_T)((int32_T)fabs(-16.0 + (real_T)d) + 8); } } } void stereoDisparity(const uint8_T img0[581632], const uint8_T img1[581632], int16_T out_disp[145408]) { int32_T d; int16_T *gpu_out_disp; int32_T *gpu_min_cost; uint8_T *gpu_img1; uint8_T *gpu_img0; int32_T *gpu_diff_img; int32_T *gpu_a; real_T *gpu_cost_v; real_T *gpu_cost; boolean_T img1_dirtyOnCpu; boolean_T img0_dirtyOnCpu; 
cudaMalloc(&gpu_min_cost, 581632ULL); cudaMalloc(&gpu_out_disp, 290816ULL); cudaMalloc(&gpu_cost, 1163264ULL); cudaMalloc(&gpu_cost_v, 1228800ULL); cudaMalloc(&gpu_a, 633600ULL); cudaMalloc(&gpu_diff_img, 633600ULL); cudaMalloc(&gpu_img0, 581632ULL); cudaMalloc(&gpu_img1, 581632ULL); img1_dirtyOnCpu = true; img0_dirtyOnCpu = true; /* modified algorithm for stereo disparity block matching */ /* In this implementation instead of finding shifted image ,indices are mapped accordingly */ /* to save memory and some processing RGBA column major packed data is used as input for */ /* Compatibility with CUDA intrinsics Convolution is performed using separable filters (Horizontal and then Vertical) */ /* gpu code generation pragma */ /* Stereo disparity Parameters */ /* WIN_RAD is the radius of the window to be operated,min_disparity is the minimum disparity level */ /* the search continues max_disparity is the maximun disparity level the search continues */ /* Image dimensions for loop control */ /* The number of channels packed are 4 (RGBA) so as nChannels are 4 */ /* To store the raw differences */ /* To store the minimum cost */ /* Store the final disparity */ stereoDisparity_kernel1<<<dim3(284U, 1U, 1U), dim3(512U, 1U, 1U)>>> (gpu_out_disp, gpu_min_cost); /* Filters for aggregating the differences */ /* filter_h is the horizontal filter used in separable convolution */ /* filter_v is the vertical filter used in separable convolution which */ /* operates on the output of the row convolution */ /* Main Loop that runs for all the disparity levels. This loop is */ /* expected to run on CPU. */ for (d = 0; d < 17; d++) { /* Find the difference matrix for the current disparity level. Expect */ /* this to generate a Kernel function. 
*/ if (img1_dirtyOnCpu) { cudaMemcpy((void *)gpu_img1, (void *)&img1[0], 581632ULL, cudaMemcpyHostToDevice); img1_dirtyOnCpu = false; } if (img0_dirtyOnCpu) { cudaMemcpy((void *)gpu_img0, (void *)&img0[0], 581632ULL, cudaMemcpyHostToDevice); img0_dirtyOnCpu = false; } stereoDisparity_kernel2<<<dim3(310U, 1U, 1U), dim3(512U, 1U, 1U)>>>(gpu_img1, gpu_img0, d, gpu_diff_img); /* Aggregating the differences using separable convolution. Expect this to generate two Kernel */ /* using shared memory.The first kernel is the convolution with the horizontal kernel and second */ /* kernel operates on its output the column wise convolution. */ stereoDisparity_kernel3<<<dim3(310U, 1U, 1U), dim3(512U, 1U, 1U)>>> (gpu_diff_img, gpu_a); stereoDisparity_kernel4<<<dim3(10U, 16U, 1U), dim3(32U, 32U, 1U)>>>(gpu_a, gpu_cost_v); stereoDisparity_kernel5<<<dim3(9U, 16U, 1U), dim3(32U, 32U, 1U)>>> (gpu_cost_v, gpu_cost); /* This part updates the min_cost matrix with by comparing the values */ /* with current disparity level. */ stereoDisparity_kernel6<<<dim3(284U, 1U, 1U), dim3(512U, 1U, 1U)>>>(d, gpu_cost, gpu_out_disp, gpu_min_cost); } cudaMemcpy((void *)&out_disp[0], (void *)gpu_out_disp, 290816ULL, cudaMemcpyDeviceToHost); cudaFree(gpu_img1); cudaFree(gpu_img0); cudaFree(gpu_diff_img); cudaFree(gpu_a); cudaFree(gpu_cost_v); cudaFree(gpu_cost); cudaFree(gpu_out_disp); cudaFree(gpu_min_cost); } /* End of code generation (stereoDisparity.cu) */
480ab35ef378bca5111a4c8fbe15e76e46d91226.hip
// !!! This is a file automatically generated by hipify!!! #include "SlcImage.h" #include <iostream> #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <fcntl.h> #include <sys/mman.h> #include <hip/hip_complex.h> #include <assert.h> #include <rocblas.h> #include "hipError_t.h" #include <errno.h> #include <unistd.h> SlcImage::SlcImage() { fileid = -1; is_mapped = 0; is_opened = 0; height = 0; width = 0; } SlcImage::SlcImage(std::string fn, size_t h, size_t w) { filename = fn; width = w; height = h; is_mapped = 0; is_opened = 0; openFile(); buffersize = filesize; offset = 0l; openFile(); setupMmap(); } SlcImage::SlcImage(std::string fn, size_t h, size_t w, size_t bsize) { filename = fn; width = w; height = h; is_mapped = 0; is_opened = 0; buffersize = bsize*(1l<<30); //1G as a unit offset = 0l; openFile(); //std::cout << "buffer and file sizes" << buffersize << " " << filesize << std::endl; setupMmap(); } void SlcImage::setBufferSize(size_t sizeInG) { buffersize = sizeInG*(1l<<30); } void SlcImage::openFile() { if(!is_opened){ fileid = open(filename.c_str(), O_RDONLY, 0); if(fileid == -1) { fprintf(stderr, "Error opening file %s\n", filename.c_str()); exit(EXIT_FAILURE); } } struct stat st; stat(filename.c_str(), &st); filesize = st.st_size; //lseek(fileid,filesize-1,SEEK_SET); is_opened = 1; } void SlcImage::closeFile() { if(is_opened) { close(fileid); is_opened = 0; } } /* void SlcImage::setupMmap() { if(!is_mapped) { float2 *fmmap = (float2 *)mmap(NULL, filesize, PROT_READ, MAP_SHARED, fileid, 0); assert (fmmap != MAP_FAILED); mmapPtr = fmmap; is_mapped = 1; } }*/ void SlcImage::setupMmap() { if(is_opened) { if(!is_mapped) { void * fmmap; if((fmmap=mmap((caddr_t)0, buffersize, PROT_READ, MAP_SHARED, fileid, offset)) == MAP_FAILED) { fprintf(stderr, "mmap error: %d %d\n", fileid, errno); exit(1); } mmapPtr = (float2 *)fmmap; is_mapped = 1; } } else { fprintf(stderr, "error! 
file is not opened"); exit(1);} //fprintf(stderr, "debug mmap setup %ld, %ld\n", offset, buffersize); //fprintf(stderr, "starting mmap pixel %f %f\n", mmapPtr[0].x, mmapPtr[0].y); } void SlcImage::mUnMap() { if(is_mapped) { if(munmap((void *)mmapPtr, buffersize) == -1) { fprintf(stderr, "munmap error: %d\n", fileid); } is_mapped = 0; } } /// load a tile of data h_tile x w_tile from CPU (mmap) to GPU /// @param dArray pointer for array in device memory /// @param h_offset Down/Height offset /// @param w_offset Across/Width offset /// @param h_tile Down/Height tile size /// @param w_tile Across/Width tile size /// @param stream CUDA stream for copying void SlcImage::loadToDevice(float2 *dArray, size_t h_offset, size_t w_offset, size_t h_tile, size_t w_tile, hipStream_t stream) { size_t tileStartAddress = (h_offset*width + w_offset)*sizeof(float2); size_t tileLastAddress = tileStartAddress + (h_tile*width + w_tile)*sizeof(float2); size_t pagesize = getpagesize(); if(tileStartAddress < offset || tileLastAddress > offset + buffersize ) { size_t temp = tileStartAddress/pagesize; offset = temp*pagesize; mUnMap(); setupMmap(); } float2 *startPtr = mmapPtr ; startPtr += (tileStartAddress - offset)/sizeof(float2); // @note // We assume down/across directions as rows/cols. Therefore, SLC mmap and device array are both row major. // cuBlas assumes both source and target arrays are column major. 
// To use hipblasSetMatrix, we need to switch w_tile/h_tile for rows/cols // checkCudaErrors(hipblasSetMatrixAsync(w_tile, h_tile, sizeof(float2), startPtr, width, dArray, w_tile, stream)); checkCudaErrors(hipMemcpy2DAsync(dArray, w_tile*sizeof(float2), startPtr, width*sizeof(float2), w_tile*sizeof(float2), h_tile, hipMemcpyHostToDevice,stream)); } SlcImage::~SlcImage() { mUnMap(); closeFile(); } void SlcImage::testData() { float2 *test; test =(float2 *)malloc(10*sizeof(float2)); mempcpy(test, mmapPtr+1000000l, 10*sizeof(float2)); for(int i=0; i<10; i++) std::cout << test[i].x << " " << test[i].y << ","; std::cout << std::endl; }
480ab35ef378bca5111a4c8fbe15e76e46d91226.cu
#include "SlcImage.h" #include <iostream> #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <fcntl.h> #include <sys/mman.h> #include <cuComplex.h> #include <assert.h> #include <cublas_v2.h> #include "cudaError.h" #include <errno.h> #include <unistd.h> SlcImage::SlcImage() { fileid = -1; is_mapped = 0; is_opened = 0; height = 0; width = 0; } SlcImage::SlcImage(std::string fn, size_t h, size_t w) { filename = fn; width = w; height = h; is_mapped = 0; is_opened = 0; openFile(); buffersize = filesize; offset = 0l; openFile(); setupMmap(); } SlcImage::SlcImage(std::string fn, size_t h, size_t w, size_t bsize) { filename = fn; width = w; height = h; is_mapped = 0; is_opened = 0; buffersize = bsize*(1l<<30); //1G as a unit offset = 0l; openFile(); //std::cout << "buffer and file sizes" << buffersize << " " << filesize << std::endl; setupMmap(); } void SlcImage::setBufferSize(size_t sizeInG) { buffersize = sizeInG*(1l<<30); } void SlcImage::openFile() { if(!is_opened){ fileid = open(filename.c_str(), O_RDONLY, 0); if(fileid == -1) { fprintf(stderr, "Error opening file %s\n", filename.c_str()); exit(EXIT_FAILURE); } } struct stat st; stat(filename.c_str(), &st); filesize = st.st_size; //lseek(fileid,filesize-1,SEEK_SET); is_opened = 1; } void SlcImage::closeFile() { if(is_opened) { close(fileid); is_opened = 0; } } /* void SlcImage::setupMmap() { if(!is_mapped) { float2 *fmmap = (float2 *)mmap(NULL, filesize, PROT_READ, MAP_SHARED, fileid, 0); assert (fmmap != MAP_FAILED); mmapPtr = fmmap; is_mapped = 1; } }*/ void SlcImage::setupMmap() { if(is_opened) { if(!is_mapped) { void * fmmap; if((fmmap=mmap((caddr_t)0, buffersize, PROT_READ, MAP_SHARED, fileid, offset)) == MAP_FAILED) { fprintf(stderr, "mmap error: %d %d\n", fileid, errno); exit(1); } mmapPtr = (float2 *)fmmap; is_mapped = 1; } } else { fprintf(stderr, "error! 
file is not opened"); exit(1);} //fprintf(stderr, "debug mmap setup %ld, %ld\n", offset, buffersize); //fprintf(stderr, "starting mmap pixel %f %f\n", mmapPtr[0].x, mmapPtr[0].y); } void SlcImage::mUnMap() { if(is_mapped) { if(munmap((void *)mmapPtr, buffersize) == -1) { fprintf(stderr, "munmap error: %d\n", fileid); } is_mapped = 0; } } /// load a tile of data h_tile x w_tile from CPU (mmap) to GPU /// @param dArray pointer for array in device memory /// @param h_offset Down/Height offset /// @param w_offset Across/Width offset /// @param h_tile Down/Height tile size /// @param w_tile Across/Width tile size /// @param stream CUDA stream for copying void SlcImage::loadToDevice(float2 *dArray, size_t h_offset, size_t w_offset, size_t h_tile, size_t w_tile, cudaStream_t stream) { size_t tileStartAddress = (h_offset*width + w_offset)*sizeof(float2); size_t tileLastAddress = tileStartAddress + (h_tile*width + w_tile)*sizeof(float2); size_t pagesize = getpagesize(); if(tileStartAddress < offset || tileLastAddress > offset + buffersize ) { size_t temp = tileStartAddress/pagesize; offset = temp*pagesize; mUnMap(); setupMmap(); } float2 *startPtr = mmapPtr ; startPtr += (tileStartAddress - offset)/sizeof(float2); // @note // We assume down/across directions as rows/cols. Therefore, SLC mmap and device array are both row major. // cuBlas assumes both source and target arrays are column major. 
// To use cublasSetMatrix, we need to switch w_tile/h_tile for rows/cols // checkCudaErrors(cublasSetMatrixAsync(w_tile, h_tile, sizeof(float2), startPtr, width, dArray, w_tile, stream)); checkCudaErrors(cudaMemcpy2DAsync(dArray, w_tile*sizeof(float2), startPtr, width*sizeof(float2), w_tile*sizeof(float2), h_tile, cudaMemcpyHostToDevice,stream)); } SlcImage::~SlcImage() { mUnMap(); closeFile(); } void SlcImage::testData() { float2 *test; test =(float2 *)malloc(10*sizeof(float2)); mempcpy(test, mmapPtr+1000000l, 10*sizeof(float2)); for(int i=0; i<10; i++) std::cout << test[i].x << " " << test[i].y << ","; std::cout << std::endl; }
a65dd49e3cd5566b3d0d2fac33c024d2d93df43f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "permute.hpp" namespace Shadow { namespace Vision { __global__ void KernelPermute(const float* in_data, int count, int num_axes, const int* order, const int* old_steps, const int* new_steps, float* out_data) { CUDA_KERNEL_LOOP(globalid, count) { int old_idx = 0; int idx = globalid; for (int j = 0; j < num_axes; ++j) { old_idx += (idx / new_steps[j]) * old_steps[order[j]]; idx %= new_steps[j]; } out_data[globalid] = in_data[old_idx]; } } template <> void Permute<DeviceType::kGPU, float>(const float* in_data, int count, int num_axes, const int* order, const int* old_steps, const int* new_steps, float* out_data, Context* context) { hipLaunchKernelGGL(( KernelPermute), dim3(GetBlocks(count)), dim3(NumThreads), 0, hipStream_t(context->stream()), in_data, count, num_axes, order, old_steps, new_steps, out_data); CUDA_CHECK(hipPeekAtLastError()); } } // namespace Vision } // namespace Shadow namespace Shadow { REGISTER_OP_KERNEL_DEFAULT(PermuteGPU, PermuteKernelDefault<DeviceType::kGPU>); } // namespace Shadow
a65dd49e3cd5566b3d0d2fac33c024d2d93df43f.cu
#include "permute.hpp" namespace Shadow { namespace Vision { __global__ void KernelPermute(const float* in_data, int count, int num_axes, const int* order, const int* old_steps, const int* new_steps, float* out_data) { CUDA_KERNEL_LOOP(globalid, count) { int old_idx = 0; int idx = globalid; for (int j = 0; j < num_axes; ++j) { old_idx += (idx / new_steps[j]) * old_steps[order[j]]; idx %= new_steps[j]; } out_data[globalid] = in_data[old_idx]; } } template <> void Permute<DeviceType::kGPU, float>(const float* in_data, int count, int num_axes, const int* order, const int* old_steps, const int* new_steps, float* out_data, Context* context) { KernelPermute<<<GetBlocks(count), NumThreads, 0, cudaStream_t(context->stream())>>>( in_data, count, num_axes, order, old_steps, new_steps, out_data); CUDA_CHECK(cudaPeekAtLastError()); } } // namespace Vision } // namespace Shadow namespace Shadow { REGISTER_OP_KERNEL_DEFAULT(PermuteGPU, PermuteKernelDefault<DeviceType::kGPU>); } // namespace Shadow
feebcf8a1f01aed67171eae38cc9bcc4f19794bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> // CUDA kernel to add elements of two arrays __global__ void add(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory -- accessible from CPU or GPU hipMallocManaged(&x, N*sizeof(float)); hipMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Launch kernel on 1M elements on the GPU int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
feebcf8a1f01aed67171eae38cc9bcc4f19794bf.cu
#include <iostream> #include <math.h> // CUDA kernel to add elements of two arrays __global__ void add(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory -- accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Launch kernel on 1M elements on the GPU int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; add<<<numBlocks, blockSize>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
f43eadb35002c4a1deadae7ec2a4fb9f702d7783.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // Modified by Wei Liu // ------------------------------------------------------------------ #include <vector> #include "caffe/layers/smooth_l1_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out) { // f(x) = 0.5 * x^2 if |x| < 1 // |x| - 0.5 otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = 0.5 * val * val; } else { out[index] = abs_val - 0.5; } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_.mutable_gpu_data()); // d := b0 - b1 if (has_weights_) { caffe_gpu_mul( count, bottom[2]->gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data()); // d := w * (b0 - b1) } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SmoothL1Forward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diff_.gpu_data(), errors_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; Dtype loss; caffe_gpu_asum(count, errors_.gpu_data(), &loss); top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num(); } template <typename Dtype> __global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out) { // f'(x) = x if |x| < 1 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void 
SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int count = diff_.count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SmoothL1Backward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diff_.gpu_data(), diff_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); caffe_gpu_axpby( bottom[i]->count(), // count alpha, // alpha diff_.gpu_data(), // x Dtype(0), // beta bottom[i]->mutable_gpu_diff()); // y } } } INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer); } // namespace caffe
f43eadb35002c4a1deadae7ec2a4fb9f702d7783.cu
// ------------------------------------------------------------------ // Fast R-CNN // copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // Modified by Wei Liu // ------------------------------------------------------------------ #include <vector> #include "caffe/layers/smooth_l1_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out) { // f(x) = 0.5 * x^2 if |x| < 1 // |x| - 0.5 otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = 0.5 * val * val; } else { out[index] = abs_val - 0.5; } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_.mutable_gpu_data()); // d := b0 - b1 if (has_weights_) { caffe_gpu_mul( count, bottom[2]->gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data()); // d := w * (b0 - b1) } // NOLINT_NEXT_LINE(whitespace/operators) SmoothL1Forward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diff_.gpu_data(), errors_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; Dtype loss; caffe_gpu_asum(count, errors_.gpu_data(), &loss); top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num(); } template <typename Dtype> __global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out) { // f'(x) = x if |x| < 1 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) 
{ int count = diff_.count(); // NOLINT_NEXT_LINE(whitespace/operators) SmoothL1Backward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diff_.gpu_data(), diff_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); caffe_gpu_axpby( bottom[i]->count(), // count alpha, // alpha diff_.gpu_data(), // x Dtype(0), // beta bottom[i]->mutable_gpu_diff()); // y } } } INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer); } // namespace caffe
be04473fdaeeb4c2f97d019d4dd2787f87ed53ed.hip
// !!! This is a file automatically generated by hipify!!! #define MAIN_PROGRAM #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "rocblas.h" #include <time.h> #include <stdlib.h> #include <stdio.h> #include <assert.h> #include "cudaglobal.h" #include <math.h> #include "global.h" #include <getopt.h> #include <time.h> #include <assert.h> #include "dev_su3.h" #include <unistd.h> #include <sys/stat.h> #include <sys/types.h> extern "C" { #include "complex.h" #include "gauge_io.h" #include "rngs.h" #include "su3manip.h" #include "observables.h" #include "read_input.h" } # define CUDA_SAFE_CALL( call) { \ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error : %s.\n", hipGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } __device__ int dev_LX,dev_LY,dev_LZ,dev_T,dev_VOLUME; // the reduction fields for the global sums of the functional F, dAdA and maxdAdA __device__ double * dev_redfield_F; __device__ double * dev_redfield_dAdA; __device__ double * dev_redfield_maxdAdA; __device__ double * dev_redfield_plaq; __constant__ __device__ double sa_beta; __constant__ __device__ double therm_beta; /* include the cuda code files here, necessary, because nvcc does not support external calls, all cuda functions must be inlined*/ #include "dev_su3.cu" #include "overrelax.cu" #include "MersenneTwister.cu" #include "simulated_annealing.cu" #include "heatbath_thermalization.cu" extern int read_gf_ildg(su3 gf[], char* filename); void initnn(){ int t,x,y,z,pos, count; count=0; for(t=0;t<T;t++){ for(z=0; z<LZ; z++){ for(y=0; y<LY; y++){ for(x=0; x<LX; x++){ pos= x + LX*(y + LY*(z + LZ*t)); ind[count] = pos; //plus direction nn[8*pos+3] = x + LX*(y + LY*(z + LZ*((t+1)%T))); nn[8*pos+2] = x + LX*(y + LY*((z+1)%LZ + LZ*t)); nn[8*pos+1] = x + LX*((y+1)%LY + LY*(z + LZ*t)); nn[8*pos+0] = (x+1)%LX + LX*(y + LY*(z + LZ*t)); //minus direction if(t==0){ nn[8*pos+7] = x + LX*(y + LY*(z + LZ*((T-1)))); } else{ nn[8*pos+7] = x + LX*(y + LY*(z + LZ*((t-1)))); 
} if(z==0){ nn[8*pos+6] = x + LX*(y + LY*((LZ-1) + LZ*t)); } else{ nn[8*pos+6] = x + LX*(y + LY*((z-1) + LZ*t)); } if(y==0){ nn[8*pos+5] = x + LX*((LY-1) + LY*(z + LZ*t)); } else{ nn[8*pos+5] = x + LX*((y-1) + LY*(z + LZ*t)); } if(x==0){ nn[8*pos+4] = (LX-1) + LX*(y + LY*(z + LZ*t)); } else{ nn[8*pos+4] = (x-1) + LX*(y + LY*(z + LZ*t)); } count++; } } } } } //initialize nearest-neighbour table for gpu with even-odd enabled //init_nn must have been called before for initialization of nn void initnn_eo(){ int x,y,z,t,index,nnpos,j, count; int evenpos=0; int oddpos=0; // here we initialize the conversion field lexic2eo evenpos=0; oddpos=0; count=0; for(t=0;t<T;t++){ for(z=0;z<LZ;z++){ for(y=0;y<LY;y++){ for(x=0;x<LX;x++){ if( ((x+y+z+t) %2)==0){ lexic2eo[count] = evenpos; evenpos++; } else{ lexic2eo[count] = oddpos; oddpos++; } count++; } } } } evenpos=0; oddpos=0; count=0; for(t=0;t<T;t++){ for(z=0;z<LZ;z++){ for(y=0;y<LY;y++){ for(x=0;x<LX;x++){ index = ind[count]; if(((t+x+y+z)%2 == 0)){ nnpos = lexic2eo[index]; for(j=0;j<4;j++){ nn_eo[8*nnpos+j] = lexic2eo[ nn[8*index+j] ]; } for(j=0;j<4;j++){ nn_eo[8*nnpos+4+j] = lexic2eo[ nn[8*index+4+j] ]; } eoidx_even[evenpos] = index; evenpos++; } else{ nnpos = lexic2eo[index]; for(j=0;j<4;j++){ nn_oe[8*nnpos+j] = lexic2eo[ nn[8*index+j] ]; } for(j=0;j<4;j++){ nn_oe[8*nnpos+4+j] = lexic2eo[ nn[8*index+4+j] ]; } eoidx_odd[oddpos] = index; oddpos++; } count++; } } } } } void init_gaugefixing(su3* gf, su3* trafo){ hipError_t cudaerr; // the gauge field #ifdef GF_8 /* allocate 8 doubles of gf = 4*4*VOLUME double2's*/ size_t dev_gfsize = 4*4*VOLUME * sizeof(dev_su3_8); #else /* allocate 2 rows of gf = 6*4*VOLUME double2's*/ size_t dev_gfsize = 6*4*VOLUME * sizeof(dev_su3_2v); #endif if((cudaerr=hipMalloc((void **) &dev_gf, dev_gfsize)) != hipSuccess){ printf("Error in init_mixedsolve(): Memory allocation of gauge field failed. 
Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated gauge field on device\n"); } #ifdef USETEXTURE /* if((cudaerr=hipMalloc((void **) &dev_gf2, dev_gfsize)) != hipSuccess){ printf("Error in init_mixedsolve(): Memory allocation of gauge field failed. Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated gauge field 2 on device\n"); } */ #endif #ifdef GF_8 h2d_gf = (dev_su3_8 *)malloc(dev_gfsize); // Allocate conversion gf on host su3to8(gf,h2d_gf); #else h2d_gf = (dev_su3_2v *)malloc(dev_gfsize); // Allocate conversion gf on host su3to2v(gf,h2d_gf); #endif hipMemcpy(dev_gf, h2d_gf, dev_gfsize, hipMemcpyHostToDevice); // the trafo fields #ifdef GF_8 /* allocate 8 doubles of trafo = 4*VOLUME double2's*/ dev_gfsize = 4*VOLUME * sizeof(dev_su3_8); #else /* allocate 2 rows of gf = 6*VOLUME double2's*/ dev_gfsize = 6*VOLUME * sizeof(dev_su3_2v); #endif if((cudaerr=hipMalloc((void **) &dev_trafo1, dev_gfsize)) != hipSuccess){ printf("Error in init_mixedsolve(): Memory allocation of trafo field failed. Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated trafo field 1 on device\n"); } #ifdef USETEXTURE //we only need a second trafo field, if we use textures as texture fields are read-only! if((cudaerr=hipMalloc((void **) &dev_trafo2, dev_gfsize)) != hipSuccess){ printf("Error in init_mixedsolve(): Memory allocation of trafo field failed. 
Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated trafo field 2 on device\n"); } #endif #ifdef GF_8 h2d_trafo = (dev_su3_8 *)malloc(dev_gfsize); su3to8_trafo(trafo,h2d_trafo); #else h2d_trafo = (dev_su3_2v *)malloc(dev_gfsize); su3to2v_trafo(trafo,h2d_trafo); #endif hipMemcpy(dev_trafo1, h2d_trafo, dev_gfsize, hipMemcpyHostToDevice); #ifdef USETEXTURE hipMemcpy(dev_trafo2, h2d_trafo, dev_gfsize, hipMemcpyHostToDevice); #endif //grid size_t nnsize = 8*VOLUME*sizeof(int); nn = (int *) malloc(nnsize); hipMalloc((void **) &dev_nn, nnsize); size_t indsize = VOLUME*sizeof(int); ind = (int *) malloc(indsize); lexic2eo = (int *) malloc(indsize); // nearest neighbours EO size_t nnsize_evenodd = (size_t)8*VOLUME/2*sizeof(int); nn_oe = (int *) malloc(nnsize_evenodd); hipMalloc((void **) &dev_nn_oe, nnsize_evenodd); nn_eo = (int *) malloc(nnsize_evenodd); hipMalloc((void **) &dev_nn_eo, nnsize_evenodd); // index EO size_t indsize_evenodd = (size_t)VOLUME/2*sizeof(int); eoidx_even = (int *) malloc(indsize_evenodd); hipMalloc((void **) &dev_eoidx_even, indsize_evenodd); eoidx_odd = (int *) malloc(indsize_evenodd); hipMalloc((void **) &dev_eoidx_odd, indsize_evenodd); initnn(); initnn_eo(); //shownn(); //showcompare_gf(T-1, LX-1, LY-1, LZ-1, 3); // copy to device index arrays hipMemcpy(dev_nn, nn, nnsize, hipMemcpyHostToDevice); hipMemcpy(dev_nn_eo, nn_eo, nnsize_evenodd, hipMemcpyHostToDevice); hipMemcpy(dev_nn_oe, nn_oe, nnsize_evenodd, hipMemcpyHostToDevice); hipMemcpy(dev_eoidx_even, eoidx_even, indsize_evenodd, hipMemcpyHostToDevice); hipMemcpy(dev_eoidx_odd, eoidx_odd, indsize_evenodd, hipMemcpyHostToDevice); output_size = LZ*T*sizeof(double); // parallel in t and z direction hipMalloc((void **) &dev_output, output_size); // output array double * host_output = (double*) malloc(output_size); int grid[5]; grid[0]=LX; grid[1]=LY; grid[2]=LZ; grid[3]=T; grid[4]=VOLUME; hipMalloc((void **) &dev_grid, (size_t)(5*sizeof(int))); hipMemcpy(dev_grid, 
&(grid[0]), (size_t)(5*sizeof(int)), hipMemcpyHostToDevice); //init grid hipLaunchKernelGGL(( dev_gfix_init), dim3(1), dim3(1) , 0, 0, dev_grid); //reduction field for functional if(VOLUME%BLOCK != 0){ printf("Error: VOLUME is not a multiple of BLOCK. Aborting...\n"); exit(100); } int redfieldsize = VOLUME/BLOCK; printf("VOLUME/BLOCK = %d\n", VOLUME/BLOCK); hipMalloc((void **) &dev_redfield_F, redfieldsize*sizeof(double)); if((redfield_F = (double*)malloc(redfieldsize*sizeof(double)))==(void*)NULL){ fprintf(stderr,"Error in init_gaugefixing: malloc error(F)\n"); } hipMalloc((void **) &dev_redfield_dAdA, redfieldsize*sizeof(double)); if((redfield_dAdA = (double*)malloc(redfieldsize*sizeof(double)))==(void*)NULL){ fprintf(stderr,"Error in init_gaugefixing: malloc error(dAdA)\n"); } hipMalloc((void **) &dev_redfield_maxdAdA, redfieldsize*sizeof(double)); if((redfield_maxdAdA = (double*)malloc(redfieldsize*sizeof(double)))==(void*)NULL){ fprintf(stderr,"Error in init_gaugefixing: malloc error(maxdAdA)\n"); } hipMalloc((void **) &dev_redfield_plaq, T*sizeof(double)); if((redfield_plaq = (double*)malloc(T*sizeof(double)))==(void*)NULL){ fprintf(stderr,"Error in init_gaugefixing: malloc error(plaq)\n"); } printf("%s\n", hipGetErrorString(hipGetLastError())); } void finalize_gaugefixing(){ hipFree(dev_gf); hipFree(dev_trafo1); #ifdef USETEXTURE //hipFree(dev_gf2); hipFree(dev_trafo2); #endif hipFree(dev_grid); hipFree(dev_output); hipFree(dev_nn); hipFree(dev_redfield_F); hipFree(dev_redfield_dAdA); hipFree(dev_redfield_maxdAdA); hipFree(dev_redfield_plaq); hipFree(dev_nn_eo); hipFree(dev_nn_oe); hipFree(dev_eoidx_even); hipFree(dev_eoidx_odd); free(h2d_gf); free(h2d_trafo); free(redfield_F); free(redfield_dAdA); free(redfield_maxdAdA); free(redfield_plaq); free(nn); free(nn_eo); free(nn_oe); free(eoidx_even); free(eoidx_odd); free(lexic2eo); free(ind); } void init_thermalization(su3* gf){ hipError_t cudaerr; // the gauge field #ifdef GF_8 /* allocate 8 doubles of gf = 
4*4*VOLUME double2's*/ size_t dev_gfsize = 4*4*VOLUME * sizeof(dev_su3_8); #else /* allocate 2 rows of gf = 6*4*VOLUME double2's*/ size_t dev_gfsize = 6*4*VOLUME * sizeof(dev_su3_2v); #endif if((cudaerr=hipMalloc((void **) &dev_gf, dev_gfsize)) != hipSuccess){ printf("Error in init_mixedsolve(): Memory allocation of gauge field failed. Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated gauge field on device\n"); } if((cudaerr=hipMalloc((void **) &dev_gf2, dev_gfsize)) != hipSuccess){ printf("Error in init_mixedsolve(): Memory allocation of gauge field failed. Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated gauge field 2 on device\n"); } #ifdef GF_8 h2d_gf = (dev_su3_8 *)malloc(dev_gfsize); // Allocate conversion gf on host su3to8(gf,h2d_gf); #else h2d_gf = (dev_su3_2v *)malloc(dev_gfsize); // Allocate conversion gf on host su3to2v(gf,h2d_gf); #endif hipMemcpy(dev_gf, h2d_gf, dev_gfsize, hipMemcpyHostToDevice); hipMemcpy(dev_gf2, h2d_gf, dev_gfsize, hipMemcpyHostToDevice); // the staples field; we do EVEN/ODD update, so we only need half the gauge field size #ifdef GF_8 /* allocate 8 doubles of gf = 4*4*VOLUME/2 double2's*/ dev_gfsize = 4*4*VOLUME/2 * sizeof(dev_su3_8); #else /* allocate 2 rows of gf = 6*4*VOLUME/2 double2's*/ dev_gfsize = 6*4*VOLUME/2 * sizeof(dev_su3_2v); #endif if((cudaerr=hipMalloc((void **) &dev_staples, dev_gfsize)) != hipSuccess){ printf("Error in init_mixedsolve(): Memory allocation of staple field failed. 
Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated staple field on device\n"); } //grid size_t nnsize = 8*VOLUME*sizeof(int); nn = (int *) malloc(nnsize); hipMalloc((void **) &dev_nn, nnsize); size_t indsize = VOLUME*sizeof(int); ind = (int *) malloc(indsize); lexic2eo = (int *) malloc(indsize); // nearest neighbours EO size_t nnsize_evenodd = (size_t)8*VOLUME/2*sizeof(int); nn_oe = (int *) malloc(nnsize_evenodd); hipMalloc((void **) &dev_nn_oe, nnsize_evenodd); nn_eo = (int *) malloc(nnsize_evenodd); hipMalloc((void **) &dev_nn_eo, nnsize_evenodd); // index EO size_t indsize_evenodd = (size_t)VOLUME/2*sizeof(int); eoidx_even = (int *) malloc(indsize_evenodd); hipMalloc((void **) &dev_eoidx_even, indsize_evenodd); eoidx_odd = (int *) malloc(indsize_evenodd); hipMalloc((void **) &dev_eoidx_odd, indsize_evenodd); initnn(); initnn_eo(); //shownn(); //showcompare_gf(T-1, LX-1, LY-1, LZ-1, 3); // copy to device index arrays hipMemcpy(dev_nn, nn, nnsize, hipMemcpyHostToDevice); hipMemcpy(dev_nn_eo, nn_eo, nnsize_evenodd, hipMemcpyHostToDevice); hipMemcpy(dev_nn_oe, nn_oe, nnsize_evenodd, hipMemcpyHostToDevice); hipMemcpy(dev_eoidx_even, eoidx_even, indsize_evenodd, hipMemcpyHostToDevice); hipMemcpy(dev_eoidx_odd, eoidx_odd, indsize_evenodd, hipMemcpyHostToDevice); output_size = LZ*T*sizeof(double); // parallel in t and z direction hipMalloc((void **) &dev_output, output_size); // output array double * host_output = (double*) malloc(output_size); int grid[5]; grid[0]=LX; grid[1]=LY; grid[2]=LZ; grid[3]=T; grid[4]=VOLUME; hipMalloc((void **) &dev_grid, (size_t)(5*sizeof(int))); hipMemcpy(dev_grid, &(grid[0]), (size_t)(5*sizeof(int)), hipMemcpyHostToDevice); //init grid hipLaunchKernelGGL(( dev_gfix_init), dim3(1), dim3(1) , 0, 0, dev_grid); //reduction field for functional if(VOLUME%BLOCK != 0){ printf("Error: VOLUME is not a multiple of BLOCK. 
Aborting...\n"); exit(100); } hipMalloc((void **) &dev_redfield_plaq, T*sizeof(double)); if((redfield_plaq = (double*)malloc(T*sizeof(double)))==(void*)NULL){ fprintf(stderr,"Error in init_gaugefixing: malloc error(plaq)\n"); } printf("%s\n", hipGetErrorString(hipGetLastError())); } void finalize_thermalization(){ hipFree(dev_gf); hipFree(dev_staples); hipFree(dev_grid); hipFree(dev_output); hipFree(dev_nn); hipFree(dev_redfield_plaq); hipFree(dev_nn_eo); hipFree(dev_nn_oe); hipFree(dev_eoidx_even); hipFree(dev_eoidx_odd); free(h2d_gf); free(redfield_plaq); free(nn); free(nn_eo); free(nn_oe); free(eoidx_even); free(eoidx_odd); free(lexic2eo); free(ind); } void intro(){ fprintf(stdout, "\n"); fprintf(stdout, "######## This is cudagfx https://github.com/kpetrov/cudagfx ########\n"); fprintf(stdout, "######## a program to fix lattice Landau gauge ########\n"); fprintf(stdout, "######## original code: Florian Burger ########\n\n\n"); fprintf(stdout, "######## updated and maintained: Konstantin Petrov const.petrov@gmail.com ########\n\n\n"); } void usage() { fprintf(stdout, "Code to compute Landau gauge on gauge field\n"); fprintf(stdout, "Usage: cudagaugefix -i [inputfile] -f [gaugefile]\n"); exit(0); } int main(int argc, char *argv[]){ int ret; double F,dada; double plaq; int c; int gfDEVICE; int gridsize; if(VOLUME%BLOCK != 0){ printf("Error: VOLUME is not a multiple of BLOCK. 
Aborting...\n"); exit(100); } dim3 blockdim(BLOCK,1,1); if( VOLUME >= BLOCK){ gridsize =VOLUME/BLOCK; } else{ gridsize=1; } dim3 griddim(gridsize,1,1); char inputfilename[100]; char gaugefilename[100]; char fixedgaugename[100]; intro(); gfDEVICE=0; //by default, use card number 0 while ((c = getopt(argc, argv, "h?:i:d:f:")) != -1) { switch (c) { case 'i': strcpy ( &(inputfilename[0]) , optarg ); printf("The input file is: %s\n", &(inputfilename[0])); break; case 'd': gfDEVICE=atoi(optarg); break; case 'f': strcpy ( &(gaugefilename[0]) , optarg ); strcpy ( &(fixedgaugename[0]) , "landau_" ); strcat ( &(fixedgaugename[0]) , optarg ); ; printf("The gauge file is: %s\n", &(gaugefilename[0])); printf("The fixed gauge file is: %s\n", &(fixedgaugename[0])); break; case 'h': case '?': default: usage(); break; } } printf("setting device to %d\n", gfDEVICE); fflush(stdout); hipSetDevice(gfDEVICE); int deVice; hipGetDevice(&deVice); printf("set device to %d\n", deVice); printf("%s\n", hipGetErrorString(hipGetLastError())); read_input(&(inputfilename[0])); printf("LX = %d, LY = %d, LZ = %d, T = %d\n", LX, LY, LZ, T); g_gf = (su3*) malloc(4*VOLUME*sizeof(su3)); trafo1 = (su3*) malloc(VOLUME*sizeof(su3)); trafo2 = (su3*) malloc(VOLUME*sizeof(su3)); if ( read_gf_ildg(g_gf, &(gaugefilename[0]))!=0) {printf("Error reading configuration from %s\n",&(gaugefilename[0]) ); exit(1); } struct stat st; if(stat(&(fixedgaugename[0]),&st) == 0) {printf(" output file %s is present, exiting...\n", &(fixedgaugename[0])); exit (-1); } printf("Setting random seed to %d\n", randseed); PlantSeeds(randseed); if(thermflag==1){ if(thermparam.startcond==0){ unit_init_gauge(g_gf); } else{ random_init_gauge(g_gf); } init_thermalization(g_gf); init_MT(4*VOLUME/2, 4*4*VOLUME/2); // we need 4 sets of (1/4) (gauss/unif) numbers // for 4 links per site plaq = calc_plaquette(dev_gf,1); printf("%s\n", hipGetErrorString(hipGetLastError())); thermalize_gauge(); finalize_thermalization(); } else{ 
//unit_init_trafo(trafo1); random_init_trafo(trafo1); init_gaugefixing(g_gf, trafo1); init_MT(VOLUME/2, 4*VOLUME/2); // need one gauss rnd and 4 unif. rnd for all lattice points //calculate plaquette plaq = calc_plaquette(dev_gf,1); printf("%s\n", hipGetErrorString(hipGetLastError())); F = gauge_functional(g_gf); dada = dAdA(g_gf); printf("HOST FUNC = %.16e\tHOST dAdA = %.16e\n", F, dada); //small benchmark //benchmark(); //exit(100); //end small benchmark // do the simulated annealing if(saflag==1){ printf("Starting simulated annealing...\n"); printf("Tmin = %f, Tmax = %f, N = %d, expo = %f\n", saparam.Tmin, saparam.Tmax, saparam.N, saparam.expo); simannealing_gauge(); } // do the overrelaxation if(orxflag==1){ printf("Starting overrelaxation...\n"); ret = overrelax_gauge(orxmaxit, orxeps, orxcheckinterval); if(ret < 0){ printf("Gauge condition not reached. Aborting...\n"); finalize_gaugefixing(); free(trafo1); free(trafo2); free(g_gf); exit(300); } } #ifdef USETEXTURE // apply the trafo dev_gf -> dev_gf2 /* bind_texture_trafo(dev_trafo1); dev_apply_trafo<<< griddim, blockdim >>> (dev_gf2, dev_gf, dev_trafo1, dev_nn); unbind_texture_trafo(); */ bind_texture_gf(dev_gf); plaq = calc_plaquette(dev_gf,1); unbind_texture_gf(); #else // apply the trafo dev_gf -> dev_gf (only one field on GPU) /* this does not work yet dev_apply_trafo<<< griddim, blockdim >>> (dev_gf, dev_gf, dev_trafo1, dev_nn); */ plaq = calc_plaquette(dev_gf,1); #endif printf("%s\n", hipGetErrorString(hipGetLastError())); // Copy to Host Mem: //trafo printf("Transferring back to host...\n"); printf("Applying trafo on host...\n"); #ifdef GF_8 size_t dev_gfsize = 4*VOLUME * sizeof(dev_su3_8); hipMemcpy(h2d_trafo, dev_trafo1, dev_gfsize, hipMemcpyDeviceToHost); from8tosu3_trafo(trafo1, h2d_trafo); #else size_t dev_gfsize = 6*VOLUME * sizeof(dev_su3_2v); hipMemcpy(h2d_trafo, dev_trafo1, dev_gfsize, hipMemcpyDeviceToHost); from2vtosu3_trafo(trafo1, h2d_trafo); #endif g_trafo(g_gf, trafo1); plaq = 
mean_plaq(g_gf); PLAQ = plaq; dada = dAdA(g_gf); DADA = dada; F = gauge_functional(g_gf); FUNC = F; printf("Final HOST values:\n"); printf("PLAQ = %.16f\n", PLAQ); printf("F = %.16e \t dAdA = %.16e\t max(dAdA) = %.16e\n", FUNC, DADA, maxDADA); printf("Writing out the gauge fixed field ..."); ret = write_gf_ildg(g_gf, &(fixedgaugename[0]), 64); if(ret!=0){ fprintf(stderr, "Error writing gauge field. Aborting...\n"); exit(400); } printf("done.\n"); //gf //dev_gfsize = 6*4*VOLUME * sizeof(dev_su3_2v); //hipMemcpy(h2d_gf, dev_gf, dev_gfsize, hipMemcpyDeviceToHost); finalize_gaugefixing(); } free(trafo1); free(trafo2); free(g_gf); }
be04473fdaeeb4c2f97d019d4dd2787f87ed53ed.cu
#define MAIN_PROGRAM #include <cuda.h> #include <cuda_runtime.h> #include "cublas.h" #include <time.h> #include <stdlib.h> #include <stdio.h> #include <assert.h> #include "cudaglobal.h" #include <math.h> #include "global.h" #include <getopt.h> #include <time.h> #include <assert.h> #include "dev_su3.h" #include <unistd.h> #include <sys/stat.h> #include <sys/types.h> extern "C" { #include "complex.h" #include "gauge_io.h" #include "rngs.h" #include "su3manip.h" #include "observables.h" #include "read_input.h" } # define CUDA_SAFE_CALL( call) { \ cudaError err = call; \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error : %s.\n", cudaGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } __device__ int dev_LX,dev_LY,dev_LZ,dev_T,dev_VOLUME; // the reduction fields for the global sums of the functional F, dAdA and maxdAdA __device__ double * dev_redfield_F; __device__ double * dev_redfield_dAdA; __device__ double * dev_redfield_maxdAdA; __device__ double * dev_redfield_plaq; __constant__ __device__ double sa_beta; __constant__ __device__ double therm_beta; /* include the cuda code files here, necessary, because nvcc does not support external calls, all cuda functions must be inlined*/ #include "dev_su3.cu" #include "overrelax.cu" #include "MersenneTwister.cu" #include "simulated_annealing.cu" #include "heatbath_thermalization.cu" extern int read_gf_ildg(su3 gf[], char* filename); void initnn(){ int t,x,y,z,pos, count; count=0; for(t=0;t<T;t++){ for(z=0; z<LZ; z++){ for(y=0; y<LY; y++){ for(x=0; x<LX; x++){ pos= x + LX*(y + LY*(z + LZ*t)); ind[count] = pos; //plus direction nn[8*pos+3] = x + LX*(y + LY*(z + LZ*((t+1)%T))); nn[8*pos+2] = x + LX*(y + LY*((z+1)%LZ + LZ*t)); nn[8*pos+1] = x + LX*((y+1)%LY + LY*(z + LZ*t)); nn[8*pos+0] = (x+1)%LX + LX*(y + LY*(z + LZ*t)); //minus direction if(t==0){ nn[8*pos+7] = x + LX*(y + LY*(z + LZ*((T-1)))); } else{ nn[8*pos+7] = x + LX*(y + LY*(z + LZ*((t-1)))); } if(z==0){ nn[8*pos+6] = x + LX*(y + LY*((LZ-1) + LZ*t)); } else{ 
nn[8*pos+6] = x + LX*(y + LY*((z-1) + LZ*t)); } if(y==0){ nn[8*pos+5] = x + LX*((LY-1) + LY*(z + LZ*t)); } else{ nn[8*pos+5] = x + LX*((y-1) + LY*(z + LZ*t)); } if(x==0){ nn[8*pos+4] = (LX-1) + LX*(y + LY*(z + LZ*t)); } else{ nn[8*pos+4] = (x-1) + LX*(y + LY*(z + LZ*t)); } count++; } } } } } //initialize nearest-neighbour table for gpu with even-odd enabled //init_nn must have been called before for initialization of nn void initnn_eo(){ int x,y,z,t,index,nnpos,j, count; int evenpos=0; int oddpos=0; // here we initialize the conversion field lexic2eo evenpos=0; oddpos=0; count=0; for(t=0;t<T;t++){ for(z=0;z<LZ;z++){ for(y=0;y<LY;y++){ for(x=0;x<LX;x++){ if( ((x+y+z+t) %2)==0){ lexic2eo[count] = evenpos; evenpos++; } else{ lexic2eo[count] = oddpos; oddpos++; } count++; } } } } evenpos=0; oddpos=0; count=0; for(t=0;t<T;t++){ for(z=0;z<LZ;z++){ for(y=0;y<LY;y++){ for(x=0;x<LX;x++){ index = ind[count]; if(((t+x+y+z)%2 == 0)){ nnpos = lexic2eo[index]; for(j=0;j<4;j++){ nn_eo[8*nnpos+j] = lexic2eo[ nn[8*index+j] ]; } for(j=0;j<4;j++){ nn_eo[8*nnpos+4+j] = lexic2eo[ nn[8*index+4+j] ]; } eoidx_even[evenpos] = index; evenpos++; } else{ nnpos = lexic2eo[index]; for(j=0;j<4;j++){ nn_oe[8*nnpos+j] = lexic2eo[ nn[8*index+j] ]; } for(j=0;j<4;j++){ nn_oe[8*nnpos+4+j] = lexic2eo[ nn[8*index+4+j] ]; } eoidx_odd[oddpos] = index; oddpos++; } count++; } } } } } void init_gaugefixing(su3* gf, su3* trafo){ cudaError_t cudaerr; // the gauge field #ifdef GF_8 /* allocate 8 doubles of gf = 4*4*VOLUME double2's*/ size_t dev_gfsize = 4*4*VOLUME * sizeof(dev_su3_8); #else /* allocate 2 rows of gf = 6*4*VOLUME double2's*/ size_t dev_gfsize = 6*4*VOLUME * sizeof(dev_su3_2v); #endif if((cudaerr=cudaMalloc((void **) &dev_gf, dev_gfsize)) != cudaSuccess){ printf("Error in init_mixedsolve(): Memory allocation of gauge field failed. 
Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated gauge field on device\n"); } #ifdef USETEXTURE /* if((cudaerr=cudaMalloc((void **) &dev_gf2, dev_gfsize)) != cudaSuccess){ printf("Error in init_mixedsolve(): Memory allocation of gauge field failed. Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated gauge field 2 on device\n"); } */ #endif #ifdef GF_8 h2d_gf = (dev_su3_8 *)malloc(dev_gfsize); // Allocate conversion gf on host su3to8(gf,h2d_gf); #else h2d_gf = (dev_su3_2v *)malloc(dev_gfsize); // Allocate conversion gf on host su3to2v(gf,h2d_gf); #endif cudaMemcpy(dev_gf, h2d_gf, dev_gfsize, cudaMemcpyHostToDevice); // the trafo fields #ifdef GF_8 /* allocate 8 doubles of trafo = 4*VOLUME double2's*/ dev_gfsize = 4*VOLUME * sizeof(dev_su3_8); #else /* allocate 2 rows of gf = 6*VOLUME double2's*/ dev_gfsize = 6*VOLUME * sizeof(dev_su3_2v); #endif if((cudaerr=cudaMalloc((void **) &dev_trafo1, dev_gfsize)) != cudaSuccess){ printf("Error in init_mixedsolve(): Memory allocation of trafo field failed. Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated trafo field 1 on device\n"); } #ifdef USETEXTURE //we only need a second trafo field, if we use textures as texture fields are read-only! if((cudaerr=cudaMalloc((void **) &dev_trafo2, dev_gfsize)) != cudaSuccess){ printf("Error in init_mixedsolve(): Memory allocation of trafo field failed. 
Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated trafo field 2 on device\n"); } #endif #ifdef GF_8 h2d_trafo = (dev_su3_8 *)malloc(dev_gfsize); su3to8_trafo(trafo,h2d_trafo); #else h2d_trafo = (dev_su3_2v *)malloc(dev_gfsize); su3to2v_trafo(trafo,h2d_trafo); #endif cudaMemcpy(dev_trafo1, h2d_trafo, dev_gfsize, cudaMemcpyHostToDevice); #ifdef USETEXTURE cudaMemcpy(dev_trafo2, h2d_trafo, dev_gfsize, cudaMemcpyHostToDevice); #endif //grid size_t nnsize = 8*VOLUME*sizeof(int); nn = (int *) malloc(nnsize); cudaMalloc((void **) &dev_nn, nnsize); size_t indsize = VOLUME*sizeof(int); ind = (int *) malloc(indsize); lexic2eo = (int *) malloc(indsize); // nearest neighbours EO size_t nnsize_evenodd = (size_t)8*VOLUME/2*sizeof(int); nn_oe = (int *) malloc(nnsize_evenodd); cudaMalloc((void **) &dev_nn_oe, nnsize_evenodd); nn_eo = (int *) malloc(nnsize_evenodd); cudaMalloc((void **) &dev_nn_eo, nnsize_evenodd); // index EO size_t indsize_evenodd = (size_t)VOLUME/2*sizeof(int); eoidx_even = (int *) malloc(indsize_evenodd); cudaMalloc((void **) &dev_eoidx_even, indsize_evenodd); eoidx_odd = (int *) malloc(indsize_evenodd); cudaMalloc((void **) &dev_eoidx_odd, indsize_evenodd); initnn(); initnn_eo(); //shownn(); //showcompare_gf(T-1, LX-1, LY-1, LZ-1, 3); // copy to device index arrays cudaMemcpy(dev_nn, nn, nnsize, cudaMemcpyHostToDevice); cudaMemcpy(dev_nn_eo, nn_eo, nnsize_evenodd, cudaMemcpyHostToDevice); cudaMemcpy(dev_nn_oe, nn_oe, nnsize_evenodd, cudaMemcpyHostToDevice); cudaMemcpy(dev_eoidx_even, eoidx_even, indsize_evenodd, cudaMemcpyHostToDevice); cudaMemcpy(dev_eoidx_odd, eoidx_odd, indsize_evenodd, cudaMemcpyHostToDevice); output_size = LZ*T*sizeof(double); // parallel in t and z direction cudaMalloc((void **) &dev_output, output_size); // output array double * host_output = (double*) malloc(output_size); int grid[5]; grid[0]=LX; grid[1]=LY; grid[2]=LZ; grid[3]=T; grid[4]=VOLUME; cudaMalloc((void **) &dev_grid, (size_t)(5*sizeof(int))); 
cudaMemcpy(dev_grid, &(grid[0]), (size_t)(5*sizeof(int)), cudaMemcpyHostToDevice); //init grid dev_gfix_init<<< 1, 1 >>> (dev_grid); //reduction field for functional if(VOLUME%BLOCK != 0){ printf("Error: VOLUME is not a multiple of BLOCK. Aborting...\n"); exit(100); } int redfieldsize = VOLUME/BLOCK; printf("VOLUME/BLOCK = %d\n", VOLUME/BLOCK); cudaMalloc((void **) &dev_redfield_F, redfieldsize*sizeof(double)); if((redfield_F = (double*)malloc(redfieldsize*sizeof(double)))==(void*)NULL){ fprintf(stderr,"Error in init_gaugefixing: malloc error(F)\n"); } cudaMalloc((void **) &dev_redfield_dAdA, redfieldsize*sizeof(double)); if((redfield_dAdA = (double*)malloc(redfieldsize*sizeof(double)))==(void*)NULL){ fprintf(stderr,"Error in init_gaugefixing: malloc error(dAdA)\n"); } cudaMalloc((void **) &dev_redfield_maxdAdA, redfieldsize*sizeof(double)); if((redfield_maxdAdA = (double*)malloc(redfieldsize*sizeof(double)))==(void*)NULL){ fprintf(stderr,"Error in init_gaugefixing: malloc error(maxdAdA)\n"); } cudaMalloc((void **) &dev_redfield_plaq, T*sizeof(double)); if((redfield_plaq = (double*)malloc(T*sizeof(double)))==(void*)NULL){ fprintf(stderr,"Error in init_gaugefixing: malloc error(plaq)\n"); } printf("%s\n", cudaGetErrorString(cudaGetLastError())); } void finalize_gaugefixing(){ cudaFree(dev_gf); cudaFree(dev_trafo1); #ifdef USETEXTURE //cudaFree(dev_gf2); cudaFree(dev_trafo2); #endif cudaFree(dev_grid); cudaFree(dev_output); cudaFree(dev_nn); cudaFree(dev_redfield_F); cudaFree(dev_redfield_dAdA); cudaFree(dev_redfield_maxdAdA); cudaFree(dev_redfield_plaq); cudaFree(dev_nn_eo); cudaFree(dev_nn_oe); cudaFree(dev_eoidx_even); cudaFree(dev_eoidx_odd); free(h2d_gf); free(h2d_trafo); free(redfield_F); free(redfield_dAdA); free(redfield_maxdAdA); free(redfield_plaq); free(nn); free(nn_eo); free(nn_oe); free(eoidx_even); free(eoidx_odd); free(lexic2eo); free(ind); } void init_thermalization(su3* gf){ cudaError_t cudaerr; // the gauge field #ifdef GF_8 /* allocate 8 doubles of 
gf = 4*4*VOLUME double2's*/ size_t dev_gfsize = 4*4*VOLUME * sizeof(dev_su3_8); #else /* allocate 2 rows of gf = 6*4*VOLUME double2's*/ size_t dev_gfsize = 6*4*VOLUME * sizeof(dev_su3_2v); #endif if((cudaerr=cudaMalloc((void **) &dev_gf, dev_gfsize)) != cudaSuccess){ printf("Error in init_mixedsolve(): Memory allocation of gauge field failed. Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated gauge field on device\n"); } if((cudaerr=cudaMalloc((void **) &dev_gf2, dev_gfsize)) != cudaSuccess){ printf("Error in init_mixedsolve(): Memory allocation of gauge field failed. Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated gauge field 2 on device\n"); } #ifdef GF_8 h2d_gf = (dev_su3_8 *)malloc(dev_gfsize); // Allocate conversion gf on host su3to8(gf,h2d_gf); #else h2d_gf = (dev_su3_2v *)malloc(dev_gfsize); // Allocate conversion gf on host su3to2v(gf,h2d_gf); #endif cudaMemcpy(dev_gf, h2d_gf, dev_gfsize, cudaMemcpyHostToDevice); cudaMemcpy(dev_gf2, h2d_gf, dev_gfsize, cudaMemcpyHostToDevice); // the staples field; we do EVEN/ODD update, so we only need half the gauge field size #ifdef GF_8 /* allocate 8 doubles of gf = 4*4*VOLUME/2 double2's*/ dev_gfsize = 4*4*VOLUME/2 * sizeof(dev_su3_8); #else /* allocate 2 rows of gf = 6*4*VOLUME/2 double2's*/ dev_gfsize = 6*4*VOLUME/2 * sizeof(dev_su3_2v); #endif if((cudaerr=cudaMalloc((void **) &dev_staples, dev_gfsize)) != cudaSuccess){ printf("Error in init_mixedsolve(): Memory allocation of staple field failed. 
Aborting...\n"); exit(200); } // Allocate array on device else{ printf("Allocated staple field on device\n"); } //grid size_t nnsize = 8*VOLUME*sizeof(int); nn = (int *) malloc(nnsize); cudaMalloc((void **) &dev_nn, nnsize); size_t indsize = VOLUME*sizeof(int); ind = (int *) malloc(indsize); lexic2eo = (int *) malloc(indsize); // nearest neighbours EO size_t nnsize_evenodd = (size_t)8*VOLUME/2*sizeof(int); nn_oe = (int *) malloc(nnsize_evenodd); cudaMalloc((void **) &dev_nn_oe, nnsize_evenodd); nn_eo = (int *) malloc(nnsize_evenodd); cudaMalloc((void **) &dev_nn_eo, nnsize_evenodd); // index EO size_t indsize_evenodd = (size_t)VOLUME/2*sizeof(int); eoidx_even = (int *) malloc(indsize_evenodd); cudaMalloc((void **) &dev_eoidx_even, indsize_evenodd); eoidx_odd = (int *) malloc(indsize_evenodd); cudaMalloc((void **) &dev_eoidx_odd, indsize_evenodd); initnn(); initnn_eo(); //shownn(); //showcompare_gf(T-1, LX-1, LY-1, LZ-1, 3); // copy to device index arrays cudaMemcpy(dev_nn, nn, nnsize, cudaMemcpyHostToDevice); cudaMemcpy(dev_nn_eo, nn_eo, nnsize_evenodd, cudaMemcpyHostToDevice); cudaMemcpy(dev_nn_oe, nn_oe, nnsize_evenodd, cudaMemcpyHostToDevice); cudaMemcpy(dev_eoidx_even, eoidx_even, indsize_evenodd, cudaMemcpyHostToDevice); cudaMemcpy(dev_eoidx_odd, eoidx_odd, indsize_evenodd, cudaMemcpyHostToDevice); output_size = LZ*T*sizeof(double); // parallel in t and z direction cudaMalloc((void **) &dev_output, output_size); // output array double * host_output = (double*) malloc(output_size); int grid[5]; grid[0]=LX; grid[1]=LY; grid[2]=LZ; grid[3]=T; grid[4]=VOLUME; cudaMalloc((void **) &dev_grid, (size_t)(5*sizeof(int))); cudaMemcpy(dev_grid, &(grid[0]), (size_t)(5*sizeof(int)), cudaMemcpyHostToDevice); //init grid dev_gfix_init<<< 1, 1 >>> (dev_grid); //reduction field for functional if(VOLUME%BLOCK != 0){ printf("Error: VOLUME is not a multiple of BLOCK. 
Aborting...\n"); exit(100); } cudaMalloc((void **) &dev_redfield_plaq, T*sizeof(double)); if((redfield_plaq = (double*)malloc(T*sizeof(double)))==(void*)NULL){ fprintf(stderr,"Error in init_gaugefixing: malloc error(plaq)\n"); } printf("%s\n", cudaGetErrorString(cudaGetLastError())); } void finalize_thermalization(){ cudaFree(dev_gf); cudaFree(dev_staples); cudaFree(dev_grid); cudaFree(dev_output); cudaFree(dev_nn); cudaFree(dev_redfield_plaq); cudaFree(dev_nn_eo); cudaFree(dev_nn_oe); cudaFree(dev_eoidx_even); cudaFree(dev_eoidx_odd); free(h2d_gf); free(redfield_plaq); free(nn); free(nn_eo); free(nn_oe); free(eoidx_even); free(eoidx_odd); free(lexic2eo); free(ind); } void intro(){ fprintf(stdout, "\n"); fprintf(stdout, "######## This is cudagfx https://github.com/kpetrov/cudagfx ########\n"); fprintf(stdout, "######## a program to fix lattice Landau gauge ########\n"); fprintf(stdout, "######## original code: Florian Burger ########\n\n\n"); fprintf(stdout, "######## updated and maintained: Konstantin Petrov const.petrov@gmail.com ########\n\n\n"); } void usage() { fprintf(stdout, "Code to compute Landau gauge on gauge field\n"); fprintf(stdout, "Usage: cudagaugefix -i [inputfile] -f [gaugefile]\n"); exit(0); } int main(int argc, char *argv[]){ int ret; double F,dada; double plaq; int c; int gfDEVICE; int gridsize; if(VOLUME%BLOCK != 0){ printf("Error: VOLUME is not a multiple of BLOCK. 
Aborting...\n"); exit(100); } dim3 blockdim(BLOCK,1,1); if( VOLUME >= BLOCK){ gridsize =VOLUME/BLOCK; } else{ gridsize=1; } dim3 griddim(gridsize,1,1); char inputfilename[100]; char gaugefilename[100]; char fixedgaugename[100]; intro(); gfDEVICE=0; //by default, use card number 0 while ((c = getopt(argc, argv, "h?:i:d:f:")) != -1) { switch (c) { case 'i': strcpy ( &(inputfilename[0]) , optarg ); printf("The input file is: %s\n", &(inputfilename[0])); break; case 'd': gfDEVICE=atoi(optarg); break; case 'f': strcpy ( &(gaugefilename[0]) , optarg ); strcpy ( &(fixedgaugename[0]) , "landau_" ); strcat ( &(fixedgaugename[0]) , optarg ); ; printf("The gauge file is: %s\n", &(gaugefilename[0])); printf("The fixed gauge file is: %s\n", &(fixedgaugename[0])); break; case 'h': case '?': default: usage(); break; } } printf("setting device to %d\n", gfDEVICE); fflush(stdout); cudaSetDevice(gfDEVICE); int deVice; cudaGetDevice(&deVice); printf("set device to %d\n", deVice); printf("%s\n", cudaGetErrorString(cudaGetLastError())); read_input(&(inputfilename[0])); printf("LX = %d, LY = %d, LZ = %d, T = %d\n", LX, LY, LZ, T); g_gf = (su3*) malloc(4*VOLUME*sizeof(su3)); trafo1 = (su3*) malloc(VOLUME*sizeof(su3)); trafo2 = (su3*) malloc(VOLUME*sizeof(su3)); if ( read_gf_ildg(g_gf, &(gaugefilename[0]))!=0) {printf("Error reading configuration from %s\n",&(gaugefilename[0]) ); exit(1); } struct stat st; if(stat(&(fixedgaugename[0]),&st) == 0) {printf(" output file %s is present, exiting...\n", &(fixedgaugename[0])); exit (-1); } printf("Setting random seed to %d\n", randseed); PlantSeeds(randseed); if(thermflag==1){ if(thermparam.startcond==0){ unit_init_gauge(g_gf); } else{ random_init_gauge(g_gf); } init_thermalization(g_gf); init_MT(4*VOLUME/2, 4*4*VOLUME/2); // we need 4 sets of (1/4) (gauss/unif) numbers // for 4 links per site plaq = calc_plaquette(dev_gf,1); printf("%s\n", cudaGetErrorString(cudaGetLastError())); thermalize_gauge(); finalize_thermalization(); } else{ 
//unit_init_trafo(trafo1); random_init_trafo(trafo1); init_gaugefixing(g_gf, trafo1); init_MT(VOLUME/2, 4*VOLUME/2); // need one gauss rnd and 4 unif. rnd for all lattice points //calculate plaquette plaq = calc_plaquette(dev_gf,1); printf("%s\n", cudaGetErrorString(cudaGetLastError())); F = gauge_functional(g_gf); dada = dAdA(g_gf); printf("HOST FUNC = %.16e\tHOST dAdA = %.16e\n", F, dada); //small benchmark //benchmark(); //exit(100); //end small benchmark // do the simulated annealing if(saflag==1){ printf("Starting simulated annealing...\n"); printf("Tmin = %f, Tmax = %f, N = %d, expo = %f\n", saparam.Tmin, saparam.Tmax, saparam.N, saparam.expo); simannealing_gauge(); } // do the overrelaxation if(orxflag==1){ printf("Starting overrelaxation...\n"); ret = overrelax_gauge(orxmaxit, orxeps, orxcheckinterval); if(ret < 0){ printf("Gauge condition not reached. Aborting...\n"); finalize_gaugefixing(); free(trafo1); free(trafo2); free(g_gf); exit(300); } } #ifdef USETEXTURE // apply the trafo dev_gf -> dev_gf2 /* bind_texture_trafo(dev_trafo1); dev_apply_trafo<<< griddim, blockdim >>> (dev_gf2, dev_gf, dev_trafo1, dev_nn); unbind_texture_trafo(); */ bind_texture_gf(dev_gf); plaq = calc_plaquette(dev_gf,1); unbind_texture_gf(); #else // apply the trafo dev_gf -> dev_gf (only one field on GPU) /* this does not work yet dev_apply_trafo<<< griddim, blockdim >>> (dev_gf, dev_gf, dev_trafo1, dev_nn); */ plaq = calc_plaquette(dev_gf,1); #endif printf("%s\n", cudaGetErrorString(cudaGetLastError())); // Copy to Host Mem: //trafo printf("Transferring back to host...\n"); printf("Applying trafo on host...\n"); #ifdef GF_8 size_t dev_gfsize = 4*VOLUME * sizeof(dev_su3_8); cudaMemcpy(h2d_trafo, dev_trafo1, dev_gfsize, cudaMemcpyDeviceToHost); from8tosu3_trafo(trafo1, h2d_trafo); #else size_t dev_gfsize = 6*VOLUME * sizeof(dev_su3_2v); cudaMemcpy(h2d_trafo, dev_trafo1, dev_gfsize, cudaMemcpyDeviceToHost); from2vtosu3_trafo(trafo1, h2d_trafo); #endif g_trafo(g_gf, trafo1); plaq = 
mean_plaq(g_gf); PLAQ = plaq; dada = dAdA(g_gf); DADA = dada; F = gauge_functional(g_gf); FUNC = F; printf("Final HOST values:\n"); printf("PLAQ = %.16f\n", PLAQ); printf("F = %.16e \t dAdA = %.16e\t max(dAdA) = %.16e\n", FUNC, DADA, maxDADA); printf("Writing out the gauge fixed field ..."); ret = write_gf_ildg(g_gf, &(fixedgaugename[0]), 64); if(ret!=0){ fprintf(stderr, "Error writing gauge field. Aborting...\n"); exit(400); } printf("done.\n"); //gf //dev_gfsize = 6*4*VOLUME * sizeof(dev_su3_2v); //cudaMemcpy(h2d_gf, dev_gf, dev_gfsize, cudaMemcpyDeviceToHost); finalize_gaugefixing(); } free(trafo1); free(trafo2); free(g_gf); }
a1eb3999f80bb2c4a0ce67584b2db166c5c6ac67.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * _reg_resampling_kernels.cu * * * Created by Marc Modat on 24/03/2009. * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_RESAMPLING_KERNELS_CU #define _REG_RESAMPLING_KERNELS_CU #include "stdio.h" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> texture<float, 3, hipReadModeElementType> sourceTexture; texture<float4, 1, hipReadModeElementType> sourceMatrixTexture; texture<float4, 1, hipReadModeElementType> positionFieldTexture; texture<int, 1, hipReadModeElementType> maskTexture; /* *************************************************************** */ __device__ __constant__ int3 c_SourceDim; __device__ __constant__ int c_VoxelNumber; __device__ __constant__ float c_PaddingValue; __device__ __constant__ int c_ActiveVoxelNumber; /* *************************************************************** */ /* *************************************************************** */ __global__ void reg_resampleSourceImage_kernel(float *resultArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ //Get the real world position in the source space float4 realPosition = tex1Dfetch(positionFieldTexture,tid); //Get the voxel-based position in the source space float3 voxelPosition; float4 matrix = tex1Dfetch(sourceMatrixTexture,0); voxelPosition.x = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; matrix = tex1Dfetch(sourceMatrixTexture,1); voxelPosition.y = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; matrix = tex1Dfetch(sourceMatrixTexture,2); voxelPosition.z = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; int3 sourceImageSize = c_SourceDim; float3 relativePosition; 
relativePosition.x=(voxelPosition.x+0.5f)/(float)sourceImageSize.x; relativePosition.y=(voxelPosition.y+0.5f)/(float)sourceImageSize.y; relativePosition.z=(voxelPosition.z+0.5f)/(float)sourceImageSize.z; if( relativePosition.x>=0.0f && relativePosition.x<=1.0f && relativePosition.y>=0.0f && relativePosition.y<=1.0f && relativePosition.z>=0.0f && relativePosition.z<=1.0f ){ resultArray[tex1Dfetch(maskTexture,tid)]=tex3D(sourceTexture, relativePosition.x, relativePosition.y, relativePosition.z); } else resultArray[tex1Dfetch(maskTexture,tid)]=c_PaddingValue; } } /* *************************************************************** */ /* *************************************************************** */ __global__ void reg_getSourceImageGradient_kernel(float4 *gradientArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ //Get the real world position in the source space //const int index=tex1Dfetch(maskTexture,tid); float4 realPosition = tex1Dfetch(positionFieldTexture,tid); //Get the voxel-based position in the source space float3 voxelPosition; float4 matrix = tex1Dfetch(sourceMatrixTexture,0); voxelPosition.x = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; matrix = tex1Dfetch(sourceMatrixTexture,1); voxelPosition.y = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; matrix = tex1Dfetch(sourceMatrixTexture,2); voxelPosition.z = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; int3 sourceImageSize = c_SourceDim; if( 0.0f<=voxelPosition.x && voxelPosition.x<=float(sourceImageSize.x-1) && 0.0f<=voxelPosition.y && voxelPosition.y<=float(sourceImageSize.y-1) && 0.0f<=voxelPosition.z && voxelPosition.z<=float(sourceImageSize.z-1)){ int3 voxel; voxel.x = (int)(voxelPosition.x); voxel.y = (int)(voxelPosition.y); voxel.z = (int)(voxelPosition.z); float xBasis[2]; float relative = fabsf(voxelPosition.x - 
(float)voxel.x); xBasis[0]=1.0f-relative; xBasis[1]=relative; float yBasis[2]; relative = fabsf(voxelPosition.y - (float)voxel.y); yBasis[0]=1.0f-relative; yBasis[1]=relative; float zBasis[2]; relative = fabsf(voxelPosition.z - (float)voxel.z); zBasis[0]=1.0f-relative; zBasis[1]=relative; float deriv[2]; deriv[0]=-1.0f; deriv[1]=1.0f; float4 gradientValue=make_float4(0.0f, 0.0f, 0.0f, 0.0f); float3 relativePosition; for(short c=0; c<2; c++){ relativePosition.z=((float)voxel.z+(float)c+0.5f)/(float)c_SourceDim.z; float3 tempValueY=make_float3(0.0f, 0.0f, 0.0f); for(short b=0; b<2; b++){ float2 tempValueX=make_float2(0.0f, 0.0f); relativePosition.y=((float)voxel.y+(float)b+0.5f)/(float)c_SourceDim.y; for(short a=0; a<2; a++){ relativePosition.x=((float)voxel.x+(float)a+0.5f)/(float)c_SourceDim.x; float intensity=tex3D(sourceTexture, relativePosition.x, relativePosition.y, relativePosition.z); tempValueX.x += intensity * deriv[a]; tempValueX.y += intensity * xBasis[a]; } tempValueY.x += tempValueX.x * yBasis[b]; tempValueY.y += tempValueX.y * deriv[b]; tempValueY.z += tempValueX.y * yBasis[b]; } gradientValue.x += tempValueY.x * zBasis[c]; gradientValue.y += tempValueY.y * zBasis[c]; gradientValue.z += tempValueY.z * deriv[c]; } gradientArray[tid]=gradientValue; } else gradientArray[tid]=make_float4(0.0f, 0.0f, 0.0f, 0.0f); } } /* *************************************************************** */ /* *************************************************************** */ __global__ void init(unsigned int seed, hiprandState_t* states) { /* we have to initialize the state */ hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ threadIdx.x, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! 
*/ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[threadIdx.x]); } __global__ void reg_randomsamplingMask_kernel (hiprandState_t* states,int *mask,int samples,int activeVoxelNumber,unsigned int seed) { const int tid= (blockIdx.x)*blockDim.x+threadIdx.x; if (tid < samples) { hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ tid, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[tid]); mask[tid]= hiprand(&states[threadIdx.x]) % (activeVoxelNumber); } return; } #endif
a1eb3999f80bb2c4a0ce67584b2db166c5c6ac67.cu
/* * _reg_resampling_kernels.cu * * * Created by Marc Modat on 24/03/2009. * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_RESAMPLING_KERNELS_CU #define _REG_RESAMPLING_KERNELS_CU #include "stdio.h" #include <curand.h> #include <curand_kernel.h> texture<float, 3, cudaReadModeElementType> sourceTexture; texture<float4, 1, cudaReadModeElementType> sourceMatrixTexture; texture<float4, 1, cudaReadModeElementType> positionFieldTexture; texture<int, 1, cudaReadModeElementType> maskTexture; /* *************************************************************** */ __device__ __constant__ int3 c_SourceDim; __device__ __constant__ int c_VoxelNumber; __device__ __constant__ float c_PaddingValue; __device__ __constant__ int c_ActiveVoxelNumber; /* *************************************************************** */ /* *************************************************************** */ __global__ void reg_resampleSourceImage_kernel(float *resultArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ //Get the real world position in the source space float4 realPosition = tex1Dfetch(positionFieldTexture,tid); //Get the voxel-based position in the source space float3 voxelPosition; float4 matrix = tex1Dfetch(sourceMatrixTexture,0); voxelPosition.x = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; matrix = tex1Dfetch(sourceMatrixTexture,1); voxelPosition.y = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; matrix = tex1Dfetch(sourceMatrixTexture,2); voxelPosition.z = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; int3 sourceImageSize = c_SourceDim; float3 relativePosition; relativePosition.x=(voxelPosition.x+0.5f)/(float)sourceImageSize.x; 
relativePosition.y=(voxelPosition.y+0.5f)/(float)sourceImageSize.y; relativePosition.z=(voxelPosition.z+0.5f)/(float)sourceImageSize.z; if( relativePosition.x>=0.0f && relativePosition.x<=1.0f && relativePosition.y>=0.0f && relativePosition.y<=1.0f && relativePosition.z>=0.0f && relativePosition.z<=1.0f ){ resultArray[tex1Dfetch(maskTexture,tid)]=tex3D(sourceTexture, relativePosition.x, relativePosition.y, relativePosition.z); } else resultArray[tex1Dfetch(maskTexture,tid)]=c_PaddingValue; } } /* *************************************************************** */ /* *************************************************************** */ __global__ void reg_getSourceImageGradient_kernel(float4 *gradientArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ //Get the real world position in the source space //const int index=tex1Dfetch(maskTexture,tid); float4 realPosition = tex1Dfetch(positionFieldTexture,tid); //Get the voxel-based position in the source space float3 voxelPosition; float4 matrix = tex1Dfetch(sourceMatrixTexture,0); voxelPosition.x = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; matrix = tex1Dfetch(sourceMatrixTexture,1); voxelPosition.y = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; matrix = tex1Dfetch(sourceMatrixTexture,2); voxelPosition.z = matrix.x*realPosition.x + matrix.y*realPosition.y + matrix.z*realPosition.z + matrix.w; int3 sourceImageSize = c_SourceDim; if( 0.0f<=voxelPosition.x && voxelPosition.x<=float(sourceImageSize.x-1) && 0.0f<=voxelPosition.y && voxelPosition.y<=float(sourceImageSize.y-1) && 0.0f<=voxelPosition.z && voxelPosition.z<=float(sourceImageSize.z-1)){ int3 voxel; voxel.x = (int)(voxelPosition.x); voxel.y = (int)(voxelPosition.y); voxel.z = (int)(voxelPosition.z); float xBasis[2]; float relative = fabsf(voxelPosition.x - (float)voxel.x); xBasis[0]=1.0f-relative; xBasis[1]=relative; float 
yBasis[2]; relative = fabsf(voxelPosition.y - (float)voxel.y); yBasis[0]=1.0f-relative; yBasis[1]=relative; float zBasis[2]; relative = fabsf(voxelPosition.z - (float)voxel.z); zBasis[0]=1.0f-relative; zBasis[1]=relative; float deriv[2]; deriv[0]=-1.0f; deriv[1]=1.0f; float4 gradientValue=make_float4(0.0f, 0.0f, 0.0f, 0.0f); float3 relativePosition; for(short c=0; c<2; c++){ relativePosition.z=((float)voxel.z+(float)c+0.5f)/(float)c_SourceDim.z; float3 tempValueY=make_float3(0.0f, 0.0f, 0.0f); for(short b=0; b<2; b++){ float2 tempValueX=make_float2(0.0f, 0.0f); relativePosition.y=((float)voxel.y+(float)b+0.5f)/(float)c_SourceDim.y; for(short a=0; a<2; a++){ relativePosition.x=((float)voxel.x+(float)a+0.5f)/(float)c_SourceDim.x; float intensity=tex3D(sourceTexture, relativePosition.x, relativePosition.y, relativePosition.z); tempValueX.x += intensity * deriv[a]; tempValueX.y += intensity * xBasis[a]; } tempValueY.x += tempValueX.x * yBasis[b]; tempValueY.y += tempValueX.y * deriv[b]; tempValueY.z += tempValueX.y * yBasis[b]; } gradientValue.x += tempValueY.x * zBasis[c]; gradientValue.y += tempValueY.y * zBasis[c]; gradientValue.z += tempValueY.z * deriv[c]; } gradientArray[tid]=gradientValue; } else gradientArray[tid]=make_float4(0.0f, 0.0f, 0.0f, 0.0f); } } /* *************************************************************** */ /* *************************************************************** */ __global__ void init(unsigned int seed, curandState_t* states) { /* we have to initialize the state */ curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ threadIdx.x, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! 
*/ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[threadIdx.x]); } __global__ void reg_randomsamplingMask_kernel (curandState_t* states,int *mask,int samples,int activeVoxelNumber,unsigned int seed) { const int tid= (blockIdx.x)*blockDim.x+threadIdx.x; if (tid < samples) { curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ tid, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[tid]); mask[tid]= curand(&states[threadIdx.x]) % (activeVoxelNumber); } return; } #endif
3077b86b29a2ae75ef3b2081be40d5b4a8a5f296.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "polyaurn.cuh" #include "assert.h" #include "error.cuh" namespace gpulda { __device__ __forceinline__ f32 draw_poisson(f32 u, f32 beta, u32 n, f32** prob, u32** alias, u32 max_lambda, u32 max_value) { // MUST be defined in this file to compile on all platforms // if below cutoff, draw using Alias table if(n < max_lambda) { // determine the slot and update random number f32 mv = (f32) max_value; u32 slot = (u32) (u * mv); u = fmodf(u, __frcp_rz(mv)) * mv; // load table elements from global memory f32 thread_prob = prob[n][slot]; u32 thread_alias = alias[n][slot]; // return the resulting draw if(u < thread_prob) { return (f32) slot; } else { return (f32) thread_alias; } } // if didn't return, draw using Gaussian approximation if(u == 1.0f || u == 0.0f) { u = 0.5f; // prevent overflow edge cases } f32 mu = beta + ((f32) n); return normcdfinvf(u) * sqrtf(mu) + mu; } __device__ __forceinline__ f32 block_reduce_sum(f32* block_sum, f32 thread_sum) { // first, perform a warp reduce for(i32 offset = warpSize/2; offset > 0; offset /= 2) { thread_sum += __shfl_down(thread_sum, offset); } // then, add result to shared memory if(threadIdx.x % warpSize == 0) { atomicAdd(block_sum, thread_sum); } // ensure all threads finish writing __syncthreads(); // return new value to all threads return block_sum[0]; } __global__ void polya_urn_init(u32* n, u32* C, u32 K, f32 beta, u32 V, f32** prob, u32** alias, u32 max_lambda, u32 max_value, hiprandStatePhilox4_32_10_t* rng) { // initialize variables hiprandStatePhilox4_32_10_t thread_rng = rng[0]; skipahead((unsigned long long int) blockIdx.x*blockDim.x + threadIdx.x, &thread_rng); // loop over array and draw samples for(i32 offset = 0; offset < V / blockDim.x + 1; ++offset) { i32 col = threadIdx.x + offset * blockDim.x; i32 array_idx = col + V * blockIdx.x; if(col < V) { // draw n_k ~ Pois(C/K + beta) f32 u = hiprand_uniform(&thread_rng); f32 
pois = draw_poisson(u, beta, C[col] / gridDim.x/*=K*/, prob, alias, max_lambda, max_value); n[array_idx] = (u32) pois; } } } __global__ void polya_urn_sample(f32* Phi, u32* n, f32 beta, u32 V, f32** prob, u32** alias, u32 max_lambda, u32 max_value, hiprandStatePhilox4_32_10_t* rng) { // initialize variables hiprandStatePhilox4_32_10_t thread_rng = rng[0]; skipahead((unsigned long long int) blockIdx.x*blockDim.x + threadIdx.x, &thread_rng); f32 thread_sum = 0.0f; __shared__ f32 block_sum[1]; if(threadIdx.x == 0) { block_sum[0] = 0.0f; } __syncthreads(); // loop over array and draw samples for(i32 offset = 0; offset < V / blockDim.x + 1; ++offset) { i32 col = threadIdx.x + offset * blockDim.x; i32 array_idx = col + V * blockIdx.x; if(col < V) { f32 u = hiprand_uniform(&thread_rng); f32 pois = draw_poisson(u, beta, n[array_idx], prob, alias, max_lambda, max_value); Phi[array_idx] = pois; thread_sum += pois; } } // add up thread sums, synchronize, and broadcast thread_sum = block_reduce_sum(block_sum, thread_sum); // normalize draws for(i32 offset = 0; offset < V / blockDim.x + 1; ++offset) { i32 col = threadIdx.x + offset * blockDim.x; i32 array_idx = col + V * blockIdx.x; if(col < V) { Phi[array_idx] /= thread_sum; } } } void polya_urn_transpose(hipStream_t* stream, f32* Phi, f32* Phi_temp, u32 K, u32 V, hipblasHandle_t* handle, f32* d_zero, f32* d_one) { hipMemcpyAsync(Phi_temp, Phi, V * K * sizeof(f32), hipMemcpyDeviceToDevice, *stream) >> GPULDA_CHECK; hipblasSetStream(*handle, *stream) >> GPULDA_CHECK; // hipblasSgeam(*handle, HIPBLAS_OP_T, HIPBLAS_OP_N, K, V, d_one, Phi_temp, V, d_zero, Phi, K, Phi, K) >> GPULDA_CHECK; } __global__ void polya_urn_reset(u32* n, u32 V) { for(i32 offset = 0; offset < V / blockDim.x + 1; ++offset) { i32 col = threadIdx.x + offset * blockDim.x; i32 array_idx = col + V * blockIdx.x; if(col < V) { n[array_idx] = 0; } } } __global__ void polya_urn_colsums(f32* Phi, f32* sigma_a, f32 alpha, f32** prob, u32 K) { // initilize variables // 
initialize variables f32 thread_sum = 0.0f; __shared__ f32 block_sum[1]; if(threadIdx.x == 0) { block_sum[0] = 0.0f; } __syncthreads(); // loop over array and compute column sums for(i32 offset = 0; offset < K / blockDim.x + 1; ++offset) { i32 row = threadIdx.x + offset * blockDim.x; i32 array_idx = row + K * blockIdx.x; if(row < K) { thread_sum += Phi[array_idx]; } } // add up thread sums, synchronize, and broadcast thread_sum = block_reduce_sum(block_sum, thread_sum); // set sigma_a if(threadIdx.x == 0) { sigma_a[blockIdx.x] = alpha * thread_sum; } // compute and set alias table probabilities for(i32 offset = 0; offset < K / blockDim.x + 1; ++offset) { i32 row = threadIdx.x + offset * blockDim.x; i32 array_idx = row + K * blockIdx.x; if(row < K) { prob[blockIdx.x][row] = Phi[array_idx] / thread_sum; } } } }
3077b86b29a2ae75ef3b2081be40d5b4a8a5f296.cu
#include "polyaurn.cuh" #include "assert.h" #include "error.cuh" namespace gpulda { __device__ __forceinline__ f32 draw_poisson(f32 u, f32 beta, u32 n, f32** prob, u32** alias, u32 max_lambda, u32 max_value) { // MUST be defined in this file to compile on all platforms // if below cutoff, draw using Alias table if(n < max_lambda) { // determine the slot and update random number f32 mv = (f32) max_value; u32 slot = (u32) (u * mv); u = fmodf(u, __frcp_rz(mv)) * mv; // load table elements from global memory f32 thread_prob = prob[n][slot]; u32 thread_alias = alias[n][slot]; // return the resulting draw if(u < thread_prob) { return (f32) slot; } else { return (f32) thread_alias; } } // if didn't return, draw using Gaussian approximation if(u == 1.0f || u == 0.0f) { u = 0.5f; // prevent overflow edge cases } f32 mu = beta + ((f32) n); return normcdfinvf(u) * sqrtf(mu) + mu; } __device__ __forceinline__ f32 block_reduce_sum(f32* block_sum, f32 thread_sum) { // first, perform a warp reduce for(i32 offset = warpSize/2; offset > 0; offset /= 2) { thread_sum += __shfl_down(thread_sum, offset); } // then, add result to shared memory if(threadIdx.x % warpSize == 0) { atomicAdd(block_sum, thread_sum); } // ensure all threads finish writing __syncthreads(); // return new value to all threads return block_sum[0]; } __global__ void polya_urn_init(u32* n, u32* C, u32 K, f32 beta, u32 V, f32** prob, u32** alias, u32 max_lambda, u32 max_value, curandStatePhilox4_32_10_t* rng) { // initialize variables curandStatePhilox4_32_10_t thread_rng = rng[0]; skipahead((unsigned long long int) blockIdx.x*blockDim.x + threadIdx.x, &thread_rng); // loop over array and draw samples for(i32 offset = 0; offset < V / blockDim.x + 1; ++offset) { i32 col = threadIdx.x + offset * blockDim.x; i32 array_idx = col + V * blockIdx.x; if(col < V) { // draw n_k ~ Pois(C/K + beta) f32 u = curand_uniform(&thread_rng); f32 pois = draw_poisson(u, beta, C[col] / gridDim.x/*=K*/, prob, alias, max_lambda, max_value); 
n[array_idx] = (u32) pois; } } } __global__ void polya_urn_sample(f32* Phi, u32* n, f32 beta, u32 V, f32** prob, u32** alias, u32 max_lambda, u32 max_value, curandStatePhilox4_32_10_t* rng) { // initialize variables curandStatePhilox4_32_10_t thread_rng = rng[0]; skipahead((unsigned long long int) blockIdx.x*blockDim.x + threadIdx.x, &thread_rng); f32 thread_sum = 0.0f; __shared__ f32 block_sum[1]; if(threadIdx.x == 0) { block_sum[0] = 0.0f; } __syncthreads(); // loop over array and draw samples for(i32 offset = 0; offset < V / blockDim.x + 1; ++offset) { i32 col = threadIdx.x + offset * blockDim.x; i32 array_idx = col + V * blockIdx.x; if(col < V) { f32 u = curand_uniform(&thread_rng); f32 pois = draw_poisson(u, beta, n[array_idx], prob, alias, max_lambda, max_value); Phi[array_idx] = pois; thread_sum += pois; } } // add up thread sums, synchronize, and broadcast thread_sum = block_reduce_sum(block_sum, thread_sum); // normalize draws for(i32 offset = 0; offset < V / blockDim.x + 1; ++offset) { i32 col = threadIdx.x + offset * blockDim.x; i32 array_idx = col + V * blockIdx.x; if(col < V) { Phi[array_idx] /= thread_sum; } } } void polya_urn_transpose(cudaStream_t* stream, f32* Phi, f32* Phi_temp, u32 K, u32 V, cublasHandle_t* handle, f32* d_zero, f32* d_one) { cudaMemcpyAsync(Phi_temp, Phi, V * K * sizeof(f32), cudaMemcpyDeviceToDevice, *stream) >> GPULDA_CHECK; cublasSetStream(*handle, *stream) >> GPULDA_CHECK; // cublasSgeam(*handle, CUBLAS_OP_T, CUBLAS_OP_N, K, V, d_one, Phi_temp, V, d_zero, Phi, K, Phi, K) >> GPULDA_CHECK; } __global__ void polya_urn_reset(u32* n, u32 V) { for(i32 offset = 0; offset < V / blockDim.x + 1; ++offset) { i32 col = threadIdx.x + offset * blockDim.x; i32 array_idx = col + V * blockIdx.x; if(col < V) { n[array_idx] = 0; } } } __global__ void polya_urn_colsums(f32* Phi, f32* sigma_a, f32 alpha, f32** prob, u32 K) { // initilize variables // initialize variables f32 thread_sum = 0.0f; __shared__ f32 block_sum[1]; if(threadIdx.x == 0) { 
block_sum[0] = 0.0f; } __syncthreads(); // loop over array and compute column sums for(i32 offset = 0; offset < K / blockDim.x + 1; ++offset) { i32 row = threadIdx.x + offset * blockDim.x; i32 array_idx = row + K * blockIdx.x; if(row < K) { thread_sum += Phi[array_idx]; } } // add up thread sums, synchronize, and broadcast thread_sum = block_reduce_sum(block_sum, thread_sum); // set sigma_a if(threadIdx.x == 0) { sigma_a[blockIdx.x] = alpha * thread_sum; } // compute and set alias table probabilities for(i32 offset = 0; offset < K / blockDim.x + 1; ++offset) { i32 row = threadIdx.x + offset * blockDim.x; i32 array_idx = row + K * blockIdx.x; if(row < K) { prob[blockIdx.x][row] = Phi[array_idx] / thread_sum; } } } }
2ceb2c5777ca994e828a8b45263101844968fc1a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstdlib> #include <cstdio> #include <vector> #include <hiprand/hiprand_kernel.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/device_ptr.h> #define d_maxColour 9 using namespace std; __device__ int d_count = 0; __global__ void incrementalColouring (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int start, int end){ int i = threadIdx.x; int startStart, startStop; int me, you; if (i==0){ me = start; you = end; } else{ me = end; you = start; } startStart = vertexArray[me-1]; if (me==n){ startStop = 2*m; } else{ startStop = vertexArray[me]; } for (int j=startStart; j<startStop; j++){ if (neighbourArray[j]==0){ neighbourArray[j]=you; break; } } __syncthreads(); if (colouring[start-1]!=colouring[end-1]){ return; } if (i==0) printf("%d and %d Conflict\n", start, end); __shared__ int colours[2]; colours[i]=0; if (i==0) printf("I am %d and %d and %d\n", i, colours[i], colours[1-i]); bool bucket[d_maxColour]; for (int j=0; j<d_maxColour; j++){ bucket[j]=true; } if (i==0){ printf("%d %d", startStart, startStop); for (int j=startStart; j<startStop; j++){ printf("clo %d\n", neighbourArray[j]); if (neighbourArray[j]!=0){ printf("clocli %d\n", colouring[neighbourArray[j]-1]); } } } for (int j=startStart; j<startStop; j++){ if (neighbourArray[j]==0){ continue; } bucket[colouring[neighbourArray[j]-1]-1] = false; if (i==0) printf("buvket clo %d and %d and %d\n", neighbourArray[j]-1, colouring[neighbourArray[j]-1], bucket[colouring[neighbourArray[j]-1]-1]); } for (int j=0; j<d_maxColour; j++){ if(bucket[j]){ colours[i]=j+1; printf("%d ashhas \t", j+1); break; } } if (i==0) for (int j=0; j<d_maxColour; j++){ printf("%d \t",bucket[j]); } if (i==0){ printf("\n"); } __syncthreads(); printf("%d and %d Conflict new colour min %d \n", start, end, colours[i]); 
// Possible issue: There could be a number inbetween the smallest equal guess and the current colour. if (colours[i]==colours[1-i]){ if (colours[i]<colouring[me-1]){ if(i==0){ colouring[me-1]=colours[i]; } } else{ if (i==1){ colouring[me-1]=colours[i]; } } } else{ if (colours[i]<colouring[me-1]){ colouring[me-1]=colours[i]; } else{ if (colours[i]<colours[1-i]){ colouring[me-1]=colours[i]; } } } __syncthreads(); if (i==0){ for (int j=0; j<n; j++){ printf("%d ", colouring[j]); } printf("\n"); } } __global__ void colourMinMax (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int currentColour){ int i= blockDim.x * blockIdx.x + threadIdx.x; if (i>=n){ return; } if (colouring[i]!=0){ return; } int myValue = numbers[i]; // printf("I am node %d with value %d\n", i+1, myMax); int start = -1, stop = -1; start = vertexArray[i]; if (i==n-1){ stop = 2*m; } else{ stop = vertexArray[i+1]; } bool max = true, min = true; for (int j=start; j<stop; j++){ // printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1); int neighbour = neighbourArray[j]; if (neighbour==0){ continue; } neighbour--; if (max && colouring[neighbour]==0 && numbers[neighbour] >= myValue){ if (numbers[neighbour] == myValue){ if (i < neighbour){ continue; } } max=false; if (!min){ return; } } if (min && colouring[neighbour]==0 && numbers[neighbour] <= myValue){ if (numbers[neighbour] == myValue){ if (i > neighbour){ continue; } } min=false; if (!max){ return; } } } if (max){ colouring[i] = currentColour; } else if (min){ colouring[i] = currentColour+1; } atomicAdd(&d_count, 1); } __global__ void setup_kernel (hiprandState_t * state, unsigned long seed ){ int i= blockDim.x * blockIdx.x + threadIdx.x; hiprand_init (seed, i, 0, &state[i]); } __global__ void randomNumbering (hiprandState_t* globalState, int *degreeCount, int n, int limit){ int i= blockDim.x * blockIdx.x + threadIdx.x; hiprandState_t localState = globalState[i]; float 
RANDOM = hiprand_uniform( &localState ); globalState[i] = localState; RANDOM *= (limit - 1 + 0.999999); RANDOM += 1; degreeCount[i] = (int) RANDOM; } __global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){ int i= blockDim.x * blockIdx.x + threadIdx.x; if (i>=n){ return; } int start = -1, stop = -1; int diff=0; start = vertexArray[i]; if (i==n-1){ stop = 2*m; } else{ stop = vertexArray[i+1]; } diff = stop-start; degreeCount[i]=diff; } void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){ for (int i=0; i<n-1; i++){ for (int j = vertexArray[i]; j < vertexArray[i+1]; ++j){ cout<<"e "<<i+1<<" "<<neighbourArray[j]<<endl; /* code */ } } for (int j = vertexArray[n-1]; j < m; ++j) { cout<<"e "<<n<<" "<<neighbourArray[j]<<endl; /* code */ } } int main(int argc, char const *argv[]) { int n, m; cin>>n>>m; int *h_count = new int; int *h_vertexArray = new int [n]; int *h_neighbourArray = new int [2*m]; int *h_degreeCount = new int [n]; int *h_colour = new int [n]; int *d_vertexArray = NULL; hipMalloc((void **)&d_vertexArray, n*sizeof(int)); int *d_neighbourArray = NULL; hipMalloc((void **)&d_neighbourArray, 2*m*sizeof(int)); int *d_colour = NULL; hipMalloc((void **)&d_colour, (n)*sizeof(int)); hipMemset((void *)d_colour, 0, (n)*sizeof(int)); int *d_degreeCount = NULL; hipMalloc((void **)&d_degreeCount, (n)*sizeof(int)); hipMemset((void *)d_degreeCount, 0, (n)*sizeof(int)); hiprandState_t* devStates; hipMalloc ( &devStates, n*sizeof( hiprandState_t ) ); int offset = 0; vector<int> startArray, stopArray; for (int i = 0; i < n; ++i) { h_vertexArray[i]=offset; int degree; cin>>degree; offset+=degree; } for (int i = 0; i < 2*m; ++i) { h_neighbourArray[i]=0; } for (int i = 0; i < m; ++i) { int start; int end; cin>>start>>end; double r = ((double) rand() / (RAND_MAX)); if (r<=0.5){ int startStart, startStop, stopStart, stopStop; startStart = h_vertexArray[start-1]; if (start==n){ startStop = 2*m; } else{ startStop = 
h_vertexArray[start]; } stopStart = h_vertexArray[end-1]; if (end==n){ stopStop = 2*m; } else{ stopStop = h_vertexArray[end]; } for (int j=startStart; j<startStop; j++){ if (h_neighbourArray[j]==0){ h_neighbourArray[j]=end; break; } } for (int j=stopStart; j<stopStop; j++){ if (h_neighbourArray[j]==0){ h_neighbourArray[j]=start; break; } } } else{ startArray.push_back(start); stopArray.push_back(end); } } for (int i=0; i<n; i++){ cout<<h_vertexArray[i]<<" "; } cout<<endl; for (int i=0; i<2*m; i++){ cout<<h_neighbourArray[i]<<" "; } cout<<endl; hipMemcpy(d_vertexArray, h_vertexArray, n*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), hipMemcpyHostToDevice); int threadsPerBlock = 512; int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock; hipLaunchKernelGGL(( setup_kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, time(NULL) ); hipLaunchKernelGGL(( randomNumbering), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, d_degreeCount, n, n); hipMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), hipMemcpyDeviceToHost); cout<<"Random numbers: "<<endl; for (int i=0; i<n; i++){ cout<<h_degreeCount[i]<<endl; } int colourCount = 1; hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); cout<<"Worklist: "<<endl; for (int i=0; i<startArray.size(); i++){ cout<<startArray[i]<<" "<<stopArray[i]<<endl; } while (1){ hipLaunchKernelGGL(( colourMinMax), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, colourCount); hipMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, hipMemcpyDeviceToHost); cout<<"H Count = "<<*h_count<<"at colour: "<<colourCount<<endl; if (*h_count == n){ break; } colourCount+=2; } colourCount++; for (int i=0; i<startArray.size(); i++){ cout<<"New added edge: "<<startArray[i]<<" "<<stopArray[i]<<endl; hipLaunchKernelGGL(( incrementalColouring), dim3(1), dim3(2), 
0, 0, d_vertexArray, d_neighbourArray, n, m, d_colour, startArray[i], stopArray[i]); hipDeviceSynchronize(); } hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost); thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(d_colour); int maxColour = *(thrust::max_element(c_ptr, c_ptr + n)); cout<<"Max Colour = "<<maxColour<<endl; cout<<"Colour numbers: "<<endl; for (int i=0; i<n; i++){ cout<<h_colour[i]<<endl; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); cout<<"Time for the kernel: "<<time<<" ms"<<endl; hipMemcpy(h_vertexArray, d_vertexArray, n*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_neighbourArray, d_neighbourArray, 2*m*sizeof(int), hipMemcpyDeviceToHost); for (int i=0; i<n; i++){ cout<<h_vertexArray[i]<<" "; } cout<<endl; for (int i=0; i<2*m; i++){ cout<<h_neighbourArray[i]<<" "; } cout<<endl; delete h_count; delete[] h_vertexArray; delete[] h_neighbourArray; delete[] h_degreeCount; delete[] h_colour; hipFree(d_neighbourArray); hipFree(d_vertexArray); hipFree(d_degreeCount); hipFree(d_colour); hipDeviceReset(); return 0; }
2ceb2c5777ca994e828a8b45263101844968fc1a.cu
#include <iostream> #include <cstdlib> #include <cstdio> #include <vector> #include <curand_kernel.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/device_ptr.h> #define d_maxColour 9 using namespace std; __device__ int d_count = 0; __global__ void incrementalColouring (int *vertexArray, int *neighbourArray, int n, int m, int *colouring, int start, int end){ int i = threadIdx.x; int startStart, startStop; int me, you; if (i==0){ me = start; you = end; } else{ me = end; you = start; } startStart = vertexArray[me-1]; if (me==n){ startStop = 2*m; } else{ startStop = vertexArray[me]; } for (int j=startStart; j<startStop; j++){ if (neighbourArray[j]==0){ neighbourArray[j]=you; break; } } __syncthreads(); if (colouring[start-1]!=colouring[end-1]){ return; } if (i==0) printf("%d and %d Conflict\n", start, end); __shared__ int colours[2]; colours[i]=0; if (i==0) printf("I am %d and %d and %d\n", i, colours[i], colours[1-i]); bool bucket[d_maxColour]; for (int j=0; j<d_maxColour; j++){ bucket[j]=true; } if (i==0){ printf("%d %d", startStart, startStop); for (int j=startStart; j<startStop; j++){ printf("clo %d\n", neighbourArray[j]); if (neighbourArray[j]!=0){ printf("clocli %d\n", colouring[neighbourArray[j]-1]); } } } for (int j=startStart; j<startStop; j++){ if (neighbourArray[j]==0){ continue; } bucket[colouring[neighbourArray[j]-1]-1] = false; if (i==0) printf("buvket clo %d and %d and %d\n", neighbourArray[j]-1, colouring[neighbourArray[j]-1], bucket[colouring[neighbourArray[j]-1]-1]); } for (int j=0; j<d_maxColour; j++){ if(bucket[j]){ colours[i]=j+1; printf("%d ashhas \t", j+1); break; } } if (i==0) for (int j=0; j<d_maxColour; j++){ printf("%d \t",bucket[j]); } if (i==0){ printf("\n"); } __syncthreads(); printf("%d and %d Conflict new colour min %d \n", start, end, colours[i]); // Possible issue: There could be a number inbetween the smallest equal guess and the current 
colour. if (colours[i]==colours[1-i]){ if (colours[i]<colouring[me-1]){ if(i==0){ colouring[me-1]=colours[i]; } } else{ if (i==1){ colouring[me-1]=colours[i]; } } } else{ if (colours[i]<colouring[me-1]){ colouring[me-1]=colours[i]; } else{ if (colours[i]<colours[1-i]){ colouring[me-1]=colours[i]; } } } __syncthreads(); if (i==0){ for (int j=0; j<n; j++){ printf("%d ", colouring[j]); } printf("\n"); } } __global__ void colourMinMax (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int currentColour){ int i= blockDim.x * blockIdx.x + threadIdx.x; if (i>=n){ return; } if (colouring[i]!=0){ return; } int myValue = numbers[i]; // printf("I am node %d with value %d\n", i+1, myMax); int start = -1, stop = -1; start = vertexArray[i]; if (i==n-1){ stop = 2*m; } else{ stop = vertexArray[i+1]; } bool max = true, min = true; for (int j=start; j<stop; j++){ // printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1); int neighbour = neighbourArray[j]; if (neighbour==0){ continue; } neighbour--; if (max && colouring[neighbour]==0 && numbers[neighbour] >= myValue){ if (numbers[neighbour] == myValue){ if (i < neighbour){ continue; } } max=false; if (!min){ return; } } if (min && colouring[neighbour]==0 && numbers[neighbour] <= myValue){ if (numbers[neighbour] == myValue){ if (i > neighbour){ continue; } } min=false; if (!max){ return; } } } if (max){ colouring[i] = currentColour; } else if (min){ colouring[i] = currentColour+1; } atomicAdd(&d_count, 1); } __global__ void setup_kernel (curandState * state, unsigned long seed ){ int i= blockDim.x * blockIdx.x + threadIdx.x; curand_init (seed, i, 0, &state[i]); } __global__ void randomNumbering (curandState* globalState, int *degreeCount, int n, int limit){ int i= blockDim.x * blockIdx.x + threadIdx.x; curandState localState = globalState[i]; float RANDOM = curand_uniform( &localState ); globalState[i] = localState; RANDOM *= (limit - 1 + 0.999999); 
RANDOM += 1; degreeCount[i] = (int) RANDOM; } __global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){ int i= blockDim.x * blockIdx.x + threadIdx.x; if (i>=n){ return; } int start = -1, stop = -1; int diff=0; start = vertexArray[i]; if (i==n-1){ stop = 2*m; } else{ stop = vertexArray[i+1]; } diff = stop-start; degreeCount[i]=diff; } void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){ for (int i=0; i<n-1; i++){ for (int j = vertexArray[i]; j < vertexArray[i+1]; ++j){ cout<<"e "<<i+1<<" "<<neighbourArray[j]<<endl; /* code */ } } for (int j = vertexArray[n-1]; j < m; ++j) { cout<<"e "<<n<<" "<<neighbourArray[j]<<endl; /* code */ } } int main(int argc, char const *argv[]) { int n, m; cin>>n>>m; int *h_count = new int; int *h_vertexArray = new int [n]; int *h_neighbourArray = new int [2*m]; int *h_degreeCount = new int [n]; int *h_colour = new int [n]; int *d_vertexArray = NULL; cudaMalloc((void **)&d_vertexArray, n*sizeof(int)); int *d_neighbourArray = NULL; cudaMalloc((void **)&d_neighbourArray, 2*m*sizeof(int)); int *d_colour = NULL; cudaMalloc((void **)&d_colour, (n)*sizeof(int)); cudaMemset((void *)d_colour, 0, (n)*sizeof(int)); int *d_degreeCount = NULL; cudaMalloc((void **)&d_degreeCount, (n)*sizeof(int)); cudaMemset((void *)d_degreeCount, 0, (n)*sizeof(int)); curandState* devStates; cudaMalloc ( &devStates, n*sizeof( curandState ) ); int offset = 0; vector<int> startArray, stopArray; for (int i = 0; i < n; ++i) { h_vertexArray[i]=offset; int degree; cin>>degree; offset+=degree; } for (int i = 0; i < 2*m; ++i) { h_neighbourArray[i]=0; } for (int i = 0; i < m; ++i) { int start; int end; cin>>start>>end; double r = ((double) rand() / (RAND_MAX)); if (r<=0.5){ int startStart, startStop, stopStart, stopStop; startStart = h_vertexArray[start-1]; if (start==n){ startStop = 2*m; } else{ startStop = h_vertexArray[start]; } stopStart = h_vertexArray[end-1]; if (end==n){ stopStop = 2*m; } else{ stopStop = 
h_vertexArray[end]; } for (int j=startStart; j<startStop; j++){ if (h_neighbourArray[j]==0){ h_neighbourArray[j]=end; break; } } for (int j=stopStart; j<stopStop; j++){ if (h_neighbourArray[j]==0){ h_neighbourArray[j]=start; break; } } } else{ startArray.push_back(start); stopArray.push_back(end); } } for (int i=0; i<n; i++){ cout<<h_vertexArray[i]<<" "; } cout<<endl; for (int i=0; i<2*m; i++){ cout<<h_neighbourArray[i]<<" "; } cout<<endl; cudaMemcpy(d_vertexArray, h_vertexArray, n*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), cudaMemcpyHostToDevice); int threadsPerBlock = 512; int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock; setup_kernel <<<blocksPerGrid, threadsPerBlock>>> ( devStates, time(NULL) ); randomNumbering<<<blocksPerGrid, threadsPerBlock>>>(devStates, d_degreeCount, n, n); cudaMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), cudaMemcpyDeviceToHost); cout<<"Random numbers: "<<endl; for (int i=0; i<n; i++){ cout<<h_degreeCount[i]<<endl; } int colourCount = 1; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cout<<"Worklist: "<<endl; for (int i=0; i<startArray.size(); i++){ cout<<startArray[i]<<" "<<stopArray[i]<<endl; } while (1){ colourMinMax<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, colourCount); cudaMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, cudaMemcpyDeviceToHost); cout<<"H Count = "<<*h_count<<"at colour: "<<colourCount<<endl; if (*h_count == n){ break; } colourCount+=2; } colourCount++; for (int i=0; i<startArray.size(); i++){ cout<<"New added edge: "<<startArray[i]<<" "<<stopArray[i]<<endl; incrementalColouring<<<1, 2>>>(d_vertexArray, d_neighbourArray, n, m, d_colour, startArray[i], stopArray[i]); cudaDeviceSynchronize(); } cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost); thrust::device_ptr<int> c_ptr = 
thrust::device_pointer_cast(d_colour); int maxColour = *(thrust::max_element(c_ptr, c_ptr + n)); cout<<"Max Colour = "<<maxColour<<endl; cout<<"Colour numbers: "<<endl; for (int i=0; i<n; i++){ cout<<h_colour[i]<<endl; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cout<<"Time for the kernel: "<<time<<" ms"<<endl; cudaMemcpy(h_vertexArray, d_vertexArray, n*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(h_neighbourArray, d_neighbourArray, 2*m*sizeof(int), cudaMemcpyDeviceToHost); for (int i=0; i<n; i++){ cout<<h_vertexArray[i]<<" "; } cout<<endl; for (int i=0; i<2*m; i++){ cout<<h_neighbourArray[i]<<" "; } cout<<endl; delete h_count; delete[] h_vertexArray; delete[] h_neighbourArray; delete[] h_degreeCount; delete[] h_colour; cudaFree(d_neighbourArray); cudaFree(d_vertexArray); cudaFree(d_degreeCount); cudaFree(d_colour); cudaDeviceReset(); return 0; }
1d764d1efacfa8244d29c0a296912ef8e586fe6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /////////////////////////////////////////////////////////////////////////// // ParallelWaveFunction: // /////////////////////////////////////////////////////////////////////////// #include "ParallelWaveFunction.h" //________________________________________________________________________________________________________________ __device__ int GetBinNumber(double aBinSize, int aNbins, double aValue) { //TODO check the accuracy of this double tBinKStarMin, tBinKStarMax; for(int i=0; i<aNbins; i++) { tBinKStarMin = i*aBinSize; tBinKStarMax = (i+1)*aBinSize; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. failure } //________________________________________________________________________________________________________________ __device__ int GetBinNumber(int aNbins, double aMin, double aMax, double aValue) { //TODO check the accuracy of this double tBinSize = (aMax-aMin)/aNbins; double tBinKStarMin, tBinKStarMax; for(int i=0; i<aNbins; i++) { tBinKStarMin = i*tBinSize + aMin; tBinKStarMax = (i+1)*tBinSize + aMin; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. failure } //________________________________________________________________________________________________________________ __device__ int GetBinNumber(double aBinWidth, double aMin, double aMax, double aValue) { //TODO check the accuracy of this int tNbins = (aMax-aMin)/aBinWidth; double tBinKStarMin, tBinKStarMax; for(int i=0; i<tNbins; i++) { tBinKStarMin = i*aBinWidth + aMin; tBinKStarMax = (i+1)*aBinWidth + aMin; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. 
failure } //________________________________________________________________________________________________________________ __device__ int GetInterpLowBin(InterpType aInterpType, InterpAxisType aAxisType, double aVal) { int tReturnBin = -2; int tNbins, tBin; double tMin, tMax, tBinWidth, tBinCenter; bool tErrorFlag = false; switch(aInterpType) { case kGTilde: switch(aAxisType) { case kKaxis: tNbins = d_fGTildeInfo->nBinsK; tBinWidth = d_fGTildeInfo->binWidthK; tMin = d_fGTildeInfo->minK; tMax = d_fGTildeInfo->maxK; break; case kRaxis: tNbins = d_fGTildeInfo->nBinsR; tBinWidth = d_fGTildeInfo->binWidthR; tMin = d_fGTildeInfo->minR; tMax = d_fGTildeInfo->maxR; break; //Invalid axis selection case kThetaaxis: tErrorFlag = true; break; case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kHyperGeo1F1: switch(aAxisType) { case kKaxis: tNbins = d_fHyperGeo1F1Info->nBinsK; tBinWidth = d_fHyperGeo1F1Info->binWidthK; tMin = d_fHyperGeo1F1Info->minK; tMax = d_fHyperGeo1F1Info->maxK; break; case kRaxis: tNbins = d_fHyperGeo1F1Info->nBinsR; tBinWidth = d_fHyperGeo1F1Info->binWidthR; tMin = d_fHyperGeo1F1Info->minR; tMax = d_fHyperGeo1F1Info->maxR; break; case kThetaaxis: tNbins = d_fHyperGeo1F1Info->nBinsTheta; tBinWidth = d_fHyperGeo1F1Info->binWidthTheta; tMin = d_fHyperGeo1F1Info->minTheta; tMax = d_fHyperGeo1F1Info->maxTheta; break; //Invalid axis selection case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kScattLen: switch(aAxisType) { case kReF0axis: tNbins = d_fScattLenInfo->nBinsReF0; tBinWidth = d_fScattLenInfo->binWidthReF0; tMin = d_fScattLenInfo->minReF0; tMax = d_fScattLenInfo->maxReF0; break; case kImF0axis: tNbins = d_fScattLenInfo->nBinsImF0; tBinWidth = d_fScattLenInfo->binWidthImF0; tMin = d_fScattLenInfo->minImF0; tMax = d_fScattLenInfo->maxImF0; break; case kD0axis: tNbins = 
d_fScattLenInfo->nBinsD0; tBinWidth = d_fScattLenInfo->binWidthD0; tMin = d_fScattLenInfo->minD0; tMax = d_fScattLenInfo->maxD0; break; case kKaxis: tNbins = d_fScattLenInfo->nBinsK; tBinWidth = d_fScattLenInfo->binWidthK; tMin = d_fScattLenInfo->minK; tMax = d_fScattLenInfo->maxK; break; //Invalid axis selection case kRaxis: tErrorFlag = true; break; case kThetaaxis: tErrorFlag = true; break; } break; } //Check error if(tErrorFlag) return -2; //--------------------------------- tBin = GetBinNumber(tNbins,tMin,tMax,aVal); tBinCenter = tMin + (tBin+0.5)*tBinWidth; if(aVal < tBinCenter) tReturnBin = tBin-1; else tReturnBin = tBin; if(tReturnBin<0 || tReturnBin >= tNbins) return -2; else return tReturnBin; } //________________________________________________________________________________________________________________ __device__ double GetInterpLowBinCenter(InterpType aInterpType, InterpAxisType aAxisType, double aVal) { double tReturnValue; int tReturnBin = -2; int tNbins, tBin; double tMin, tMax, tBinWidth, tBinCenter; bool tErrorFlag = false; switch(aInterpType) { case kGTilde: switch(aAxisType) { case kKaxis: tNbins = d_fGTildeInfo->nBinsK; tBinWidth = d_fGTildeInfo->binWidthK; tMin = d_fGTildeInfo->minK; tMax = d_fGTildeInfo->maxK; break; case kRaxis: tNbins = d_fGTildeInfo->nBinsR; tBinWidth = d_fGTildeInfo->binWidthR; tMin = d_fGTildeInfo->minR; tMax = d_fGTildeInfo->maxR; break; //Invalid axis selection case kThetaaxis: tErrorFlag = true; break; case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kHyperGeo1F1: switch(aAxisType) { case kKaxis: tNbins = d_fHyperGeo1F1Info->nBinsK; tBinWidth = d_fHyperGeo1F1Info->binWidthK; tMin = d_fHyperGeo1F1Info->minK; tMax = d_fHyperGeo1F1Info->maxK; break; case kRaxis: tNbins = d_fHyperGeo1F1Info->nBinsR; tBinWidth = d_fHyperGeo1F1Info->binWidthR; tMin = d_fHyperGeo1F1Info->minR; tMax = d_fHyperGeo1F1Info->maxR; break; case kThetaaxis: 
tNbins = d_fHyperGeo1F1Info->nBinsTheta; tBinWidth = d_fHyperGeo1F1Info->binWidthTheta; tMin = d_fHyperGeo1F1Info->minTheta; tMax = d_fHyperGeo1F1Info->maxTheta; break; //Invalid axis selection case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kScattLen: switch(aAxisType) { case kReF0axis: tNbins = d_fScattLenInfo->nBinsReF0; tBinWidth = d_fScattLenInfo->binWidthReF0; tMin = d_fScattLenInfo->minReF0; tMax = d_fScattLenInfo->maxReF0; break; case kImF0axis: tNbins = d_fScattLenInfo->nBinsImF0; tBinWidth = d_fScattLenInfo->binWidthImF0; tMin = d_fScattLenInfo->minImF0; tMax = d_fScattLenInfo->maxImF0; break; case kD0axis: tNbins = d_fScattLenInfo->nBinsD0; tBinWidth = d_fScattLenInfo->binWidthD0; tMin = d_fScattLenInfo->minD0; tMax = d_fScattLenInfo->maxD0; break; case kKaxis: tNbins = d_fScattLenInfo->nBinsK; tBinWidth = d_fScattLenInfo->binWidthK; tMin = d_fScattLenInfo->minK; tMax = d_fScattLenInfo->maxK; break; //Invalid axis selection case kRaxis: tErrorFlag = true; break; case kThetaaxis: tErrorFlag = true; break; } break; } //Check error if(tErrorFlag) return -2; //--------------------------------- tBin = GetBinNumber(tNbins,tMin,tMax,aVal); tBinCenter = tMin + (tBin+0.5)*tBinWidth; if(aVal < tBinCenter) tReturnBin = tBin-1; else tReturnBin = tBin; if(tReturnBin<0 || tReturnBin >= tNbins) return -2; tReturnValue = tMin + (tReturnBin+0.5)*tBinWidth; return tReturnValue; } //________________________________________________________________________________________________________________ __device__ double LednickyHFunctionInterpolate(double aKStar) { double tResult = 0.0; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 //TODO make HFunctionInfo objects instead of using GTilde //TODO check accuracy double tBinWidthK = d_fGTildeInfo->binWidthK; int tBinLowK = GetInterpLowBin(kGTilde,kKaxis,aKStar); int tBinHighK = 
tBinLowK+1; double tBinLowCenterK = GetInterpLowBinCenter(kGTilde,kKaxis,aKStar); double tBinHighCenterK = tBinLowCenterK+tBinWidthK; double tX0 = tBinLowCenterK; double tX1 = tBinHighCenterK; double tY0 = d_fLednickyHFunction[tBinLowK]; double tY1 = d_fLednickyHFunction[tBinHighK]; tResult = tY0 + (aKStar-tX0)*((tY1-tY0)/(tX1-tX0)); return tResult; } //________________________________________________________________________________________________________________ __device__ hipDoubleComplex GTildeInterpolate(double aKStar, double aRStar) { double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsR = d_fGTildeInfo->nBinsR; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthK = d_fGTildeInfo->binWidthK; int tBinLowK = GetInterpLowBin(kGTilde,kKaxis,aKStar); int tBinHighK = tBinLowK+1; double tBinLowCenterK = GetInterpLowBinCenter(kGTilde,kKaxis,aKStar); double tBinHighCenterK = tBinLowCenterK+tBinWidthK; double tBinWidthR = d_fGTildeInfo->binWidthR; int tBinLowR = GetInterpLowBin(kGTilde,kRaxis,aRStar); int tBinHighR = tBinLowR+1; double tBinLowCenterR = GetInterpLowBinCenter(kGTilde,kRaxis,aRStar); double tBinHighCenterR = tBinLowCenterR+tBinWidthR; //-------------------------- double tQ11Real = d_fGTildeReal[tBinLowR + tBinLowK*tNbinsR]; double tQ12Real = d_fGTildeReal[tBinHighR + tBinLowK*tNbinsR]; double tQ21Real = d_fGTildeReal[tBinLowR + tBinHighK*tNbinsR]; double tQ22Real = d_fGTildeReal[tBinHighR + tBinHighK*tNbinsR]; double tQ11Imag = d_fGTildeImag[tBinLowR + tBinLowK*tNbinsR]; double tQ12Imag = d_fGTildeImag[tBinHighR + tBinLowK*tNbinsR]; double tQ21Imag = d_fGTildeImag[tBinLowR + tBinHighK*tNbinsR]; double tQ22Imag = d_fGTildeImag[tBinHighR + tBinHighK*tNbinsR]; //-------------------------- double tD = 1.0*tBinWidthK*tBinWidthR; tResultReal = (1.0/tD)*(tQ11Real*(tBinHighCenterK-aKStar)*(tBinHighCenterR-aRStar) + 
tQ21Real*(aKStar-tBinLowCenterK)*(tBinHighCenterR-aRStar) + tQ12Real*(tBinHighCenterK-aKStar)*(aRStar-tBinLowCenterR) + tQ22Real*(aKStar-tBinLowCenterK)*(aRStar-tBinLowCenterR)); tResultImag = (1.0/tD)*(tQ11Imag*(tBinHighCenterK-aKStar)*(tBinHighCenterR-aRStar) + tQ21Imag*(aKStar-tBinLowCenterK)*(tBinHighCenterR-aRStar) + tQ12Imag*(tBinHighCenterK-aKStar)*(aRStar-tBinLowCenterR) + tQ22Imag*(aKStar-tBinLowCenterK)*(aRStar-tBinLowCenterR)); //-------------------------- hipDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag); return tReturnValue; } //________________________________________________________________________________________________________________ __device__ hipDoubleComplex HyperGeo1F1Interpolate(double aKStar, double aRStar, double aTheta) { double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsTheta = d_fHyperGeo1F1Info->nBinsTheta; int tNbinsR = d_fHyperGeo1F1Info->nBinsR; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthK = d_fHyperGeo1F1Info->binWidthK; int tBin0K = GetInterpLowBin(kHyperGeo1F1,kKaxis,aKStar); int tBin1K = tBin0K+1; double tBin0CenterK = GetInterpLowBinCenter(kHyperGeo1F1,kKaxis,aKStar); // double tBin1CenterK = tBin0CenterK+tBinWidthK; double tBinWidthR = d_fHyperGeo1F1Info->binWidthR; int tBin0R = GetInterpLowBin(kHyperGeo1F1,kRaxis,aRStar); int tBin1R = tBin0R+1; double tBin0CenterR = GetInterpLowBinCenter(kHyperGeo1F1,kRaxis,aRStar); // double tBin1CenterR = tBin0CenterR+tBinWidthR; double tBinWidthTheta = d_fHyperGeo1F1Info->binWidthTheta; int tBin0Theta = GetInterpLowBin(kHyperGeo1F1,kThetaaxis,aTheta); int tBin1Theta = tBin0Theta+1; double tBin0CenterTheta = GetInterpLowBinCenter(kHyperGeo1F1,kThetaaxis,aTheta); // double tBin1CenterTheta = tBin0CenterTheta+tBinWidthTheta; //-------------------------- double tDiffK = (aKStar - tBin0CenterK)/tBinWidthK; double tDiffR = (aRStar - 
tBin0CenterR)/tBinWidthR; double tDiffTheta = (aTheta - tBin0CenterTheta)/tBinWidthTheta; //-----------REAL--------------- //interpolate along z (i.e. theta) double tC000Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC001Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC010Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC011Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC100Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC101Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC110Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC111Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC00Real = tC000Real*(1.0-tDiffTheta) + tC001Real*tDiffTheta; double tC01Real = tC010Real*(1.0-tDiffTheta) + tC011Real*tDiffTheta; double tC10Real = tC100Real*(1.0-tDiffTheta) + tC101Real*tDiffTheta; double tC11Real = tC110Real*(1.0-tDiffTheta) + tC111Real*tDiffTheta; //interpolate along y (i.e. r) double tC0Real = tC00Real*(1.0-tDiffR) + tC01Real*tDiffR; double tC1Real = tC10Real*(1.0-tDiffR) + tC11Real*tDiffR; //interpolate along x (i.e. k) tResultReal = tC0Real*(1.0-tDiffK) + tC1Real*tDiffK; //-----------IMAG--------------- //interpolate along z (i.e. 
theta) double tC000Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC001Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC010Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC011Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC100Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC101Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC110Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC111Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC00Imag = tC000Imag*(1.0-tDiffTheta) + tC001Imag*tDiffTheta; double tC01Imag = tC010Imag*(1.0-tDiffTheta) + tC011Imag*tDiffTheta; double tC10Imag = tC100Imag*(1.0-tDiffTheta) + tC101Imag*tDiffTheta; double tC11Imag = tC110Imag*(1.0-tDiffTheta) + tC111Imag*tDiffTheta; //interpolate along y (i.e. r) double tC0Imag = tC00Imag*(1.0-tDiffR) + tC01Imag*tDiffR; double tC1Imag = tC10Imag*(1.0-tDiffR) + tC11Imag*tDiffR; //interpolate along x (i.e. k) tResultImag = tC0Imag*(1.0-tDiffK) + tC1Imag*tDiffK; //-------------------------------- hipDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag); return tReturnValue; } /* //________________________________________________________________________________________________________________ __device__ hipDoubleComplex ScattLenInterpolateFull(double aReF0, double aImF0, double aD0, double aKStar) { //This doesn't work because d_fCoulombScatteringLengthReal and d_fCoulombScatteringLengthImag are // too big to fit onto the GPU memory. I am keeping it in case I figure out how to resolve the memory issue // i.e. 
figure out how to let the device directly access host memory double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsK = d_fScattLenInfo->nBinsK; int tNbinsD0 = d_fScattLenInfo->nBinsD0; int tNbinsImF0 = d_fScattLenInfo->nBinsImF0; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthReF0 = d_fScattLenInfo->binWidthReF0; int tBin0ReF0 = GetInterpLowBin(kScattLen,kReF0axis,aReF0); int tBin1ReF0 = tBin0ReF0+1; double tBin0CenterReF0 = GetInterpLowBinCenter(kScattLen,kReF0axis,aReF0); // double tBin1CenterReF0 = tBin0CenterReF0+tBinWidthReF0; double tBinWidthImF0 = d_fScattLenInfo->binWidthImF0; int tBin0ImF0 = GetInterpLowBin(kScattLen,kImF0axis,aImF0); int tBin1ImF0 = tBin0ImF0+1; double tBin0CenterImF0 = GetInterpLowBinCenter(kScattLen,kImF0axis,aImF0); // double tBin1CenterImF0 = tBin0CenterImF0+tBinWidthImF0; double tBinWidthD0 = d_fScattLenInfo->binWidthD0; int tBin0D0 = GetInterpLowBin(kScattLen,kD0axis,aD0); int tBin1D0 = tBin0D0+1; double tBin0CenterD0 = GetInterpLowBinCenter(kScattLen,kD0axis,aD0); // double tBin1CenterD0 = tBin0CenterD0+tBinWidthD0; double tBinWidthK = d_fScattLenInfo->binWidthK; int tBin0K = GetInterpLowBin(kScattLen,kKaxis,aKStar); int tBin1K = tBin0K+1; double tBin0CenterK = GetInterpLowBinCenter(kScattLen,kKaxis,aKStar); // double tBin1CenterK = tBin0CenterK+tBinWidthK; //-------------------------- double tDiffReF0 = (aReF0 - tBin0CenterReF0)/tBinWidthReF0; double tDiffImF0 = (aImF0 - tBin0CenterImF0)/tBinWidthImF0; double tDiffD0 = (aD0 - tBin0CenterD0)/tBinWidthD0; double tDiffK = (aKStar - tBin0CenterK)/tBinWidthK; //-------------------------- //Assuming f(t,x,y,z) = f(ReF0,ImF0,D0,KStar). Ordering for memory access reasons //---------------REAL---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Real = 
d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Real = tC0000Real*(1.0-tDiffK) + tC0001Real*tDiffK; double tC001Real = tC0010Real*(1.0-tDiffK) + tC0011Real*tDiffK; double tC010Real = tC0100Real*(1.0-tDiffK) + tC0101Real*tDiffK; double tC011Real = tC0110Real*(1.0-tDiffK) + tC0111Real*tDiffK; double tC100Real = tC1000Real*(1.0-tDiffK) + tC1001Real*tDiffK; double tC101Real = tC1010Real*(1.0-tDiffK) + tC1011Real*tDiffK; double tC110Real = tC1100Real*(1.0-tDiffK) + tC1101Real*tDiffK; double tC111Real = tC1110Real*(1.0-tDiffK) + tC1111Real*tDiffK; //interpolate along y (i.e. D0) double tC00Real = tC000Real*(1.0-tDiffD0) + tC001Real*tDiffD0; double tC01Real = tC010Real*(1.0-tDiffD0) + tC011Real*tDiffD0; double tC10Real = tC100Real*(1.0-tDiffD0) + tC101Real*tDiffD0; double tC11Real = tC110Real*(1.0-tDiffD0) + tC111Real*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Real = tC00Real*(1.0-tDiffImF0) + tC01Real*tDiffImF0; double tC1Real = tC10Real*(1.0-tDiffImF0) + tC11Real*tDiffImF0; //interpolate along t (i.e. ReF0) tResultReal = tC0Real*(1.0-tDiffReF0) + tC1Real*tDiffReF0; //---------------Imag---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Imag = 
d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Imag = tC0000Imag*(1.0-tDiffK) + tC0001Imag*tDiffK; double tC001Imag = tC0010Imag*(1.0-tDiffK) + tC0011Imag*tDiffK; double tC010Imag = tC0100Imag*(1.0-tDiffK) + tC0101Imag*tDiffK; double tC011Imag = tC0110Imag*(1.0-tDiffK) + tC0111Imag*tDiffK; double tC100Imag = tC1000Imag*(1.0-tDiffK) + tC1001Imag*tDiffK; double tC101Imag = tC1010Imag*(1.0-tDiffK) + tC1011Imag*tDiffK; double tC110Imag = tC1100Imag*(1.0-tDiffK) + tC1101Imag*tDiffK; double tC111Imag = tC1110Imag*(1.0-tDiffK) + tC1111Imag*tDiffK; //interpolate along y (i.e. D0) double tC00Imag = tC000Imag*(1.0-tDiffD0) + tC001Imag*tDiffD0; double tC01Imag = tC010Imag*(1.0-tDiffD0) + tC011Imag*tDiffD0; double tC10Imag = tC100Imag*(1.0-tDiffD0) + tC101Imag*tDiffD0; double tC11Imag = tC110Imag*(1.0-tDiffD0) + tC111Imag*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Imag = tC00Imag*(1.0-tDiffImF0) + tC01Imag*tDiffImF0; double tC1Imag = tC10Imag*(1.0-tDiffImF0) + tC11Imag*tDiffImF0; //interpolate along t (i.e. 
ReF0) tResultImag = tC0Imag*(1.0-tDiffReF0) + tC1Imag*tDiffReF0; //-------------------------------- hipDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag); return tReturnValue; } */
//________________________________________________________________________________________________________________
// Quadrilinear (4D) interpolation of the complex scattering length over the
// (ReF0, ImF0, D0, KStar) grid held in d_fScattLenRealSubVec / d_fScattLenImagSubVec.
// The sub-vectors carry only the two bracketing bins along ReF0/ImF0/D0 (hence the
// hard-coded sizes of 2); only the KStar axis is looked up in the full table.
__device__ hipDoubleComplex ScattLenInterpolate(double aReF0, double aImF0, double aD0, double aKStar)
{
  //---------------------------- grid geometry
  int tNbinsK = d_fScattLenInfo->nBinsK;
  // Sub-vector axes hold just the two bracketing bins
  // (full sizes would be d_fScattLenInfo->nBinsD0 and d_fScattLenInfo->nBinsImF0).
  int tNbinsD0 = 2;
  int tNbinsImF0 = 2;
  //----------------------------
  //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2
  double tBinWidthReF0 = d_fScattLenInfo->binWidthReF0;
  double tBinWidthImF0 = d_fScattLenInfo->binWidthImF0;
  double tBinWidthD0 = d_fScattLenInfo->binWidthD0;
  double tBinWidthK = d_fScattLenInfo->binWidthK;

  double tBin0CenterReF0 = GetInterpLowBinCenter(kScattLen,kReF0axis,aReF0);
  double tBin0CenterImF0 = GetInterpLowBinCenter(kScattLen,kImF0axis,aImF0);
  double tBin0CenterD0 = GetInterpLowBinCenter(kScattLen,kD0axis,aD0);
  double tBin0CenterK = GetInterpLowBinCenter(kScattLen,kKaxis,aKStar);

  // Low/high bin index per axis; ReF0/ImF0/D0 are always {0,1} in the sub-vector.
  int tBinsReF0[2] = {0, 1};
  int tBinsImF0[2] = {0, 1};
  int tBinsD0[2] = {0, 1};
  int tBinsK[2];
  tBinsK[0] = GetInterpLowBin(kScattLen,kKaxis,aKStar);
  tBinsK[1] = tBinsK[0] + 1;
  //--------------------------
  assert(tBinsK[0]>=0);
  assert(tBin0CenterK>0);

  // Fractional position of the query point inside the low bin, per axis.
  double tDiffReF0 = (aReF0 - tBin0CenterReF0)/tBinWidthReF0;
  double tDiffImF0 = (aImF0 - tBin0CenterImF0)/tBinWidthImF0;
  double tDiffD0 = (aD0 - tBin0CenterD0)/tBinWidthD0;
  double tDiffK = (aKStar - tBin0CenterK)/tBinWidthK;
  //--------------------------
  // Collapse the 16 hyper-cube corners one axis at a time: KStar, then D0,
  // then ImF0, then ReF0.  Pass 0 handles the real part, pass 1 the imaginary
  // part, matching the original real-then-imaginary evaluation order; the lerp
  // expressions are kept in the same form so results are bit-identical.
  double tResults[2] = {0., 0.};
  for(int iPart=0; iPart<2; iPart++)
  {
    const double* tVec = (iPart==0) ? d_fScattLenRealSubVec : d_fScattLenImagSubVec;

    double tCorner[2][2][2][2];
    for(int iRe=0; iRe<2; iRe++)
      for(int iIm=0; iIm<2; iIm++)
        for(int iD=0; iD<2; iD++)
          for(int iK=0; iK<2; iK++)
            tCorner[iRe][iIm][iD][iK] =
              tVec[tBinsReF0[iRe]*tNbinsImF0*tNbinsD0*tNbinsK
                 + tBinsImF0[iIm]*tNbinsD0*tNbinsK
                 + tBinsD0[iD]*tNbinsK
                 + tBinsK[iK]];

    //interpolate along z (i.e. KStar)
    double tAlongK[2][2][2];
    for(int iRe=0; iRe<2; iRe++)
      for(int iIm=0; iIm<2; iIm++)
        for(int iD=0; iD<2; iD++)
          tAlongK[iRe][iIm][iD] = tCorner[iRe][iIm][iD][0]*(1.0-tDiffK) + tCorner[iRe][iIm][iD][1]*tDiffK;

    //interpolate along y (i.e. D0)
    double tAlongD0[2][2];
    for(int iRe=0; iRe<2; iRe++)
      for(int iIm=0; iIm<2; iIm++)
        tAlongD0[iRe][iIm] = tAlongK[iRe][iIm][0]*(1.0-tDiffD0) + tAlongK[iRe][iIm][1]*tDiffD0;

    //interpolate along x (i.e. ImF0)
    double tAlongImF0[2];
    for(int iRe=0; iRe<2; iRe++)
      tAlongImF0[iRe] = tAlongD0[iRe][0]*(1.0-tDiffImF0) + tAlongD0[iRe][1]*tDiffImF0;

    //interpolate along t (i.e. ReF0)
    tResults[iPart] = tAlongImF0[0]*(1.0-tDiffReF0) + tAlongImF0[1]*tDiffReF0;
  }
  //--------------------------------
  hipDoubleComplex tReturnValue = make_cuDoubleComplex(tResults[0],tResults[1]);
  return tReturnValue;
}

//________________________________________________________________________________________________________________
// Sommerfeld parameter eta = 1/(k*aBohr) for the pair (k in 1/fm via hbarc).
__device__ double GetEta(double aKStar)
{
  double d_hbarc = 0.197327;
  double d_gBohrRadius = 75.23349845;
  //TODO figure out how to load hbarc and gBohrRadius into GPU
  //TODO figure out how to use Pi here
  //TODO figure out how to make bohr radius negative when needed
  return pow(((aKStar/d_hbarc)*d_gBohrRadius),-1);
}

//________________________________________________________________________________________________________________
// Gamow penetration factor 2*Pi*eta / (exp(2*Pi*eta) - 1).
__device__ double GetGamowFactor(double aKStar)
{
  double d_hbarc = 0.197327;
  double d_gBohrRadius = 75.23349845;
  //TODO figure out how to load hbarc and gBohrRadius into GPU
  //TODO figure out how to use Pi here
  //TODO figure out how to make bohr radius negative when needed
  double tTwoPiEta = pow(((aKStar/d_hbarc)*d_gBohrRadius),-1);
  tTwoPiEta *= 6.28318530718; //eta always comes with 2Pi here
  return tTwoPiEta*pow((exp(tTwoPiEta)-1),-1);
}

//________________________________________________________________________________________________________________
// Plane-wave factor exp(-i k·r) = cos(phase) - i*sin(phase), phase = (k/hbarc)*r*cos(theta).
__device__ hipDoubleComplex GetExpTerm(double aKStar, double aRStar, double aTheta)
{
  //TODO figure out how to load hbarc and gBohrRadius into GPU
  double d_hbarc = 0.197327;
  double tPhase = (aKStar/d_hbarc)*aRStar*cos(aTheta);
  hipDoubleComplex tExpTermCmplx = make_cuDoubleComplex(cos(tPhase), -sin(tPhase));
  return tExpTermCmplx;
}

//________________________________________________________________________________________________________________
__device__ double AssembleWfSquared(double aRStarMag, double aGamowFactor, hipDoubleComplex aExpTermCmplx, hipDoubleComplex aGTildeCmplx, hipDoubleComplex 
aHyperGeo1F1Cmplx, hipDoubleComplex aScattLenCmplx)
{
  // Assembles |Psi|^2 from its precomputed pieces:
  // Gamow * ( |1F1|^2 + |f|^2*|GTilde|^2/r^2 + 2*Re[exp(-ikr)*1F1*conj(f)*conj(GTilde)]/r ).
  hipDoubleComplex tGTildeCmplxConj = cuConj(aGTildeCmplx);
  hipDoubleComplex tScattLenCmplxConj = cuConj(aScattLenCmplx);
  // hipDoubleComplex tGamowFactor = make_cuDoubleComplex(aGamowFactor,0.); //cuda doesn't want to multiply double*double2
  //-------------Stupid cuda can only multiple/divide two at once
  //TODO test to see if there is an easier way to accomplish this
  double tMagSq_HyperGeo1F1 = cuCabs(aHyperGeo1F1Cmplx)*cuCabs(aHyperGeo1F1Cmplx);
  double tMagSq_ScattLen = cuCabs(tScattLenCmplxConj)*cuCabs(tScattLenCmplxConj);
  double tMagSq_GTilde = cuCabs(tGTildeCmplxConj)*cuCabs(tGTildeCmplxConj);

  // Cross term: only its real part enters the final sum.
  hipDoubleComplex tTerm1 = cuCmul(aExpTermCmplx,aHyperGeo1F1Cmplx);
  hipDoubleComplex tTerm2 = cuCmul(tScattLenCmplxConj,tGTildeCmplxConj);
  hipDoubleComplex tTerm12 = cuCmul(tTerm1,tTerm2);
  double tTerm12Real = cuCreal(tTerm12);
  double tTermFinal = tTerm12Real/aRStarMag;
/*
  hipDoubleComplex tRStarMagCmplx = make_cuDoubleComplex(aRStarMag,0.);
  hipDoubleComplex tTermFinalCmplx = cuCdiv(tTerm12,tRStarMagCmplx);
  double tTermFinal = cuCreal(tTermFinalCmplx);
*/
  double tResult = aGamowFactor*(tMagSq_HyperGeo1F1 + tMagSq_ScattLen*tMagSq_GTilde/(aRStarMag*aRStarMag) + 2.0*tTermFinal);
  return tResult;

/*
  hipDoubleComplex tResultComplex = tGamowFactor*( cuCabs(aHyperGeo1F1Cmplx)*cuCabs(aHyperGeo1F1Cmplx) + cuCabs(tScattLenCmplxConj)*cuCabs(tScattLenCmplxConj)*cuCabs(tGTildeCmplxConj)*cuCabs(tGTildeCmplxConj)/(aRStarMag*aRStarMag) + 2.*cuCreal(aExpTermCmplx*aHyperGeo1F1Cmplx*tScattLenCmplxConj*tGTildeCmplxConj/aRStarMag) );
  //TODO put in check to make sure there is no imaginary part
  // if(imag(tResultComplex) > std::numeric_limits< double >::min()) cout << "\t\t\t !!!!!!!!! Imaginary value in ParellelWaveFunction::InterpolateWfSquared !!!!!" << endl;
  // assert(imag(tResultComplex) < std::numeric_limits< double >::min());
  return cuCreal(tResultComplex);
*/
}

//________________________________________________________________________________________________________________
// |Psi|^2 with the scattering length taken from the interpolation tables
// (ScattLenInterpolate) rather than rebuilt analytically.
__device__ double InterpolateWfSquaredInterpScattLen(double aKStarMag, double aRStarMag, double aTheta, double aReF0, double aImF0, double aD0)
{
  double tGamow = GetGamowFactor(aKStarMag);
  hipDoubleComplex tExpTermCmplx = GetExpTerm(aKStarMag,aRStarMag,aTheta);
  hipDoubleComplex tGTildeCmplx, tHyperGeo1F1Cmplx, tScattLenCmplx;

  tGTildeCmplx = GTildeInterpolate(aKStarMag,aRStarMag);
  tHyperGeo1F1Cmplx = HyperGeo1F1Interpolate(aKStarMag,aRStarMag,aTheta);
  tScattLenCmplx = ScattLenInterpolate(aReF0,aImF0,aD0,aKStarMag);

  double tResult = AssembleWfSquared(aRStarMag,tGamow,tExpTermCmplx,tGTildeCmplx,tHyperGeo1F1Cmplx,tScattLenCmplx);
  return tResult;
}

//________________________________________________________________________________________________________________
// |Psi|^2 with the scattering amplitude built analytically from the effective-range
// expansion: 1/f = 1/f0 + 0.5*d0*k^2 - (2/aBohr)*chi, chi = h(k) + i*Gamow/(2*eta).
__device__ double InterpolateWfSquared(double aKStarMag, double aRStarMag, double aTheta, double aReF0, double aImF0, double aD0)
{
  double d_hbarc = 0.197327; //TODO
  double d_gBohrRadius = 75.23349845;
  hipDoubleComplex tRealUnity = make_cuDoubleComplex(1.0,0);

  double tGamow = GetGamowFactor(aKStarMag);
  hipDoubleComplex tExpTermCmplx = GetExpTerm(aKStarMag,aRStarMag,aTheta);
  hipDoubleComplex tGTildeCmplx, tHyperGeo1F1Cmplx, tScattLenCmplx;

  tGTildeCmplx = GTildeInterpolate(aKStarMag,aRStarMag);
  tHyperGeo1F1Cmplx = HyperGeo1F1Interpolate(aKStarMag,aRStarMag,aTheta);

  //---Build scatt len
  double tLednickyHFunction = LednickyHFunctionInterpolate(aKStarMag);
  double tImag = tGamow/(2.0*GetEta(aKStarMag));
  hipDoubleComplex tLednickyChi = make_cuDoubleComplex(tLednickyHFunction,tImag);

  hipDoubleComplex tF0 = make_cuDoubleComplex(aReF0,aImF0);
  hipDoubleComplex tInvF0 = cuCdiv(tRealUnity,tF0);
  double tKStar = aKStarMag/d_hbarc; // momentum in 1/fm
  double tTerm2 = 0.5*aD0*tKStar*tKStar;
  hipDoubleComplex tTerm2Complex 
= make_cuDoubleComplex(tTerm2,0); double tStupid = 2.0/d_gBohrRadius; hipDoubleComplex tMultFact = make_cuDoubleComplex(tStupid, 0); hipDoubleComplex tTerm3Complex = cuCmul(tMultFact,tLednickyChi); hipDoubleComplex tTerm12 = cuCadd(tInvF0,tTerm2Complex); hipDoubleComplex tInvScattLen = cuCsub(tTerm12,tTerm3Complex); tScattLenCmplx = cuCdiv(tRealUnity,tInvScattLen); //-------------------------- double tResult = AssembleWfSquared(aRStarMag,tGamow,tExpTermCmplx,tGTildeCmplx,tHyperGeo1F1Cmplx,tScattLenCmplx); return tResult; } //________________________________________________________________________________________________________________ __device__ bool CanInterpolate(double aKStar, double aRStar, double aTheta, double aReF0, double aImF0, double aD0) { if(aKStar < d_fScattLenInfo->minInterpK || aKStar > d_fScattLenInfo->maxInterpK) return false; if(aRStar < d_fGTildeInfo->minInterpR || aRStar > d_fGTildeInfo->maxInterpR) return false; if(aTheta < d_fHyperGeo1F1Info->minInterpTheta || aTheta > d_fHyperGeo1F1Info->maxInterpTheta) return false; if(aReF0 < d_fScattLenInfo->minInterpReF0 || aReF0 > d_fScattLenInfo->maxInterpReF0) return false; if(aImF0 < d_fScattLenInfo->minInterpImF0 || aImF0 > d_fScattLenInfo->maxInterpImF0) return false; if(aD0 < d_fScattLenInfo->minInterpD0 || aD0 > d_fScattLenInfo->maxInterpD0) return false; return true; } //________________________________________________________________________________________________________________ __device__ bool CanInterpolate(double aKStar, double aRStar, double aTheta) { if(aKStar < d_fGTildeInfo->minInterpK || aKStar > d_fGTildeInfo->maxInterpK) return false; if(aRStar < d_fGTildeInfo->minInterpR || aRStar > d_fGTildeInfo->maxInterpR) return false; if(aTheta < d_fHyperGeo1F1Info->minInterpTheta || aTheta > d_fHyperGeo1F1Info->maxInterpTheta) return false; return true; } //________________________________________________________________________________________________________________ __global__ void 
GetWfAverage(double *aKStarMag, double *aRStarMag, double *aTheta, double aReF0, double aImF0, double aD0, double *g_odata, bool aInterpScattLen)
{
  // One |Psi|^2 evaluation per thread, block-wise sum into g_odata[blockIdx.x].
  // Dynamic shared memory: blockDim.x doubles.
  // NOTE(review): the strided reduction below needs blockDim.x to be a power of
  // two (otherwise sdata[index+s] can read past blockDim.x), and there is no
  // i < n guard, so the grid must exactly cover the input arrays — confirm at call site.
  // int idx = threadIdx.x + blockIdx.x*blockDim.x;
  extern __shared__ double sdata[];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;

  if(aInterpScattLen) sdata[tid] = InterpolateWfSquaredInterpScattLen(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0,aImF0,aD0);
  else sdata[tid] = InterpolateWfSquared(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0,aImF0,aD0);
  __syncthreads();

  //do reduction in shared mem
  //strided
  for(unsigned int s=1; s<blockDim.x; s*=2)
  {
    int index = 2*s*tid;
    if(index < blockDim.x)
    {
      sdata[index] += sdata[index+s];
    }
    __syncthreads();
  }
/*
  //sequential
  for(unsigned int s=blockDim.x/2; s>0; s>>=1) //>>= is bitwise shift, here reducing s in powers of 2
  {
    if(tid < s)
    {
      sdata[tid] += sdata[tid + s];
    }
    __syncthreads();
  }
*/
  //write result for this block to global mem
  if(tid == 0) g_odata[blockIdx.x] = sdata[0];
}

//________________________________________________________________________________________________________________
// Same block-sum kernel as GetWfAverage, but reading pairs starting at
// aOffsetInput and writing block sums starting at aOffsetOutput (one k*-bin slice).
__global__ void GetEntireCf(double *aKStarMag, double *aRStarMag, double *aTheta, double aReF0, double aImF0, double aD0, double *g_odata, int aOffsetInput, int aOffsetOutput, bool aInterpScattLen)
{
  // int idx = threadIdx.x + blockIdx.x*blockDim.x;
  extern __shared__ double sdata[];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*blockDim.x + threadIdx.x + aOffsetInput;

  if(aInterpScattLen) sdata[tid] = InterpolateWfSquaredInterpScattLen(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0,aImF0,aD0);
  else sdata[tid] = InterpolateWfSquared(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0,aImF0,aD0);
  __syncthreads();

  //do reduction in shared mem
  //strided (NOTE(review): assumes power-of-two blockDim.x, as above)
  for(unsigned int s=1; s<blockDim.x; s*=2)
  {
    int index = 2*s*tid;
    if(index < blockDim.x)
    {
      sdata[index] += sdata[index+s];
    }
    __syncthreads();
  }
/*
  //sequential
  for(unsigned int s=blockDim.x/2; s>0; s>>=1) //>>= is bitwise shift, here reducing s in powers of 2
  {
    if(tid < s)
    {
      sdata[tid] += sdata[tid + s];
    }
    __syncthreads();
  }
*/
  //write result for this block to global mem
  if(tid == 0) g_odata[blockIdx.x+aOffsetOutput] = sdata[0];
}

//________________________________________________________________________________________________________________
// Spin-averaged version: evaluates both the singlet (aReF0s,aImF0s,aD0s) and
// triplet (aReF0t,aImF0t,aD0t) channels and combines them with the statistical
// weights 1/4 and 3/4 before the block reduction.
__global__ void GetEntireCfComplete(double *aKStarMag, double *aRStarMag, double *aTheta, double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t, double *g_odata, int aOffsetInput, int aOffsetOutput, bool aInterpScattLen)
{
  // int idx = threadIdx.x + blockIdx.x*blockDim.x;
  extern __shared__ double sdata[];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*blockDim.x + threadIdx.x + aOffsetInput;

  double tWfSqSinglet, tWfSqTriplet, tWfSq;
  if(aInterpScattLen)
  {
    tWfSqSinglet = InterpolateWfSquaredInterpScattLen(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0s,aImF0s,aD0s);
    tWfSqTriplet = InterpolateWfSquaredInterpScattLen(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0t,aImF0t,aD0t);
  }
  else
  {
    tWfSqSinglet = InterpolateWfSquared(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0s,aImF0s,aD0s);
    tWfSqTriplet = InterpolateWfSquared(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0t,aImF0t,aD0t);
  }
  tWfSq = 0.25*tWfSqSinglet + 0.75*tWfSqTriplet; // singlet:triplet = 1:3
  sdata[tid] = tWfSq;
  __syncthreads();

  //do reduction in shared mem
  //strided (NOTE(review): assumes power-of-two blockDim.x, as above)
  for(unsigned int s=1; s<blockDim.x; s*=2)
  {
    int index = 2*s*tid;
    if(index < blockDim.x)
    {
      sdata[index] += sdata[index+s];
    }
    __syncthreads();
  }
/*
  //sequential
  for(unsigned int s=blockDim.x/2; s>0; s>>=1) //>>= is bitwise shift, here reducing s in powers of 2
  {
    if(tid < s)
    {
      sdata[tid] += sdata[tid + s];
    }
    __syncthreads();
  }
*/
  //write result for this block to global mem
  if(tid == 0) g_odata[blockIdx.x+aOffsetOutput] = sdata[0];
}

//________________________________________________________________________________________________________________
// Flat index of pair aPair in k*-bin aBinK of analysis aAnalysis inside the
// packed d_fPairSample4dVec (nElementsPerPair doubles per pair).
__device__ int GetSamplePairOffset(int aAnalysis, int aBinK, int aPair)
{
  int 
tNBinsK = d_fPairSample4dVecInfo->nBinsK;
  int tNPairsPerBin = d_fPairSample4dVecInfo->nPairsPerBin;
  int tNElementsPerPair = d_fPairSample4dVecInfo->nElementsPerPair;

  int tIndex = aPair*tNElementsPerPair + aBinK*tNPairsPerBin*tNElementsPerPair + aAnalysis*tNBinsK*tNPairsPerBin*tNElementsPerPair;
  return tIndex;
}

//________________________________________________________________________________________________________________
// True iff (k*, r*, theta) lie inside the HyperGeo1F1 interpolation ranges
// (this overload checks only d_fHyperGeo1F1Info, unlike CanInterpolate).
__device__ bool CanInterpPair(double aKStar, double aRStar, double aTheta)
{
  if(aKStar < d_fHyperGeo1F1Info->minInterpK) return false;
  if(aKStar > d_fHyperGeo1F1Info->maxInterpK) return false;

  if(aRStar < d_fHyperGeo1F1Info->minInterpR) return false;
  if(aRStar > d_fHyperGeo1F1Info->maxInterpR) return false;

  if(aTheta < d_fHyperGeo1F1Info->minInterpTheta) return false;
  if(aTheta > d_fHyperGeo1F1Info->maxInterpTheta) return false;

  return true;
}

//________________________________________________________________________________________________________________
// Spin-averaged |Psi|^2 over the pre-sampled static pairs in d_fPairSample4dVec.
// Each pair contributes only if inside the interpolation region; the kernel
// accumulates per block both the |Psi|^2 sum (g_odata) and the count of
// accepted pairs (g_odata2) so the caller can normalize.
// Dynamic shared memory: blockDim.x * 2 doubles (value, accept-flag).
__global__ void GetEntireCfCompletewStaticPairs(double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t, double *g_odata, double *g_odata2, int aAnalysisNumber, int aBinKNumber, int aOffsetOutput, bool aInterpScattLen)
{
  // int idx = threadIdx.x + blockIdx.x*blockDim.x;
  extern __shared__ double sdata2[][2];

  unsigned int tid = threadIdx.x;
  unsigned int tPairNumber = blockIdx.x*blockDim.x + threadIdx.x;
  unsigned int i = GetSamplePairOffset(aAnalysisNumber,aBinKNumber,tPairNumber);

  double tWfSqSinglet, tWfSqTriplet, tWfSq;
  // Pair layout per GetSamplePairOffset: [i]=k*, [i+1]=r*, [i+2]=theta.
  if(CanInterpPair(d_fPairSample4dVec[i],d_fPairSample4dVec[i+1],d_fPairSample4dVec[i+2]))
  {
    if(aInterpScattLen)
    {
      tWfSqSinglet = InterpolateWfSquaredInterpScattLen(d_fPairSample4dVec[i],d_fPairSample4dVec[i+1],d_fPairSample4dVec[i+2],aReF0s,aImF0s,aD0s);
      tWfSqTriplet = InterpolateWfSquaredInterpScattLen(d_fPairSample4dVec[i],d_fPairSample4dVec[i+1],d_fPairSample4dVec[i+2],aReF0t,aImF0t,aD0t);
    }
    else
    {
      tWfSqSinglet = InterpolateWfSquared(d_fPairSample4dVec[i],d_fPairSample4dVec[i+1],d_fPairSample4dVec[i+2],aReF0s,aImF0s,aD0s);
      tWfSqTriplet = InterpolateWfSquared(d_fPairSample4dVec[i],d_fPairSample4dVec[i+1],d_fPairSample4dVec[i+2],aReF0t,aImF0t,aD0t);
    }
    tWfSq = 0.25*tWfSqSinglet + 0.75*tWfSqTriplet;
    sdata2[tid][0] = tWfSq;
    sdata2[tid][1] = 1.;
  }
  else
  {
    sdata2[tid][0] = 0.;
    sdata2[tid][1] = 0.;
  }
  __syncthreads();

  //do reduction in shared mem
  //strided (NOTE(review): assumes power-of-two blockDim.x)
  for(unsigned int s=1; s<blockDim.x; s*=2)
  {
    int index = 2*s*tid;
    if(index < blockDim.x)
    {
      sdata2[index][0] += sdata2[index+s][0];
      sdata2[index][1] += sdata2[index+s][1];
    }
    __syncthreads();
  }
/*
  //sequential
  for(unsigned int s=blockDim.x/2; s>0; s>>=1) //>>= is bitwise shift, here reducing s in powers of 2
  {
    if(tid < s)
    {
      sdata[tid] += sdata[tid + s];
    }
    __syncthreads();
  }
*/
  //write result for this block to global mem
  if(tid == 0)
  {
    g_odata[blockIdx.x+aOffsetOutput] = sdata2[0][0];
    g_odata2[blockIdx.x+aOffsetOutput] = sdata2[0][1];
  }
}

//________________________________________________________________________________________________________________
// Seeds one RNG state per thread (sequence = global thread index + aOffset).
__global__ void RandInit(hiprandState_t *state, unsigned long seed, int aOffset)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x + aOffset;
  hiprand_init(seed, idx, 0, &state[idx]);
}

//________________________________________________________________________________________________________________
// Monte-Carlo version: draws a k* vector from the stored pair sample and a
// Gaussian r* (width aR per component), rejecting draws outside the
// interpolation region, then block-sums the spin-averaged |Psi|^2.
__global__ void GetEntireCfComplete2(hiprandState_t *state1, hiprandState_t *state2, hiprandState_t *state3, double aR, double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t, double *g_odata, int aKbin, int aOffsetInput, int aOffsetOutput, double* aCPUPairs, bool aInterpScattLen)
{
  // int idx = threadIdx.x + blockIdx.x*blockDim.x;
  extern __shared__ double sdata[];

  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x*blockDim.x + threadIdx.x + aOffsetInput;

  bool tPass = false;
  int tNPairs = d_fPairKStar3dVecInfo->nPairsPerBin[aKbin];
  int tSimPairLocalIndex, 
tSimPairGlobalIndex;
  double tKStarOut, tKStarSide, tKStarLong, tKStarMagSq, tKStarMag;
  double tRStarOut, tRStarSide, tRStarLong, tRStarMagSq, tRStarMag;
  double tCosTheta, tTheta;
  // NOTE(review): aCPUPairs (from the kernel signature) is never referenced in
  // this body — presumably intended for returning rejected pairs; see TODO below.
  //TODO need to be able to return all failing pairs back to CPU for mathematica processing
  // Rejection sampling: keep drawing until the (k*, r*, theta) triple lies
  // inside the interpolation region for the requested channel(s).
  while(!tPass)
  {
    tSimPairLocalIndex = tNPairs*hiprand_uniform_double(&state1[i]);
    tSimPairGlobalIndex = d_fPairKStar3dVecInfo->binOffset[aKbin] + 4*tSimPairLocalIndex;

    tKStarOut = d_fPairKStar3dVec[tSimPairGlobalIndex+1]; //note, 0th element is KStarMag
    tKStarSide = d_fPairKStar3dVec[tSimPairGlobalIndex+2]; //note, 0th element is KStarMag
    tKStarLong = d_fPairKStar3dVec[tSimPairGlobalIndex+3]; //note, 0th element is KStarMag
    tKStarMagSq = tKStarOut*tKStarOut + tKStarSide*tKStarSide + tKStarLong*tKStarLong;
    tKStarMag = sqrt(tKStarMagSq);

    // Gaussian source: each r* component drawn with width aR.
    tRStarOut = aR*hiprand_normal_double(&state1[i]);
    tRStarSide = aR*hiprand_normal_double(&state2[i]);
    tRStarLong = aR*hiprand_normal_double(&state3[i]);
    tRStarMagSq = tRStarOut*tRStarOut + tRStarSide*tRStarSide + tRStarLong*tRStarLong;
    tRStarMag = sqrt(tRStarMagSq);

    tCosTheta = (tKStarOut*tRStarOut + tKStarSide*tRStarSide + tKStarLong*tRStarLong)/(tKStarMag*tRStarMag);
    tTheta = acos(tCosTheta);

    if(aInterpScattLen)
    {
      bool tPass1 = CanInterpolate(tKStarMag,tRStarMag,tTheta,aReF0s,aImF0s,aD0s);
      bool tPass2 = CanInterpolate(tKStarMag,tRStarMag,tTheta,aReF0t,aImF0t,aD0t);
      if(tPass1 && tPass2) tPass = true;
      else tPass = false;
    }
    else tPass = CanInterpolate(tKStarMag,tRStarMag,tTheta);
  }

  double tWfSqSinglet, tWfSqTriplet, tWfSq;
  if(aInterpScattLen)
  {
    tWfSqSinglet = InterpolateWfSquaredInterpScattLen(tKStarMag,tRStarMag,tTheta,aReF0s,aImF0s,aD0s);
    tWfSqTriplet = InterpolateWfSquaredInterpScattLen(tKStarMag,tRStarMag,tTheta,aReF0t,aImF0t,aD0t);
  }
  else
  {
    tWfSqSinglet = InterpolateWfSquared(tKStarMag,tRStarMag,tTheta,aReF0s,aImF0s,aD0s);
    tWfSqTriplet = InterpolateWfSquared(tKStarMag,tRStarMag,tTheta,aReF0t,aImF0t,aD0t);
  }
  tWfSq = 0.25*tWfSqSinglet + 0.75*tWfSqTriplet; // singlet:triplet = 1:3
  sdata[tid] = tWfSq;
  __syncthreads();

  //do reduction in shared mem
  //strided (NOTE(review): assumes power-of-two blockDim.x)
  for(unsigned int s=1; s<blockDim.x; s*=2)
  {
    int index = 2*s*tid;
    if(index < blockDim.x)
    {
      sdata[index] += sdata[index+s];
    }
    __syncthreads();
  }
/*
  //sequential
  for(unsigned int s=blockDim.x/2; s>0; s>>=1) //>>= is bitwise shift, here reducing s in powers of 2
  {
    if(tid < s)
    {
      sdata[tid] += sdata[tid + s];
    }
    __syncthreads();
  }
*/
  //write result for this block to global mem
  if(tid == 0) g_odata[blockIdx.x+aOffsetOutput] = sdata[0];
}

//________________________________________________________________________________________________________________
//****************************************************************************************************************
//________________________________________________________________________________________________________________
// Host-side wrapper object. Enables mapped host memory at construction.
ParallelWaveFunction::ParallelWaveFunction(bool aInterpScattLen, int aNThreadsPerBlock, int aNBlocks):
  fInterpScattLen(aInterpScattLen),
  fNThreadsPerBlock(aNThreadsPerBlock),
  fNBlocks(aNBlocks)
{
  hipSetDeviceFlags(hipDeviceMapHost);
}

//________________________________________________________________________________________________________________
// Frees all managed device tables allocated by the Load* methods.
ParallelWaveFunction::~ParallelWaveFunction()
{
  checkCudaErrors(hipFree(d_fPairKStar3dVec));
  checkCudaErrors(hipFree(d_fPairKStar3dVecInfo));

  checkCudaErrors(hipFree(d_fLednickyHFunction));

  checkCudaErrors(hipFree(d_fGTildeReal));
  checkCudaErrors(hipFree(d_fGTildeImag));
  checkCudaErrors(hipFree(d_fGTildeInfo));

  checkCudaErrors(hipFree(d_fHyperGeo1F1Real));
  checkCudaErrors(hipFree(d_fHyperGeo1F1Imag));
  checkCudaErrors(hipFree(d_fHyperGeo1F1Info));

  // checkCudaErrors(hipFree(d_fCoulombScatteringLengthReal));
  // checkCudaErrors(hipFree(d_fCoulombScatteringLengthImag));
  checkCudaErrors(hipFree(d_fScattLenInfo));
}

//________________________________________________________________________________________________________________
// Copies the (analysis, k*-bin, pair, element) sample into one flat managed
// array d_fPairSample4dVec, and mirrors the bin info on host and device.
void ParallelWaveFunction::LoadPairSample4dVec(td4dVec &aPairSample4dVec, 
BinInfoSamplePairs &aBinInfo)
{
  //------ Load bin info first ---------------------------
  checkCudaErrors(hipMallocManaged(&d_fPairSample4dVecInfo, sizeof(BinInfoSamplePairs)));
  d_fPairSample4dVecInfo->nAnalyses = aBinInfo.nAnalyses;
  d_fPairSample4dVecInfo->nBinsK = aBinInfo.nBinsK;
  d_fPairSample4dVecInfo->nPairsPerBin = aBinInfo.nPairsPerBin;
  d_fPairSample4dVecInfo->minK = aBinInfo.minK;
  d_fPairSample4dVecInfo->maxK = aBinInfo.maxK;
  d_fPairSample4dVecInfo->binWidthK = aBinInfo.binWidthK;
  d_fPairSample4dVecInfo->nElementsPerPair = aBinInfo.nElementsPerPair;
  //------------------------------------------------------
  // Host-side mirror of the same bin info.
  fSamplePairsBinInfo.nAnalyses = aBinInfo.nAnalyses;
  fSamplePairsBinInfo.nBinsK = aBinInfo.nBinsK;
  fSamplePairsBinInfo.nPairsPerBin = aBinInfo.nPairsPerBin;
  fSamplePairsBinInfo.minK = aBinInfo.minK;
  fSamplePairsBinInfo.maxK = aBinInfo.maxK;
  fSamplePairsBinInfo.binWidthK = aBinInfo.binWidthK;
  fSamplePairsBinInfo.nElementsPerPair = aBinInfo.nElementsPerPair;
  //------------------------------------------------------
  assert((int)aPairSample4dVec.size() == d_fPairSample4dVecInfo->nAnalyses);
  assert((int)aPairSample4dVec[0].size() == d_fPairSample4dVecInfo->nBinsK);
  assert(d_fPairSample4dVecInfo->nElementsPerPair == 3);
  //------------------------------------------------------
  int tTotalPairs = 0;
  for(int iAnaly=0; iAnaly<(int)aPairSample4dVec.size(); iAnaly++)
  {
    for(int iK=0; iK<(int)aPairSample4dVec[iAnaly].size(); iK++) tTotalPairs += aPairSample4dVec[iAnaly][iK].size();
  }
  int tSize = tTotalPairs*fSamplePairsBinInfo.nElementsPerPair*sizeof(double);
  checkCudaErrors(hipMallocManaged(&d_fPairSample4dVec, tSize));

  // Flatten: 3 doubles per pair, pairs contiguous within a k*-bin, bins within an analysis.
  int tIndex=0;
  for(int iAnaly=0; iAnaly<(int)aPairSample4dVec.size(); iAnaly++)
  {
    for(int iK=0; iK<(int)aPairSample4dVec[iAnaly].size(); iK++)
    {
      for(int iPair=0; iPair<(int)aPairSample4dVec[iAnaly][iK].size(); iPair++)
      {
        d_fPairSample4dVec[tIndex] = aPairSample4dVec[iAnaly][iK][iPair][0];
        d_fPairSample4dVec[tIndex+1] = aPairSample4dVec[iAnaly][iK][iPair][1];
        d_fPairSample4dVec[tIndex+2] = aPairSample4dVec[iAnaly][iK][iPair][2];
        tIndex += 3;
      }
    }
  }
}

//________________________________________________________________________________________________________________
// Rescales the r* element of every stored sample pair by aScaleFactor.
// The r* component sits at position i%3 == 1 within each 3-double pair.
void ParallelWaveFunction::UpdatePairSampleRadii(double aScaleFactor)
{
  //TODO make this more general, and probably is better way to do this
  int tTotalEntries = d_fPairSample4dVecInfo->nAnalyses * d_fPairSample4dVecInfo->nBinsK * d_fPairSample4dVecInfo->nPairsPerBin * d_fPairSample4dVecInfo->nElementsPerPair;
  for(int i=0; i<tTotalEntries; i++)
  {
    if(i%3 == 1) d_fPairSample4dVec[i] *= aScaleFactor;
  }
}

//________________________________________________________________________________________________________________
// Flattens the per-k*-bin pair list (4 doubles per pair: |k*|, out, side, long)
// into d_fPairKStar3dVec, with per-bin counts/offsets in d_fPairKStar3dVecInfo.
void ParallelWaveFunction::LoadPairKStar3dVec(td3dVec &aPairKStar3dVec, BinInfoKStar &aBinInfo)
{
  //------ Load bin info first ---------------------------
  checkCudaErrors(hipMallocManaged(&d_fPairKStar3dVecInfo, sizeof(BinInfoKStar)));
  d_fPairKStar3dVecInfo->nBinsK = aBinInfo.nBinsK;
  d_fPairKStar3dVecInfo->minK = aBinInfo.minK;
  d_fPairKStar3dVecInfo->maxK = aBinInfo.maxK;
  d_fPairKStar3dVecInfo->binWidthK = aBinInfo.binWidthK;
  for(int i=0; i<d_fPairKStar3dVecInfo->nBinsK; i++)
  {
    d_fPairKStar3dVecInfo->nPairsPerBin[i] = aBinInfo.nPairsPerBin[i];
    d_fPairKStar3dVecInfo->binOffset[i] = aBinInfo.binOffset[i];
  }
  //------------------------------------------------------
  int tNbinsK = aPairKStar3dVec.size();
  assert(tNbinsK == d_fPairKStar3dVecInfo->nBinsK);

  // Cross-check the caller-supplied per-bin counts against the vector itself.
  int tNPairsTotal=0;
  for(int i=0; i<tNbinsK; i++) tNPairsTotal += aPairKStar3dVec[i].size();
  int tNPairsTotal2=0;
  for(int i=0; i<tNbinsK; i++) tNPairsTotal2 += d_fPairKStar3dVecInfo->nPairsPerBin[i];
  assert(tNPairsTotal == tNPairsTotal2);

  assert(aPairKStar3dVec[0][0].size() == 4); //all should have the same size, but maybe input a more thorough check

  int tSize = tNPairsTotal*4*sizeof(double);
  checkCudaErrors(hipMallocManaged(&d_fPairKStar3dVec, tSize));

  int tIndex=0;
  int tIndex2=0;
  int tOffset=0;
  for(int iK=0; 
iK<tNbinsK; iK++) { for(int iPair=0; iPair<(int)aPairKStar3dVec[iK].size(); iPair++) { tOffset = d_fPairKStar3dVecInfo->binOffset[iK]; tIndex2 = tOffset + 4*iPair; assert(tIndex2 == tIndex); d_fPairKStar3dVec[tIndex] = aPairKStar3dVec[iK][iPair][0]; d_fPairKStar3dVec[tIndex+1] = aPairKStar3dVec[iK][iPair][1]; d_fPairKStar3dVec[tIndex+2] = aPairKStar3dVec[iK][iPair][2]; d_fPairKStar3dVec[tIndex+3] = aPairKStar3dVec[iK][iPair][3]; tIndex+=4; } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadLednickyHFunction(td1dVec &aHFunc) { int tNbinsK = aHFunc.size(); int tSize = tNbinsK*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fLednickyHFunction, tSize)); for(int iK=0; iK<tNbinsK; iK++) { d_fLednickyHFunction[iK] = aHFunc[iK]; } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeReal(td2dVec &aGTildeReal) { int tNbinsK = aGTildeReal.size(); int tNbinsR = aGTildeReal[0].size(); int tSize = tNbinsK*tNbinsR*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fGTildeReal, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { tIndex = iR + iK*tNbinsR; d_fGTildeReal[tIndex] = aGTildeReal[iK][iR]; } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeImag(td2dVec &aGTildeImag) { int tNbinsK = aGTildeImag.size(); int tNbinsR = aGTildeImag[0].size(); int tSize = tNbinsK*tNbinsR*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fGTildeImag, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { tIndex = iR + iK*tNbinsR; d_fGTildeImag[tIndex] = aGTildeImag[iK][iR]; } } } //________________________________________________________________________________________________________________ void 
ParallelWaveFunction::LoadHyperGeo1F1Real(td3dVec &aHyperGeo1F1Real) { int tNbinsK = aHyperGeo1F1Real.size(); int tNbinsR = aHyperGeo1F1Real[0].size(); int tNbinsTheta = aHyperGeo1F1Real[0][0].size(); int tSize = tNbinsK*tNbinsR*tNbinsTheta*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fHyperGeo1F1Real, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { for(int iTheta=0; iTheta<tNbinsTheta; iTheta++) { tIndex = iTheta + iR*tNbinsTheta + iK*tNbinsTheta*tNbinsR; d_fHyperGeo1F1Real[tIndex] = aHyperGeo1F1Real[iK][iR][iTheta]; } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadHyperGeo1F1Imag(td3dVec &aHyperGeo1F1Imag) { int tNbinsK = aHyperGeo1F1Imag.size(); int tNbinsR = aHyperGeo1F1Imag[0].size(); int tNbinsTheta = aHyperGeo1F1Imag[0][0].size(); int tSize = tNbinsK*tNbinsR*tNbinsTheta*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fHyperGeo1F1Imag, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { for(int iTheta=0; iTheta<tNbinsTheta; iTheta++) { tIndex = iTheta + iR*tNbinsTheta + iK*tNbinsTheta*tNbinsR; d_fHyperGeo1F1Imag[tIndex] = aHyperGeo1F1Imag[iK][iR][iTheta]; } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenReal(td4dVec &aScattLenReal) { int tNbinsReF0 = aScattLenReal.size(); int tNbinsImF0 = aScattLenReal[0].size(); int tNbinsD0 = aScattLenReal[0][0].size(); int tNbinsK = aScattLenReal[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fCoulombScatteringLengthReal, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + 
iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fCoulombScatteringLengthReal[tIndex] = aScattLenReal[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenImag(td4dVec &aScattLenImag) { int tNbinsReF0 = aScattLenImag.size(); int tNbinsImF0 = aScattLenImag[0].size(); int tNbinsD0 = aScattLenImag[0][0].size(); int tNbinsK = aScattLenImag[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fCoulombScatteringLengthImag, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fCoulombScatteringLengthImag[tIndex] = aScattLenImag[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenRealSub(td4dVec &aScattLenReal) { int tNbinsReF0 = aScattLenReal.size(); int tNbinsImF0 = aScattLenReal[0].size(); int tNbinsD0 = aScattLenReal[0][0].size(); int tNbinsK = aScattLenReal[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fScattLenRealSubVec, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fScattLenRealSubVec[tIndex] = aScattLenReal[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void 
ParallelWaveFunction::LoadScattLenImagSub(td4dVec &aScattLenImag) { int tNbinsReF0 = aScattLenImag.size(); int tNbinsImF0 = aScattLenImag[0].size(); int tNbinsD0 = aScattLenImag[0][0].size(); int tNbinsK = aScattLenImag[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fScattLenImagSubVec, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fScattLenImagSubVec[tIndex] = aScattLenImag[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::UnLoadScattLenRealSub() { checkCudaErrors(hipFree(d_fScattLenRealSubVec)); } //________________________________________________________________________________________________________________ void ParallelWaveFunction::UnLoadScattLenImagSub() { checkCudaErrors(hipFree(d_fScattLenImagSubVec)); } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeInfo(BinInfoGTilde &aBinInfo) { checkCudaErrors(hipMallocManaged(&d_fGTildeInfo, sizeof(BinInfoGTilde))); d_fGTildeInfo->nBinsK = aBinInfo.nBinsK; d_fGTildeInfo->nBinsR = aBinInfo.nBinsR; d_fGTildeInfo->binWidthK = aBinInfo.binWidthK; d_fGTildeInfo->binWidthR = aBinInfo.binWidthR; d_fGTildeInfo->minK = aBinInfo.minK; d_fGTildeInfo->maxK = aBinInfo.maxK; d_fGTildeInfo->minR = aBinInfo.minR; d_fGTildeInfo->maxR = aBinInfo.maxR; d_fGTildeInfo->minInterpK = aBinInfo.minInterpK; d_fGTildeInfo->maxInterpK = aBinInfo.maxInterpK; d_fGTildeInfo->minInterpR = aBinInfo.minInterpR; d_fGTildeInfo->maxInterpR = aBinInfo.maxInterpR; } 
//________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadHyperGeo1F1Info(BinInfoHyperGeo1F1 &aBinInfo) { checkCudaErrors(hipMallocManaged(&d_fHyperGeo1F1Info, sizeof(BinInfoHyperGeo1F1))); d_fHyperGeo1F1Info->nBinsK = aBinInfo.nBinsK; d_fHyperGeo1F1Info->nBinsR = aBinInfo.nBinsR; d_fHyperGeo1F1Info->nBinsTheta = aBinInfo.nBinsTheta; d_fHyperGeo1F1Info->binWidthK = aBinInfo.binWidthK; d_fHyperGeo1F1Info->binWidthR = aBinInfo.binWidthR; d_fHyperGeo1F1Info->binWidthTheta = aBinInfo.binWidthTheta; d_fHyperGeo1F1Info->minK = aBinInfo.minK; d_fHyperGeo1F1Info->maxK = aBinInfo.maxK; d_fHyperGeo1F1Info->minR = aBinInfo.minR; d_fHyperGeo1F1Info->maxR = aBinInfo.maxR; d_fHyperGeo1F1Info->minTheta = aBinInfo.minTheta; d_fHyperGeo1F1Info->maxTheta = aBinInfo.maxTheta; d_fHyperGeo1F1Info->minInterpK = aBinInfo.minInterpK; d_fHyperGeo1F1Info->maxInterpK = aBinInfo.maxInterpK; d_fHyperGeo1F1Info->minInterpR = aBinInfo.minInterpR; d_fHyperGeo1F1Info->maxInterpR = aBinInfo.maxInterpR; d_fHyperGeo1F1Info->minInterpTheta = aBinInfo.minInterpTheta; d_fHyperGeo1F1Info->maxInterpTheta = aBinInfo.maxInterpTheta; } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenInfo(BinInfoScattLen &aBinInfo) { checkCudaErrors(hipMallocManaged(&d_fScattLenInfo, sizeof(BinInfoScattLen))); d_fScattLenInfo->nBinsReF0 = aBinInfo.nBinsReF0; d_fScattLenInfo->nBinsImF0 = aBinInfo.nBinsImF0; d_fScattLenInfo->nBinsD0 = aBinInfo.nBinsD0; d_fScattLenInfo->nBinsK = aBinInfo.nBinsK; d_fScattLenInfo->binWidthReF0 = aBinInfo.binWidthReF0; d_fScattLenInfo->binWidthImF0 = aBinInfo.binWidthImF0; d_fScattLenInfo->binWidthD0 = aBinInfo.binWidthD0; d_fScattLenInfo->binWidthK = aBinInfo.binWidthK; d_fScattLenInfo->minReF0 = aBinInfo.minReF0; d_fScattLenInfo->maxReF0 = aBinInfo.maxReF0; d_fScattLenInfo->minImF0 = aBinInfo.minImF0; 
d_fScattLenInfo->maxImF0 = aBinInfo.maxImF0; d_fScattLenInfo->minD0 = aBinInfo.minD0; d_fScattLenInfo->maxD0 = aBinInfo.maxD0; d_fScattLenInfo->minK = aBinInfo.minK; d_fScattLenInfo->maxK = aBinInfo.maxK; d_fScattLenInfo->minInterpReF0 = aBinInfo.minInterpReF0; d_fScattLenInfo->maxInterpReF0 = aBinInfo.maxInterpReF0; d_fScattLenInfo->minInterpImF0 = aBinInfo.minInterpImF0; d_fScattLenInfo->maxInterpImF0 = aBinInfo.maxInterpImF0; d_fScattLenInfo->minInterpD0 = aBinInfo.minInterpD0; d_fScattLenInfo->maxInterpD0 = aBinInfo.maxInterpD0; d_fScattLenInfo->minInterpK = aBinInfo.minInterpK; d_fScattLenInfo->maxInterpK = aBinInfo.maxInterpK; } //________________________________________________________________________________________________________________ //double* ParallelWaveFunction::RunInterpolateWfSquared(td2dVec &aPairs, double aReF0, double aImF0, double aD0) vector<double> ParallelWaveFunction::RunInterpolateWfSquared(td2dVec &aPairs, double aReF0, double aImF0, double aD0) { int tNPairs = aPairs.size(); int tSize = tNPairs*sizeof(double); int tSizeShared = fNThreadsPerBlock*sizeof(double); int tSizeOut = fNBlocks*sizeof(double); //---Host arrays and allocations double * h_KStarMag; double * h_RStarMag; double * h_Theta; double * h_WfSquared; checkCudaErrors(hipMallocManaged(&h_KStarMag, tSize)); checkCudaErrors(hipMallocManaged(&h_RStarMag, tSize)); checkCudaErrors(hipMallocManaged(&h_Theta, tSize)); checkCudaErrors(hipMallocManaged(&h_WfSquared, tSizeOut)); for(int i=0; i<tNPairs; i++) { h_KStarMag[i] = aPairs[i][0]; h_RStarMag[i] = aPairs[i][1]; h_Theta[i] = aPairs[i][2]; } //----------Run the kernel----------------------------------------------- GpuTimer timer; timer.Start(); hipLaunchKernelGGL(( GetWfAverage), dim3(fNBlocks),dim3(fNThreadsPerBlock),tSizeShared, 0, h_KStarMag,h_RStarMag,h_Theta,aReF0,aImF0,aD0,h_WfSquared,fInterpScattLen); timer.Stop(); std::cout << "InterpolateWfSquared kernel finished in " << timer.Elapsed() << " ms" << std::endl; //The 
following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipFree(h_KStarMag)); checkCudaErrors(hipFree(h_RStarMag)); checkCudaErrors(hipFree(h_Theta)); // return h_WfSquared; vector<double> tReturnVec(tNPairs); for(int i=0; i<fNBlocks; i++) { tReturnVec[i] = h_WfSquared[i]; // cout << "i = " << i << endl; // cout << "h_WfSquared[i] = " << h_WfSquared[i] << endl; // cout << "tReturnVec[i] = " << tReturnVec[i] << endl << endl; } // checkCudaErrors(hipHostFree(h_WfSquared)); checkCudaErrors(hipFree(h_WfSquared)); return tReturnVec; } //________________________________________________________________________________________________________________ vector<double> ParallelWaveFunction::RunInterpolateEntireCf(td3dVec &aPairs, double aReF0, double aImF0, double aD0) { // GpuTimer timerPre; // timerPre.Start(); int tNBins = aPairs.size(); int tNPairsPerBin = aPairs[0].size(); //TODO all bins should have equal number of pairs int tSizeInput = tNBins*tNPairsPerBin*sizeof(double); int tSizeOutput = tNBins*fNBlocks*sizeof(double); //the kernel reduces the values for tNPairs bins down to fNBlocks bins int tSizeShared = fNThreadsPerBlock*sizeof(double); const int tNStreams = tNBins; //---Host arrays and allocations double * h_KStarMag; double * h_RStarMag; double * h_Theta; double * h_Cf; checkCudaErrors(hipMallocManaged(&h_KStarMag, tSizeInput)); checkCudaErrors(hipMallocManaged(&h_RStarMag, tSizeInput)); checkCudaErrors(hipMallocManaged(&h_Theta, tSizeInput)); checkCudaErrors(hipMallocManaged(&h_Cf, tSizeOutput)); hipStream_t tStreams[tNStreams]; for(int i=0; i<tNBins; i++) { hipStreamCreate(&tStreams[i]); for(int j=0; j<tNPairsPerBin; j++) { h_KStarMag[j+i*tNPairsPerBin] = aPairs[i][j][0]; h_RStarMag[j+i*tNPairsPerBin] = aPairs[i][j][1]; h_Theta[j+i*tNPairsPerBin] = aPairs[i][j][2]; } } // timerPre.Stop(); // std::cout << " timerPre: " << timerPre.Elapsed() << " ms" << std::endl; 
//----------Run the kernels----------------------------------------------- // GpuTimer timer; // timer.Start(); for(int i=0; i<tNBins; i++) { int tOffsetInput = i*tNPairsPerBin; int tOffsetOutput = i*fNBlocks; hipLaunchKernelGGL(( GetEntireCf), dim3(fNBlocks),dim3(fNThreadsPerBlock),tSizeShared,tStreams[i], h_KStarMag,h_RStarMag,h_Theta,aReF0,aImF0,aD0,h_Cf,tOffsetInput,tOffsetOutput,fInterpScattLen); } // timer.Stop(); // std::cout << "GetEntireCf kernel finished in " << timer.Elapsed() << " ms" << std::endl; // GpuTimer timerPost; // timerPost.Start(); //The following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipFree(h_KStarMag)); checkCudaErrors(hipFree(h_RStarMag)); checkCudaErrors(hipFree(h_Theta)); // return the CF vector<double> tReturnVec(tNBins); double tSum = 0.0; for(int i=0; i<tNBins; i++) { tSum=0.0; for(int j=0; j<fNBlocks; j++) { tSum += h_Cf[j+i*fNBlocks]; } tReturnVec[i] = tSum; } checkCudaErrors(hipFree(h_Cf)); for(int i=0; i<tNStreams; i++) hipStreamDestroy(tStreams[i]); // timerPost.Stop(); // std::cout << " timerPost: " << timerPost.Elapsed() << " ms" << std::endl; return tReturnVec; } //________________________________________________________________________________________________________________ vector<double> ParallelWaveFunction::RunInterpolateEntireCfComplete(td3dVec &aPairs, double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t) { // GpuTimer timerPre; // timerPre.Start(); int tNBins = aPairs.size(); int tNPairsPerBin = aPairs[0].size(); //TODO all bins should have equal number of pairs int tSizeInput = tNBins*tNPairsPerBin*sizeof(double); int tSizeOutput = tNBins*fNBlocks*sizeof(double); //the kernel reduces the values for tNPairs bins down to fNBlocks bins int tSizeShared = fNThreadsPerBlock*sizeof(double); const int tNStreams = tNBins; //---Host arrays and allocations double * h_KStarMag; double * h_RStarMag; 
double * h_Theta; double * h_Cf; checkCudaErrors(hipMallocManaged(&h_KStarMag, tSizeInput)); checkCudaErrors(hipMallocManaged(&h_RStarMag, tSizeInput)); checkCudaErrors(hipMallocManaged(&h_Theta, tSizeInput)); checkCudaErrors(hipMallocManaged(&h_Cf, tSizeOutput)); hipStream_t tStreams[tNStreams]; for(int i=0; i<tNBins; i++) { hipStreamCreate(&tStreams[i]); for(int j=0; j<tNPairsPerBin; j++) { h_KStarMag[j+i*tNPairsPerBin] = aPairs[i][j][0]; h_RStarMag[j+i*tNPairsPerBin] = aPairs[i][j][1]; h_Theta[j+i*tNPairsPerBin] = aPairs[i][j][2]; } } // timerPre.Stop(); // std::cout << " timerPre: " << timerPre.Elapsed() << " ms" << std::endl; //----------Run the kernels----------------------------------------------- // GpuTimer timer; // timer.Start(); //TODO this doesn't work with fInterpScattLen = true. If I want this to work, I need to add singlet and triplet interpolation vectors assert(!fInterpScattLen); for(int i=0; i<tNBins; i++) { int tOffsetInput = i*tNPairsPerBin; int tOffsetOutput = i*fNBlocks; hipLaunchKernelGGL(( GetEntireCfComplete), dim3(fNBlocks),dim3(fNThreadsPerBlock),tSizeShared,tStreams[i], h_KStarMag,h_RStarMag,h_Theta,aReF0s,aImF0s,aD0s,aReF0t,aImF0t,aD0t,h_Cf,tOffsetInput,tOffsetOutput,fInterpScattLen); } // timer.Stop(); // std::cout << "GetEntireCf kernel finished in " << timer.Elapsed() << " ms" << std::endl; // GpuTimer timerPost; // timerPost.Start(); //The following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipFree(h_KStarMag)); checkCudaErrors(hipFree(h_RStarMag)); checkCudaErrors(hipFree(h_Theta)); // return the CF vector<double> tReturnVec(tNBins); double tSum = 0.0; for(int i=0; i<tNBins; i++) { tSum=0.0; for(int j=0; j<fNBlocks; j++) { tSum += h_Cf[j+i*fNBlocks]; } tReturnVec[i] = tSum; } checkCudaErrors(hipFree(h_Cf)); for(int i=0; i<tNStreams; i++) hipStreamDestroy(tStreams[i]); // timerPost.Stop(); // std::cout << " timerPost: " << 
timerPost.Elapsed() << " ms" << std::endl; return tReturnVec; } //________________________________________________________________________________________________________________ td2dVec ParallelWaveFunction::RunInterpolateEntireCfCompletewStaticPairs(int aAnalysisNumber, double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t) { // GpuTimer timerPre; // timerPre.Start(); int tNBins = fSamplePairsBinInfo.nBinsK; int tNPairsPerBin = fSamplePairsBinInfo.nPairsPerBin; int tSizeOutput = tNBins*fNBlocks*sizeof(double); //the kernel reduces the values for tNPairs bins down to fNBlocks bins int tSizeShared = fNThreadsPerBlock*sizeof(double); tSizeShared *= 2; //to account for Cf values and counts const int tNStreams = tNBins; //---Host arrays and allocations double * h_CfSums; double * h_CfCounts; checkCudaErrors(hipMallocManaged(&h_CfSums, tSizeOutput)); checkCudaErrors(hipMallocManaged(&h_CfCounts, tSizeOutput)); hipStream_t tStreams[tNStreams]; for(int i=0; i<tNBins; i++) { hipStreamCreate(&tStreams[i]); } // timerPre.Stop(); // std::cout << " timerPre: " << timerPre.Elapsed() << " ms" << std::endl; //----------Run the kernels----------------------------------------------- // GpuTimer timer; // timer.Start(); //TODO this doesn't work with fInterpScattLen = true. 
If I want this to work, I need to add singlet and triplet interpolation vectors assert(!fInterpScattLen); for(int i=0; i<tNBins; i++) { int tOffsetOutput = i*fNBlocks; hipLaunchKernelGGL(( GetEntireCfCompletewStaticPairs), dim3(fNBlocks),dim3(fNThreadsPerBlock),tSizeShared,tStreams[i], aReF0s, aImF0s, aD0s, aReF0t, aImF0t, aD0t, h_CfSums, h_CfCounts, aAnalysisNumber, i, tOffsetOutput, fInterpScattLen); } // timer.Stop(); // std::cout << "GetEntireCf kernel finished in " << timer.Elapsed() << " ms" << std::endl; // GpuTimer timerPost; // timerPost.Start(); //The following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(hipDeviceSynchronize()); // return the CF td2dVec tReturnVec; tReturnVec.resize(tNBins,td1dVec(2)); double tSum = 0.0; int tCounts = 0; for(int i=0; i<tNBins; i++) { tSum=0.0; tCounts = 0; for(int j=0; j<fNBlocks; j++) { tSum += h_CfSums[j+i*fNBlocks]; tCounts += h_CfCounts[j+i*fNBlocks]; } tReturnVec[i][0] = tSum; tReturnVec[i][1] = tCounts; } checkCudaErrors(hipFree(h_CfSums)); checkCudaErrors(hipFree(h_CfCounts)); for(int i=0; i<tNStreams; i++) hipStreamDestroy(tStreams[i]); // timerPost.Stop(); // std::cout << " timerPost: " << timerPost.Elapsed() << " ms" << std::endl; return tReturnVec; } //________________________________________________________________________________________________________________ vector<double> ParallelWaveFunction::RunInterpolateEntireCfComplete2(int aNSimPairsPerBin, double aKStarMin, double aKStarMax, double aNbinsK, double aR, double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t) { // GpuTimer timerPre; // timerPre.Start(); int tNBins = aNbinsK; int tNPairsPerBin = aNSimPairsPerBin; int tSizeInput = tNBins*tNPairsPerBin*sizeof(double); int tSizeOutput = tNBins*fNBlocks*sizeof(double); //the kernel reduces the values for tNPairs bins down to fNBlocks bins int tSizeShared = fNThreadsPerBlock*sizeof(double); int tSizedState = 
tNBins*tNPairsPerBin*sizeof(hiprandState_t); int tSizeCPUPairs = tNBins*tNPairsPerBin*3*sizeof(double); const int tNStreams = tNBins; //---Host arrays and allocations double * h_Cf; double * h_CPUPairs; checkCudaErrors(hipMallocManaged(&h_Cf, tSizeOutput)); checkCudaErrors(hipMallocManaged(&h_CPUPairs, tSizeCPUPairs)); hipStream_t tStreams[tNStreams]; for(int i=0; i<tNBins; i++) hipStreamCreate(&tStreams[i]); hiprandState_t *d_state1; checkCudaErrors(hipMallocManaged(&d_state1, tSizedState)); hiprandState_t *d_state2; checkCudaErrors(hipMallocManaged(&d_state2, tSizedState)); hiprandState_t *d_state3; checkCudaErrors(hipMallocManaged(&d_state3, tSizedState)); // timerPre.Stop(); // std::cout << " timerPre: " << timerPre.Elapsed() << " ms" << std::endl; //----------Run the kernels----------------------------------------------- // GpuTimer timer; // timer.Start(); for(int i=0; i<tNBins; i++) { int tOffsetInput = i*tNPairsPerBin; int tOffsetOutput = i*fNBlocks; hipLaunchKernelGGL(( RandInit), dim3(fNBlocks),dim3(fNThreadsPerBlock),0,tStreams[i], d_state1,std::clock(),tOffsetInput); hipLaunchKernelGGL(( RandInit), dim3(fNBlocks),dim3(fNThreadsPerBlock),0,tStreams[i], d_state2,std::clock(),tOffsetInput); hipLaunchKernelGGL(( RandInit), dim3(fNBlocks),dim3(fNThreadsPerBlock),0,tStreams[i], d_state3,std::clock(),tOffsetInput); hipLaunchKernelGGL(( GetEntireCfComplete2), dim3(fNBlocks),dim3(fNThreadsPerBlock),tSizeShared,tStreams[i], d_state1,d_state2,d_state3, aR, aReF0s,aImF0s,aD0s, aReF0t,aImF0t,aD0t, h_Cf,i,tOffsetInput,tOffsetOutput,h_CPUPairs,fInterpScattLen); } // timer.Stop(); // std::cout << "GetEntireCfComplete2 kernel finished in " << timer.Elapsed() << " ms" << std::endl; // GpuTimer timerPost; // timerPost.Start(); //The following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipFree(d_state1)); checkCudaErrors(hipFree(d_state2)); checkCudaErrors(hipFree(d_state3)); // 
return the CF vector<double> tReturnVec(tNBins); double tSum = 0.0; for(int i=0; i<tNBins; i++) { tSum=0.0; for(int j=0; j<fNBlocks; j++) { tSum += h_Cf[j+i*fNBlocks]; } tReturnVec[i] = tSum; } checkCudaErrors(hipFree(h_Cf)); checkCudaErrors(hipFree(h_CPUPairs)); for(int i=0; i<tNStreams; i++) hipStreamDestroy(tStreams[i]); // timerPost.Stop(); // std::cout << " timerPost: " << timerPost.Elapsed() << " ms" << std::endl; return tReturnVec; }
1d764d1efacfa8244d29c0a296912ef8e586fe6b.cu
/////////////////////////////////////////////////////////////////////////// // ParallelWaveFunction: // /////////////////////////////////////////////////////////////////////////// #include "ParallelWaveFunction.h" //________________________________________________________________________________________________________________ __device__ int GetBinNumber(double aBinSize, int aNbins, double aValue) { //TODO check the accuracy of this double tBinKStarMin, tBinKStarMax; for(int i=0; i<aNbins; i++) { tBinKStarMin = i*aBinSize; tBinKStarMax = (i+1)*aBinSize; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. failure } //________________________________________________________________________________________________________________ __device__ int GetBinNumber(int aNbins, double aMin, double aMax, double aValue) { //TODO check the accuracy of this double tBinSize = (aMax-aMin)/aNbins; double tBinKStarMin, tBinKStarMax; for(int i=0; i<aNbins; i++) { tBinKStarMin = i*tBinSize + aMin; tBinKStarMax = (i+1)*tBinSize + aMin; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. failure } //________________________________________________________________________________________________________________ __device__ int GetBinNumber(double aBinWidth, double aMin, double aMax, double aValue) { //TODO check the accuracy of this int tNbins = (aMax-aMin)/aBinWidth; double tBinKStarMin, tBinKStarMax; for(int i=0; i<tNbins; i++) { tBinKStarMin = i*aBinWidth + aMin; tBinKStarMax = (i+1)*aBinWidth + aMin; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. 
failure } //________________________________________________________________________________________________________________ __device__ int GetInterpLowBin(InterpType aInterpType, InterpAxisType aAxisType, double aVal) { int tReturnBin = -2; int tNbins, tBin; double tMin, tMax, tBinWidth, tBinCenter; bool tErrorFlag = false; switch(aInterpType) { case kGTilde: switch(aAxisType) { case kKaxis: tNbins = d_fGTildeInfo->nBinsK; tBinWidth = d_fGTildeInfo->binWidthK; tMin = d_fGTildeInfo->minK; tMax = d_fGTildeInfo->maxK; break; case kRaxis: tNbins = d_fGTildeInfo->nBinsR; tBinWidth = d_fGTildeInfo->binWidthR; tMin = d_fGTildeInfo->minR; tMax = d_fGTildeInfo->maxR; break; //Invalid axis selection case kThetaaxis: tErrorFlag = true; break; case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kHyperGeo1F1: switch(aAxisType) { case kKaxis: tNbins = d_fHyperGeo1F1Info->nBinsK; tBinWidth = d_fHyperGeo1F1Info->binWidthK; tMin = d_fHyperGeo1F1Info->minK; tMax = d_fHyperGeo1F1Info->maxK; break; case kRaxis: tNbins = d_fHyperGeo1F1Info->nBinsR; tBinWidth = d_fHyperGeo1F1Info->binWidthR; tMin = d_fHyperGeo1F1Info->minR; tMax = d_fHyperGeo1F1Info->maxR; break; case kThetaaxis: tNbins = d_fHyperGeo1F1Info->nBinsTheta; tBinWidth = d_fHyperGeo1F1Info->binWidthTheta; tMin = d_fHyperGeo1F1Info->minTheta; tMax = d_fHyperGeo1F1Info->maxTheta; break; //Invalid axis selection case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kScattLen: switch(aAxisType) { case kReF0axis: tNbins = d_fScattLenInfo->nBinsReF0; tBinWidth = d_fScattLenInfo->binWidthReF0; tMin = d_fScattLenInfo->minReF0; tMax = d_fScattLenInfo->maxReF0; break; case kImF0axis: tNbins = d_fScattLenInfo->nBinsImF0; tBinWidth = d_fScattLenInfo->binWidthImF0; tMin = d_fScattLenInfo->minImF0; tMax = d_fScattLenInfo->maxImF0; break; case kD0axis: tNbins = 
d_fScattLenInfo->nBinsD0; tBinWidth = d_fScattLenInfo->binWidthD0; tMin = d_fScattLenInfo->minD0; tMax = d_fScattLenInfo->maxD0; break; case kKaxis: tNbins = d_fScattLenInfo->nBinsK; tBinWidth = d_fScattLenInfo->binWidthK; tMin = d_fScattLenInfo->minK; tMax = d_fScattLenInfo->maxK; break; //Invalid axis selection case kRaxis: tErrorFlag = true; break; case kThetaaxis: tErrorFlag = true; break; } break; } //Check error if(tErrorFlag) return -2; //--------------------------------- tBin = GetBinNumber(tNbins,tMin,tMax,aVal); tBinCenter = tMin + (tBin+0.5)*tBinWidth; if(aVal < tBinCenter) tReturnBin = tBin-1; else tReturnBin = tBin; if(tReturnBin<0 || tReturnBin >= tNbins) return -2; else return tReturnBin; } //________________________________________________________________________________________________________________ __device__ double GetInterpLowBinCenter(InterpType aInterpType, InterpAxisType aAxisType, double aVal) { double tReturnValue; int tReturnBin = -2; int tNbins, tBin; double tMin, tMax, tBinWidth, tBinCenter; bool tErrorFlag = false; switch(aInterpType) { case kGTilde: switch(aAxisType) { case kKaxis: tNbins = d_fGTildeInfo->nBinsK; tBinWidth = d_fGTildeInfo->binWidthK; tMin = d_fGTildeInfo->minK; tMax = d_fGTildeInfo->maxK; break; case kRaxis: tNbins = d_fGTildeInfo->nBinsR; tBinWidth = d_fGTildeInfo->binWidthR; tMin = d_fGTildeInfo->minR; tMax = d_fGTildeInfo->maxR; break; //Invalid axis selection case kThetaaxis: tErrorFlag = true; break; case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kHyperGeo1F1: switch(aAxisType) { case kKaxis: tNbins = d_fHyperGeo1F1Info->nBinsK; tBinWidth = d_fHyperGeo1F1Info->binWidthK; tMin = d_fHyperGeo1F1Info->minK; tMax = d_fHyperGeo1F1Info->maxK; break; case kRaxis: tNbins = d_fHyperGeo1F1Info->nBinsR; tBinWidth = d_fHyperGeo1F1Info->binWidthR; tMin = d_fHyperGeo1F1Info->minR; tMax = d_fHyperGeo1F1Info->maxR; break; case kThetaaxis: 
tNbins = d_fHyperGeo1F1Info->nBinsTheta; tBinWidth = d_fHyperGeo1F1Info->binWidthTheta; tMin = d_fHyperGeo1F1Info->minTheta; tMax = d_fHyperGeo1F1Info->maxTheta; break;

  //Invalid axis selection
  case kReF0axis: tErrorFlag = true; break;
  case kImF0axis: tErrorFlag = true; break;
  case kD0axis: tErrorFlag = true; break;
  }
  break;

case kScattLen:
  switch(aAxisType)
  {
  case kReF0axis:
    tNbins = d_fScattLenInfo->nBinsReF0; tBinWidth = d_fScattLenInfo->binWidthReF0; tMin = d_fScattLenInfo->minReF0; tMax = d_fScattLenInfo->maxReF0; break;
  case kImF0axis:
    tNbins = d_fScattLenInfo->nBinsImF0; tBinWidth = d_fScattLenInfo->binWidthImF0; tMin = d_fScattLenInfo->minImF0; tMax = d_fScattLenInfo->maxImF0; break;
  case kD0axis:
    tNbins = d_fScattLenInfo->nBinsD0; tBinWidth = d_fScattLenInfo->binWidthD0; tMin = d_fScattLenInfo->minD0; tMax = d_fScattLenInfo->maxD0; break;
  case kKaxis:
    tNbins = d_fScattLenInfo->nBinsK; tBinWidth = d_fScattLenInfo->binWidthK; tMin = d_fScattLenInfo->minK; tMax = d_fScattLenInfo->maxK; break;

  //Invalid axis selection
  case kRaxis: tErrorFlag = true; break;
  case kThetaaxis: tErrorFlag = true; break;
  }
  break;
}

//Check error: an (interpolation table, axis) pairing flagged above is invalid
if(tErrorFlag) return -2;

//---------------------------------
//Locate the bin holding aVal, then step down one bin when aVal lies below that
//bin's center, so the value returned is always the LOWER of the two bin centers
//bracketing aVal (the "low" anchor point for linear interpolation).
tBin = GetBinNumber(tNbins,tMin,tMax,aVal);
tBinCenter = tMin + (tBin+0.5)*tBinWidth;
if(aVal < tBinCenter) tReturnBin = tBin-1;
else tReturnBin = tBin;

//-2 is the out-of-range/error sentinel used throughout these helpers
if(tReturnBin<0 || tReturnBin >= tNbins) return -2;

tReturnValue = tMin + (tReturnBin+0.5)*tBinWidth;
return tReturnValue;
}

//________________________________________________________________________________________________________________
// Linear interpolation in k* of the tabulated Lednicky H function.
// Binning is borrowed from the GTilde table (see TODO about dedicated HFunctionInfo).
// NOTE(review): tBinLowK is used unchecked; if GetInterpLowBin returns the error
// sentinel -2, the d_fLednickyHFunction reads below are out of bounds. Callers
// appear expected to gate on CanInterpolate(...) first -- confirm.
__device__ double LednickyHFunctionInterpolate(double aKStar)
{
  double tResult = 0.0;
  //----------------------------

  //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2
  //TODO make HFunctionInfo objects instead of using GTilde
  //TODO check accuracy

  double tBinWidthK = d_fGTildeInfo->binWidthK;
  int tBinLowK = GetInterpLowBin(kGTilde,kKaxis,aKStar);
  int tBinHighK = tBinLowK+1;
  double tBinLowCenterK = GetInterpLowBinCenter(kGTilde,kKaxis,aKStar);
  double tBinHighCenterK = tBinLowCenterK+tBinWidthK;

  //standard two-point linear interpolation between the bracketing bin centers
  double tX0 = tBinLowCenterK;
  double tX1 = tBinHighCenterK;
  double tY0 = d_fLednickyHFunction[tBinLowK];
  double tY1 = d_fLednickyHFunction[tBinHighK];

  tResult = tY0 + (aKStar-tX0)*((tY1-tY0)/(tX1-tX0));
  return tResult;
}

//________________________________________________________________________________________________________________
// Bilinear interpolation of the complex GTilde table in (k*, r*).
// The real/imaginary parts are stored in separate flat arrays d_fGTildeReal /
// d_fGTildeImag, indexed [r + k*nBinsR], i.e. r* is the fastest-varying axis.
// NOTE(review): like the other interpolators, the bin indices from
// GetInterpLowBin are used unchecked (see TODO); out-of-range inputs would read
// out of bounds.
__device__ cuDoubleComplex GTildeInterpolate(double aKStar, double aRStar)
{
  double tResultReal = 0.;
  double tResultImag = 0.;
  //----------------------------
  int tNbinsR = d_fGTildeInfo->nBinsR;
  //----------------------------

  //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2
  double tBinWidthK = d_fGTildeInfo->binWidthK;
  int tBinLowK = GetInterpLowBin(kGTilde,kKaxis,aKStar);
  int tBinHighK = tBinLowK+1;
  double tBinLowCenterK = GetInterpLowBinCenter(kGTilde,kKaxis,aKStar);
  double tBinHighCenterK = tBinLowCenterK+tBinWidthK;

  double tBinWidthR = d_fGTildeInfo->binWidthR;
  int tBinLowR = GetInterpLowBin(kGTilde,kRaxis,aRStar);
  int tBinHighR = tBinLowR+1;
  double tBinLowCenterR = GetInterpLowBinCenter(kGTilde,kRaxis,aRStar);
  double tBinHighCenterR = tBinLowCenterR+tBinWidthR;

  //--------------------------
  //the four table corners surrounding (aKStar, aRStar)
  double tQ11Real = d_fGTildeReal[tBinLowR + tBinLowK*tNbinsR];
  double tQ12Real = d_fGTildeReal[tBinHighR + tBinLowK*tNbinsR];
  double tQ21Real = d_fGTildeReal[tBinLowR + tBinHighK*tNbinsR];
  double tQ22Real = d_fGTildeReal[tBinHighR + tBinHighK*tNbinsR];

  double tQ11Imag = d_fGTildeImag[tBinLowR + tBinLowK*tNbinsR];
  double tQ12Imag = d_fGTildeImag[tBinHighR + tBinLowK*tNbinsR];
  double tQ21Imag = d_fGTildeImag[tBinLowR + tBinHighK*tNbinsR];
  double tQ22Imag = d_fGTildeImag[tBinHighR + tBinHighK*tNbinsR];
  //--------------------------
  //normalization = cell area; the weighted-corner sum below is the standard
  //bilinear interpolation formula
  double tD = 1.0*tBinWidthK*tBinWidthR;

  tResultReal = (1.0/tD)*(tQ11Real*(tBinHighCenterK-aKStar)*(tBinHighCenterR-aRStar) + tQ21Real*(aKStar-tBinLowCenterK)*(tBinHighCenterR-aRStar) + tQ12Real*(tBinHighCenterK-aKStar)*(aRStar-tBinLowCenterR) + tQ22Real*(aKStar-tBinLowCenterK)*(aRStar-tBinLowCenterR));

  tResultImag = (1.0/tD)*(tQ11Imag*(tBinHighCenterK-aKStar)*(tBinHighCenterR-aRStar) + tQ21Imag*(aKStar-tBinLowCenterK)*(tBinHighCenterR-aRStar) + tQ12Imag*(tBinHighCenterK-aKStar)*(aRStar-tBinLowCenterR) + tQ22Imag*(aKStar-tBinLowCenterK)*(aRStar-tBinLowCenterR));

  //--------------------------
  cuDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag);
  return tReturnValue;
}

//________________________________________________________________________________________________________________
// Trilinear interpolation of the complex confluent hypergeometric function 1F1
// in (k*, r*, theta).  Tables d_fHyperGeo1F1Real / d_fHyperGeo1F1Imag are flat
// arrays indexed [k*nBinsTheta*nBinsR + r*nBinsTheta + theta], i.e. theta is the
// fastest-varying axis.  Interpolates along theta, then r*, then k*.
__device__ cuDoubleComplex HyperGeo1F1Interpolate(double aKStar, double aRStar, double aTheta)
{
  double tResultReal = 0.;
  double tResultImag = 0.;
  //----------------------------
  int tNbinsTheta = d_fHyperGeo1F1Info->nBinsTheta;
  int tNbinsR = d_fHyperGeo1F1Info->nBinsR;
  //----------------------------

  //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2
  double tBinWidthK = d_fHyperGeo1F1Info->binWidthK;
  int tBin0K = GetInterpLowBin(kHyperGeo1F1,kKaxis,aKStar);
  int tBin1K = tBin0K+1;
  double tBin0CenterK = GetInterpLowBinCenter(kHyperGeo1F1,kKaxis,aKStar);
//  double tBin1CenterK = tBin0CenterK+tBinWidthK;

  double tBinWidthR = d_fHyperGeo1F1Info->binWidthR;
  int tBin0R = GetInterpLowBin(kHyperGeo1F1,kRaxis,aRStar);
  int tBin1R = tBin0R+1;
  double tBin0CenterR = GetInterpLowBinCenter(kHyperGeo1F1,kRaxis,aRStar);
//  double tBin1CenterR = tBin0CenterR+tBinWidthR;

  double tBinWidthTheta = d_fHyperGeo1F1Info->binWidthTheta;
  int tBin0Theta = GetInterpLowBin(kHyperGeo1F1,kThetaaxis,aTheta);
  int tBin1Theta = tBin0Theta+1;
  double tBin0CenterTheta = GetInterpLowBinCenter(kHyperGeo1F1,kThetaaxis,aTheta);
//  double tBin1CenterTheta = tBin0CenterTheta+tBinWidthTheta;

  //--------------------------
  //fractional distances from the low bin centers (each in [0,1] when in range)
  double tDiffK = (aKStar - tBin0CenterK)/tBinWidthK;
  double tDiffR = (aRStar - tBin0CenterR)/tBinWidthR;
  double tDiffTheta = (aTheta - tBin0CenterTheta)/tBinWidthTheta;

  //-----------REAL---------------
  //interpolate along z (i.e. theta)
  double tC000Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta];
  double tC001Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta];
  double tC010Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta];
  double tC011Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta];
  double tC100Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta];
  double tC101Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta];
  double tC110Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta];
  double tC111Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta];

  double tC00Real = tC000Real*(1.0-tDiffTheta) + tC001Real*tDiffTheta;
  double tC01Real = tC010Real*(1.0-tDiffTheta) + tC011Real*tDiffTheta;
  double tC10Real = tC100Real*(1.0-tDiffTheta) + tC101Real*tDiffTheta;
  double tC11Real = tC110Real*(1.0-tDiffTheta) + tC111Real*tDiffTheta;

  //interpolate along y (i.e. r)
  double tC0Real = tC00Real*(1.0-tDiffR) + tC01Real*tDiffR;
  double tC1Real = tC10Real*(1.0-tDiffR) + tC11Real*tDiffR;

  //interpolate along x (i.e. k)
  tResultReal = tC0Real*(1.0-tDiffK) + tC1Real*tDiffK;

  //-----------IMAG---------------
  //interpolate along z (i.e. theta)
  double tC000Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta];
  double tC001Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta];
  double tC010Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta];
  double tC011Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta];
  double tC100Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta];
  double tC101Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta];
  double tC110Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta];
  double tC111Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta];

  double tC00Imag = tC000Imag*(1.0-tDiffTheta) + tC001Imag*tDiffTheta;
  double tC01Imag = tC010Imag*(1.0-tDiffTheta) + tC011Imag*tDiffTheta;
  double tC10Imag = tC100Imag*(1.0-tDiffTheta) + tC101Imag*tDiffTheta;
  double tC11Imag = tC110Imag*(1.0-tDiffTheta) + tC111Imag*tDiffTheta;

  //interpolate along y (i.e. r)
  double tC0Imag = tC00Imag*(1.0-tDiffR) + tC01Imag*tDiffR;
  double tC1Imag = tC10Imag*(1.0-tDiffR) + tC11Imag*tDiffR;

  //interpolate along x (i.e. k)
  tResultImag = tC0Imag*(1.0-tDiffK) + tC1Imag*tDiffK;

  //--------------------------------
  cuDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag);
  return tReturnValue;
}

/*
//________________________________________________________________________________________________________________
__device__ cuDoubleComplex ScattLenInterpolateFull(double aReF0, double aImF0, double aD0, double aKStar)
{
  //This doesn't work because d_fCoulombScatteringLengthReal and d_fCoulombScatteringLengthImag are
  // too big to fit onto the GPU memory.  I am keeping it in case I figure out how to resolve the memory issue
  // i.e.
figure out how to let the device directly access host memory double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsK = d_fScattLenInfo->nBinsK; int tNbinsD0 = d_fScattLenInfo->nBinsD0; int tNbinsImF0 = d_fScattLenInfo->nBinsImF0; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthReF0 = d_fScattLenInfo->binWidthReF0; int tBin0ReF0 = GetInterpLowBin(kScattLen,kReF0axis,aReF0); int tBin1ReF0 = tBin0ReF0+1; double tBin0CenterReF0 = GetInterpLowBinCenter(kScattLen,kReF0axis,aReF0); // double tBin1CenterReF0 = tBin0CenterReF0+tBinWidthReF0; double tBinWidthImF0 = d_fScattLenInfo->binWidthImF0; int tBin0ImF0 = GetInterpLowBin(kScattLen,kImF0axis,aImF0); int tBin1ImF0 = tBin0ImF0+1; double tBin0CenterImF0 = GetInterpLowBinCenter(kScattLen,kImF0axis,aImF0); // double tBin1CenterImF0 = tBin0CenterImF0+tBinWidthImF0; double tBinWidthD0 = d_fScattLenInfo->binWidthD0; int tBin0D0 = GetInterpLowBin(kScattLen,kD0axis,aD0); int tBin1D0 = tBin0D0+1; double tBin0CenterD0 = GetInterpLowBinCenter(kScattLen,kD0axis,aD0); // double tBin1CenterD0 = tBin0CenterD0+tBinWidthD0; double tBinWidthK = d_fScattLenInfo->binWidthK; int tBin0K = GetInterpLowBin(kScattLen,kKaxis,aKStar); int tBin1K = tBin0K+1; double tBin0CenterK = GetInterpLowBinCenter(kScattLen,kKaxis,aKStar); // double tBin1CenterK = tBin0CenterK+tBinWidthK; //-------------------------- double tDiffReF0 = (aReF0 - tBin0CenterReF0)/tBinWidthReF0; double tDiffImF0 = (aImF0 - tBin0CenterImF0)/tBinWidthImF0; double tDiffD0 = (aD0 - tBin0CenterD0)/tBinWidthD0; double tDiffK = (aKStar - tBin0CenterK)/tBinWidthK; //-------------------------- //Assuming f(t,x,y,z) = f(ReF0,ImF0,D0,KStar). Ordering for memory access reasons //---------------REAL---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Real = 
d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Real = tC0000Real*(1.0-tDiffK) + tC0001Real*tDiffK; double tC001Real = tC0010Real*(1.0-tDiffK) + tC0011Real*tDiffK; double tC010Real = tC0100Real*(1.0-tDiffK) + tC0101Real*tDiffK; double tC011Real = tC0110Real*(1.0-tDiffK) + tC0111Real*tDiffK; double tC100Real = tC1000Real*(1.0-tDiffK) + tC1001Real*tDiffK; double tC101Real = tC1010Real*(1.0-tDiffK) + tC1011Real*tDiffK; double tC110Real = tC1100Real*(1.0-tDiffK) + tC1101Real*tDiffK; double tC111Real = tC1110Real*(1.0-tDiffK) + tC1111Real*tDiffK; //interpolate along y (i.e. D0) double tC00Real = tC000Real*(1.0-tDiffD0) + tC001Real*tDiffD0; double tC01Real = tC010Real*(1.0-tDiffD0) + tC011Real*tDiffD0; double tC10Real = tC100Real*(1.0-tDiffD0) + tC101Real*tDiffD0; double tC11Real = tC110Real*(1.0-tDiffD0) + tC111Real*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Real = tC00Real*(1.0-tDiffImF0) + tC01Real*tDiffImF0; double tC1Real = tC10Real*(1.0-tDiffImF0) + tC11Real*tDiffImF0; //interpolate along t (i.e. ReF0) tResultReal = tC0Real*(1.0-tDiffReF0) + tC1Real*tDiffReF0; //---------------Imag---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Imag = 
d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Imag = tC0000Imag*(1.0-tDiffK) + tC0001Imag*tDiffK; double tC001Imag = tC0010Imag*(1.0-tDiffK) + tC0011Imag*tDiffK; double tC010Imag = tC0100Imag*(1.0-tDiffK) + tC0101Imag*tDiffK; double tC011Imag = tC0110Imag*(1.0-tDiffK) + tC0111Imag*tDiffK; double tC100Imag = tC1000Imag*(1.0-tDiffK) + tC1001Imag*tDiffK; double tC101Imag = tC1010Imag*(1.0-tDiffK) + tC1011Imag*tDiffK; double tC110Imag = tC1100Imag*(1.0-tDiffK) + tC1101Imag*tDiffK; double tC111Imag = tC1110Imag*(1.0-tDiffK) + tC1111Imag*tDiffK; //interpolate along y (i.e. D0) double tC00Imag = tC000Imag*(1.0-tDiffD0) + tC001Imag*tDiffD0; double tC01Imag = tC010Imag*(1.0-tDiffD0) + tC011Imag*tDiffD0; double tC10Imag = tC100Imag*(1.0-tDiffD0) + tC101Imag*tDiffD0; double tC11Imag = tC110Imag*(1.0-tDiffD0) + tC111Imag*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Imag = tC00Imag*(1.0-tDiffImF0) + tC01Imag*tDiffImF0; double tC1Imag = tC10Imag*(1.0-tDiffImF0) + tC11Imag*tDiffImF0; //interpolate along t (i.e. 
ReF0)
  tResultImag = tC0Imag*(1.0-tDiffReF0) + tC1Imag*tDiffReF0;

  //--------------------------------
  cuDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag);
  return tReturnValue;
}
*/

//________________________________________________________________________________________________________________
// Quadrilinear (4d) interpolation of the complex Coulomb scattering amplitude
// f(ReF0, ImF0, D0, k*), evaluated on the pre-extracted sub-tables
// d_fScattLenRealSubVec / d_fScattLenImagSubVec.  Those sub-tables span only
// 2 bins in each of ReF0/ImF0/D0 (so the low bin indices are hard-coded to 0,
// and tNbinsImF0 = tNbinsD0 = 2), while k* keeps its full binning and is the
// fastest-varying index.  Presumably the host extracts the 2x2x2 neighborhood
// of the current (ReF0,ImF0,D0) fit point before launch -- confirm against the
// host-side loader (cf. the disabled ScattLenInterpolateFull above, which could
// not fit the full table in GPU memory).
__device__ cuDoubleComplex ScattLenInterpolate(double aReF0, double aImF0, double aD0, double aKStar)
{
  double tResultReal = 0.;
  double tResultImag = 0.;
  //----------------------------
  int tNbinsK = d_fScattLenInfo->nBinsK;
//  int tNbinsD0 = d_fScattLenInfo->nBinsD0;
//  int tNbinsImF0 = d_fScattLenInfo->nBinsImF0;
  int tNbinsD0 = 2;
  int tNbinsImF0 = 2;
  //----------------------------

  //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2
  double tBinWidthReF0 = d_fScattLenInfo->binWidthReF0;
  int tBin0ReF0 = 0;
  int tBin1ReF0 = tBin0ReF0+1;
  double tBin0CenterReF0 = GetInterpLowBinCenter(kScattLen,kReF0axis,aReF0);
//  double tBin1CenterReF0 = tBin0CenterReF0+tBinWidthReF0;

  double tBinWidthImF0 = d_fScattLenInfo->binWidthImF0;
  int tBin0ImF0 = 0;
  int tBin1ImF0 = tBin0ImF0+1;
  double tBin0CenterImF0 = GetInterpLowBinCenter(kScattLen,kImF0axis,aImF0);
//  double tBin1CenterImF0 = tBin0CenterImF0+tBinWidthImF0;

  double tBinWidthD0 = d_fScattLenInfo->binWidthD0;
  int tBin0D0 = 0;
  int tBin1D0 = tBin0D0+1;
  double tBin0CenterD0 = GetInterpLowBinCenter(kScattLen,kD0axis,aD0);
//  double tBin1CenterD0 = tBin0CenterD0+tBinWidthD0;

  double tBinWidthK = d_fScattLenInfo->binWidthK;
  int tBin0K = GetInterpLowBin(kScattLen,kKaxis,aKStar);
  int tBin1K = tBin0K+1;
  double tBin0CenterK = GetInterpLowBinCenter(kScattLen,kKaxis,aKStar);
//  double tBin1CenterK = tBin0CenterK+tBinWidthK;

  //--------------------------
  //device-side asserts trap (at next sync) if aKStar falls outside the k* table
  assert(tBin0K>=0);
  assert(tBin0CenterK>0);

  //fractional distances from the low bin centers
  double tDiffReF0 = (aReF0 - tBin0CenterReF0)/tBinWidthReF0;
  double tDiffImF0 = (aImF0 - tBin0CenterImF0)/tBinWidthImF0;
  double tDiffD0 = (aD0 - tBin0CenterD0)/tBinWidthD0;
  double tDiffK = (aKStar - tBin0CenterK)/tBinWidthK;

  //--------------------------
  //Assuming f(t,x,y,z) = f(ReF0,ImF0,D0,KStar).  Ordering for memory access reasons

  //---------------REAL----------------------------------
  //interpolate along z (i.e. KStar)
  double tC0000Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K];
  double tC0001Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K];
  double tC0010Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K];
  double tC0011Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K];
  double tC0100Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K];
  double tC0101Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K];
  double tC0110Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K];
  double tC0111Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K];
  double tC1000Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K];
  double tC1001Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K];
  double tC1010Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K];
  double tC1011Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K];
  double tC1100Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K];
  double tC1101Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K];
  double tC1110Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K];
  double tC1111Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K];
  //---
  double tC000Real = tC0000Real*(1.0-tDiffK) + tC0001Real*tDiffK;
  double tC001Real = tC0010Real*(1.0-tDiffK) + tC0011Real*tDiffK;
  double tC010Real = tC0100Real*(1.0-tDiffK) + tC0101Real*tDiffK;
  double tC011Real = tC0110Real*(1.0-tDiffK) + tC0111Real*tDiffK;
  double tC100Real = tC1000Real*(1.0-tDiffK) + tC1001Real*tDiffK;
  double tC101Real = tC1010Real*(1.0-tDiffK) + tC1011Real*tDiffK;
  double tC110Real = tC1100Real*(1.0-tDiffK) + tC1101Real*tDiffK;
  double tC111Real = tC1110Real*(1.0-tDiffK) + tC1111Real*tDiffK;

  //interpolate along y (i.e. D0)
  double tC00Real = tC000Real*(1.0-tDiffD0) + tC001Real*tDiffD0;
  double tC01Real = tC010Real*(1.0-tDiffD0) + tC011Real*tDiffD0;
  double tC10Real = tC100Real*(1.0-tDiffD0) + tC101Real*tDiffD0;
  double tC11Real = tC110Real*(1.0-tDiffD0) + tC111Real*tDiffD0;

  //interpolate along x (i.e. ImF0)
  double tC0Real = tC00Real*(1.0-tDiffImF0) + tC01Real*tDiffImF0;
  double tC1Real = tC10Real*(1.0-tDiffImF0) + tC11Real*tDiffImF0;

  //interpolate along t (i.e. ReF0)
  tResultReal = tC0Real*(1.0-tDiffReF0) + tC1Real*tDiffReF0;

  //---------------Imag----------------------------------
  //interpolate along z (i.e. KStar)
  double tC0000Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K];
  double tC0001Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K];
  double tC0010Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K];
  double tC0011Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K];
  double tC0100Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K];
  double tC0101Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K];
  double tC0110Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K];
  double tC0111Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K];
  double tC1000Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K];
  double tC1001Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K];
  double tC1010Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K];
  double tC1011Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K];
  double tC1100Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K];
  double tC1101Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K];
  double tC1110Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K];
  double tC1111Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K];
  //---
  double tC000Imag = tC0000Imag*(1.0-tDiffK) + tC0001Imag*tDiffK;
  double tC001Imag = tC0010Imag*(1.0-tDiffK) + tC0011Imag*tDiffK;
  double tC010Imag = tC0100Imag*(1.0-tDiffK) + tC0101Imag*tDiffK;
  double tC011Imag = tC0110Imag*(1.0-tDiffK) + tC0111Imag*tDiffK;
  double tC100Imag = tC1000Imag*(1.0-tDiffK) + tC1001Imag*tDiffK;
  double tC101Imag = tC1010Imag*(1.0-tDiffK) + tC1011Imag*tDiffK;
  double tC110Imag = tC1100Imag*(1.0-tDiffK) + tC1101Imag*tDiffK;
  double tC111Imag = tC1110Imag*(1.0-tDiffK) + tC1111Imag*tDiffK;

  //interpolate along y (i.e. D0)
  double tC00Imag = tC000Imag*(1.0-tDiffD0) + tC001Imag*tDiffD0;
  double tC01Imag = tC010Imag*(1.0-tDiffD0) + tC011Imag*tDiffD0;
  double tC10Imag = tC100Imag*(1.0-tDiffD0) + tC101Imag*tDiffD0;
  double tC11Imag = tC110Imag*(1.0-tDiffD0) + tC111Imag*tDiffD0;

  //interpolate along x (i.e. ImF0)
  double tC0Imag = tC00Imag*(1.0-tDiffImF0) + tC01Imag*tDiffImF0;
  double tC1Imag = tC10Imag*(1.0-tDiffImF0) + tC11Imag*tDiffImF0;

  //interpolate along t (i.e. ReF0)
  tResultImag = tC0Imag*(1.0-tDiffReF0) + tC1Imag*tDiffReF0;

  //--------------------------------
  cuDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag);
  return tReturnValue;
}

//________________________________________________________________________________________________________________
// Sommerfeld parameter eta = 1/((k*/hbarc)*BohrRadius), with hbarc [GeV*fm] and
// the pair Bohr radius [fm] hard-coded (see TODOs).  Note: NOT multiplied by
// 2*Pi here, unlike in GetGamowFactor below.
__device__ double GetEta(double aKStar)
{
  double d_hbarc = 0.197327;
  double d_gBohrRadius = 75.23349845;
  //TODO figure out how to load hbarc and gBohrRadius into GPU
  //TODO figure out how to use Pi here
  //TODO figure out how to make bohr radius negative when needed

  double tEta = pow(((aKStar/d_hbarc)*d_gBohrRadius),-1);
  return tEta;
}

//________________________________________________________________________________________________________________
// Gamow (Coulomb penetration) factor G = (2*Pi*eta)/(exp(2*Pi*eta)-1), built
// from the same hard-coded hbarc and Bohr radius as GetEta (see TODOs).
__device__ double GetGamowFactor(double aKStar)
{
  double d_hbarc = 0.197327;
  double d_gBohrRadius = 75.23349845;
  //TODO figure out how to load hbarc and gBohrRadius into GPU
  //TODO figure out how to use Pi here
  //TODO figure out how to make bohr radius negative when needed

  double tEta = pow(((aKStar/d_hbarc)*d_gBohrRadius),-1);
  tEta *= 6.28318530718;  //eta always comes with 2Pi here

  double tGamow = tEta*pow((exp(tEta)-1),-1);
  return tGamow;
}

//________________________________________________________________________________________________________________
// Plane-wave phase factor exp(-i*k*.r*) = cos(x) - i*sin(x) with
// x = (k*/hbarc)*r*cos(theta), theta the angle between k* and r*.
__device__ cuDoubleComplex GetExpTerm(double aKStar, double aRStar, double aTheta)
{
  //TODO figure out how to load hbarc and gBohrRadius into GPU
  double d_hbarc = 0.197327;

  double tReal = cos((aKStar/d_hbarc)*aRStar*cos(aTheta));
  double tImag = -sin((aKStar/d_hbarc)*aRStar*cos(aTheta));

  cuDoubleComplex tExpTermCmplx = make_cuDoubleComplex(tReal,tImag);
  return tExpTermCmplx;
}

//________________________________________________________________________________________________________________
// Combine the precomputed pieces into |Psi|^2:
//   Gamow * ( |1F1|^2 + |f|^2*|GTilde|^2/r^2 + 2*Re(exp * 1F1 * conj(f) * conj(GTilde))/r )
__device__ double AssembleWfSquared(double aRStarMag, double aGamowFactor, cuDoubleComplex aExpTermCmplx, cuDoubleComplex aGTildeCmplx, cuDoubleComplex aHyperGeo1F1Cmplx,
cuDoubleComplex aScattLenCmplx)
{
  cuDoubleComplex tGTildeCmplxConj = cuConj(aGTildeCmplx);
  cuDoubleComplex tScattLenCmplxConj = cuConj(aScattLenCmplx);

//  cuDoubleComplex tGamowFactor = make_cuDoubleComplex(aGamowFactor,0.);  //cuda doesn't want to multiply double*double2

  //-------------Stupid cuda can only multiple/divide two at once
  //TODO test to see if there is an easier way to accomplish this

  //squared moduli of the three complex ingredients (cuCabs returns |z|)
  double tMagSq_HyperGeo1F1 = cuCabs(aHyperGeo1F1Cmplx)*cuCabs(aHyperGeo1F1Cmplx);
  double tMagSq_ScattLen = cuCabs(tScattLenCmplxConj)*cuCabs(tScattLenCmplxConj);
  double tMagSq_GTilde = cuCabs(tGTildeCmplxConj)*cuCabs(tGTildeCmplxConj);

  //cross term: Re( exp * 1F1 * conj(f) * conj(GTilde) ) / r, built pairwise
  //because cuCmul takes exactly two operands
  cuDoubleComplex tTerm1 = cuCmul(aExpTermCmplx,aHyperGeo1F1Cmplx);
  cuDoubleComplex tTerm2 = cuCmul(tScattLenCmplxConj,tGTildeCmplxConj);
  cuDoubleComplex tTerm12 = cuCmul(tTerm1,tTerm2);
  double tTerm12Real = cuCreal(tTerm12);
  double tTermFinal = tTerm12Real/aRStarMag;

/*
  cuDoubleComplex tRStarMagCmplx = make_cuDoubleComplex(aRStarMag,0.);
  cuDoubleComplex tTermFinalCmplx = cuCdiv(tTerm12,tRStarMagCmplx);
  double tTermFinal = cuCreal(tTermFinalCmplx);
*/

  double tResult = aGamowFactor*(tMagSq_HyperGeo1F1 + tMagSq_ScattLen*tMagSq_GTilde/(aRStarMag*aRStarMag) + 2.0*tTermFinal);
  return tResult;

/*
  cuDoubleComplex tResultComplex = tGamowFactor*( cuCabs(aHyperGeo1F1Cmplx)*cuCabs(aHyperGeo1F1Cmplx) + cuCabs(tScattLenCmplxConj)*cuCabs(tScattLenCmplxConj)*cuCabs(tGTildeCmplxConj)*cuCabs(tGTildeCmplxConj)/(aRStarMag*aRStarMag) + 2.*cuCreal(aExpTermCmplx*aHyperGeo1F1Cmplx*tScattLenCmplxConj*tGTildeCmplxConj/aRStarMag) );

  //TODO put in check to make sure there is no imaginary part
//  if(imag(tResultComplex) > std::numeric_limits< double >::min()) cout << "\t\t\t !!!!!!!!! Imaginary value in ParellelWaveFunction::InterpolateWfSquared !!!!!" << endl;
//  assert(imag(tResultComplex) < std::numeric_limits< double >::min());

  return cuCreal(tResultComplex);
*/
}

//________________________________________________________________________________________________________________
// |Psi(k*,r*,theta)|^2 with the scattering amplitude obtained by 4d
// interpolation of the tabulated amplitude (ScattLenInterpolate) rather than
// by reconstructing it analytically as InterpolateWfSquared does below.
__device__ double InterpolateWfSquaredInterpScattLen(double aKStarMag, double aRStarMag, double aTheta, double aReF0, double aImF0, double aD0)
{
  double tGamow = GetGamowFactor(aKStarMag);
  cuDoubleComplex tExpTermCmplx = GetExpTerm(aKStarMag,aRStarMag,aTheta);

  cuDoubleComplex tGTildeCmplx, tHyperGeo1F1Cmplx, tScattLenCmplx;

  tGTildeCmplx = GTildeInterpolate(aKStarMag,aRStarMag);
  tHyperGeo1F1Cmplx = HyperGeo1F1Interpolate(aKStarMag,aRStarMag,aTheta);
  tScattLenCmplx = ScattLenInterpolate(aReF0,aImF0,aD0,aKStarMag);

  double tResult = AssembleWfSquared(aRStarMag,tGamow,tExpTermCmplx,tGTildeCmplx,tHyperGeo1F1Cmplx,tScattLenCmplx);
  return tResult;
}

//________________________________________________________________________________________________________________
// |Psi(k*,r*,theta)|^2 with the scattering amplitude built analytically from
// the effective-range expansion:
//   1/f = 1/f0 + 0.5*d0*k^2 - (2/BohrRadius)*Chi,   Chi = H(k) + i*Gamow/(2*eta)
// using the interpolated Lednicky H function; GTilde and 1F1 are interpolated
// from their tables as above.
__device__ double InterpolateWfSquared(double aKStarMag, double aRStarMag, double aTheta, double aReF0, double aImF0, double aD0)
{
  double d_hbarc = 0.197327;  //TODO
  double d_gBohrRadius = 75.23349845;
  cuDoubleComplex tRealUnity = make_cuDoubleComplex(1.0,0);

  double tGamow = GetGamowFactor(aKStarMag);
  cuDoubleComplex tExpTermCmplx = GetExpTerm(aKStarMag,aRStarMag,aTheta);

  cuDoubleComplex tGTildeCmplx, tHyperGeo1F1Cmplx, tScattLenCmplx;

  tGTildeCmplx = GTildeInterpolate(aKStarMag,aRStarMag);
  tHyperGeo1F1Cmplx = HyperGeo1F1Interpolate(aKStarMag,aRStarMag,aTheta);

  //---Build scatt len
  double tLednickyHFunction = LednickyHFunctionInterpolate(aKStarMag);
  double tImag = tGamow/(2.0*GetEta(aKStarMag));
  cuDoubleComplex tLednickyChi = make_cuDoubleComplex(tLednickyHFunction,tImag);

  cuDoubleComplex tF0 = make_cuDoubleComplex(aReF0,aImF0);
  cuDoubleComplex tInvF0 = cuCdiv(tRealUnity,tF0);

  double tKStar = aKStarMag/d_hbarc;
  double tTerm2 = 0.5*aD0*tKStar*tKStar;
  cuDoubleComplex tTerm2Complex = make_cuDoubleComplex(tTerm2,0);

  double tStupid = 2.0/d_gBohrRadius;
  cuDoubleComplex tMultFact = make_cuDoubleComplex(tStupid, 0);
  cuDoubleComplex tTerm3Complex = cuCmul(tMultFact,tLednickyChi);

  cuDoubleComplex tTerm12 = cuCadd(tInvF0,tTerm2Complex);
  cuDoubleComplex tInvScattLen = cuCsub(tTerm12,tTerm3Complex);
  tScattLenCmplx = cuCdiv(tRealUnity,tInvScattLen);
  //--------------------------

  double tResult = AssembleWfSquared(aRStarMag,tGamow,tExpTermCmplx,tGTildeCmplx,tHyperGeo1F1Cmplx,tScattLenCmplx);
  return tResult;
}

//________________________________________________________________________________________________________________
// True only if ALL six coordinates lie inside the interpolable range of their
// respective tables; use before InterpolateWfSquaredInterpScattLen to avoid the
// unchecked out-of-range table reads in the interpolators.
__device__ bool CanInterpolate(double aKStar, double aRStar, double aTheta, double aReF0, double aImF0, double aD0)
{
  if(aKStar < d_fScattLenInfo->minInterpK || aKStar > d_fScattLenInfo->maxInterpK) return false;
  if(aRStar < d_fGTildeInfo->minInterpR || aRStar > d_fGTildeInfo->maxInterpR) return false;
  if(aTheta < d_fHyperGeo1F1Info->minInterpTheta || aTheta > d_fHyperGeo1F1Info->maxInterpTheta) return false;
  if(aReF0 < d_fScattLenInfo->minInterpReF0 || aReF0 > d_fScattLenInfo->maxInterpReF0) return false;
  if(aImF0 < d_fScattLenInfo->minInterpImF0 || aImF0 > d_fScattLenInfo->maxInterpImF0) return false;
  if(aD0 < d_fScattLenInfo->minInterpD0 || aD0 > d_fScattLenInfo->maxInterpD0) return false;

  return true;
}

//________________________________________________________________________________________________________________
// Kinematics-only overload: checks (k*, r*, theta) against the GTilde and
// HyperGeo1F1 tables (k* range taken from GTilde here, not ScattLen).
__device__ bool CanInterpolate(double aKStar, double aRStar, double aTheta)
{
  if(aKStar < d_fGTildeInfo->minInterpK || aKStar > d_fGTildeInfo->maxInterpK) return false;
  if(aRStar < d_fGTildeInfo->minInterpR || aRStar > d_fGTildeInfo->maxInterpR) return false;
  if(aTheta < d_fHyperGeo1F1Info->minInterpTheta || aTheta > d_fHyperGeo1F1Info->maxInterpTheta) return false;

  return true;
}

//________________________________________________________________________________________________________________
__global__ void
GetWfAverage(double *aKStarMag, double *aRStarMag, double *aTheta, double aReF0, double aImF0, double aD0, double *g_odata, bool aInterpScattLen) { // int idx = threadIdx.x + blockIdx.x*blockDim.x; extern __shared__ double sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; if(aInterpScattLen) sdata[tid] = InterpolateWfSquaredInterpScattLen(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0,aImF0,aD0); else sdata[tid] = InterpolateWfSquared(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0,aImF0,aD0); __syncthreads(); //do reduction in shared mem //strided for(unsigned int s=1; s<blockDim.x; s*=2) { int index = 2*s*tid; if(index < blockDim.x) { sdata[index] += sdata[index+s]; } __syncthreads(); } /* //sequential for(unsigned int s=blockDim.x/2; s>0; s>>=1) //>>= is bitwise shift, here reducing s in powers of 2 { if(tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } */ //write result for this block to global mem if(tid == 0) g_odata[blockIdx.x] = sdata[0]; } //________________________________________________________________________________________________________________ __global__ void GetEntireCf(double *aKStarMag, double *aRStarMag, double *aTheta, double aReF0, double aImF0, double aD0, double *g_odata, int aOffsetInput, int aOffsetOutput, bool aInterpScattLen) { // int idx = threadIdx.x + blockIdx.x*blockDim.x; extern __shared__ double sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x + aOffsetInput; if(aInterpScattLen) sdata[tid] = InterpolateWfSquaredInterpScattLen(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0,aImF0,aD0); else sdata[tid] = InterpolateWfSquared(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0,aImF0,aD0); __syncthreads(); //do reduction in shared mem //strided for(unsigned int s=1; s<blockDim.x; s*=2) { int index = 2*s*tid; if(index < blockDim.x) { sdata[index] += sdata[index+s]; } __syncthreads(); } /* //sequential for(unsigned int s=blockDim.x/2; s>0; s>>=1) //>>= is 
bitwise shift, here reducing s in powers of 2 { if(tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } */ //write result for this block to global mem if(tid == 0) g_odata[blockIdx.x+aOffsetOutput] = sdata[0]; } //________________________________________________________________________________________________________________ __global__ void GetEntireCfComplete(double *aKStarMag, double *aRStarMag, double *aTheta, double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t, double *g_odata, int aOffsetInput, int aOffsetOutput, bool aInterpScattLen) { // int idx = threadIdx.x + blockIdx.x*blockDim.x; extern __shared__ double sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x + aOffsetInput; double tWfSqSinglet, tWfSqTriplet, tWfSq; if(aInterpScattLen) { tWfSqSinglet = InterpolateWfSquaredInterpScattLen(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0s,aImF0s,aD0s); tWfSqTriplet = InterpolateWfSquaredInterpScattLen(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0t,aImF0t,aD0t); } else { tWfSqSinglet = InterpolateWfSquared(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0s,aImF0s,aD0s); tWfSqTriplet = InterpolateWfSquared(aKStarMag[i],aRStarMag[i],aTheta[i],aReF0t,aImF0t,aD0t); } tWfSq = 0.25*tWfSqSinglet + 0.75*tWfSqTriplet; sdata[tid] = tWfSq; __syncthreads(); //do reduction in shared mem //strided for(unsigned int s=1; s<blockDim.x; s*=2) { int index = 2*s*tid; if(index < blockDim.x) { sdata[index] += sdata[index+s]; } __syncthreads(); } /* //sequential for(unsigned int s=blockDim.x/2; s>0; s>>=1) //>>= is bitwise shift, here reducing s in powers of 2 { if(tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } */ //write result for this block to global mem if(tid == 0) g_odata[blockIdx.x+aOffsetOutput] = sdata[0]; } //________________________________________________________________________________________________________________ __device__ int GetSamplePairOffset(int aAnalysis, int aBinK, int aPair) { int 
// GetSamplePairOffset (cont.): computes the flat element offset of pair aPair in
// k-bin aBinK of analysis aAnalysis inside the packed d_fPairSample4dVec array
// (nElementsPerPair consecutive doubles per pair; the packing order matches
// LoadPairSample4dVec: [analysis][kBin][pair][element]).
// CanInterpPair below gates a pair on the HyperGeo1F1 table's interpolation
// ranges only; GetEntireCfCompletewStaticPairs uses it so out-of-range pairs
// contribute (0 sum, 0 count) instead of an extrapolated value.
// NOTE(review): the 0.25/0.75 singlet/triplet weights are hard-coded spin
// statistics; sdata2 is the dynamic shared buffer interpreted as [tid][sum,count].
tNBinsK = d_fPairSample4dVecInfo->nBinsK; int tNPairsPerBin = d_fPairSample4dVecInfo->nPairsPerBin; int tNElementsPerPair = d_fPairSample4dVecInfo->nElementsPerPair; int tIndex = aPair*tNElementsPerPair + aBinK*tNPairsPerBin*tNElementsPerPair + aAnalysis*tNBinsK*tNPairsPerBin*tNElementsPerPair; return tIndex; } //________________________________________________________________________________________________________________ __device__ bool CanInterpPair(double aKStar, double aRStar, double aTheta) { if(aKStar < d_fHyperGeo1F1Info->minInterpK) return false; if(aKStar > d_fHyperGeo1F1Info->maxInterpK) return false; if(aRStar < d_fHyperGeo1F1Info->minInterpR) return false; if(aRStar > d_fHyperGeo1F1Info->maxInterpR) return false; if(aTheta < d_fHyperGeo1F1Info->minInterpTheta) return false; if(aTheta > d_fHyperGeo1F1Info->maxInterpTheta) return false; return true; } //________________________________________________________________________________________________________________ __global__ void GetEntireCfCompletewStaticPairs(double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t, double *g_odata, double *g_odata2, int aAnalysisNumber, int aBinKNumber, int aOffsetOutput, bool aInterpScattLen) { // int idx = threadIdx.x + blockIdx.x*blockDim.x; extern __shared__ double sdata2[][2]; unsigned int tid = threadIdx.x; unsigned int tPairNumber = blockIdx.x*blockDim.x + threadIdx.x; unsigned int i = GetSamplePairOffset(aAnalysisNumber,aBinKNumber,tPairNumber); double tWfSqSinglet, tWfSqTriplet, tWfSq; if(CanInterpPair(d_fPairSample4dVec[i],d_fPairSample4dVec[i+1],d_fPairSample4dVec[i+2])) { if(aInterpScattLen) { tWfSqSinglet = InterpolateWfSquaredInterpScattLen(d_fPairSample4dVec[i],d_fPairSample4dVec[i+1],d_fPairSample4dVec[i+2],aReF0s,aImF0s,aD0s); tWfSqTriplet = InterpolateWfSquaredInterpScattLen(d_fPairSample4dVec[i],d_fPairSample4dVec[i+1],d_fPairSample4dVec[i+2],aReF0t,aImF0t,aD0t); } else { tWfSqSinglet = 
InterpolateWfSquared(d_fPairSample4dVec[i],d_fPairSample4dVec[i+1],d_fPairSample4dVec[i+2],aReF0s,aImF0s,aD0s); tWfSqTriplet = InterpolateWfSquared(d_fPairSample4dVec[i],d_fPairSample4dVec[i+1],d_fPairSample4dVec[i+2],aReF0t,aImF0t,aD0t); } tWfSq = 0.25*tWfSqSinglet + 0.75*tWfSqTriplet; sdata2[tid][0] = tWfSq; sdata2[tid][1] = 1.; } else { sdata2[tid][0] = 0.; sdata2[tid][1] = 0.; } __syncthreads(); //do reduction in shared mem //strided for(unsigned int s=1; s<blockDim.x; s*=2) { int index = 2*s*tid; if(index < blockDim.x) { sdata2[index][0] += sdata2[index+s][0]; sdata2[index][1] += sdata2[index+s][1]; } __syncthreads(); } /* //sequential for(unsigned int s=blockDim.x/2; s>0; s>>=1) //>>= is bitwise shift, here reducing s in powers of 2 { if(tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } */ //write result for this block to global mem if(tid == 0) { g_odata[blockIdx.x+aOffsetOutput] = sdata2[0][0]; g_odata2[blockIdx.x+aOffsetOutput] = sdata2[0][1]; } } //________________________________________________________________________________________________________________ __global__ void RandInit(curandState *state, unsigned long seed, int aOffset) { int idx = blockIdx.x * blockDim.x + threadIdx.x + aOffset; curand_init(seed, idx, 0, &state[idx]); } //________________________________________________________________________________________________________________ __global__ void GetEntireCfComplete2(curandState *state1, curandState *state2, curandState *state3, double aR, double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t, double *g_odata, int aKbin, int aOffsetInput, int aOffsetOutput, double* aCPUPairs, bool aInterpScattLen) { // int idx = threadIdx.x + blockIdx.x*blockDim.x; extern __shared__ double sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x + aOffsetInput; bool tPass = false; int tNPairs = d_fPairKStar3dVecInfo->nPairsPerBin[aKbin]; int tSimPairLocalIndex, 
// GetEntireCfComplete2 (cont.): each thread rejection-samples one simulated pair:
// it draws a k* triple uniformly from the aKbin slice of d_fPairKStar3dVec (the
// 0th packed element is KStarMag, so components are at offsets +1..+3) and a
// Gaussian r* triple of width aR, repeating until (|k*|,|r*|,theta) lies inside
// the interpolation tables, then accumulates 1/4 singlet + 3/4 triplet |Psi|^2.
// NOTE(review): the while(!tPass) loop has no iteration cap -- a parameter set
// whose interpolation region rejects most samples can spin indefinitely.
// NOTE(review): aCPUPairs is accepted but never written here (the TODO about
// returning failed pairs to the CPU is unimplemented).
tSimPairGlobalIndex; double tKStarOut, tKStarSide, tKStarLong, tKStarMagSq, tKStarMag; double tRStarOut, tRStarSide, tRStarLong, tRStarMagSq, tRStarMag; double tCosTheta, tTheta; //TODO need to be able to return all failing pairs back to CPU for mathematica processing while(!tPass) { tSimPairLocalIndex = tNPairs*curand_uniform_double(&state1[i]); tSimPairGlobalIndex = d_fPairKStar3dVecInfo->binOffset[aKbin] + 4*tSimPairLocalIndex; tKStarOut = d_fPairKStar3dVec[tSimPairGlobalIndex+1]; //note, 0th element is KStarMag tKStarSide = d_fPairKStar3dVec[tSimPairGlobalIndex+2]; //note, 0th element is KStarMag tKStarLong = d_fPairKStar3dVec[tSimPairGlobalIndex+3]; //note, 0th element is KStarMag tKStarMagSq = tKStarOut*tKStarOut + tKStarSide*tKStarSide + tKStarLong*tKStarLong; tKStarMag = sqrt(tKStarMagSq); tRStarOut = aR*curand_normal_double(&state1[i]); tRStarSide = aR*curand_normal_double(&state2[i]); tRStarLong = aR*curand_normal_double(&state3[i]); tRStarMagSq = tRStarOut*tRStarOut + tRStarSide*tRStarSide + tRStarLong*tRStarLong; tRStarMag = sqrt(tRStarMagSq); tCosTheta = (tKStarOut*tRStarOut + tKStarSide*tRStarSide + tKStarLong*tRStarLong)/(tKStarMag*tRStarMag); tTheta = acos(tCosTheta); if(aInterpScattLen) { bool tPass1 = CanInterpolate(tKStarMag,tRStarMag,tTheta,aReF0s,aImF0s,aD0s); bool tPass2 = CanInterpolate(tKStarMag,tRStarMag,tTheta,aReF0t,aImF0t,aD0t); if(tPass1 && tPass2) tPass = true; else tPass = false; } else tPass = CanInterpolate(tKStarMag,tRStarMag,tTheta); } double tWfSqSinglet, tWfSqTriplet, tWfSq; if(aInterpScattLen) { tWfSqSinglet = InterpolateWfSquaredInterpScattLen(tKStarMag,tRStarMag,tTheta,aReF0s,aImF0s,aD0s); tWfSqTriplet = InterpolateWfSquaredInterpScattLen(tKStarMag,tRStarMag,tTheta,aReF0t,aImF0t,aD0t); } else { tWfSqSinglet = InterpolateWfSquared(tKStarMag,tRStarMag,tTheta,aReF0s,aImF0s,aD0s); tWfSqTriplet = InterpolateWfSquared(tKStarMag,tRStarMag,tTheta,aReF0t,aImF0t,aD0t); } tWfSq = 0.25*tWfSqSinglet + 0.75*tWfSqTriplet; sdata[tid] = 
// store this thread's spin-averaged |Psi|^2; the strided block reduction below
// leaves the per-block partial sum in sdata[0], written to g_odata by thread 0.
tWfSq; __syncthreads(); //do reduction in shared mem //strided for(unsigned int s=1; s<blockDim.x; s*=2) { int index = 2*s*tid; if(index < blockDim.x) { sdata[index] += sdata[index+s]; } __syncthreads(); } /* //sequential for(unsigned int s=blockDim.x/2; s>0; s>>=1) //>>= is bitwise shift, here reducing s in powers of 2 { if(tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } */ //write result for this block to global mem if(tid == 0) g_odata[blockIdx.x+aOffsetOutput] = sdata[0]; } //________________________________________________________________________________________________________________ //**************************************************************************************************************** //________________________________________________________________________________________________________________ ParallelWaveFunction::ParallelWaveFunction(bool aInterpScattLen, int aNThreadsPerBlock, int aNBlocks): fInterpScattLen(aInterpScattLen), fNThreadsPerBlock(aNThreadsPerBlock), fNBlocks(aNBlocks) { cudaSetDeviceFlags(cudaDeviceMapHost); } //________________________________________________________________________________________________________________ ParallelWaveFunction::~ParallelWaveFunction() { checkCudaErrors(cudaFree(d_fPairKStar3dVec)); checkCudaErrors(cudaFree(d_fPairKStar3dVecInfo)); checkCudaErrors(cudaFree(d_fLednickyHFunction)); checkCudaErrors(cudaFree(d_fGTildeReal)); checkCudaErrors(cudaFree(d_fGTildeImag)); checkCudaErrors(cudaFree(d_fGTildeInfo)); checkCudaErrors(cudaFree(d_fHyperGeo1F1Real)); checkCudaErrors(cudaFree(d_fHyperGeo1F1Imag)); checkCudaErrors(cudaFree(d_fHyperGeo1F1Info)); // checkCudaErrors(cudaFree(d_fCoulombScatteringLengthReal)); // checkCudaErrors(cudaFree(d_fCoulombScatteringLengthImag)); checkCudaErrors(cudaFree(d_fScattLenInfo)); } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadPairSample4dVec(td4dVec 
// (LoadPairSample4dVec signature continues below.) Copies the host-side 4D pair
// sample [analysis][kBin][pair][k*,r*,theta] and its bin metadata into
// cudaMallocManaged unified memory for the static-pairs kernels; asserts the
// host vector dimensions match the bin info and nElementsPerPair == 3.
&aPairSample4dVec, BinInfoSamplePairs &aBinInfo) { //------ Load bin info first --------------------------- checkCudaErrors(cudaMallocManaged(&d_fPairSample4dVecInfo, sizeof(BinInfoSamplePairs))); d_fPairSample4dVecInfo->nAnalyses = aBinInfo.nAnalyses; d_fPairSample4dVecInfo->nBinsK = aBinInfo.nBinsK; d_fPairSample4dVecInfo->nPairsPerBin = aBinInfo.nPairsPerBin; d_fPairSample4dVecInfo->minK = aBinInfo.minK; d_fPairSample4dVecInfo->maxK = aBinInfo.maxK; d_fPairSample4dVecInfo->binWidthK = aBinInfo.binWidthK; d_fPairSample4dVecInfo->nElementsPerPair = aBinInfo.nElementsPerPair; //------------------------------------------------------ fSamplePairsBinInfo.nAnalyses = aBinInfo.nAnalyses; fSamplePairsBinInfo.nBinsK = aBinInfo.nBinsK; fSamplePairsBinInfo.nPairsPerBin = aBinInfo.nPairsPerBin; fSamplePairsBinInfo.minK = aBinInfo.minK; fSamplePairsBinInfo.maxK = aBinInfo.maxK; fSamplePairsBinInfo.binWidthK = aBinInfo.binWidthK; fSamplePairsBinInfo.nElementsPerPair = aBinInfo.nElementsPerPair; //------------------------------------------------------ assert((int)aPairSample4dVec.size() == d_fPairSample4dVecInfo->nAnalyses); assert((int)aPairSample4dVec[0].size() == d_fPairSample4dVecInfo->nBinsK); assert(d_fPairSample4dVecInfo->nElementsPerPair == 3); //------------------------------------------------------ int tTotalPairs = 0; for(int iAnaly=0; iAnaly<(int)aPairSample4dVec.size(); iAnaly++) { for(int iK=0; iK<(int)aPairSample4dVec[iAnaly].size(); iK++) tTotalPairs += aPairSample4dVec[iAnaly][iK].size(); } int tSize = tTotalPairs*fSamplePairsBinInfo.nElementsPerPair*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fPairSample4dVec, tSize)); int tIndex=0; for(int iAnaly=0; iAnaly<(int)aPairSample4dVec.size(); iAnaly++) { for(int iK=0; iK<(int)aPairSample4dVec[iAnaly].size(); iK++) { for(int iPair=0; iPair<(int)aPairSample4dVec[iAnaly][iK].size(); iPair++) { d_fPairSample4dVec[tIndex] = aPairSample4dVec[iAnaly][iK][iPair][0]; d_fPairSample4dVec[tIndex+1] = 
// (packing continues: element 0 = k*, element 1 = r*, element 2 = theta)
aPairSample4dVec[iAnaly][iK][iPair][1]; d_fPairSample4dVec[tIndex+2] = aPairSample4dVec[iAnaly][iK][iPair][2]; tIndex += 3; } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::UpdatePairSampleRadii(double aScaleFactor) { //TODO make this more general, and probably is better way to do this int tTotalEntries = d_fPairSample4dVecInfo->nAnalyses * d_fPairSample4dVecInfo->nBinsK * d_fPairSample4dVecInfo->nPairsPerBin * d_fPairSample4dVecInfo->nElementsPerPair; for(int i=0; i<tTotalEntries; i++) { if(i%3 == 1) d_fPairSample4dVec[i] *= aScaleFactor; } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadPairKStar3dVec(td3dVec &aPairKStar3dVec, BinInfoKStar &aBinInfo) { //------ Load bin info first --------------------------- checkCudaErrors(cudaMallocManaged(&d_fPairKStar3dVecInfo, sizeof(BinInfoKStar))); d_fPairKStar3dVecInfo->nBinsK = aBinInfo.nBinsK; d_fPairKStar3dVecInfo->minK = aBinInfo.minK; d_fPairKStar3dVecInfo->maxK = aBinInfo.maxK; d_fPairKStar3dVecInfo->binWidthK = aBinInfo.binWidthK; for(int i=0; i<d_fPairKStar3dVecInfo->nBinsK; i++) { d_fPairKStar3dVecInfo->nPairsPerBin[i] = aBinInfo.nPairsPerBin[i]; d_fPairKStar3dVecInfo->binOffset[i] = aBinInfo.binOffset[i]; } //------------------------------------------------------ int tNbinsK = aPairKStar3dVec.size(); assert(tNbinsK == d_fPairKStar3dVecInfo->nBinsK); int tNPairsTotal=0; for(int i=0; i<tNbinsK; i++) tNPairsTotal += aPairKStar3dVec[i].size(); int tNPairsTotal2=0; for(int i=0; i<tNbinsK; i++) tNPairsTotal2 += d_fPairKStar3dVecInfo->nPairsPerBin[i]; assert(tNPairsTotal == tNPairsTotal2); assert(aPairKStar3dVec[0][0].size() == 4); //all should have the same size, but maybe input a more thorough check int tSize = tNPairsTotal*4*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fPairKStar3dVec, tSize)); int tIndex=0; 
// pack pairs bin-by-bin (4 doubles each: KStarMag, out, side, long); tIndex2 is
// recomputed from binOffset for every pair and asserted equal to the running
// tIndex, cross-checking the offsets copied into the bin info above.
int tIndex2=0; int tOffset=0; for(int iK=0; iK<tNbinsK; iK++) { for(int iPair=0; iPair<(int)aPairKStar3dVec[iK].size(); iPair++) { tOffset = d_fPairKStar3dVecInfo->binOffset[iK]; tIndex2 = tOffset + 4*iPair; assert(tIndex2 == tIndex); d_fPairKStar3dVec[tIndex] = aPairKStar3dVec[iK][iPair][0]; d_fPairKStar3dVec[tIndex+1] = aPairKStar3dVec[iK][iPair][1]; d_fPairKStar3dVec[tIndex+2] = aPairKStar3dVec[iK][iPair][2]; d_fPairKStar3dVec[tIndex+3] = aPairKStar3dVec[iK][iPair][3]; tIndex+=4; } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadLednickyHFunction(td1dVec &aHFunc) { int tNbinsK = aHFunc.size(); int tSize = tNbinsK*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fLednickyHFunction, tSize)); for(int iK=0; iK<tNbinsK; iK++) { d_fLednickyHFunction[iK] = aHFunc[iK]; } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeReal(td2dVec &aGTildeReal) { int tNbinsK = aGTildeReal.size(); int tNbinsR = aGTildeReal[0].size(); int tSize = tNbinsK*tNbinsR*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fGTildeReal, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { tIndex = iR + iK*tNbinsR; d_fGTildeReal[tIndex] = aGTildeReal[iK][iR]; } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeImag(td2dVec &aGTildeImag) { int tNbinsK = aGTildeImag.size(); int tNbinsR = aGTildeImag[0].size(); int tSize = tNbinsK*tNbinsR*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fGTildeImag, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { tIndex = iR + iK*tNbinsR; d_fGTildeImag[tIndex] = aGTildeImag[iK][iR]; } } } 
// The loaders below flatten host vectors into cudaMallocManaged arrays with the
// innermost index varying fastest: HyperGeo1F1 tables as [iK][iR][iTheta] and the
// scattering-length tables as [iReF0][iImF0][iD0][iK]. The device-side
// interpolators must use the same flat index order. NOTE(review): the byte sizes
// are computed in int -- fine for modest tables, but a very large table could
// overflow; confirm dimensions stay well under 2^31 bytes.
//________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadHyperGeo1F1Real(td3dVec &aHyperGeo1F1Real) { int tNbinsK = aHyperGeo1F1Real.size(); int tNbinsR = aHyperGeo1F1Real[0].size(); int tNbinsTheta = aHyperGeo1F1Real[0][0].size(); int tSize = tNbinsK*tNbinsR*tNbinsTheta*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fHyperGeo1F1Real, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { for(int iTheta=0; iTheta<tNbinsTheta; iTheta++) { tIndex = iTheta + iR*tNbinsTheta + iK*tNbinsTheta*tNbinsR; d_fHyperGeo1F1Real[tIndex] = aHyperGeo1F1Real[iK][iR][iTheta]; } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadHyperGeo1F1Imag(td3dVec &aHyperGeo1F1Imag) { int tNbinsK = aHyperGeo1F1Imag.size(); int tNbinsR = aHyperGeo1F1Imag[0].size(); int tNbinsTheta = aHyperGeo1F1Imag[0][0].size(); int tSize = tNbinsK*tNbinsR*tNbinsTheta*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fHyperGeo1F1Imag, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { for(int iTheta=0; iTheta<tNbinsTheta; iTheta++) { tIndex = iTheta + iR*tNbinsTheta + iK*tNbinsTheta*tNbinsR; d_fHyperGeo1F1Imag[tIndex] = aHyperGeo1F1Imag[iK][iR][iTheta]; } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenReal(td4dVec &aScattLenReal) { int tNbinsReF0 = aScattLenReal.size(); int tNbinsImF0 = aScattLenReal[0].size(); int tNbinsD0 = aScattLenReal[0][0].size(); int tNbinsK = aScattLenReal[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fCoulombScatteringLengthReal, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; 
iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fCoulombScatteringLengthReal[tIndex] = aScattLenReal[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenImag(td4dVec &aScattLenImag) { int tNbinsReF0 = aScattLenImag.size(); int tNbinsImF0 = aScattLenImag[0].size(); int tNbinsD0 = aScattLenImag[0][0].size(); int tNbinsK = aScattLenImag[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fCoulombScatteringLengthImag, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fCoulombScatteringLengthImag[tIndex] = aScattLenImag[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenRealSub(td4dVec &aScattLenReal) { int tNbinsReF0 = aScattLenReal.size(); int tNbinsImF0 = aScattLenReal[0].size(); int tNbinsD0 = aScattLenReal[0][0].size(); int tNbinsK = aScattLenReal[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fScattLenRealSubVec, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fScattLenRealSubVec[tIndex] = aScattLenReal[iReF0][iImF0][iD0][iK]; } } } } } 
// LoadScattLenImagSub mirrors LoadScattLenRealSub for the imaginary part;
// UnLoadScattLenRealSub/ImagSub free the temporary sub-tables between fit
// iterations; LoadGTildeInfo and LoadHyperGeo1F1Info copy the table bin metadata
// (counts, widths, full and interpolation-safe ranges) into managed memory so
// device code can read it directly.
//________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenImagSub(td4dVec &aScattLenImag) { int tNbinsReF0 = aScattLenImag.size(); int tNbinsImF0 = aScattLenImag[0].size(); int tNbinsD0 = aScattLenImag[0][0].size(); int tNbinsK = aScattLenImag[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fScattLenImagSubVec, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fScattLenImagSubVec[tIndex] = aScattLenImag[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::UnLoadScattLenRealSub() { checkCudaErrors(cudaFree(d_fScattLenRealSubVec)); } //________________________________________________________________________________________________________________ void ParallelWaveFunction::UnLoadScattLenImagSub() { checkCudaErrors(cudaFree(d_fScattLenImagSubVec)); } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeInfo(BinInfoGTilde &aBinInfo) { checkCudaErrors(cudaMallocManaged(&d_fGTildeInfo, sizeof(BinInfoGTilde))); d_fGTildeInfo->nBinsK = aBinInfo.nBinsK; d_fGTildeInfo->nBinsR = aBinInfo.nBinsR; d_fGTildeInfo->binWidthK = aBinInfo.binWidthK; d_fGTildeInfo->binWidthR = aBinInfo.binWidthR; d_fGTildeInfo->minK = aBinInfo.minK; d_fGTildeInfo->maxK = aBinInfo.maxK; d_fGTildeInfo->minR = aBinInfo.minR; d_fGTildeInfo->maxR = aBinInfo.maxR; d_fGTildeInfo->minInterpK = aBinInfo.minInterpK; d_fGTildeInfo->maxInterpK = aBinInfo.maxInterpK; d_fGTildeInfo->minInterpR = 
// (GTilde interp-range copy continues; LoadHyperGeo1F1Info below performs the
// same field-by-field copy for the hypergeometric-table metadata)
aBinInfo.minInterpR; d_fGTildeInfo->maxInterpR = aBinInfo.maxInterpR; } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadHyperGeo1F1Info(BinInfoHyperGeo1F1 &aBinInfo) { checkCudaErrors(cudaMallocManaged(&d_fHyperGeo1F1Info, sizeof(BinInfoHyperGeo1F1))); d_fHyperGeo1F1Info->nBinsK = aBinInfo.nBinsK; d_fHyperGeo1F1Info->nBinsR = aBinInfo.nBinsR; d_fHyperGeo1F1Info->nBinsTheta = aBinInfo.nBinsTheta; d_fHyperGeo1F1Info->binWidthK = aBinInfo.binWidthK; d_fHyperGeo1F1Info->binWidthR = aBinInfo.binWidthR; d_fHyperGeo1F1Info->binWidthTheta = aBinInfo.binWidthTheta; d_fHyperGeo1F1Info->minK = aBinInfo.minK; d_fHyperGeo1F1Info->maxK = aBinInfo.maxK; d_fHyperGeo1F1Info->minR = aBinInfo.minR; d_fHyperGeo1F1Info->maxR = aBinInfo.maxR; d_fHyperGeo1F1Info->minTheta = aBinInfo.minTheta; d_fHyperGeo1F1Info->maxTheta = aBinInfo.maxTheta; d_fHyperGeo1F1Info->minInterpK = aBinInfo.minInterpK; d_fHyperGeo1F1Info->maxInterpK = aBinInfo.maxInterpK; d_fHyperGeo1F1Info->minInterpR = aBinInfo.minInterpR; d_fHyperGeo1F1Info->maxInterpR = aBinInfo.maxInterpR; d_fHyperGeo1F1Info->minInterpTheta = aBinInfo.minInterpTheta; d_fHyperGeo1F1Info->maxInterpTheta = aBinInfo.maxInterpTheta; } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenInfo(BinInfoScattLen &aBinInfo) { checkCudaErrors(cudaMallocManaged(&d_fScattLenInfo, sizeof(BinInfoScattLen))); d_fScattLenInfo->nBinsReF0 = aBinInfo.nBinsReF0; d_fScattLenInfo->nBinsImF0 = aBinInfo.nBinsImF0; d_fScattLenInfo->nBinsD0 = aBinInfo.nBinsD0; d_fScattLenInfo->nBinsK = aBinInfo.nBinsK; d_fScattLenInfo->binWidthReF0 = aBinInfo.binWidthReF0; d_fScattLenInfo->binWidthImF0 = aBinInfo.binWidthImF0; d_fScattLenInfo->binWidthD0 = aBinInfo.binWidthD0; d_fScattLenInfo->binWidthK = aBinInfo.binWidthK; d_fScattLenInfo->minReF0 = aBinInfo.minReF0; 
// (LoadScattLenInfo field copy continues below.) The Run* host drivers that
// follow share one pattern: allocate managed input/output buffers, launch one
// shared-memory reduction kernel per k-bin on its own cudaStream_t,
// cudaDeviceSynchronize() so the host can safely read managed memory, then sum
// the fNBlocks per-block partial sums for each bin on the host.
// NOTE(review): in RunInterpolateWfSquared the return vector is sized tNPairs
// but the copy loop writes only fNBlocks entries -- callers see trailing zeros,
// and if fNBlocks > tNPairs the loop writes past the vector; sizing it to
// fNBlocks looks intended -- verify against callers before changing.
// NOTE(review): cudaStreamCreate/cudaStreamDestroy results are unchecked, unlike
// the checkCudaErrors discipline elsewhere in this file.
// NOTE(review): RunInterpolateEntireCfCompletewStaticPairs accumulates the
// double-valued h_CfCounts into an int tCounts; counts are whole numbers so the
// truncation is benign, but a double accumulator would be cleaner.
// The commented-out GpuTimer blocks are preserved byte-identical; this chunk's
// original line boundaries are ambiguous (breaks fall inside string literals and
// '//' comments), so no code token below has been touched.
d_fScattLenInfo->maxReF0 = aBinInfo.maxReF0; d_fScattLenInfo->minImF0 = aBinInfo.minImF0; d_fScattLenInfo->maxImF0 = aBinInfo.maxImF0; d_fScattLenInfo->minD0 = aBinInfo.minD0; d_fScattLenInfo->maxD0 = aBinInfo.maxD0; d_fScattLenInfo->minK = aBinInfo.minK; d_fScattLenInfo->maxK = aBinInfo.maxK; d_fScattLenInfo->minInterpReF0 = aBinInfo.minInterpReF0; d_fScattLenInfo->maxInterpReF0 = aBinInfo.maxInterpReF0; d_fScattLenInfo->minInterpImF0 = aBinInfo.minInterpImF0; d_fScattLenInfo->maxInterpImF0 = aBinInfo.maxInterpImF0; d_fScattLenInfo->minInterpD0 = aBinInfo.minInterpD0; d_fScattLenInfo->maxInterpD0 = aBinInfo.maxInterpD0; d_fScattLenInfo->minInterpK = aBinInfo.minInterpK; d_fScattLenInfo->maxInterpK = aBinInfo.maxInterpK; } //________________________________________________________________________________________________________________ //double* ParallelWaveFunction::RunInterpolateWfSquared(td2dVec &aPairs, double aReF0, double aImF0, double aD0) vector<double> ParallelWaveFunction::RunInterpolateWfSquared(td2dVec &aPairs, double aReF0, double aImF0, double aD0) { int tNPairs = aPairs.size(); int tSize = tNPairs*sizeof(double); int tSizeShared = fNThreadsPerBlock*sizeof(double); int tSizeOut = fNBlocks*sizeof(double); //---Host arrays and allocations double * h_KStarMag; double * h_RStarMag; double * h_Theta; double * h_WfSquared; checkCudaErrors(cudaMallocManaged(&h_KStarMag, tSize)); checkCudaErrors(cudaMallocManaged(&h_RStarMag, tSize)); checkCudaErrors(cudaMallocManaged(&h_Theta, tSize)); checkCudaErrors(cudaMallocManaged(&h_WfSquared, tSizeOut)); for(int i=0; i<tNPairs; i++) { h_KStarMag[i] = aPairs[i][0]; h_RStarMag[i] = aPairs[i][1]; h_Theta[i] = aPairs[i][2]; } //----------Run the kernel----------------------------------------------- GpuTimer timer; timer.Start(); GetWfAverage<<<fNBlocks,fNThreadsPerBlock,tSizeShared>>>(h_KStarMag,h_RStarMag,h_Theta,aReF0,aImF0,aD0,h_WfSquared,fInterpScattLen); timer.Stop(); std::cout << "InterpolateWfSquared kernel 
finished in " << timer.Elapsed() << " ms" << std::endl; //The following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaFree(h_KStarMag)); checkCudaErrors(cudaFree(h_RStarMag)); checkCudaErrors(cudaFree(h_Theta)); // return h_WfSquared; vector<double> tReturnVec(tNPairs); for(int i=0; i<fNBlocks; i++) { tReturnVec[i] = h_WfSquared[i]; // cout << "i = " << i << endl; // cout << "h_WfSquared[i] = " << h_WfSquared[i] << endl; // cout << "tReturnVec[i] = " << tReturnVec[i] << endl << endl; } // checkCudaErrors(cudaFreeHost(h_WfSquared)); checkCudaErrors(cudaFree(h_WfSquared)); return tReturnVec; } //________________________________________________________________________________________________________________ vector<double> ParallelWaveFunction::RunInterpolateEntireCf(td3dVec &aPairs, double aReF0, double aImF0, double aD0) { // GpuTimer timerPre; // timerPre.Start(); int tNBins = aPairs.size(); int tNPairsPerBin = aPairs[0].size(); //TODO all bins should have equal number of pairs int tSizeInput = tNBins*tNPairsPerBin*sizeof(double); int tSizeOutput = tNBins*fNBlocks*sizeof(double); //the kernel reduces the values for tNPairs bins down to fNBlocks bins int tSizeShared = fNThreadsPerBlock*sizeof(double); const int tNStreams = tNBins; //---Host arrays and allocations double * h_KStarMag; double * h_RStarMag; double * h_Theta; double * h_Cf; checkCudaErrors(cudaMallocManaged(&h_KStarMag, tSizeInput)); checkCudaErrors(cudaMallocManaged(&h_RStarMag, tSizeInput)); checkCudaErrors(cudaMallocManaged(&h_Theta, tSizeInput)); checkCudaErrors(cudaMallocManaged(&h_Cf, tSizeOutput)); cudaStream_t tStreams[tNStreams]; for(int i=0; i<tNBins; i++) { cudaStreamCreate(&tStreams[i]); for(int j=0; j<tNPairsPerBin; j++) { h_KStarMag[j+i*tNPairsPerBin] = aPairs[i][j][0]; h_RStarMag[j+i*tNPairsPerBin] = aPairs[i][j][1]; h_Theta[j+i*tNPairsPerBin] = aPairs[i][j][2]; } } // timerPre.Stop(); // 
std::cout << " timerPre: " << timerPre.Elapsed() << " ms" << std::endl; //----------Run the kernels----------------------------------------------- // GpuTimer timer; // timer.Start(); for(int i=0; i<tNBins; i++) { int tOffsetInput = i*tNPairsPerBin; int tOffsetOutput = i*fNBlocks; GetEntireCf<<<fNBlocks,fNThreadsPerBlock,tSizeShared,tStreams[i]>>>(h_KStarMag,h_RStarMag,h_Theta,aReF0,aImF0,aD0,h_Cf,tOffsetInput,tOffsetOutput,fInterpScattLen); } // timer.Stop(); // std::cout << "GetEntireCf kernel finished in " << timer.Elapsed() << " ms" << std::endl; // GpuTimer timerPost; // timerPost.Start(); //The following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaFree(h_KStarMag)); checkCudaErrors(cudaFree(h_RStarMag)); checkCudaErrors(cudaFree(h_Theta)); // return the CF vector<double> tReturnVec(tNBins); double tSum = 0.0; for(int i=0; i<tNBins; i++) { tSum=0.0; for(int j=0; j<fNBlocks; j++) { tSum += h_Cf[j+i*fNBlocks]; } tReturnVec[i] = tSum; } checkCudaErrors(cudaFree(h_Cf)); for(int i=0; i<tNStreams; i++) cudaStreamDestroy(tStreams[i]); // timerPost.Stop(); // std::cout << " timerPost: " << timerPost.Elapsed() << " ms" << std::endl; return tReturnVec; } //________________________________________________________________________________________________________________ vector<double> ParallelWaveFunction::RunInterpolateEntireCfComplete(td3dVec &aPairs, double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t) { // GpuTimer timerPre; // timerPre.Start(); int tNBins = aPairs.size(); int tNPairsPerBin = aPairs[0].size(); //TODO all bins should have equal number of pairs int tSizeInput = tNBins*tNPairsPerBin*sizeof(double); int tSizeOutput = tNBins*fNBlocks*sizeof(double); //the kernel reduces the values for tNPairs bins down to fNBlocks bins int tSizeShared = fNThreadsPerBlock*sizeof(double); const int tNStreams = tNBins; //---Host arrays and 
allocations double * h_KStarMag; double * h_RStarMag; double * h_Theta; double * h_Cf; checkCudaErrors(cudaMallocManaged(&h_KStarMag, tSizeInput)); checkCudaErrors(cudaMallocManaged(&h_RStarMag, tSizeInput)); checkCudaErrors(cudaMallocManaged(&h_Theta, tSizeInput)); checkCudaErrors(cudaMallocManaged(&h_Cf, tSizeOutput)); cudaStream_t tStreams[tNStreams]; for(int i=0; i<tNBins; i++) { cudaStreamCreate(&tStreams[i]); for(int j=0; j<tNPairsPerBin; j++) { h_KStarMag[j+i*tNPairsPerBin] = aPairs[i][j][0]; h_RStarMag[j+i*tNPairsPerBin] = aPairs[i][j][1]; h_Theta[j+i*tNPairsPerBin] = aPairs[i][j][2]; } } // timerPre.Stop(); // std::cout << " timerPre: " << timerPre.Elapsed() << " ms" << std::endl; //----------Run the kernels----------------------------------------------- // GpuTimer timer; // timer.Start(); //TODO this doesn't work with fInterpScattLen = true. If I want this to work, I need to add singlet and triplet interpolation vectors assert(!fInterpScattLen); for(int i=0; i<tNBins; i++) { int tOffsetInput = i*tNPairsPerBin; int tOffsetOutput = i*fNBlocks; GetEntireCfComplete<<<fNBlocks,fNThreadsPerBlock,tSizeShared,tStreams[i]>>>(h_KStarMag,h_RStarMag,h_Theta,aReF0s,aImF0s,aD0s,aReF0t,aImF0t,aD0t,h_Cf,tOffsetInput,tOffsetOutput,fInterpScattLen); } // timer.Stop(); // std::cout << "GetEntireCf kernel finished in " << timer.Elapsed() << " ms" << std::endl; // GpuTimer timerPost; // timerPost.Start(); //The following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaFree(h_KStarMag)); checkCudaErrors(cudaFree(h_RStarMag)); checkCudaErrors(cudaFree(h_Theta)); // return the CF vector<double> tReturnVec(tNBins); double tSum = 0.0; for(int i=0; i<tNBins; i++) { tSum=0.0; for(int j=0; j<fNBlocks; j++) { tSum += h_Cf[j+i*fNBlocks]; } tReturnVec[i] = tSum; } checkCudaErrors(cudaFree(h_Cf)); for(int i=0; i<tNStreams; i++) cudaStreamDestroy(tStreams[i]); // timerPost.Stop(); // std::cout << 
" timerPost: " << timerPost.Elapsed() << " ms" << std::endl; return tReturnVec; } //________________________________________________________________________________________________________________ td2dVec ParallelWaveFunction::RunInterpolateEntireCfCompletewStaticPairs(int aAnalysisNumber, double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t) { // GpuTimer timerPre; // timerPre.Start(); int tNBins = fSamplePairsBinInfo.nBinsK; int tNPairsPerBin = fSamplePairsBinInfo.nPairsPerBin; int tSizeOutput = tNBins*fNBlocks*sizeof(double); //the kernel reduces the values for tNPairs bins down to fNBlocks bins int tSizeShared = fNThreadsPerBlock*sizeof(double); tSizeShared *= 2; //to account for Cf values and counts const int tNStreams = tNBins; //---Host arrays and allocations double * h_CfSums; double * h_CfCounts; checkCudaErrors(cudaMallocManaged(&h_CfSums, tSizeOutput)); checkCudaErrors(cudaMallocManaged(&h_CfCounts, tSizeOutput)); cudaStream_t tStreams[tNStreams]; for(int i=0; i<tNBins; i++) { cudaStreamCreate(&tStreams[i]); } // timerPre.Stop(); // std::cout << " timerPre: " << timerPre.Elapsed() << " ms" << std::endl; //----------Run the kernels----------------------------------------------- // GpuTimer timer; // timer.Start(); //TODO this doesn't work with fInterpScattLen = true. 
If I want this to work, I need to add singlet and triplet interpolation vectors assert(!fInterpScattLen); for(int i=0; i<tNBins; i++) { int tOffsetOutput = i*fNBlocks; GetEntireCfCompletewStaticPairs<<<fNBlocks,fNThreadsPerBlock,tSizeShared,tStreams[i]>>>(aReF0s, aImF0s, aD0s, aReF0t, aImF0t, aD0t, h_CfSums, h_CfCounts, aAnalysisNumber, i, tOffsetOutput, fInterpScattLen); } // timer.Stop(); // std::cout << "GetEntireCf kernel finished in " << timer.Elapsed() << " ms" << std::endl; // GpuTimer timerPost; // timerPost.Start(); //The following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(cudaDeviceSynchronize()); // return the CF td2dVec tReturnVec; tReturnVec.resize(tNBins,td1dVec(2)); double tSum = 0.0; int tCounts = 0; for(int i=0; i<tNBins; i++) { tSum=0.0; tCounts = 0; for(int j=0; j<fNBlocks; j++) { tSum += h_CfSums[j+i*fNBlocks]; tCounts += h_CfCounts[j+i*fNBlocks]; } tReturnVec[i][0] = tSum; tReturnVec[i][1] = tCounts; } checkCudaErrors(cudaFree(h_CfSums)); checkCudaErrors(cudaFree(h_CfCounts)); for(int i=0; i<tNStreams; i++) cudaStreamDestroy(tStreams[i]); // timerPost.Stop(); // std::cout << " timerPost: " << timerPost.Elapsed() << " ms" << std::endl; return tReturnVec; } //________________________________________________________________________________________________________________ vector<double> ParallelWaveFunction::RunInterpolateEntireCfComplete2(int aNSimPairsPerBin, double aKStarMin, double aKStarMax, double aNbinsK, double aR, double aReF0s, double aImF0s, double aD0s, double aReF0t, double aImF0t, double aD0t) { // GpuTimer timerPre; // timerPre.Start(); int tNBins = aNbinsK; int tNPairsPerBin = aNSimPairsPerBin; int tSizeInput = tNBins*tNPairsPerBin*sizeof(double); int tSizeOutput = tNBins*fNBlocks*sizeof(double); //the kernel reduces the values for tNPairs bins down to fNBlocks bins int tSizeShared = fNThreadsPerBlock*sizeof(double); int tSizedState = tNBins*tNPairsPerBin*sizeof(curandState); int 
tSizeCPUPairs = tNBins*tNPairsPerBin*3*sizeof(double); const int tNStreams = tNBins; //---Host arrays and allocations double * h_Cf; double * h_CPUPairs; checkCudaErrors(cudaMallocManaged(&h_Cf, tSizeOutput)); checkCudaErrors(cudaMallocManaged(&h_CPUPairs, tSizeCPUPairs)); cudaStream_t tStreams[tNStreams]; for(int i=0; i<tNBins; i++) cudaStreamCreate(&tStreams[i]); curandState *d_state1; checkCudaErrors(cudaMallocManaged(&d_state1, tSizedState)); curandState *d_state2; checkCudaErrors(cudaMallocManaged(&d_state2, tSizedState)); curandState *d_state3; checkCudaErrors(cudaMallocManaged(&d_state3, tSizedState)); // timerPre.Stop(); // std::cout << " timerPre: " << timerPre.Elapsed() << " ms" << std::endl; //----------Run the kernels----------------------------------------------- // GpuTimer timer; // timer.Start(); for(int i=0; i<tNBins; i++) { int tOffsetInput = i*tNPairsPerBin; int tOffsetOutput = i*fNBlocks; RandInit<<<fNBlocks,fNThreadsPerBlock,0,tStreams[i]>>>(d_state1,std::clock(),tOffsetInput); RandInit<<<fNBlocks,fNThreadsPerBlock,0,tStreams[i]>>>(d_state2,std::clock(),tOffsetInput); RandInit<<<fNBlocks,fNThreadsPerBlock,0,tStreams[i]>>>(d_state3,std::clock(),tOffsetInput); GetEntireCfComplete2<<<fNBlocks,fNThreadsPerBlock,tSizeShared,tStreams[i]>>>(d_state1,d_state2,d_state3, aR, aReF0s,aImF0s,aD0s, aReF0t,aImF0t,aD0t, h_Cf,i,tOffsetInput,tOffsetOutput,h_CPUPairs,fInterpScattLen); } // timer.Stop(); // std::cout << "GetEntireCfComplete2 kernel finished in " << timer.Elapsed() << " ms" << std::endl; // GpuTimer timerPost; // timerPost.Start(); //The following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaFree(d_state1)); checkCudaErrors(cudaFree(d_state2)); checkCudaErrors(cudaFree(d_state3)); // return the CF vector<double> tReturnVec(tNBins); double tSum = 0.0; for(int i=0; i<tNBins; i++) { tSum=0.0; for(int j=0; j<fNBlocks; j++) { tSum += h_Cf[j+i*fNBlocks]; } 
tReturnVec[i] = tSum; } checkCudaErrors(cudaFree(h_Cf)); checkCudaErrors(cudaFree(h_CPUPairs)); for(int i=0; i<tNStreams; i++) cudaStreamDestroy(tStreams[i]); // timerPost.Stop(); // std::cout << " timerPost: " << timerPost.Elapsed() << " ms" << std::endl; return tReturnVec; }
98720b39635209c45113e5a365bc2c4a8a714b01.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdlib.h> #include <cmath> #include "kmeans.h" #define MAX_BLOCK 512 #define THREADS_PER_BLOCK 256 #define ERR 0.001 #define COORDS 3 typedef double Float; __device__ inline static float euclid_distance(int numObjs, int numClusters, int objectId, int clusterId, float *objects, float *clusters ){ float ans = 0.0; for (int i = 0; i < COORDS; i++) { ans += (objects[3*objectId+i] - clusters[i + clusterId*3]) * (objects[3*objectId+i] - clusters[i + clusterId*3]); } return(ans); } __global__ static void find_nearest_cluster(int numObjs, int numClusters, float *objects, float *deviceClusters, int *membership, int *changedmembership, float *temp_clusters, int *temp_clusters_sizes ){ int objectId = blockDim.x * blockIdx.x + threadIdx.x; while (objectId < numObjs) { int index = 0; float dist, min_dist; min_dist = euclid_distance(numObjs, numClusters, objectId, 0, objects, deviceClusters); for (int i=1; i<numClusters; i++) { dist = euclid_distance(numObjs, numClusters, objectId, i, objects, deviceClusters); if (dist < min_dist) { min_dist = dist; index = i; } } if(membership[objectId] != index){ atomicAdd(changedmembership,1); membership[objectId] = index; } atomicAdd(&temp_clusters_sizes[index],1); for(int j=0; j<COORDS; j++) atomicAdd(&temp_clusters[index*3+j], objects[objectId*3+j]); objectId += blockDim.x * gridDim.x; } } float** cuda_kmeans(float **objects, int numObjs, int numClusters, int *membership ){ #pragma region declaration int loop = 0; int total_sum = 0; float delta; int *newClusterSize; float **loopClusters; float **clusters; float **newClusters; float **zero;; int *d_Membership; int *d_Changedmembership; float *d_Objects; float *d_Clusters; float *d_temp_clusters; int *d_temp_cluster_sizes; #pragma endregion #pragma region initialization gpuErrchk(hipSetDevice(0)); malloc2D(loopClusters, numClusters ,COORDS , float); malloc2D(zero, numClusters 
,COORDS , float); for (int i = 0; i < numClusters; i++) { for (int j = 0; j < COORDS; j++) { loopClusters[i][j] = objects[i][j]; zero[i][j] = 0; } } newClusterSize = (int*) malloc(numClusters* sizeof(int)); assert(newClusterSize != NULL); malloc2D(newClusters, numClusters,COORDS, float); memset(newClusters[0], 0, (COORDS * numClusters) * sizeof(float)); memset(newClusterSize, 0, numClusters * sizeof(int)); memset(membership,0,numObjs*sizeof(int)); gpuErrchk(hipMalloc(&d_Objects, numObjs*COORDS*sizeof(float))); gpuErrchk(hipMalloc(&d_Clusters, numClusters*COORDS*sizeof(float))); gpuErrchk(hipMalloc(&d_Membership, numObjs*sizeof(int))); gpuErrchk(hipMalloc(&d_Changedmembership, sizeof(int))); gpuErrchk(hipMalloc(&d_temp_clusters, numClusters*COORDS*sizeof(float))); gpuErrchk(hipMalloc(&d_temp_cluster_sizes, numClusters*sizeof(int))); gpuErrchk(hipMemset(d_Changedmembership,0, sizeof(int))); gpuErrchk(hipMemset(d_temp_cluster_sizes,0, numClusters*sizeof(int))); gpuErrchk(hipMemcpy(d_Objects, objects[0], numObjs*COORDS*sizeof(float), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_Membership, membership, numObjs*sizeof(int), hipMemcpyHostToDevice)); #pragma endregion #pragma region exeqution do { int tot_cor = 0; gpuErrchk(hipMemcpy(d_Clusters, loopClusters[0], numClusters*COORDS*sizeof(float), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_temp_clusters, zero[0], numClusters*COORDS*sizeof(float), hipMemcpyHostToDevice)); gpuErrchk(hipMemset(d_temp_cluster_sizes,0, numClusters*sizeof(int))); gpuErrchk(hipMemset(d_Changedmembership,0, sizeof(int))); hipLaunchKernelGGL(( find_nearest_cluster), dim3(THREADS_PER_BLOCK), dim3(MAX_BLOCK) , 0, 0, numObjs, numClusters, d_Objects, d_Clusters, d_Membership, d_Changedmembership, d_temp_clusters, d_temp_cluster_sizes); gpuErrchk(hipMemcpy(&total_sum, d_Changedmembership, sizeof(int), hipMemcpyDeviceToHost)); delta = (float)total_sum/(float)numObjs; gpuErrchk(hipMemcpy(membership, d_Membership, numObjs*sizeof(int), 
hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(newClusterSize, d_temp_cluster_sizes, numClusters*sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(newClusters[0], d_temp_clusters, numClusters*COORDS*sizeof(float), hipMemcpyDeviceToHost)); /*set new cluster centers*/ for (int i=0; i<numClusters; i++) { for (int j=0; j<COORDS; j++) { if (newClusterSize[i] > 0) { loopClusters[i][j] = (float)newClusters[i][j] / (float)newClusterSize[i]; } newClusters[i][j] = 0.0; /* set back to 0 */ } tot_cor += newClusterSize[i]; newClusterSize[i] = 0; /* set back to 0 */ } if(tot_cor != numObjs) { printf("Sum ERR \n"); exit(-1); } } while (delta > ERR && loop++ < 500); /* allocate a 2D space for returning variable clusters[] (coordinates of cluster centers) */ malloc2D(clusters, numClusters, COORDS, float); for (int i = 0; i < numClusters; i++) { for (int j = 0; j < COORDS; j++) { clusters[i][j] = loopClusters[i][j]; } } #pragma endregion #pragma region free gpuErrchk(hipFree(d_Membership)); gpuErrchk(hipFree(d_Changedmembership)); gpuErrchk(hipFree(d_Objects)); gpuErrchk(hipFree(d_Clusters)); gpuErrchk(hipFree(d_temp_clusters)); gpuErrchk(hipFree(d_temp_cluster_sizes)); free(zero[0]); free(zero); free(loopClusters[0]); free(loopClusters); free(newClusters[0]); free(newClusters); free(newClusterSize); #pragma endregion return clusters; }
98720b39635209c45113e5a365bc2c4a8a714b01.cu
#include <iostream> #include <stdlib.h> #include <cmath> #include "kmeans.h" #define MAX_BLOCK 512 #define THREADS_PER_BLOCK 256 #define ERR 0.001 #define COORDS 3 typedef double Float; __device__ inline static float euclid_distance(int numObjs, int numClusters, int objectId, int clusterId, float *objects, float *clusters ){ float ans = 0.0; for (int i = 0; i < COORDS; i++) { ans += (objects[3*objectId+i] - clusters[i + clusterId*3]) * (objects[3*objectId+i] - clusters[i + clusterId*3]); } return(ans); } __global__ static void find_nearest_cluster(int numObjs, int numClusters, float *objects, float *deviceClusters, int *membership, int *changedmembership, float *temp_clusters, int *temp_clusters_sizes ){ int objectId = blockDim.x * blockIdx.x + threadIdx.x; while (objectId < numObjs) { int index = 0; float dist, min_dist; min_dist = euclid_distance(numObjs, numClusters, objectId, 0, objects, deviceClusters); for (int i=1; i<numClusters; i++) { dist = euclid_distance(numObjs, numClusters, objectId, i, objects, deviceClusters); if (dist < min_dist) { min_dist = dist; index = i; } } if(membership[objectId] != index){ atomicAdd(changedmembership,1); membership[objectId] = index; } atomicAdd(&temp_clusters_sizes[index],1); for(int j=0; j<COORDS; j++) atomicAdd(&temp_clusters[index*3+j], objects[objectId*3+j]); objectId += blockDim.x * gridDim.x; } } float** cuda_kmeans(float **objects, int numObjs, int numClusters, int *membership ){ #pragma region declaration int loop = 0; int total_sum = 0; float delta; int *newClusterSize; float **loopClusters; float **clusters; float **newClusters; float **zero;; int *d_Membership; int *d_Changedmembership; float *d_Objects; float *d_Clusters; float *d_temp_clusters; int *d_temp_cluster_sizes; #pragma endregion #pragma region initialization gpuErrchk(cudaSetDevice(0)); malloc2D(loopClusters, numClusters ,COORDS , float); malloc2D(zero, numClusters ,COORDS , float); for (int i = 0; i < numClusters; i++) { for (int j = 0; j < COORDS; 
j++) { loopClusters[i][j] = objects[i][j]; zero[i][j] = 0; } } newClusterSize = (int*) malloc(numClusters* sizeof(int)); assert(newClusterSize != NULL); malloc2D(newClusters, numClusters,COORDS, float); memset(newClusters[0], 0, (COORDS * numClusters) * sizeof(float)); memset(newClusterSize, 0, numClusters * sizeof(int)); memset(membership,0,numObjs*sizeof(int)); gpuErrchk(cudaMalloc(&d_Objects, numObjs*COORDS*sizeof(float))); gpuErrchk(cudaMalloc(&d_Clusters, numClusters*COORDS*sizeof(float))); gpuErrchk(cudaMalloc(&d_Membership, numObjs*sizeof(int))); gpuErrchk(cudaMalloc(&d_Changedmembership, sizeof(int))); gpuErrchk(cudaMalloc(&d_temp_clusters, numClusters*COORDS*sizeof(float))); gpuErrchk(cudaMalloc(&d_temp_cluster_sizes, numClusters*sizeof(int))); gpuErrchk(cudaMemset(d_Changedmembership,0, sizeof(int))); gpuErrchk(cudaMemset(d_temp_cluster_sizes,0, numClusters*sizeof(int))); gpuErrchk(cudaMemcpy(d_Objects, objects[0], numObjs*COORDS*sizeof(float), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_Membership, membership, numObjs*sizeof(int), cudaMemcpyHostToDevice)); #pragma endregion #pragma region exeqution do { int tot_cor = 0; gpuErrchk(cudaMemcpy(d_Clusters, loopClusters[0], numClusters*COORDS*sizeof(float), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_temp_clusters, zero[0], numClusters*COORDS*sizeof(float), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemset(d_temp_cluster_sizes,0, numClusters*sizeof(int))); gpuErrchk(cudaMemset(d_Changedmembership,0, sizeof(int))); find_nearest_cluster<<<THREADS_PER_BLOCK, MAX_BLOCK >>>(numObjs, numClusters, d_Objects, d_Clusters, d_Membership, d_Changedmembership, d_temp_clusters, d_temp_cluster_sizes); gpuErrchk(cudaMemcpy(&total_sum, d_Changedmembership, sizeof(int), cudaMemcpyDeviceToHost)); delta = (float)total_sum/(float)numObjs; gpuErrchk(cudaMemcpy(membership, d_Membership, numObjs*sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(newClusterSize, d_temp_cluster_sizes, numClusters*sizeof(int), 
cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(newClusters[0], d_temp_clusters, numClusters*COORDS*sizeof(float), cudaMemcpyDeviceToHost)); /*set new cluster centers*/ for (int i=0; i<numClusters; i++) { for (int j=0; j<COORDS; j++) { if (newClusterSize[i] > 0) { loopClusters[i][j] = (float)newClusters[i][j] / (float)newClusterSize[i]; } newClusters[i][j] = 0.0; /* set back to 0 */ } tot_cor += newClusterSize[i]; newClusterSize[i] = 0; /* set back to 0 */ } if(tot_cor != numObjs) { printf("Sum ERR \n"); exit(-1); } } while (delta > ERR && loop++ < 500); /* allocate a 2D space for returning variable clusters[] (coordinates of cluster centers) */ malloc2D(clusters, numClusters, COORDS, float); for (int i = 0; i < numClusters; i++) { for (int j = 0; j < COORDS; j++) { clusters[i][j] = loopClusters[i][j]; } } #pragma endregion #pragma region free gpuErrchk(cudaFree(d_Membership)); gpuErrchk(cudaFree(d_Changedmembership)); gpuErrchk(cudaFree(d_Objects)); gpuErrchk(cudaFree(d_Clusters)); gpuErrchk(cudaFree(d_temp_clusters)); gpuErrchk(cudaFree(d_temp_cluster_sizes)); free(zero[0]); free(zero); free(loopClusters[0]); free(loopClusters); free(newClusters[0]); free(newClusters); free(newClusterSize); #pragma endregion return clusters; }
core.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
************************************************************************/ #include <stdio.h> #include <stdlib.h> #include "core.h" #include "libwrap.h" #include <sys/mman.h> #include <sys/stat.h> #include <sys/types.h> #include <sched.h> #include <fcntl.h> #include <unistd.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <string.h> #include <errno.h> DebugLevel ncclDebugLevel; extern "C" DSOGLOBAL ncclResult_t ncclGetUniqueId(ncclUniqueId* out) { pid_t pid = getpid(); static int count = 0; int commId = __sync_fetch_and_add(&count, 1); int len = snprintf(out->internal, NCCL_UNIQUE_ID_BYTES, "nccl-%d-%d", pid, commId); if(strlen(out->internal) < len) { WARN("ncclUniqueId truncated"); return ncclInternalError; } return ncclSuccess; } static ncclResult_t shmOpen(const char* shmname, size_t bytes, void** ptr) { int fd = shm_open(shmname, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR); if (fd == -1) { WARN("shm_open failed to open %s", shmname); return ncclSystemError; } if (ftruncate(fd, bytes) == -1) { WARN("ftruncate failed to allocate %ld bytes", bytes); shm_unlink(shmname); close(fd); return ncclSystemError; } *ptr = mmap(NULL, bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (*ptr == MAP_FAILED) { WARN("failure in mmap"); shm_unlink(shmname); close(fd); return ncclSystemError; } close(fd); return ncclSuccess; } static ncclResult_t shmUnlink(const char* shmname) { if(shm_unlink(shmname) == -1) { WARN("smh_unlink failed"); return ncclSystemError; } else { return ncclSuccess; } } static ncclResult_t shmUnmap(void* ptr, size_t bytes) { if(munmap(ptr, bytes) == -1) { WARN("munmap failed"); return ncclSystemError; } else { return ncclSuccess; } } typedef struct { int rank; int ndev; int cudaDev; int ncclId; pid_t pid; ncclMem* hostptr; ncclMem* devptr; hipIpcMemHandle devipc; size_t buffSize; } RankEntry; static int compRanks(const void* a, const void* b) { const RankEntry* A = (const RankEntry*)a; const RankEntry* B = (const RankEntry*)b; if (A->ncclId < 
B->ncclId) return -1; if (A->ncclId > B->ncclId) return 1; return 0; } static void orderRanks(RankEntry* ranks, int count) { qsort(ranks, count, sizeof(RankEntry), compRanks); for(int i=0; i<count; ++i) ranks[i].ncclId = i; } typedef struct { union { struct { volatile int bar; }; char pad[16]; }; RankEntry ranks[1]; } RankGather; static ncclResult_t initGather(RankGather** gather, ncclUniqueId commId, int ndev, int rank, RankEntry myInfo) { size_t bytes = offsetof(RankGather, ranks) + ndev*sizeof(RankEntry); RankGather* tmp = NULL; int bar_tmp; ncclResult_t res = shmOpen(commId.internal, bytes, (void**)&tmp); if (res != ncclSuccess) { WARN("rank %d failed to open shm segment for gather", rank); return res; } tmp->ranks[rank] = myInfo; bar_tmp = tmp->bar - 1; bool swapped; do { bar_tmp += 1; if (bar_tmp == ndev-1) { // everyone is done ncclResult_t res = shmUnlink(commId.internal); if (res != ncclSuccess) { WARN("rank %d failed to unlink shm segment for gather", rank); shmUnmap(tmp, bytes); return res; } orderRanks(tmp->ranks, ndev); } swapped = __sync_bool_compare_and_swap(&tmp->bar, bar_tmp, bar_tmp+1); } while(!swapped); while (tmp->bar < ndev) sched_yield(); __sync_synchronize(); *gather = tmp; return ncclSuccess; } static ncclResult_t closeGather(RankGather* gather, int ndev) { int bar_tmp = gather->bar - 1; bool swapped; do { bar_tmp += 1; swapped = __sync_bool_compare_and_swap(&gather->bar, bar_tmp, bar_tmp+1); } while(!swapped); while (gather->bar != 2*ndev) sched_yield(); __sync_synchronize(); size_t bytes = offsetof(RankGather, ranks) + ndev*sizeof(RankEntry); ncclResult_t res = shmUnmap(gather, bytes); if (res != ncclSuccess) { WARN("failed to unmap %ld bytes of gather", bytes); return res; } return ncclSuccess; } static ncclResult_t allocDevMem(ncclMem** ptr, size_t buffSize) { size_t size = offsetof(struct ncclMem, buff) + buffSize; hipError_t res = hipMalloc((void**)ptr, size); if (res != hipSuccess) { *ptr = NULL; WARN("failed to allocate %lu byte 
device buffer", size); return ncclCudaMallocFailed; } if (hipMemset(*ptr, 0, size) != hipSuccess) { WARN("failed to memset device buffer."); hipFree(*ptr); *ptr = NULL; return ncclUnhandledCudaError; } return ncclSuccess; } static const int ShmMapped = 1; static const int ShmLinked = 2; static ncclResult_t allocHostMem(ncclMem** ptr, size_t buffSize) { size_t size = offsetof(struct ncclMem, buff) + buffSize; hipError_t res = hipHostMalloc((void**)ptr, size); if (res != hipSuccess) { *ptr = NULL; WARN("failed to allocate %lu byte host buffer", size); return ncclSystemError; } memset(*ptr, 0, size); return ncclSuccess; } static ncclResult_t openHostMemShm(const char* shmname, ncclMem** ptr, size_t buffSize) { size_t size = offsetof(struct ncclMem, buff) + buffSize; ncclResult_t res = shmOpen(shmname, size, (void**)ptr); if (res != ncclSuccess) { WARN("failed to allocate %lu byte shm buffer", size); *ptr = NULL; return res; } if(hipHostRegister(*ptr, size, hipHostRegisterMapped) != hipSuccess) { WARN("failed to register host buffer"); shmUnlink(shmname); shmUnmap(*ptr, size); *ptr = NULL; return ncclUnhandledCudaError; } return ncclSuccess; } static ncclResult_t populateRankInfo(RankEntry* info, int rank, ncclComm_t comm) { char busId[13]; uint32_t nvmlHandle; hipError_t res = hipDeviceGetPCIBusId(busId, 13, comm->cudaDev); if (res == hipErrorInvalidDevice) { WARN("rank %d attempted to access an invalid cuda device %d", rank, comm->cudaDev); return ncclInvalidDeviceIndex; } else if (res != hipSuccess) { WARN("rank %d failed to get PCI Bus Id for device %d", rank, comm->cudaDev); return ncclUnhandledCudaError; } INFO("rank %d using device %d (%s)", rank, comm->cudaDev, busId); if (wrapNvmlDeviceGetHandleByPciBusId(busId, &nvmlHandle) != ncclSuccess) { WARN("rank %d failed to get nvml handle for device %s", rank, busId); return ncclUnhandledCudaError; } // Order by nvml index if (wrapNvmlDeviceGetIndex(nvmlHandle, (unsigned*)&info->ncclId) != ncclSuccess) { WARN("rank 
%d failed to get nvml device index for device %d", rank, comm->cudaDev); return ncclUnhandledCudaError; } info->rank = rank; info->ndev = comm->nDev; info->cudaDev = comm->cudaDev; info->pid = getpid(); info->buffSize = comm->buffSize; info->hostptr = comm->hostMem; info->devptr = comm->devMem; if (wrapCuIpcGetMemHandle(&info->devipc, (hipDeviceptr_t)comm->devMem) != ncclSuccess) { WARN("rank %d failed to open CUDA IPC handle", rank); return ncclUnhandledCudaError; } return ncclSuccess; } static const int CLEANUP_NONE = 0; static const int CLEANUP_CUIPC = 1; static const int CLEANUP_UNMAP = 2; static ncclResult_t commClearMaps(ncclComm_t comm) { ncclResult_t res, retval = ncclSuccess; hipError_t cures; for(int d=0; d<comm->nDev; ++d) { switch(comm->cleanup[d].type) { case CLEANUP_NONE: break; case CLEANUP_CUIPC: res = wrapCuIpcCloseMemHandle((hipDeviceptr_t)comm->cleanup[d].handle); if (res != ncclSuccess) { WARN("rank %d failed to close IPC handle to rank %d", comm->userFromRing[comm->ncclId], comm->userFromRing[d]); retval = (retval == ncclSuccess) ? res : retval; } break; case CLEANUP_UNMAP: cures = hipHostUnregister(comm->cleanup[d].handle); if (cures != hipSuccess) { WARN("rank %d failed to unregister handle to rank %d", comm->userFromRing[comm->ncclId], comm->userFromRing[d]); retval = (retval == ncclSuccess) ? ncclUnhandledCudaError : retval; } res = shmUnmap(comm->cleanup[d].handle, offsetof(ncclMem, buff) + comm->buffSize); if (res != ncclSuccess) { WARN("rank %d failed to unmap handle to rank %d", comm->userFromRing[comm->ncclId], comm->userFromRing[d]); retval = (retval == ncclSuccess) ? 
res : retval; } break; default: WARN("Unknown cleanup type %d", comm->cleanup[d].type); } comm->cleanup[d].type = 0; comm->cleanup[d].handle = NULL; } memset(comm->userFromRing, 0, sizeof(int)*MAXPEERS); memset(comm->ringFromUser, 0, sizeof(int)*MAXPEERS); if (comm->devUserFromRing != NULL) { hipError_t err = hipMemset(comm->devUserFromRing, 0, sizeof(int)*MAXPEERS); if (err != hipSuccess) { WARN("Faild to clear dev map: %s", hipGetErrorString(err)); retval = (retval == ncclSuccess) ? ncclUnhandledCudaError : retval; } } return retval; } static ncclResult_t commBuildMaps(ncclComm_t comm, ncclUniqueId* commId, int rank, RankEntry* ranks) { int ndev = comm->nDev; for(int i=0; i<ndev; ++i) { // Check for inconsistencies between ranks // If two ranks use the same rank, then one slot of // ranks[] will be left unset with zero ndev/buffSize. if (ranks[i].buffSize != comm->buffSize || ranks[i].ndev != comm->nDev) { commClearMaps(comm); return ncclRankMismatch; } // Create rank<->nccl maps int iRank = ranks[i].rank; comm->userFromRing[i] = iRank; comm->ringFromUser[iRank] = i; } if (hipMemcpy(comm->devUserFromRing, comm->userFromRing, ndev*sizeof(int), hipMemcpyHostToDevice) != hipSuccess) { WARN("rank %d failed to copy maps to device", rank); commClearMaps(comm); return ncclUnhandledCudaError; } int myId = -1; for (int i=0; i<ndev; ++i) { if(ranks[i].rank == rank) { myId = i; break; } } if (myId == -1) { WARN("rank %d not found in communicator", rank); return ncclInvalidRank; } comm->ncclId = myId; int myDev = ranks[myId].cudaDev; pid_t myPid = ranks[myId].pid; comm->useRemoteRecv = 1; // Assume we directly write to result ptrs. 
for (int i=0; i<ndev; ++i) { int iRank = ranks[i].rank; int iDev = ranks[i].cudaDev; pid_t iPid = ranks[i].pid; int canpeer = 0; if (hipDeviceCanAccessPeer(&canpeer, myDev, iDev) != hipSuccess) { INFO("peer query failed between rank %d (dev %d) and rank %d (dev %d)", rank, myDev, iRank, iDev); canpeer = 0; } if (canpeer) { hipError_t err; err = hipDeviceEnablePeerAccess(iDev, 0); if (err == hipErrorPeerAccessAlreadyEnabled) { hipGetLastError(); } else if (err != hipSuccess) { INFO("peer access failed between rank %d (dev %d) and rank %d (dev %d)\n", rank, myDev, iRank, iDev); canpeer = 0; } } if (iPid == myPid && (canpeer || myDev == iDev)) { INFO("rank access %d -> %d via P2P device mem", rank, iRank); comm->local[i] = ranks[myId].devptr; comm->remote[i] = ranks[i].devptr; comm->cleanup[i].type = CLEANUP_NONE; } else if (iPid == myPid) { INFO("rank access %d -> %d via zero-copy host mem", rank, iRank); comm->useRemoteRecv = 0; if (hipHostGetDevicePointer(comm->local+i, ranks[myId].hostptr, 0) != hipSuccess) { WARN("rank %d failed to map zero copy buffer to device", rank); commClearMaps(comm); return ncclUnhandledCudaError; } if (hipHostGetDevicePointer(comm->remote+i, ranks[i].hostptr, 0) != hipSuccess) { WARN("rank %d failed to map %d's zero copy buffer to device", rank, iRank); commClearMaps(comm); return ncclUnhandledCudaError; } comm->cleanup[i].type = CLEANUP_NONE; } else if (canpeer || myDev == iDev) { INFO("rank access %d -> %d via Ipc P2P device mem", rank, iRank); comm->useRemoteRecv = 0; comm->local[i] = ranks[myId].devptr; if (wrapCuIpcOpenMemHandle((hipDeviceptr_t*)(&comm->remote[i]), ranks[i].devipc, hipIpcMemLazyEnablePeerAccess) != ncclSuccess) { WARN("rank %d failed to open Ipc handle to rank %d", rank, iRank); commClearMaps(comm); return ncclUnhandledCudaError; } comm->cleanup[i].type = CLEANUP_CUIPC; comm->cleanup[i].handle = comm->remote[i]; } else { INFO("rank access %d -> %d via zero copy host shm", rank, iRank); comm->useRemoteRecv = 0; if 
(hipHostGetDevicePointer(comm->local+i, ranks[myId].hostptr, 0) != hipSuccess) { WARN("rank %d failed to obtain dev ptr to sysmem buffer", rank); commClearMaps(comm); return ncclUnhandledCudaError; } char rankname[1024]; sprintf(rankname, "%s-%d", commId->internal, ranks[i].rank); if (openHostMemShm(rankname, (ncclMem**)&comm->cleanup[i].handle, ranks[i].buffSize) != ncclSuccess) { WARN("rank %d failed to open sysmem buffer of rank %d", rank, iRank); commClearMaps(comm); return ncclUnhandledCudaError; } if (hipHostGetDevicePointer(comm->remote+i, comm->cleanup[i].handle, 0) != hipSuccess) { WARN("rank %d failed to obtain dev ptr for rank %d", rank, iRank); commClearMaps(comm); return ncclUnhandledCudaError; } comm->cleanup[i].type = CLEANUP_UNMAP; } } INFO("PushToRecv algos are %s\n", (comm->useRemoteRecv) ? "enabled" : "disabled"); return ncclSuccess; } static void initDebug() { const char* nccl_debug = getenv("NCCL_DEBUG"); if (nccl_debug == NULL) { ncclDebugLevel = NONE; } else if (strcmp(nccl_debug, "WARN") == 0) { ncclDebugLevel = WARN; } else if (strcmp(nccl_debug, "INFO") == 0) { ncclDebugLevel = INFO; INFO("NCCL debug level set to INFO"); } else if (strcmp(nccl_debug, "ABORT") == 0) { ncclDebugLevel = ABORT; INFO("NCCL debug level set to ABORT"); } } static void commFree(ncclComm_t comm) { if (comm == NULL) return; for(int i=0; i<MAXQUEUE; ++i) { if (comm->events.isDone[i] != NULL) if (hipEventDestroy(comm->events.isDone[i]) != hipSuccess) INFO("failed to destroy cuda event %d", i); } ncclResult_t res = commClearMaps(comm); if (res != ncclSuccess) INFO("failed to cleanup comm maps"); if (comm->devUserFromRing != NULL) if (hipFree(comm->devUserFromRing) != hipSuccess) INFO("commFree failed to free dev maps"); if (comm->devMem != NULL && hipFree(comm->devMem) != hipSuccess) INFO("Failed to free devMap"); if (comm->hostMem != NULL) { if (comm->hostMemState & ShmMapped) { if (hipHostUnregister(comm->hostMem) != hipSuccess) INFO("Failed to unregister hostMem"); 
size_t size = offsetof(ncclMem, buff) + comm->buffSize; if (shmUnmap(comm->hostMem, size) != ncclSuccess) INFO("Failed to unmap hostMem"); comm->hostMemState ^= ShmMapped; } else { hipHostFree(comm->hostMem); } } free(comm); } static ncclResult_t commAlloc(ncclComm_t* comret, int ndev, const ncclUniqueId* commId, int rank) { if (ndev < 1 || ndev > MAXPEERS) { WARN("requested device count (%d) exceeds maximum of %d", ndev, MAXPEERS); return ncclUnsupportedDeviceCount; } if (rank >= ndev || rank < 0) { WARN("rank %d exceeds ndev=%d", rank, ndev); return ncclInvalidRank; } struct ncclComm* comm = (struct ncclComm*)malloc(sizeof(struct ncclComm)); if (comm == NULL) { WARN("comm allocation failed"); return ncclSystemError; } memset(comm, 0, sizeof(struct ncclComm)); comm->nDev = ndev; hipGetDevice(&comm->cudaDev); const char* str = getenv("NCCL_BUFFSIZE"); if (str != NULL) { errno = 0; comm->buffSize = strtol(str, NULL, 10); if (errno == ERANGE || comm->buffSize == 0) { INFO("rank %d invalid NCCL_BUFFSIZE: %s, using default %lu", rank, str, DEFAULT_BUFFER_SIZE_BYTES); comm->buffSize = DEFAULT_BUFFER_SIZE_BYTES; } } else { comm->buffSize = DEFAULT_BUFFER_SIZE_BYTES; } INFO("rank %d using buffSize = %lu", rank, comm->buffSize); ncclResult_t res; res = allocDevMem(&comm->devMem, comm->buffSize); if (res != ncclSuccess) { WARN("rank %d failed to allocate device buffer", rank); commFree(comm); return res; } if (hipMalloc(&comm->devUserFromRing, MAXPEERS*sizeof(int)) != hipSuccess) { WARN("rank %d failed to allocated device maps", rank); commFree(comm); return ncclCudaMallocFailed; } EventQueue* eq = &comm->events; for(int i=0; i<MAXQUEUE; ++i) { if (hipEventCreateWithFlags(eq->isDone+i, hipEventDisableTiming) != hipSuccess) { WARN("rank %d failed to create nccl event %d", rank, i); commFree(comm); return ncclUnhandledCudaError; } } if(commId == NULL) { comm->hostMemState = 0; res = allocHostMem(&comm->hostMem, comm->buffSize); } else { char rankname[1024]; sprintf(rankname, 
"%s-%d", commId->internal, rank); res = openHostMemShm(rankname, &comm->hostMem, comm->buffSize); if (res != ncclSuccess) { WARN("rank %d failed to allocate host buffer", rank); commFree(comm); return res; } comm->hostMemState = ShmMapped | ShmLinked; } *comret = comm; return ncclSuccess; } static ncclResult_t commUnlinkHostMem(ncclComm_t comm, ncclUniqueId commId, int rank) { char rankname[1024]; sprintf(rankname, "%s-%d", commId.internal, rank); if (comm->hostMemState & ShmLinked) comm->hostMemState ^= ShmLinked; return shmUnlink(rankname); } extern "C" DSOGLOBAL ncclResult_t ncclCommInitRank(ncclComm_t* newcomm, int ndev, ncclUniqueId commId, int myrank) { if (strlen(commId.internal) < 1 || strlen(commId.internal) >= NCCL_UNIQUE_ID_BYTES) { WARN("rank %d invalid commId", myrank); return ncclInvalidArgument; } initDebug(); ncclResult_t res; RankEntry myStuff; RankGather* gath = NULL; res = wrapSymbols(); if (res != ncclSuccess) { WARN("NCCL failed to initialize client libs"); return res; } res = wrapNvmlInit(); if (res != ncclSuccess) { WARN("rank %d failed to initialize nvml", myrank); return res; } res = commAlloc(newcomm, ndev, &commId, myrank); if (res != ncclSuccess) { WARN("rank %d failed to allocate communicator", myrank); return res; } res = populateRankInfo(&myStuff, myrank, *newcomm); if (res != ncclSuccess) { WARN("rank %d failed to obtain rank info", myrank); goto cleanup; } res = initGather(&gath, commId, ndev, myrank, myStuff); if (res != ncclSuccess) { WARN("rank %d failed to gather rank info", myrank); goto cleanup; } res = commBuildMaps(*newcomm, &commId, myrank, gath->ranks); if (res != ncclSuccess) { WARN("rank %d failed to build comm maps", myrank); goto cleanup; } res = closeGather(gath, ndev); // includes a barrier gath = NULL; if (res != ncclSuccess) { WARN("rank %d failed to close gather", myrank); goto cleanup; } goto final; cleanup: if (gath != NULL) closeGather(gath, ndev); commFree(*newcomm); final: if ((*newcomm)->hostMemState & 
ShmLinked) { if (commUnlinkHostMem(*newcomm, commId, myrank) != ncclSuccess) INFO("rank %d failed to unlink host mem shm segment", myrank); } if (wrapNvmlShutdown() != ncclSuccess) INFO("rank %d did not shutdown nvml properly", myrank); return res; } extern "C" DSOGLOBAL ncclResult_t ncclCommInitAll(ncclComm_t* comms, int ndev, int* devlist) { initDebug(); ncclResult_t res; int savedDevice; RankEntry* ranks = NULL; int rank, cudaDev; ncclComm_t comm = NULL; char busId[13]; uint32_t nvmlHandle; int affinity_set = 0; res = wrapSymbols(); if (res != ncclSuccess) { WARN("NCCL failed to initialize client libs"); return res; } hipGetDevice(&savedDevice); ranks = (RankEntry*)malloc(ndev*sizeof(RankEntry)); if (ranks == NULL) { WARN("NCCL allocation failed"); return ncclSystemError; } memset(ranks, 0, ndev*sizeof(RankEntry)); res = wrapNvmlInit(); if (res != ncclSuccess) { WARN("nccl failed to initialize nvml"); return res; } for(rank=0; rank<ndev; ++rank) comms[rank] = NULL; for (rank=0; rank<ndev; ++rank) { cudaDev = (devlist == NULL) ? 
rank : devlist[rank]; if (hipSetDevice(cudaDev) != hipSuccess) { WARN("rank %d failed to set cuda device %d", rank, cudaDev); res = ncclInvalidDeviceIndex; goto cleanup; } // Set CPU affinity affinity_set = 0; if (hipDeviceGetPCIBusId(busId, 13, cudaDev) != hipSuccess) { INFO("rank %d failed to get PCI Bus Id for device %d", rank, cudaDev); goto skipaffinity; } if (wrapNvmlDeviceGetHandleByPciBusId(busId, &nvmlHandle) != ncclSuccess) { INFO("rank %d failed to get nvml handle for device %s", rank, busId); goto skipaffinity; } if (wrapNvmlDeviceSetCpuAffinity(nvmlHandle) != ncclSuccess) { INFO("rank %d failed to set affinity", rank); goto skipaffinity; } affinity_set = 1; skipaffinity: res = commAlloc(&comm, ndev, NULL, rank); if (res != ncclSuccess) { WARN("rank %d failed to allocate communicator", rank); goto cleanup; } comms[rank] = comm; if (affinity_set && wrapNvmlDeviceClearCpuAffinity(nvmlHandle) != ncclSuccess) { INFO("rank %d set but failed to clear cpu affinity", rank); } res = populateRankInfo(ranks+rank, rank, comm); if (res != ncclSuccess) { WARN("rank %d failed to obtain rank info", rank); goto cleanup; } } orderRanks(ranks, ndev); for(rank=0; rank<ndev; ++rank) { comm = comms[rank]; hipSetDevice(comm->cudaDev); res = commBuildMaps(comm, NULL, rank, ranks); if (res != ncclSuccess) { WARN("rank %d failed to build comm maps", rank); goto cleanup; } } free(ranks); ranks = NULL; res = ncclSuccess; goto final; cleanup: if (ranks != NULL) free(ranks); for(rank=0; rank<ndev; ++rank) { if(comms[rank] != NULL) { commFree(comms[rank]); } } final: if(wrapNvmlShutdown() != ncclSuccess) INFO("NCCL did not shutdown nvml properly"); hipSetDevice(savedDevice); return res; } extern "C" DSOGLOBAL void ncclCommDestroy(ncclComm_t comm) { if (comm == NULL) return; int savedDevice; hipGetDevice(&savedDevice); int commDevice = comm->cudaDev; if (savedDevice != commDevice) { CUDACHECK(hipSetDevice(commDevice)); } commFree(comm); if (savedDevice != commDevice) 
hipSetDevice(savedDevice); } extern "C" DSOGLOBAL const char* ncclGetErrorString(ncclResult_t code) { switch (code) { case ncclSuccess : return "no error"; case ncclUnhandledCudaError : return "unhandled cuda error"; case ncclSystemError : return "system error"; case ncclInternalError : return "internal error"; case ncclInvalidDevicePointer : return "invalid device pointer"; case ncclInvalidRank : return "invalid rank"; case ncclUnsupportedDeviceCount : return "unsupported device count"; case ncclDeviceNotFound : return "device not found"; case ncclInvalidDeviceIndex : return "invalid device index"; case ncclLibWrapperNotSet : return "lib wrapper not initialized"; case ncclCudaMallocFailed : return "cuda malloc failed"; case ncclRankMismatch : return "parameter mismatch between ranks"; case ncclInvalidArgument : return "invalid argument"; case ncclInvalidType : return "invalid data type"; case ncclInvalidOperation : return "invalid reduction operations"; } return "unknown result code"; } extern "C" DSOGLOBAL ncclResult_t ncclCommCount(const ncclComm_t comm, int* count) { *count = comm->nDev; return ncclSuccess; } extern "C" DSOGLOBAL ncclResult_t ncclCommCuDevice(const ncclComm_t comm, int* devid) { *devid = comm->cudaDev; return ncclSuccess; } extern "C" DSOGLOBAL ncclResult_t ncclCommUserRank(const ncclComm_t comm, int* rank) { *rank = comm->userFromRing[comm->ncclId]; return ncclSuccess; }
core.cu
/************************************************************************* * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
************************************************************************/ #include <stdio.h> #include <stdlib.h> #include "core.h" #include "libwrap.h" #include <sys/mman.h> #include <sys/stat.h> #include <sys/types.h> #include <sched.h> #include <fcntl.h> #include <unistd.h> #include <cuda.h> #include <cuda_runtime.h> #include <string.h> #include <errno.h> DebugLevel ncclDebugLevel; extern "C" DSOGLOBAL ncclResult_t ncclGetUniqueId(ncclUniqueId* out) { pid_t pid = getpid(); static int count = 0; int commId = __sync_fetch_and_add(&count, 1); int len = snprintf(out->internal, NCCL_UNIQUE_ID_BYTES, "nccl-%d-%d", pid, commId); if(strlen(out->internal) < len) { WARN("ncclUniqueId truncated"); return ncclInternalError; } return ncclSuccess; } static ncclResult_t shmOpen(const char* shmname, size_t bytes, void** ptr) { int fd = shm_open(shmname, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR); if (fd == -1) { WARN("shm_open failed to open %s", shmname); return ncclSystemError; } if (ftruncate(fd, bytes) == -1) { WARN("ftruncate failed to allocate %ld bytes", bytes); shm_unlink(shmname); close(fd); return ncclSystemError; } *ptr = mmap(NULL, bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (*ptr == MAP_FAILED) { WARN("failure in mmap"); shm_unlink(shmname); close(fd); return ncclSystemError; } close(fd); return ncclSuccess; } static ncclResult_t shmUnlink(const char* shmname) { if(shm_unlink(shmname) == -1) { WARN("smh_unlink failed"); return ncclSystemError; } else { return ncclSuccess; } } static ncclResult_t shmUnmap(void* ptr, size_t bytes) { if(munmap(ptr, bytes) == -1) { WARN("munmap failed"); return ncclSystemError; } else { return ncclSuccess; } } typedef struct { int rank; int ndev; int cudaDev; int ncclId; pid_t pid; ncclMem* hostptr; ncclMem* devptr; CUipcMemHandle devipc; size_t buffSize; } RankEntry; static int compRanks(const void* a, const void* b) { const RankEntry* A = (const RankEntry*)a; const RankEntry* B = (const RankEntry*)b; if (A->ncclId < B->ncclId) 
return -1; if (A->ncclId > B->ncclId) return 1; return 0; } static void orderRanks(RankEntry* ranks, int count) { qsort(ranks, count, sizeof(RankEntry), compRanks); for(int i=0; i<count; ++i) ranks[i].ncclId = i; } typedef struct { union { struct { volatile int bar; }; char pad[16]; }; RankEntry ranks[1]; } RankGather; static ncclResult_t initGather(RankGather** gather, ncclUniqueId commId, int ndev, int rank, RankEntry myInfo) { size_t bytes = offsetof(RankGather, ranks) + ndev*sizeof(RankEntry); RankGather* tmp = NULL; int bar_tmp; ncclResult_t res = shmOpen(commId.internal, bytes, (void**)&tmp); if (res != ncclSuccess) { WARN("rank %d failed to open shm segment for gather", rank); return res; } tmp->ranks[rank] = myInfo; bar_tmp = tmp->bar - 1; bool swapped; do { bar_tmp += 1; if (bar_tmp == ndev-1) { // everyone is done ncclResult_t res = shmUnlink(commId.internal); if (res != ncclSuccess) { WARN("rank %d failed to unlink shm segment for gather", rank); shmUnmap(tmp, bytes); return res; } orderRanks(tmp->ranks, ndev); } swapped = __sync_bool_compare_and_swap(&tmp->bar, bar_tmp, bar_tmp+1); } while(!swapped); while (tmp->bar < ndev) sched_yield(); __sync_synchronize(); *gather = tmp; return ncclSuccess; } static ncclResult_t closeGather(RankGather* gather, int ndev) { int bar_tmp = gather->bar - 1; bool swapped; do { bar_tmp += 1; swapped = __sync_bool_compare_and_swap(&gather->bar, bar_tmp, bar_tmp+1); } while(!swapped); while (gather->bar != 2*ndev) sched_yield(); __sync_synchronize(); size_t bytes = offsetof(RankGather, ranks) + ndev*sizeof(RankEntry); ncclResult_t res = shmUnmap(gather, bytes); if (res != ncclSuccess) { WARN("failed to unmap %ld bytes of gather", bytes); return res; } return ncclSuccess; } static ncclResult_t allocDevMem(ncclMem** ptr, size_t buffSize) { size_t size = offsetof(struct ncclMem, buff) + buffSize; cudaError_t res = cudaMalloc((void**)ptr, size); if (res != cudaSuccess) { *ptr = NULL; WARN("failed to allocate %lu byte device 
buffer", size); return ncclCudaMallocFailed; } if (cudaMemset(*ptr, 0, size) != cudaSuccess) { WARN("failed to memset device buffer."); cudaFree(*ptr); *ptr = NULL; return ncclUnhandledCudaError; } return ncclSuccess; } static const int ShmMapped = 1; static const int ShmLinked = 2; static ncclResult_t allocHostMem(ncclMem** ptr, size_t buffSize) { size_t size = offsetof(struct ncclMem, buff) + buffSize; cudaError_t res = cudaMallocHost((void**)ptr, size); if (res != cudaSuccess) { *ptr = NULL; WARN("failed to allocate %lu byte host buffer", size); return ncclSystemError; } memset(*ptr, 0, size); return ncclSuccess; } static ncclResult_t openHostMemShm(const char* shmname, ncclMem** ptr, size_t buffSize) { size_t size = offsetof(struct ncclMem, buff) + buffSize; ncclResult_t res = shmOpen(shmname, size, (void**)ptr); if (res != ncclSuccess) { WARN("failed to allocate %lu byte shm buffer", size); *ptr = NULL; return res; } if(cudaHostRegister(*ptr, size, cudaHostRegisterMapped) != cudaSuccess) { WARN("failed to register host buffer"); shmUnlink(shmname); shmUnmap(*ptr, size); *ptr = NULL; return ncclUnhandledCudaError; } return ncclSuccess; } static ncclResult_t populateRankInfo(RankEntry* info, int rank, ncclComm_t comm) { char busId[13]; nvmlDevice_t nvmlHandle; cudaError_t res = cudaDeviceGetPCIBusId(busId, 13, comm->cudaDev); if (res == cudaErrorInvalidDevice) { WARN("rank %d attempted to access an invalid cuda device %d", rank, comm->cudaDev); return ncclInvalidDeviceIndex; } else if (res != cudaSuccess) { WARN("rank %d failed to get PCI Bus Id for device %d", rank, comm->cudaDev); return ncclUnhandledCudaError; } INFO("rank %d using device %d (%s)", rank, comm->cudaDev, busId); if (wrapNvmlDeviceGetHandleByPciBusId(busId, &nvmlHandle) != ncclSuccess) { WARN("rank %d failed to get nvml handle for device %s", rank, busId); return ncclUnhandledCudaError; } // Order by nvml index if (wrapNvmlDeviceGetIndex(nvmlHandle, (unsigned*)&info->ncclId) != ncclSuccess) { 
WARN("rank %d failed to get nvml device index for device %d", rank, comm->cudaDev); return ncclUnhandledCudaError; } info->rank = rank; info->ndev = comm->nDev; info->cudaDev = comm->cudaDev; info->pid = getpid(); info->buffSize = comm->buffSize; info->hostptr = comm->hostMem; info->devptr = comm->devMem; if (wrapCuIpcGetMemHandle(&info->devipc, (CUdeviceptr)comm->devMem) != ncclSuccess) { WARN("rank %d failed to open CUDA IPC handle", rank); return ncclUnhandledCudaError; } return ncclSuccess; } static const int CLEANUP_NONE = 0; static const int CLEANUP_CUIPC = 1; static const int CLEANUP_UNMAP = 2; static ncclResult_t commClearMaps(ncclComm_t comm) { ncclResult_t res, retval = ncclSuccess; cudaError_t cures; for(int d=0; d<comm->nDev; ++d) { switch(comm->cleanup[d].type) { case CLEANUP_NONE: break; case CLEANUP_CUIPC: res = wrapCuIpcCloseMemHandle((CUdeviceptr)comm->cleanup[d].handle); if (res != ncclSuccess) { WARN("rank %d failed to close IPC handle to rank %d", comm->userFromRing[comm->ncclId], comm->userFromRing[d]); retval = (retval == ncclSuccess) ? res : retval; } break; case CLEANUP_UNMAP: cures = cudaHostUnregister(comm->cleanup[d].handle); if (cures != cudaSuccess) { WARN("rank %d failed to unregister handle to rank %d", comm->userFromRing[comm->ncclId], comm->userFromRing[d]); retval = (retval == ncclSuccess) ? ncclUnhandledCudaError : retval; } res = shmUnmap(comm->cleanup[d].handle, offsetof(ncclMem, buff) + comm->buffSize); if (res != ncclSuccess) { WARN("rank %d failed to unmap handle to rank %d", comm->userFromRing[comm->ncclId], comm->userFromRing[d]); retval = (retval == ncclSuccess) ? 
res : retval; } break; default: WARN("Unknown cleanup type %d", comm->cleanup[d].type); } comm->cleanup[d].type = 0; comm->cleanup[d].handle = NULL; } memset(comm->userFromRing, 0, sizeof(int)*MAXPEERS); memset(comm->ringFromUser, 0, sizeof(int)*MAXPEERS); if (comm->devUserFromRing != NULL) { cudaError_t err = cudaMemset(comm->devUserFromRing, 0, sizeof(int)*MAXPEERS); if (err != cudaSuccess) { WARN("Faild to clear dev map: %s", cudaGetErrorString(err)); retval = (retval == ncclSuccess) ? ncclUnhandledCudaError : retval; } } return retval; } static ncclResult_t commBuildMaps(ncclComm_t comm, ncclUniqueId* commId, int rank, RankEntry* ranks) { int ndev = comm->nDev; for(int i=0; i<ndev; ++i) { // Check for inconsistencies between ranks // If two ranks use the same rank, then one slot of // ranks[] will be left unset with zero ndev/buffSize. if (ranks[i].buffSize != comm->buffSize || ranks[i].ndev != comm->nDev) { commClearMaps(comm); return ncclRankMismatch; } // Create rank<->nccl maps int iRank = ranks[i].rank; comm->userFromRing[i] = iRank; comm->ringFromUser[iRank] = i; } if (cudaMemcpy(comm->devUserFromRing, comm->userFromRing, ndev*sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) { WARN("rank %d failed to copy maps to device", rank); commClearMaps(comm); return ncclUnhandledCudaError; } int myId = -1; for (int i=0; i<ndev; ++i) { if(ranks[i].rank == rank) { myId = i; break; } } if (myId == -1) { WARN("rank %d not found in communicator", rank); return ncclInvalidRank; } comm->ncclId = myId; int myDev = ranks[myId].cudaDev; pid_t myPid = ranks[myId].pid; comm->useRemoteRecv = 1; // Assume we directly write to result ptrs. 
for (int i=0; i<ndev; ++i) { int iRank = ranks[i].rank; int iDev = ranks[i].cudaDev; pid_t iPid = ranks[i].pid; int canpeer = 0; if (cudaDeviceCanAccessPeer(&canpeer, myDev, iDev) != cudaSuccess) { INFO("peer query failed between rank %d (dev %d) and rank %d (dev %d)", rank, myDev, iRank, iDev); canpeer = 0; } if (canpeer) { cudaError_t err; err = cudaDeviceEnablePeerAccess(iDev, 0); if (err == cudaErrorPeerAccessAlreadyEnabled) { cudaGetLastError(); } else if (err != cudaSuccess) { INFO("peer access failed between rank %d (dev %d) and rank %d (dev %d)\n", rank, myDev, iRank, iDev); canpeer = 0; } } if (iPid == myPid && (canpeer || myDev == iDev)) { INFO("rank access %d -> %d via P2P device mem", rank, iRank); comm->local[i] = ranks[myId].devptr; comm->remote[i] = ranks[i].devptr; comm->cleanup[i].type = CLEANUP_NONE; } else if (iPid == myPid) { INFO("rank access %d -> %d via zero-copy host mem", rank, iRank); comm->useRemoteRecv = 0; if (cudaHostGetDevicePointer(comm->local+i, ranks[myId].hostptr, 0) != cudaSuccess) { WARN("rank %d failed to map zero copy buffer to device", rank); commClearMaps(comm); return ncclUnhandledCudaError; } if (cudaHostGetDevicePointer(comm->remote+i, ranks[i].hostptr, 0) != cudaSuccess) { WARN("rank %d failed to map %d's zero copy buffer to device", rank, iRank); commClearMaps(comm); return ncclUnhandledCudaError; } comm->cleanup[i].type = CLEANUP_NONE; } else if (canpeer || myDev == iDev) { INFO("rank access %d -> %d via Ipc P2P device mem", rank, iRank); comm->useRemoteRecv = 0; comm->local[i] = ranks[myId].devptr; if (wrapCuIpcOpenMemHandle((CUdeviceptr*)(&comm->remote[i]), ranks[i].devipc, CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS) != ncclSuccess) { WARN("rank %d failed to open Ipc handle to rank %d", rank, iRank); commClearMaps(comm); return ncclUnhandledCudaError; } comm->cleanup[i].type = CLEANUP_CUIPC; comm->cleanup[i].handle = comm->remote[i]; } else { INFO("rank access %d -> %d via zero copy host shm", rank, iRank); 
comm->useRemoteRecv = 0; if (cudaHostGetDevicePointer(comm->local+i, ranks[myId].hostptr, 0) != cudaSuccess) { WARN("rank %d failed to obtain dev ptr to sysmem buffer", rank); commClearMaps(comm); return ncclUnhandledCudaError; } char rankname[1024]; sprintf(rankname, "%s-%d", commId->internal, ranks[i].rank); if (openHostMemShm(rankname, (ncclMem**)&comm->cleanup[i].handle, ranks[i].buffSize) != ncclSuccess) { WARN("rank %d failed to open sysmem buffer of rank %d", rank, iRank); commClearMaps(comm); return ncclUnhandledCudaError; } if (cudaHostGetDevicePointer(comm->remote+i, comm->cleanup[i].handle, 0) != cudaSuccess) { WARN("rank %d failed to obtain dev ptr for rank %d", rank, iRank); commClearMaps(comm); return ncclUnhandledCudaError; } comm->cleanup[i].type = CLEANUP_UNMAP; } } INFO("PushToRecv algos are %s\n", (comm->useRemoteRecv) ? "enabled" : "disabled"); return ncclSuccess; } static void initDebug() { const char* nccl_debug = getenv("NCCL_DEBUG"); if (nccl_debug == NULL) { ncclDebugLevel = NONE; } else if (strcmp(nccl_debug, "WARN") == 0) { ncclDebugLevel = WARN; } else if (strcmp(nccl_debug, "INFO") == 0) { ncclDebugLevel = INFO; INFO("NCCL debug level set to INFO"); } else if (strcmp(nccl_debug, "ABORT") == 0) { ncclDebugLevel = ABORT; INFO("NCCL debug level set to ABORT"); } } static void commFree(ncclComm_t comm) { if (comm == NULL) return; for(int i=0; i<MAXQUEUE; ++i) { if (comm->events.isDone[i] != NULL) if (cudaEventDestroy(comm->events.isDone[i]) != cudaSuccess) INFO("failed to destroy cuda event %d", i); } ncclResult_t res = commClearMaps(comm); if (res != ncclSuccess) INFO("failed to cleanup comm maps"); if (comm->devUserFromRing != NULL) if (cudaFree(comm->devUserFromRing) != cudaSuccess) INFO("commFree failed to free dev maps"); if (comm->devMem != NULL && cudaFree(comm->devMem) != cudaSuccess) INFO("Failed to free devMap"); if (comm->hostMem != NULL) { if (comm->hostMemState & ShmMapped) { if (cudaHostUnregister(comm->hostMem) != 
cudaSuccess) INFO("Failed to unregister hostMem"); size_t size = offsetof(ncclMem, buff) + comm->buffSize; if (shmUnmap(comm->hostMem, size) != ncclSuccess) INFO("Failed to unmap hostMem"); comm->hostMemState ^= ShmMapped; } else { cudaFreeHost(comm->hostMem); } } free(comm); } static ncclResult_t commAlloc(ncclComm_t* comret, int ndev, const ncclUniqueId* commId, int rank) { if (ndev < 1 || ndev > MAXPEERS) { WARN("requested device count (%d) exceeds maximum of %d", ndev, MAXPEERS); return ncclUnsupportedDeviceCount; } if (rank >= ndev || rank < 0) { WARN("rank %d exceeds ndev=%d", rank, ndev); return ncclInvalidRank; } struct ncclComm* comm = (struct ncclComm*)malloc(sizeof(struct ncclComm)); if (comm == NULL) { WARN("comm allocation failed"); return ncclSystemError; } memset(comm, 0, sizeof(struct ncclComm)); comm->nDev = ndev; cudaGetDevice(&comm->cudaDev); const char* str = getenv("NCCL_BUFFSIZE"); if (str != NULL) { errno = 0; comm->buffSize = strtol(str, NULL, 10); if (errno == ERANGE || comm->buffSize == 0) { INFO("rank %d invalid NCCL_BUFFSIZE: %s, using default %lu", rank, str, DEFAULT_BUFFER_SIZE_BYTES); comm->buffSize = DEFAULT_BUFFER_SIZE_BYTES; } } else { comm->buffSize = DEFAULT_BUFFER_SIZE_BYTES; } INFO("rank %d using buffSize = %lu", rank, comm->buffSize); ncclResult_t res; res = allocDevMem(&comm->devMem, comm->buffSize); if (res != ncclSuccess) { WARN("rank %d failed to allocate device buffer", rank); commFree(comm); return res; } if (cudaMalloc(&comm->devUserFromRing, MAXPEERS*sizeof(int)) != cudaSuccess) { WARN("rank %d failed to allocated device maps", rank); commFree(comm); return ncclCudaMallocFailed; } EventQueue* eq = &comm->events; for(int i=0; i<MAXQUEUE; ++i) { if (cudaEventCreateWithFlags(eq->isDone+i, cudaEventDisableTiming) != cudaSuccess) { WARN("rank %d failed to create nccl event %d", rank, i); commFree(comm); return ncclUnhandledCudaError; } } if(commId == NULL) { comm->hostMemState = 0; res = allocHostMem(&comm->hostMem, 
comm->buffSize); } else { char rankname[1024]; sprintf(rankname, "%s-%d", commId->internal, rank); res = openHostMemShm(rankname, &comm->hostMem, comm->buffSize); if (res != ncclSuccess) { WARN("rank %d failed to allocate host buffer", rank); commFree(comm); return res; } comm->hostMemState = ShmMapped | ShmLinked; } *comret = comm; return ncclSuccess; } static ncclResult_t commUnlinkHostMem(ncclComm_t comm, ncclUniqueId commId, int rank) { char rankname[1024]; sprintf(rankname, "%s-%d", commId.internal, rank); if (comm->hostMemState & ShmLinked) comm->hostMemState ^= ShmLinked; return shmUnlink(rankname); } extern "C" DSOGLOBAL ncclResult_t ncclCommInitRank(ncclComm_t* newcomm, int ndev, ncclUniqueId commId, int myrank) { if (strlen(commId.internal) < 1 || strlen(commId.internal) >= NCCL_UNIQUE_ID_BYTES) { WARN("rank %d invalid commId", myrank); return ncclInvalidArgument; } initDebug(); ncclResult_t res; RankEntry myStuff; RankGather* gath = NULL; res = wrapSymbols(); if (res != ncclSuccess) { WARN("NCCL failed to initialize client libs"); return res; } res = wrapNvmlInit(); if (res != ncclSuccess) { WARN("rank %d failed to initialize nvml", myrank); return res; } res = commAlloc(newcomm, ndev, &commId, myrank); if (res != ncclSuccess) { WARN("rank %d failed to allocate communicator", myrank); return res; } res = populateRankInfo(&myStuff, myrank, *newcomm); if (res != ncclSuccess) { WARN("rank %d failed to obtain rank info", myrank); goto cleanup; } res = initGather(&gath, commId, ndev, myrank, myStuff); if (res != ncclSuccess) { WARN("rank %d failed to gather rank info", myrank); goto cleanup; } res = commBuildMaps(*newcomm, &commId, myrank, gath->ranks); if (res != ncclSuccess) { WARN("rank %d failed to build comm maps", myrank); goto cleanup; } res = closeGather(gath, ndev); // includes a barrier gath = NULL; if (res != ncclSuccess) { WARN("rank %d failed to close gather", myrank); goto cleanup; } goto final; cleanup: if (gath != NULL) closeGather(gath, 
ndev); commFree(*newcomm); final: if ((*newcomm)->hostMemState & ShmLinked) { if (commUnlinkHostMem(*newcomm, commId, myrank) != ncclSuccess) INFO("rank %d failed to unlink host mem shm segment", myrank); } if (wrapNvmlShutdown() != ncclSuccess) INFO("rank %d did not shutdown nvml properly", myrank); return res; } extern "C" DSOGLOBAL ncclResult_t ncclCommInitAll(ncclComm_t* comms, int ndev, int* devlist) { initDebug(); ncclResult_t res; int savedDevice; RankEntry* ranks = NULL; int rank, cudaDev; ncclComm_t comm = NULL; char busId[13]; nvmlDevice_t nvmlHandle; int affinity_set = 0; res = wrapSymbols(); if (res != ncclSuccess) { WARN("NCCL failed to initialize client libs"); return res; } cudaGetDevice(&savedDevice); ranks = (RankEntry*)malloc(ndev*sizeof(RankEntry)); if (ranks == NULL) { WARN("NCCL allocation failed"); return ncclSystemError; } memset(ranks, 0, ndev*sizeof(RankEntry)); res = wrapNvmlInit(); if (res != ncclSuccess) { WARN("nccl failed to initialize nvml"); return res; } for(rank=0; rank<ndev; ++rank) comms[rank] = NULL; for (rank=0; rank<ndev; ++rank) { cudaDev = (devlist == NULL) ? 
rank : devlist[rank]; if (cudaSetDevice(cudaDev) != cudaSuccess) { WARN("rank %d failed to set cuda device %d", rank, cudaDev); res = ncclInvalidDeviceIndex; goto cleanup; } // Set CPU affinity affinity_set = 0; if (cudaDeviceGetPCIBusId(busId, 13, cudaDev) != cudaSuccess) { INFO("rank %d failed to get PCI Bus Id for device %d", rank, cudaDev); goto skipaffinity; } if (wrapNvmlDeviceGetHandleByPciBusId(busId, &nvmlHandle) != ncclSuccess) { INFO("rank %d failed to get nvml handle for device %s", rank, busId); goto skipaffinity; } if (wrapNvmlDeviceSetCpuAffinity(nvmlHandle) != ncclSuccess) { INFO("rank %d failed to set affinity", rank); goto skipaffinity; } affinity_set = 1; skipaffinity: res = commAlloc(&comm, ndev, NULL, rank); if (res != ncclSuccess) { WARN("rank %d failed to allocate communicator", rank); goto cleanup; } comms[rank] = comm; if (affinity_set && wrapNvmlDeviceClearCpuAffinity(nvmlHandle) != ncclSuccess) { INFO("rank %d set but failed to clear cpu affinity", rank); } res = populateRankInfo(ranks+rank, rank, comm); if (res != ncclSuccess) { WARN("rank %d failed to obtain rank info", rank); goto cleanup; } } orderRanks(ranks, ndev); for(rank=0; rank<ndev; ++rank) { comm = comms[rank]; cudaSetDevice(comm->cudaDev); res = commBuildMaps(comm, NULL, rank, ranks); if (res != ncclSuccess) { WARN("rank %d failed to build comm maps", rank); goto cleanup; } } free(ranks); ranks = NULL; res = ncclSuccess; goto final; cleanup: if (ranks != NULL) free(ranks); for(rank=0; rank<ndev; ++rank) { if(comms[rank] != NULL) { commFree(comms[rank]); } } final: if(wrapNvmlShutdown() != ncclSuccess) INFO("NCCL did not shutdown nvml properly"); cudaSetDevice(savedDevice); return res; } extern "C" DSOGLOBAL void ncclCommDestroy(ncclComm_t comm) { if (comm == NULL) return; int savedDevice; cudaGetDevice(&savedDevice); int commDevice = comm->cudaDev; if (savedDevice != commDevice) { CUDACHECK(cudaSetDevice(commDevice)); } commFree(comm); if (savedDevice != commDevice) 
cudaSetDevice(savedDevice); } extern "C" DSOGLOBAL const char* ncclGetErrorString(ncclResult_t code) { switch (code) { case ncclSuccess : return "no error"; case ncclUnhandledCudaError : return "unhandled cuda error"; case ncclSystemError : return "system error"; case ncclInternalError : return "internal error"; case ncclInvalidDevicePointer : return "invalid device pointer"; case ncclInvalidRank : return "invalid rank"; case ncclUnsupportedDeviceCount : return "unsupported device count"; case ncclDeviceNotFound : return "device not found"; case ncclInvalidDeviceIndex : return "invalid device index"; case ncclLibWrapperNotSet : return "lib wrapper not initialized"; case ncclCudaMallocFailed : return "cuda malloc failed"; case ncclRankMismatch : return "parameter mismatch between ranks"; case ncclInvalidArgument : return "invalid argument"; case ncclInvalidType : return "invalid data type"; case ncclInvalidOperation : return "invalid reduction operations"; } return "unknown result code"; } extern "C" DSOGLOBAL ncclResult_t ncclCommCount(const ncclComm_t comm, int* count) { *count = comm->nDev; return ncclSuccess; } extern "C" DSOGLOBAL ncclResult_t ncclCommCuDevice(const ncclComm_t comm, int* devid) { *devid = comm->cudaDev; return ncclSuccess; } extern "C" DSOGLOBAL ncclResult_t ncclCommUserRank(const ncclComm_t comm, int* rank) { *rank = comm->userFromRing[comm->ncclId]; return ncclSuccess; }
484889853b185f7356e2da6d18a4a9761845ffab.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/hip/HIPContext.h> #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cstdio> #include <vector> #include "nn/cuda/im2col.cuh" namespace mapped_conv { namespace nn { namespace cuda { /* The transposed forward pass works by leveraging the same GEMM operation as the backward pass w.r.t the inputs. The difference is in the arguments. First, recall a traditional forward convolution operation. For each batch: 1. We form the matrix C from the image using the im2col function. C has dimensions (ck^2, n) for c input channels, (square) filter size k, and n filtering locations (e.g. pixels in the output) 2. We then perform the convolution as a matrix multiplication between our weights W (dim: d, ck^2, for d output channels). 3. We get the resulting output O = WC, with dimensions (d, n). We can then reshape n into the correct (h,w). Now, recall the traiditonal backward pass w.r.t the inputs. For each batch: 1. Compute the matrix multiplication between the output gradients O (dim: d, n) and the weights W (dim: d, ck^2). This gives the input gradients in column form s.t. matrix C = W^T O 2. We then use the col2im function to decompose C back into the input shape The transposed convolution takes the same arguments as the traditional forward convolution, but the GEMM operation is the same as the traditional backward convolution. To make this work, we first need to transpose the dimensions of the weight matrix, hence the term "transposed convolution." This transpose swaps the input and output channels, so the weights matrix goes from (d, c, kh, kw) --> (c, d, kh, kw). Then, for each batch: 1. Compute the matrix multiplication between the input I (dim: c, n) and the weight matrix (dim: c, dk^2, note the change here). This gives the output in column form s.t. matrix C = W^T I (C dims: dk^2, n) 2. 
Now redistribute the column matrix C into the output O (dim: d, n) using the col2im function. In this function call, we assume the weight matrix has already been transposed. */ torch::Tensor TransposedConvForward(torch::Tensor input, torch::Tensor weight, torch::Tensor bias, int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW) { // Useful dimensions to have const int64_t nInputPlanes = weight.size(0); const int64_t nOutputPlanes = weight.size(1); const int64_t inputHeight = input.size(2); const int64_t inputWidth = input.size(3); const int64_t outputHeight = dH * (inputHeight - 1) + kH + (kH - 1) * (dilationH - 1) - 2 * padH; const int64_t outputWidth = dW * (inputWidth - 1) + kW + (kW - 1) * (dilationW - 1) - 2 * padW; const int64_t batchSize = input.size(0); const int64_t inputBatchStride = nInputPlanes * inputHeight * inputWidth; // Initialize output and temporary columns torch::Tensor output = torch::zeros( {batchSize, nOutputPlanes, outputHeight, outputWidth}, input.options()); torch::Tensor columns = torch::zeros( {kW * kH * nOutputPlanes, inputHeight * inputWidth}, input.options()); // For each elt in batch, do: for (int b = 0; b < batchSize; b++) { // Get cuda stream hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipblasSetStream(handle, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); // Call the GEMM function (note that it expects column major matrices) const int64_t m = columns.size(1); const int64_t n = weight.size(1) * weight.size(2) * weight.size(3); const int64_t k = weight.size(0); if (input.dtype() == torch::kDouble) { const double alpha = 1.0; const double beta = 0.0; hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, m, n, k, &alpha, input.data<double>() + b * inputBatchStride, m, weight.data<double>(), n, &beta, columns.data<double>(), m); } else if (input.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 0.0; hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, m, n, k, &alpha, 
input.data<float>() + b * inputBatchStride, m, weight.data<float>(), n, &beta, columns.data<float>(), m); } else { printf("Can only support double and float\n"); std::exit(-1); } CUDA_CHECK(hipGetLastError()) Col2Im2DLauncher(columns, nOutputPlanes, outputHeight, outputWidth, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, output[b]); // Use PyTorch to add the bias output[b] += bias.view({output[b].size(0), 1, 1}); } return output; } torch::Tensor TransposedConvBackwardInput(torch::Tensor grad_output, torch::Tensor weight, int inputHeight, int inputWidth, int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW) { // Useful dimensions to have const int64_t nInputPlanes = weight.size(0); const int64_t nOutputPlanes = weight.size(1); const int64_t outputHeight = grad_output.size(2); const int64_t outputWidth = grad_output.size(3); const int64_t batchSize = grad_output.size(0); // Initialize output and temporary columns torch::Tensor input_grad = torch::zeros({batchSize, nInputPlanes, inputHeight, inputWidth}, grad_output.options()); torch::Tensor columns = torch::zeros({kW * kH * nOutputPlanes, inputHeight * inputWidth}, grad_output.options()); // For each elt in batch, do: const int64_t inputBatchStride = nInputPlanes * inputHeight * inputWidth; for (int b = 0; b < batchSize; b++) { Im2Col2DLauncher(grad_output[b], nOutputPlanes, outputHeight, outputWidth, inputWidth, columns.size(1), kH, kW, padH, padW, dH, dW, dilationH, dilationW, columns); // Get cuda stream hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipblasSetStream(handle, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); // Call the GEMM function (note that it expects column major matrices) const int64_t m = columns.size(1); const int64_t k = weight.size(1) * weight.size(2) * weight.size(3); const int64_t n = weight.size(0); if (grad_output.dtype() == torch::kDouble) { const double alpha = 1.0; const double beta = 0.0; 
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alpha, columns.data<double>(), m, weight.data<double>(), k, &beta, input_grad.data<double>() + b * inputBatchStride, m); } else if (grad_output.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 0.0; hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alpha, columns.data<float>(), m, weight.data<float>(), k, &beta, input_grad.data<float>() + b * inputBatchStride, m); } CUDA_CHECK(hipGetLastError()) } return input_grad; } torch::Tensor TransposedConvBackwardWeight(torch::Tensor grad_output, torch::Tensor input, int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW) { // Useful dimensions to have const int64_t nOutputPlanes = grad_output.size(1); const int64_t nInputPlanes = input.size(1); const int64_t outputHeight = grad_output.size(2); const int64_t outputWidth = grad_output.size(3); const int64_t inputHeight = input.size(2); const int64_t inputWidth = input.size(3); const int64_t batchSize = grad_output.size(0); const int64_t inputBatchStride = nInputPlanes * inputHeight * inputWidth; // Initialize output and temporary columns torch::Tensor weight_grad = torch::zeros( {nInputPlanes, nOutputPlanes, kH, kW}, grad_output.options()); torch::Tensor columns = torch::zeros({kW * kH * nOutputPlanes, inputHeight * inputWidth}, grad_output.options()); // For each elt in batch, do: for (int b = 0; b < batchSize; b++) { // Create the column matrix from the grad output as we would for the input // in the standard conv_forward Im2Col2DLauncher(grad_output[b], nOutputPlanes, outputHeight, outputWidth, inputWidth, columns.size(1), kH, kW, padH, padW, dH, dW, dilationH, dilationW, columns); // Get cuda stream hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipblasSetStream(handle, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); // Propagate the gradients from the outputs to the weights using GEMM // Note that GEMM expects column major matrices const 
int64_t m = weight_grad.size(1) * weight_grad.size(2) * weight_grad.size(3); const int64_t n = weight_grad.size(0); const int64_t k = columns.size(1); if (grad_output.dtype() == torch::kDouble) { const double alpha = 1.0; const double beta = 1.0; hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k, &alpha, columns.data<double>(), k, input.data<double>() + b * inputBatchStride, k, &beta, weight_grad.data<double>(), m); } else if (grad_output.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 1.0; hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k, &alpha, columns.data<float>(), k, input.data<float>() + b * inputBatchStride, k, &beta, weight_grad.data<float>(), m); } CUDA_CHECK(hipGetLastError()) } return weight_grad; } } // namespace cuda } // namespace nn } // namespace mapped_conv
484889853b185f7356e2da6d18a4a9761845ffab.cu
#include <ATen/cuda/CUDAContext.h> #include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <cstdio> #include <vector> #include "nn/cuda/im2col.cuh" namespace mapped_conv { namespace nn { namespace cuda { /* The transposed forward pass works by leveraging the same GEMM operation as the backward pass w.r.t the inputs. The difference is in the arguments. First, recall a traditional forward convolution operation. For each batch: 1. We form the matrix C from the image using the im2col function. C has dimensions (ck^2, n) for c input channels, (square) filter size k, and n filtering locations (e.g. pixels in the output) 2. We then perform the convolution as a matrix multiplication between our weights W (dim: d, ck^2, for d output channels). 3. We get the resulting output O = WC, with dimensions (d, n). We can then reshape n into the correct (h,w). Now, recall the traiditonal backward pass w.r.t the inputs. For each batch: 1. Compute the matrix multiplication between the output gradients O (dim: d, n) and the weights W (dim: d, ck^2). This gives the input gradients in column form s.t. matrix C = W^T O 2. We then use the col2im function to decompose C back into the input shape The transposed convolution takes the same arguments as the traditional forward convolution, but the GEMM operation is the same as the traditional backward convolution. To make this work, we first need to transpose the dimensions of the weight matrix, hence the term "transposed convolution." This transpose swaps the input and output channels, so the weights matrix goes from (d, c, kh, kw) --> (c, d, kh, kw). Then, for each batch: 1. Compute the matrix multiplication between the input I (dim: c, n) and the weight matrix (dim: c, dk^2, note the change here). This gives the output in column form s.t. matrix C = W^T I (C dims: dk^2, n) 2. Now redistribute the column matrix C into the output O (dim: d, n) using the col2im function. 
In this function call, we assume the weight matrix has already been transposed. */ torch::Tensor TransposedConvForward(torch::Tensor input, torch::Tensor weight, torch::Tensor bias, int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW) { // Useful dimensions to have const int64_t nInputPlanes = weight.size(0); const int64_t nOutputPlanes = weight.size(1); const int64_t inputHeight = input.size(2); const int64_t inputWidth = input.size(3); const int64_t outputHeight = dH * (inputHeight - 1) + kH + (kH - 1) * (dilationH - 1) - 2 * padH; const int64_t outputWidth = dW * (inputWidth - 1) + kW + (kW - 1) * (dilationW - 1) - 2 * padW; const int64_t batchSize = input.size(0); const int64_t inputBatchStride = nInputPlanes * inputHeight * inputWidth; // Initialize output and temporary columns torch::Tensor output = torch::zeros( {batchSize, nOutputPlanes, outputHeight, outputWidth}, input.options()); torch::Tensor columns = torch::zeros( {kW * kH * nOutputPlanes, inputHeight * inputWidth}, input.options()); // For each elt in batch, do: for (int b = 0; b < batchSize; b++) { // Get cuda stream cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cublasSetStream(handle, at::cuda::getCurrentCUDAStream()); // Call the GEMM function (note that it expects column major matrices) const int64_t m = columns.size(1); const int64_t n = weight.size(1) * weight.size(2) * weight.size(3); const int64_t k = weight.size(0); if (input.dtype() == torch::kDouble) { const double alpha = 1.0; const double beta = 0.0; cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, input.data<double>() + b * inputBatchStride, m, weight.data<double>(), n, &beta, columns.data<double>(), m); } else if (input.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 0.0; cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, input.data<float>() + b * inputBatchStride, m, weight.data<float>(), n, &beta, columns.data<float>(), m); } else { 
printf("Can only support double and float\n"); std::exit(-1); } CUDA_CHECK(cudaGetLastError()) Col2Im2DLauncher(columns, nOutputPlanes, outputHeight, outputWidth, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, output[b]); // Use PyTorch to add the bias output[b] += bias.view({output[b].size(0), 1, 1}); } return output; } torch::Tensor TransposedConvBackwardInput(torch::Tensor grad_output, torch::Tensor weight, int inputHeight, int inputWidth, int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW) { // Useful dimensions to have const int64_t nInputPlanes = weight.size(0); const int64_t nOutputPlanes = weight.size(1); const int64_t outputHeight = grad_output.size(2); const int64_t outputWidth = grad_output.size(3); const int64_t batchSize = grad_output.size(0); // Initialize output and temporary columns torch::Tensor input_grad = torch::zeros({batchSize, nInputPlanes, inputHeight, inputWidth}, grad_output.options()); torch::Tensor columns = torch::zeros({kW * kH * nOutputPlanes, inputHeight * inputWidth}, grad_output.options()); // For each elt in batch, do: const int64_t inputBatchStride = nInputPlanes * inputHeight * inputWidth; for (int b = 0; b < batchSize; b++) { Im2Col2DLauncher(grad_output[b], nOutputPlanes, outputHeight, outputWidth, inputWidth, columns.size(1), kH, kW, padH, padW, dH, dW, dilationH, dilationW, columns); // Get cuda stream cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cublasSetStream(handle, at::cuda::getCurrentCUDAStream()); // Call the GEMM function (note that it expects column major matrices) const int64_t m = columns.size(1); const int64_t k = weight.size(1) * weight.size(2) * weight.size(3); const int64_t n = weight.size(0); if (grad_output.dtype() == torch::kDouble) { const double alpha = 1.0; const double beta = 0.0; cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, columns.data<double>(), m, weight.data<double>(), k, &beta, input_grad.data<double>() + 
b * inputBatchStride, m); } else if (grad_output.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 0.0; cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, columns.data<float>(), m, weight.data<float>(), k, &beta, input_grad.data<float>() + b * inputBatchStride, m); } CUDA_CHECK(cudaGetLastError()) } return input_grad; } torch::Tensor TransposedConvBackwardWeight(torch::Tensor grad_output, torch::Tensor input, int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW) { // Useful dimensions to have const int64_t nOutputPlanes = grad_output.size(1); const int64_t nInputPlanes = input.size(1); const int64_t outputHeight = grad_output.size(2); const int64_t outputWidth = grad_output.size(3); const int64_t inputHeight = input.size(2); const int64_t inputWidth = input.size(3); const int64_t batchSize = grad_output.size(0); const int64_t inputBatchStride = nInputPlanes * inputHeight * inputWidth; // Initialize output and temporary columns torch::Tensor weight_grad = torch::zeros( {nInputPlanes, nOutputPlanes, kH, kW}, grad_output.options()); torch::Tensor columns = torch::zeros({kW * kH * nOutputPlanes, inputHeight * inputWidth}, grad_output.options()); // For each elt in batch, do: for (int b = 0; b < batchSize; b++) { // Create the column matrix from the grad output as we would for the input // in the standard conv_forward Im2Col2DLauncher(grad_output[b], nOutputPlanes, outputHeight, outputWidth, inputWidth, columns.size(1), kH, kW, padH, padW, dH, dW, dilationH, dilationW, columns); // Get cuda stream cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cublasSetStream(handle, at::cuda::getCurrentCUDAStream()); // Propagate the gradients from the outputs to the weights using GEMM // Note that GEMM expects column major matrices const int64_t m = weight_grad.size(1) * weight_grad.size(2) * weight_grad.size(3); const int64_t n = weight_grad.size(0); const int64_t k = columns.size(1); if (grad_output.dtype() 
== torch::kDouble) { const double alpha = 1.0; const double beta = 1.0; cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, columns.data<double>(), k, input.data<double>() + b * inputBatchStride, k, &beta, weight_grad.data<double>(), m); } else if (grad_output.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 1.0; cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, columns.data<float>(), k, input.data<float>() + b * inputBatchStride, k, &beta, weight_grad.data<float>(), m); } CUDA_CHECK(cudaGetLastError()) } return weight_grad; } } // namespace cuda } // namespace nn } // namespace mapped_conv