hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
4da4ef3657d2247a33ad26e3688cee403cdb2c90.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void findAtomGridIndex(const real4* __restrict__ posq, int2* __restrict__ pmeAtomGridIndex, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // Compute the index of the grid point each atom is associated with. for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += blockDim.x*gridDim.x) { real4 pos = posq[atom]; APPLY_PERIODIC_TO_POS(pos) real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); pmeAtomGridIndex[atom] = make_int2(atom, gridIndex.x*GRID_SIZE_Y*GRID_SIZE_Z+gridIndex.y*GRID_SIZE_Z+gridIndex.z); } } extern "C" __global__ void gridSpreadCharge(const real4* __restrict__ posq, real* __restrict__ originalPmeGrid, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, const int2* __restrict__ pmeAtomGridIndex #ifdef USE_LJPME , const float2* __restrict__ sigmaEpsilon #else , const real* __restrict__ charges #endif ) { // To improve memory efficiency, we divide indices along the z axis into // PME_ORDER blocks, where the data for each block is stored together. We // can ensure that all threads write to the same block at the same time, // which leads to better coalescing of writes. 
__shared__ int zindexTable[GRID_SIZE_Z+PME_ORDER]; int blockSize = (int) ceil(GRID_SIZE_Z/(real) PME_ORDER); for (int i = threadIdx.x; i < GRID_SIZE_Z+PME_ORDER; i += blockDim.x) { int zindex = i % GRID_SIZE_Z; int block = zindex % PME_ORDER; zindexTable[i] = zindex/PME_ORDER + block*GRID_SIZE_X*GRID_SIZE_Y*blockSize; } __syncthreads(); // Process the atoms in spatially sorted order. This improves efficiency when writing // the grid values. real3 data[PME_ORDER]; const real scale = RECIP(PME_ORDER-1); for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int atom = pmeAtomGridIndex[i].x; real4 pos = posq[atom]; #ifdef USE_LJPME const float2 sigEps = sigmaEpsilon[atom]; const real charge = 8*sigEps.x*sigEps.x*sigEps.x*sigEps.y; #else const real charge = (CHARGE)*EPSILON_FACTOR; #endif if (charge == 0) continue; APPLY_PERIODIC_TO_POS(pos) real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. 
real3 dr = make_real3(t.x-(int) t.x, t.y-(int) t.y, t.z-(int) t.z); data[PME_ORDER-1] = make_real3(0); data[1] = dr; data[0] = make_real3(1)-dr; for (int j = 3; j < PME_ORDER; j++) { real div = RECIP(j-1); data[j-1] = div*dr*data[j-2]; for (int k = 1; k < (j-1); k++) data[j-k-1] = div*((dr+make_real3(k))*data[j-k-2] + (make_real3(j-k)-dr)*data[j-k-1]); data[0] = div*(make_real3(1)-dr)*data[0]; } data[PME_ORDER-1] = scale*dr*data[PME_ORDER-2]; for (int j = 1; j < (PME_ORDER-1); j++) data[PME_ORDER-j-1] = scale*((dr+make_real3(j))*data[PME_ORDER-j-2] + (make_real3(PME_ORDER-j)-dr)*data[PME_ORDER-j-1]); data[0] = scale*(make_real3(1)-dr)*data[0]; // Spread the charge from this atom onto each grid point. int izoffset = (PME_ORDER-(gridIndex.z%PME_ORDER)) % PME_ORDER; for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = gridIndex.x+ix; xbase -= (xbase >= GRID_SIZE_X ? GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y; real dx = charge*data[ix].x; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = gridIndex.y+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = (xbase+ybase)*blockSize; real dxdy = dx*data[iy].y; for (int i = 0; i < PME_ORDER; i++) { int iz = (i+izoffset) % PME_ORDER; int zindex = gridIndex.z+iz; int index = ybase + zindexTable[zindex]; real add = dxdy*data[iz].z; #if defined(USE_DOUBLE_PRECISION) || defined(USE_DETERMINISTIC_FORCES) unsigned long long * ulonglong_p = (unsigned long long *) originalPmeGrid; atomicAdd(&ulonglong_p[index], static_cast<unsigned long long>((long long) (add*0x100000000))); #else atomicAdd(&originalPmeGrid[index], add); #endif } } } } } extern "C" __global__ void finishSpreadCharge( #if defined(USE_DOUBLE_PRECISION) || defined(USE_DETERMINISTIC_FORCES) const long long* __restrict__ grid1, #else const real* __restrict__ grid1, #endif real* __restrict__ grid2) { // During charge spreading, we shuffled the order of indices along the z // axis to make memory access more efficient. We now need to unshuffle // them. 
If the values were accumulated as fixed point, we also need to // convert them to floating point. __shared__ int zindexTable[GRID_SIZE_Z]; int blockSize = (int) ceil(GRID_SIZE_Z/(real) PME_ORDER); for (int i = threadIdx.x; i < GRID_SIZE_Z; i += blockDim.x) { int block = i % PME_ORDER; zindexTable[i] = i/PME_ORDER + block*GRID_SIZE_X*GRID_SIZE_Y*blockSize; } __syncthreads(); const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; real scale = 1/(real) 0x100000000; for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { int zindex = index%GRID_SIZE_Z; int loadIndex = zindexTable[zindex] + blockSize*(int) (index/GRID_SIZE_Z); #if defined(USE_DOUBLE_PRECISION) || defined(USE_DETERMINISTIC_FORCES) grid2[index] = scale*grid1[loadIndex]; #else grid2[index] = grid1[loadIndex]; #endif } } // convolutes on the halfcomplex_pmeGrid, which is of size NX*NY*(NZ/2+1) as F(Q) is conjugate symmetric extern "C" __global__ void reciprocalConvolution(real2* __restrict__ halfcomplex_pmeGrid, mixed* __restrict__ energyBuffer, const real* __restrict__ pmeBsplineModuliX, const real* __restrict__ pmeBsplineModuliY, const real* __restrict__ pmeBsplineModuliZ, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // R2C stores into a half complex matrix where the last dimension is cut by half const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*(GRID_SIZE_Z/2+1); #ifdef USE_LJPME const real recipScaleFactor = -2*M_PI*SQRT(M_PI)*RECIP(6*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); real bfac = M_PI / EWALD_ALPHA; real fac1 = 2*M_PI*M_PI*M_PI*SQRT(M_PI); real fac2 = EWALD_ALPHA*EWALD_ALPHA*EWALD_ALPHA; real fac3 = -2*EWALD_ALPHA*M_PI*M_PI; #else const real recipScaleFactor = RECIP(M_PI*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); #endif for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { // real indices int kx = 
index/(GRID_SIZE_Y*(GRID_SIZE_Z/2+1)); int remainder = index-kx*GRID_SIZE_Y*(GRID_SIZE_Z/2+1); int ky = remainder/(GRID_SIZE_Z/2+1); int kz = remainder-ky*(GRID_SIZE_Z/2+1); int mx = (kx < (GRID_SIZE_X+1)/2) ? kx : (kx-GRID_SIZE_X); int my = (ky < (GRID_SIZE_Y+1)/2) ? ky : (ky-GRID_SIZE_Y); int mz = (kz < (GRID_SIZE_Z+1)/2) ? kz : (kz-GRID_SIZE_Z); real mhx = mx*recipBoxVecX.x; real mhy = mx*recipBoxVecY.x+my*recipBoxVecY.y; real mhz = mx*recipBoxVecZ.x+my*recipBoxVecZ.y+mz*recipBoxVecZ.z; real bx = pmeBsplineModuliX[kx]; real by = pmeBsplineModuliY[ky]; real bz = pmeBsplineModuliZ[kz]; real2 grid = halfcomplex_pmeGrid[index]; real m2 = mhx*mhx+mhy*mhy+mhz*mhz; #ifdef USE_LJPME real denom = recipScaleFactor/(bx*by*bz); real m = SQRT(m2); real m3 = m*m2; real b = bfac*m; real expfac = -b*b; real expterm = EXP(expfac); real erfcterm = ERFC(b); real eterm = (fac1*erfcterm*m3 + expterm*(fac2 + fac3*m2)) * denom; halfcomplex_pmeGrid[index] = make_real2(grid.x*eterm, grid.y*eterm); #else real denom = m2*bx*by*bz; real eterm = recipScaleFactor*EXP(-RECIP_EXP_FACTOR*m2)/denom; if (kx != 0 || ky != 0 || kz != 0) { halfcomplex_pmeGrid[index] = make_real2(grid.x*eterm, grid.y*eterm); } #endif } } extern "C" __global__ void gridEvaluateEnergy(real2* __restrict__ halfcomplex_pmeGrid, mixed* __restrict__ energyBuffer, const real* __restrict__ pmeBsplineModuliX, const real* __restrict__ pmeBsplineModuliY, const real* __restrict__ pmeBsplineModuliZ, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // R2C stores into a half complex matrix where the last dimension is cut by half const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; #ifdef USE_LJPME const real recipScaleFactor = -2*M_PI*SQRT(M_PI)*RECIP(6*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); real bfac = M_PI / EWALD_ALPHA; real fac1 = 2*M_PI*M_PI*M_PI*SQRT(M_PI); real fac2 = EWALD_ALPHA*EWALD_ALPHA*EWALD_ALPHA; real fac3 = -2*EWALD_ALPHA*M_PI*M_PI; #else const real 
recipScaleFactor = RECIP(M_PI*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); #endif mixed energy = 0; for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { // real indices int kx = index/(GRID_SIZE_Y*(GRID_SIZE_Z)); int remainder = index-kx*GRID_SIZE_Y*(GRID_SIZE_Z); int ky = remainder/(GRID_SIZE_Z); int kz = remainder-ky*(GRID_SIZE_Z); int mx = (kx < (GRID_SIZE_X+1)/2) ? kx : (kx-GRID_SIZE_X); int my = (ky < (GRID_SIZE_Y+1)/2) ? ky : (ky-GRID_SIZE_Y); int mz = (kz < (GRID_SIZE_Z+1)/2) ? kz : (kz-GRID_SIZE_Z); real mhx = mx*recipBoxVecX.x; real mhy = mx*recipBoxVecY.x+my*recipBoxVecY.y; real mhz = mx*recipBoxVecZ.x+my*recipBoxVecZ.y+mz*recipBoxVecZ.z; real m2 = mhx*mhx+mhy*mhy+mhz*mhz; real bx = pmeBsplineModuliX[kx]; real by = pmeBsplineModuliY[ky]; real bz = pmeBsplineModuliZ[kz]; #ifdef USE_LJPME real denom = recipScaleFactor/(bx*by*bz); real m = SQRT(m2); real m3 = m*m2; real b = bfac*m; real expfac = -b*b; real expterm = EXP(expfac); real erfcterm = ERFC(b); real eterm = (fac1*erfcterm*m3 + expterm*(fac2 + fac3*m2)) * denom; #else real denom = m2*bx*by*bz; real eterm = recipScaleFactor*EXP(-RECIP_EXP_FACTOR*m2)/denom; #endif if (kz >= (GRID_SIZE_Z/2+1)) { kx = ((kx == 0) ? kx : GRID_SIZE_X-kx); ky = ((ky == 0) ? 
ky : GRID_SIZE_Y-ky); kz = GRID_SIZE_Z-kz; } int indexInHalfComplexGrid = kz + ky*(GRID_SIZE_Z/2+1)+kx*(GRID_SIZE_Y*(GRID_SIZE_Z/2+1)); real2 grid = halfcomplex_pmeGrid[indexInHalfComplexGrid]; #ifndef USE_LJPME if (kx != 0 || ky != 0 || kz != 0) #endif energy += eterm*(grid.x*grid.x + grid.y*grid.y); } #if defined(USE_PME_STREAM) && !defined(USE_LJPME) energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] = 0.5f*energy; #else energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += 0.5f*energy; #endif } extern "C" __global__ void gridInterpolateForce(const real4* __restrict__ posq, unsigned long long* __restrict__ forceBuffers, const real* __restrict__ originalPmeGrid, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, const int2* __restrict__ pmeAtomGridIndex #ifdef USE_LJPME , const float2* __restrict__ sigmaEpsilon #else , const real* __restrict__ charges #endif ) { real3 data[PME_ORDER]; real3 ddata[PME_ORDER]; const real scale = RECIP(PME_ORDER-1); // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int atom = pmeAtomGridIndex[i].x; real3 force = make_real3(0); real4 pos = posq[atom]; APPLY_PERIODIC_TO_POS(pos) real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. 
real3 dr = make_real3(t.x-(int) t.x, t.y-(int) t.y, t.z-(int) t.z); data[PME_ORDER-1] = make_real3(0); data[1] = dr; data[0] = make_real3(1)-dr; for (int j = 3; j < PME_ORDER; j++) { real div = RECIP(j-1); data[j-1] = div*dr*data[j-2]; for (int k = 1; k < (j-1); k++) data[j-k-1] = div*((dr+make_real3(k))*data[j-k-2] + (make_real3(j-k)-dr)*data[j-k-1]); data[0] = div*(make_real3(1)-dr)*data[0]; } ddata[0] = -data[0]; for (int j = 1; j < PME_ORDER; j++) ddata[j] = data[j-1]-data[j]; data[PME_ORDER-1] = scale*dr*data[PME_ORDER-2]; for (int j = 1; j < (PME_ORDER-1); j++) data[PME_ORDER-j-1] = scale*((dr+make_real3(j))*data[PME_ORDER-j-2] + (make_real3(PME_ORDER-j)-dr)*data[PME_ORDER-j-1]); data[0] = scale*(make_real3(1)-dr)*data[0]; // Compute the force on this atom. for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = gridIndex.x+ix; xbase -= (xbase >= GRID_SIZE_X ? GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y*GRID_SIZE_Z; real dx = data[ix].x; real ddx = ddata[ix].x; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = gridIndex.y+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = xbase + ybase*GRID_SIZE_Z; real dy = data[iy].y; real ddy = ddata[iy].y; for (int iz = 0; iz < PME_ORDER; iz++) { int zindex = gridIndex.z+iz; zindex -= (zindex >= GRID_SIZE_Z ? 
GRID_SIZE_Z : 0); int index = ybase + zindex; real gridvalue = originalPmeGrid[index]; force.x += ddx*dy*data[iz].z*gridvalue; force.y += dx*ddy*data[iz].z*gridvalue; force.z += dx*dy*ddata[iz].z*gridvalue; } } } #ifdef USE_LJPME const float2 sigEps = sigmaEpsilon[atom]; real q = 8*sigEps.x*sigEps.x*sigEps.x*sigEps.y; #else real q = CHARGE*EPSILON_FACTOR; #endif real forceX = -q*(force.x*GRID_SIZE_X*recipBoxVecX.x); real forceY = -q*(force.x*GRID_SIZE_X*recipBoxVecY.x+force.y*GRID_SIZE_Y*recipBoxVecY.y); real forceZ = -q*(force.x*GRID_SIZE_X*recipBoxVecZ.x+force.y*GRID_SIZE_Y*recipBoxVecZ.y+force.z*GRID_SIZE_Z*recipBoxVecZ.z); atomicAdd(&forceBuffers[atom], static_cast<unsigned long long>((long long) (forceX*0x100000000))); atomicAdd(&forceBuffers[atom+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forceY*0x100000000))); atomicAdd(&forceBuffers[atom+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forceZ*0x100000000))); } } extern "C" __global__ void addForces(const real4* __restrict__ forces, unsigned long long* __restrict__ forceBuffers) { for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += blockDim.x*gridDim.x) { real4 f = forces[atom]; forceBuffers[atom] += static_cast<unsigned long long>((long long) (f.x*0x100000000)); forceBuffers[atom+PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (f.y*0x100000000)); forceBuffers[atom+2*PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (f.z*0x100000000)); } } extern "C" __global__ void addEnergy(const mixed* __restrict__ pmeEnergyBuffer, mixed* __restrict__ energyBuffer, int bufferSize) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < bufferSize; i += blockDim.x*gridDim.x) energyBuffer[i] += pmeEnergyBuffer[i]; }
4da4ef3657d2247a33ad26e3688cee403cdb2c90.cu
extern "C" __global__ void findAtomGridIndex(const real4* __restrict__ posq, int2* __restrict__ pmeAtomGridIndex, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // Compute the index of the grid point each atom is associated with. for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += blockDim.x*gridDim.x) { real4 pos = posq[atom]; APPLY_PERIODIC_TO_POS(pos) real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); pmeAtomGridIndex[atom] = make_int2(atom, gridIndex.x*GRID_SIZE_Y*GRID_SIZE_Z+gridIndex.y*GRID_SIZE_Z+gridIndex.z); } } extern "C" __global__ void gridSpreadCharge(const real4* __restrict__ posq, real* __restrict__ originalPmeGrid, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, const int2* __restrict__ pmeAtomGridIndex #ifdef USE_LJPME , const float2* __restrict__ sigmaEpsilon #else , const real* __restrict__ charges #endif ) { // To improve memory efficiency, we divide indices along the z axis into // PME_ORDER blocks, where the data for each block is stored together. We // can ensure that all threads write to the same block at the same time, // which leads to better coalescing of writes. 
__shared__ int zindexTable[GRID_SIZE_Z+PME_ORDER]; int blockSize = (int) ceil(GRID_SIZE_Z/(real) PME_ORDER); for (int i = threadIdx.x; i < GRID_SIZE_Z+PME_ORDER; i += blockDim.x) { int zindex = i % GRID_SIZE_Z; int block = zindex % PME_ORDER; zindexTable[i] = zindex/PME_ORDER + block*GRID_SIZE_X*GRID_SIZE_Y*blockSize; } __syncthreads(); // Process the atoms in spatially sorted order. This improves efficiency when writing // the grid values. real3 data[PME_ORDER]; const real scale = RECIP(PME_ORDER-1); for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int atom = pmeAtomGridIndex[i].x; real4 pos = posq[atom]; #ifdef USE_LJPME const float2 sigEps = sigmaEpsilon[atom]; const real charge = 8*sigEps.x*sigEps.x*sigEps.x*sigEps.y; #else const real charge = (CHARGE)*EPSILON_FACTOR; #endif if (charge == 0) continue; APPLY_PERIODIC_TO_POS(pos) real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. 
real3 dr = make_real3(t.x-(int) t.x, t.y-(int) t.y, t.z-(int) t.z); data[PME_ORDER-1] = make_real3(0); data[1] = dr; data[0] = make_real3(1)-dr; for (int j = 3; j < PME_ORDER; j++) { real div = RECIP(j-1); data[j-1] = div*dr*data[j-2]; for (int k = 1; k < (j-1); k++) data[j-k-1] = div*((dr+make_real3(k))*data[j-k-2] + (make_real3(j-k)-dr)*data[j-k-1]); data[0] = div*(make_real3(1)-dr)*data[0]; } data[PME_ORDER-1] = scale*dr*data[PME_ORDER-2]; for (int j = 1; j < (PME_ORDER-1); j++) data[PME_ORDER-j-1] = scale*((dr+make_real3(j))*data[PME_ORDER-j-2] + (make_real3(PME_ORDER-j)-dr)*data[PME_ORDER-j-1]); data[0] = scale*(make_real3(1)-dr)*data[0]; // Spread the charge from this atom onto each grid point. int izoffset = (PME_ORDER-(gridIndex.z%PME_ORDER)) % PME_ORDER; for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = gridIndex.x+ix; xbase -= (xbase >= GRID_SIZE_X ? GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y; real dx = charge*data[ix].x; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = gridIndex.y+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = (xbase+ybase)*blockSize; real dxdy = dx*data[iy].y; for (int i = 0; i < PME_ORDER; i++) { int iz = (i+izoffset) % PME_ORDER; int zindex = gridIndex.z+iz; int index = ybase + zindexTable[zindex]; real add = dxdy*data[iz].z; #if defined(USE_DOUBLE_PRECISION) || defined(USE_DETERMINISTIC_FORCES) unsigned long long * ulonglong_p = (unsigned long long *) originalPmeGrid; atomicAdd(&ulonglong_p[index], static_cast<unsigned long long>((long long) (add*0x100000000))); #else atomicAdd(&originalPmeGrid[index], add); #endif } } } } } extern "C" __global__ void finishSpreadCharge( #if defined(USE_DOUBLE_PRECISION) || defined(USE_DETERMINISTIC_FORCES) const long long* __restrict__ grid1, #else const real* __restrict__ grid1, #endif real* __restrict__ grid2) { // During charge spreading, we shuffled the order of indices along the z // axis to make memory access more efficient. We now need to unshuffle // them. 
If the values were accumulated as fixed point, we also need to // convert them to floating point. __shared__ int zindexTable[GRID_SIZE_Z]; int blockSize = (int) ceil(GRID_SIZE_Z/(real) PME_ORDER); for (int i = threadIdx.x; i < GRID_SIZE_Z; i += blockDim.x) { int block = i % PME_ORDER; zindexTable[i] = i/PME_ORDER + block*GRID_SIZE_X*GRID_SIZE_Y*blockSize; } __syncthreads(); const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; real scale = 1/(real) 0x100000000; for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { int zindex = index%GRID_SIZE_Z; int loadIndex = zindexTable[zindex] + blockSize*(int) (index/GRID_SIZE_Z); #if defined(USE_DOUBLE_PRECISION) || defined(USE_DETERMINISTIC_FORCES) grid2[index] = scale*grid1[loadIndex]; #else grid2[index] = grid1[loadIndex]; #endif } } // convolutes on the halfcomplex_pmeGrid, which is of size NX*NY*(NZ/2+1) as F(Q) is conjugate symmetric extern "C" __global__ void reciprocalConvolution(real2* __restrict__ halfcomplex_pmeGrid, mixed* __restrict__ energyBuffer, const real* __restrict__ pmeBsplineModuliX, const real* __restrict__ pmeBsplineModuliY, const real* __restrict__ pmeBsplineModuliZ, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // R2C stores into a half complex matrix where the last dimension is cut by half const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*(GRID_SIZE_Z/2+1); #ifdef USE_LJPME const real recipScaleFactor = -2*M_PI*SQRT(M_PI)*RECIP(6*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); real bfac = M_PI / EWALD_ALPHA; real fac1 = 2*M_PI*M_PI*M_PI*SQRT(M_PI); real fac2 = EWALD_ALPHA*EWALD_ALPHA*EWALD_ALPHA; real fac3 = -2*EWALD_ALPHA*M_PI*M_PI; #else const real recipScaleFactor = RECIP(M_PI*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); #endif for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { // real indices int kx = 
index/(GRID_SIZE_Y*(GRID_SIZE_Z/2+1)); int remainder = index-kx*GRID_SIZE_Y*(GRID_SIZE_Z/2+1); int ky = remainder/(GRID_SIZE_Z/2+1); int kz = remainder-ky*(GRID_SIZE_Z/2+1); int mx = (kx < (GRID_SIZE_X+1)/2) ? kx : (kx-GRID_SIZE_X); int my = (ky < (GRID_SIZE_Y+1)/2) ? ky : (ky-GRID_SIZE_Y); int mz = (kz < (GRID_SIZE_Z+1)/2) ? kz : (kz-GRID_SIZE_Z); real mhx = mx*recipBoxVecX.x; real mhy = mx*recipBoxVecY.x+my*recipBoxVecY.y; real mhz = mx*recipBoxVecZ.x+my*recipBoxVecZ.y+mz*recipBoxVecZ.z; real bx = pmeBsplineModuliX[kx]; real by = pmeBsplineModuliY[ky]; real bz = pmeBsplineModuliZ[kz]; real2 grid = halfcomplex_pmeGrid[index]; real m2 = mhx*mhx+mhy*mhy+mhz*mhz; #ifdef USE_LJPME real denom = recipScaleFactor/(bx*by*bz); real m = SQRT(m2); real m3 = m*m2; real b = bfac*m; real expfac = -b*b; real expterm = EXP(expfac); real erfcterm = ERFC(b); real eterm = (fac1*erfcterm*m3 + expterm*(fac2 + fac3*m2)) * denom; halfcomplex_pmeGrid[index] = make_real2(grid.x*eterm, grid.y*eterm); #else real denom = m2*bx*by*bz; real eterm = recipScaleFactor*EXP(-RECIP_EXP_FACTOR*m2)/denom; if (kx != 0 || ky != 0 || kz != 0) { halfcomplex_pmeGrid[index] = make_real2(grid.x*eterm, grid.y*eterm); } #endif } } extern "C" __global__ void gridEvaluateEnergy(real2* __restrict__ halfcomplex_pmeGrid, mixed* __restrict__ energyBuffer, const real* __restrict__ pmeBsplineModuliX, const real* __restrict__ pmeBsplineModuliY, const real* __restrict__ pmeBsplineModuliZ, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // R2C stores into a half complex matrix where the last dimension is cut by half const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; #ifdef USE_LJPME const real recipScaleFactor = -2*M_PI*SQRT(M_PI)*RECIP(6*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); real bfac = M_PI / EWALD_ALPHA; real fac1 = 2*M_PI*M_PI*M_PI*SQRT(M_PI); real fac2 = EWALD_ALPHA*EWALD_ALPHA*EWALD_ALPHA; real fac3 = -2*EWALD_ALPHA*M_PI*M_PI; #else const real 
recipScaleFactor = RECIP(M_PI*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); #endif mixed energy = 0; for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { // real indices int kx = index/(GRID_SIZE_Y*(GRID_SIZE_Z)); int remainder = index-kx*GRID_SIZE_Y*(GRID_SIZE_Z); int ky = remainder/(GRID_SIZE_Z); int kz = remainder-ky*(GRID_SIZE_Z); int mx = (kx < (GRID_SIZE_X+1)/2) ? kx : (kx-GRID_SIZE_X); int my = (ky < (GRID_SIZE_Y+1)/2) ? ky : (ky-GRID_SIZE_Y); int mz = (kz < (GRID_SIZE_Z+1)/2) ? kz : (kz-GRID_SIZE_Z); real mhx = mx*recipBoxVecX.x; real mhy = mx*recipBoxVecY.x+my*recipBoxVecY.y; real mhz = mx*recipBoxVecZ.x+my*recipBoxVecZ.y+mz*recipBoxVecZ.z; real m2 = mhx*mhx+mhy*mhy+mhz*mhz; real bx = pmeBsplineModuliX[kx]; real by = pmeBsplineModuliY[ky]; real bz = pmeBsplineModuliZ[kz]; #ifdef USE_LJPME real denom = recipScaleFactor/(bx*by*bz); real m = SQRT(m2); real m3 = m*m2; real b = bfac*m; real expfac = -b*b; real expterm = EXP(expfac); real erfcterm = ERFC(b); real eterm = (fac1*erfcterm*m3 + expterm*(fac2 + fac3*m2)) * denom; #else real denom = m2*bx*by*bz; real eterm = recipScaleFactor*EXP(-RECIP_EXP_FACTOR*m2)/denom; #endif if (kz >= (GRID_SIZE_Z/2+1)) { kx = ((kx == 0) ? kx : GRID_SIZE_X-kx); ky = ((ky == 0) ? 
ky : GRID_SIZE_Y-ky); kz = GRID_SIZE_Z-kz; } int indexInHalfComplexGrid = kz + ky*(GRID_SIZE_Z/2+1)+kx*(GRID_SIZE_Y*(GRID_SIZE_Z/2+1)); real2 grid = halfcomplex_pmeGrid[indexInHalfComplexGrid]; #ifndef USE_LJPME if (kx != 0 || ky != 0 || kz != 0) #endif energy += eterm*(grid.x*grid.x + grid.y*grid.y); } #if defined(USE_PME_STREAM) && !defined(USE_LJPME) energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] = 0.5f*energy; #else energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += 0.5f*energy; #endif } extern "C" __global__ void gridInterpolateForce(const real4* __restrict__ posq, unsigned long long* __restrict__ forceBuffers, const real* __restrict__ originalPmeGrid, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, const int2* __restrict__ pmeAtomGridIndex #ifdef USE_LJPME , const float2* __restrict__ sigmaEpsilon #else , const real* __restrict__ charges #endif ) { real3 data[PME_ORDER]; real3 ddata[PME_ORDER]; const real scale = RECIP(PME_ORDER-1); // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int atom = pmeAtomGridIndex[i].x; real3 force = make_real3(0); real4 pos = posq[atom]; APPLY_PERIODIC_TO_POS(pos) real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. 
real3 dr = make_real3(t.x-(int) t.x, t.y-(int) t.y, t.z-(int) t.z); data[PME_ORDER-1] = make_real3(0); data[1] = dr; data[0] = make_real3(1)-dr; for (int j = 3; j < PME_ORDER; j++) { real div = RECIP(j-1); data[j-1] = div*dr*data[j-2]; for (int k = 1; k < (j-1); k++) data[j-k-1] = div*((dr+make_real3(k))*data[j-k-2] + (make_real3(j-k)-dr)*data[j-k-1]); data[0] = div*(make_real3(1)-dr)*data[0]; } ddata[0] = -data[0]; for (int j = 1; j < PME_ORDER; j++) ddata[j] = data[j-1]-data[j]; data[PME_ORDER-1] = scale*dr*data[PME_ORDER-2]; for (int j = 1; j < (PME_ORDER-1); j++) data[PME_ORDER-j-1] = scale*((dr+make_real3(j))*data[PME_ORDER-j-2] + (make_real3(PME_ORDER-j)-dr)*data[PME_ORDER-j-1]); data[0] = scale*(make_real3(1)-dr)*data[0]; // Compute the force on this atom. for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = gridIndex.x+ix; xbase -= (xbase >= GRID_SIZE_X ? GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y*GRID_SIZE_Z; real dx = data[ix].x; real ddx = ddata[ix].x; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = gridIndex.y+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = xbase + ybase*GRID_SIZE_Z; real dy = data[iy].y; real ddy = ddata[iy].y; for (int iz = 0; iz < PME_ORDER; iz++) { int zindex = gridIndex.z+iz; zindex -= (zindex >= GRID_SIZE_Z ? 
GRID_SIZE_Z : 0); int index = ybase + zindex; real gridvalue = originalPmeGrid[index]; force.x += ddx*dy*data[iz].z*gridvalue; force.y += dx*ddy*data[iz].z*gridvalue; force.z += dx*dy*ddata[iz].z*gridvalue; } } } #ifdef USE_LJPME const float2 sigEps = sigmaEpsilon[atom]; real q = 8*sigEps.x*sigEps.x*sigEps.x*sigEps.y; #else real q = CHARGE*EPSILON_FACTOR; #endif real forceX = -q*(force.x*GRID_SIZE_X*recipBoxVecX.x); real forceY = -q*(force.x*GRID_SIZE_X*recipBoxVecY.x+force.y*GRID_SIZE_Y*recipBoxVecY.y); real forceZ = -q*(force.x*GRID_SIZE_X*recipBoxVecZ.x+force.y*GRID_SIZE_Y*recipBoxVecZ.y+force.z*GRID_SIZE_Z*recipBoxVecZ.z); atomicAdd(&forceBuffers[atom], static_cast<unsigned long long>((long long) (forceX*0x100000000))); atomicAdd(&forceBuffers[atom+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forceY*0x100000000))); atomicAdd(&forceBuffers[atom+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (forceZ*0x100000000))); } } extern "C" __global__ void addForces(const real4* __restrict__ forces, unsigned long long* __restrict__ forceBuffers) { for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += blockDim.x*gridDim.x) { real4 f = forces[atom]; forceBuffers[atom] += static_cast<unsigned long long>((long long) (f.x*0x100000000)); forceBuffers[atom+PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (f.y*0x100000000)); forceBuffers[atom+2*PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (f.z*0x100000000)); } } extern "C" __global__ void addEnergy(const mixed* __restrict__ pmeEnergyBuffer, mixed* __restrict__ energyBuffer, int bufferSize) { for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < bufferSize; i += blockDim.x*gridDim.x) energyBuffer[i] += pmeEnergyBuffer[i]; }
26d748752789d241f9c53845259b3858ee3f65d2.hip
// !!! This is a file automatically generated by hipify!!! #include<stdio.h> #include<stdlib.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include "timing.cpp" #define RTOLERANCE 10e-16 #define ATOLERANCE 10e-16 //============================================================================= // Standard CG routine in double precision arithmetic //============================================================================= // Reference SpMV product on the PCU void dsmv(double *h_A, int *h_I, int *h_J, int N, double *h_X, double *h_Y) { double res; for(int i=0; i<N; i++) { res=0; for(int j=h_I[i];j<h_I[i+1];j++) { res+=h_A[j]*h_X[h_J[j]]; } h_Y[i]=res; } } void CGd(int dofs, int & num_of_iter, double *x, double *b, double *h_A, int *h_I, int *h_J, double rtol = RTOLERANCE ){ double *r=new double[dofs], *d=new double[dofs], *z=new double[dofs]; double r0, den, nom, nom0, betanom, alpha, beta; int i, j; nom = 0.0; for(j=0; j<dofs; j++){ x[j] = 0.; r[j] = d[j] = b[j]; nom += r[j]*r[j]; } nom0 = nom; // nom = r dot r dsmv(h_A, h_I, h_J, dofs, r, z); // z = A r den = 0.0; for(j=0; j<dofs; j++) den += z[j]*r[j]; // den = z dot r if ( (r0 = nom * rtol) < ATOLERANCE) r0 = ATOLERANCE; if (nom < r0) return; if (den <= 0.0) { printf("Operator A is not postive definite. 
(Ar,r) = %f\n", den); return; } // printf("Iteration : %4d Norm: %f\n", 0, nom); // start iteration for(i= 1; i<num_of_iter ;i++) { alpha = nom/den; betanom = 0.0; for(j=0;j<dofs; j++){ x[j] += alpha*d[j]; // x = x + alpha d r[j] -= alpha*z[j]; // r = r - alpha z betanom += r[j]*r[j]; // betanom = r dot r } // printf("Iteration : %4d Norm: %f\n", i, betanom); if ( betanom < r0 ) { num_of_iter = i; break; } beta = betanom/nom; // beta = betanom/nom for(j=0;j<dofs; j++) d[j] = r[j] + beta * d[j]; // d = r + beta d dsmv(h_A, h_I, h_J, dofs, d, z); // z = A d den = 0.; for(j=0;j<dofs; j++) den += d[j]*z[j]; // den = d dot z nom = betanom; } // end iteration printf( " (r_0, r_0) = %e\n", nom0); printf( " (r_N, r_N) = %e\n", betanom); printf( " Number of CG iterations: %d\n", i); if (rtol == RTOLERANCE) { dsmv(h_A, h_I, h_J, dofs, x, r); // r = A x den = 0.0; for(j=0; j<dofs; j++){ r[j] = b[j] - r[j]; // r = b - r den += r[j]*r[j]; } printf( " || r_N || = %f\n", sqrt(den)); } delete [] r; delete [] z; delete [] d; } //============================================================================= // Standard CG routine in double precision arithmetic on the GPU //============================================================================= // SpMV on the GPU #define num_threads 32 __global__ void dsmv_kernel(double* A, int *I, int *J, int n, double *d_X, double *d_Y) { int ind = blockIdx.x*num_threads + threadIdx.x; if (ind < n){ I += ind; int j, last=I[1]; double res = 0.f; for(j=I[0];j<last;j++) res += A[j] * d_X[ J[j] ]; d_Y[ind] = res; } } void dsmv_gpu(double *d_A, int *d_I, int *d_J, int N, double *d_X, double *d_Y) { dim3 grid(N/num_threads, 1, 1); dim3 threads(num_threads, 1, 1); hipLaunchKernelGGL(( dsmv_kernel), dim3(grid), dim3(threads), 0, 0, d_A, d_I, d_J, N, d_X, d_Y); } void CGd_GPU(int dofs, int & num_of_iter, double *x, double *b, double *d_A, int *d_I, int *d_J, double *dwork, double rtol = RTOLERANCE ){ double *r = dwork; double *d = dwork + dofs; double 
*z = dwork + 2*dofs; double r0, den, nom, nom0, betanom, alpha, beta; int i; hipblasDscal(dofs, 0.f, x, 1); // x = 0 hipblasDcopy(dofs, b, 1, r, 1); // r = b hipblasDcopy(dofs, b, 1, d, 1); // d = b nom = hipblasDnrm2(dofs, r, 1); // nom = || r || nom = nom * nom; nom0 = nom; // nom = r dot r dsmv_gpu(d_A, d_I, d_J, dofs, r, z); // z = A r den = hipblasDdot(dofs, z, 1, r, 1); // den = z dot r if ( (r0 = nom * rtol) < ATOLERANCE) r0 = ATOLERANCE; if (nom < r0) return; if (den <= 0.0) { printf("Operator A is not postive definite. (Ar,r) = %f\n", den); return; } // printf("Iteration : %4d Norm: %f\n", 0, nom); // start iteration for(i= 1; i<num_of_iter ;i++) { alpha = nom/den; hipblasDaxpy(dofs, alpha, d, 1, x, 1); // x = x + alpha d hipblasDaxpy(dofs, -alpha, z, 1, r, 1); // r = r - alpha z betanom = hipblasDnrm2(dofs, r, 1); // betanom = || r || betanom = betanom * betanom; // betanom = r dot r // printf("Iteration : %4d Norm: %f\n", i, betanom); if ( betanom < r0 ) { num_of_iter = i; break; } beta = betanom/nom; // beta = betanom/nom hipblasDscal(dofs, beta, d, 1); // d = beta*d hipblasDaxpy(dofs, 1.f, r, 1, d, 1); // d = d + r dsmv_gpu(d_A, d_I, d_J, dofs, d, z); // z = A d den = hipblasDdot(dofs, d, 1, z, 1); // den = d dot z nom = betanom; } // end iteration printf( " (r_0, r_0) = %e\n", nom0); printf( " (r_N, r_N) = %e\n", betanom); printf( " Number of CG iterations: %d\n", i); if (rtol == RTOLERANCE) { dsmv_gpu(d_A, d_I, d_J, dofs, x, r); // r = A x hipblasDaxpy(dofs, -1.f, b, 1, r, 1); // r = r - b den = hipblasDnrm2(dofs, r, 1); // den = || r || printf( " || r_N || = %f\n", den); } } //============================================================================ int main(int argc,char **argv) { hipInit( 0 ); hipblasInit( ); TimeStruct start, end; int N, i, NNZ, inc=0, filelines=0; int read,col1,row1; float val1; FILE *DataFile; //======================Reading file======================================= 
//========================================================================== printf("\n....... Reading matrix.output ......................... \n"); if ((DataFile = fopen("matrix.output", "r")) == NULL) printf("\nCan't read matrix.output\n"); fscanf(DataFile,"%d%d%d", &N, &N, &NNZ); int current_col = 0, k = 0, *nnz_row; nnz_row = (int*)malloc( sizeof(int)*(N+1)); nnz_row[k] = inc; //=======================Memory allocation================= //=========================================================== double *h_Y, *h_X, *d_X, *d_Y, *h_Y1, *dwork; double *h_A,*d_A; int *h_J, *h_I, *d_J, *d_I; h_X=(double*)malloc(N*sizeof(double)); if (h_X==NULL) printf("fail to allocate h_X\n"), exit(1); h_A=(double*)malloc((NNZ+1)*sizeof(double)); if (h_A==NULL) printf("fail to allocate h_A\n"), exit(1); h_Y=(double*)malloc(N*sizeof(double)); if (h_Y==NULL) printf("fail to allocate h_Y\n"), exit(1); h_Y1=(double*)malloc(N*sizeof(double)); if (h_Y1==NULL) printf("fail to allocate h_Y1\n"), exit(1); h_A=(double*)malloc(NNZ*sizeof(double)); if (h_A==NULL) printf("fail to allocate h_A\n"), exit(1); h_J=(int*)malloc((NNZ+1)*sizeof(int)); if (h_J==NULL) printf("fail to allocate h_J\n"), exit(1); h_I=(int*)malloc((N+1)*sizeof(int)); if (h_I==NULL) printf("fail to allocate h_I\n"), exit(1); for(i=0; i<N; i++) h_X[i] = 1.f*rand()/RAND_MAX; for(i=0;i<NNZ;i++){ read=fscanf(DataFile,"%d%d%f",&col1,&row1,&val1); if(read!=3)break; h_J[filelines]=col1-1; if (current_col == row1-1) nnz_row[k]++; else { current_col = row1-1; k++; inc=1; nnz_row[k] = 1; } h_A[filelines]=val1; filelines++; } printf("file lines: %d\n", filelines); fclose(DataFile); fprintf(stderr,"File is closed\n"); h_I[0]=0; i = 0; for(i=1;i<=N;i++) h_I[i]=h_I[i-1] + nnz_row[i-1]; printf("N = %d\n", N); //=========================================================== //=============sparse Matrix vector product on CPU================ //============================================================== start = get_current_time(); dsmv(h_A, 
h_I, h_J, N, h_X, h_Y); // h_Y = h_A * h_X end = get_current_time(); printf("CPU Processing time: %f (ms) \n", GetTimerValue(start,end)); printf("Speed: %f GFlops \n", 2.*NNZ/ (1.*1000000*GetTimerValue(start,end))); //====================================================== //=====================GPU============================= //=================================================== printf("....... allocating GPU memory ........................... \n\n"); hipMalloc((void**)&dwork,3*N*sizeof(double)); hipMalloc((void**)&d_X,N*sizeof(double)); hipMalloc((void**)&d_Y,N*sizeof(double)); hipMalloc((void**)&d_A,(NNZ+1)*sizeof(double)); hipMalloc((void**)&d_I,(N+1)*sizeof(int)); hipMalloc((void**)&d_J,(NNZ+1)*sizeof(int)); hipMemcpy(d_A, h_A, NNZ*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_J, h_J, (NNZ+1)*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(d_I, h_I, (N+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_X, h_X, N*sizeof(double), hipMemcpyHostToDevice); printf("memory allocated\n"); start = get_current_time(); dsmv_gpu(d_A, d_I, d_J, N, d_X, d_Y); end = get_current_time(); hipMemcpy(h_Y1, d_Y, N*sizeof(double), hipMemcpyDeviceToHost); //========================================================================== //=======print the result( first three values )from GPU===================== printf("\n....................................................... \n"); double norm = 0.f; for(i=0;i<N;i++) norm += (h_Y[i] - h_Y1[i])*(h_Y[i] - h_Y1[i]); //========================================================================== printf("GPU Processing time: %f (ms) \n", GetTimerValue(start,end)); printf("Speed: %f GFlops \n", 2.*NNZ/ (1.*1000000*GetTimerValue(start, end))); printf("|| Y_GPU - Y_CPU ||_2 = %f \n", sqrt(norm)); //========================================================================== // Solve h_A * h_X = h_Y on the CPU using CG int max_num_iters = 5000; printf("\n....... Solving Ax = b using CG on the CPU ............ 
\n"); start = get_current_time(); CGd( N, max_num_iters, h_X, h_Y, h_A, h_I, h_J); end = get_current_time(); printf("Time (s) = %f\n", GetTimerValue(start,end)/1000.); //========================================================================== // Solve d_A * d_X = d_Y on the GPU using CG printf("\n....... Solving Ax = b using CG on the GPU ............ \n"); start = get_current_time(); CGd_GPU(N, max_num_iters, d_X, d_Y, d_A, d_I, d_J, dwork); end = get_current_time(); printf("Time (s) = %f\n\n", GetTimerValue(start,end)/1000.); hipFree(d_X); hipFree(d_Y); hipFree(d_A); hipFree(d_I); hipFree(d_J); hipFree(dwork); free(h_A); free(h_X); free(h_Y); free(h_Y1); free(h_J); free(h_I); }
26d748752789d241f9c53845259b3858ee3f65d2.cu
#include<stdio.h> #include<stdlib.h> #include <cuda.h> #include <cublas.h> #include "timing.cpp" #define RTOLERANCE 10e-16 #define ATOLERANCE 10e-16 //============================================================================= // Standard CG routine in double precision arithmetic //============================================================================= // Reference SpMV product on the PCU void dsmv(double *h_A, int *h_I, int *h_J, int N, double *h_X, double *h_Y) { double res; for(int i=0; i<N; i++) { res=0; for(int j=h_I[i];j<h_I[i+1];j++) { res+=h_A[j]*h_X[h_J[j]]; } h_Y[i]=res; } } void CGd(int dofs, int & num_of_iter, double *x, double *b, double *h_A, int *h_I, int *h_J, double rtol = RTOLERANCE ){ double *r=new double[dofs], *d=new double[dofs], *z=new double[dofs]; double r0, den, nom, nom0, betanom, alpha, beta; int i, j; nom = 0.0; for(j=0; j<dofs; j++){ x[j] = 0.; r[j] = d[j] = b[j]; nom += r[j]*r[j]; } nom0 = nom; // nom = r dot r dsmv(h_A, h_I, h_J, dofs, r, z); // z = A r den = 0.0; for(j=0; j<dofs; j++) den += z[j]*r[j]; // den = z dot r if ( (r0 = nom * rtol) < ATOLERANCE) r0 = ATOLERANCE; if (nom < r0) return; if (den <= 0.0) { printf("Operator A is not postive definite. 
(Ar,r) = %f\n", den); return; } // printf("Iteration : %4d Norm: %f\n", 0, nom); // start iteration for(i= 1; i<num_of_iter ;i++) { alpha = nom/den; betanom = 0.0; for(j=0;j<dofs; j++){ x[j] += alpha*d[j]; // x = x + alpha d r[j] -= alpha*z[j]; // r = r - alpha z betanom += r[j]*r[j]; // betanom = r dot r } // printf("Iteration : %4d Norm: %f\n", i, betanom); if ( betanom < r0 ) { num_of_iter = i; break; } beta = betanom/nom; // beta = betanom/nom for(j=0;j<dofs; j++) d[j] = r[j] + beta * d[j]; // d = r + beta d dsmv(h_A, h_I, h_J, dofs, d, z); // z = A d den = 0.; for(j=0;j<dofs; j++) den += d[j]*z[j]; // den = d dot z nom = betanom; } // end iteration printf( " (r_0, r_0) = %e\n", nom0); printf( " (r_N, r_N) = %e\n", betanom); printf( " Number of CG iterations: %d\n", i); if (rtol == RTOLERANCE) { dsmv(h_A, h_I, h_J, dofs, x, r); // r = A x den = 0.0; for(j=0; j<dofs; j++){ r[j] = b[j] - r[j]; // r = b - r den += r[j]*r[j]; } printf( " || r_N || = %f\n", sqrt(den)); } delete [] r; delete [] z; delete [] d; } //============================================================================= // Standard CG routine in double precision arithmetic on the GPU //============================================================================= // SpMV on the GPU #define num_threads 32 __global__ void dsmv_kernel(double* A, int *I, int *J, int n, double *d_X, double *d_Y) { int ind = blockIdx.x*num_threads + threadIdx.x; if (ind < n){ I += ind; int j, last=I[1]; double res = 0.f; for(j=I[0];j<last;j++) res += A[j] * d_X[ J[j] ]; d_Y[ind] = res; } } void dsmv_gpu(double *d_A, int *d_I, int *d_J, int N, double *d_X, double *d_Y) { dim3 grid(N/num_threads, 1, 1); dim3 threads(num_threads, 1, 1); dsmv_kernel<<<grid, threads>>>(d_A, d_I, d_J, N, d_X, d_Y); } void CGd_GPU(int dofs, int & num_of_iter, double *x, double *b, double *d_A, int *d_I, int *d_J, double *dwork, double rtol = RTOLERANCE ){ double *r = dwork; double *d = dwork + dofs; double *z = dwork + 2*dofs; double r0, den, 
nom, nom0, betanom, alpha, beta; int i; cublasDscal(dofs, 0.f, x, 1); // x = 0 cublasDcopy(dofs, b, 1, r, 1); // r = b cublasDcopy(dofs, b, 1, d, 1); // d = b nom = cublasDnrm2(dofs, r, 1); // nom = || r || nom = nom * nom; nom0 = nom; // nom = r dot r dsmv_gpu(d_A, d_I, d_J, dofs, r, z); // z = A r den = cublasDdot(dofs, z, 1, r, 1); // den = z dot r if ( (r0 = nom * rtol) < ATOLERANCE) r0 = ATOLERANCE; if (nom < r0) return; if (den <= 0.0) { printf("Operator A is not postive definite. (Ar,r) = %f\n", den); return; } // printf("Iteration : %4d Norm: %f\n", 0, nom); // start iteration for(i= 1; i<num_of_iter ;i++) { alpha = nom/den; cublasDaxpy(dofs, alpha, d, 1, x, 1); // x = x + alpha d cublasDaxpy(dofs, -alpha, z, 1, r, 1); // r = r - alpha z betanom = cublasDnrm2(dofs, r, 1); // betanom = || r || betanom = betanom * betanom; // betanom = r dot r // printf("Iteration : %4d Norm: %f\n", i, betanom); if ( betanom < r0 ) { num_of_iter = i; break; } beta = betanom/nom; // beta = betanom/nom cublasDscal(dofs, beta, d, 1); // d = beta*d cublasDaxpy(dofs, 1.f, r, 1, d, 1); // d = d + r dsmv_gpu(d_A, d_I, d_J, dofs, d, z); // z = A d den = cublasDdot(dofs, d, 1, z, 1); // den = d dot z nom = betanom; } // end iteration printf( " (r_0, r_0) = %e\n", nom0); printf( " (r_N, r_N) = %e\n", betanom); printf( " Number of CG iterations: %d\n", i); if (rtol == RTOLERANCE) { dsmv_gpu(d_A, d_I, d_J, dofs, x, r); // r = A x cublasDaxpy(dofs, -1.f, b, 1, r, 1); // r = r - b den = cublasDnrm2(dofs, r, 1); // den = || r || printf( " || r_N || = %f\n", den); } } //============================================================================ int main(int argc,char **argv) { cuInit( 0 ); cublasInit( ); TimeStruct start, end; int N, i, NNZ, inc=0, filelines=0; int read,col1,row1; float val1; FILE *DataFile; //======================Reading file======================================= //========================================================================== printf("\n....... 
Reading matrix.output ......................... \n"); if ((DataFile = fopen("matrix.output", "r")) == NULL) printf("\nCan't read matrix.output\n"); fscanf(DataFile,"%d%d%d", &N, &N, &NNZ); int current_col = 0, k = 0, *nnz_row; nnz_row = (int*)malloc( sizeof(int)*(N+1)); nnz_row[k] = inc; //=======================Memory allocation================= //=========================================================== double *h_Y, *h_X, *d_X, *d_Y, *h_Y1, *dwork; double *h_A,*d_A; int *h_J, *h_I, *d_J, *d_I; h_X=(double*)malloc(N*sizeof(double)); if (h_X==NULL) printf("fail to allocate h_X\n"), exit(1); h_A=(double*)malloc((NNZ+1)*sizeof(double)); if (h_A==NULL) printf("fail to allocate h_A\n"), exit(1); h_Y=(double*)malloc(N*sizeof(double)); if (h_Y==NULL) printf("fail to allocate h_Y\n"), exit(1); h_Y1=(double*)malloc(N*sizeof(double)); if (h_Y1==NULL) printf("fail to allocate h_Y1\n"), exit(1); h_A=(double*)malloc(NNZ*sizeof(double)); if (h_A==NULL) printf("fail to allocate h_A\n"), exit(1); h_J=(int*)malloc((NNZ+1)*sizeof(int)); if (h_J==NULL) printf("fail to allocate h_J\n"), exit(1); h_I=(int*)malloc((N+1)*sizeof(int)); if (h_I==NULL) printf("fail to allocate h_I\n"), exit(1); for(i=0; i<N; i++) h_X[i] = 1.f*rand()/RAND_MAX; for(i=0;i<NNZ;i++){ read=fscanf(DataFile,"%d%d%f",&col1,&row1,&val1); if(read!=3)break; h_J[filelines]=col1-1; if (current_col == row1-1) nnz_row[k]++; else { current_col = row1-1; k++; inc=1; nnz_row[k] = 1; } h_A[filelines]=val1; filelines++; } printf("file lines: %d\n", filelines); fclose(DataFile); fprintf(stderr,"File is closed\n"); h_I[0]=0; i = 0; for(i=1;i<=N;i++) h_I[i]=h_I[i-1] + nnz_row[i-1]; printf("N = %d\n", N); //=========================================================== //=============sparse Matrix vector product on CPU================ //============================================================== start = get_current_time(); dsmv(h_A, h_I, h_J, N, h_X, h_Y); // h_Y = h_A * h_X end = get_current_time(); printf("CPU Processing time: 
%f (ms) \n", GetTimerValue(start,end)); printf("Speed: %f GFlops \n", 2.*NNZ/ (1.*1000000*GetTimerValue(start,end))); //====================================================== //=====================GPU============================= //=================================================== printf("....... allocating GPU memory ........................... \n\n"); cudaMalloc((void**)&dwork,3*N*sizeof(double)); cudaMalloc((void**)&d_X,N*sizeof(double)); cudaMalloc((void**)&d_Y,N*sizeof(double)); cudaMalloc((void**)&d_A,(NNZ+1)*sizeof(double)); cudaMalloc((void**)&d_I,(N+1)*sizeof(int)); cudaMalloc((void**)&d_J,(NNZ+1)*sizeof(int)); cudaMemcpy(d_A, h_A, NNZ*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_J, h_J, (NNZ+1)*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(d_I, h_I, (N+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_X, h_X, N*sizeof(double), cudaMemcpyHostToDevice); printf("memory allocated\n"); start = get_current_time(); dsmv_gpu(d_A, d_I, d_J, N, d_X, d_Y); end = get_current_time(); cudaMemcpy(h_Y1, d_Y, N*sizeof(double), cudaMemcpyDeviceToHost); //========================================================================== //=======print the result( first three values )from GPU===================== printf("\n....................................................... \n"); double norm = 0.f; for(i=0;i<N;i++) norm += (h_Y[i] - h_Y1[i])*(h_Y[i] - h_Y1[i]); //========================================================================== printf("GPU Processing time: %f (ms) \n", GetTimerValue(start,end)); printf("Speed: %f GFlops \n", 2.*NNZ/ (1.*1000000*GetTimerValue(start, end))); printf("|| Y_GPU - Y_CPU ||_2 = %f \n", sqrt(norm)); //========================================================================== // Solve h_A * h_X = h_Y on the CPU using CG int max_num_iters = 5000; printf("\n....... Solving Ax = b using CG on the CPU ............ 
\n"); start = get_current_time(); CGd( N, max_num_iters, h_X, h_Y, h_A, h_I, h_J); end = get_current_time(); printf("Time (s) = %f\n", GetTimerValue(start,end)/1000.); //========================================================================== // Solve d_A * d_X = d_Y on the GPU using CG printf("\n....... Solving Ax = b using CG on the GPU ............ \n"); start = get_current_time(); CGd_GPU(N, max_num_iters, d_X, d_Y, d_A, d_I, d_J, dwork); end = get_current_time(); printf("Time (s) = %f\n\n", GetTimerValue(start,end)/1000.); cudaFree(d_X); cudaFree(d_Y); cudaFree(d_A); cudaFree(d_I); cudaFree(d_J); cudaFree(dwork); free(h_A); free(h_X); free(h_Y); free(h_Y1); free(h_J); free(h_I); }
b0523f754e1d76f283196c2280a4d58b4e6d555e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <pthread.h> #include "utilities.h" // kernel to increment values __global__ void incrementKernel(int len, int *input, int *output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; const int threadN = gridDim.x * blockDim.x; for (int pos = tid; pos < len; pos += threadN) output[pos] = input[pos] + 1; } // returns 0 on success extern "C" int gpu_increment(int partitionId, int len, int* input, int* output) { int deviceID = get_gpu(); if (deviceID == -1) return 1; // no device present hipDeviceProp_t deviceProp; if (hipSetDevice(deviceID) || hipGetDeviceProperties(&deviceProp, deviceID)) { fprintf(stderr, "Cuda error in SetDevice\n"); return 1; } fprintf(stderr, "gpu_test(): Partition %d will be executed on GPU %d\n", partitionId, deviceID); // register host memory in the GPU space // note that we pinned these buffers in the JNI code already if (hipHostRegister(input, len*sizeof(int), 0) || hipHostRegister(output, len*sizeof(int), 0)) { fprintf(stderr, "Unable to register data buffer: %s: %s\n", hipGetErrorName(hipPeekAtLastError()), 
hipGetErrorString(hipGetLastError())); return 1; } // get pointers valid from the device int *d_input, *d_output; if (hipHostGetDevicePointer((void **)&d_input, input, 0) || hipHostGetDevicePointer((void **)&d_output, output, 0)) { fprintf(stderr, "Unable to get device pointer to host memory: %s: %s\n", hipGetErrorName(hipPeekAtLastError()), hipGetErrorString(hipGetLastError())); return 1; } // invoke kernel int threadsPerBlock = 256; int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( incrementKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, len, d_input, d_output); hipDeviceSynchronize(); if (hipPeekAtLastError()) { fprintf(stderr, "Unable to invoke kernel: %s\n", hipGetErrorString(hipGetLastError())); return 1; } // unregister host memory if (hipHostUnregister(input) || hipHostUnregister(output)) { fprintf(stderr, "Unable to unregister host memory: %s: %s\n", hipGetErrorName(hipPeekAtLastError()), hipGetErrorString(hipGetLastError())); return 1; } return 0; }
b0523f754e1d76f283196c2280a4d58b4e6d555e.cu
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <pthread.h> #include "utilities.h" // kernel to increment values __global__ void incrementKernel(int len, int *input, int *output) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; const int threadN = gridDim.x * blockDim.x; for (int pos = tid; pos < len; pos += threadN) output[pos] = input[pos] + 1; } // returns 0 on success extern "C" int gpu_increment(int partitionId, int len, int* input, int* output) { int deviceID = get_gpu(); if (deviceID == -1) return 1; // no device present cudaDeviceProp deviceProp; if (cudaSetDevice(deviceID) || cudaGetDeviceProperties(&deviceProp, deviceID)) { fprintf(stderr, "Cuda error in SetDevice\n"); return 1; } fprintf(stderr, "gpu_test(): Partition %d will be executed on GPU %d\n", partitionId, deviceID); // register host memory in the GPU space // note that we pinned these buffers in the JNI code already if (cudaHostRegister(input, len*sizeof(int), 0) || cudaHostRegister(output, len*sizeof(int), 0)) { fprintf(stderr, "Unable to register data buffer: %s: %s\n", cudaGetErrorName(cudaPeekAtLastError()), cudaGetErrorString(cudaGetLastError())); return 1; } // get pointers valid from the device int *d_input, 
*d_output; if (cudaHostGetDevicePointer((void **)&d_input, input, 0) || cudaHostGetDevicePointer((void **)&d_output, output, 0)) { fprintf(stderr, "Unable to get device pointer to host memory: %s: %s\n", cudaGetErrorName(cudaPeekAtLastError()), cudaGetErrorString(cudaGetLastError())); return 1; } // invoke kernel int threadsPerBlock = 256; int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock; incrementKernel<<<blocksPerGrid, threadsPerBlock>>>(len, d_input, d_output); cudaDeviceSynchronize(); if (cudaPeekAtLastError()) { fprintf(stderr, "Unable to invoke kernel: %s\n", cudaGetErrorString(cudaGetLastError())); return 1; } // unregister host memory if (cudaHostUnregister(input) || cudaHostUnregister(output)) { fprintf(stderr, "Unable to unregister host memory: %s: %s\n", cudaGetErrorName(cudaPeekAtLastError()), cudaGetErrorString(cudaGetLastError())); return 1; } return 0; }
4867eb8757be14d68b797cca55aae895d45ecd15.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2015-2019 by Contributors * \file multiclass_metric.cc * \brief evaluation metrics for multiclass classification. * \author Kailong Chen, Tianqi Chen */ #include <rabit/rabit.h> #include <xgboost/metric.h> #include <cmath> #include "metric_common.h" #include "../common/math.h" #include "../common/common.h" #if defined(XGBOOST_USE_CUDA) #include <thrust/execution_policy.h> // thrust::hip::par #include <thrust/functional.h> // thrust::plus<> #include <thrust/transform_reduce.h> #include <thrust/iterator/counting_iterator.h> #include "../common/device_helpers.cuh" #endif // XGBOOST_USE_CUDA namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(multiclass_metric); template <typename EvalRowPolicy> class MultiClassMetricsReduction { void CheckLabelError(int32_t label_error, size_t n_class) const { CHECK(label_error >= 0 && label_error < static_cast<int32_t>(n_class)) << "MultiClassEvaluation: label must be in [0, num_class)," << " num_class=" << n_class << " but found " << label_error << " in label"; } public: MultiClassMetricsReduction() = default; PackedReduceResult CpuReduceMetrics( const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds, const size_t n_class) const { size_t ndata = labels.Size(); const auto& h_labels = labels.HostVector(); const auto& h_weights = weights.HostVector(); const auto& h_preds = preds.HostVector(); bst_float residue_sum = 0; bst_float weights_sum = 0; int label_error = 0; bool const is_null_weight = weights.Size() == 0; #pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static) for (omp_ulong idx = 0; idx < ndata; ++idx) { bst_float weight = is_null_weight ? 
1.0f : h_weights[idx]; auto label = static_cast<int>(h_labels[idx]); if (label >= 0 && label < static_cast<int>(n_class)) { residue_sum += EvalRowPolicy::EvalRow( label, h_preds.data() + idx * n_class, n_class) * weight; weights_sum += weight; } else { label_error = label; } } CheckLabelError(label_error, n_class); PackedReduceResult res { residue_sum, weights_sum }; return res; } #if defined(XGBOOST_USE_CUDA) PackedReduceResult DeviceReduceMetrics( const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds, const size_t n_class) { size_t n_data = labels.Size(); thrust::counting_iterator<size_t> begin(0); thrust::counting_iterator<size_t> end = begin + n_data; auto s_labels = labels.DeviceSpan(); auto s_preds = preds.DeviceSpan(); auto s_weights = weights.DeviceSpan(); bool const is_null_weight = weights.Size() == 0; auto s_label_error = label_error_.GetSpan<int32_t>(1); s_label_error[0] = 0; dh::XGBCachingDeviceAllocator<char> alloc; PackedReduceResult result = thrust::transform_reduce( thrust::hip::par(alloc), begin, end, [=] XGBOOST_DEVICE(size_t idx) { bst_float weight = is_null_weight ? 
1.0f : s_weights[idx]; bst_float residue = 0; auto label = static_cast<int>(s_labels[idx]); if (label >= 0 && label < static_cast<int32_t>(n_class)) { residue = EvalRowPolicy::EvalRow( label, &s_preds[idx * n_class], n_class) * weight; } else { s_label_error[0] = label; } return PackedReduceResult{ residue, weight }; }, PackedReduceResult(), thrust::plus<PackedReduceResult>()); CheckLabelError(s_label_error[0], n_class); return result; } #endif // XGBOOST_USE_CUDA PackedReduceResult Reduce( const GenericParameter &tparam, int device, size_t n_class, const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds) { PackedReduceResult result; if (device < 0) { result = CpuReduceMetrics(weights, labels, preds, n_class); } #if defined(XGBOOST_USE_CUDA) else { // NOLINT device_ = tparam.gpu_id; preds.SetDevice(device_); labels.SetDevice(device_); weights.SetDevice(device_); dh::safe_cuda(hipSetDevice(device_)); result = DeviceReduceMetrics(weights, labels, preds, n_class); } #endif // defined(XGBOOST_USE_CUDA) return result; } private: #if defined(XGBOOST_USE_CUDA) dh::PinnedMemory label_error_; int device_{-1}; #endif // defined(XGBOOST_USE_CUDA) }; /*! 
* \brief base class of multi-class evaluation * \tparam Derived the name of subclass */ template<typename Derived> struct EvalMClassBase : public Metric { bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { if (info.labels_.Size() == 0) { CHECK_EQ(preds.Size(), 0); } else { CHECK(preds.Size() % info.labels_.Size() == 0) << "label and prediction size not match"; } double dat[2] { 0.0, 0.0 }; if (info.labels_.Size() != 0) { const size_t nclass = preds.Size() / info.labels_.Size(); CHECK_GE(nclass, 1U) << "mlogloss and merror are only used for multi-class classification," << " use logloss for binary classification"; int device = tparam_->gpu_id; auto result = reducer_.Reduce(*tparam_, device, nclass, info.weights_, info.labels_, preds); dat[0] = result.Residue(); dat[1] = result.Weights(); } if (distributed) { rabit::Allreduce<rabit::op::Sum>(dat, 2); } return Derived::GetFinal(dat[0], dat[1]); } /*! * \brief to be implemented by subclass, * get evaluation result from one row * \param label label of current instance * \param pred prediction value of current instance * \param nclass number of class in the prediction */ XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass); /*! * \brief to be overridden by subclass, final transformation * \param esum the sum statistics returned by EvalRow * \param wsum sum of weight */ inline static bst_float GetFinal(bst_float esum, bst_float wsum) { return esum / wsum; } private: MultiClassMetricsReduction<Derived> reducer_; // used to store error message const char *error_msg_; }; /*! \brief match error */ struct EvalMatchError : public EvalMClassBase<EvalMatchError> { const char* Name() const override { return "merror"; } XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass) { return common::FindMaxIndex(pred, pred + nclass) != pred + static_cast<int>(label); } }; /*! 
\brief match error */ struct EvalMultiLogLoss : public EvalMClassBase<EvalMultiLogLoss> { const char* Name() const override { return "mlogloss"; } XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass) { const bst_float eps = 1e-16f; auto k = static_cast<size_t>(label); if (pred[k] > eps) { return -::log(pred[k]); } else { return -::log(eps); } } }; XGBOOST_REGISTER_METRIC(MatchError, "merror") .describe("Multiclass classification error.") .set_body([](const char* param) { return new EvalMatchError(); }); XGBOOST_REGISTER_METRIC(MultiLogLoss, "mlogloss") .describe("Multiclass negative loglikelihood.") .set_body([](const char* param) { return new EvalMultiLogLoss(); }); } // namespace metric } // namespace xgboost
4867eb8757be14d68b797cca55aae895d45ecd15.cu
/*! * Copyright 2015-2019 by Contributors * \file multiclass_metric.cc * \brief evaluation metrics for multiclass classification. * \author Kailong Chen, Tianqi Chen */ #include <rabit/rabit.h> #include <xgboost/metric.h> #include <cmath> #include "metric_common.h" #include "../common/math.h" #include "../common/common.h" #if defined(XGBOOST_USE_CUDA) #include <thrust/execution_policy.h> // thrust::cuda::par #include <thrust/functional.h> // thrust::plus<> #include <thrust/transform_reduce.h> #include <thrust/iterator/counting_iterator.h> #include "../common/device_helpers.cuh" #endif // XGBOOST_USE_CUDA namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(multiclass_metric); template <typename EvalRowPolicy> class MultiClassMetricsReduction { void CheckLabelError(int32_t label_error, size_t n_class) const { CHECK(label_error >= 0 && label_error < static_cast<int32_t>(n_class)) << "MultiClassEvaluation: label must be in [0, num_class)," << " num_class=" << n_class << " but found " << label_error << " in label"; } public: MultiClassMetricsReduction() = default; PackedReduceResult CpuReduceMetrics( const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds, const size_t n_class) const { size_t ndata = labels.Size(); const auto& h_labels = labels.HostVector(); const auto& h_weights = weights.HostVector(); const auto& h_preds = preds.HostVector(); bst_float residue_sum = 0; bst_float weights_sum = 0; int label_error = 0; bool const is_null_weight = weights.Size() == 0; #pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static) for (omp_ulong idx = 0; idx < ndata; ++idx) { bst_float weight = is_null_weight ? 
1.0f : h_weights[idx]; auto label = static_cast<int>(h_labels[idx]); if (label >= 0 && label < static_cast<int>(n_class)) { residue_sum += EvalRowPolicy::EvalRow( label, h_preds.data() + idx * n_class, n_class) * weight; weights_sum += weight; } else { label_error = label; } } CheckLabelError(label_error, n_class); PackedReduceResult res { residue_sum, weights_sum }; return res; } #if defined(XGBOOST_USE_CUDA) PackedReduceResult DeviceReduceMetrics( const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds, const size_t n_class) { size_t n_data = labels.Size(); thrust::counting_iterator<size_t> begin(0); thrust::counting_iterator<size_t> end = begin + n_data; auto s_labels = labels.DeviceSpan(); auto s_preds = preds.DeviceSpan(); auto s_weights = weights.DeviceSpan(); bool const is_null_weight = weights.Size() == 0; auto s_label_error = label_error_.GetSpan<int32_t>(1); s_label_error[0] = 0; dh::XGBCachingDeviceAllocator<char> alloc; PackedReduceResult result = thrust::transform_reduce( thrust::cuda::par(alloc), begin, end, [=] XGBOOST_DEVICE(size_t idx) { bst_float weight = is_null_weight ? 
1.0f : s_weights[idx]; bst_float residue = 0; auto label = static_cast<int>(s_labels[idx]); if (label >= 0 && label < static_cast<int32_t>(n_class)) { residue = EvalRowPolicy::EvalRow( label, &s_preds[idx * n_class], n_class) * weight; } else { s_label_error[0] = label; } return PackedReduceResult{ residue, weight }; }, PackedReduceResult(), thrust::plus<PackedReduceResult>()); CheckLabelError(s_label_error[0], n_class); return result; } #endif // XGBOOST_USE_CUDA PackedReduceResult Reduce( const GenericParameter &tparam, int device, size_t n_class, const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds) { PackedReduceResult result; if (device < 0) { result = CpuReduceMetrics(weights, labels, preds, n_class); } #if defined(XGBOOST_USE_CUDA) else { // NOLINT device_ = tparam.gpu_id; preds.SetDevice(device_); labels.SetDevice(device_); weights.SetDevice(device_); dh::safe_cuda(cudaSetDevice(device_)); result = DeviceReduceMetrics(weights, labels, preds, n_class); } #endif // defined(XGBOOST_USE_CUDA) return result; } private: #if defined(XGBOOST_USE_CUDA) dh::PinnedMemory label_error_; int device_{-1}; #endif // defined(XGBOOST_USE_CUDA) }; /*! 
* \brief base class of multi-class evaluation * \tparam Derived the name of subclass */ template<typename Derived> struct EvalMClassBase : public Metric { bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { if (info.labels_.Size() == 0) { CHECK_EQ(preds.Size(), 0); } else { CHECK(preds.Size() % info.labels_.Size() == 0) << "label and prediction size not match"; } double dat[2] { 0.0, 0.0 }; if (info.labels_.Size() != 0) { const size_t nclass = preds.Size() / info.labels_.Size(); CHECK_GE(nclass, 1U) << "mlogloss and merror are only used for multi-class classification," << " use logloss for binary classification"; int device = tparam_->gpu_id; auto result = reducer_.Reduce(*tparam_, device, nclass, info.weights_, info.labels_, preds); dat[0] = result.Residue(); dat[1] = result.Weights(); } if (distributed) { rabit::Allreduce<rabit::op::Sum>(dat, 2); } return Derived::GetFinal(dat[0], dat[1]); } /*! * \brief to be implemented by subclass, * get evaluation result from one row * \param label label of current instance * \param pred prediction value of current instance * \param nclass number of class in the prediction */ XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass); /*! * \brief to be overridden by subclass, final transformation * \param esum the sum statistics returned by EvalRow * \param wsum sum of weight */ inline static bst_float GetFinal(bst_float esum, bst_float wsum) { return esum / wsum; } private: MultiClassMetricsReduction<Derived> reducer_; // used to store error message const char *error_msg_; }; /*! \brief match error */ struct EvalMatchError : public EvalMClassBase<EvalMatchError> { const char* Name() const override { return "merror"; } XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass) { return common::FindMaxIndex(pred, pred + nclass) != pred + static_cast<int>(label); } }; /*! 
\brief match error */ struct EvalMultiLogLoss : public EvalMClassBase<EvalMultiLogLoss> { const char* Name() const override { return "mlogloss"; } XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass) { const bst_float eps = 1e-16f; auto k = static_cast<size_t>(label); if (pred[k] > eps) { return -std::log(pred[k]); } else { return -std::log(eps); } } }; XGBOOST_REGISTER_METRIC(MatchError, "merror") .describe("Multiclass classification error.") .set_body([](const char* param) { return new EvalMatchError(); }); XGBOOST_REGISTER_METRIC(MultiLogLoss, "mlogloss") .describe("Multiclass negative loglikelihood.") .set_body([](const char* param) { return new EvalMultiLogLoss(); }); } // namespace metric } // namespace xgboost
784982901981612ec197bd2681415a45572c2a60.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // See this header for all of the recursive handling of tuples of vectors #include "test_parameters.cuh" #include "groupby_test_helpers.cuh" #include <tests/utilities/cudf_test_fixtures.h> #include <tests/utilities/tuple_vectors.h> #include <utilities/cudf_utils.h> #include <cudf/cudf.h> #include <gtest/gtest.h> #include <gmock/gmock.h> #include <iostream> #include <vector> #include <utility> #include <type_traits> #include <typeinfo> #include <memory> #include <cstdlib> // A new instance of this class will be created for each *TEST(GroupTest, ...) // Put all repeated setup and validation stuff here template <class test_parameters> struct GroupTest : public GdfTest { // The aggregation type is passed via a member of the template argument class const agg_op op = test_parameters::op; gdf_context ctxt = {0, test_parameters::group_type, 0}; // multi_column_t is a tuple of vectors. 
The number of vectors in the tuple // determines the number of columns to be grouped, and the value_type of each // vector determiens the data type of the column using multi_column_t = typename test_parameters::multi_column_t; //output_t is the output type of the aggregation column using output_t = typename test_parameters::output_type; //map_t is used for reference solution using map_t = typename test_parameters::ref_map_type; //tuple_t is tuple of datatypes associated with each column to be grouped using tuple_t = typename test_parameters::tuple_t; //contains input generated for gdf calculation and reference solution multi_column_t input_key; //contains the input aggregation column std::vector<output_t> input_value; //contains grouped by column output of the gdf groupby call multi_column_t output_key; //contains the aggregated output column std::vector<output_t> output_value; // Type for a unique_ptr to a gdf_column with a custom deleter // Custom deleter is defined at construction using gdf_col_pointer = typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>; // Containers for unique_ptrs to gdf_columns that will be used in the gdf_group_by functions // unique_ptrs are used to automate freeing device memory std::vector<gdf_col_pointer> gdf_input_key_columns; gdf_col_pointer gdf_input_value_column; std::vector<gdf_col_pointer> gdf_output_key_columns; gdf_col_pointer gdf_output_value_column; // Containers for the raw pointers to the gdf_columns that will be used as input // to the gdf_group_by functions std::vector<gdf_column*> gdf_raw_input_key_columns; gdf_column* gdf_raw_input_val_column; std::vector<gdf_column*> gdf_raw_output_key_columns; gdf_column* gdf_raw_output_val_column; GroupTest() { // Use constant seed so the psuedo-random order is the same each time // Each time the class is constructed a new constant seed is used static size_t number_of_instantiations{0}; std::srand(number_of_instantiations++); } ~GroupTest() { } template <typename 
col_type> gdf_col_pointer create_gdf_column(std::vector<col_type> const & host_vector, const gdf_size_type n_count = 0) { // Deduce the type and set the gdf_dtype accordingly gdf_dtype gdf_col_type = N_GDF_TYPES; if (std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8; else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8; else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32; else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64; // Create a new instance of a gdf_column with a custom deleter that will free // the associated device memory when it eventually goes out of scope auto deleter = [](gdf_column* col){col->size = 0; RMM_FREE(col->data, 0); RMM_FREE(col->valid, 0); }; gdf_col_pointer the_column{new gdf_column{}, deleter}; // Allocate device storage for gdf_column and copy contents from host_vector EXPECT_EQ(RMM_ALLOC(&(the_column->data), host_vector.size() * sizeof(col_type), 0), RMM_SUCCESS); EXPECT_EQ(hipMemcpy(the_column->data, host_vector.data(), host_vector.size() * sizeof(col_type), hipMemcpyHostToDevice), hipSuccess); int valid_size = gdf_valid_allocation_size(host_vector.size()); EXPECT_EQ(RMM_ALLOC((void**)&(the_column->valid), valid_size, 0), RMM_SUCCESS); EXPECT_EQ(hipMemset(the_column->valid, 0xff, valid_size), hipSuccess); // Fill the gdf_column members the_column->null_count = n_count; the_column->size = host_vector.size(); the_column->dtype = gdf_col_type; gdf_dtype_extra_info extra_info; extra_info.time_unit = TIME_UNIT_NONE; 
the_column->dtype_info = extra_info; return the_column; } // Converts a tuple of host vectors into a vector of gdf_columns std::vector<gdf_col_pointer> initialize_gdf_columns(multi_column_t host_columns) { std::vector<gdf_col_pointer> gdf_columns; convert_tuple_to_gdf_columns(gdf_columns, host_columns); return gdf_columns; } /* --------------------------------------------------------------------------*/ /** * @brief Initializes key columns and aggregation column for gdf group by call * * @param key_count The number of unique keys * @param value_per_key The number of times a random aggregation value is generated for a key * @param max_key The maximum value of the key columns * @param max_val The maximum value of aggregation column * @param print Optionally print the keys and aggregation columns for debugging */ /* ----------------------------------------------------------------------------*/ void create_input(const size_t key_count, const size_t value_per_key, const size_t max_key, const size_t max_val, bool print = false, const gdf_size_type n_count = 0) { size_t shuffle_seed = rand(); initialize_keys(input_key, key_count, value_per_key, max_key, shuffle_seed); initialize_values(input_value, key_count, value_per_key, max_val, shuffle_seed); gdf_input_key_columns = initialize_gdf_columns(input_key); gdf_input_value_column = create_gdf_column(input_value, n_count); // Fill vector of raw pointers to gdf_columns for(auto const& c : gdf_input_key_columns){ gdf_raw_input_key_columns.push_back(c.get()); } gdf_raw_input_val_column = gdf_input_value_column.get(); if(print) { std::cout << "Key column(s) created. Size: " << std::get<0>(input_key).size() << std::endl; print_tuple(input_key); std::cout << "Value column(s) created. 
Size: " << input_value.size() << std::endl; print_vector(input_value); } } /* --------------------------------------------------------------------------*/ /** * @brief Creates a unique_ptr that wraps a gdf_column structure intialized with a host vector * * @param host_vector The host vector whose data is used to initialize the gdf_column * * @returns A unique_ptr wrapping the new gdf_column */ /* ----------------------------------------------------------------------------*/ // Compile time recursion to convert each vector in a tuple of vectors into // a gdf_column and append it to a vector of gdf_columns template<std::size_t I = 0, typename... Tp> inline typename std::enable_if<I == sizeof...(Tp), void>::type convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t) { //bottom of compile-time recursion //purposely empty... } template<std::size_t I = 0, typename... Tp> inline typename std::enable_if<I < sizeof...(Tp), void>::type convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t) { // Creates a gdf_column for the current vector and pushes it onto // the vector of gdf_columns gdf_columns.push_back(create_gdf_column(std::get<I>(t))); //recurse to next vector in tuple convert_tuple_to_gdf_columns<I + 1, Tp...>(gdf_columns, t); } void create_gdf_output_buffers(const size_t key_count, const size_t value_per_key) { initialize_keys(output_key, key_count, value_per_key, 0, 0, false); initialize_values(output_value, key_count, value_per_key, 0, 0); gdf_output_key_columns = initialize_gdf_columns(output_key); gdf_output_value_column = create_gdf_column(output_value); for(auto const& c : gdf_output_key_columns){ gdf_raw_output_key_columns.push_back(c.get()); } gdf_raw_output_val_column = gdf_output_value_column.get(); } map_t compute_reference_solution(void) { map_t key_val_map; if (test_parameters::op != agg_op::AVG) { AggOp<test_parameters::op> agg; for (size_t i = 0; i < 
input_value.size(); ++i) { auto l_key = extractKey(input_key, i); auto sch = key_val_map.find(l_key); if (sch != key_val_map.end()) { key_val_map[l_key] = agg(sch->second, input_value[i]); } else { key_val_map[l_key] = agg(input_value[i]); } } } else { std::map<tuple_t, size_t> counters; AggOp<agg_op::SUM> agg; for (size_t i = 0; i < input_value.size(); ++i) { auto l_key = extractKey(input_key, i); counters[l_key]++; auto sch = key_val_map.find(l_key); if (sch != key_val_map.end()) { key_val_map[l_key] = agg(sch->second, input_value[i]); } else { key_val_map[l_key] = agg(input_value[i]); } } for (auto& e : key_val_map) { e.second = e.second/counters[e.first]; } } return key_val_map; } /* --------------------------------------------------------------------------*/ /** * @brief Computes the gdf result of grouping the input_keys and input_value */ /* ----------------------------------------------------------------------------*/ void compute_gdf_result(const gdf_error expected_error = GDF_SUCCESS) { const int num_columns = std::tuple_size<multi_column_t>::value; gdf_error error{GDF_SUCCESS}; gdf_column **group_by_input_key = gdf_raw_input_key_columns.data(); gdf_column *group_by_input_value = gdf_raw_input_val_column; gdf_column **group_by_output_key = gdf_raw_output_key_columns.data(); gdf_column *group_by_output_value = gdf_raw_output_val_column; switch(op) { case agg_op::MIN: { error = gdf_group_by_min(num_columns, group_by_input_key, group_by_input_value, nullptr, group_by_output_key, group_by_output_value, &ctxt); break; } case agg_op::MAX: { error = gdf_group_by_max(num_columns, group_by_input_key, group_by_input_value, nullptr, group_by_output_key, group_by_output_value, &ctxt); break; } case agg_op::SUM: { error = gdf_group_by_sum(num_columns, group_by_input_key, group_by_input_value, nullptr, group_by_output_key, group_by_output_value, &ctxt); break; } case agg_op::CNT: { error = gdf_group_by_count(num_columns, group_by_input_key, group_by_input_value, 
nullptr, group_by_output_key, group_by_output_value, &ctxt); break; } case agg_op::AVG: { error = gdf_group_by_avg(num_columns, group_by_input_key, group_by_input_value, nullptr, group_by_output_key, group_by_output_value, &ctxt); break; } default: error = GDF_INVALID_AGGREGATOR; } EXPECT_EQ(expected_error, error) << "The gdf group by function did not complete successfully"; if (GDF_SUCCESS == expected_error) { copy_output( group_by_output_key, output_key, group_by_output_value, output_value); } } void compare_gdf_result(map_t& reference_map) { ASSERT_EQ(output_value.size(), reference_map.size()) << "Size of gdf result does not match reference result\n"; ASSERT_EQ(std::get<0>(output_key).size(), output_value.size()) << "Mismatch between aggregation and group by column size."; for (size_t i = 0; i < output_value.size(); ++i) { auto sch = reference_map.find(extractKey(output_key, i)); bool found = (sch != reference_map.end()); EXPECT_EQ(found, true); if (!found) { continue; } if (std::is_integral<output_t>::value) { EXPECT_EQ(sch->second, output_value[i]); } else { EXPECT_NEAR(sch->second, output_value[i], sch->second/100.0); } //ensure no duplicates in gdf output reference_map.erase(sch); } } }; TYPED_TEST_CASE(GroupTest, Implementations); TYPED_TEST(GroupTest, GroupbyExampleTest) { const size_t num_keys = 1; const size_t num_values_per_key = 8; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } TYPED_TEST(GroupTest, AllKeysSame) { const size_t num_keys = 1; const size_t num_values_per_key = 1<<14; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); 
this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } TYPED_TEST(GroupTest, AllKeysDifferent) { const size_t num_keys = 1<<14; const size_t num_values_per_key = 1; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } TYPED_TEST(GroupTest, WarpKeysSame) { const size_t num_keys = 1<<10; const size_t num_values_per_key = 32; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } TYPED_TEST(GroupTest, BlockKeysSame) { const size_t num_keys = 1<<10; const size_t num_values_per_key = 256; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } TYPED_TEST(GroupTest, EmptyInput) { const size_t num_keys = 0; const size_t num_values_per_key = 0; const size_t max_key = 0; const size_t max_val = 0; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } // Create a new derived class from JoinTest so we can do a new Typed Test set of tests template <class test_parameters> struct GroupValidTest : public 
GroupTest<test_parameters> { }; TYPED_TEST_CASE(GroupValidTest, ValidTestImplementations); TYPED_TEST(GroupValidTest, ReportValidMaskError) { const size_t num_keys = 1; const size_t num_values_per_key = 8; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val, false, 1); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(GDF_VALIDITY_UNSUPPORTED); }
784982901981612ec197bd2681415a45572c2a60.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // See this header for all of the recursive handling of tuples of vectors #include "test_parameters.cuh" #include "groupby_test_helpers.cuh" #include <tests/utilities/cudf_test_fixtures.h> #include <tests/utilities/tuple_vectors.h> #include <utilities/cudf_utils.h> #include <cudf/cudf.h> #include <gtest/gtest.h> #include <gmock/gmock.h> #include <iostream> #include <vector> #include <utility> #include <type_traits> #include <typeinfo> #include <memory> #include <cstdlib> // A new instance of this class will be created for each *TEST(GroupTest, ...) // Put all repeated setup and validation stuff here template <class test_parameters> struct GroupTest : public GdfTest { // The aggregation type is passed via a member of the template argument class const agg_op op = test_parameters::op; gdf_context ctxt = {0, test_parameters::group_type, 0}; // multi_column_t is a tuple of vectors. 
The number of vectors in the tuple // determines the number of columns to be grouped, and the value_type of each // vector determiens the data type of the column using multi_column_t = typename test_parameters::multi_column_t; //output_t is the output type of the aggregation column using output_t = typename test_parameters::output_type; //map_t is used for reference solution using map_t = typename test_parameters::ref_map_type; //tuple_t is tuple of datatypes associated with each column to be grouped using tuple_t = typename test_parameters::tuple_t; //contains input generated for gdf calculation and reference solution multi_column_t input_key; //contains the input aggregation column std::vector<output_t> input_value; //contains grouped by column output of the gdf groupby call multi_column_t output_key; //contains the aggregated output column std::vector<output_t> output_value; // Type for a unique_ptr to a gdf_column with a custom deleter // Custom deleter is defined at construction using gdf_col_pointer = typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>; // Containers for unique_ptrs to gdf_columns that will be used in the gdf_group_by functions // unique_ptrs are used to automate freeing device memory std::vector<gdf_col_pointer> gdf_input_key_columns; gdf_col_pointer gdf_input_value_column; std::vector<gdf_col_pointer> gdf_output_key_columns; gdf_col_pointer gdf_output_value_column; // Containers for the raw pointers to the gdf_columns that will be used as input // to the gdf_group_by functions std::vector<gdf_column*> gdf_raw_input_key_columns; gdf_column* gdf_raw_input_val_column; std::vector<gdf_column*> gdf_raw_output_key_columns; gdf_column* gdf_raw_output_val_column; GroupTest() { // Use constant seed so the psuedo-random order is the same each time // Each time the class is constructed a new constant seed is used static size_t number_of_instantiations{0}; std::srand(number_of_instantiations++); } ~GroupTest() { } template <typename 
col_type> gdf_col_pointer create_gdf_column(std::vector<col_type> const & host_vector, const gdf_size_type n_count = 0) { // Deduce the type and set the gdf_dtype accordingly gdf_dtype gdf_col_type = N_GDF_TYPES; if (std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8; else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8; else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32; else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64; // Create a new instance of a gdf_column with a custom deleter that will free // the associated device memory when it eventually goes out of scope auto deleter = [](gdf_column* col){col->size = 0; RMM_FREE(col->data, 0); RMM_FREE(col->valid, 0); }; gdf_col_pointer the_column{new gdf_column{}, deleter}; // Allocate device storage for gdf_column and copy contents from host_vector EXPECT_EQ(RMM_ALLOC(&(the_column->data), host_vector.size() * sizeof(col_type), 0), RMM_SUCCESS); EXPECT_EQ(cudaMemcpy(the_column->data, host_vector.data(), host_vector.size() * sizeof(col_type), cudaMemcpyHostToDevice), cudaSuccess); int valid_size = gdf_valid_allocation_size(host_vector.size()); EXPECT_EQ(RMM_ALLOC((void**)&(the_column->valid), valid_size, 0), RMM_SUCCESS); EXPECT_EQ(cudaMemset(the_column->valid, 0xff, valid_size), cudaSuccess); // Fill the gdf_column members the_column->null_count = n_count; the_column->size = host_vector.size(); the_column->dtype = gdf_col_type; gdf_dtype_extra_info extra_info; extra_info.time_unit = TIME_UNIT_NONE; 
the_column->dtype_info = extra_info; return the_column; } // Converts a tuple of host vectors into a vector of gdf_columns std::vector<gdf_col_pointer> initialize_gdf_columns(multi_column_t host_columns) { std::vector<gdf_col_pointer> gdf_columns; convert_tuple_to_gdf_columns(gdf_columns, host_columns); return gdf_columns; } /* --------------------------------------------------------------------------*/ /** * @brief Initializes key columns and aggregation column for gdf group by call * * @param key_count The number of unique keys * @param value_per_key The number of times a random aggregation value is generated for a key * @param max_key The maximum value of the key columns * @param max_val The maximum value of aggregation column * @param print Optionally print the keys and aggregation columns for debugging */ /* ----------------------------------------------------------------------------*/ void create_input(const size_t key_count, const size_t value_per_key, const size_t max_key, const size_t max_val, bool print = false, const gdf_size_type n_count = 0) { size_t shuffle_seed = rand(); initialize_keys(input_key, key_count, value_per_key, max_key, shuffle_seed); initialize_values(input_value, key_count, value_per_key, max_val, shuffle_seed); gdf_input_key_columns = initialize_gdf_columns(input_key); gdf_input_value_column = create_gdf_column(input_value, n_count); // Fill vector of raw pointers to gdf_columns for(auto const& c : gdf_input_key_columns){ gdf_raw_input_key_columns.push_back(c.get()); } gdf_raw_input_val_column = gdf_input_value_column.get(); if(print) { std::cout << "Key column(s) created. Size: " << std::get<0>(input_key).size() << std::endl; print_tuple(input_key); std::cout << "Value column(s) created. 
Size: " << input_value.size() << std::endl; print_vector(input_value); } } /* --------------------------------------------------------------------------*/ /** * @brief Creates a unique_ptr that wraps a gdf_column structure intialized with a host vector * * @param host_vector The host vector whose data is used to initialize the gdf_column * * @returns A unique_ptr wrapping the new gdf_column */ /* ----------------------------------------------------------------------------*/ // Compile time recursion to convert each vector in a tuple of vectors into // a gdf_column and append it to a vector of gdf_columns template<std::size_t I = 0, typename... Tp> inline typename std::enable_if<I == sizeof...(Tp), void>::type convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t) { //bottom of compile-time recursion //purposely empty... } template<std::size_t I = 0, typename... Tp> inline typename std::enable_if<I < sizeof...(Tp), void>::type convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t) { // Creates a gdf_column for the current vector and pushes it onto // the vector of gdf_columns gdf_columns.push_back(create_gdf_column(std::get<I>(t))); //recurse to next vector in tuple convert_tuple_to_gdf_columns<I + 1, Tp...>(gdf_columns, t); } void create_gdf_output_buffers(const size_t key_count, const size_t value_per_key) { initialize_keys(output_key, key_count, value_per_key, 0, 0, false); initialize_values(output_value, key_count, value_per_key, 0, 0); gdf_output_key_columns = initialize_gdf_columns(output_key); gdf_output_value_column = create_gdf_column(output_value); for(auto const& c : gdf_output_key_columns){ gdf_raw_output_key_columns.push_back(c.get()); } gdf_raw_output_val_column = gdf_output_value_column.get(); } map_t compute_reference_solution(void) { map_t key_val_map; if (test_parameters::op != agg_op::AVG) { AggOp<test_parameters::op> agg; for (size_t i = 0; i < 
input_value.size(); ++i) { auto l_key = extractKey(input_key, i); auto sch = key_val_map.find(l_key); if (sch != key_val_map.end()) { key_val_map[l_key] = agg(sch->second, input_value[i]); } else { key_val_map[l_key] = agg(input_value[i]); } } } else { std::map<tuple_t, size_t> counters; AggOp<agg_op::SUM> agg; for (size_t i = 0; i < input_value.size(); ++i) { auto l_key = extractKey(input_key, i); counters[l_key]++; auto sch = key_val_map.find(l_key); if (sch != key_val_map.end()) { key_val_map[l_key] = agg(sch->second, input_value[i]); } else { key_val_map[l_key] = agg(input_value[i]); } } for (auto& e : key_val_map) { e.second = e.second/counters[e.first]; } } return key_val_map; } /* --------------------------------------------------------------------------*/ /** * @brief Computes the gdf result of grouping the input_keys and input_value */ /* ----------------------------------------------------------------------------*/ void compute_gdf_result(const gdf_error expected_error = GDF_SUCCESS) { const int num_columns = std::tuple_size<multi_column_t>::value; gdf_error error{GDF_SUCCESS}; gdf_column **group_by_input_key = gdf_raw_input_key_columns.data(); gdf_column *group_by_input_value = gdf_raw_input_val_column; gdf_column **group_by_output_key = gdf_raw_output_key_columns.data(); gdf_column *group_by_output_value = gdf_raw_output_val_column; switch(op) { case agg_op::MIN: { error = gdf_group_by_min(num_columns, group_by_input_key, group_by_input_value, nullptr, group_by_output_key, group_by_output_value, &ctxt); break; } case agg_op::MAX: { error = gdf_group_by_max(num_columns, group_by_input_key, group_by_input_value, nullptr, group_by_output_key, group_by_output_value, &ctxt); break; } case agg_op::SUM: { error = gdf_group_by_sum(num_columns, group_by_input_key, group_by_input_value, nullptr, group_by_output_key, group_by_output_value, &ctxt); break; } case agg_op::CNT: { error = gdf_group_by_count(num_columns, group_by_input_key, group_by_input_value, 
nullptr, group_by_output_key, group_by_output_value, &ctxt); break; } case agg_op::AVG: { error = gdf_group_by_avg(num_columns, group_by_input_key, group_by_input_value, nullptr, group_by_output_key, group_by_output_value, &ctxt); break; } default: error = GDF_INVALID_AGGREGATOR; } EXPECT_EQ(expected_error, error) << "The gdf group by function did not complete successfully"; if (GDF_SUCCESS == expected_error) { copy_output( group_by_output_key, output_key, group_by_output_value, output_value); } } void compare_gdf_result(map_t& reference_map) { ASSERT_EQ(output_value.size(), reference_map.size()) << "Size of gdf result does not match reference result\n"; ASSERT_EQ(std::get<0>(output_key).size(), output_value.size()) << "Mismatch between aggregation and group by column size."; for (size_t i = 0; i < output_value.size(); ++i) { auto sch = reference_map.find(extractKey(output_key, i)); bool found = (sch != reference_map.end()); EXPECT_EQ(found, true); if (!found) { continue; } if (std::is_integral<output_t>::value) { EXPECT_EQ(sch->second, output_value[i]); } else { EXPECT_NEAR(sch->second, output_value[i], sch->second/100.0); } //ensure no duplicates in gdf output reference_map.erase(sch); } } }; TYPED_TEST_CASE(GroupTest, Implementations); TYPED_TEST(GroupTest, GroupbyExampleTest) { const size_t num_keys = 1; const size_t num_values_per_key = 8; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } TYPED_TEST(GroupTest, AllKeysSame) { const size_t num_keys = 1; const size_t num_values_per_key = 1<<14; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); 
this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } TYPED_TEST(GroupTest, AllKeysDifferent) { const size_t num_keys = 1<<14; const size_t num_values_per_key = 1; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } TYPED_TEST(GroupTest, WarpKeysSame) { const size_t num_keys = 1<<10; const size_t num_values_per_key = 32; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } TYPED_TEST(GroupTest, BlockKeysSame) { const size_t num_keys = 1<<10; const size_t num_values_per_key = 256; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } TYPED_TEST(GroupTest, EmptyInput) { const size_t num_keys = 0; const size_t num_values_per_key = 0; const size_t max_key = 0; const size_t max_val = 0; this->create_input(num_keys, num_values_per_key, max_key, max_val); auto reference_map = this->compute_reference_solution(); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(); this->compare_gdf_result(reference_map); } // Create a new derived class from JoinTest so we can do a new Typed Test set of tests template <class test_parameters> struct GroupValidTest : public 
GroupTest<test_parameters> { }; TYPED_TEST_CASE(GroupValidTest, ValidTestImplementations); TYPED_TEST(GroupValidTest, ReportValidMaskError) { const size_t num_keys = 1; const size_t num_values_per_key = 8; const size_t max_key = num_keys*2; const size_t max_val = 1000; this->create_input(num_keys, num_values_per_key, max_key, max_val, false, 1); this->create_gdf_output_buffers(num_keys, num_values_per_key); this->compute_gdf_result(GDF_VALIDITY_UNSUPPORTED); }
4ba75d55313ddb0b90afec0f4ce145f8adf40e61.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "update_mixed_derivatives.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *temppsix = NULL; hipMalloc(&temppsix, XSIZE*YSIZE); double *temppsiy = NULL; hipMalloc(&temppsiy, XSIZE*YSIZE); double *temppsixy = NULL; hipMalloc(&temppsixy, XSIZE*YSIZE); unsigned int nx = 1; unsigned int ny = 1; double dx = 1; double dy = 1; unsigned int TileSize = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( update_mixed_derivatives), dim3(gridBlock),dim3(threadBlock), 0, 0, temppsix,temppsiy,temppsixy,nx,ny,dx,dy,TileSize); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( update_mixed_derivatives), dim3(gridBlock),dim3(threadBlock), 0, 0, temppsix,temppsiy,temppsixy,nx,ny,dx,dy,TileSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( 
update_mixed_derivatives), dim3(gridBlock),dim3(threadBlock), 0, 0, temppsix,temppsiy,temppsixy,nx,ny,dx,dy,TileSize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4ba75d55313ddb0b90afec0f4ce145f8adf40e61.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "update_mixed_derivatives.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *temppsix = NULL; cudaMalloc(&temppsix, XSIZE*YSIZE); double *temppsiy = NULL; cudaMalloc(&temppsiy, XSIZE*YSIZE); double *temppsixy = NULL; cudaMalloc(&temppsixy, XSIZE*YSIZE); unsigned int nx = 1; unsigned int ny = 1; double dx = 1; double dy = 1; unsigned int TileSize = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); update_mixed_derivatives<<<gridBlock,threadBlock>>>(temppsix,temppsiy,temppsixy,nx,ny,dx,dy,TileSize); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { update_mixed_derivatives<<<gridBlock,threadBlock>>>(temppsix,temppsiy,temppsixy,nx,ny,dx,dy,TileSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { update_mixed_derivatives<<<gridBlock,threadBlock>>>(temppsix,temppsiy,temppsixy,nx,ny,dx,dy,TileSize); } auto end = steady_clock::now(); auto usecs = 
duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
energymin_amg_level.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <energymin/energymin_amg_level.h> #include <amg_level.h> #include <basic_types.h> #include <cutil.h> #include <multiply.h> #include <transpose.h> #include <blas.h> #include <util.h> #include <thrust/logical.h> #include <thrust/remove.h> #include <thrust/adjacent_difference.h> #include <assert.h> #include <matrix_io.h> #include <csr_multiply.h> #include <thrust/logical.h> #include <thrust/count.h> #include <thrust/sort.h> #include <profile.h> #include <string> #include <algorithm> namespace amgx { namespace energymin { // --------------------------- Begin Base Class Public methods ------------------------------------ template <class T_Config> Energymin_AMG_Level_Base<T_Config> ::Energymin_AMG_Level_Base(AMG_Class *amg) : AMG_Level<T_Config>(amg) { selector = amgx::classical::SelectorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); interpolator = InterpolatorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); strength = NULL; std::string selector_val = amg->m_cfg->template getParameter<std::string>("selector", amg->m_cfg_scope); if (selector_val == "PMIS") //or any other classical selector { strength = StrengthFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); //using default strength max_row_sum = amg->m_cfg->AMG_Config::getParameter<double>("max_row_sum", amg->m_cfg_scope); } } template <class T_Config> Energymin_AMG_Level_Base<T_Config>::~Energymin_AMG_Level_Base() { delete selector; delete interpolator; if (strength != NULL) { delete strength; } } // Compute A, P, and R operators template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::createCoarseVertices() { Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA(); Matrix<T_Config> &A = this->getA(); int size_all; size_all = A.get_num_rows(); this->m_cf_map.resize(size_all); thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0); cudaCheckError(); markCoarseFinePoints(); } template <class T_Config> void 
Energymin_AMG_Level_Base<T_Config> ::createCoarseMatrices() { Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA(); Matrix<T_Config> &A = this->getA(); /* WARNING: exit if D1 interpolator is selected in distributed setting */ std::string s(""); s += AMG_Level<T_Config>::amg->m_cfg->AMG_Config ::getParameter<std::string>("energymin_interpolator", AMG_Level<T_Config>::amg->m_cfg_scope); // Compute Restriction operator computeRestrictionOperator(); // Compute Prolongation operator and coarse matrix Ac if (!this->A->is_matrix_distributed() || this->A->manager->get_num_partitions() == 1) { // Create Prolongation operator computeProlongationOperator(); computeAOperator(); } else { computeAOperator_distributed(); } RAP.copyAuxData(&A); if (this->getA().is_matrix_singleGPU()) { this->m_next_level_size = this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_num_rows() * this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy(); } else { // m_next_level_size is the size that will be used to allocate xc, bc vectors int size, offset; this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().getOffsetAndSizeForView(FULL, &offset, &size); this->m_next_level_size = size * this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy(); } } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::markCoarseFinePoints() { Matrix<T_Config> &A = this->getA(); // Allocate necessary memory typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector; int size_all, size_full, nnz_full; BVector m_s_con; IVector m_scratch; FVector weights; if (!A.is_matrix_singleGPU()) { int offset; // Need to get number of 2-ring rows A.getOffsetAndSizeForView(ALL, &offset, &size_all); 
A.getOffsetAndSizeForView(FULL, &offset, &size_full); A.getNnzForView(FULL, &nnz_full); weights.resize(size_full); } else { size_all = A.get_num_rows(); size_full = A.get_num_rows(); nnz_full = A.get_num_nz(); weights.resize(A.get_num_rows()); } this->m_cf_map.resize(size_all); m_s_con.resize(nnz_full); m_scratch.resize(size_full); thrust::fill(weights.begin(), weights.end(), 0.0); cudaCheckError(); thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0); cudaCheckError(); thrust::fill(m_s_con.begin(), m_s_con.end(), false); cudaCheckError(); thrust::fill(m_scratch.begin(), m_scratch.end(), 0); cudaCheckError(); if (strength != NULL) { if (!A.is_matrix_singleGPU()) { ViewType oldView = A.currentView(); A.setView(FULL); strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum); A.setView(oldView); A.manager->exchange_halo(weights, weights.tag); } else { strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum); } } // Mark coarse and fine points selector->markCoarseFinePoints(A, weights, m_s_con, this->m_cf_map, m_scratch); this->m_cf_map.dirtybit = 1; } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeRestrictionOperator() { this->Profile.tic("computeR"); Matrix<T_Config> &A = this->getA(); //allocate necessary memory typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector; // WARNING: Since energymin P is in computed in CSC format and AMGX does not support // CSC format, we are actually computing P^T (=R) in generateInterpolationMatrix!! 
//generate the interpolation matrix interpolator->generateInterpolationMatrix(A, this->m_cf_map, R, AMG_Level<TConfig>::amg); this->m_cf_map.clear(); this->m_cf_map.shrink_to_fit(); this->Profile.toc("computeR"); } // Compute R=P^T template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeProlongationOperator() { this->Profile.tic("computeP"); P.set_initialized(0); R.setView(OWNED); transpose(R, P, R.get_num_rows()); if (this->m_min_rows_latency_hiding < 0 || P.get_num_rows() < this->m_min_rows_latency_hiding) { // This will cause bsrmv to not do latency hiding P.setInteriorView(OWNED); P.setExteriorView(OWNED); } P.set_initialized(1); this->Profile.toc("computeP"); } // Compute the Galerkin product: A_c=R*A*P template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1() { FatalError("Energymin AMG computeAOperator_1x1 not implemented on host\n", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1_distributed() { FatalError("Distributed energymin AMG not implemented for host\n", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1() { this->Profile.tic("computeA"); Matrix<TConfig_d> &RAP = this->getNextLevel( device_memory() )->getA(); RAP.addProps(CSR); RAP.set_block_dimx(this->getA().get_block_dimx()); RAP.set_block_dimy(this->getA().get_block_dimy()); this->R.set_initialized(0); this->R.addProps(CSR); this->R.set_initialized(1); this->P.set_initialized(0); this->P.addProps(CSR); this->P.set_initialized(1); void *wk = 
AMG_Level<TConfig_d>::amg->getCsrWorkspace(); if ( wk == NULL ) { wk = CSR_Multiply<TConfig_d>::csr_workspace_create( *(AMG_Level<TConfig_d>::amg->m_cfg), AMG_Level<TConfig_d>::amg->m_cfg_scope ); AMG_Level<TConfig_d>::amg->setCsrWorkspace( wk ); } RAP.set_initialized(0); CSR_Multiply<TConfig_d>::csr_galerkin_product(this->R, this->getA(), this->P, RAP, NULL, NULL, NULL, NULL, NULL, NULL, wk); RAP.set_initialized(1); this->Profile.toc("computeA"); } // Compute the restriction: rr=R*r template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::restrictResidual(VVector &r, VVector &rr) { typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); // we need to resize residual vector to make sure it can store halo rows to be sent if (!P.is_matrix_singleGPU()) { int desired_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()] * rr.get_block_size()); rr.resize(desired_size); } this->Profile.tic("restrictRes"); // Disable speculative send of rr if (P.is_matrix_singleGPU()) { multiply( R, r, rr); } else { multiply_with_mask_restriction( R, r, rr, P); } rr.dirtybit = 1; // Do I need this? 
if (!P.is_matrix_singleGPU()) { int desired_size = P.manager->halo_offsets[P.manager->neighbors.size()] * rr.get_block_size(); // P.manager->transformVector(rr); //This is just to make sure size is right if (rr.size() < desired_size) { rr.resize(P.manager->halo_offsets[P.manager->neighbors.size()]*rr.get_block_size()); } // P.manager->exchange_halo(rr, rr.tag); } this->Profile.toc("restrictRes"); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1_distributed() { FatalError("Energymin AMG Level computeAOperator_1x1_distributed() not implemented", AMGX_ERR_NOT_IMPLEMENTED); } // Prolongate the error: x+=P*e template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::prolongateAndApplyCorrection(VVector &e, VVector &bc, VVector &x, VVector &tmp) { this->Profile.tic("proCorr"); // get coarse matrix typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); // Use P.manager to exchange halo of e before doing P // (since P has columns belonging to one of P.neighbors) e.dirtybit = 1; if (!P.is_matrix_singleGPU()) { int e_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()]) * e.get_block_size(); e.resize(e_size); } if (P.is_matrix_singleGPU()) { if (e.size() > 0) {multiply( P, e, tmp);} } else { multiply_with_mask( P, e, tmp); } // get owned num rows for fine matrix int owned_size; if (Ac.is_matrix_distributed()) { int owned_offset; P.manager->getOffsetAndSizeForView(OWNED, &owned_offset, &owned_size); } else { owned_size = x.size(); } //apply axpby(x, tmp, x, ValueType(1), ValueType(1), 0, owned_size); this->Profile.toc("proCorr"); x.dirtybit = 1; } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeAOperator() { if (this->A->get_block_size() == 1) { 
computeAOperator_1x1(); } else { FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeAOperator_distributed() { if (this->A->get_block_size() == 1) { computeAOperator_1x1_distributed(); } else { FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } /**************************************** * Explicit instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace energymin } // namespace amgx
energymin_amg_level.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <energymin/energymin_amg_level.h> #include <amg_level.h> #include <basic_types.h> #include <cutil.h> #include <multiply.h> #include <transpose.h> #include <blas.h> #include <util.h> #include <thrust/logical.h> #include <thrust/remove.h> #include <thrust/adjacent_difference.h> #include <assert.h> #include <matrix_io.h> #include <csr_multiply.h> #include <thrust/logical.h> #include <thrust/count.h> #include <thrust/sort.h> #include <profile.h> #include <string> #include <algorithm> namespace amgx { namespace energymin { // --------------------------- Begin Base Class Public methods ------------------------------------ template <class T_Config> Energymin_AMG_Level_Base<T_Config> ::Energymin_AMG_Level_Base(AMG_Class *amg) : AMG_Level<T_Config>(amg) { selector = amgx::classical::SelectorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); interpolator = InterpolatorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); strength = NULL; std::string selector_val = amg->m_cfg->template getParameter<std::string>("selector", amg->m_cfg_scope); if (selector_val == "PMIS") //or any other classical selector { strength = StrengthFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); //using default strength max_row_sum = amg->m_cfg->AMG_Config::getParameter<double>("max_row_sum", amg->m_cfg_scope); } } template <class T_Config> Energymin_AMG_Level_Base<T_Config>::~Energymin_AMG_Level_Base() { delete selector; delete interpolator; if (strength != NULL) { delete strength; } } // Compute A, P, and R operators template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::createCoarseVertices() { Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA(); Matrix<T_Config> &A = this->getA(); int size_all; size_all = A.get_num_rows(); this->m_cf_map.resize(size_all); thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0); cudaCheckError(); markCoarseFinePoints(); } template <class T_Config> void 
Energymin_AMG_Level_Base<T_Config> ::createCoarseMatrices() { Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA(); Matrix<T_Config> &A = this->getA(); /* WARNING: exit if D1 interpolator is selected in distributed setting */ std::string s(""); s += AMG_Level<T_Config>::amg->m_cfg->AMG_Config ::getParameter<std::string>("energymin_interpolator", AMG_Level<T_Config>::amg->m_cfg_scope); // Compute Restriction operator computeRestrictionOperator(); // Compute Prolongation operator and coarse matrix Ac if (!this->A->is_matrix_distributed() || this->A->manager->get_num_partitions() == 1) { // Create Prolongation operator computeProlongationOperator(); computeAOperator(); } else { computeAOperator_distributed(); } RAP.copyAuxData(&A); if (this->getA().is_matrix_singleGPU()) { this->m_next_level_size = this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_num_rows() * this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy(); } else { // m_next_level_size is the size that will be used to allocate xc, bc vectors int size, offset; this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().getOffsetAndSizeForView(FULL, &offset, &size); this->m_next_level_size = size * this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy(); } } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::markCoarseFinePoints() { Matrix<T_Config> &A = this->getA(); // Allocate necessary memory typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector; int size_all, size_full, nnz_full; BVector m_s_con; IVector m_scratch; FVector weights; if (!A.is_matrix_singleGPU()) { int offset; // Need to get number of 2-ring rows A.getOffsetAndSizeForView(ALL, &offset, &size_all); 
A.getOffsetAndSizeForView(FULL, &offset, &size_full); A.getNnzForView(FULL, &nnz_full); weights.resize(size_full); } else { size_all = A.get_num_rows(); size_full = A.get_num_rows(); nnz_full = A.get_num_nz(); weights.resize(A.get_num_rows()); } this->m_cf_map.resize(size_all); m_s_con.resize(nnz_full); m_scratch.resize(size_full); thrust::fill(weights.begin(), weights.end(), 0.0); cudaCheckError(); thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0); cudaCheckError(); thrust::fill(m_s_con.begin(), m_s_con.end(), false); cudaCheckError(); thrust::fill(m_scratch.begin(), m_scratch.end(), 0); cudaCheckError(); if (strength != NULL) { if (!A.is_matrix_singleGPU()) { ViewType oldView = A.currentView(); A.setView(FULL); strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum); A.setView(oldView); A.manager->exchange_halo(weights, weights.tag); } else { strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum); } } // Mark coarse and fine points selector->markCoarseFinePoints(A, weights, m_s_con, this->m_cf_map, m_scratch); this->m_cf_map.dirtybit = 1; } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeRestrictionOperator() { this->Profile.tic("computeR"); Matrix<T_Config> &A = this->getA(); //allocate necessary memory typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector; // WARNING: Since energymin P is in computed in CSC format and AMGX does not support // CSC format, we are actually computing P^T (=R) in generateInterpolationMatrix!! 
//generate the interpolation matrix interpolator->generateInterpolationMatrix(A, this->m_cf_map, R, AMG_Level<TConfig>::amg); this->m_cf_map.clear(); this->m_cf_map.shrink_to_fit(); this->Profile.toc("computeR"); } // Compute R=P^T template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeProlongationOperator() { this->Profile.tic("computeP"); P.set_initialized(0); R.setView(OWNED); transpose(R, P, R.get_num_rows()); if (this->m_min_rows_latency_hiding < 0 || P.get_num_rows() < this->m_min_rows_latency_hiding) { // This will cause bsrmv to not do latency hiding P.setInteriorView(OWNED); P.setExteriorView(OWNED); } P.set_initialized(1); this->Profile.toc("computeP"); } // Compute the Galerkin product: A_c=R*A*P template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1() { FatalError("Energymin AMG computeAOperator_1x1 not implemented on host\n", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1_distributed() { FatalError("Distributed energymin AMG not implemented for host\n", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1() { this->Profile.tic("computeA"); Matrix<TConfig_d> &RAP = this->getNextLevel( device_memory() )->getA(); RAP.addProps(CSR); RAP.set_block_dimx(this->getA().get_block_dimx()); RAP.set_block_dimy(this->getA().get_block_dimy()); this->R.set_initialized(0); this->R.addProps(CSR); this->R.set_initialized(1); this->P.set_initialized(0); this->P.addProps(CSR); this->P.set_initialized(1); void *wk = 
AMG_Level<TConfig_d>::amg->getCsrWorkspace(); if ( wk == NULL ) { wk = CSR_Multiply<TConfig_d>::csr_workspace_create( *(AMG_Level<TConfig_d>::amg->m_cfg), AMG_Level<TConfig_d>::amg->m_cfg_scope ); AMG_Level<TConfig_d>::amg->setCsrWorkspace( wk ); } RAP.set_initialized(0); CSR_Multiply<TConfig_d>::csr_galerkin_product(this->R, this->getA(), this->P, RAP, NULL, NULL, NULL, NULL, NULL, NULL, wk); RAP.set_initialized(1); this->Profile.toc("computeA"); } // Compute the restriction: rr=R*r template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::restrictResidual(VVector &r, VVector &rr) { typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); // we need to resize residual vector to make sure it can store halo rows to be sent if (!P.is_matrix_singleGPU()) { int desired_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()] * rr.get_block_size()); rr.resize(desired_size); } this->Profile.tic("restrictRes"); // Disable speculative send of rr if (P.is_matrix_singleGPU()) { multiply( R, r, rr); } else { multiply_with_mask_restriction( R, r, rr, P); } rr.dirtybit = 1; // Do I need this? 
if (!P.is_matrix_singleGPU()) { int desired_size = P.manager->halo_offsets[P.manager->neighbors.size()] * rr.get_block_size(); // P.manager->transformVector(rr); //This is just to make sure size is right if (rr.size() < desired_size) { rr.resize(P.manager->halo_offsets[P.manager->neighbors.size()]*rr.get_block_size()); } // P.manager->exchange_halo(rr, rr.tag); } this->Profile.toc("restrictRes"); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1_distributed() { FatalError("Energymin AMG Level computeAOperator_1x1_distributed() not implemented", AMGX_ERR_NOT_IMPLEMENTED); } // Prolongate the error: x+=P*e template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::prolongateAndApplyCorrection(VVector &e, VVector &bc, VVector &x, VVector &tmp) { this->Profile.tic("proCorr"); // get coarse matrix typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); // Use P.manager to exchange halo of e before doing P // (since P has columns belonging to one of P.neighbors) e.dirtybit = 1; if (!P.is_matrix_singleGPU()) { int e_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()]) * e.get_block_size(); e.resize(e_size); } if (P.is_matrix_singleGPU()) { if (e.size() > 0) {multiply( P, e, tmp);} } else { multiply_with_mask( P, e, tmp); } // get owned num rows for fine matrix int owned_size; if (Ac.is_matrix_distributed()) { int owned_offset; P.manager->getOffsetAndSizeForView(OWNED, &owned_offset, &owned_size); } else { owned_size = x.size(); } //apply axpby(x, tmp, x, ValueType(1), ValueType(1), 0, owned_size); this->Profile.toc("proCorr"); x.dirtybit = 1; } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeAOperator() { if (this->A->get_block_size() == 1) { 
computeAOperator_1x1(); } else { FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeAOperator_distributed() { if (this->A->get_block_size() == 1) { computeAOperator_1x1_distributed(); } else { FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } /**************************************** * Explicit instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace energymin } // namespace amgx
1fe1de75afdea1e3d23ce21ae37821b0a4acc08f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; // const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] * b[i]; } }
1fe1de75afdea1e3d23ce21ae37821b0a4acc08f.cu
#include "includes.h" __global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; // const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] * b[i]; } }
3220763a975551e9b6eb4255445a81e14b2d974c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated s Wed Nov 14 22:53:48 2012 */ #include "common_magma.h" #define BLOCK_SIZE 64 /********************************************************* * * SWAP BLAS: permute to set of N elements * ********************************************************/ /* * First version: line per line */ typedef struct { float *A1; float *A2; int n, lda1, lda2; } magmagpu_sswap_params_t; __global__ void magmagpu_sswap( magmagpu_sswap_params_t params ) { unsigned int x = threadIdx.x + __mul24(blockDim.x, blockIdx.x); unsigned int offset1 = __mul24( x, params.lda1); unsigned int offset2 = __mul24( x, params.lda2); if( x < params.n ) { float *A1 = params.A1 + offset1; float *A2 = params.A2 + offset2; float temp = *A1; *A1 = *A2; *A2 = temp; } } extern "C" void magmablas_sswap( magma_int_t n, float *dA1T, magma_int_t lda1, float *dA2T, magma_int_t lda2) { int blocksize = 64; dim3 blocks( (n+blocksize-1) / blocksize, 1, 1); magmagpu_sswap_params_t params = { dA1T, dA2T, n, lda1, lda2 }; hipLaunchKernelGGL(( magmagpu_sswap), dim3(blocks), dim3(blocksize), 0, magma_stream , params ); }
3220763a975551e9b6eb4255445a81e14b2d974c.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated s Wed Nov 14 22:53:48 2012 */ #include "common_magma.h" #define BLOCK_SIZE 64 /********************************************************* * * SWAP BLAS: permute to set of N elements * ********************************************************/ /* * First version: line per line */ typedef struct { float *A1; float *A2; int n, lda1, lda2; } magmagpu_sswap_params_t; __global__ void magmagpu_sswap( magmagpu_sswap_params_t params ) { unsigned int x = threadIdx.x + __mul24(blockDim.x, blockIdx.x); unsigned int offset1 = __mul24( x, params.lda1); unsigned int offset2 = __mul24( x, params.lda2); if( x < params.n ) { float *A1 = params.A1 + offset1; float *A2 = params.A2 + offset2; float temp = *A1; *A1 = *A2; *A2 = temp; } } extern "C" void magmablas_sswap( magma_int_t n, float *dA1T, magma_int_t lda1, float *dA2T, magma_int_t lda2) { int blocksize = 64; dim3 blocks( (n+blocksize-1) / blocksize, 1, 1); magmagpu_sswap_params_t params = { dA1T, dA2T, n, lda1, lda2 }; magmagpu_sswap<<< blocks, blocksize, 0, magma_stream >>>( params ); }
fb35d2719d546ce6b7b99e2d54920d5588e8cff8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define GPU inline __device__ #define uchar unsigned char #define BORDER_REPLICATE 1 #define BORDER_REFLECT 2 #define BORDER_REFLECT_101 3 #define BORDER_WRAP 4 #include<stdio.h> #include<hip/hip_vector_types.h> #include<vector_functions.h> __device__ int DEVICE_NUMBER; __device__ float modelview[4][4]; __device__ float inv_modelview[4][4]; __device__ int slider[4]; __device__ int slider_opacity[4]; __device__ float TF_bandwidth; __device__ int front_back; // clamp GPU float clamp(float f, float a, float b); GPU float2 clamp(float2 v, float a, float b); GPU float3 clamp(float3 v, float a, float b); GPU float3 clamp(float3 v, float3 a, float3 b); GPU float4 clamp(float4 v, float a, float b); GPU float4 clamp(float4 v, float4 a, float4 b); // rgba class class RGBA{ public: unsigned char r, g, b, a; GPU RGBA(float3 rgb, float a_in){ r = clamp(rgb.x, 0.0f, 255.0f); g = clamp(rgb.y, 0.0f, 255.0f); b = clamp(rgb.z, 0.0f, 255.0f); a = clamp(a_in, 0.0f, 255.0f); } GPU RGBA(float4 rgba){ r = clamp(rgba.x, 0.0f, 255.0f); g = clamp(rgba.y, 0.0f, 255.0f); b = clamp(rgba.z, 0.0f, 255.0f); a = clamp(rgba.w, 0.0f, 255.0f); } GPU RGBA(float r_in, float g_in, float b_in, float a_in){ r = clamp(r_in, 0.0f, 255.0f); g = clamp(g_in, 0.0f, 255.0f); b = clamp(b_in, 0.0f, 255.0f); a = clamp(a_in, 0.0f, 255.0f); } GPU RGBA(float c){ r = clamp(c, 0.0f, 255.0f); g = clamp(c, 0.0f, 255.0f); b = clamp(c, 0.0f, 255.0f); a = clamp(255.0f, 0.0f, 255.0f); } GPU RGBA(){ r = g = b = 0; a = 1; } }; // rgb class class RGB{ public: unsigned char r, g, b; GPU RGB(float3 rgb) { r = clamp(rgb.x, 0.0f, 255.0f); g = clamp(rgb.y, 0.0f, 255.0f); b = clamp(rgb.z, 0.0f, 255.0f); } GPU RGB(float4 rgb) { r = clamp(rgb.x, 0.0f, 255.0f); g = clamp(rgb.y, 0.0f, 255.0f); b = clamp(rgb.z, 0.0f, 255.0f); } GPU RGB(float r_in, float g_in, float b_in) { r = clamp(r_in, 0.0f, 255.0f); g = clamp(g_in, 0.0f, 255.0f); b = clamp(b_in, 
0.0f, 255.0f); } GPU RGB(float a) { r = g = b = clamp(a, 0.0f, 255.0f); } GPU RGB() { r = g = b = 0; } }; class VIVALDI_DATA_RANGE{ public: int4 data_start, data_end; int4 full_data_start, full_data_end; int4 buffer_start, buffer_end; int data_halo; int buffer_halo; }; // data type converters //////////////////////////////////////////////////////////////////////////////// GPU float convert(char1 a){ return float(a.x); } GPU float convert(uchar1 a){ return float(a.x); } GPU float convert(short1 a){ return float(a.x); } GPU float convert(ushort1 a){ return float(a.x); } GPU float convert(int1 a){ return float(a.x); } GPU float convert(uint1 a){ return float(a.x); } GPU float convert(float1 a){ return float(a.x); } GPU float convert(double1 a){ return float(a.x); } GPU float convert(double a){ return float(a); } GPU float2 convert(char2 a){ return make_float2(a.x, a.y); } GPU float2 convert(uchar2 a){ return make_float2(a.x, a.y); } GPU float2 convert(short2 a){ return make_float2(a.x,a.y); } GPU float2 convert(ushort2 a){ return make_float2(a.x,a.y); } GPU float2 convert(int2 a){ return make_float2(a.x,a.y); } GPU float2 convert(uint2 a){ return make_float2(a.x,a.y); } GPU float2 convert(float2 a){ return make_float2(a.x,a.y); } GPU float2 convert(double2 a){ return make_float2(a.x,a.y); } GPU float3 convert(RGB a){ return make_float3(a.r,a.g,a.b); } GPU float3 convert(char3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(uchar3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(short3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(ushort3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(int3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(uint3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(float3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(double3 a){ return make_float3(a.x, a.y, a.z); } GPU float4 convert(RGBA a){ return make_float4(a.r,a.g,a.b,a.a); } GPU float4 convert(char4 
a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(uchar4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(short4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(ushort4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(int4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(uint4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(float4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(double4 a){ return make_float4(a.x, a.y, a.z, a.w); } // data_type init ////////////////////////////////////////////////////////////// GPU float initial(uchar1 a){ return 0.0; } GPU float initial(char1 a){ return 0.0; } GPU float initial(ushort1 a){ return 0.0; } GPU float initial(short1 a){ return 0.0; } GPU float initial(int1 a){ return 0.0; } GPU float initial(uint1 a){ return 0.0; } GPU float initial(float1 a){ return 0.0; } GPU float initial(float a){ return 0.0; } GPU float initial(double1 a){ return 0.0; } GPU float2 initial(char2 a){ return make_float2(0); } GPU float2 initial(uchar2 a){ return make_float2(0); } GPU float2 initial(short2 a){ return make_float2(0); } GPU float2 initial(ushort2 a){ return make_float2(0); } GPU float2 initial(int2 a){ return make_float2(0); } GPU float2 initial(uint2 a){ return make_float2(0); } GPU float2 initial(float2 a){ return make_float2(0); } GPU float2 initial(double2 a){ return make_float2(0); } GPU float3 initial(RGB a){ return make_float3(0); } GPU float3 initial(char3 a){ return make_float3(0); } GPU float3 initial(uchar3 a){ return make_float3(0); } GPU float3 initial(short3 a){ return make_float3(0); } GPU float3 initial(ushort3 a){ return make_float3(0); } GPU float3 initial(int3 a){ return make_float3(0); } GPU float3 initial(uint3 a){ return make_float3(0); } GPU float3 initial(float3 a){ return make_float3(0); } GPU float3 initial(double3 a){ return make_float3(0); } GPU float4 initial(RGBA a){ return make_float4(0); } GPU 
float4 initial(char4 a){ return make_float4(0); } GPU float4 initial(uchar4 a){ return make_float4(0); } GPU float4 initial(short4 a){ return make_float4(0); } GPU float4 initial(ushort4 a){ return make_float4(0); } GPU float4 initial(int4 a){ return make_float4(0); } GPU float4 initial(uint4 a){ return make_float4(0); } GPU float4 initial(float4 a){ return make_float4(0); } GPU float4 initial(double4 a){ return make_float4(0); } GPU float initial2(float a){ return 1.0; } // float functions //////////////////////////////////////////////////////////////////////////////// GPU float length(float a){ if(a < 0){ return -a; } return a; } //f = value, a = min, b = max GPU float step(float edge, float x){ return x < edge ? 0 : 1; } GPU float rect(float edge0, float edge1, float x){ return edge0 <= x && x <= edge1 ? 1 : 0; } // float2 functions //////////////////////////////////////////////////////////////////////////////// // negate GPU float2 operator-(float2 a){ return make_float2(-a.x, -a.y); } // floor GPU float2 floor(const float2 v){ return make_float2(floor(v.x), floor(v.y)); } // reflect GPU float2 reflect(float2 i, float2 n){ return i - 2.0f * n * dot(n,i); } // float3 functions //////////////////////////////////////////////////////////////////////////////// // floor GPU float3 floor(const float3 v){ return make_float3(floor(v.x), floor(v.y), floor(v.z)); } // float4 functions //////////////////////////////////////////////////////////////////////////////// // additional constructors // negate GPU float4 operator-(float4 a){ return make_float4(-a.x, -a.y, -a.z, -a.w); } // floor GPU float4 floor(const float4 v){ return make_float4(floor(v.x), floor(v.y), floor(v.z), floor(v.w)); } // Frame //////////////////////////////////////////////////////////////////////////////// class Frame{ public: float3 x, y, z, origin; GPU void setDefault(float3 position) { origin = position; x = make_float3(1, 0, 0); y = make_float3(0, 1, 0); z = make_float3(0, 0, 1); } GPU void 
lookAt(float3 position, float3 target, float3 up) { origin = position; z = normalize(position - target); x = normalize(cross(up, z)); y = normalize(cross(z, x)); } GPU float3 getVectorToWorld(float3 v) { return v.x * x + v.y * y + v.z * z; } GPU float3 getPointToWorld(float3 p) { return p.x * x + p.y * y + p.z * z + origin; } }; // transfer2 //////////////////////////////////////////////////////////////////////////////// GPU float transfer2( float x0, float f0, float x1, float f1, float x) { if (x < x0) return 0; if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); return 0; } GPU float2 transfer2( float x0, float2 f0, float x1, float2 f1, float x) { if (x < x0) return make_float2(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); return make_float2(0); } GPU float3 transfer2( float x0, float3 f0, float x1, float3 f1, float x) { if (x < x0) return make_float3(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); return make_float3(0); } GPU float4 transfer2( float x0, float4 f0, float x1, float4 f1, float x) { if (x < x0) return make_float4(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); return make_float4(0); } // transfer3 //////////////////////////////////////////////////////////////////////////////// GPU float transfer3( float x0, float f0, float x1, float f1, float x2, float f2, float x) { if (x < x0) return 0; if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); if (x < x2) return lerp(f1, f2, (x - x1) / (x2 - x0)); return 0; } GPU float2 transfer3( float x0, float2 f0, float x1, float2 f1, float x2, float2 f2, float x) { if (x < x0) return make_float2(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); if (x < x2) return lerp(f1, f2, (x - x1) / (x2 - x0)); return make_float2(0); } GPU float3 transfer3( float x0, float3 f0, float x1, float3 f1, float x2, float3 f2, float x) { if (x < x0) return make_float3(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); if (x < x2) return lerp(f1, f2, (x - x1) / (x2 - x0)); return 
make_float3(0); } GPU float4 transfer3( float x0, float4 f0, float x1, float4 f1, float x2, float4 f2, float x) { if (x < x0) return make_float4(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); if (x < x2) return lerp(f1, f2, (x - x1) / (x2 - x0)); return make_float4(0); } // helper textures for cubic interpolation and random numbers //////////////////////////////////////////////////////////////////////////////// texture<float4, 2, hipReadModeElementType> hgTexture; texture<float4, 2, hipReadModeElementType> dhgTexture; texture<int, 2, hipReadModeElementType> randomTexture; GPU float3 hg(float a){ // float a2 = a * a; // float a3 = a2 * a; // float w0 = (-a3 + 3*a2 - 3*a + 1) / 6; // float w1 = (3*a3 - 6*a2 + 4) / 6; // float w2 = (-3*a3 + 3*a2 + 3*a + 1) / 6; // float w3 = a3 / 6; // float g = w2 + w3; // float h0 = (1.0f + a) - w1 / (w0 + w1); // float h1 = (1.0f - a) + w3 / (w2 + w3); // return make_float3(h0, h1, g); return make_float3(tex2D(hgTexture, a, 0)); } GPU float3 dhg(float a){ return make_float3(tex2D(dhgTexture, a, 0)); } //iterators ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// // class make_laplacian_iter{} class line_iter{ public: float3 S,E,P,step; float len; GPU line_iter(){ } GPU line_iter(float3 from, float3 to, float d){ S = from; E = to; step = normalize(to-from)*d; len = length(to-from); P = S; if( S.x == E.x && S.y == E.y && S.z == E.z )step = make_float3(0,0,1); } GPU float3 begin(){ return S; } GPU bool hasNext(){ float3 T = P + step; if( length(T-S) > len)return false; return true; } GPU bool valid(){ if(length(P-S) < len)return true; return false; } GPU float3 next(){ P += step; return P; } GPU float3 direction(){ return normalize(step); } }; class plane_iter{ public: float2 S; float d; int max_step, step; int width; float x,y; GPU plane_iter(){ } GPU plane_iter(float2 point, float size){ S = point; d = size; width = 1+2*size; max_step = width*width; } GPU 
plane_iter(int x, int y, float size){ S = make_float2(x,y); d = size; width = 1+2*size; max_step = width*width; } GPU plane_iter(float x, float y, float size){ S = make_float2(x,y); d = size; width = 1+2*size; max_step = width*width; } GPU float2 begin(){ step = 0; x = 0; y = 0; return S + make_float2(-d,-d); } GPU bool hasNext(){ if(max_step == step)return false; return true; } GPU bool valid(){ if(max_step <= step)return false; return true; } GPU float2 next(){ step++; x++; if( x == width){ x=0; y++;} float2 P = S + make_float2( x - d, y - d); return P; } }; class cube_iter{ public: float3 S; int d; int width; int max_step, step; float x,y,z; GPU cube_iter(){ } GPU cube_iter(float3 point, float size){ S = point; d = size; width = 1+2*size; max_step = (width)*(width)*(width); } GPU cube_iter(int x,int y,int z, float radius){ S = make_float3(x,y,z); d = radius; width = 1+2*radius; max_step = (width)*(width)*(width); } GPU cube_iter(float x, float y, float z, float size){ S = make_float3(x,y,z); d = size; width = 1+2*size; max_step = (width)*(width)*(width); } GPU float3 begin(){ step = 0; x = 0; y = 0; z = 0; return S + make_float3(-d, -d, -d); } GPU bool hasNext(){ if(max_step == step)return false; return true; } GPU bool valid(){ if(max_step == step)return false; return true; } GPU float3 next(){ step++; x++; if( x == width){ x=0; y++;} if( y == width){ y=0; z++;} float3 P = S + make_float3( x - d, y - d, z - d); return P; } }; // data query functions ////////////////////////////////////////////////////////////////////////////// #define INF __int_as_float(0x7f800000) GPU int2 float2_to_int2(float2 a){ return make_int2(int(a.x), int(a.y)); } GPU int3 float3_to_int3(float3 a){ return make_int3(int(a.x), int(a.y), int(a.z)); } // BORDER handling functions GPU int border_replicate(int x, int pivot){ // aaaaaa|abcdefgh|hhhhhhh return pivot; } GPU int border_reflect(int x, int pivot){ // fedcba|abcdefgh|hgfedcb int a; a = 0; if(x < pivot) a = -1; if(x > pivot) a = 1; 
return 2*pivot-x + a; } GPU int border_reflect_101(int x, int pivot){ // gfedcb|abcdefgh|gfedcba return 2*pivot-x; } GPU int border_wrap(int x, int pivot, int w){ // cdefgh|abcdefgh|abcdefg if(x > pivot)return x - w; if(x < pivot)return x + w; return pivot; } GPU int border_constant(){ // iiiiii|abcdefgh|iiiiiii with some specified 'i' return 0; } GPU int border_switch(int x, int pivot, int w, int border){ switch(border){ case BORDER_REPLICATE: // replicate return border_replicate(x, pivot); case BORDER_REFLECT: // reflect return border_reflect(x, pivot); case BORDER_REFLECT_101: // reflect_101 return border_reflect_101(x, pivot); case BORDER_WRAP: // border_wrap return border_wrap(x, pivot, w); default: // border_constant return border_constant(); } } GPU int check_in_border(int p, int start, int end){ if(p > end-1)return 1; // right else if(p < start)return -1; // left return 0; // middle } GPU int border_handling(int p, int start, int end, int border){ int flag = -1; while(flag != 0){ flag = check_in_border(p, start, end); if(flag == 1){ p = border_switch(p, end-1, end - start, border); }else if(flag == -1){ p = border_switch(p, start, end - start, border); } } return p; } // Range check GPU bool range_check(int p, int data_start, int data_end){ if(data_start <= p && p < data_end){ return true; } return false; } // 1D data query function //////////////////////////////////////////////////////////////////////////////// template<typename R,typename T> GPU R point_query_1d(T* data, float x, int border, VIVALDI_DATA_RANGE* sdr){ // int data_start = int(sdr->data_start.x); int full_data_start = int(sdr->full_data_start.x); int full_data_end = int(sdr->full_data_end.x); int buffer_start = int(sdr->buffer_start.x); R rt; // input coordinate is world coordinate // border handling int flag1; // -1, 0, 1 flag1 = check_in_border(x, full_data_start, full_data_end); bool flag; // border flag = (1 <= border && border <= 4); if( (!flag && (flag1 == 0)) || (flag)){ // Border 
calculation x = border_handling(x, full_data_start, full_data_end, border); // to Buffer coordinate x = x - buffer_start; rt = convert(data[int(x)]); }else{ rt = initial(data[0]); } return rt; } template<typename R,typename T> GPU R point_query_1d(T* data, float x, VIVALDI_DATA_RANGE* sdr){ return point_query_1d<R>(data, x, 0, sdr); } // 2D data query functions //////////////////////////////////////////////////////////////////////////////// template<typename R,typename T> GPU R point_query_2d(T* data, float2 p, int border, VIVALDI_DATA_RANGE* sdr){ int4 data_start = sdr->data_start; int4 data_end = sdr->data_end; int4 full_data_start = sdr->full_data_start; int4 full_data_end = sdr->full_data_end; int x = p.x; int y = p.y; int X = data_end.x - data_start.x; R rt; // Data coordinate input // border check int flag1, flag2; // -1, 0, 1 flag1 = check_in_border(x, full_data_start.x, full_data_end.x); flag2 = check_in_border(y, full_data_start.y, full_data_end.y); bool flag; // border flag = (1 <= border && border <= 4); if( (!flag && (flag1 == 0 && flag2 == 0)) || (flag)){ // Border calculation x = border_handling(x, full_data_start.x, full_data_end.x, border); y = border_handling(y, full_data_start.y, full_data_end.y, border); // Data range check bool flag_x = range_check(x, data_start.x, data_end.x); bool flag_y = range_check(y, data_start.y, data_end.y); if(flag_x && flag_y){ // to Buffer coordinate x = x - data_start.x; y = y - data_start.y; rt = convert(data[y*X + x]); }else{ rt = initial(data[0]); } }else{ rt = initial(data[0]); } return rt; } template<typename R,typename T> GPU R point_query_2d(T* image, float2 p, VIVALDI_DATA_RANGE* sdr){ return point_query_2d<R>(image, p, 0, sdr); } template<typename R,typename T> GPU R point_query_2d(T* image, float x, float y, int border, VIVALDI_DATA_RANGE* sdr){ return point_query_2d<R>(image, make_float2(x,y), border, sdr); } template<typename R,typename T> GPU R point_query_2d(T* image, float x, float y, 
VIVALDI_DATA_RANGE* sdr){ return point_query_2d<R>(image, make_float2(x,y), 0, sdr); } template<typename R,typename T> GPU R linear_query_2d(T* image, float2 p, int border, VIVALDI_DATA_RANGE* sdr){ //range check float x = p.x; float y = p.y; int fx = floor(x); int fy = floor(y); int cx = ceil(x); int cy = ceil(y); float dx = x - fx; float dy = y - fy; R iv = initial(image[0])*0; R q00 = point_query_2d<R>(image, fx, fy, border, sdr); R q01 = point_query_2d<R>(image, cx, fy, border, sdr); R q10 = point_query_2d<R>(image, fx, cy, border, sdr); R q11 = point_query_2d<R>(image, cx, cy, border, sdr); // lerp along x R q0 = lerp(q00, q01, dx); R q1 = lerp(q10, q11, dx); // lerp along y R q = lerp(q0, q1, dy); return q; } template<typename R, typename T> GPU R linear_query_2d(T* image, float2 p, VIVALDI_DATA_RANGE* sdr){ return linear_query_2d<R>(image, p, 0, sdr); } template<typename R, typename T> GPU R linear_query_2d(T* image, float x, float y, int border, VIVALDI_DATA_RANGE* sdr){ return linear_query_2d<R>(image, make_float2(x,y), border, sdr); } template<typename R, typename T> GPU R linear_query_2d(T* image, float x, float y, VIVALDI_DATA_RANGE* sdr){ return linear_query_2d<R>(image, make_float2(x,y), 0, sdr); } template<typename R, typename T> GPU float2 linear_gradient_2d(T* image, float2 p, VIVALDI_DATA_RANGE* sdr){ int4 data_start = sdr->data_start; int4 data_end = sdr->data_end; int halo = sdr->data_halo; float x = p.x - data_start.x; float y = p.y - data_start.y; int X = data_end.x - data_start.x; int Y = data_end.y - data_start.y; float2 rbf = make_float2(0); if( x < halo)return rbf; if( y < halo)return rbf; if( x >= X-halo)return rbf; if( y >= Y-halo)return rbf; float delta = 1.0f; R xf, xb; xf = linear_query_2d<R>(image, make_float2(p.x + delta, p.y), sdr); xb = linear_query_2d<R>(image, make_float2(p.x - delta, p.y), sdr); float dx = length(xf-xb); R yf, yb; yf = linear_query_2d<R>(image, make_float2(p.x, p.y + delta), sdr); yb = linear_query_2d<R>(image, 
make_float2(p.x, p.y - delta), sdr); float dy = length(yf-yb); return make_float2(dx,dy)/(2*delta); } template<typename R, typename T> GPU float2 linear_gradient_2d(T* image, float x, float y, VIVALDI_DATA_RANGE* sdr){ return linear_gradient_2d<R>(image, make_float2(x,y), sdr); } template<typename R, typename T> GPU float2 linear_gradient_2d(T* image, int x, int y, VIVALDI_DATA_RANGE* sdr){ return linear_gradient_2d<R>(image, make_float2(x,y), sdr); } // 3D data query functions //////////////////////////////////////////////////////////////////////////////// template<typename R, typename T> GPU R point_query_3d(T* image, float3 p, int border, VIVALDI_DATA_RANGE* sdr){ int4 data_start = sdr->data_start; int4 data_end = sdr->data_end; int4 full_data_start = sdr->full_data_start; int4 full_data_end = sdr->full_data_end; int4 buffer_start = sdr->buffer_start; int4 buffer_end = sdr->buffer_end; int x = p.x; int y = p.y; int z = p.z; int X = buffer_end.x - buffer_start.x; int Y = buffer_end.y - buffer_start.y; R rt; // Data coordinate input // border check int flag1, flag2, flag3; // -1, 0, 1 flag1 = check_in_border(x, full_data_start.x, full_data_end.x); flag2 = check_in_border(y, full_data_start.y, full_data_end.y); flag3 = check_in_border(z, full_data_start.z, full_data_end.z); bool flag; // border flag = (1 <= border && border <= 4); if( (!flag && (flag1 == 0 && flag2 == 0 && flag3 == 0)) || (flag)){ // Border calculation x = border_handling(x, full_data_start.x, full_data_end.x, border); y = border_handling(y, full_data_start.y, full_data_end.y, border); z = border_handling(z, full_data_start.z, full_data_end.z, border); bool flag_x = range_check(x, data_start.x, data_end.x); bool flag_y = range_check(y, data_start.y, data_end.y); bool flag_z = range_check(z, data_start.z, data_end.z); if(flag_x && flag_y && flag_z){ // to Buffer coordinate x = x - buffer_start.x; y = y - buffer_start.y; z = z - buffer_start.z; rt = convert(image[z*Y*X + y*X + x]); }else{ rt = 
initial(image[0]); } }else{ rt = initial(image[0]); } return rt; } template<typename R, typename T> GPU R point_query_3d(T* image, float3 p, VIVALDI_DATA_RANGE* sdr){ return point_query_3d<R>(image, p, 0, sdr); } template<typename R, typename T> GPU R point_query_3d(T* image, float x, float y, float z, int border, VIVALDI_DATA_RANGE* sdr){ return point_query_3d<R>(image, make_float3(x,y,z), border, sdr); } template<typename R, typename T> GPU R point_query_3d(T* image, float x, float y, float z, VIVALDI_DATA_RANGE* sdr){ return point_query_3d<R>(image, make_float3(x,y,z), 0, sdr); } GPU float d_lerp(float a, float b, float t){ return (b - a) * t; } template<typename R, typename T> GPU R linear_query_3d(T* volume, float3 p, int border, VIVALDI_DATA_RANGE* sdr){ //tri linear interpolation float x,y,z; x = p.x; y = p.y; z = p.z; int fx = floor(x); int fy = floor(y); int fz = floor(z); int cx = ceil(x); int cy = ceil(y); int cz = ceil(z); R q000 = point_query_3d<R>(volume, fx, fy, fz, border, sdr); R q001 = point_query_3d<R>(volume, fx, fy, cz, border, sdr); R q010 = point_query_3d<R>(volume, fx, cy, fz, border, sdr); R q011 = point_query_3d<R>(volume, fx, cy, cz, border, sdr); R q100 = point_query_3d<R>(volume, cx, fy, fz, border, sdr); R q101 = point_query_3d<R>(volume, cx, fy, cz, border, sdr); R q110 = point_query_3d<R>(volume, cx, cy, fz, border, sdr); R q111 = point_query_3d<R>(volume, cx, cy, cz, border, sdr); float dx = x - fx; float dy = y - fy; float dz = z - fz; // lerp along x R q00 = lerp(q000, q001, dx); R q01 = lerp(q010, q011, dx); R q10 = lerp(q100, q101, dx); R q11 = lerp(q110, q111, dx); // lerp along y R q0 = lerp(q00, q01, dy); R q1 = lerp(q10, q11, dy); // lerp along z R q = lerp(q0, q1, dz); return q; } template<typename R, typename T> GPU R linear_query_3d(T* volume, float3 p, VIVALDI_DATA_RANGE* sdr){ return linear_query_3d<R>(volume, p, 0, sdr); } template<typename R, typename T> GPU R linear_query_3d(T* volume, float x, float y, float z, int 
border, VIVALDI_DATA_RANGE* sdr){ return linear_query_3d<R>(volume, make_float3(x,y,z), border, sdr); } template<typename R, typename T> GPU R linear_query_3d(T* volume, float x, float y, float z, VIVALDI_DATA_RANGE* sdr){ return linear_query_3d<R>(volume, make_float3(x,y,z), 0, sdr); } template<typename R, typename T> GPU float3 linear_gradient_3d(T* volume, float3 p, VIVALDI_DATA_RANGE* sdr){ float3 rbf = make_float3(0); float delta = 1.0f; //float delta = 0.01f; R dx = linear_query_3d<R>(volume, make_float3(p.x + delta, p.y, p.z), sdr) - linear_query_3d<R>(volume, make_float3(p.x - delta, p.y, p.z), sdr); R dy = linear_query_3d<R>(volume, make_float3(p.x, p.y + delta, p.z), sdr) - linear_query_3d<R>(volume, make_float3(p.x, p.y - delta, p.z), sdr); R dz = linear_query_3d<R>(volume, make_float3(p.x, p.y, p.z + delta), sdr) - linear_query_3d<R>(volume, make_float3(p.x, p.y, p.z - delta), sdr); // float dxl = length(dx); // float dyl = length(dy); // float dzl = length(dz); // return make_float3(dxl, dyl, dzl) / (2 * delta); // return make_float3(dxl) / (2 * delta); // return make_float3(dx); return make_float3(dx, dy, dz) / (2 * delta); } template<typename R, typename T> GPU float3 linear_gradient_3d(T* volume, int x, int y, int z, VIVALDI_DATA_RANGE* sdr){ return linear_gradient_3d<R>(volume, make_float3(x,y,z), sdr); } template<typename R, typename T> GPU float3 linear_gradient_3d(T* volume, float x, float y, float z, VIVALDI_DATA_RANGE* sdr){ return linear_gradient_3d<R>(volume, make_float3(x,y,z), sdr); } template<typename R,typename T> GPU T cubic_query_3d(T* volume, float3 p, VIVALDI_DATA_RANGE* sdr){ float3 alpha = 255 * (p - floor(p - 0.5f) - 0.5f); float3 hgx = hg(alpha.x); float3 hgy = hg(alpha.y); float3 hgz = hg(alpha.z); // 8 linear queries R q000 = linear_query_3d<R>(volume, p.x - hgx.x, p.y - hgy.x, p.z - hgz.x, sdr); R q001 = linear_query_3d<R>(volume, p.x - hgx.x, p.y - hgy.x, p.z + hgz.y, sdr); R q010 = linear_query_3d<R>(volume, p.x - hgx.x, p.y 
+ hgy.y, p.z - hgz.x, sdr); R q011 = linear_query_3d<R>(volume, p.x - hgx.x, p.y + hgy.y, p.z + hgz.y, sdr); R q100 = linear_query_3d<R>(volume, p.x + hgx.y, p.y - hgy.x, p.z - hgz.x, sdr); R q101 = linear_query_3d<R>(volume, p.x + hgx.y, p.y - hgy.x, p.z + hgz.y, sdr); R q110 = linear_query_3d<R>(volume, p.x + hgx.y, p.y + hgy.y, p.z - hgz.x, sdr); R q111 = linear_query_3d<R>(volume, p.x + hgx.y, p.y + hgy.y, p.z + hgz.y, sdr); // lerp along z R q00 = lerp(q000, q001, hgz.z); R q01 = lerp(q010, q011, hgz.z); R q10 = lerp(q100, q101, hgz.z); R q11 = lerp(q110, q111, hgz.z); // lerp along y R q0 = lerp(q00, q01, hgy.z); R q1 = lerp(q10, q11, hgy.z); // lerp along x R q = lerp(q0, q1, hgx.z); return q; } template<typename R,typename T> GPU float3 cubic_gradient_3d(T* data, float3 p, VIVALDI_DATA_RANGE* sdr){ float3 rbf = make_float3(0); float3 alpha = 255 * (p - floor(p - 0.5f) - 0.5f); float3 hgx = hg(alpha.x); float3 hgy = hg(alpha.y); float3 hgz = hg(alpha.z); float3 dhgx = dhg(alpha.x); float3 dhgy = dhg(alpha.y); float3 dhgz = dhg(alpha.z); // compute x-derivative R q000 = linear_query_3d<R>(data, p.x - dhgx.x, p.y - hgy.x, p.z - hgz.x, sdr); R q001 = linear_query_3d<R>(data, p.x - dhgx.x, p.y - hgy.x, p.z + hgz.y, sdr); R q010 = linear_query_3d<R>(data, p.x - dhgx.x, p.y + hgy.y, p.z - hgz.x, sdr); R q011 = linear_query_3d<R>(data, p.x - dhgx.x, p.y + hgy.y, p.z + hgz.y, sdr); R q100 = linear_query_3d<R>(data, p.x + dhgx.y, p.y - hgy.x, p.z - hgz.x, sdr); R q101 = linear_query_3d<R>(data, p.x + dhgx.y, p.y - hgy.x, p.z + hgz.y, sdr); R q110 = linear_query_3d<R>(data, p.x + dhgx.y, p.y + hgy.y, p.z - hgz.x, sdr); R q111 = linear_query_3d<R>(data, p.x + dhgx.y, p.y + hgy.y, p.z + hgz.y, sdr); R q00 = lerp(q000, q001, hgz.z); R q01 = lerp(q010, q011, hgz.z); R q10 = lerp(q100, q101, hgz.z); R q11 = lerp(q110, q111, hgz.z); R q0 = lerp(q00, q01, hgy.z); R q1 = lerp(q10, q11, hgy.z); float gradientX = d_lerp(q0, q1, dhgx.z); // compute y-derivative q000 = 
linear_query_3d<R>(data, p.x - hgx.x, p.y - dhgy.x, p.z - hgz.x, sdr); q001 = linear_query_3d<R>(data, p.x - hgx.x, p.y - dhgy.x, p.z + hgz.y, sdr); q010 = linear_query_3d<R>(data, p.x - hgx.x, p.y + dhgy.y, p.z - hgz.x, sdr); q011 = linear_query_3d<R>(data, p.x - hgx.x, p.y + dhgy.y, p.z + hgz.y, sdr); q100 = linear_query_3d<R>(data, p.x + hgx.y, p.y - dhgy.x, p.z - hgz.x, sdr); q101 = linear_query_3d<R>(data, p.x + hgx.y, p.y - dhgy.x, p.z + hgz.y, sdr); q110 = linear_query_3d<R>(data, p.x + hgx.y, p.y + dhgy.y, p.z - hgz.x, sdr); q111 = linear_query_3d<R>(data, p.x + hgx.y, p.y + dhgy.y, p.z + hgz.y, sdr); q00 = lerp(q000, q001, hgz.z); q01 = lerp(q010, q011, hgz.z); q10 = lerp(q100, q101, hgz.z); q11 = lerp(q110, q111, hgz.z); q0 = d_lerp(q00, q01, dhgy.z); q1 = d_lerp(q10, q11, dhgy.z); float gradientY = lerp(q0, q1, hgx.z); // compute z-derivative q000 = linear_query_3d<R>(data, p.x - hgx.x, p.y - hgy.x, p.z - dhgz.x, sdr); q001 = linear_query_3d<R>(data, p.x - hgx.x, p.y - hgy.x, p.z + dhgz.y, sdr); q010 = linear_query_3d<R>(data, p.x - hgx.x, p.y + hgy.y, p.z - dhgz.x, sdr); q011 = linear_query_3d<R>(data, p.x - hgx.x, p.y + hgy.y, p.z + dhgz.y, sdr); q100 = linear_query_3d<R>(data, p.x + hgx.y, p.y - hgy.x, p.z - dhgz.x, sdr); q101 = linear_query_3d<R>(data, p.x + hgx.y, p.y - hgy.x, p.z + dhgz.y, sdr); q110 = linear_query_3d<R>(data, p.x + hgx.y, p.y + hgy.y, p.z - dhgz.x, sdr); q111 = linear_query_3d<R>(data, p.x + hgx.y, p.y + hgy.y, p.z + dhgz.y, sdr); q00 = d_lerp(q000, q001, dhgz.z); q01 = d_lerp(q010, q011, dhgz.z); q10 = d_lerp(q100, q101, dhgz.z); q11 = d_lerp(q110, q111, dhgz.z); q0 = lerp(q00, q01, hgy.z); q1 = lerp(q10, q11, hgy.z); float gradientZ = lerp(q0, q1, hgx.z); return make_float3(gradientX, gradientY, gradientZ); } //rotate functions /////////////////////////////////////////////////////////////////////////////////// GPU float arccos(float angle){ return acos(angle); } GPU float arcsin(float angle){ return asin(angle); } GPU float 
norm(float3 a){ float val = 0; val += a.x*a.x + a.y*a.y + a.z*a.z; val = sqrt(val); return val; } GPU float3 matmul(float3* mat, float3 vec){ float x = mat[0].x*vec.x + mat[1].x*vec.y + mat[2].x*vec.z; float y = mat[0].y*vec.x + mat[1].y*vec.y + mat[2].y*vec.z; float z = mat[0].z*vec.x + mat[1].z*vec.y + mat[2].z*vec.z; return make_float3(x, y, z); } GPU void getInvMat(float3* mat, float3* ret) { double det = mat[0].x*(mat[1].y*mat[2].z-mat[1].z*mat[2].y)-mat[0].y*(mat[1].x*mat[2].z-mat[1].z*mat[2].x)+mat[0].z*(mat[1].x*mat[2].y-mat[1].y*mat[2].x); if(det!=0) { double invdet = 1/det; float a00 = (mat[1].y*mat[2].z-mat[2].y*mat[1].z)*invdet; float a01 = (mat[0].z*mat[2].y-mat[0].y*mat[2].z)*invdet; float a02 = (mat[0].y*mat[1].z-mat[0].z*mat[1].y)*invdet; float a10 = (mat[1].z*mat[2].x-mat[1].x*mat[2].z)*invdet; float a11 = (mat[0].x*mat[2].z-mat[0].z*mat[2].x)*invdet; float a12 = (mat[1].x*mat[0].z-mat[0].x*mat[1].z)*invdet; float a20 = (mat[1].x*mat[2].y-mat[2].x*mat[1].y)*invdet; float a21 = (mat[2].x*mat[0].y-mat[0].x*mat[2].y)*invdet; float a22 = (mat[0].x*mat[1].y-mat[1].x*mat[0].y)*invdet; ret[0] = make_float3(a00, a01, a02); ret[1] = make_float3(a10, a11, a12); ret[2] = make_float3(a20, a21, a22); } else { ret[0] = make_float3(0); ret[1] = make_float3(0); ret[2] = make_float3(0); } } GPU float getDistance(float3* mat, float3 vec){ float3 tmp_mat = matmul(mat, vec); if(tmp_mat.z>200000 ) return -8765; if(tmp_mat.z<0) tmp_mat.z = 0; if(tmp_mat.y < 0 || tmp_mat.y > 1) return -8765; if(tmp_mat.x < 0 || tmp_mat.x > 1) return -8765; if(tmp_mat.y+tmp_mat.x > 1.0000) return -8765; return tmp_mat.z; } GPU float2 getCrossedInterval(float3 origin, float3 direction, float3* tmp){ float3 min = tmp[0]; float3 max = tmp[1]; float tmin=-9999.0, tmax=9999.0, tymin=-9999.0, tymax=9999.0, tzmin=-9999.0, tzmax=9999.0; if (direction.x > 0) { tmin = (min.x - origin.x) / direction.x; tmax = (max.x - origin.x) / direction.x; } else if(direction.x < 0) { tmin = (max.x - origin.x) / 
direction.x; tmax = (min.x - origin.x) / direction.x; } if (direction.y > 0) { tymin = (min.y - origin.y) / direction.y; tymax = (max.y - origin.y) / direction.y; } else if(direction.y < 0) { tymin = (max.y - origin.y) / direction.y; tymax = (min.y - origin.y) / direction.y; } if (direction.z > 0) { tzmin = (min.z - origin.z) / direction.z; tzmax = (max.z - origin.z) / direction.z; } else if(direction.z < 0) { tzmin = (max.z - origin.z) / direction.z; tzmax = (min.z - origin.z) / direction.z; } float start, end; start = (tmin < tymin)?((tymin < tzmin)?tzmin:tymin):((tmin < tzmin)?tzmin:tmin); end = (tmax > tymax)?((tymax > tzmax)?tzmax:tymax):((tmax > tzmax)?tzmax:tmax); if((origin.x > min.x) && (origin.x < max.x) && (origin.y > min.y) && (origin.y < max.y) && (origin.z > min.z) && (origin.z < max.z)) { end = start; start = 0; } return make_float2(start, end); } GPU float2 intersectSlab(float p, float d, float2 slab){ if (fabs(d) < 0.0001f) return make_float2(-INF, INF); float x1 = (slab.x - p) / d; float x2 = (slab.y - p) / d; if (x1 <= x2) return make_float2(x1, x2); else return make_float2(x2, x1); } GPU float2 intersectIntervals(float2 a, float2 b){ if (a.x > b.x) { float2 temp = a; a = b; b = temp; } if (b.x > a.y) return make_float2(INF, -INF); return make_float2(b.x, min(a.y, b.y)); } GPU float2 intersectUnitCube(float3 p, float3 d, float3 *tmp){ //float2 slab = make_float2(-1, 1); float3 min = tmp[0]; float3 max = tmp[1]; float2 slabx = make_float2(min.x, max.x); float2 slaby = make_float2(min.y, max.y); float2 slabz = make_float2(min.z, max.z); float2 tx = intersectSlab(p.x, d.x, slabx); float2 ty = intersectSlab(p.y, d.y, slaby); float2 tz = intersectSlab(p.z, d.z, slabz); // parallel test if(tx.x == -INF){ if( p.x < min.x || max.x <= p.x) return make_float2(INF, -INF); } if(ty.x == -INF){ if( p.y < min.y || max.y <= p.y) return make_float2(INF, -INF); } if(tz.x == -INF){ if( p.z < min.z || max.z <= p.z) return make_float2(INF, -INF); } return 
intersectIntervals(tx, intersectIntervals(ty, tz)); //return make_float2(slaby.y, 0); } template<typename T> GPU line_iter perspective_iter(T* volume, float x, float y, float step, float near, VIVALDI_DATA_RANGE* sdr){ int4 start = sdr->data_start; int4 end = sdr->data_end; float data_halo = sdr->data_halo; float3 ray_direction = make_float3(x,y,near); float3 ray_origin = make_float3(0); start = start + make_int4(data_halo); end = end - make_int4(data_halo); float3 min_max[2]; min_max[0] = make_float3(start.x, start.y, start.z); min_max[1] = make_float3(end.x, end.y, end.z); float o_x, o_y, o_z; o_x = inv_modelview[0][0] * ray_origin.x + inv_modelview[0][1] * ray_origin.y + inv_modelview[0][2] * ray_origin.z + inv_modelview[0][3]; o_y = inv_modelview[1][0] * ray_origin.x + inv_modelview[1][1] * ray_origin.y + inv_modelview[1][2] * ray_origin.z + inv_modelview[1][3]; o_z = inv_modelview[2][0] * ray_origin.x + inv_modelview[2][1] * ray_origin.y + inv_modelview[2][2] * ray_origin.z + inv_modelview[2][3]; ray_origin = make_float3(o_x, o_y, o_z); o_x = inv_modelview[0][0] * ray_direction.x + inv_modelview[0][1] * ray_direction.y + inv_modelview[0][2] * ray_direction.z;// + inv_modelview[0][3]; o_y = inv_modelview[1][0] * ray_direction.x + inv_modelview[1][1] * ray_direction.y + inv_modelview[1][2] * ray_direction.z;// + inv_modelview[1][3]; o_z = inv_modelview[2][0] * ray_direction.x + inv_modelview[2][1] * ray_direction.y + inv_modelview[2][2] * ray_direction.z;// + inv_modelview[2][3]; ray_direction = normalize(make_float3(o_x, o_y, o_z)); float2 interval = intersectUnitCube(ray_origin, ray_direction, min_max); // float val; // val = interval.x; // return line_iter(make_float3(val), make_float3(val,val,val+1), 1.0); if(interval.x == INF) return line_iter(make_float3(0), make_float3(0), 1.0); float3 S = ray_origin + interval.x * ray_direction; float3 E = ray_origin + interval.y * ray_direction; return line_iter(S,E,step); } // Orthogonal_iter with pre-computing 
// Orthogonal-projection ray iterator.
// Casts a ray from screen position p along +z, maps origin and direction into
// volume space through inv_modelview, clips the ray against the halo-shrunk
// data bounds, and returns a line_iter stepping entry -> exit with stride `step`.
// On a miss (interval.x == INF) a degenerate zero-length iterator is returned.
template<typename T> GPU line_iter orthogonal_iter(T* volume, float2 p, float step, VIVALDI_DATA_RANGE* sdr){
    // Valid sample region: data extent shrunk by the halo on every side.
    int4 lo = sdr->data_start;
    int4 hi = sdr->data_end;
    int halo = sdr->data_halo;
    lo = lo + make_int4(halo);
    hi = hi - make_int4(halo);

    float3 box[2];
    box[0] = make_float3(lo.x, lo.y, lo.z);
    box[1] = make_float3(hi.x, hi.y, hi.z);

    // Ray origin (p.x, p.y, 0) transformed as a point (translation applied).
    float3 eye = make_float3(
        inv_modelview[0][0] * p.x + inv_modelview[0][1] * p.y + inv_modelview[0][2] * 0 + inv_modelview[0][3],
        inv_modelview[1][0] * p.x + inv_modelview[1][1] * p.y + inv_modelview[1][2] * 0 + inv_modelview[1][3],
        inv_modelview[2][0] * p.x + inv_modelview[2][1] * p.y + inv_modelview[2][2] * 0 + inv_modelview[2][3]);

    // View direction (0,0,1) transformed as a vector (no translation), renormalized.
    float3 view = make_float3(0, 0, 1);
    float3 dir = normalize(make_float3(
        inv_modelview[0][0] * view.x + inv_modelview[0][1] * view.y + inv_modelview[0][2] * view.z,
        inv_modelview[1][0] * view.x + inv_modelview[1][1] * view.y + inv_modelview[1][2] * view.z,
        inv_modelview[2][0] * view.x + inv_modelview[2][1] * view.y + inv_modelview[2][2] * view.z));

    float2 interval = intersectUnitCube(eye, dir, box);
    if(interval.x == INF)
        return line_iter(make_float3(0), make_float3(0), 1.0);  // ray misses the volume

    return line_iter(eye + interval.x * dir, eye + interval.y * dir, step);
}

// Convenience overload taking the screen position as two scalars.
template<typename T> GPU line_iter orthogonal_iter(T* volume, float x, float y, float step, VIVALDI_DATA_RANGE* sdr){
    return orthogonal_iter(volume, make_float2(x,y), step, sdr);
}

// Domain Specific functions
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Phong lighting with the light position mapped through inv_modelview.
// L     : light position (transformed into volume space here)
// pos   : shaded point
// N     : surface normal
// omega : direction toward the viewer
// kd/ks : diffuse/specular coefficients; n: specular exponent; amb: ambient term
GPU float3 phong(float3 L, float3 pos, float3 N, float3 omega, float3 kd, float3 ks, float n, float3 amb){
    // Transform the light position as a point (rotation + translation).
    float lx = inv_modelview[0][0] * L.x + inv_modelview[0][1] * L.y + inv_modelview[0][2] * L.z + inv_modelview[0][3];
    float ly = inv_modelview[1][0] * L.x + inv_modelview[1][1] * L.y + inv_modelview[1][2] * L.z + inv_modelview[1][3];
    float lz = inv_modelview[2][0] * L.x + inv_modelview[2][1] * L.y + inv_modelview[2][2] * L.z + inv_modelview[2][3];
    L.x = lx;
    L.y = ly;
    L.z = lz;
    L = normalize(L - pos);                 // direction from the shaded point to the light

    // ambient
    float3 color = amb;
    // diffuse
    float lobe = max(dot(N, L), 0.0f);
    color += kd * lobe;
    // specular
    if (n > 0) {
        float3 refl = reflect(-L, N);
        lobe = pow(fmaxf(dot(refl, omega), 0), n);
        color += ks * lobe;
    }
    // clamping is a hack, but looks better
    return fminf(color, make_float3(1));
}

// Phong lighting with a precomputed light direction L (no modelview transform).
GPU float3 phong(float3 L, float3 N, float3 omega, float3 kd, float3 ks, float n, float3 amb){
    // ambient
    float3 color = amb;
    // diffuse
    float lobe = max(dot(N, L), 0.0f);
    color += kd * lobe;
    // specular
    if (n > 0) {
        float3 refl = reflect(-L, N);
        lobe = pow(fmaxf(dot(refl, omega), 0), n);
        color += ks * lobe;
    }
    // clamping is a hack, but looks better
    return fminf(color, make_float3(1));
}

// Lambertian (diffuse-only) shading term.
GPU float3 diffuse(float3 L, float3 N, float3 kd){
    return kd * max(dot(N, L), 0.0f);
}

// 5-point discrete Laplacian of a 2D image at p: u + d + l + r - 4*center.
template<typename R,typename T> GPU R laplacian(T* image, float2 p, VIVALDI_DATA_RANGE* sdr){
    float px = p.x;
    float py = p.y;
    R center = point_query_2d<R>(image, px, py,     sdr);
    R up     = point_query_2d<R>(image, px, py + 1, sdr);
    R down   = point_query_2d<R>(image, px, py - 1, sdr);
    R left   = point_query_2d<R>(image, px - 1, py, sdr);
    R right  = point_query_2d<R>(image, px + 1, py, sdr);
    return up + down + left + right - 4.0*center;
}

// Scalar-coordinate convenience overload.
template<typename R,typename T> GPU R laplacian(T* image, float x, float y, VIVALDI_DATA_RANGE* sdr){
    return laplacian<R>(image, make_float2(x,y), sdr);
}

// memory copy functions //////////////////////////////////////////////////////////////////////////////
// halo memset function (disabled; kept for reference) ////////////////////////////////////////////////
extern "C"{
/*
__global__ void halo_memeset( float3* rb, VIVALDI_DATA_RANGE* rb_DATA_RANGE,
                              int z_start, int z_end, int y_start, int y_end, int x_start, int x_end)
{
    // parallel variables
    int x_hschoi = threadIdx.x + blockDim.x * blockIdx.x;
    int y_hschoi = threadIdx.y + blockDim.y * blockIdx.y;
    int z_hschoi = threadIdx.z + blockDim.z * blockIdx.z;
    int x = x_hschoi + x_start;
    int y = y_hschoi + y_start;
    int z = z_hschoi + z_start;
    if(x_end <= x || y_end <= y || z_end <= z)return;
    int idx = (z-rb_DATA_RANGE->start.z)*(rb_DATA_RANGE->end.x-rb_DATA_RANGE->start.x)*(rb_DATA_RANGE->end.y-rb_DATA_RANGE->start.y)
            + (y-rb_DATA_RANGE->start.y)*(rb_DATA_RANGE->end.x-rb_DATA_RANGE->start.x)
            + (x-rb_DATA_RANGE->start.x);
    int3 full_data_start = make_int3(rb_DATA_RANGE->full_data_start);
    int3 full_data_end = make_int3(rb_DATA_RANGE->full_data_end);
    int3 start = make_int3(rb_DATA_RANGE->start);
    int3 end = make_int3(rb_DATA_RANGE->end);
    x = x - start.x;
    y = y - start.y;
    z = z - start.z;
    int buffer_X = end.x - start.x;
    int buffer_Y = end.y - start.y;
    int buffer_Z = end.z - start.z;
    // NOTE(review): the last clause compares y against full_data_end.z — looks like a typo for z.
    if (!(full_data_start.x <= x && x < full_data_end.x &&
          full_data_start.y <= y && y < full_data_end.y &&
          full_data_start.y <= y && y < full_data_end.z)){
        r[idx] = initial(rb[0]);
    }
}
*/
}

// Front-to-back alpha compositing of `next` behind the accumulated `origin`.
// Components are in [0,255]; next's color is weighted by its own alpha.
__device__ float4 alpha_compositing(float4 origin, float4 next)
{
    float a = origin.w;
    float w = a + (1-a/255) * next.w;
    w = (w > 255)? 255 : w;
    float x = origin.x + (1-a/255) * next.x * next.w/255.0f;
    float y = origin.y + (1-a/255) * next.y * next.w/255.0f;
    float z = origin.z + (1-a/255) * next.z * next.w/255.0f;
    return make_float4(x,y,z,w);
}

// Same compositing, but next's color is taken as-is (not weighted by its alpha).
__device__ float4 alpha_compositing_wo_alpha(float4 origin, float4 next)
{
    float a = origin.w;
    float w = a + (1-a/255) * next.w;
    w = (w > 255)? 255 : w;
    float x = origin.x + (1-a/255) * next.x;
    float y = origin.y + (1-a/255) * next.y;
    float z = origin.z + (1-a/255) * next.z;
    return make_float4(x,y,z,w);
}

// Composite the accumulated color over an opaque white background.
__device__ float4 background_white(float4 origin)
{
    float a = origin.w;
    return make_float4(origin.x + (1-a/255.0f) * 255.0f,
                       origin.y + (1-a/255.0f) * 255.0f,
                       origin.z + (1-a/255.0f) * 255.0f,
                       a);
}

// Inverse of background_white: subtract the white-background contribution.
// (Arithmetic kept in double, matching the original literals.)
__device__ float4 detach(float4 origin)
{
    float a = origin.w;
    return make_float4(origin.x - (1-a/255.0)*255.0,
                       origin.y - (1-a/255.0)*255.0,
                       origin.z - (1-a/255.0)*255.0,
                       a);
}

texture<float4, 2> TFF;   // transfer-function lookup table (normalized RGBA)
#include <stdio.h>

// Map a scalar sample through the transfer-function texture; the texture
// stores [0,1] RGBA which is rescaled here to [0,255].
__device__ float4 transfer(float a)
{
    float4 t = tex2D(TFF, a/TF_bandwidth* 255, 0);
    return make_float4(t.x*255.0, t.y*255.0, t.z*255.0, t.w*255);
}
// Vector overloads: only the first component is looked up.
__device__ float4 transfer(float2 a) { return transfer(a.x); }
__device__ float4 transfer(float3 a) { return transfer(a.x); }
__device__ float4 transfer(float4 a) { return transfer(a.x); }

texture<float4, 2> TFF1;
texture<float4, 2> TFF2;
texture<float4, 2> TFF3;
texture<float4, 2> TFF4;

// Per-channel transfer function: chan in [0,4] selects one of the TF textures.
// NOTE(review): t stays uninitialized when chan is outside [0,4], as in the original.
__device__ float4 transfer(float a, int chan)
{
    float4 t;
    if(chan == 0)      t = tex2D(TFF,  a/TF_bandwidth * 255, 0);
    else if(chan == 1) t = tex2D(TFF1, a/TF_bandwidth * 255, 0);
    else if(chan == 2) t = tex2D(TFF2, a/TF_bandwidth * 255, 0);
    else if(chan == 3) t = tex2D(TFF3, a/TF_bandwidth * 255, 0);
    else if(chan == 4) t = tex2D(TFF4, a/TF_bandwidth * 255, 0);
    return make_float4(t.x*255.0, t.y*255.0, t.z*255.0, t.w*255);
}
__device__ float4 transfer(float2 a, int chan) { return transfer(a.x, chan); }
__device__ float4 transfer(float3 a, int chan) { return transfer(a.x, chan); }
__device__ float4 transfer(float4 a, int chan) { return transfer(a.x, chan); }

// Average two channel colors.
__device__ float4 ch_binder(float4 a, float4 b) { return (a + b) / 2.0f; }

// floor() truncated to int.
__device__ int floor_tmp(float a) { return floor(a); }
// ==== fb35d2719d546ce6b7b99e2d54920d5588e8cff8.cu (dataset separator: original CUDA source follows) ====
#define GPU inline __device__ #define uchar unsigned char #define BORDER_REPLICATE 1 #define BORDER_REFLECT 2 #define BORDER_REFLECT_101 3 #define BORDER_WRAP 4 #include<stdio.h> #include<vector_types.h> #include<vector_functions.h> __device__ int DEVICE_NUMBER; __device__ float modelview[4][4]; __device__ float inv_modelview[4][4]; __device__ int slider[4]; __device__ int slider_opacity[4]; __device__ float TF_bandwidth; __device__ int front_back; // clamp GPU float clamp(float f, float a, float b); GPU float2 clamp(float2 v, float a, float b); GPU float3 clamp(float3 v, float a, float b); GPU float3 clamp(float3 v, float3 a, float3 b); GPU float4 clamp(float4 v, float a, float b); GPU float4 clamp(float4 v, float4 a, float4 b); // rgba class class RGBA{ public: unsigned char r, g, b, a; GPU RGBA(float3 rgb, float a_in){ r = clamp(rgb.x, 0.0f, 255.0f); g = clamp(rgb.y, 0.0f, 255.0f); b = clamp(rgb.z, 0.0f, 255.0f); a = clamp(a_in, 0.0f, 255.0f); } GPU RGBA(float4 rgba){ r = clamp(rgba.x, 0.0f, 255.0f); g = clamp(rgba.y, 0.0f, 255.0f); b = clamp(rgba.z, 0.0f, 255.0f); a = clamp(rgba.w, 0.0f, 255.0f); } GPU RGBA(float r_in, float g_in, float b_in, float a_in){ r = clamp(r_in, 0.0f, 255.0f); g = clamp(g_in, 0.0f, 255.0f); b = clamp(b_in, 0.0f, 255.0f); a = clamp(a_in, 0.0f, 255.0f); } GPU RGBA(float c){ r = clamp(c, 0.0f, 255.0f); g = clamp(c, 0.0f, 255.0f); b = clamp(c, 0.0f, 255.0f); a = clamp(255.0f, 0.0f, 255.0f); } GPU RGBA(){ r = g = b = 0; a = 1; } }; // rgb class class RGB{ public: unsigned char r, g, b; GPU RGB(float3 rgb) { r = clamp(rgb.x, 0.0f, 255.0f); g = clamp(rgb.y, 0.0f, 255.0f); b = clamp(rgb.z, 0.0f, 255.0f); } GPU RGB(float4 rgb) { r = clamp(rgb.x, 0.0f, 255.0f); g = clamp(rgb.y, 0.0f, 255.0f); b = clamp(rgb.z, 0.0f, 255.0f); } GPU RGB(float r_in, float g_in, float b_in) { r = clamp(r_in, 0.0f, 255.0f); g = clamp(g_in, 0.0f, 255.0f); b = clamp(b_in, 0.0f, 255.0f); } GPU RGB(float a) { r = g = b = clamp(a, 0.0f, 255.0f); } GPU RGB() { r = g = b = 
0; } }; class VIVALDI_DATA_RANGE{ public: int4 data_start, data_end; int4 full_data_start, full_data_end; int4 buffer_start, buffer_end; int data_halo; int buffer_halo; }; // data type converters //////////////////////////////////////////////////////////////////////////////// GPU float convert(char1 a){ return float(a.x); } GPU float convert(uchar1 a){ return float(a.x); } GPU float convert(short1 a){ return float(a.x); } GPU float convert(ushort1 a){ return float(a.x); } GPU float convert(int1 a){ return float(a.x); } GPU float convert(uint1 a){ return float(a.x); } GPU float convert(float1 a){ return float(a.x); } GPU float convert(double1 a){ return float(a.x); } GPU float convert(double a){ return float(a); } GPU float2 convert(char2 a){ return make_float2(a.x, a.y); } GPU float2 convert(uchar2 a){ return make_float2(a.x, a.y); } GPU float2 convert(short2 a){ return make_float2(a.x,a.y); } GPU float2 convert(ushort2 a){ return make_float2(a.x,a.y); } GPU float2 convert(int2 a){ return make_float2(a.x,a.y); } GPU float2 convert(uint2 a){ return make_float2(a.x,a.y); } GPU float2 convert(float2 a){ return make_float2(a.x,a.y); } GPU float2 convert(double2 a){ return make_float2(a.x,a.y); } GPU float3 convert(RGB a){ return make_float3(a.r,a.g,a.b); } GPU float3 convert(char3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(uchar3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(short3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(ushort3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(int3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(uint3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(float3 a){ return make_float3(a.x, a.y, a.z); } GPU float3 convert(double3 a){ return make_float3(a.x, a.y, a.z); } GPU float4 convert(RGBA a){ return make_float4(a.r,a.g,a.b,a.a); } GPU float4 convert(char4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(uchar4 a){ return make_float4(a.x, 
a.y, a.z, a.w); } GPU float4 convert(short4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(ushort4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(int4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(uint4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(float4 a){ return make_float4(a.x, a.y, a.z, a.w); } GPU float4 convert(double4 a){ return make_float4(a.x, a.y, a.z, a.w); } // data_type init ////////////////////////////////////////////////////////////// GPU float initial(uchar1 a){ return 0.0; } GPU float initial(char1 a){ return 0.0; } GPU float initial(ushort1 a){ return 0.0; } GPU float initial(short1 a){ return 0.0; } GPU float initial(int1 a){ return 0.0; } GPU float initial(uint1 a){ return 0.0; } GPU float initial(float1 a){ return 0.0; } GPU float initial(float a){ return 0.0; } GPU float initial(double1 a){ return 0.0; } GPU float2 initial(char2 a){ return make_float2(0); } GPU float2 initial(uchar2 a){ return make_float2(0); } GPU float2 initial(short2 a){ return make_float2(0); } GPU float2 initial(ushort2 a){ return make_float2(0); } GPU float2 initial(int2 a){ return make_float2(0); } GPU float2 initial(uint2 a){ return make_float2(0); } GPU float2 initial(float2 a){ return make_float2(0); } GPU float2 initial(double2 a){ return make_float2(0); } GPU float3 initial(RGB a){ return make_float3(0); } GPU float3 initial(char3 a){ return make_float3(0); } GPU float3 initial(uchar3 a){ return make_float3(0); } GPU float3 initial(short3 a){ return make_float3(0); } GPU float3 initial(ushort3 a){ return make_float3(0); } GPU float3 initial(int3 a){ return make_float3(0); } GPU float3 initial(uint3 a){ return make_float3(0); } GPU float3 initial(float3 a){ return make_float3(0); } GPU float3 initial(double3 a){ return make_float3(0); } GPU float4 initial(RGBA a){ return make_float4(0); } GPU float4 initial(char4 a){ return make_float4(0); } GPU float4 initial(uchar4 a){ return 
make_float4(0); } GPU float4 initial(short4 a){ return make_float4(0); } GPU float4 initial(ushort4 a){ return make_float4(0); } GPU float4 initial(int4 a){ return make_float4(0); } GPU float4 initial(uint4 a){ return make_float4(0); } GPU float4 initial(float4 a){ return make_float4(0); } GPU float4 initial(double4 a){ return make_float4(0); } GPU float initial2(float a){ return 1.0; } // float functions //////////////////////////////////////////////////////////////////////////////// GPU float length(float a){ if(a < 0){ return -a; } return a; } //f = value, a = min, b = max GPU float step(float edge, float x){ return x < edge ? 0 : 1; } GPU float rect(float edge0, float edge1, float x){ return edge0 <= x && x <= edge1 ? 1 : 0; } // float2 functions //////////////////////////////////////////////////////////////////////////////// // negate GPU float2 operator-(float2 a){ return make_float2(-a.x, -a.y); } // floor GPU float2 floor(const float2 v){ return make_float2(floor(v.x), floor(v.y)); } // reflect GPU float2 reflect(float2 i, float2 n){ return i - 2.0f * n * dot(n,i); } // float3 functions //////////////////////////////////////////////////////////////////////////////// // floor GPU float3 floor(const float3 v){ return make_float3(floor(v.x), floor(v.y), floor(v.z)); } // float4 functions //////////////////////////////////////////////////////////////////////////////// // additional constructors // negate GPU float4 operator-(float4 a){ return make_float4(-a.x, -a.y, -a.z, -a.w); } // floor GPU float4 floor(const float4 v){ return make_float4(floor(v.x), floor(v.y), floor(v.z), floor(v.w)); } // Frame //////////////////////////////////////////////////////////////////////////////// class Frame{ public: float3 x, y, z, origin; GPU void setDefault(float3 position) { origin = position; x = make_float3(1, 0, 0); y = make_float3(0, 1, 0); z = make_float3(0, 0, 1); } GPU void lookAt(float3 position, float3 target, float3 up) { origin = position; z = normalize(position 
- target); x = normalize(cross(up, z)); y = normalize(cross(z, x)); } GPU float3 getVectorToWorld(float3 v) { return v.x * x + v.y * y + v.z * z; } GPU float3 getPointToWorld(float3 p) { return p.x * x + p.y * y + p.z * z + origin; } }; // transfer2 //////////////////////////////////////////////////////////////////////////////// GPU float transfer2( float x0, float f0, float x1, float f1, float x) { if (x < x0) return 0; if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); return 0; } GPU float2 transfer2( float x0, float2 f0, float x1, float2 f1, float x) { if (x < x0) return make_float2(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); return make_float2(0); } GPU float3 transfer2( float x0, float3 f0, float x1, float3 f1, float x) { if (x < x0) return make_float3(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); return make_float3(0); } GPU float4 transfer2( float x0, float4 f0, float x1, float4 f1, float x) { if (x < x0) return make_float4(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); return make_float4(0); } // transfer3 //////////////////////////////////////////////////////////////////////////////// GPU float transfer3( float x0, float f0, float x1, float f1, float x2, float f2, float x) { if (x < x0) return 0; if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); if (x < x2) return lerp(f1, f2, (x - x1) / (x2 - x0)); return 0; } GPU float2 transfer3( float x0, float2 f0, float x1, float2 f1, float x2, float2 f2, float x) { if (x < x0) return make_float2(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); if (x < x2) return lerp(f1, f2, (x - x1) / (x2 - x0)); return make_float2(0); } GPU float3 transfer3( float x0, float3 f0, float x1, float3 f1, float x2, float3 f2, float x) { if (x < x0) return make_float3(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); if (x < x2) return lerp(f1, f2, (x - x1) / (x2 - x0)); return make_float3(0); } GPU float4 transfer3( float x0, float4 f0, float x1, float4 f1, float x2, 
float4 f2, float x) { if (x < x0) return make_float4(0); if (x < x1) return lerp(f0, f1, (x - x0) / (x1 - x0)); if (x < x2) return lerp(f1, f2, (x - x1) / (x2 - x0)); return make_float4(0); } // helper textures for cubic interpolation and random numbers //////////////////////////////////////////////////////////////////////////////// texture<float4, 2, cudaReadModeElementType> hgTexture; texture<float4, 2, cudaReadModeElementType> dhgTexture; texture<int, 2, cudaReadModeElementType> randomTexture; GPU float3 hg(float a){ // float a2 = a * a; // float a3 = a2 * a; // float w0 = (-a3 + 3*a2 - 3*a + 1) / 6; // float w1 = (3*a3 - 6*a2 + 4) / 6; // float w2 = (-3*a3 + 3*a2 + 3*a + 1) / 6; // float w3 = a3 / 6; // float g = w2 + w3; // float h0 = (1.0f + a) - w1 / (w0 + w1); // float h1 = (1.0f - a) + w3 / (w2 + w3); // return make_float3(h0, h1, g); return make_float3(tex2D(hgTexture, a, 0)); } GPU float3 dhg(float a){ return make_float3(tex2D(dhgTexture, a, 0)); } //iterators ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// // class make_laplacian_iter{} class line_iter{ public: float3 S,E,P,step; float len; GPU line_iter(){ } GPU line_iter(float3 from, float3 to, float d){ S = from; E = to; step = normalize(to-from)*d; len = length(to-from); P = S; if( S.x == E.x && S.y == E.y && S.z == E.z )step = make_float3(0,0,1); } GPU float3 begin(){ return S; } GPU bool hasNext(){ float3 T = P + step; if( length(T-S) > len)return false; return true; } GPU bool valid(){ if(length(P-S) < len)return true; return false; } GPU float3 next(){ P += step; return P; } GPU float3 direction(){ return normalize(step); } }; class plane_iter{ public: float2 S; float d; int max_step, step; int width; float x,y; GPU plane_iter(){ } GPU plane_iter(float2 point, float size){ S = point; d = size; width = 1+2*size; max_step = width*width; } GPU plane_iter(int x, int y, float size){ S = make_float2(x,y); d = size; width = 1+2*size; 
max_step = width*width; } GPU plane_iter(float x, float y, float size){ S = make_float2(x,y); d = size; width = 1+2*size; max_step = width*width; } GPU float2 begin(){ step = 0; x = 0; y = 0; return S + make_float2(-d,-d); } GPU bool hasNext(){ if(max_step == step)return false; return true; } GPU bool valid(){ if(max_step <= step)return false; return true; } GPU float2 next(){ step++; x++; if( x == width){ x=0; y++;} float2 P = S + make_float2( x - d, y - d); return P; } }; class cube_iter{ public: float3 S; int d; int width; int max_step, step; float x,y,z; GPU cube_iter(){ } GPU cube_iter(float3 point, float size){ S = point; d = size; width = 1+2*size; max_step = (width)*(width)*(width); } GPU cube_iter(int x,int y,int z, float radius){ S = make_float3(x,y,z); d = radius; width = 1+2*radius; max_step = (width)*(width)*(width); } GPU cube_iter(float x, float y, float z, float size){ S = make_float3(x,y,z); d = size; width = 1+2*size; max_step = (width)*(width)*(width); } GPU float3 begin(){ step = 0; x = 0; y = 0; z = 0; return S + make_float3(-d, -d, -d); } GPU bool hasNext(){ if(max_step == step)return false; return true; } GPU bool valid(){ if(max_step == step)return false; return true; } GPU float3 next(){ step++; x++; if( x == width){ x=0; y++;} if( y == width){ y=0; z++;} float3 P = S + make_float3( x - d, y - d, z - d); return P; } }; // data query functions ////////////////////////////////////////////////////////////////////////////// #define INF __int_as_float(0x7f800000) GPU int2 float2_to_int2(float2 a){ return make_int2(int(a.x), int(a.y)); } GPU int3 float3_to_int3(float3 a){ return make_int3(int(a.x), int(a.y), int(a.z)); } // BORDER handling functions GPU int border_replicate(int x, int pivot){ // aaaaaa|abcdefgh|hhhhhhh return pivot; } GPU int border_reflect(int x, int pivot){ // fedcba|abcdefgh|hgfedcb int a; a = 0; if(x < pivot) a = -1; if(x > pivot) a = 1; return 2*pivot-x + a; } GPU int border_reflect_101(int x, int pivot){ // 
gfedcb|abcdefgh|gfedcba return 2*pivot-x; } GPU int border_wrap(int x, int pivot, int w){ // cdefgh|abcdefgh|abcdefg if(x > pivot)return x - w; if(x < pivot)return x + w; return pivot; } GPU int border_constant(){ // iiiiii|abcdefgh|iiiiiii with some specified 'i' return 0; } GPU int border_switch(int x, int pivot, int w, int border){ switch(border){ case BORDER_REPLICATE: // replicate return border_replicate(x, pivot); case BORDER_REFLECT: // reflect return border_reflect(x, pivot); case BORDER_REFLECT_101: // reflect_101 return border_reflect_101(x, pivot); case BORDER_WRAP: // border_wrap return border_wrap(x, pivot, w); default: // border_constant return border_constant(); } } GPU int check_in_border(int p, int start, int end){ if(p > end-1)return 1; // right else if(p < start)return -1; // left return 0; // middle } GPU int border_handling(int p, int start, int end, int border){ int flag = -1; while(flag != 0){ flag = check_in_border(p, start, end); if(flag == 1){ p = border_switch(p, end-1, end - start, border); }else if(flag == -1){ p = border_switch(p, start, end - start, border); } } return p; } // Range check GPU bool range_check(int p, int data_start, int data_end){ if(data_start <= p && p < data_end){ return true; } return false; } // 1D data query function //////////////////////////////////////////////////////////////////////////////// template<typename R,typename T> GPU R point_query_1d(T* data, float x, int border, VIVALDI_DATA_RANGE* sdr){ // int data_start = int(sdr->data_start.x); int full_data_start = int(sdr->full_data_start.x); int full_data_end = int(sdr->full_data_end.x); int buffer_start = int(sdr->buffer_start.x); R rt; // input coordinate is world coordinate // border handling int flag1; // -1, 0, 1 flag1 = check_in_border(x, full_data_start, full_data_end); bool flag; // border flag = (1 <= border && border <= 4); if( (!flag && (flag1 == 0)) || (flag)){ // Border calculation x = border_handling(x, full_data_start, full_data_end, border); 
// to Buffer coordinate x = x - buffer_start; rt = convert(data[int(x)]); }else{ rt = initial(data[0]); } return rt; } template<typename R,typename T> GPU R point_query_1d(T* data, float x, VIVALDI_DATA_RANGE* sdr){ return point_query_1d<R>(data, x, 0, sdr); } // 2D data query functions //////////////////////////////////////////////////////////////////////////////// template<typename R,typename T> GPU R point_query_2d(T* data, float2 p, int border, VIVALDI_DATA_RANGE* sdr){ int4 data_start = sdr->data_start; int4 data_end = sdr->data_end; int4 full_data_start = sdr->full_data_start; int4 full_data_end = sdr->full_data_end; int x = p.x; int y = p.y; int X = data_end.x - data_start.x; R rt; // Data coordinate input // border check int flag1, flag2; // -1, 0, 1 flag1 = check_in_border(x, full_data_start.x, full_data_end.x); flag2 = check_in_border(y, full_data_start.y, full_data_end.y); bool flag; // border flag = (1 <= border && border <= 4); if( (!flag && (flag1 == 0 && flag2 == 0)) || (flag)){ // Border calculation x = border_handling(x, full_data_start.x, full_data_end.x, border); y = border_handling(y, full_data_start.y, full_data_end.y, border); // Data range check bool flag_x = range_check(x, data_start.x, data_end.x); bool flag_y = range_check(y, data_start.y, data_end.y); if(flag_x && flag_y){ // to Buffer coordinate x = x - data_start.x; y = y - data_start.y; rt = convert(data[y*X + x]); }else{ rt = initial(data[0]); } }else{ rt = initial(data[0]); } return rt; } template<typename R,typename T> GPU R point_query_2d(T* image, float2 p, VIVALDI_DATA_RANGE* sdr){ return point_query_2d<R>(image, p, 0, sdr); } template<typename R,typename T> GPU R point_query_2d(T* image, float x, float y, int border, VIVALDI_DATA_RANGE* sdr){ return point_query_2d<R>(image, make_float2(x,y), border, sdr); } template<typename R,typename T> GPU R point_query_2d(T* image, float x, float y, VIVALDI_DATA_RANGE* sdr){ return point_query_2d<R>(image, make_float2(x,y), 0, sdr); } 
// Bilinearly interpolate `image` at continuous position p.
// The four neighbouring texels are fetched through point_query_2d (which
// applies the requested border-handling mode), then lerped along x and y.
//   R      - result/arithmetic type, T - storage type of the buffer
//   border - border mode (0 = constant; 1..4 = replicate/reflect/reflect101/wrap)
template<typename R,typename T>
GPU R linear_query_2d(T* image, float2 p, int border, VIVALDI_DATA_RANGE* sdr){
    //range check
    float x = p.x;
    float y = p.y;
    int fx = floor(x);
    int fy = floor(y);
    int cx = ceil(x);
    int cy = ceil(y);
    float dx = x - fx; // fractional offsets inside the cell
    float dy = y - fy;
    // (removed dead local: `R iv = initial(image[0])*0;` was never read)
    R q00 = point_query_2d<R>(image, fx, fy, border, sdr);
    R q01 = point_query_2d<R>(image, cx, fy, border, sdr);
    R q10 = point_query_2d<R>(image, fx, cy, border, sdr);
    R q11 = point_query_2d<R>(image, cx, cy, border, sdr);
    // lerp along x
    R q0 = lerp(q00, q01, dx);
    R q1 = lerp(q10, q11, dx);
    // lerp along y
    R q = lerp(q0, q1, dy);
    return q;
}
// Convenience overload: default border mode 0 (constant).
template<typename R, typename T>
GPU R linear_query_2d(T* image, float2 p, VIVALDI_DATA_RANGE* sdr){
    return linear_query_2d<R>(image, p, 0, sdr);
}
// Convenience overload: separate x/y coordinates.
template<typename R, typename T>
GPU R linear_query_2d(T* image, float x, float y, int border, VIVALDI_DATA_RANGE* sdr){
    return linear_query_2d<R>(image, make_float2(x,y), border, sdr);
}
// Convenience overload: separate x/y coordinates, default border mode.
template<typename R, typename T>
GPU R linear_query_2d(T* image, float x, float y, VIVALDI_DATA_RANGE* sdr){
    return linear_query_2d<R>(image, make_float2(x,y), 0, sdr);
}
// Per-axis central-difference gradient magnitude of a 2D buffer:
// returns (|f(x+1,y)-f(x-1,y)|, |f(x,y+1)-f(x,y-1)|) / 2.
// Positions closer than `halo` texels to the local data boundary
// return (0,0) so the stencil never reads past the halo region.
template<typename R, typename T>
GPU float2 linear_gradient_2d(T* image, float2 p, VIVALDI_DATA_RANGE* sdr){
    int4 data_start = sdr->data_start;
    int4 data_end = sdr->data_end;
    int halo = sdr->data_halo;
    float x = p.x - data_start.x; // to buffer-local coordinates for the bounds test
    float y = p.y - data_start.y;
    int X = data_end.x - data_start.x;
    int Y = data_end.y - data_start.y;
    float2 rbf = make_float2(0);
    if( x < halo)return rbf;
    if( y < halo)return rbf;
    if( x >= X-halo)return rbf;
    if( y >= Y-halo)return rbf;
    float delta = 1.0f;
    R xf, xb;
    xf = linear_query_2d<R>(image, make_float2(p.x + delta, p.y), sdr);
    xb = linear_query_2d<R>(image, make_float2(p.x - delta, p.y), sdr);
    float dx = length(xf-xb); // magnitude only — sign of the slope is discarded
    R yf, yb;
    yf = linear_query_2d<R>(image, make_float2(p.x, p.y + delta), sdr);
    yb = linear_query_2d<R>(image, make_float2(p.x, p.y - delta), sdr);
    float dy = length(yf-yb);
    return make_float2(dx,dy)/(2*delta);
}
// Convenience overload: separate float coordinates.
template<typename R, typename T>
GPU float2 linear_gradient_2d(T* image, float x, float y, VIVALDI_DATA_RANGE* sdr){
    return linear_gradient_2d<R>(image, make_float2(x,y), sdr);
}
// Convenience overload: integer coordinates.
template<typename R, typename T>
GPU float2 linear_gradient_2d(T* image, int x, int y, VIVALDI_DATA_RANGE* sdr){
    return linear_gradient_2d<R>(image, make_float2(x,y), sdr);
}

// 3D data query functions
////////////////////////////////////////////////////////////////////////////////
// Nearest-texel lookup of a 3D buffer at (truncated) position p.
// Border modes 1..4 remap out-of-range coordinates into the full data set;
// mode 0 (constant) yields initial(image[0]) outside the full data range.
// Samples that fall outside this rank's local data range also yield
// initial(image[0]).
template<typename R, typename T>
GPU R point_query_3d(T* image, float3 p, int border, VIVALDI_DATA_RANGE* sdr){
    int4 data_start = sdr->data_start;
    int4 data_end = sdr->data_end;
    int4 full_data_start = sdr->full_data_start;
    int4 full_data_end = sdr->full_data_end;
    int4 buffer_start = sdr->buffer_start;
    int4 buffer_end = sdr->buffer_end;
    int x = p.x; // truncation to integer texel index
    int y = p.y;
    int z = p.z;
    int X = buffer_end.x - buffer_start.x; // buffer pitch in x
    int Y = buffer_end.y - buffer_start.y; // buffer pitch in y
    R rt;
    // Data coordinate input
    // border check
    int flag1, flag2, flag3; // -1, 0, 1
    flag1 = check_in_border(x, full_data_start.x, full_data_end.x);
    flag2 = check_in_border(y, full_data_start.y, full_data_end.y);
    flag3 = check_in_border(z, full_data_start.z, full_data_end.z);
    bool flag; // true when a remapping border mode is requested
    flag = (1 <= border && border <= 4);
    if( (!flag && (flag1 == 0 && flag2 == 0 && flag3 == 0)) || (flag)){
        // Border calculation
        x = border_handling(x, full_data_start.x, full_data_end.x, border);
        y = border_handling(y, full_data_start.y, full_data_end.y, border);
        z = border_handling(z, full_data_start.z, full_data_end.z, border);
        bool flag_x = range_check(x, data_start.x, data_end.x);
        bool flag_y = range_check(y, data_start.y, data_end.y);
        bool flag_z = range_check(z, data_start.z, data_end.z);
        if(flag_x && flag_y && flag_z){
            // to Buffer coordinate
            x = x - buffer_start.x;
            y = y - buffer_start.y;
            z = z - buffer_start.z;
            rt = convert(image[z*Y*X + y*X + x]);
        }else{
            rt = initial(image[0]);
        }
    }else{
        rt = initial(image[0]);
    }
    return rt;
}
// 3D point-query convenience overloads.
template<typename R, typename T>
GPU R point_query_3d(T* image, float3 p, VIVALDI_DATA_RANGE* sdr){
    return point_query_3d<R>(image, p, 0, sdr);
}
template<typename R, typename T>
GPU R point_query_3d(T* image, float x, float y, float z, int border, VIVALDI_DATA_RANGE* sdr){
    return point_query_3d<R>(image, make_float3(x,y,z), border, sdr);
}
template<typename R, typename T>
GPU R point_query_3d(T* image, float x, float y, float z, VIVALDI_DATA_RANGE* sdr){
    return point_query_3d<R>(image, make_float3(x,y,z), 0, sdr);
}
// Difference term used when differentiating a lerp stage:
// (b - a) scaled by the (derivative) filter weight t.
GPU float d_lerp(float a, float b, float t){
    return (b - a) * t;
}
// Trilinear interpolation of `volume` at continuous position p.
// Corner samples are named q{x}{y}{z} (0 = floor, 1 = ceil on that axis).
template<typename R, typename T>
GPU R linear_query_3d(T* volume, float3 p, int border, VIVALDI_DATA_RANGE* sdr){
    //tri linear interpolation
    float x,y,z;
    x = p.x;
    y = p.y;
    z = p.z;
    int fx = floor(x);
    int fy = floor(y);
    int fz = floor(z);
    int cx = ceil(x);
    int cy = ceil(y);
    int cz = ceil(z);
    R q000 = point_query_3d<R>(volume, fx, fy, fz, border, sdr);
    R q001 = point_query_3d<R>(volume, fx, fy, cz, border, sdr);
    R q010 = point_query_3d<R>(volume, fx, cy, fz, border, sdr);
    R q011 = point_query_3d<R>(volume, fx, cy, cz, border, sdr);
    R q100 = point_query_3d<R>(volume, cx, fy, fz, border, sdr);
    R q101 = point_query_3d<R>(volume, cx, fy, cz, border, sdr);
    R q110 = point_query_3d<R>(volume, cx, cy, fz, border, sdr);
    R q111 = point_query_3d<R>(volume, cx, cy, cz, border, sdr);
    float dx = x - fx;
    float dy = y - fy;
    float dz = z - fz;
    // BUGFIX: the corner pairs (q**0, q**1) differ along the z axis, so the
    // first reduction must use the z weight. The original code used dx here
    // and dz in the last stage, swapping the x and z filter weights (wrong
    // result whenever dx != dz). The 2D and cubic variants use the correct
    // ordering.
    // lerp along z
    R q00 = lerp(q000, q001, dz);
    R q01 = lerp(q010, q011, dz);
    R q10 = lerp(q100, q101, dz);
    R q11 = lerp(q110, q111, dz);
    // lerp along y
    R q0 = lerp(q00, q01, dy);
    R q1 = lerp(q10, q11, dy);
    // lerp along x
    R q = lerp(q0, q1, dx);
    return q;
}
// Convenience overloads for linear_query_3d.
template<typename R, typename T>
GPU R linear_query_3d(T* volume, float3 p, VIVALDI_DATA_RANGE* sdr){
    return linear_query_3d<R>(volume, p, 0, sdr);
}
template<typename R, typename T>
GPU R linear_query_3d(T* volume, float x, float y, float z, int border, VIVALDI_DATA_RANGE* sdr){
    return linear_query_3d<R>(volume, make_float3(x,y,z), border, sdr);
}
template<typename R, typename T>
GPU R linear_query_3d(T* volume, float x, float y, float z, VIVALDI_DATA_RANGE* sdr){
    return linear_query_3d<R>(volume, make_float3(x,y,z), 0, sdr);
}
// Signed central-difference gradient of a 3D volume, one component per axis.
// NOTE(review): requires R to be a scalar type (the components are passed to
// make_float3) — confirm against callers.
template<typename R, typename T>
GPU float3 linear_gradient_3d(T* volume, float3 p, VIVALDI_DATA_RANGE* sdr){
    // (removed dead local: `float3 rbf = make_float3(0);` was never read)
    float delta = 1.0f;
    //float delta = 0.01f;
    R dx = linear_query_3d<R>(volume, make_float3(p.x + delta, p.y, p.z), sdr)
         - linear_query_3d<R>(volume, make_float3(p.x - delta, p.y, p.z), sdr);
    R dy = linear_query_3d<R>(volume, make_float3(p.x, p.y + delta, p.z), sdr)
         - linear_query_3d<R>(volume, make_float3(p.x, p.y - delta, p.z), sdr);
    R dz = linear_query_3d<R>(volume, make_float3(p.x, p.y, p.z + delta), sdr)
         - linear_query_3d<R>(volume, make_float3(p.x, p.y, p.z - delta), sdr);
    // float dxl = length(dx);
    // float dyl = length(dy);
    // float dzl = length(dz);
    // return make_float3(dxl, dyl, dzl) / (2 * delta);
    // return make_float3(dxl) / (2 * delta);
    // return make_float3(dx);
    return make_float3(dx, dy, dz) / (2 * delta);
}
template<typename R, typename T>
GPU float3 linear_gradient_3d(T* volume, int x, int y, int z, VIVALDI_DATA_RANGE* sdr){
    return linear_gradient_3d<R>(volume, make_float3(x,y,z), sdr);
}
template<typename R, typename T>
GPU float3 linear_gradient_3d(T* volume, float x, float y, float z, VIVALDI_DATA_RANGE* sdr){
    return linear_gradient_3d<R>(volume, make_float3(x,y,z), sdr);
}
// Tricubic B-spline filtering built from 8 trilinear fetches, using the
// hg weight/offset table texture (alpha is scaled to the table's 0..255
// coordinate range). NOTE(review): declared to return T while computing R
// values internally — presumably callers use R == T; confirm.
template<typename R,typename T>
GPU T cubic_query_3d(T* volume, float3 p, VIVALDI_DATA_RANGE* sdr){
    float3 alpha = 255 * (p - floor(p - 0.5f) - 0.5f);
    float3 hgx = hg(alpha.x); // (h0, h1, g) per axis
    float3 hgy = hg(alpha.y);
    float3 hgz = hg(alpha.z);
    // 8 linear queries
    R q000 = linear_query_3d<R>(volume, p.x - hgx.x, p.y - hgy.x, p.z - hgz.x, sdr);
    R q001 = linear_query_3d<R>(volume, p.x - hgx.x, p.y - hgy.x, p.z + hgz.y, sdr);
    R q010 = linear_query_3d<R>(volume, p.x - hgx.x, p.y + hgy.y, p.z - hgz.x, sdr);
    R q011 = linear_query_3d<R>(volume, p.x - hgx.x, p.y + hgy.y, p.z + hgz.y, sdr);
    R q100 = linear_query_3d<R>(volume, p.x + hgx.y, p.y - hgy.x, p.z - hgz.x, sdr);
    R q101 = linear_query_3d<R>(volume, p.x + hgx.y, p.y - hgy.x, p.z + hgz.y, sdr);
    R q110 = linear_query_3d<R>(volume, p.x + hgx.y, p.y + hgy.y, p.z - hgz.x, sdr);
    R q111 = linear_query_3d<R>(volume, p.x + hgx.y, p.y + hgy.y, p.z + hgz.y, sdr);
    // lerp along z
    R q00 = lerp(q000, q001, hgz.z);
    R q01 = lerp(q010, q011, hgz.z);
    R q10 = lerp(q100, q101, hgz.z);
    R q11 = lerp(q110, q111, hgz.z);
    // lerp along y
    R q0 = lerp(q00, q01, hgy.z);
    R q1 = lerp(q10, q11, hgy.z);
    // lerp along x
    R q = lerp(q0, q1, hgx.z);
    return q;
}
// Tricubic gradient: the axis being differentiated uses the derivative
// weight/offset table (dhg); the other two axes keep the hg weights.
template<typename R,typename T>
GPU float3 cubic_gradient_3d(T* data, float3 p, VIVALDI_DATA_RANGE* sdr){
    // (removed dead local: `float3 rbf = make_float3(0);` was never read)
    float3 alpha = 255 * (p - floor(p - 0.5f) - 0.5f);
    float3 hgx = hg(alpha.x);
    float3 hgy = hg(alpha.y);
    float3 hgz = hg(alpha.z);
    float3 dhgx = dhg(alpha.x);
    float3 dhgy = dhg(alpha.y);
    float3 dhgz = dhg(alpha.z);
    // compute x-derivative
    R q000 = linear_query_3d<R>(data, p.x - dhgx.x, p.y - hgy.x, p.z - hgz.x, sdr);
    R q001 = linear_query_3d<R>(data, p.x - dhgx.x, p.y - hgy.x, p.z + hgz.y, sdr);
    R q010 = linear_query_3d<R>(data, p.x - dhgx.x, p.y + hgy.y, p.z - hgz.x, sdr);
    R q011 = linear_query_3d<R>(data, p.x - dhgx.x, p.y + hgy.y, p.z + hgz.y, sdr);
    R q100 = linear_query_3d<R>(data, p.x + dhgx.y, p.y - hgy.x, p.z - hgz.x, sdr);
    R q101 = linear_query_3d<R>(data, p.x + dhgx.y, p.y - hgy.x, p.z + hgz.y, sdr);
    R q110 = linear_query_3d<R>(data, p.x + dhgx.y, p.y + hgy.y, p.z - hgz.x, sdr);
    R q111 = linear_query_3d<R>(data, p.x + dhgx.y, p.y + hgy.y, p.z + hgz.y, sdr);
    R q00 = lerp(q000, q001, hgz.z);
    R q01 = lerp(q010, q011, hgz.z);
    R q10 = lerp(q100, q101, hgz.z);
    R q11 = lerp(q110, q111, hgz.z);
    R q0 = lerp(q00, q01, hgy.z);
    R q1 = lerp(q10, q11, hgy.z);
    float gradientX = d_lerp(q0, q1, dhgx.z);
    // compute y-derivative
    q000 = linear_query_3d<R>(data, p.x - hgx.x, p.y - dhgy.x, p.z - hgz.x, sdr);
    q001 = linear_query_3d<R>(data, p.x - hgx.x, p.y - dhgy.x, p.z + hgz.y, sdr);
    q010 = linear_query_3d<R>(data, p.x - hgx.x, p.y + dhgy.y, p.z - hgz.x, sdr);
    q011 = linear_query_3d<R>(data, p.x - hgx.x, p.y + dhgy.y, p.z + hgz.y, sdr);
    q100 = linear_query_3d<R>(data, p.x + hgx.y, p.y - dhgy.x, p.z - hgz.x, sdr);
    q101 = linear_query_3d<R>(data, p.x + hgx.y, p.y - dhgy.x, p.z + hgz.y, sdr);
    q110 = linear_query_3d<R>(data, p.x + hgx.y, p.y + dhgy.y, p.z - hgz.x, sdr);
    q111 = linear_query_3d<R>(data, p.x + hgx.y, p.y + dhgy.y, p.z + hgz.y, sdr);
    q00 = lerp(q000, q001, hgz.z);
    q01 = lerp(q010, q011, hgz.z);
    q10 = lerp(q100, q101, hgz.z);
    q11 = lerp(q110, q111, hgz.z);
    q0 = d_lerp(q00, q01, dhgy.z);
    q1 = d_lerp(q10, q11, dhgy.z);
    float gradientY = lerp(q0, q1, hgx.z);
    // compute z-derivative
    q000 = linear_query_3d<R>(data, p.x - hgx.x, p.y - hgy.x, p.z - dhgz.x, sdr);
    q001 = linear_query_3d<R>(data, p.x - hgx.x, p.y - hgy.x, p.z + dhgz.y, sdr);
    q010 = linear_query_3d<R>(data, p.x - hgx.x, p.y + hgy.y, p.z - dhgz.x, sdr);
    q011 = linear_query_3d<R>(data, p.x - hgx.x, p.y + hgy.y, p.z + dhgz.y, sdr);
    q100 = linear_query_3d<R>(data, p.x + hgx.y, p.y - hgy.x, p.z - dhgz.x, sdr);
    q101 = linear_query_3d<R>(data, p.x + hgx.y, p.y - hgy.x, p.z + dhgz.y, sdr);
    q110 = linear_query_3d<R>(data, p.x + hgx.y, p.y + hgy.y, p.z - dhgz.x, sdr);
    q111 = linear_query_3d<R>(data, p.x + hgx.y, p.y + hgy.y, p.z + dhgz.y, sdr);
    q00 = d_lerp(q000, q001, dhgz.z);
    q01 = d_lerp(q010, q011, dhgz.z);
    q10 = d_lerp(q100, q101, dhgz.z);
    q11 = d_lerp(q110, q111, dhgz.z);
    q0 = lerp(q00, q01, hgy.z);
    q1 = lerp(q10, q11, hgy.z);
    float gradientZ = lerp(q0, q1, hgx.z);
    return make_float3(gradientX, gradientY, gradientZ);
}

//rotate functions
///////////////////////////////////////////////////////////////////////////////////
// Thin wrappers over the CUDA inverse trig intrinsics.
GPU float arccos(float angle){
    return acos(angle);
}
GPU float arcsin(float angle){
    return asin(angle);
}
// Euclidean norm of a float3.
GPU float norm(float3 a){
    float val = 0;
    val += a.x*a.x + a.y*a.y + a.z*a.z;
    val = sqrt(val);
    return val;
}
// Multiply a column-stored 3x3 matrix (mat[i] is column i) by vec.
GPU float3 matmul(float3* mat, float3 vec){
    float x = mat[0].x*vec.x + mat[1].x*vec.y + mat[2].x*vec.z;
    float y = mat[0].y*vec.x + mat[1].y*vec.y + mat[2].y*vec.z;
    float z = mat[0].z*vec.x + mat[1].z*vec.y + mat[2].z*vec.z;
    return make_float3(x, y, z);
}
// Invert a 3x3 matrix via the adjugate; writes a zero matrix when singular.
GPU void getInvMat(float3* mat, float3* ret) {
    double det = mat[0].x*(mat[1].y*mat[2].z-mat[1].z*mat[2].y)
               - mat[0].y*(mat[1].x*mat[2].z-mat[1].z*mat[2].x)
               + mat[0].z*(mat[1].x*mat[2].y-mat[1].y*mat[2].x);
    if(det!=0) {
        double invdet = 1/det;
        float a00 = (mat[1].y*mat[2].z-mat[2].y*mat[1].z)*invdet;
        float a01 = (mat[0].z*mat[2].y-mat[0].y*mat[2].z)*invdet;
        float a02 = (mat[0].y*mat[1].z-mat[0].z*mat[1].y)*invdet;
        float a10 = (mat[1].z*mat[2].x-mat[1].x*mat[2].z)*invdet;
        float a11 = (mat[0].x*mat[2].z-mat[0].z*mat[2].x)*invdet;
        float a12 = (mat[1].x*mat[0].z-mat[0].x*mat[1].z)*invdet;
        float a20 = (mat[1].x*mat[2].y-mat[2].x*mat[1].y)*invdet;
        float a21 = (mat[2].x*mat[0].y-mat[0].x*mat[2].y)*invdet;
        float a22 = (mat[0].x*mat[1].y-mat[1].x*mat[0].y)*invdet;
        ret[0] = make_float3(a00, a01, a02);
        ret[1] = make_float3(a10, a11, a12);
        ret[2] = make_float3(a20, a21, a22);
    } else {
        ret[0] = make_float3(0);
        ret[1] = make_float3(0);
        ret[2] = make_float3(0);
    }
}
// Barycentric-style distance test: returns the transformed z when the
// transformed (x,y) falls inside the unit triangle, -8765 (sentinel for
// "miss") otherwise.
GPU float getDistance(float3* mat, float3 vec){
    float3 tmp_mat = matmul(mat, vec);
    if(tmp_mat.z>200000 ) return -8765;
    if(tmp_mat.z<0) tmp_mat.z = 0;
    if(tmp_mat.y < 0 || tmp_mat.y > 1) return -8765;
    if(tmp_mat.x < 0 || tmp_mat.x > 1) return -8765;
    if(tmp_mat.y+tmp_mat.x > 1.0000) return -8765;
    return tmp_mat.z;
}
// Ray/AABB intersection via the slab method; tmp[0]/tmp[1] are the box
// min/max corners. Returns the (enter, exit) ray parameters; if the origin
// is inside the box, returns (0, enter).
GPU float2 getCrossedInterval(float3 origin, float3 direction, float3* tmp){
    float3 min = tmp[0];
    float3 max = tmp[1];
    float tmin=-9999.0, tmax=9999.0, tymin=-9999.0, tymax=9999.0, tzmin=-9999.0, tzmax=9999.0;
    if (direction.x > 0) {
        tmin = (min.x - origin.x) / direction.x;
        tmax = (max.x - origin.x) / direction.x;
    } else if(direction.x < 0) {
        tmin = (max.x - origin.x) / direction.x;
        tmax = (min.x - origin.x) / direction.x;
    }
    if (direction.y > 0) {
        tymin = (min.y - origin.y) / direction.y;
        tymax = (max.y - origin.y) / direction.y;
    } else if(direction.y < 0) {
        tymin = (max.y - origin.y) / direction.y;
        tymax = (min.y - origin.y) / direction.y;
    }
    if (direction.z > 0) {
        tzmin = (min.z - origin.z) / direction.z;
        tzmax = (max.z - origin.z) / direction.z;
    } else if(direction.z < 0) {
        tzmin = (max.z - origin.z) / direction.z;
        tzmax = (min.z - origin.z) / direction.z;
    }
    float start, end;
    start = (tmin < tymin)?((tymin < tzmin)?tzmin:tymin):((tmin < tzmin)?tzmin:tmin);
    end = (tmax > tymax)?((tymax > tzmax)?tzmax:tymax):((tmax > tzmax)?tzmax:tmax);
    if((origin.x > min.x) && (origin.x < max.x) &&
       (origin.y > min.y) && (origin.y < max.y) &&
       (origin.z > min.z) && (origin.z < max.z)) {
        end = start;
        start = 0;
    }
    return make_float2(start, end);
}
// 1D slab intersection; near-zero direction components yield (-INF, INF)
// (i.e. the ray is parallel to the slab).
GPU float2 intersectSlab(float p, float d, float2 slab){
    if (fabs(d) < 0.0001f) return make_float2(-INF, INF);
    float x1 = (slab.x - p) / d;
    float x2 = (slab.y - p) / d;
    if (x1 <= x2) return make_float2(x1, x2);
    else return make_float2(x2, x1);
}
// Intersection of two parameter intervals; empty result is (INF, -INF).
GPU float2 intersectIntervals(float2 a, float2 b){
    if (a.x > b.x) {
        float2 temp = a;
        a = b;
        b = temp;
    }
    if (b.x > a.y) return make_float2(INF, -INF);
    return make_float2(b.x, min(a.y, b.y));
}
// Ray/AABB intersection built from three slab tests; tmp holds min/max.
GPU float2 intersectUnitCube(float3 p, float3 d, float3 *tmp){
    //float2 slab = make_float2(-1, 1);
    float3 min = tmp[0];
    float3 max = tmp[1];
    float2 slabx = make_float2(min.x, max.x);
    float2 slaby = make_float2(min.y, max.y);
    float2 slabz = make_float2(min.z, max.z);
    float2 tx = intersectSlab(p.x, d.x, slabx);
    float2 ty = intersectSlab(p.y, d.y, slaby);
    float2 tz = intersectSlab(p.z, d.z, slabz);
    // parallel test: a parallel ray misses unless the origin lies inside the slab
    if(tx.x == -INF){
        if( p.x < min.x || max.x <= p.x) return make_float2(INF, -INF);
    }
    if(ty.x == -INF){
        if( p.y < min.y || max.y <= p.y) return make_float2(INF, -INF);
    }
    if(tz.x == -INF){
        if( p.z < min.z || max.z <= p.z) return make_float2(INF, -INF);
    }
    return intersectIntervals(tx, intersectIntervals(ty, tz));
    //return make_float2(slaby.y, 0);
}
// Build a perspective ray iterator for screen sample (x, y): transform the
// camera-space ray by the global inv_modelview matrix, clip it against the
// halo-shrunken data box, and return a line_iter over the visible segment.
template<typename T>
GPU line_iter perspective_iter(T* volume, float x, float y, float step, float near, VIVALDI_DATA_RANGE* sdr){
    int4 start = sdr->data_start;
    int4 end = sdr->data_end;
    float data_halo = sdr->data_halo;
    float3 ray_direction = make_float3(x,y,near);
    float3 ray_origin = make_float3(0);
    start = start + make_int4(data_halo); // shrink the box so sampling stays inside the halo
    end = end - make_int4(data_halo);
    float3 min_max[2];
    min_max[0] = make_float3(start.x, start.y, start.z);
    min_max[1] = make_float3(end.x, end.y, end.z);
    float o_x, o_y, o_z;
    o_x = inv_modelview[0][0] * ray_origin.x + inv_modelview[0][1] * ray_origin.y + inv_modelview[0][2] * ray_origin.z + inv_modelview[0][3];
    o_y = inv_modelview[1][0] * ray_origin.x + inv_modelview[1][1] * ray_origin.y + inv_modelview[1][2] * ray_origin.z + inv_modelview[1][3];
    o_z = inv_modelview[2][0] * ray_origin.x + inv_modelview[2][1] * ray_origin.y + inv_modelview[2][2] * ray_origin.z + inv_modelview[2][3];
    ray_origin = make_float3(o_x, o_y, o_z);
    // direction: rotate only, no translation
    o_x = inv_modelview[0][0] * ray_direction.x + inv_modelview[0][1] * ray_direction.y + inv_modelview[0][2] * ray_direction.z;// + inv_modelview[0][3];
    o_y = inv_modelview[1][0] * ray_direction.x + inv_modelview[1][1] * ray_direction.y + inv_modelview[1][2] * ray_direction.z;// + inv_modelview[1][3];
    o_z = inv_modelview[2][0] * ray_direction.x + inv_modelview[2][1] * ray_direction.y + inv_modelview[2][2] * ray_direction.z;// + inv_modelview[2][3];
    ray_direction = normalize(make_float3(o_x, o_y, o_z));
    float2 interval = intersectUnitCube(ray_origin, ray_direction, min_max);
    // float val;
    // val = interval.x;
    // return line_iter(make_float3(val), make_float3(val,val,val+1), 1.0);
    if(interval.x == INF)
        return line_iter(make_float3(0), make_float3(0), 1.0); // empty iterator on miss
    float3 S = ray_origin + interval.x * ray_direction;
    float3 E = ray_origin + interval.y * ray_direction;
    return line_iter(S,E,step);
}
// Orthogonal_iter with pre-computing
// Same as perspective_iter but with a fixed +z view direction (orthographic).
template<typename T>
GPU line_iter orthogonal_iter(T* volume, float2 p, float step, VIVALDI_DATA_RANGE* sdr){
    // initialization
    int4 start = sdr->data_start;
    int4 end = sdr->data_end;
    int data_halo = sdr->data_halo;
    float3 ray_direction = make_float3(0,0,1);
    float3 ray_origin = make_float3(p.x, p.y ,0);
    start = start + make_int4(data_halo);
    end = end - make_int4(data_halo);
    float3 min_max[2];
    min_max[0] = make_float3(start.x, start.y, start.z);
    min_max[1] = make_float3(end.x, end.y, end.z);
    float o_x, o_y, o_z;
    o_x = inv_modelview[0][0] * p.x + inv_modelview[0][1] * p.y + inv_modelview[0][2] * 0 + inv_modelview[0][3];
    o_y = inv_modelview[1][0] * p.x + inv_modelview[1][1] * p.y + inv_modelview[1][2] * 0 + inv_modelview[1][3];
    o_z = inv_modelview[2][0] * p.x + inv_modelview[2][1] * p.y + inv_modelview[2][2] * 0 + inv_modelview[2][3];
    ray_origin = make_float3(o_x, o_y, o_z);
    // direction: rotate only, no translation
    o_x = inv_modelview[0][0] * ray_direction.x + inv_modelview[0][1] * ray_direction.y + inv_modelview[0][2] * ray_direction.z;// + inv_modelview[0][3];
    o_y = inv_modelview[1][0] * ray_direction.x + inv_modelview[1][1] * ray_direction.y + inv_modelview[1][2] * ray_direction.z;// + inv_modelview[1][3];
    o_z = inv_modelview[2][0] * ray_direction.x + inv_modelview[2][1] * ray_direction.y + inv_modelview[2][2] * ray_direction.z;// + inv_modelview[2][3];
    ray_direction = normalize(make_float3(o_x, o_y, o_z));
    float2 interval = intersectUnitCube(ray_origin, ray_direction, min_max);
    // float val;
    // val = end.z;
    // return line_iter(make_float3(val), make_float3(val,val,val+1), 1.0);
    if(interval.x == INF)
        return line_iter(make_float3(0), make_float3(0), 1.0);
    float3 S = ray_origin + interval.x * ray_direction;
    float3 E = ray_origin + interval.y * ray_direction;
    return line_iter(S,E,step);
}
template<typename T>
GPU line_iter orthogonal_iter(T* volume, float x, float y, float step, VIVALDI_DATA_RANGE* sdr){
    return orthogonal_iter(volume, make_float2(x,y), step, sdr);
}
// Domain Specific functions
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Phong shading with the light position transformed by the global
// inv_modelview matrix before the standard ambient/diffuse/specular terms.
//   L     - light position (transformed into the same space as pos)
//   pos   - shaded point, N - surface normal, omega - view direction
//   kd/ks - diffuse/specular coefficients, n - shininess, amb - ambient term
GPU float3 phong(float3 L, float3 pos, float3 N, float3 omega, float3 kd, float3 ks, float n, float3 amb){
    float3 color;
    float a, b, c;
    a = inv_modelview[0][0] * L.x + inv_modelview[0][1] * L.y + inv_modelview[0][2] * L.z + inv_modelview[0][3];
    b = inv_modelview[1][0] * L.x + inv_modelview[1][1] * L.y + inv_modelview[1][2] * L.z + inv_modelview[1][3];
    c = inv_modelview[2][0] * L.x + inv_modelview[2][1] * L.y + inv_modelview[2][2] * L.z + inv_modelview[2][3];
    L.x = a;
    L.y = b;
    L.z = c;
    L = normalize(L-pos);
    //ambient
    color = amb;
    // diffuse
    float lobe = max(dot(N, L), 0.0f);
    color += kd * lobe;
    // specular
    if (n > 0) {
        float3 R = reflect(-L, N);
        lobe = pow(fmaxf(dot(R, omega), 0), n);
        color += ks * lobe;
    }
    // clamping is a hack, but looks better
    return fminf(color, make_float3(1));
}
// Phong shading with a pre-transformed, pre-normalized light direction L.
GPU float3 phong(float3 L, float3 N, float3 omega, float3 kd, float3 ks, float n, float3 amb){
    float3 color;
    //ambient
    color = amb;
    // diffuse
    float lobe = max(dot(N, L), 0.0f);
    //float lobe = max(dot(-N, L), 0.0f);
    color += kd * lobe;
    // specular
    if (n > 0) {
        float3 R = reflect(-L, N);
        lobe = pow(fmaxf(dot(R, omega), 0), n);
        color += ks * lobe;
    }
    // clamping is a hack, but looks better
    return fminf(color, make_float3(1));
}
// Lambertian diffuse term only.
GPU float3 diffuse(float3 L, float3 N, float3 kd){
    float lobe = max(dot(N, L), 0.0f);
    return kd * lobe;
}
// 5-point discrete Laplacian of a 2D buffer at p.
template<typename R,typename T>
GPU R laplacian(T* image, float2 p, VIVALDI_DATA_RANGE* sdr){
    float x = p.x;
    float y = p.y;
    //parallel variables
    R a = point_query_2d<R>(image, x, y, sdr);
    R u = point_query_2d<R>(image, x, y+1, sdr);
    R d = point_query_2d<R>(image, x, y-1, sdr);
    R l = point_query_2d<R>(image, x-1, y, sdr);
    R r = point_query_2d<R>(image, x+1, y, sdr);
    // FIX: float literal instead of 4.0 (double) to avoid double-precision
    // promotion of the whole expression in device code
    R ret = u+d+l+r-4.0f*a;
    return ret;
}
template<typename R,typename T>
GPU R laplacian(T* image, float x, float y, VIVALDI_DATA_RANGE* sdr){
    //parallel variables
    return laplacian<R>(image, make_float2(x,y), sdr);
}
extern "C"{
// memory copy functions
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// halo memset function
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Commented-out halo reset kernel retained for reference (never compiled).
/*
__global__ void halo_memeset( float3* rb, VIVALDI_DATA_RANGE* rb_DATA_RANGE, int z_start, int z_end, int y_start, int y_end, int x_start, int x_end)
{
    //parallel variables
    int x_hschoi = threadIdx.x + blockDim.x * blockIdx.x;
    int y_hschoi = threadIdx.y + blockDim.y * blockIdx.y;
    int z_hschoi = threadIdx.z + blockDim.z * blockIdx.z;
    int x = x_hschoi + x_start;
    int y = y_hschoi + y_start;
    int z = z_hschoi + z_start;
    if(x_end <= x || y_end <= y || z_end <= z)return;
    int idx = (z-rb_DATA_RANGE->start.z)*(rb_DATA_RANGE->end.x-rb_DATA_RANGE->start.x)*(rb_DATA_RANGE->end.y-rb_DATA_RANGE->start.y)
            + (y-rb_DATA_RANGE->start.y)*(rb_DATA_RANGE->end.x-rb_DATA_RANGE->start.x)
            + (x-rb_DATA_RANGE->start.x);
    int3 full_data_start = make_int3(rb_DATA_RANGE->full_data_start);
    int3 full_data_end = make_int3(rb_DATA_RANGE->full_data_end);
    int3 start = make_int3(rb_DATA_RANGE->start);
    int3 end = make_int3(rb_DATA_RANGE->end);
    x = x - start.x;
    y = y - start.y;
    z = z - start.z;
    int buffer_X = end.x - start.x;
    int buffer_Y = end.y - start.y;
    int buffer_Z = end.z - start.z;
    if (!(full_data_start.x <= x && x < full_data_end.x &&
          full_data_start.y <= y && y < full_data_end.y &&
          full_data_start.y <= y && y < full_data_end.z)){
        r[idx] = initial(rb[0]);
    }
}
}
*/
}
// Front-to-back "over" compositing of `next` behind `origin`; channels are
// in 0..255 and next's color is weighted by its own alpha.
__device__ float4 alpha_compositing(float4 origin, float4 next)
{
    float a = origin.w;
    float r = origin.x;
    float g = origin.y;
    float b = origin.z;
    float x, y, z, w;
    w = a + (1-a/255) * next.w;
    w = (w > 255)? 255 : w;
    x = r + (1-a/255) * next.x * next.w/255.0f;
    y = g + (1-a/255) * next.y * next.w/255.0f;
    z = b + (1-a/255) * next.z * next.w/255.0f;
    return make_float4(x,y,z,w);
}
// Same compositing but next's color is assumed pre-multiplied (no extra
// alpha weighting of the incoming color).
__device__ float4 alpha_compositing_wo_alpha(float4 origin, float4 next)
{
    float a = origin.w;
    float r = origin.x;
    float g = origin.y;
    float b = origin.z;
    //if(origin.w == 1) return make_float4(255,0,0,0);
    float x, y, z, w;
    w = a + (1-a/255) * next.w;
    w = (w > 255)? 255 : w;
    x = r + (1-a/255) * next.x;
    y = g + (1-a/255) * next.y;
    z = b + (1-a/255) * next.z;
    return make_float4(x,y,z,w);
}
// Composite the remaining transparency against a white background.
__device__ float4 background_white(float4 origin)
{
    float a = origin.w;
    float r = origin.x;
    float g = origin.y;
    float b = origin.z;
    float x, y, z, w;
    w = a ;
    x = r + (1-a/255.0f) * 255.0f;
    y = g + (1-a/255.0f) * 255.0f;
    z = b + (1-a/255.0f) * 255.0f;
    return make_float4(x,y,z,w);
}
// Inverse of background_white: remove the white-background contribution.
// NOTE(review): uses double literals (255.0) unlike background_white — likely
// unintentional precision mix; behavior kept as-is.
__device__ float4 detach(float4 origin)
{
    float a = origin.w;
    float r = origin.x;
    float g = origin.y;
    float b = origin.z;
    float x,y,z,w;
    w = a;
    x = r - (1-a/255.0)*255.0;
    y = g - (1-a/255.0)*255.0;
    z = b - (1-a/255.0)*255.0;
    return make_float4(x,y,z,w);
}
texture<float4, 2> TFF;
#include <stdio.h>
// Transfer-function lookup: scalar a (0..TF_bandwidth) -> RGBA in 0..255,
// sampled from the TFF texture (stored normalized, rescaled here).
__device__ float4 transfer(float a)
{
    float4 tmp = tex2D(TFF, a/TF_bandwidth* 255, 0);
    float4 tmp_col = make_float4(tmp.x*255.0, tmp.y*255.0, tmp.z*255.0, tmp.w*255);
    return tmp_col;
}
// Vector overloads use only the first component.
__device__ float4 transfer(float2 a)
{
    return transfer(a.x);
}
__device__ float4 transfer(float3 a)
{
    return transfer(a.x);
}
__device__ float4 transfer(float4 a)
{
    return transfer(a.x);
}
texture<float4, 2> TFF1;
texture<float4, 2> TFF2;
texture<float4, 2> TFF3;
texture<float4, 2> TFF4;
// Multi-channel transfer-function lookup; chan selects one of TFF..TFF4.
// NOTE(review): tmp is left uninitialized when chan is outside 0..4.
__device__ float4 transfer(float a, int chan)
{
    float4 tmp;
    if(chan == 0) tmp = tex2D(TFF, a/TF_bandwidth * 255, 0);
    else if(chan == 1) tmp = tex2D(TFF1, a/TF_bandwidth * 255, 0);
    else if(chan == 2) tmp = tex2D(TFF2, a/TF_bandwidth * 255, 0);
    else if(chan == 3) tmp = tex2D(TFF3, a/TF_bandwidth * 255, 0);
    else if(chan == 4) tmp = tex2D(TFF4, a/TF_bandwidth * 255, 0);
    float4 tmp_col = make_float4(tmp.x*255.0, tmp.y*255.0, tmp.z*255.0, tmp.w*255);
    return tmp_col;
}
__device__ float4 transfer(float2 a, int chan)
{
    return transfer(a.x, chan);
}
__device__ float4 transfer(float3 a, int chan)
{
    return transfer(a.x, chan);
}
__device__ float4 transfer(float4 a, int chan)
{
    return transfer(a.x, chan);
}
// Average two channel colors.
__device__ float4 ch_binder(float4 a, float4 b)
{
    return (a + b) / 2.0f;
}
// floor() truncated to int.
__device__ int floor_tmp(float a)
{
    return floor(a);
}
3384847b6b2ec93930a35305635993f2312f09be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "svgf/svgf.h" #include "kernel/pt_common.h" #include "cuda/cudadefs.h" #include "cuda/helper_math.h" #include "cuda/cudautil.h" #include "cuda/cudamemory.h" #include "aten4idaten.h" //#define ENABLE_MEDIAN_FILTER inline __device__ void computePrevScreenPos( int ix, int iy, float centerDepth, int width, int height, aten::vec4* prevPos, const aten::mat4* __restrict__ mtxs) { // NOTE // Pview = (Xview, Yview, Zview, 1) // mtxV2C = W 0 0 0 // 0 H 0 0 // 0 0 A B // 0 0 -1 0 // mtxV2C * Pview = (Xclip, Yclip, Zclip, Wclip) = (Xclip, Yclip, Zclip, Zview) // Wclip = Zview = depth // Xscr = Xclip / Wclip = Xclip / Zview = Xclip / depth // Yscr = Yclip / Wclip = Yclip / Zview = Yclip / depth // // Xscr * depth = Xclip // Xview = mtxC2V * Xclip const aten::mat4 mtxC2V = mtxs[0]; const aten::mat4 mtxV2W = mtxs[1]; const aten::mat4 mtxPrevW2V = mtxs[2]; const aten::mat4 mtxV2C = mtxs[3]; float2 uv = make_float2(ix + 0.5, iy + 0.5); uv /= make_float2(width - 1, height - 1); // [0, 1] uv = uv * 2.0f - 1.0f; // [0, 1] -> [-1, 1] aten::vec4 pos(uv.x, uv.y, 0, 0); // Screen-space -> Clip-space. pos.x *= centerDepth; pos.y *= centerDepth; // Clip-space -> View-space pos = mtxC2V.apply(pos); pos.z = -centerDepth; pos.w = 1.0; pos = mtxV2W.apply(pos); // Reproject previous screen position. 
pos = mtxPrevW2V.apply(pos); *prevPos = mtxV2C.apply(pos); *prevPos /= prevPos->w; *prevPos = *prevPos * 0.5 + 0.5; // [-1, 1] -> [0, 1] } inline __device__ int getLinearIdx(int x, int y, int w, int h) { int max_buffer_size = w * h; return clamp(y * w + x, 0, max_buffer_size - 1); } // Bilinear sampler inline __device__ float4 sampleBilinear( const float4* buffer, float uvx, float uvy, int w, int h) { float2 uv = make_float2(uvx, uvy) * make_float2(w, h) - make_float2(0.5f, 0.5f); int x = floor(uv.x); int y = floor(uv.y); float2 uv_ratio = uv - make_float2(x, y); float2 uv_inv = make_float2(1.f, 1.f) - uv_ratio; int x1 = clamp(x + 1, 0, w - 1); int y1 = clamp(y + 1, 0, h - 1); float4 r = (buffer[getLinearIdx(x, y, w, h)] * uv_inv.x + buffer[getLinearIdx(x1, y, w, h)] * uv_ratio.x) * uv_inv.y + (buffer[getLinearIdx(x, y1, w, h)] * uv_inv.x + buffer[getLinearIdx(x1, y1, w, h)] * uv_ratio.x) * uv_ratio.y; return r; } __global__ void temporalReprojection( idaten::TileDomain tileDomain, const float nThreshold, const float zThreshold, const float4* __restrict__ contribs, const aten::CameraParameter* __restrict__ camera, float4* curAovNormalDepth, float4* curAovTexclrMeshid, float4* curAovColorVariance, float4* curAovMomentTemporalWeight, const float4* __restrict__ prevAovNormalDepth, const float4* __restrict__ prevAovTexclrMeshid, const float4* __restrict__ prevAovColorVariance, const float4* __restrict__ prevAovMomentTemporalWeight, hipSurfaceObject_t motionDetphBuffer, hipSurfaceObject_t dst, int width, int height) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } ix += tileDomain.x; iy += tileDomain.y; const auto idx = getIdx(ix, iy, width); auto nmlDepth = curAovNormalDepth[idx]; auto texclrMeshId = curAovTexclrMeshid[idx]; const float centerDepth = nmlDepth.w; const int centerMeshId = (int)texclrMeshId.w; // . 
auto contrib = contribs[idx]; float4 curColor = make_float4(contrib.x, contrib.y, contrib.z, 1.0f) / contrib.w; //curColor.w = 1; if (centerMeshId < 0) { // . surf2Dwrite( curColor, dst, ix * sizeof(float4), iy, hipBoundaryModeTrap); curAovColorVariance[idx] = curColor; curAovMomentTemporalWeight[idx] = make_float4(1, 1, 1, curAovMomentTemporalWeight[idx].w); return; } float3 centerNormal = make_float3(nmlDepth.x, nmlDepth.y, nmlDepth.z); float4 sum = make_float4(0); float weight = 0.0f; aten::vec4 centerPrevPos; #pragma unroll for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int xx = clamp(ix + x, 0, width - 1); int yy = clamp(iy + y, 0, height - 1); float4 motionDepth; surf2Dread(&motionDepth, motionDetphBuffer, ix * sizeof(float4), iy); // . int px = (int)(xx + motionDepth.x * width); int py = (int)(yy + motionDepth.y * height); px = clamp(px, 0, width - 1); py = clamp(py, 0, height - 1); int pidx = getIdx(px, py, width); nmlDepth = prevAovNormalDepth[pidx]; texclrMeshId = prevAovTexclrMeshid[pidx]; const float prevDepth = nmlDepth.w; const int prevMeshId = (int)texclrMeshId.w; float3 prevNormal = make_float3(nmlDepth.x, nmlDepth.y, nmlDepth.z); // TODO // . float Wz = clamp((zThreshold - abs(1 - centerDepth / prevDepth)) / zThreshold, 0.0f, 1.0f); float Wn = clamp((dot(centerNormal, prevNormal) - nThreshold) / (1.0f - nThreshold), 0.0f, 1.0f); float Wm = centerMeshId == prevMeshId ? 1.0f : 0.0f; // . 
float4 prev = prevAovColorVariance[pidx]; //float4 prev = sampleBilinear(prevAovColorVariance, prevPos.x, prevPos.y, width, height); float W = Wz * Wn * Wm; sum += prev * W; weight += W; } } if (weight > 0.0f) { sum /= weight; weight /= 9; #if 0 auto w = min(0.8f, weight); curColor = (1.0f - w) * curColor + w * sum; #elif 1 curColor = 0.2 * curColor + 0.8 * sum; #else curColor = (1.0f - weight) * curColor + weight * sum; #endif } curAovMomentTemporalWeight[idx].w = weight; #ifdef ENABLE_MEDIAN_FILTER curAovColorVariance[idx].x = curColor.x; curAovColorVariance[idx].y = curColor.y; curAovColorVariance[idx].z = curColor.z; #else curAovColorVariance[idx].x = curColor.x; curAovColorVariance[idx].y = curColor.y; curAovColorVariance[idx].z = curColor.z; // TODO // . // ex) // f0 = 100, f1 = 0, f2 = 0 // avg = (f0 + f1 + f2) / 3 = 33.3 <- . // accumulate moments. { float lum = AT_NAME::color::luminance(curColor.x, curColor.y, curColor.z); float3 centerMoment = make_float3(lum * lum, lum, 0); // . int frame = 1; if (weight > 0.0f) { auto momentTemporalWeight = prevAovMomentTemporalWeight[idx];; float3 prevMoment = make_float3(momentTemporalWeight.x, momentTemporalWeight.y, momentTemporalWeight.z); // . 
frame = (int)prevMoment.z + 1; centerMoment += prevMoment; } centerMoment.z = frame; curAovMomentTemporalWeight[idx].x = centerMoment.x; curAovMomentTemporalWeight[idx].y = centerMoment.y; curAovMomentTemporalWeight[idx].z = centerMoment.z; } #endif surf2Dwrite( curColor, dst, ix * sizeof(float4), iy, hipBoundaryModeTrap); } __global__ void dilateWeight( idaten::TileDomain tileDomain, float4* aovMomentTemporalWeight, const float4* __restrict__ aovTexclrMeshid, int width, int height) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } ix += tileDomain.x; iy += tileDomain.y; auto idx = getIdx(ix, iy, width); const int centerMeshId = (int)aovTexclrMeshid[idx].w; if (centerMeshId < 0) { // This pixel is background, so nothing is done. return; } float temporalWeight = aovMomentTemporalWeight[idx].w; for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int xx = ix + x; int yy = iy + y; if ((0 <= xx) && (xx < width) && (0 <= yy) && (yy < height)) { int pidx = getIdx(xx, yy, width); float w = aovMomentTemporalWeight[pidx].w; temporalWeight = min(temporalWeight, w); } } } aovMomentTemporalWeight[idx].w = temporalWeight; } inline __device__ float3 min(float3 a, float3 b) { return make_float3( min(a.x, b.x), min(a.y, b.y), min(a.z, b.z)); } inline __device__ float3 max(float3 a, float3 b) { return make_float3( max(a.x, b.x), max(a.y, b.y), max(a.z, b.z)); } // Macro for sorting. 
#define s2(a, b) temp = a; a = min(a, b); b = max(temp, b); #define mn3(a, b, c) s2(a, b); s2(a, c); #define mx3(a, b, c) s2(b, c); s2(a, c); #define mnmx3(a, b, c) mx3(a, b, c); s2(a, b); // 3 exchanges #define mnmx4(a, b, c, d) s2(a, b); s2(c, d); s2(a, c); s2(b, d); // 4 exchanges #define mnmx5(a, b, c, d, e) s2(a, b); s2(c, d); mn3(a, c, e); mx3(b, d, e); // 6 exchanges #define mnmx6(a, b, c, d, e, f) s2(a, d); s2(b, e); s2(c, f); mn3(a, b, c); mx3(d, e, f); // 7 exchanges inline __device__ float3 medianFilter( int ix, int iy, const float4* src, int width, int height) { float3 v[9]; int pos = 0; for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int xx = clamp(ix + x, 0, width - 1); int yy = clamp(iy + y, 0, height - 1); int pidx = getIdx(xx, yy, width); auto s = src[pidx]; v[pos] = make_float3(s.x, s.y, s.z); pos++; } } // Sort float3 temp; mnmx6(v[0], v[1], v[2], v[3], v[4], v[5]); mnmx5(v[1], v[2], v[3], v[4], v[6]); mnmx4(v[2], v[3], v[4], v[7]); mnmx3(v[3], v[4], v[8]); return v[4]; } __global__ void medianFilter( hipSurfaceObject_t dst, float4* curAovColorVariance, float4* curAovMomentTemporalWeight, const float4* __restrict__ curAovTexclrMeshid, const float4* __restrict__ prevAovMomentTemporalWeight, int width, int height) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= width || iy >= height) { return; } auto idx = getIdx(ix, iy, width); const int centerMeshId = curAovTexclrMeshid[idx].w; if (centerMeshId < 0) { // This pixel is background, so nothing is done. return; } auto curColor = medianFilter(ix, iy, curAovColorVariance, width, height); curAovColorVariance[idx].x = curColor.x; curAovColorVariance[idx].y = curColor.y; curAovColorVariance[idx].z = curColor.z; // accumulate moments. { float lum = AT_NAME::color::luminance(curColor.x, curColor.y, curColor.z); float3 centerMoment = make_float3(lum * lum, lum, 0); // . 
int frame = 1; auto momentTemporalWeight = prevAovMomentTemporalWeight[idx];; float3 prevMoment = make_float3(momentTemporalWeight.x, momentTemporalWeight.y, momentTemporalWeight.z); // . frame = (int)prevMoment.z + 1; centerMoment += prevMoment; centerMoment.z = frame; curAovMomentTemporalWeight[idx].x = centerMoment.x; curAovMomentTemporalWeight[idx].y = centerMoment.y; curAovMomentTemporalWeight[idx].z = centerMoment.z; } surf2Dwrite( make_float4(curColor, 0), dst, ix * sizeof(float4), iy, hipBoundaryModeTrap); } namespace idaten { void SVGFPathTracing::onTemporalReprojection( hipSurfaceObject_t outputSurf, int width, int height) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); int curaov = getCurAovs(); int prevaov = getPrevAovs(); CudaGLResourceMapper rscmap(&m_motionDepthBuffer); auto motionDepthBuffer = m_motionDepthBuffer.bind(); temporalReprojection << <grid, block, 0, m_stream >> > ( //temporalReprojection << <1, 1 >> > ( m_tileDomain, m_nmlThresholdTF, m_depthThresholdTF, m_tmpBuf.ptr(), m_cam.ptr(), m_aovNormalDepth[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), m_aovColorVariance[curaov].ptr(), m_aovMomentTemporalWeight[curaov].ptr(), m_aovNormalDepth[prevaov].ptr(), m_aovTexclrMeshid[prevaov].ptr(), m_aovColorVariance[prevaov].ptr(), m_aovMomentTemporalWeight[prevaov].ptr(), motionDepthBuffer, outputSurf, width, height); checkCudaKernel(temporalReprojection); #ifdef ENABLE_MEDIAN_FILTER medianFilter << <grid, block, 0, m_stream >> > ( outputSurf, m_aovColorVariance[curaov].ptr(), m_aovMomentTemporalWeight[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), m_aovMomentTemporalWeight[prevaov].ptr(), width, height); checkCudaKernel(medianFilter); #endif dilateWeight << <grid, block, 0, m_stream >> > ( m_tileDomain, m_aovMomentTemporalWeight[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), width, height); checkCudaKernel(dilateWeight); } }
3384847b6b2ec93930a35305635993f2312f09be.cu
#include "svgf/svgf.h" #include "kernel/pt_common.h" #include "cuda/cudadefs.h" #include "cuda/helper_math.h" #include "cuda/cudautil.h" #include "cuda/cudamemory.h" #include "aten4idaten.h" //#define ENABLE_MEDIAN_FILTER inline __device__ void computePrevScreenPos( int ix, int iy, float centerDepth, int width, int height, aten::vec4* prevPos, const aten::mat4* __restrict__ mtxs) { // NOTE // Pview = (Xview, Yview, Zview, 1) // mtxV2C = W 0 0 0 // 0 H 0 0 // 0 0 A B // 0 0 -1 0 // mtxV2C * Pview = (Xclip, Yclip, Zclip, Wclip) = (Xclip, Yclip, Zclip, Zview) // Wclip = Zview = depth // Xscr = Xclip / Wclip = Xclip / Zview = Xclip / depth // Yscr = Yclip / Wclip = Yclip / Zview = Yclip / depth // // Xscr * depth = Xclip // Xview = mtxC2V * Xclip const aten::mat4 mtxC2V = mtxs[0]; const aten::mat4 mtxV2W = mtxs[1]; const aten::mat4 mtxPrevW2V = mtxs[2]; const aten::mat4 mtxV2C = mtxs[3]; float2 uv = make_float2(ix + 0.5, iy + 0.5); uv /= make_float2(width - 1, height - 1); // [0, 1] uv = uv * 2.0f - 1.0f; // [0, 1] -> [-1, 1] aten::vec4 pos(uv.x, uv.y, 0, 0); // Screen-space -> Clip-space. pos.x *= centerDepth; pos.y *= centerDepth; // Clip-space -> View-space pos = mtxC2V.apply(pos); pos.z = -centerDepth; pos.w = 1.0; pos = mtxV2W.apply(pos); // Reproject previous screen position. 
pos = mtxPrevW2V.apply(pos); *prevPos = mtxV2C.apply(pos); *prevPos /= prevPos->w; *prevPos = *prevPos * 0.5 + 0.5; // [-1, 1] -> [0, 1] } inline __device__ int getLinearIdx(int x, int y, int w, int h) { int max_buffer_size = w * h; return clamp(y * w + x, 0, max_buffer_size - 1); } // Bilinear sampler inline __device__ float4 sampleBilinear( const float4* buffer, float uvx, float uvy, int w, int h) { float2 uv = make_float2(uvx, uvy) * make_float2(w, h) - make_float2(0.5f, 0.5f); int x = floor(uv.x); int y = floor(uv.y); float2 uv_ratio = uv - make_float2(x, y); float2 uv_inv = make_float2(1.f, 1.f) - uv_ratio; int x1 = clamp(x + 1, 0, w - 1); int y1 = clamp(y + 1, 0, h - 1); float4 r = (buffer[getLinearIdx(x, y, w, h)] * uv_inv.x + buffer[getLinearIdx(x1, y, w, h)] * uv_ratio.x) * uv_inv.y + (buffer[getLinearIdx(x, y1, w, h)] * uv_inv.x + buffer[getLinearIdx(x1, y1, w, h)] * uv_ratio.x) * uv_ratio.y; return r; } __global__ void temporalReprojection( idaten::TileDomain tileDomain, const float nThreshold, const float zThreshold, const float4* __restrict__ contribs, const aten::CameraParameter* __restrict__ camera, float4* curAovNormalDepth, float4* curAovTexclrMeshid, float4* curAovColorVariance, float4* curAovMomentTemporalWeight, const float4* __restrict__ prevAovNormalDepth, const float4* __restrict__ prevAovTexclrMeshid, const float4* __restrict__ prevAovColorVariance, const float4* __restrict__ prevAovMomentTemporalWeight, cudaSurfaceObject_t motionDetphBuffer, cudaSurfaceObject_t dst, int width, int height) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } ix += tileDomain.x; iy += tileDomain.y; const auto idx = getIdx(ix, iy, width); auto nmlDepth = curAovNormalDepth[idx]; auto texclrMeshId = curAovTexclrMeshid[idx]; const float centerDepth = nmlDepth.w; const int centerMeshId = (int)texclrMeshId.w; // 今回のフレームのピクセルカラー. 
auto contrib = contribs[idx]; float4 curColor = make_float4(contrib.x, contrib.y, contrib.z, 1.0f) / contrib.w; //curColor.w = 1; if (centerMeshId < 0) { // 背景なので、そのまま出力して終わり. surf2Dwrite( curColor, dst, ix * sizeof(float4), iy, cudaBoundaryModeTrap); curAovColorVariance[idx] = curColor; curAovMomentTemporalWeight[idx] = make_float4(1, 1, 1, curAovMomentTemporalWeight[idx].w); return; } float3 centerNormal = make_float3(nmlDepth.x, nmlDepth.y, nmlDepth.z); float4 sum = make_float4(0); float weight = 0.0f; aten::vec4 centerPrevPos; #pragma unroll for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int xx = clamp(ix + x, 0, width - 1); int yy = clamp(iy + y, 0, height - 1); float4 motionDepth; surf2Dread(&motionDepth, motionDetphBuffer, ix * sizeof(float4), iy); // 前のフレームのスクリーン座標. int px = (int)(xx + motionDepth.x * width); int py = (int)(yy + motionDepth.y * height); px = clamp(px, 0, width - 1); py = clamp(py, 0, height - 1); int pidx = getIdx(px, py, width); nmlDepth = prevAovNormalDepth[pidx]; texclrMeshId = prevAovTexclrMeshid[pidx]; const float prevDepth = nmlDepth.w; const int prevMeshId = (int)texclrMeshId.w; float3 prevNormal = make_float3(nmlDepth.x, nmlDepth.y, nmlDepth.z); // TODO // 同じメッシュ上でもライトのそばの明るくなったピクセルを拾ってしまう場合の対策が必要. float Wz = clamp((zThreshold - abs(1 - centerDepth / prevDepth)) / zThreshold, 0.0f, 1.0f); float Wn = clamp((dot(centerNormal, prevNormal) - nThreshold) / (1.0f - nThreshold), 0.0f, 1.0f); float Wm = centerMeshId == prevMeshId ? 1.0f : 0.0f; // 前のフレームのピクセルカラーを取得. 
float4 prev = prevAovColorVariance[pidx]; //float4 prev = sampleBilinear(prevAovColorVariance, prevPos.x, prevPos.y, width, height); float W = Wz * Wn * Wm; sum += prev * W; weight += W; } } if (weight > 0.0f) { sum /= weight; weight /= 9; #if 0 auto w = min(0.8f, weight); curColor = (1.0f - w) * curColor + w * sum; #elif 1 curColor = 0.2 * curColor + 0.8 * sum; #else curColor = (1.0f - weight) * curColor + weight * sum; #endif } curAovMomentTemporalWeight[idx].w = weight; #ifdef ENABLE_MEDIAN_FILTER curAovColorVariance[idx].x = curColor.x; curAovColorVariance[idx].y = curColor.y; curAovColorVariance[idx].z = curColor.z; #else curAovColorVariance[idx].x = curColor.x; curAovColorVariance[idx].y = curColor.y; curAovColorVariance[idx].z = curColor.z; // TODO // 現フレームと過去フレームが同率で加算されるため、どちらかに強い影響がでると影響が弱まるまでに非常に時間がかかる. // ex) // f0 = 100, f1 = 0, f2 = 0 // avg = (f0 + f1 + f2) / 3 = 33.3 <- 非常に大きい値が残り続ける. // accumulate moments. { float lum = AT_NAME::color::luminance(curColor.x, curColor.y, curColor.z); float3 centerMoment = make_float3(lum * lum, lum, 0); // 積算フレーム数のリセット. int frame = 1; if (weight > 0.0f) { auto momentTemporalWeight = prevAovMomentTemporalWeight[idx];; float3 prevMoment = make_float3(momentTemporalWeight.x, momentTemporalWeight.y, momentTemporalWeight.z); // 積算フレーム数を1増やす. 
frame = (int)prevMoment.z + 1; centerMoment += prevMoment; } centerMoment.z = frame; curAovMomentTemporalWeight[idx].x = centerMoment.x; curAovMomentTemporalWeight[idx].y = centerMoment.y; curAovMomentTemporalWeight[idx].z = centerMoment.z; } #endif surf2Dwrite( curColor, dst, ix * sizeof(float4), iy, cudaBoundaryModeTrap); } __global__ void dilateWeight( idaten::TileDomain tileDomain, float4* aovMomentTemporalWeight, const float4* __restrict__ aovTexclrMeshid, int width, int height) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } ix += tileDomain.x; iy += tileDomain.y; auto idx = getIdx(ix, iy, width); const int centerMeshId = (int)aovTexclrMeshid[idx].w; if (centerMeshId < 0) { // This pixel is background, so nothing is done. return; } float temporalWeight = aovMomentTemporalWeight[idx].w; for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int xx = ix + x; int yy = iy + y; if ((0 <= xx) && (xx < width) && (0 <= yy) && (yy < height)) { int pidx = getIdx(xx, yy, width); float w = aovMomentTemporalWeight[pidx].w; temporalWeight = min(temporalWeight, w); } } } aovMomentTemporalWeight[idx].w = temporalWeight; } inline __device__ float3 min(float3 a, float3 b) { return make_float3( min(a.x, b.x), min(a.y, b.y), min(a.z, b.z)); } inline __device__ float3 max(float3 a, float3 b) { return make_float3( max(a.x, b.x), max(a.y, b.y), max(a.z, b.z)); } // Macro for sorting. 
#define s2(a, b) temp = a; a = min(a, b); b = max(temp, b); #define mn3(a, b, c) s2(a, b); s2(a, c); #define mx3(a, b, c) s2(b, c); s2(a, c); #define mnmx3(a, b, c) mx3(a, b, c); s2(a, b); // 3 exchanges #define mnmx4(a, b, c, d) s2(a, b); s2(c, d); s2(a, c); s2(b, d); // 4 exchanges #define mnmx5(a, b, c, d, e) s2(a, b); s2(c, d); mn3(a, c, e); mx3(b, d, e); // 6 exchanges #define mnmx6(a, b, c, d, e, f) s2(a, d); s2(b, e); s2(c, f); mn3(a, b, c); mx3(d, e, f); // 7 exchanges inline __device__ float3 medianFilter( int ix, int iy, const float4* src, int width, int height) { float3 v[9]; int pos = 0; for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int xx = clamp(ix + x, 0, width - 1); int yy = clamp(iy + y, 0, height - 1); int pidx = getIdx(xx, yy, width); auto s = src[pidx]; v[pos] = make_float3(s.x, s.y, s.z); pos++; } } // Sort float3 temp; mnmx6(v[0], v[1], v[2], v[3], v[4], v[5]); mnmx5(v[1], v[2], v[3], v[4], v[6]); mnmx4(v[2], v[3], v[4], v[7]); mnmx3(v[3], v[4], v[8]); return v[4]; } __global__ void medianFilter( cudaSurfaceObject_t dst, float4* curAovColorVariance, float4* curAovMomentTemporalWeight, const float4* __restrict__ curAovTexclrMeshid, const float4* __restrict__ prevAovMomentTemporalWeight, int width, int height) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= width || iy >= height) { return; } auto idx = getIdx(ix, iy, width); const int centerMeshId = curAovTexclrMeshid[idx].w; if (centerMeshId < 0) { // This pixel is background, so nothing is done. return; } auto curColor = medianFilter(ix, iy, curAovColorVariance, width, height); curAovColorVariance[idx].x = curColor.x; curAovColorVariance[idx].y = curColor.y; curAovColorVariance[idx].z = curColor.z; // accumulate moments. { float lum = AT_NAME::color::luminance(curColor.x, curColor.y, curColor.z); float3 centerMoment = make_float3(lum * lum, lum, 0); // 積算フレーム数のリセット. 
int frame = 1; auto momentTemporalWeight = prevAovMomentTemporalWeight[idx];; float3 prevMoment = make_float3(momentTemporalWeight.x, momentTemporalWeight.y, momentTemporalWeight.z); // 積算フレーム数を1増やす. frame = (int)prevMoment.z + 1; centerMoment += prevMoment; centerMoment.z = frame; curAovMomentTemporalWeight[idx].x = centerMoment.x; curAovMomentTemporalWeight[idx].y = centerMoment.y; curAovMomentTemporalWeight[idx].z = centerMoment.z; } surf2Dwrite( make_float4(curColor, 0), dst, ix * sizeof(float4), iy, cudaBoundaryModeTrap); } namespace idaten { void SVGFPathTracing::onTemporalReprojection( cudaSurfaceObject_t outputSurf, int width, int height) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); int curaov = getCurAovs(); int prevaov = getPrevAovs(); CudaGLResourceMapper rscmap(&m_motionDepthBuffer); auto motionDepthBuffer = m_motionDepthBuffer.bind(); temporalReprojection << <grid, block, 0, m_stream >> > ( //temporalReprojection << <1, 1 >> > ( m_tileDomain, m_nmlThresholdTF, m_depthThresholdTF, m_tmpBuf.ptr(), m_cam.ptr(), m_aovNormalDepth[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), m_aovColorVariance[curaov].ptr(), m_aovMomentTemporalWeight[curaov].ptr(), m_aovNormalDepth[prevaov].ptr(), m_aovTexclrMeshid[prevaov].ptr(), m_aovColorVariance[prevaov].ptr(), m_aovMomentTemporalWeight[prevaov].ptr(), motionDepthBuffer, outputSurf, width, height); checkCudaKernel(temporalReprojection); #ifdef ENABLE_MEDIAN_FILTER medianFilter << <grid, block, 0, m_stream >> > ( outputSurf, m_aovColorVariance[curaov].ptr(), m_aovMomentTemporalWeight[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), m_aovMomentTemporalWeight[prevaov].ptr(), width, height); checkCudaKernel(medianFilter); #endif dilateWeight << <grid, block, 0, m_stream >> > ( m_tileDomain, m_aovMomentTemporalWeight[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), width, height); checkCudaKernel(dilateWeight); } }
a1c457d5d143cc72c76266e7bc84311681ddf9c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define M 6 __global__ void add (int *A, int *B, int *C) { int idx = threadIdx.x; printf("idx = %d\n", idx); C[idx] = A[idx] + B[idx]; } int main () { int A[M], B[M], C[M]; int i, j; for (i = 0; i < M; ++i) { A[i] = i + 1; B[i] = M - i - 1; } int *d_a, *d_b, *d_c; int size = sizeof(int) * M; hipMalloc((void**)&d_a, size); hipMalloc((void**)&d_b, size); hipMalloc((void**)&d_c, size); hipMemcpy(d_a, &A, size, hipMemcpyHostToDevice); hipMemcpy(d_b, &B, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( add), dim3(1), dim3(M), 0, 0, d_a, d_b, d_c); hipMemcpy(&C, d_c, size, hipMemcpyDeviceToHost); printf("A:\n"); for (j = 0; j < M; ++j) { printf("%d\t", A[j]); } printf("\n"); printf("B:\n"); for (j = 0; j < M; ++j) { printf("%d\t", B[j]); } printf("\n"); printf("A + B:\n"); for (j = 0; j < M; ++j) { printf("%d\t", C[j]); } printf("\n"); hipFree(d_a); hipFree(d_b); hipFree(d_c); getchar(); return 0; }
a1c457d5d143cc72c76266e7bc84311681ddf9c4.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define M 6 __global__ void add (int *A, int *B, int *C) { int idx = threadIdx.x; printf("idx = %d\n", idx); C[idx] = A[idx] + B[idx]; } int main () { int A[M], B[M], C[M]; int i, j; for (i = 0; i < M; ++i) { A[i] = i + 1; B[i] = M - i - 1; } int *d_a, *d_b, *d_c; int size = sizeof(int) * M; cudaMalloc((void**)&d_a, size); cudaMalloc((void**)&d_b, size); cudaMalloc((void**)&d_c, size); cudaMemcpy(d_a, &A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, &B, size, cudaMemcpyHostToDevice); add<<<1, M>>>(d_a, d_b, d_c); cudaMemcpy(&C, d_c, size, cudaMemcpyDeviceToHost); printf("A:\n"); for (j = 0; j < M; ++j) { printf("%d\t", A[j]); } printf("\n"); printf("B:\n"); for (j = 0; j < M; ++j) { printf("%d\t", B[j]); } printf("\n"); printf("A + B:\n"); for (j = 0; j < M; ++j) { printf("%d\t", C[j]); } printf("\n"); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); getchar(); return 0; }
266208164ac0aa95e3fc8ac1502d8fc937155a3d.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) Microsoft Corporation. Licensed under the MIT License. */ /* Kernel implementation for blocking repeated n-grams. */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <math.h> #include <torch/extension.h> #include <vector> // Ban repeated ngrams of length = 'no_repeat_ngram_size' __global__ void banRepeatedTokens(long* __restrict__ tokens, float* __restrict__ lprobs, int max_predict_len, int vocab_size, int no_repeat_ngram_size) { auto row = blockIdx.x; auto col = threadIdx.x; auto start = row * (max_predict_len) + col; // Each thread compares ngram starting from // thread index with final ngram starting from // step - no_repeat_ngram_size +2 auto check_start_pos = blockDim.x; auto lprob_start = row * vocab_size; bool is_banned = true; extern __shared__ long tokens_shm[]; tokens_shm[col] = tokens[start]; if (col == blockDim.x - 1) { for (int i=1; i<no_repeat_ngram_size; i++){ if (col+i < max_predict_len){ tokens_shm[col + i] = tokens[start + i]; } } } __syncthreads(); for (int k = 0; k < no_repeat_ngram_size - 1; k++) { if (tokens_shm[col + k] != tokens_shm[check_start_pos + k]) { is_banned = false; } } if (is_banned == true) { auto token_to_be_banned = tokens_shm[col + no_repeat_ngram_size - 1]; lprobs[lprob_start + token_to_be_banned] = -INFINITY; } } // Allocate blocks and threads based on // batch size and sequence length and launch // kernel torch::Tensor ngram_repeat_block_cuda_forward(const torch::Tensor tokens, torch::Tensor lprobs, int bsz, int step, int beam_size, int no_repeat_ngram_size) { int threads = step - no_repeat_ngram_size + 2; if (threads <= 0) return lprobs; int max_predict_len = tokens.size(1); int vocab_size = lprobs.size(1); auto token_ptr = tokens.data_ptr<long>(); auto lprob_ptr = lprobs.data_ptr<float>(); int blocks = bsz * beam_size; int shared_mem_size = (step + 1) * sizeof(long); // Launching N blocks where N is number of samples in a batch 
(beams*bsz) // Launching T threads where T is number of previous ngrams in a sample // Allocating shared mem per block for fastser access of input tokens since // each token will be accessed N times to compare with current Ngram where // N is Ngram size. hipLaunchKernelGGL(( banRepeatedTokens), dim3(blocks), dim3(threads), shared_mem_size, 0, token_ptr, lprob_ptr, max_predict_len, vocab_size, no_repeat_ngram_size); return lprobs; }
266208164ac0aa95e3fc8ac1502d8fc937155a3d.cu
/* Copyright (c) Microsoft Corporation. Licensed under the MIT License. */ /* Kernel implementation for blocking repeated n-grams. */ #include <cuda.h> #include <cuda_runtime.h> #include <math.h> #include <torch/extension.h> #include <vector> // Ban repeated ngrams of length = 'no_repeat_ngram_size' __global__ void banRepeatedTokens(long* __restrict__ tokens, float* __restrict__ lprobs, int max_predict_len, int vocab_size, int no_repeat_ngram_size) { auto row = blockIdx.x; auto col = threadIdx.x; auto start = row * (max_predict_len) + col; // Each thread compares ngram starting from // thread index with final ngram starting from // step - no_repeat_ngram_size +2 auto check_start_pos = blockDim.x; auto lprob_start = row * vocab_size; bool is_banned = true; extern __shared__ long tokens_shm[]; tokens_shm[col] = tokens[start]; if (col == blockDim.x - 1) { for (int i=1; i<no_repeat_ngram_size; i++){ if (col+i < max_predict_len){ tokens_shm[col + i] = tokens[start + i]; } } } __syncthreads(); for (int k = 0; k < no_repeat_ngram_size - 1; k++) { if (tokens_shm[col + k] != tokens_shm[check_start_pos + k]) { is_banned = false; } } if (is_banned == true) { auto token_to_be_banned = tokens_shm[col + no_repeat_ngram_size - 1]; lprobs[lprob_start + token_to_be_banned] = -INFINITY; } } // Allocate blocks and threads based on // batch size and sequence length and launch // kernel torch::Tensor ngram_repeat_block_cuda_forward(const torch::Tensor tokens, torch::Tensor lprobs, int bsz, int step, int beam_size, int no_repeat_ngram_size) { int threads = step - no_repeat_ngram_size + 2; if (threads <= 0) return lprobs; int max_predict_len = tokens.size(1); int vocab_size = lprobs.size(1); auto token_ptr = tokens.data_ptr<long>(); auto lprob_ptr = lprobs.data_ptr<float>(); int blocks = bsz * beam_size; int shared_mem_size = (step + 1) * sizeof(long); // Launching N blocks where N is number of samples in a batch (beams*bsz) // Launching T threads where T is number of previous ngrams in 
a sample // Allocating shared mem per block for fastser access of input tokens since // each token will be accessed N times to compare with current Ngram where // N is Ngram size. banRepeatedTokens<<<blocks, threads, shared_mem_size>>>( token_ptr, lprob_ptr, max_predict_len, vocab_size, no_repeat_ngram_size); return lprobs; }
52743c1442189084a44ea1c05fc21d2396fff588.hip
// !!! This is a file automatically generated by hipify!!! // CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/random.h> #include "rasterizeKernels.h" #include "rasterizeTools.h" #include "SimpleTimer.h" glm::vec3 *framebuffer; fragment *depthbuffer; float *device_vbo; float *device_cbo; int *device_ibo; float *device_nbo; triangle* primitives; float *device_vbo_window_coords; int *device_lock_buffer; const float EMPTY_BUFFER_DEPTH = 10000.0f; void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } } //Handy dandy little hashing function that provides seeds for random number generation __host__ __device__ unsigned int hash(unsigned int a){ a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } //Writes a given fragment to a fragment buffer at a given location __host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; depthbuffer[index] = frag; } } //Reads a fragment from a given location in a fragment buffer __host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return depthbuffer[index]; }else{ fragment f; return f; } } //Writes a given pixel to a pixel buffer at a given location __host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){ 
if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; framebuffer[index] = value; } } //Reads a pixel from a pixel buffer at a given location __host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return framebuffer[index]; }else{ return glm::vec3(0,0,0); } } //Kernel that clears a given pixel buffer with a given color __global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = color; } } //Kernel that clears a given fragment buffer with a given fragment __global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ fragment f = frag; f.position.x = x; f.position.y = y; buffer[index] = f; } } __global__ void clearLockBuffer( glm::vec2 resolution, int *lock_buffer ) { int x = ( blockIdx.x * blockDim.x ) + threadIdx.x; int y = ( blockIdx.y * blockDim.y ) + threadIdx.y; int index = x + ( y * resolution.x ); if( x <= resolution.x && y <= resolution.y ) { lock_buffer[index] = 0; } } //Kernel that writes the image to the OpenGL PBO directly. 
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } /*********** DANNY'S PRIMARY CONTRIBUTION - START ***********/ // Convert vertices from object-space coordinates to window coordinates. __global__ void vertexShadeKernel( float *vbo, int vbosize, glm::mat4 mvp_matrix, glm::vec2 resolution, float *vbo_window_coords ) { int index = ( blockIdx.x * blockDim.x ) + threadIdx.x; // Divide by 3 because each vertex has 3 components (x, y, and z). if ( index < vbosize / 3 ) { int vbo_index = index * 3; // Create point to transform. glm::vec4 v( vbo[vbo_index + 0], vbo[vbo_index + 1], vbo[vbo_index + 2], 1.0f ); // Transform point from object-space to clip-space by multiplying by the composite model, view, projection matrices. glm::vec4 vt = mvp_matrix * v; // Transform point to NDC-space by dividing x-, y-, and z-components by w-component (perspective division). // [-1, 1]. glm::vec3 v_ndc( vt.x / vt.w, vt.y / vt.w, vt.z / vt.w ); // Transform x and y range from [-1, 1] to [0, 1]. glm::vec2 v_remapped( ( v_ndc.x + 1.0f ) / 2.0f, ( v_ndc.y + 1.0f ) / 2.0f ); // Transform x- and y-coordinates to window-space. glm::vec2 v_window( v_remapped.x * resolution.x, v_remapped.y * resolution.y ); // Save transformed vertices. 
vbo_window_coords[vbo_index + 0] = v_window.x; vbo_window_coords[vbo_index + 1] = v_window.y; vbo_window_coords[vbo_index + 2] = v_ndc.z; } } template<typename T> __host__ __device__ void simpleSwap( T &f1, T &f2 ) { T tmp = f1; f1 = f2; f2 = tmp; } // Construct primitives from vertices. __global__ void primitiveAssemblyKernel( float *vbo, int vbosize, float *cbo, int cbosize, int *ibo, int ibosize, float *nbo, int nbosize, float *vbo_window_coords, triangle *primitives ) { int index = ( blockIdx.x * blockDim.x ) + threadIdx.x; int primitivesCount = ibosize / 3; if ( index < primitivesCount ) { // Get indices of triangle vertices. int ibo_index = index * 3; int i0 = ibo[ibo_index + 0]; int i1 = ibo[ibo_index + 1]; int i2 = ibo[ibo_index + 2]; // Get positions of triangle vertices. int v0_index = i0 * 3; int v1_index = i1 * 3; int v2_index = i2 * 3; // Get screen-space positions of triangle vertices. glm::vec3 ssp0( vbo_window_coords[v0_index + 0], vbo_window_coords[v0_index + 1], vbo_window_coords[v0_index + 2] ); glm::vec3 ssp1( vbo_window_coords[v1_index + 0], vbo_window_coords[v1_index + 1], vbo_window_coords[v1_index + 2] ); glm::vec3 ssp2( vbo_window_coords[v2_index + 0], vbo_window_coords[v2_index + 1], vbo_window_coords[v2_index + 2] ); // Check if triangle is visible. glm::vec3 backface_check = glm::cross( ssp1 - ssp0, ssp2 - ssp0 ); if ( backface_check.z < 0.0f ) { triangle tri; tri.is_visible = false; primitives[index] = tri; return; } // Get positions of triangle vertices. glm::vec3 p0( vbo[v0_index + 0], vbo[v0_index + 1], vbo[v0_index + 2] ); glm::vec3 p1( vbo[v1_index + 0], vbo[v1_index + 1], vbo[v1_index + 2] ); glm::vec3 p2( vbo[v2_index + 0], vbo[v2_index + 1], vbo[v2_index + 2] ); // Get colors of triangle vertices. 
int c0_index = ( i0 % 3 ) * 3; int c1_index = ( i1 % 3 ) * 3; int c2_index = ( i2 % 3 ) * 3; glm::vec3 c0( cbo[c0_index + 0], cbo[c0_index + 1], cbo[c0_index + 2] ); glm::vec3 c1( cbo[c1_index + 0], cbo[c1_index + 1], cbo[c1_index + 2] ); glm::vec3 c2( cbo[c2_index + 0], cbo[c2_index + 1], cbo[c2_index + 2] ); // Get normals of triangle vertices. glm::vec3 n0( nbo[v0_index + 0], nbo[v0_index + 1], nbo[v0_index + 2] ); glm::vec3 n1( nbo[v1_index + 0], nbo[v1_index + 1], nbo[v1_index + 2] ); glm::vec3 n2( nbo[v2_index + 0], nbo[v2_index + 1], nbo[v2_index + 2] ); // Set triangle. primitives[index] = triangle( p0, p1, p2, ssp0, ssp1, ssp2, c0, c1, c2, n0, n1, n2 ); } } // Scanline rasterization per triangle. // See http://graphics.stanford.edu/courses/cs248-08/scan/scan1.html for a similar, but slightly different rasterization method. __global__ void rasterizationKernel( triangle *primitives, int primitivesCount, fragment *depthbuffer, glm::vec2 resolution, int *lock_buffer ) { int index = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( index < primitivesCount ) { triangle tri = primitives[index]; // Only rasterize current triangle if triangle is visible (determined in primitive assembly stage). if ( !tri.is_visible ) { return; } glm::vec3 aabb_min; glm::vec3 aabb_max; getAABBForTriangle( tri.ssp0, tri.ssp1, tri.ssp2, aabb_min, aabb_max ); // TODO: Clip AABB boxes outside render resolution. for ( int y = ceil( aabb_min.y ); y < ceil( aabb_max.y ); ++y ) { for ( int x = ceil( aabb_min.x ); x < ceil( aabb_max.x ); ++x ) { // Compute Barycentric coordinates of current fragment in screen-space triangle. glm::vec3 barycentric_coordinates = calculateBarycentricCoordinate( tri.ssp0, tri.ssp1, tri.ssp2, glm::vec2( x, y ) ); // (x, y) point is outside triangle. 
if ( barycentric_coordinates.x < 0.0f || barycentric_coordinates.y < 0.0f || barycentric_coordinates.z < 0.0f ) { continue; } float current_z = getZAtCoordinate( barycentric_coordinates, tri.p0, tri.p1, tri.p2 ); fragment buffer_fragment = getFromDepthbuffer( x, y, depthbuffer, resolution ); float buffer_z = buffer_fragment.position.z; // Update depth buffer atomically. if ( current_z < buffer_z ) { int current_index = ( y * resolution.x ) + x; bool is_waiting_to_update = true; while ( is_waiting_to_update ) { if ( atomicExch( &lock_buffer[current_index], 1 ) == 0 ) { fragment f; //f.color = ( tri.c0 * barycentric_coordinates.x ) + ( tri.c1 * barycentric_coordinates.y ) + ( tri.c2 * barycentric_coordinates.z ); f.color = glm::vec3( 0.5f, 0.5f, 0.5f ); f.normal = glm::normalize( ( tri.n0 * barycentric_coordinates.x ) + ( tri.n1 * barycentric_coordinates.y ) + ( tri.n2 * barycentric_coordinates.z ) ); f.position = ( tri.p0 * barycentric_coordinates.x ) + ( tri.p1 * barycentric_coordinates.y ) + ( tri.p2 * barycentric_coordinates.z ); writeToDepthbuffer( x, y, f, depthbuffer, resolution ); // Release lock. atomicExch( &lock_buffer[current_index], 0 ); is_waiting_to_update = false; } } } } } } } // Compute light interaction with fragments. // Write fragment colors to frame buffer. // Diffuse Lambertian shading. 
__global__ void fragmentShadeKernel( fragment *depthbuffer, glm::vec2 resolution ) { int x = ( blockIdx.x * blockDim.x ) + threadIdx.x; int y = ( blockIdx.y * blockDim.y ) + threadIdx.y; int index = x + ( y * resolution.x ); if ( x <= resolution.x && y <= resolution.y ) { fragment f = depthbuffer[index]; glm::vec3 light_pos_1( -10.0f, 0.0f, 10.0f ); float light_intensity_1 = 2.0f; glm::vec3 light_1_contribution = max( glm::dot( f.normal, glm::normalize( light_pos_1 - f.position )), 0.0f ) * depthbuffer[index].color * light_intensity_1; glm::vec3 light_pos_2( 10.0f, 0.0f, -10.0f ); float light_intensity_2 = 2.0f; glm::vec3 light_2_contribution = max( glm::dot( f.normal, glm::normalize( light_pos_2 - f.position )), 0.0f ) * depthbuffer[index].color * light_intensity_2; depthbuffer[index].color = light_1_contribution + light_2_contribution; //depthbuffer[index].color = max( glm::dot( f.normal, glm::normalize( light_pos - f.position )), 0.0f ) * depthbuffer[index].color * light_intensity; } } __host__ __device__ float computeDistanceBetweenTwoColors( glm::vec3 p1, glm::vec3 p2 ) { return sqrt( ( p2.x - p1.x ) * ( p2.x - p1.x ) + ( p2.y - p1.y ) * ( p2.y - p1.y ) + ( p2.z - p1.z ) * ( p2.z - p1.z ) ); } __host__ __device__ bool shouldBlurPixel( int x, int y, fragment *depthbuffer, glm::vec2 resolution ) { if ( x <= resolution.x && y <= resolution.y ) { const float threshold = 0.25f; glm::vec3 p1 = depthbuffer[x + ( y * ( int )resolution.x )].color; int i, j; // Left. i = x - 1; j = y; if ( i > 0 && i <= resolution.x && j > 0 && j <= resolution.y ) { glm::vec3 p2 = depthbuffer[i + ( j * ( int )resolution.x )].color; if ( computeDistanceBetweenTwoColors( p1, p2 ) > threshold ) { return true; } } // Top. i = x; j = y - 1; if ( i > 0 && i <= resolution.x && j > 0 && j <= resolution.y ) { glm::vec3 p2 = depthbuffer[i + ( j * ( int )resolution.x )].color; if ( computeDistanceBetweenTwoColors( p1, p2 ) > threshold ) { return true; } } // Right. 
i = x + 1; j = y; if ( i > 0 && i <= resolution.x && j > 0 && j <= resolution.y ) { glm::vec3 p2 = depthbuffer[i + ( j * ( int )resolution.x )].color; if ( computeDistanceBetweenTwoColors( p1, p2 ) > threshold ) { return true; } } // Bottom. i = x; j = y + 1; if ( i > 0 && i <= resolution.x && j > 0 && j <= resolution.y ) { glm::vec3 p2 = depthbuffer[i + ( j * ( int )resolution.x )].color; if ( computeDistanceBetweenTwoColors( p1, p2 ) > threshold ) { return true; } } } return false; } __global__ void antiAliasingPostProcess( fragment *depthbuffer, glm::vec2 resolution ) { int x = ( blockIdx.x * blockDim.x ) + threadIdx.x; int y = ( blockIdx.y * blockDim.y ) + threadIdx.y; int index = x + ( y * resolution.x ); if ( x <= resolution.x && y <= resolution.y ) { if ( shouldBlurPixel( x, y, depthbuffer, resolution ) ) { int pixel_count = 0; glm::vec3 sum( 0.0f, 0.0f, 0.0f ); for ( int i = x - 1; i < x + 1; ++i ) { for ( int j = y - 1; j < y + 1; ++j ) { if ( i > 0 && i <= resolution.x && j > 0 && j <= resolution.y ) { sum += depthbuffer[i + ( j * ( int )resolution.x )].color; ++pixel_count; } } } depthbuffer[index].color = glm::vec3( sum.x / pixel_count, sum.y / pixel_count, sum.z / pixel_count ); //depthbuffer[index].color = glm::vec3( 1.0f, 0.0f, 0.0f ); } } } /*********** DANNY'S PRIMARY CONTRIBUTION - END ***********/ // Write fragment colors to the framebuffer. 
__global__ void render( glm::vec2 resolution, fragment *depthbuffer, glm::vec3 *framebuffer ) { int x = ( blockIdx.x * blockDim.x ) + threadIdx.x; int y = ( blockIdx.y * blockDim.y ) + threadIdx.y; int index = x + ( y * resolution.x ); if ( x <= resolution.x && y <= resolution.y ) { framebuffer[index] = depthbuffer[index].color; } } // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaRasterizeCore( uchar4 *PBOpos, float frame, float *vbo, int vbosize, float *cbo, int cbosize, int *ibo, int ibosize, float *nbo, int nbosize, simpleCamera camera ) { SimpleTimer timer; float time_elapsed; // set up crucial magic int tileSize = 8; dim3 threadsPerBlock( tileSize, tileSize ); dim3 fullBlocksPerGrid( ( int )ceil( ( float )camera.resolution.x / ( float )tileSize ), ( int )ceil( ( float )camera.resolution.y / ( float )tileSize ) ); // set up framebuffer framebuffer = NULL; hipMalloc( ( void** )&framebuffer, ( int )camera.resolution.x * ( int )camera.resolution.y * sizeof( glm::vec3 ) ); // set up depthbuffer depthbuffer = NULL; hipMalloc( ( void** )&depthbuffer, ( int )camera.resolution.x * ( int )camera.resolution.y * sizeof( fragment ) ); // kernel launches to black out accumulated/unaccumlated pixel buffers and clear our scattering states hipLaunchKernelGGL(( clearImage), dim3(fullBlocksPerGrid), dim3(threadsPerBlock) , 0, 0, camera.resolution, framebuffer, glm::vec3( 0.0f, 0.0f, 0.0f ) ); fragment frag; frag.color = glm::vec3( 0.0f, 0.0f, 0.0f ); frag.normal = glm::vec3( 0.0f, 0.0f, 0.0f ); frag.position = glm::vec3( 0.0f, 0.0f, EMPTY_BUFFER_DEPTH ); hipLaunchKernelGGL(( clearDepthBuffer), dim3(fullBlocksPerGrid), dim3(threadsPerBlock) , 0, 0, camera.resolution, depthbuffer, frag ); //------------------------------ // memory stuff //------------------------------ primitives = NULL; hipMalloc( ( void** )&primitives, ( ibosize / 3 ) * sizeof( triangle ) ); device_ibo = NULL; hipMalloc( ( void** )&device_ibo, ibosize 
* sizeof( int ) ); hipMemcpy( device_ibo, ibo, ibosize * sizeof( int ), hipMemcpyHostToDevice ); device_vbo = NULL; hipMalloc( ( void** )&device_vbo, vbosize * sizeof( float ) ); hipMemcpy( device_vbo, vbo, vbosize * sizeof( float ), hipMemcpyHostToDevice ); device_vbo_window_coords = NULL; hipMalloc( ( void** )&device_vbo_window_coords, vbosize * sizeof( float ) ); device_lock_buffer = NULL; hipMalloc( ( void** )&device_lock_buffer, ( int )camera.resolution.x * ( int )camera.resolution.y * sizeof( int ) ); device_cbo = NULL; hipMalloc( ( void** )&device_cbo, cbosize * sizeof( float ) ); hipMemcpy( device_cbo, cbo, cbosize * sizeof( float ), hipMemcpyHostToDevice ); device_nbo = NULL; hipMalloc( ( void** )&device_nbo, nbosize * sizeof( float ) ); hipMemcpy( device_nbo, nbo, nbosize * sizeof( float ), hipMemcpyHostToDevice ); tileSize = 32; int primitiveBlocks = ceil( ( ( float )vbosize / 3 ) / ( ( float )tileSize ) ); //------------------------------ // initialize lock buffer //------------------------------ //timer.start(); hipLaunchKernelGGL(( clearLockBuffer), dim3(fullBlocksPerGrid), dim3(threadsPerBlock) , 0, 0, camera.resolution, device_lock_buffer ); hipDeviceSynchronize(); //time_elapsed = timer.stop(); //std::cout << "initialize lock buffer: " << time_elapsed << std::endl; //------------------------------ // vertex shader //------------------------------ //timer.start(); // Define model matrix. // Transforms from object-space to world-space. glm::mat4 model_matrix( 1.0f ); // Identity matrix. //glm::mat4 model_matrix = glm::rotate( glm::mat4( 1.0f ), frame * 2, glm::vec3( 0.0f, 1.0f, 0.0f )); // Define view matrix. // Transforms from world-space to camera-space. glm::mat4 view_matrix = glm::lookAt( camera.position, camera.target, camera.up ); // Define projection matrix. // Transforms from camera-space to clip-space. 
glm::mat4 projection_matrix = glm::perspective( camera.fov_y, camera.resolution.x / camera.resolution.y, camera.near_clip, camera.far_clip ); hipLaunchKernelGGL(( vertexShadeKernel), dim3(primitiveBlocks), dim3(tileSize) , 0, 0, device_vbo, vbosize, projection_matrix * view_matrix * model_matrix, camera.resolution, device_vbo_window_coords ); hipDeviceSynchronize(); //time_elapsed = timer.stop(); //std::cout << "vertex shader: " << time_elapsed << std::endl; //------------------------------ // primitive assembly //------------------------------ //timer.start(); primitiveBlocks = ceil( ( ( float )ibosize / 3 ) / ( ( float )tileSize ) ); hipLaunchKernelGGL(( primitiveAssemblyKernel), dim3(primitiveBlocks), dim3(tileSize) , 0, 0, device_vbo, vbosize, device_cbo, cbosize, device_ibo, ibosize, device_nbo, nbosize, device_vbo_window_coords, primitives ); hipDeviceSynchronize(); //time_elapsed = timer.stop(); //std::cout << "primitive assembly: " << time_elapsed << std::endl; //------------------------------ // rasterization //------------------------------ //timer.start(); hipLaunchKernelGGL(( rasterizationKernel), dim3(primitiveBlocks), dim3(tileSize) , 0, 0, primitives, ibosize / 3, depthbuffer, camera.resolution, device_lock_buffer ); hipDeviceSynchronize(); //time_elapsed = timer.stop(); //std::cout << "rasterization: " << time_elapsed << std::endl; //------------------------------ // fragment shader //------------------------------ //timer.start(); hipLaunchKernelGGL(( fragmentShadeKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock) , 0, 0, depthbuffer, camera.resolution ); hipDeviceSynchronize(); //time_elapsed = timer.stop(); //std::cout << "fragment shader: " << time_elapsed << std::endl; //------------------------------ // anti-aliasing //------------------------------ //timer.start(); hipLaunchKernelGGL(( antiAliasingPostProcess), dim3(fullBlocksPerGrid), dim3(threadsPerBlock) , 0, 0, depthbuffer, camera.resolution ); hipDeviceSynchronize(); //time_elapsed 
= timer.stop(); //std::cout << "anti-aliasing: " << time_elapsed << std::endl; //------------------------------ // write fragments to framebuffer //------------------------------ //timer.start(); hipLaunchKernelGGL(( render), dim3(fullBlocksPerGrid), dim3(threadsPerBlock) , 0, 0, camera.resolution, depthbuffer, framebuffer ); hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock) , 0, 0, PBOpos, camera.resolution, framebuffer ); hipDeviceSynchronize(); //time_elapsed = timer.stop(); //std::cout << "write fragments to framebuffer: " << time_elapsed << std::endl; kernelCleanup(); checkCUDAError("Kernel failed!"); } void kernelCleanup(){ hipFree( primitives ); hipFree( device_vbo ); hipFree( device_cbo ); hipFree( device_ibo ); hipFree( device_nbo ); hipFree( framebuffer ); hipFree( depthbuffer ); hipFree( device_vbo_window_coords ); hipFree( device_lock_buffer ); }
52743c1442189084a44ea1c05fc21d2396fff588.cu
// CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania #include <stdio.h> #include <cuda.h> #include <cmath> #include <thrust/random.h> #include "rasterizeKernels.h" #include "rasterizeTools.h" #include "SimpleTimer.h" glm::vec3 *framebuffer; fragment *depthbuffer; float *device_vbo; float *device_cbo; int *device_ibo; float *device_nbo; triangle* primitives; float *device_vbo_window_coords; int *device_lock_buffer; const float EMPTY_BUFFER_DEPTH = 10000.0f; void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } //Handy dandy little hashing function that provides seeds for random number generation __host__ __device__ unsigned int hash(unsigned int a){ a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } //Writes a given fragment to a fragment buffer at a given location __host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; depthbuffer[index] = frag; } } //Reads a fragment from a given location in a fragment buffer __host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return depthbuffer[index]; }else{ fragment f; return f; } } //Writes a given pixel to a pixel buffer at a given location __host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; 
framebuffer[index] = value; } } //Reads a pixel from a pixel buffer at a given location __host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return framebuffer[index]; }else{ return glm::vec3(0,0,0); } } //Kernel that clears a given pixel buffer with a given color __global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = color; } } //Kernel that clears a given fragment buffer with a given fragment __global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ fragment f = frag; f.position.x = x; f.position.y = y; buffer[index] = f; } } __global__ void clearLockBuffer( glm::vec2 resolution, int *lock_buffer ) { int x = ( blockIdx.x * blockDim.x ) + threadIdx.x; int y = ( blockIdx.y * blockDim.y ) + threadIdx.y; int index = x + ( y * resolution.x ); if( x <= resolution.x && y <= resolution.y ) { lock_buffer[index] = 0; } } //Kernel that writes the image to the OpenGL PBO directly. 
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } /*********** DANNY'S PRIMARY CONTRIBUTION - START ***********/ // Convert vertices from object-space coordinates to window coordinates. __global__ void vertexShadeKernel( float *vbo, int vbosize, glm::mat4 mvp_matrix, glm::vec2 resolution, float *vbo_window_coords ) { int index = ( blockIdx.x * blockDim.x ) + threadIdx.x; // Divide by 3 because each vertex has 3 components (x, y, and z). if ( index < vbosize / 3 ) { int vbo_index = index * 3; // Create point to transform. glm::vec4 v( vbo[vbo_index + 0], vbo[vbo_index + 1], vbo[vbo_index + 2], 1.0f ); // Transform point from object-space to clip-space by multiplying by the composite model, view, projection matrices. glm::vec4 vt = mvp_matrix * v; // Transform point to NDC-space by dividing x-, y-, and z-components by w-component (perspective division). // [-1, 1]. glm::vec3 v_ndc( vt.x / vt.w, vt.y / vt.w, vt.z / vt.w ); // Transform x and y range from [-1, 1] to [0, 1]. glm::vec2 v_remapped( ( v_ndc.x + 1.0f ) / 2.0f, ( v_ndc.y + 1.0f ) / 2.0f ); // Transform x- and y-coordinates to window-space. glm::vec2 v_window( v_remapped.x * resolution.x, v_remapped.y * resolution.y ); // Save transformed vertices. 
vbo_window_coords[vbo_index + 0] = v_window.x; vbo_window_coords[vbo_index + 1] = v_window.y; vbo_window_coords[vbo_index + 2] = v_ndc.z; } } template<typename T> __host__ __device__ void simpleSwap( T &f1, T &f2 ) { T tmp = f1; f1 = f2; f2 = tmp; } // Construct primitives from vertices. __global__ void primitiveAssemblyKernel( float *vbo, int vbosize, float *cbo, int cbosize, int *ibo, int ibosize, float *nbo, int nbosize, float *vbo_window_coords, triangle *primitives ) { int index = ( blockIdx.x * blockDim.x ) + threadIdx.x; int primitivesCount = ibosize / 3; if ( index < primitivesCount ) { // Get indices of triangle vertices. int ibo_index = index * 3; int i0 = ibo[ibo_index + 0]; int i1 = ibo[ibo_index + 1]; int i2 = ibo[ibo_index + 2]; // Get positions of triangle vertices. int v0_index = i0 * 3; int v1_index = i1 * 3; int v2_index = i2 * 3; // Get screen-space positions of triangle vertices. glm::vec3 ssp0( vbo_window_coords[v0_index + 0], vbo_window_coords[v0_index + 1], vbo_window_coords[v0_index + 2] ); glm::vec3 ssp1( vbo_window_coords[v1_index + 0], vbo_window_coords[v1_index + 1], vbo_window_coords[v1_index + 2] ); glm::vec3 ssp2( vbo_window_coords[v2_index + 0], vbo_window_coords[v2_index + 1], vbo_window_coords[v2_index + 2] ); // Check if triangle is visible. glm::vec3 backface_check = glm::cross( ssp1 - ssp0, ssp2 - ssp0 ); if ( backface_check.z < 0.0f ) { triangle tri; tri.is_visible = false; primitives[index] = tri; return; } // Get positions of triangle vertices. glm::vec3 p0( vbo[v0_index + 0], vbo[v0_index + 1], vbo[v0_index + 2] ); glm::vec3 p1( vbo[v1_index + 0], vbo[v1_index + 1], vbo[v1_index + 2] ); glm::vec3 p2( vbo[v2_index + 0], vbo[v2_index + 1], vbo[v2_index + 2] ); // Get colors of triangle vertices. 
int c0_index = ( i0 % 3 ) * 3; int c1_index = ( i1 % 3 ) * 3; int c2_index = ( i2 % 3 ) * 3; glm::vec3 c0( cbo[c0_index + 0], cbo[c0_index + 1], cbo[c0_index + 2] ); glm::vec3 c1( cbo[c1_index + 0], cbo[c1_index + 1], cbo[c1_index + 2] ); glm::vec3 c2( cbo[c2_index + 0], cbo[c2_index + 1], cbo[c2_index + 2] ); // Get normals of triangle vertices. glm::vec3 n0( nbo[v0_index + 0], nbo[v0_index + 1], nbo[v0_index + 2] ); glm::vec3 n1( nbo[v1_index + 0], nbo[v1_index + 1], nbo[v1_index + 2] ); glm::vec3 n2( nbo[v2_index + 0], nbo[v2_index + 1], nbo[v2_index + 2] ); // Set triangle. primitives[index] = triangle( p0, p1, p2, ssp0, ssp1, ssp2, c0, c1, c2, n0, n1, n2 ); } } // Scanline rasterization per triangle. // See http://graphics.stanford.edu/courses/cs248-08/scan/scan1.html for a similar, but slightly different rasterization method. __global__ void rasterizationKernel( triangle *primitives, int primitivesCount, fragment *depthbuffer, glm::vec2 resolution, int *lock_buffer ) { int index = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( index < primitivesCount ) { triangle tri = primitives[index]; // Only rasterize current triangle if triangle is visible (determined in primitive assembly stage). if ( !tri.is_visible ) { return; } glm::vec3 aabb_min; glm::vec3 aabb_max; getAABBForTriangle( tri.ssp0, tri.ssp1, tri.ssp2, aabb_min, aabb_max ); // TODO: Clip AABB boxes outside render resolution. for ( int y = ceil( aabb_min.y ); y < ceil( aabb_max.y ); ++y ) { for ( int x = ceil( aabb_min.x ); x < ceil( aabb_max.x ); ++x ) { // Compute Barycentric coordinates of current fragment in screen-space triangle. glm::vec3 barycentric_coordinates = calculateBarycentricCoordinate( tri.ssp0, tri.ssp1, tri.ssp2, glm::vec2( x, y ) ); // (x, y) point is outside triangle. 
if ( barycentric_coordinates.x < 0.0f || barycentric_coordinates.y < 0.0f || barycentric_coordinates.z < 0.0f ) { continue; } float current_z = getZAtCoordinate( barycentric_coordinates, tri.p0, tri.p1, tri.p2 ); fragment buffer_fragment = getFromDepthbuffer( x, y, depthbuffer, resolution ); float buffer_z = buffer_fragment.position.z; // Update depth buffer atomically. if ( current_z < buffer_z ) { int current_index = ( y * resolution.x ) + x; bool is_waiting_to_update = true; while ( is_waiting_to_update ) { if ( atomicExch( &lock_buffer[current_index], 1 ) == 0 ) { fragment f; //f.color = ( tri.c0 * barycentric_coordinates.x ) + ( tri.c1 * barycentric_coordinates.y ) + ( tri.c2 * barycentric_coordinates.z ); f.color = glm::vec3( 0.5f, 0.5f, 0.5f ); f.normal = glm::normalize( ( tri.n0 * barycentric_coordinates.x ) + ( tri.n1 * barycentric_coordinates.y ) + ( tri.n2 * barycentric_coordinates.z ) ); f.position = ( tri.p0 * barycentric_coordinates.x ) + ( tri.p1 * barycentric_coordinates.y ) + ( tri.p2 * barycentric_coordinates.z ); writeToDepthbuffer( x, y, f, depthbuffer, resolution ); // Release lock. atomicExch( &lock_buffer[current_index], 0 ); is_waiting_to_update = false; } } } } } } } // Compute light interaction with fragments. // Write fragment colors to frame buffer. // Diffuse Lambertian shading. 
__global__ void fragmentShadeKernel( fragment *depthbuffer, glm::vec2 resolution ) { int x = ( blockIdx.x * blockDim.x ) + threadIdx.x; int y = ( blockIdx.y * blockDim.y ) + threadIdx.y; int index = x + ( y * resolution.x ); if ( x <= resolution.x && y <= resolution.y ) { fragment f = depthbuffer[index]; glm::vec3 light_pos_1( -10.0f, 0.0f, 10.0f ); float light_intensity_1 = 2.0f; glm::vec3 light_1_contribution = max( glm::dot( f.normal, glm::normalize( light_pos_1 - f.position )), 0.0f ) * depthbuffer[index].color * light_intensity_1; glm::vec3 light_pos_2( 10.0f, 0.0f, -10.0f ); float light_intensity_2 = 2.0f; glm::vec3 light_2_contribution = max( glm::dot( f.normal, glm::normalize( light_pos_2 - f.position )), 0.0f ) * depthbuffer[index].color * light_intensity_2; depthbuffer[index].color = light_1_contribution + light_2_contribution; //depthbuffer[index].color = max( glm::dot( f.normal, glm::normalize( light_pos - f.position )), 0.0f ) * depthbuffer[index].color * light_intensity; } } __host__ __device__ float computeDistanceBetweenTwoColors( glm::vec3 p1, glm::vec3 p2 ) { return sqrt( ( p2.x - p1.x ) * ( p2.x - p1.x ) + ( p2.y - p1.y ) * ( p2.y - p1.y ) + ( p2.z - p1.z ) * ( p2.z - p1.z ) ); } __host__ __device__ bool shouldBlurPixel( int x, int y, fragment *depthbuffer, glm::vec2 resolution ) { if ( x <= resolution.x && y <= resolution.y ) { const float threshold = 0.25f; glm::vec3 p1 = depthbuffer[x + ( y * ( int )resolution.x )].color; int i, j; // Left. i = x - 1; j = y; if ( i > 0 && i <= resolution.x && j > 0 && j <= resolution.y ) { glm::vec3 p2 = depthbuffer[i + ( j * ( int )resolution.x )].color; if ( computeDistanceBetweenTwoColors( p1, p2 ) > threshold ) { return true; } } // Top. i = x; j = y - 1; if ( i > 0 && i <= resolution.x && j > 0 && j <= resolution.y ) { glm::vec3 p2 = depthbuffer[i + ( j * ( int )resolution.x )].color; if ( computeDistanceBetweenTwoColors( p1, p2 ) > threshold ) { return true; } } // Right. 
i = x + 1; j = y; if ( i > 0 && i <= resolution.x && j > 0 && j <= resolution.y ) { glm::vec3 p2 = depthbuffer[i + ( j * ( int )resolution.x )].color; if ( computeDistanceBetweenTwoColors( p1, p2 ) > threshold ) { return true; } } // Bottom. i = x; j = y + 1; if ( i > 0 && i <= resolution.x && j > 0 && j <= resolution.y ) { glm::vec3 p2 = depthbuffer[i + ( j * ( int )resolution.x )].color; if ( computeDistanceBetweenTwoColors( p1, p2 ) > threshold ) { return true; } } } return false; } __global__ void antiAliasingPostProcess( fragment *depthbuffer, glm::vec2 resolution ) { int x = ( blockIdx.x * blockDim.x ) + threadIdx.x; int y = ( blockIdx.y * blockDim.y ) + threadIdx.y; int index = x + ( y * resolution.x ); if ( x <= resolution.x && y <= resolution.y ) { if ( shouldBlurPixel( x, y, depthbuffer, resolution ) ) { int pixel_count = 0; glm::vec3 sum( 0.0f, 0.0f, 0.0f ); for ( int i = x - 1; i < x + 1; ++i ) { for ( int j = y - 1; j < y + 1; ++j ) { if ( i > 0 && i <= resolution.x && j > 0 && j <= resolution.y ) { sum += depthbuffer[i + ( j * ( int )resolution.x )].color; ++pixel_count; } } } depthbuffer[index].color = glm::vec3( sum.x / pixel_count, sum.y / pixel_count, sum.z / pixel_count ); //depthbuffer[index].color = glm::vec3( 1.0f, 0.0f, 0.0f ); } } } /*********** DANNY'S PRIMARY CONTRIBUTION - END ***********/ // Write fragment colors to the framebuffer. 
// Final gather: copy each fragment's shaded color out of the depth buffer
// into the framebuffer. Expects a 2D launch whose grid covers the whole
// image (one thread per pixel).
__global__ void render( glm::vec2 resolution, fragment *depthbuffer, glm::vec3 *framebuffer ) {
  int x = ( blockIdx.x * blockDim.x ) + threadIdx.x;
  int y = ( blockIdx.y * blockDim.y ) + threadIdx.y;
  int index = x + ( y * resolution.x );

  // BUGFIX: the guard must be strict '<'. The original '<=' admitted threads
  // with x == resolution.x (or y == resolution.y); their index lands on the
  // wrong pixel one row ahead and, for the bottom-right edge, one full row
  // past the end of both buffers (out-of-bounds read and write).
  if ( x < resolution.x && y < resolution.y ) {
    framebuffer[index] = depthbuffer[index].color;
  }
}

// Wrapper for the __global__ calls that sets up the kernel launches and does
// the per-frame memory management. Runs the full pipeline for one frame:
// clear -> vertex shade -> primitive assembly -> rasterize -> fragment shade
// -> anti-alias -> copy to PBO.
// NOTE(review): framebuffer, depthbuffer, primitives and the device_* buffers
// are file-scope globals declared above this chunk; they are (re)allocated
// here on every call and released by kernelCleanup() at the end.
void cudaRasterizeCore( uchar4 *PBOpos, float frame, float *vbo, int vbosize, float *cbo, int cbosize, int *ibo, int ibosize, float *nbo, int nbosize, simpleCamera camera ) {
  SimpleTimer timer;    // timing plumbing; the per-stage timing calls are
  float time_elapsed;   // currently disabled (kept for easy re-enabling)

  // set up crucial magic: 8x8 thread tiles covering the full resolution
  int tileSize = 8;
  dim3 threadsPerBlock( tileSize, tileSize );
  dim3 fullBlocksPerGrid( ( int )ceil( ( float )camera.resolution.x / ( float )tileSize ),
                          ( int )ceil( ( float )camera.resolution.y / ( float )tileSize ) );

  // set up framebuffer
  framebuffer = NULL;
  cudaMalloc( ( void** )&framebuffer, ( int )camera.resolution.x * ( int )camera.resolution.y * sizeof( glm::vec3 ) );

  // set up depthbuffer
  depthbuffer = NULL;
  cudaMalloc( ( void** )&depthbuffer, ( int )camera.resolution.x * ( int )camera.resolution.y * sizeof( fragment ) );

  // black out the framebuffer and reset every depth-buffer entry to an empty
  // fragment parked at the sentinel depth EMPTY_BUFFER_DEPTH
  clearImage<<< fullBlocksPerGrid, threadsPerBlock >>>( camera.resolution, framebuffer, glm::vec3( 0.0f, 0.0f, 0.0f ) );

  fragment frag;
  frag.color = glm::vec3( 0.0f, 0.0f, 0.0f );
  frag.normal = glm::vec3( 0.0f, 0.0f, 0.0f );
  frag.position = glm::vec3( 0.0f, 0.0f, EMPTY_BUFFER_DEPTH );
  clearDepthBuffer<<< fullBlocksPerGrid, threadsPerBlock >>>( camera.resolution, depthbuffer, frag );

  //------------------------------
  // memory stuff
  //------------------------------
  primitives = NULL;
  cudaMalloc( ( void** )&primitives, ( ibosize / 3 ) * sizeof( triangle ) );

  device_ibo = NULL;
  cudaMalloc( ( void** )&device_ibo, ibosize * sizeof( int ) );
  cudaMemcpy( device_ibo, ibo, ibosize * sizeof( int ), cudaMemcpyHostToDevice );

  device_vbo = NULL;
  cudaMalloc( ( void** )&device_vbo, vbosize * sizeof( float ) );
  cudaMemcpy( device_vbo, vbo, vbosize * sizeof( float ), cudaMemcpyHostToDevice );

  // window-space vertex positions, filled in by the vertex shader below
  device_vbo_window_coords = NULL;
  cudaMalloc( ( void** )&device_vbo_window_coords, vbosize * sizeof( float ) );

  // per-pixel lock words handed to the rasterization kernel (kernel is
  // defined elsewhere in this file)
  device_lock_buffer = NULL;
  cudaMalloc( ( void** )&device_lock_buffer, ( int )camera.resolution.x * ( int )camera.resolution.y * sizeof( int ) );

  device_cbo = NULL;
  cudaMalloc( ( void** )&device_cbo, cbosize * sizeof( float ) );
  cudaMemcpy( device_cbo, cbo, cbosize * sizeof( float ), cudaMemcpyHostToDevice );

  device_nbo = NULL;
  cudaMalloc( ( void** )&device_nbo, nbosize * sizeof( float ) );
  cudaMemcpy( device_nbo, nbo, nbosize * sizeof( float ), cudaMemcpyHostToDevice );

  // 1D launches from here on: one thread per vertex (later, per triangle)
  tileSize = 32;
  int primitiveBlocks = ceil( ( ( float )vbosize / 3 ) / ( ( float )tileSize ) );

  //------------------------------
  // initialize lock buffer
  //------------------------------
  clearLockBuffer<<< fullBlocksPerGrid, threadsPerBlock >>>( camera.resolution, device_lock_buffer );
  cudaDeviceSynchronize();

  //------------------------------
  // vertex shader
  //------------------------------
  // Model matrix: object-space -> world-space (identity for now).
  glm::mat4 model_matrix( 1.0f );
  //glm::mat4 model_matrix = glm::rotate( glm::mat4( 1.0f ), frame * 2, glm::vec3( 0.0f, 1.0f, 0.0f ));

  // View matrix: world-space -> camera-space.
  glm::mat4 view_matrix = glm::lookAt( camera.position, camera.target, camera.up );

  // Projection matrix: camera-space -> clip-space.
  glm::mat4 projection_matrix = glm::perspective( camera.fov_y, camera.resolution.x / camera.resolution.y, camera.near_clip, camera.far_clip );

  vertexShadeKernel<<< primitiveBlocks, tileSize >>>( device_vbo, vbosize, projection_matrix * view_matrix * model_matrix, camera.resolution, device_vbo_window_coords );
  cudaDeviceSynchronize();

  //------------------------------
  // primitive assembly
  //------------------------------
  primitiveBlocks = ceil( ( ( float )ibosize / 3 ) / ( ( float )tileSize ) );
  primitiveAssemblyKernel<<< primitiveBlocks, tileSize >>>( device_vbo, vbosize, device_cbo, cbosize, device_ibo, ibosize, device_nbo, nbosize, device_vbo_window_coords, primitives );
  cudaDeviceSynchronize();

  //------------------------------
  // rasterization
  //------------------------------
  rasterizationKernel<<< primitiveBlocks, tileSize >>>( primitives, ibosize / 3, depthbuffer, camera.resolution, device_lock_buffer );
  cudaDeviceSynchronize();

  //------------------------------
  // fragment shader
  //------------------------------
  fragmentShadeKernel<<< fullBlocksPerGrid, threadsPerBlock >>>( depthbuffer, camera.resolution );
  cudaDeviceSynchronize();

  //------------------------------
  // anti-aliasing
  //------------------------------
  antiAliasingPostProcess<<< fullBlocksPerGrid, threadsPerBlock >>>( depthbuffer, camera.resolution );
  cudaDeviceSynchronize();

  //------------------------------
  // write fragments to framebuffer
  //------------------------------
  render<<< fullBlocksPerGrid, threadsPerBlock >>>( camera.resolution, depthbuffer, framebuffer );
  sendImageToPBO<<< fullBlocksPerGrid, threadsPerBlock >>>( PBOpos, camera.resolution, framebuffer );
  cudaDeviceSynchronize();

  kernelCleanup();
  checkCUDAError("Kernel failed!");
}

// Releases every device buffer allocated by cudaRasterizeCore for this frame.
void kernelCleanup(){
  cudaFree( primitives );
  cudaFree( device_vbo );
  cudaFree( device_cbo );
  cudaFree( device_ibo );
  cudaFree( device_nbo );
  cudaFree( framebuffer );
  cudaFree( depthbuffer );
  cudaFree( device_vbo_window_coords );
  cudaFree( device_lock_buffer );
}
33b3f86c7a2c977765d296067c60486add83a53d.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "ATen/ATen.h"
// for cuda::type<scalar_t>;
#include "ATen/hip/HIPTypeConversion.cuh"
// line 107, 303, 316 for lambda syntax
#include "utils.h"

// Copies a (pooled_height x pooled_width) window, sampled every `stride`
// pixels and centered on each point in `fixs`, out of a channel-FIRST
// (c, h, w) image. Coordinates that fall outside the image are clamped to
// the border, so edge pixels are replicated.
//
//   numels   - total output element count (n * channels * ph * pw)
//   image    - source image, laid out channels * height * width
//   fixs     - one int16 (row, col) center per window, laid out 2 * n
//   top_data - output, laid out n * channels * ph * pw
//
// CUDA_1D_KERNEL_LOOP comes from utils.h — presumably a grid-stride loop
// over [0, numels); confirm against utils.h.
template <typename T>
__global__ void Crop2DFKernel(
    const int numels, const T* image, const int16_t * fixs,
    const int height, const int width, const int channels,
    const int pooled_height, const int pooled_width,
    const int stride, T* top_data) {
  CUDA_1D_KERNEL_LOOP(i, numels) {
    // Decompose the flat output index: w varies fastest, then h, c, n.
    int w = i % pooled_width;
    int h = (i / pooled_width) % pooled_height;
    int c = (i / pooled_width / pooled_height) % channels;
    int n = i / pooled_width / pooled_height / channels;
    const int16_t* pos = fixs + 2*n;
    // Source coordinate: offset from the window center by (h, w) steps.
    int row = pos[0] - (pooled_height/2 - h)*stride;
    int col = pos[1] - (pooled_width/2 - w)*stride;
    // Clamp to the image so windows near the border replicate edge pixels.
    if (row < 0) row = 0;
    if (row >= height) row = height - 1;
    if (col < 0) col = 0;
    if (col >= width) col = width - 1;
    int j = c * height * width + row * width + col;
    top_data[i] = image[j];
  }
}

// Same windowed crop as Crop2DFKernel, but for a channel-LAST (h, w, c)
// image; the output is laid out n * ph * pw * channels accordingly.
template <typename T>
__global__ void Crop2DLKernel(
    const int numels, const T* image, const int16_t * fixs,
    const int height, const int width, const int channels,
    const int pooled_height, const int pooled_width,
    const int stride, T* top_data) {
  CUDA_1D_KERNEL_LOOP(i, numels) {
    // Decompose the flat output index: c varies fastest, then w, h, n.
    int c = i % channels;
    int w = (i / channels) % pooled_width;
    int h = (i / channels / pooled_width ) % pooled_height;
    int n = i / channels / pooled_width / pooled_height;
    const int16_t* pos = fixs + 2*n;
    int row = pos[0] - (pooled_height/2 - h)*stride;
    int col = pos[1] - (pooled_width/2 - w)*stride;
    if (row < 0) row = 0;
    if (row >= height) row = height - 1;
    if (col < 0) col = 0;
    if (col >= width) col = width - 1;
    int j = row * width* channels + col * channels + c;
    top_data[i] = image[j];
  }
}

// Crops one window per center point from a single 2-D image on the GPU.
//   X      - image: (h, w) when 2-D; (c, h, w) when `first`; else (h, w, c)
//   R      - int16 centers, one (row, col) pair per output window
//   first  - selects the channel-first kernel and output layout
// Returns a zero-initialized-then-filled tensor with one window per row of R.
at::Tensor crop2d_gpu( const at::Tensor &X, // 3d image hwc
    const at::Tensor &R, // boxes
    int pooled_height, int pooled_width,
    int stride=1, bool first=false ) {
  at::Tensor output;
  int channels, off=0;
  if (X.dim() == 2) {
    // Single-channel image: `off` shifts the X.size() indices used below.
    channels = 1; off = 1;
    output = X.type().zeros( {R.size(0), pooled_height, pooled_width});
  } else if (first) {
    channels = X.size(0);
    output = X.type().zeros( {R.size(0), channels, pooled_height, pooled_width});
  } else {
    channels = X.size(2);
    output = X.type().zeros( {R.size(0), pooled_height, pooled_width, channels});
  }
  const int output_size = output.numel();
  const int threads = 1024;
  const int blocks = (output_size + threads - 1) / threads;  // ceil-div
  if (first) {
    AT_DISPATCH_ALL_TYPES(X.type(), "crop2d_cuda", [&] {
      using cuda_scalar_t = at::cuda::type<scalar_t>;
      hipLaunchKernelGGL(( Crop2DFKernel<cuda_scalar_t>) , dim3(blocks), dim3(threads), 0, 0,
          output_size, X.data<cuda_scalar_t>(), R.data<int16_t>(),
          X.size(1-off), X.size(2-off), channels,
          pooled_height, pooled_width, stride,
          output.data<cuda_scalar_t>());
    });
  } else {
    AT_DISPATCH_ALL_TYPES(X.type(), "crop2d_cuda", [&] {
      using cuda_scalar_t = at::cuda::type<scalar_t>;
      hipLaunchKernelGGL(( Crop2DLKernel<cuda_scalar_t>) , dim3(blocks), dim3(threads), 0, 0,
          output_size, X.data<cuda_scalar_t>(), R.data<int16_t>(),
          X.size(0), X.size(1), channels,
          pooled_height, pooled_width, stride,
          output.data<cuda_scalar_t>());
    });
  }
  return output;
}

// 3-D (volumetric) analogue of Crop2DFKernel for a channel-FIRST
// (c, l, h, w) volume; fixs holds int16 (len, row, col) triples per window.
template <typename T>
__global__ void Crop3DFKernel(
    const int numels, const T* image, const int16_t * fixs,
    const int length, const int height, const int width, const int channels,
    const int pooled_length, const int pooled_height, const int pooled_width,
    const int stride, T* top_data) {
  CUDA_1D_KERNEL_LOOP(i, numels) {
    // Output index decomposition: w fastest, then h, l, c, n.
    int w = i % pooled_width;
    int h = (i / pooled_width) % pooled_height;
    int l = (i / pooled_width / pooled_height) % pooled_length;
    int c = (i / pooled_width / pooled_height / pooled_length) % channels;
    int n = i / pooled_width / pooled_height / pooled_length / channels;
    const int16_t * pos = fixs + 3*n;
    int len = pos[0] - (pooled_length/2 - l)*stride;
    int row = pos[1] - (pooled_height/2 - h)*stride;
    int col = pos[2] - (pooled_width/2 - w)*stride;
    // Clamp each axis independently to stay inside the volume.
    if (len < 0) len = 0;
    if (len >= length) len = length - 1;
    if (row < 0) row = 0;
    if (row >= height) row = height - 1;
    if (col < 0) col = 0;
    if (col >= width) col = width - 1;
    int j = c * length * height * width + len * height * width + row * width + col;
    top_data[i] = image[j];
  }
}

// 3-D analogue of Crop2DLKernel for a channel-LAST (l, h, w, c) volume.
template <typename T>
__global__ void Crop3DLKernel(
    const int numels, const T* image, const int16_t * fixs,
    const int length, const int height, const int width, const int channels,
    const int pooled_length, const int pooled_height, const int pooled_width,
    const int stride, T* top_data) {
  CUDA_1D_KERNEL_LOOP(i, numels) {
    // Output index decomposition: c fastest, then w, h, l, n.
    int c = i % channels;
    int w = (i / channels) % pooled_width;
    int h = (i / channels / pooled_width ) % pooled_height;
    int l = (i / channels / pooled_width / pooled_height) % pooled_length;
    int n = i / channels / pooled_width / pooled_height / pooled_length;
    const int16_t * pos = fixs + 3*n;
    int len = pos[0] - (pooled_length/2 - l)*stride;
    int row = pos[1] - (pooled_height/2 - h)*stride;
    int col = pos[2] - (pooled_width/2 - w)*stride;
    if (len < 0) len = 0;
    if (len >= length) len = length - 1;
    if (row < 0) row = 0;
    if (row >= height) row = height - 1;
    if (col < 0) col = 0;
    if (col >= width) col = width - 1;
    int j = len*height*width*channels + row * width* channels + col * channels + c;
    top_data[i] = image[j];
  }
}

// Crops one 3-D window per center point from a single volume on the GPU.
//   X      - volume: (l, h, w) when 3-D; (c, l, h, w) when `first`;
//            else (l, h, w, c)
//   R      - int16 centers, one (len, row, col) triple per output window
//   first  - selects the channel-first kernel and output layout
at::Tensor crop3d_gpu( const at::Tensor &X, // 4d image thwc
    const at::Tensor &R, // boxes
    int pooled_length, int pooled_height, int pooled_width,
    int stride=1, bool first=false ) {
  at::Tensor output;
  int channels, off=0;
  if (X.dim() == 3) {
    // Single-channel volume: `off` shifts the X.size() indices used below.
    channels = 1; off = 1;
    output = X.type().zeros( {R.size(0), pooled_length, pooled_height, pooled_width});
  } else if (first) {
    channels = X.size(0);
    output = X.type().zeros( {R.size(0), channels, pooled_length, pooled_height, pooled_width});
  } else {
    channels = X.size(3);
    output = X.type().zeros( {R.size(0), pooled_length, pooled_height, pooled_width, channels});
  }
  const int output_size = output.numel();
  const int threads = 1024;
  const int blocks = (output_size + threads - 1) / threads;  // ceil-div
  if (first) {
    AT_DISPATCH_ALL_TYPES(X.type(), "crop3d_cuda", [&] {
      using cuda_scalar_t = at::cuda::type<scalar_t>;
      hipLaunchKernelGGL(( Crop3DFKernel<cuda_scalar_t>) , dim3(blocks), dim3(threads), 0, 0,
          output_size, X.data<cuda_scalar_t>(), R.data<int16_t>(),
          X.size(1-off), X.size(2-off), X.size(3-off), channels,
          pooled_length, pooled_height, pooled_width, stride,
          output.data<cuda_scalar_t>());
    });
  } else {
    AT_DISPATCH_ALL_TYPES(X.type(), "crop3d_cuda", [&] {
      using cuda_scalar_t = at::cuda::type<scalar_t>;
      hipLaunchKernelGGL(( Crop3DLKernel<cuda_scalar_t>) , dim3(blocks), dim3(threads), 0, 0,
          output_size, X.data<cuda_scalar_t>(), R.data<int16_t>(),
          X.size(0), X.size(1), X.size(2), channels,
          pooled_length, pooled_height, pooled_width, stride,
          output.data<cuda_scalar_t>());
    });
  }
  return output;
}

// Earlier dispatch experiments, kept for reference.
//void crop2d_gpu(
//    const at::Tensor &X, // 3d image hwc
//    const at::Tensor &R, // boxes
//    int pooled_height, int pooled_width,
//    int stride=1, bool first=false
//    ) {
//  //cuda::type<float> x;
//  AT_DISPATCH_ALL_TYPES(
//      X.type(), "crop2d", [&](){
//      using cuda_scalar_t = at::cuda::type<scalar_t>;
//      //scalar_t s = 1;
//      });
//}
//
//at::Tensor crop2d_gpu(
//    const at::Tensor &X, // 3d image hwc
//    const at::Tensor &R, // boxes
//    int pooled_height, int pooled_width,
//    int stride=1, bool first=false
//    ) {
//  return AT_DISPATCH_ALL_TYPES(
//      X.type(), "crop2d", [&]() -> at::Tensor {
//      return _crop2d_gpu<at::cuda::type<scalar_t>>(
//          X, R, pooled_height, pooled_width, stride, first);
//      });
//}
//
//at::Tensor crop3d_gpu(
//    const at::Tensor &X, // 4d image thwc
//    const at::Tensor &R, // boxes
//    int pooled_length, int pooled_height, int pooled_width,
//    int stride=1, bool first=false
//    ) {
//  return AT_DISPATCH_ALL_TYPES(X.type(), "crop3d", [&]() -> at::Tensor {
//      return _crop3d_gpu<at::cuda::type<scalar_t>>(
//          X, R, pooled_length, pooled_height, pooled_width, stride, first);
//      });
//}
33b3f86c7a2c977765d296067c60486add83a53d.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include "ATen/ATen.h"
// for cuda::type<scalar_t>;
#include "ATen/cuda/CUDATypeConversion.cuh"
// line 107, 303, 316 for lambda syntax
#include "utils.h"

// Copies a (pooled_height x pooled_width) window, sampled every `stride`
// pixels and centered on each point in `fixs`, out of a channel-FIRST
// (c, h, w) image. Coordinates that fall outside the image are clamped to
// the border, so edge pixels are replicated.
//
//   numels   - total output element count (n * channels * ph * pw)
//   image    - source image, laid out channels * height * width
//   fixs     - one int16 (row, col) center per window, laid out 2 * n
//   top_data - output, laid out n * channels * ph * pw
//
// CUDA_1D_KERNEL_LOOP comes from utils.h — presumably a grid-stride loop
// over [0, numels); confirm against utils.h.
template <typename T>
__global__ void Crop2DFKernel(
    const int numels, const T* image, const int16_t * fixs,
    const int height, const int width, const int channels,
    const int pooled_height, const int pooled_width,
    const int stride, T* top_data) {
  CUDA_1D_KERNEL_LOOP(i, numels) {
    // Decompose the flat output index: w varies fastest, then h, c, n.
    int w = i % pooled_width;
    int h = (i / pooled_width) % pooled_height;
    int c = (i / pooled_width / pooled_height) % channels;
    int n = i / pooled_width / pooled_height / channels;
    const int16_t* pos = fixs + 2*n;
    // Source coordinate: offset from the window center by (h, w) steps.
    int row = pos[0] - (pooled_height/2 - h)*stride;
    int col = pos[1] - (pooled_width/2 - w)*stride;
    // Clamp to the image so windows near the border replicate edge pixels.
    if (row < 0) row = 0;
    if (row >= height) row = height - 1;
    if (col < 0) col = 0;
    if (col >= width) col = width - 1;
    int j = c * height * width + row * width + col;
    top_data[i] = image[j];
  }
}

// Same windowed crop as Crop2DFKernel, but for a channel-LAST (h, w, c)
// image; the output is laid out n * ph * pw * channels accordingly.
template <typename T>
__global__ void Crop2DLKernel(
    const int numels, const T* image, const int16_t * fixs,
    const int height, const int width, const int channels,
    const int pooled_height, const int pooled_width,
    const int stride, T* top_data) {
  CUDA_1D_KERNEL_LOOP(i, numels) {
    // Decompose the flat output index: c varies fastest, then w, h, n.
    int c = i % channels;
    int w = (i / channels) % pooled_width;
    int h = (i / channels / pooled_width ) % pooled_height;
    int n = i / channels / pooled_width / pooled_height;
    const int16_t* pos = fixs + 2*n;
    int row = pos[0] - (pooled_height/2 - h)*stride;
    int col = pos[1] - (pooled_width/2 - w)*stride;
    if (row < 0) row = 0;
    if (row >= height) row = height - 1;
    if (col < 0) col = 0;
    if (col >= width) col = width - 1;
    int j = row * width* channels + col * channels + c;
    top_data[i] = image[j];
  }
}

// Crops one window per center point from a single 2-D image on the GPU.
//   X      - image: (h, w) when 2-D; (c, h, w) when `first`; else (h, w, c)
//   R      - int16 centers, one (row, col) pair per output window
//   first  - selects the channel-first kernel and output layout
// Returns a zero-initialized-then-filled tensor with one window per row of R.
at::Tensor crop2d_gpu( const at::Tensor &X, // 3d image hwc
    const at::Tensor &R, // boxes
    int pooled_height, int pooled_width,
    int stride=1, bool first=false ) {
  at::Tensor output;
  int channels, off=0;
  if (X.dim() == 2) {
    // Single-channel image: `off` shifts the X.size() indices used below.
    channels = 1; off = 1;
    output = X.type().zeros( {R.size(0), pooled_height, pooled_width});
  } else if (first) {
    channels = X.size(0);
    output = X.type().zeros( {R.size(0), channels, pooled_height, pooled_width});
  } else {
    channels = X.size(2);
    output = X.type().zeros( {R.size(0), pooled_height, pooled_width, channels});
  }
  const int output_size = output.numel();
  const int threads = 1024;
  const int blocks = (output_size + threads - 1) / threads;  // ceil-div
  if (first) {
    AT_DISPATCH_ALL_TYPES(X.type(), "crop2d_cuda", [&] {
      using cuda_scalar_t = at::cuda::type<scalar_t>;
      Crop2DFKernel<cuda_scalar_t> <<<blocks, threads>>>(
          output_size, X.data<cuda_scalar_t>(), R.data<int16_t>(),
          X.size(1-off), X.size(2-off), channels,
          pooled_height, pooled_width, stride,
          output.data<cuda_scalar_t>());
    });
  } else {
    AT_DISPATCH_ALL_TYPES(X.type(), "crop2d_cuda", [&] {
      using cuda_scalar_t = at::cuda::type<scalar_t>;
      Crop2DLKernel<cuda_scalar_t> <<<blocks, threads>>>(
          output_size, X.data<cuda_scalar_t>(), R.data<int16_t>(),
          X.size(0), X.size(1), channels,
          pooled_height, pooled_width, stride,
          output.data<cuda_scalar_t>());
    });
  }
  return output;
}

// 3-D (volumetric) analogue of Crop2DFKernel for a channel-FIRST
// (c, l, h, w) volume; fixs holds int16 (len, row, col) triples per window.
template <typename T>
__global__ void Crop3DFKernel(
    const int numels, const T* image, const int16_t * fixs,
    const int length, const int height, const int width, const int channels,
    const int pooled_length, const int pooled_height, const int pooled_width,
    const int stride, T* top_data) {
  CUDA_1D_KERNEL_LOOP(i, numels) {
    // Output index decomposition: w fastest, then h, l, c, n.
    int w = i % pooled_width;
    int h = (i / pooled_width) % pooled_height;
    int l = (i / pooled_width / pooled_height) % pooled_length;
    int c = (i / pooled_width / pooled_height / pooled_length) % channels;
    int n = i / pooled_width / pooled_height / pooled_length / channels;
    const int16_t * pos = fixs + 3*n;
    int len = pos[0] - (pooled_length/2 - l)*stride;
    int row = pos[1] - (pooled_height/2 - h)*stride;
    int col = pos[2] - (pooled_width/2 - w)*stride;
    // Clamp each axis independently to stay inside the volume.
    if (len < 0) len = 0;
    if (len >= length) len = length - 1;
    if (row < 0) row = 0;
    if (row >= height) row = height - 1;
    if (col < 0) col = 0;
    if (col >= width) col = width - 1;
    int j = c * length * height * width + len * height * width + row * width + col;
    top_data[i] = image[j];
  }
}

// 3-D analogue of Crop2DLKernel for a channel-LAST (l, h, w, c) volume.
template <typename T>
__global__ void Crop3DLKernel(
    const int numels, const T* image, const int16_t * fixs,
    const int length, const int height, const int width, const int channels,
    const int pooled_length, const int pooled_height, const int pooled_width,
    const int stride, T* top_data) {
  CUDA_1D_KERNEL_LOOP(i, numels) {
    // Output index decomposition: c fastest, then w, h, l, n.
    int c = i % channels;
    int w = (i / channels) % pooled_width;
    int h = (i / channels / pooled_width ) % pooled_height;
    int l = (i / channels / pooled_width / pooled_height) % pooled_length;
    int n = i / channels / pooled_width / pooled_height / pooled_length;
    const int16_t * pos = fixs + 3*n;
    int len = pos[0] - (pooled_length/2 - l)*stride;
    int row = pos[1] - (pooled_height/2 - h)*stride;
    int col = pos[2] - (pooled_width/2 - w)*stride;
    if (len < 0) len = 0;
    if (len >= length) len = length - 1;
    if (row < 0) row = 0;
    if (row >= height) row = height - 1;
    if (col < 0) col = 0;
    if (col >= width) col = width - 1;
    int j = len*height*width*channels + row * width* channels + col * channels + c;
    top_data[i] = image[j];
  }
}

// Crops one 3-D window per center point from a single volume on the GPU.
//   X      - volume: (l, h, w) when 3-D; (c, l, h, w) when `first`;
//            else (l, h, w, c)
//   R      - int16 centers, one (len, row, col) triple per output window
//   first  - selects the channel-first kernel and output layout
at::Tensor crop3d_gpu( const at::Tensor &X, // 4d image thwc
    const at::Tensor &R, // boxes
    int pooled_length, int pooled_height, int pooled_width,
    int stride=1, bool first=false ) {
  at::Tensor output;
  int channels, off=0;
  if (X.dim() == 3) {
    // Single-channel volume: `off` shifts the X.size() indices used below.
    channels = 1; off = 1;
    output = X.type().zeros( {R.size(0), pooled_length, pooled_height, pooled_width});
  } else if (first) {
    channels = X.size(0);
    output = X.type().zeros( {R.size(0), channels, pooled_length, pooled_height, pooled_width});
  } else {
    channels = X.size(3);
    output = X.type().zeros( {R.size(0), pooled_length, pooled_height, pooled_width, channels});
  }
  const int output_size = output.numel();
  const int threads = 1024;
  const int blocks = (output_size + threads - 1) / threads;  // ceil-div
  if (first) {
    AT_DISPATCH_ALL_TYPES(X.type(), "crop3d_cuda", [&] {
      using cuda_scalar_t = at::cuda::type<scalar_t>;
      Crop3DFKernel<cuda_scalar_t> <<<blocks, threads>>>(
          output_size, X.data<cuda_scalar_t>(), R.data<int16_t>(),
          X.size(1-off), X.size(2-off), X.size(3-off), channels,
          pooled_length, pooled_height, pooled_width, stride,
          output.data<cuda_scalar_t>());
    });
  } else {
    AT_DISPATCH_ALL_TYPES(X.type(), "crop3d_cuda", [&] {
      using cuda_scalar_t = at::cuda::type<scalar_t>;
      Crop3DLKernel<cuda_scalar_t> <<<blocks, threads>>>(
          output_size, X.data<cuda_scalar_t>(), R.data<int16_t>(),
          X.size(0), X.size(1), X.size(2), channels,
          pooled_length, pooled_height, pooled_width, stride,
          output.data<cuda_scalar_t>());
    });
  }
  return output;
}

// Earlier dispatch experiments, kept for reference.
//void crop2d_gpu(
//    const at::Tensor &X, // 3d image hwc
//    const at::Tensor &R, // boxes
//    int pooled_height, int pooled_width,
//    int stride=1, bool first=false
//    ) {
//  //cuda::type<float> x;
//  AT_DISPATCH_ALL_TYPES(
//      X.type(), "crop2d", [&](){
//      using cuda_scalar_t = at::cuda::type<scalar_t>;
//      //scalar_t s = 1;
//      });
//}
//
//at::Tensor crop2d_gpu(
//    const at::Tensor &X, // 3d image hwc
//    const at::Tensor &R, // boxes
//    int pooled_height, int pooled_width,
//    int stride=1, bool first=false
//    ) {
//  return AT_DISPATCH_ALL_TYPES(
//      X.type(), "crop2d", [&]() -> at::Tensor {
//      return _crop2d_gpu<at::cuda::type<scalar_t>>(
//          X, R, pooled_height, pooled_width, stride, first);
//      });
//}
//
//at::Tensor crop3d_gpu(
//    const at::Tensor &X, // 4d image thwc
//    const at::Tensor &R, // boxes
//    int pooled_length, int pooled_height, int pooled_width,
//    int stride=1, bool first=false
//    ) {
//  return AT_DISPATCH_ALL_TYPES(X.type(), "crop3d", [&]() -> at::Tensor {
//      return _crop3d_gpu<at::cuda::type<scalar_t>>(
//          X, R, pooled_length, pooled_height, pooled_width, stride, first);
//      });
//}
07c5a29b9e4528ee1181ff9272f8a00e21af5861.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Auto-generated float stress kernel: feeds `comp` through a fixed chain of
// comparisons and updates against extreme float constants, then prints the
// final value. The printed result depends on the exact expression order, so
// the arithmetic must not be reassociated or simplified.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19) {
   if (comp == coshf(var_1 * (var_2 - fmodf((+1.2526E34f * (+0.0f * +1.1190E-42f / (var_3 - var_4 - var_5))), -1.5493E-35f)))) {
      if (comp >= -1.8523E36f / (var_6 + (var_7 * (var_8 + +1.5254E35f)))) {
         comp += -1.6531E-35f / (+1.8575E36f + cosf((+1.6701E35f * (var_9 - -1.9499E-41f))));
         if (comp == (-1.4244E-12f * (+1.0691E-35f + var_10 * var_11 - atan2f((var_12 - -1.6145E-36f), -1.9264E1f)))) {
            float tmp_1 = (var_13 + (var_14 - (var_15 * var_16 / var_17)));
            float tmp_2 = +1.8803E-36f;
            comp = tmp_2 / tmp_1 * var_18 / var_19;
         }
      }
   }
   // Device-side printf: one line of output per launch.
   printf("%.17g\n", comp);
}

// Helper emitted by the test generator; not referenced by main() in this
// particular test (leaks its allocation by design when it is used).
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

// Reads 20 floats from argv and launches the kernel on a single thread.
// No argc validation: the generated harness always supplies 20 arguments.
int main(int argc, char** argv) {
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  float tmp_2 = atof(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);

  // Single-thread launch (hipify form of compute<<<1,1>>>).
  hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20);
  // Block until the kernel (and its printf) completes.
  hipDeviceSynchronize();

  return 0;
}
07c5a29b9e4528ee1181ff9272f8a00e21af5861.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19) { if (comp == coshf(var_1 * (var_2 - fmodf((+1.2526E34f * (+0.0f * +1.1190E-42f / (var_3 - var_4 - var_5))), -1.5493E-35f)))) { if (comp >= -1.8523E36f / (var_6 + (var_7 * (var_8 + +1.5254E35f)))) { comp += -1.6531E-35f / (+1.8575E36f + cosf((+1.6701E35f * (var_9 - -1.9499E-41f)))); if (comp == (-1.4244E-12f * (+1.0691E-35f + var_10 * var_11 - atan2f((var_12 - -1.6145E-36f), -1.9264E1f)))) { float tmp_1 = (var_13 + (var_14 - (var_15 * var_16 / var_17))); float tmp_2 = +1.8803E-36f; comp = tmp_2 / tmp_1 * var_18 / var_19; } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); float tmp_2 = atof(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20); cudaDeviceSynchronize(); return 0; }
5f3fb4587c119fd627308776106a6fda75339ad2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "../../../common/cpu_bitmap.h" #define DIM 1000 struct hipComplex{ float r; float i; hipComplex(float a, float b) : r(a), i(b) {} __device__ float magnitude2(void) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a){ return hipComplex(r * a.r - i * a.i, i * a.r + r * a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r + a.r, i + a.i); } }; __device__ int julia(int x, int y) { const float scale = 1.5; float jx = scale * (float) (DIM / 2 - x) / (DIM / 2); float jy = scale * (float) (DIM / 2 - y) / (DIM / 2); hipComplex c(-0.8, 0.156); hipComplex a(jx, jy); int i = 0; for (i = 0; i < 200; i++) { a = a * a +c; if (a.magnitude2() > 1000) return 0; } return 1; } __global__ void kernel(unsigned char *ptr) { // map from threadIdx/BlockIdx to pixel position int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; // now calculate the value at that position int juliaValue = julia(x, y); ptr[offset * 4 + 0] = 255 * juliaValue; ptr[offset * 4 + 1] = 0; ptr[offset * 4 + 2] = 0; ptr[offset * 4 + 3] = 255; } int main(void) { CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; hipMalloc((void**)&dev_bitmap, bitmap.image_size()); dim3 grid(DIM,DIM); hipLaunchKernelGGL(( kernel), dim3(grid), dim3(1), 0, 0, dev_bitmap); hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost); bitmap.display_and_exit(); hipFree(dev_bitmap); }
5f3fb4587c119fd627308776106a6fda75339ad2.cu
#include <iostream> #include "../../../common/cpu_bitmap.h" #define DIM 1000 struct cuComplex{ float r; float i; cuComplex(float a, float b) : r(a), i(b) {} __device__ float magnitude2(void) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a){ return cuComplex(r * a.r - i * a.i, i * a.r + r * a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r + a.r, i + a.i); } }; __device__ int julia(int x, int y) { const float scale = 1.5; float jx = scale * (float) (DIM / 2 - x) / (DIM / 2); float jy = scale * (float) (DIM / 2 - y) / (DIM / 2); cuComplex c(-0.8, 0.156); cuComplex a(jx, jy); int i = 0; for (i = 0; i < 200; i++) { a = a * a +c; if (a.magnitude2() > 1000) return 0; } return 1; } __global__ void kernel(unsigned char *ptr) { // map from threadIdx/BlockIdx to pixel position int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; // now calculate the value at that position int juliaValue = julia(x, y); ptr[offset * 4 + 0] = 255 * juliaValue; ptr[offset * 4 + 1] = 0; ptr[offset * 4 + 2] = 0; ptr[offset * 4 + 3] = 255; } int main(void) { CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; cudaMalloc((void**)&dev_bitmap, bitmap.image_size()); dim3 grid(DIM,DIM); kernel<<<grid, 1>>>(dev_bitmap); cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost); bitmap.display_and_exit(); cudaFree(dev_bitmap); }
87a6b47998ad183df6834510a6d64cd5c39e6699.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <cudf/copying.hpp> #include <utilities/cuda_utils.hpp> #include <type_traits> #include <thrust/device_vector.h> #include <thrust/binary_search.h> #include <sys/time.h> #include <time.h> #include <utility/utility.hpp> #include <utility/trajectory_thrust.cuh> #include <cuspatial/trajectory.hpp> namespace{ struct is_true { __device__ bool operator()(const bool t) { return(t); } }; struct subset_functor { template <typename T> static constexpr bool is_supported() { return std::is_floating_point<T>::value; } template <typename T, std::enable_if_t< is_supported<T>() >* = nullptr> gdf_size_type operator()(const gdf_column& id, const gdf_column& in_x, const gdf_column& in_y, const gdf_column& in_id, const gdf_column& in_timestamp, gdf_column& out_x, gdf_column& out_y, gdf_column& out_id, gdf_column& out_timestamp) { gdf_size_type num_hit{0}; gdf_size_type num_id{id.size}; gdf_size_type num_rec{in_id.size}; if (num_id > 0 && id.data != nullptr && num_rec > 0) { int32_t* in_id_ptr = static_cast<int32_t*>(in_id.data); int32_t* id_ptr = static_cast<int32_t*>(id.data); hipStream_t stream{0}; auto exec_policy = rmm::exec_policy(stream)->on(stream); rmm::device_vector<int32_t> temp_id(id_ptr, id_ptr + num_id); thrust::sort(exec_policy, temp_id.begin(), temp_id.end()); 
thrust::device_vector<bool> hit_vec(num_rec); thrust::binary_search(exec_policy, temp_id.cbegin(), temp_id.cend(), in_id_ptr, in_id_ptr + num_rec, hit_vec.begin()); num_hit = thrust::count(exec_policy, hit_vec.begin(), hit_vec.end(), true); if (num_hit > 0) { out_x = cudf::allocate_like(in_x, num_hit); out_y = cudf::allocate_like(in_y, num_hit); out_id = cudf::allocate_like(in_id, num_hit); out_timestamp = cudf::allocate_like(in_timestamp, num_hit); auto in_itr = thrust::make_zip_iterator(thrust::make_tuple( static_cast<T*>(in_x.data), static_cast<T*>(in_y.data), static_cast<int32_t*>(in_id.data), static_cast<cudf::timestamp*>(in_timestamp.data))); auto out_itr = thrust::make_zip_iterator(thrust::make_tuple( static_cast<T*>(out_x.data), static_cast<T*>(out_y.data), static_cast<int32_t*>(out_id.data), static_cast<cudf::timestamp*>(out_timestamp.data))); auto end = thrust::copy_if(exec_policy, in_itr, in_itr + num_rec, hit_vec.begin(), out_itr, is_true()); gdf_size_type num_keep = end - out_itr; CUDF_EXPECTS(num_hit == num_keep, "count_if and copy_if result mismatch"); } } return num_hit; } template <typename T, std::enable_if_t< !is_supported<T>() >* = nullptr> gdf_size_type operator()(const gdf_column& ids, const gdf_column& in_x, const gdf_column& in_y, const gdf_column& in_id, const gdf_column& in_ts, gdf_column& out_x, gdf_column& out_y, gdf_column& out_id, gdf_column& out_ts) { CUDF_FAIL("Non-floating point operation is not supported"); } }; } // namespace anonymous namespace cuspatial { gdf_size_type subset_trajectory_id(const gdf_column& id, const gdf_column& in_x, const gdf_column& in_y, const gdf_column& in_id, const gdf_column& in_timestamp, gdf_column& out_x, gdf_column& out_y, gdf_column& out_id, gdf_column& out_timestamp) { CUDF_EXPECTS(in_x.data != nullptr && in_x.data != nullptr && in_id.data != nullptr && in_timestamp.data != nullptr, "Null input data"); CUDF_EXPECTS(in_x.size == in_y.size && in_x.size == in_id.size && in_x.size == in_timestamp.size, 
"Data size mismatch"); CUDF_EXPECTS(in_id.dtype == GDF_INT32, "Invalid trajectory ID datatype"); CUDF_EXPECTS(id.dtype == in_id.dtype, "Trajectory ID datatype mismatch"); CUDF_EXPECTS(in_timestamp.dtype == GDF_TIMESTAMP, "Invalid timestamp datatype"); CUDF_EXPECTS(in_x.null_count == 0 && in_y.null_count == 0 && in_id.null_count==0 && in_timestamp.null_count==0, "NULL support unimplemented"); out_x = cudf::empty_like(in_x); out_y = cudf::empty_like(in_y); out_id = cudf::empty_like(in_id); out_timestamp = cudf::empty_like(in_timestamp); return cudf::type_dispatcher(in_x.dtype, subset_functor(), id, in_x, in_y, in_id, in_timestamp, out_x, out_y, out_id, out_timestamp); } }// namespace cuspatial
87a6b47998ad183df6834510a6d64cd5c39e6699.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <cudf/copying.hpp> #include <utilities/cuda_utils.hpp> #include <type_traits> #include <thrust/device_vector.h> #include <thrust/binary_search.h> #include <sys/time.h> #include <time.h> #include <utility/utility.hpp> #include <utility/trajectory_thrust.cuh> #include <cuspatial/trajectory.hpp> namespace{ struct is_true { __device__ bool operator()(const bool t) { return(t); } }; struct subset_functor { template <typename T> static constexpr bool is_supported() { return std::is_floating_point<T>::value; } template <typename T, std::enable_if_t< is_supported<T>() >* = nullptr> gdf_size_type operator()(const gdf_column& id, const gdf_column& in_x, const gdf_column& in_y, const gdf_column& in_id, const gdf_column& in_timestamp, gdf_column& out_x, gdf_column& out_y, gdf_column& out_id, gdf_column& out_timestamp) { gdf_size_type num_hit{0}; gdf_size_type num_id{id.size}; gdf_size_type num_rec{in_id.size}; if (num_id > 0 && id.data != nullptr && num_rec > 0) { int32_t* in_id_ptr = static_cast<int32_t*>(in_id.data); int32_t* id_ptr = static_cast<int32_t*>(id.data); cudaStream_t stream{0}; auto exec_policy = rmm::exec_policy(stream)->on(stream); rmm::device_vector<int32_t> temp_id(id_ptr, id_ptr + num_id); thrust::sort(exec_policy, temp_id.begin(), temp_id.end()); thrust::device_vector<bool> hit_vec(num_rec); 
thrust::binary_search(exec_policy, temp_id.cbegin(), temp_id.cend(), in_id_ptr, in_id_ptr + num_rec, hit_vec.begin()); num_hit = thrust::count(exec_policy, hit_vec.begin(), hit_vec.end(), true); if (num_hit > 0) { out_x = cudf::allocate_like(in_x, num_hit); out_y = cudf::allocate_like(in_y, num_hit); out_id = cudf::allocate_like(in_id, num_hit); out_timestamp = cudf::allocate_like(in_timestamp, num_hit); auto in_itr = thrust::make_zip_iterator(thrust::make_tuple( static_cast<T*>(in_x.data), static_cast<T*>(in_y.data), static_cast<int32_t*>(in_id.data), static_cast<cudf::timestamp*>(in_timestamp.data))); auto out_itr = thrust::make_zip_iterator(thrust::make_tuple( static_cast<T*>(out_x.data), static_cast<T*>(out_y.data), static_cast<int32_t*>(out_id.data), static_cast<cudf::timestamp*>(out_timestamp.data))); auto end = thrust::copy_if(exec_policy, in_itr, in_itr + num_rec, hit_vec.begin(), out_itr, is_true()); gdf_size_type num_keep = end - out_itr; CUDF_EXPECTS(num_hit == num_keep, "count_if and copy_if result mismatch"); } } return num_hit; } template <typename T, std::enable_if_t< !is_supported<T>() >* = nullptr> gdf_size_type operator()(const gdf_column& ids, const gdf_column& in_x, const gdf_column& in_y, const gdf_column& in_id, const gdf_column& in_ts, gdf_column& out_x, gdf_column& out_y, gdf_column& out_id, gdf_column& out_ts) { CUDF_FAIL("Non-floating point operation is not supported"); } }; } // namespace anonymous namespace cuspatial { gdf_size_type subset_trajectory_id(const gdf_column& id, const gdf_column& in_x, const gdf_column& in_y, const gdf_column& in_id, const gdf_column& in_timestamp, gdf_column& out_x, gdf_column& out_y, gdf_column& out_id, gdf_column& out_timestamp) { CUDF_EXPECTS(in_x.data != nullptr && in_x.data != nullptr && in_id.data != nullptr && in_timestamp.data != nullptr, "Null input data"); CUDF_EXPECTS(in_x.size == in_y.size && in_x.size == in_id.size && in_x.size == in_timestamp.size, "Data size mismatch"); 
CUDF_EXPECTS(in_id.dtype == GDF_INT32, "Invalid trajectory ID datatype"); CUDF_EXPECTS(id.dtype == in_id.dtype, "Trajectory ID datatype mismatch"); CUDF_EXPECTS(in_timestamp.dtype == GDF_TIMESTAMP, "Invalid timestamp datatype"); CUDF_EXPECTS(in_x.null_count == 0 && in_y.null_count == 0 && in_id.null_count==0 && in_timestamp.null_count==0, "NULL support unimplemented"); out_x = cudf::empty_like(in_x); out_y = cudf::empty_like(in_y); out_id = cudf::empty_like(in_id); out_timestamp = cudf::empty_like(in_timestamp); return cudf::type_dispatcher(in_x.dtype, subset_functor(), id, in_x, in_y, in_id, in_timestamp, out_x, out_y, out_id, out_timestamp); } }// namespace cuspatial
8ccb0f2ff31873d38df398726fe34f6a7ab1a687.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // This file is modified from https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu // Cheng-Yang Fu // cyfu@cs.unc.edu #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __global__ void SigmoidFocalLossForward(const int nthreads, const T* logits, const int* targets, const int num_classes, const float gamma, const float alpha, const int num, T* losses) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80]; // Decide it is positive or negative case. T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**gamma * log(p) where T term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); // p**gamma * log(1-p) T term2 = powf(p, gamma) * (-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. 
* logits[i] * (logits[i] >= 0)))); losses[i] = 0.0; losses[i] += -c1 * term1 * zp; losses[i] += -c2 * term2 * zn; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossForward template <typename T> __global__ void SigmoidFocalLossBackward(const int nthreads, const T* logits, const int* targets, const T* d_losses, const int num_classes, const float gamma, const float alpha, const int num, T* d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80], 0 is background; // Decide it is positive or negative case. T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**g * (1 - p - g*p*log(p) T term1 = powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN)))); // (p**g) * (g*(1-p)*log(1-p) - p) T term2 = powf(p, gamma) * ((-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * (1. 
- p) * gamma - p); d_logits[i] = 0.0; d_logits[i] += -c1 * term1 * zp; d_logits[i] += -c2 * term2 * zn; d_logits[i] = d_logits[i] * d_losses[i]; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossBackward at::Tensor SigmoidFocalLoss_forward_cuda( const at::Tensor& logits, const at::Tensor& targets, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); auto losses = at::empty({num_samples, logits.size(1)}, logits.options()); auto losses_size = num_samples * logits.size(1); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)losses_size, 512L), 4096L)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(hipGetLastError()); return losses; } AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_forward", [&] { hipLaunchKernelGGL(( SigmoidFocalLossForward<scalar_t>), dim3(grid), dim3(block), 0, stream, losses_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<int>(), num_classes, gamma, alpha, num_samples, losses.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return losses; } at::Tensor SigmoidFocalLoss_backward_cuda( const at::Tensor& logits, const at::Tensor& targets, const at::Tensor& d_losses, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.type().is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); AT_ASSERTM(logits.size(1) == num_classes, "logits.size(1) should be num_classes"); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = 
num_samples * logits.size(1); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)d_logits_size, 512L), 4096L)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(hipGetLastError()); return d_logits; } AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_backward", [&] { hipLaunchKernelGGL(( SigmoidFocalLossBackward<scalar_t>), dim3(grid), dim3(block), 0, stream, d_logits_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<int>(), d_losses.contiguous().data<scalar_t>(), num_classes, gamma, alpha, num_samples, d_logits.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return d_logits; }
8ccb0f2ff31873d38df398726fe34f6a7ab1a687.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // This file is modified from https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu // Cheng-Yang Fu // cyfu@cs.unc.edu #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __global__ void SigmoidFocalLossForward(const int nthreads, const T* logits, const int* targets, const int num_classes, const float gamma, const float alpha, const int num, T* losses) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80]; // Decide it is positive or negative case. T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**gamma * log(p) where T term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); // p**gamma * log(1-p) T term2 = powf(p, gamma) * (-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))); losses[i] = 0.0; losses[i] += -c1 * term1 * zp; losses[i] += -c2 * term2 * zn; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossForward template <typename T> __global__ void SigmoidFocalLossBackward(const int nthreads, const T* logits, const int* targets, const T* d_losses, const int num_classes, const float gamma, const float alpha, const int num, T* d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80], 0 is background; // Decide it is positive or negative case. 
T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**g * (1 - p - g*p*log(p) T term1 = powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN)))); // (p**g) * (g*(1-p)*log(1-p) - p) T term2 = powf(p, gamma) * ((-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * (1. - p) * gamma - p); d_logits[i] = 0.0; d_logits[i] += -c1 * term1 * zp; d_logits[i] += -c2 * term2 * zn; d_logits[i] = d_logits[i] * d_losses[i]; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossBackward at::Tensor SigmoidFocalLoss_forward_cuda( const at::Tensor& logits, const at::Tensor& targets, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); auto losses = at::empty({num_samples, logits.size(1)}, logits.options()); auto losses_size = num_samples * logits.size(1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)losses_size, 512L), 4096L)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(cudaGetLastError()); return losses; } AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_forward", [&] { SigmoidFocalLossForward<scalar_t><<<grid, block, 0, stream>>>( losses_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<int>(), num_classes, gamma, alpha, num_samples, losses.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return losses; } at::Tensor SigmoidFocalLoss_backward_cuda( const at::Tensor& logits, const at::Tensor& targets, const at::Tensor& d_losses, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); 
AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.type().is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); AT_ASSERTM(logits.size(1) == num_classes, "logits.size(1) should be num_classes"); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = num_samples * logits.size(1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)d_logits_size, 512L), 4096L)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(cudaGetLastError()); return d_logits; } AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_backward", [&] { SigmoidFocalLossBackward<scalar_t><<<grid, block, 0, stream>>>( d_logits_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<int>(), d_losses.contiguous().data<scalar_t>(), num_classes, gamma, alpha, num_samples, d_logits.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return d_logits; }
309dd5e2b8c9c430bcb57a6f7dd263011bcc0774.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // setup.cu // Cloth Simulation // // Created by Timothy Luciani on 2011-04-10. // Modified by Mitch Luban on 2011-04-12 // Copyright 2011 __MyCompanyName__. All rights reserved. // #include "imageio.hh" #include "Particle.hh" #include "setup.hh" #include <cutil.h> #include <cuda_gl_interop.h> #include <stdlib.h> #include <stdio.h> #include <cmath> #define BLACK 0 #define RED 1 #define YELLOW 2 #define MAGENTA 3 #define GREEN 4 #define CYAN 5 #define BLUE 6 #define GREY 7 #define WHITE 8 #define MAXSAMPLES 100 extern int numCloths; static const int threadsPerBlock = 64; int row = 40; int column = 40; unsigned int numTriangles = (row-1)*(column-1)*2; int width = 8; int height = 4; struct Particle** pVector; // Lighting attributes GLfloat lightPos[] = {3.0, 5.0, -4.0, 0.0}; GLfloat lightColor[] = {1.0, 1.0, 1.0, 1.0}; GLfloat lightSpecular[] = {1.0, 1.0, 1.0, 1.0}; GLfloat lightDiffuse[] = {1.0, 1.0, 1.0, 1.0}; GLfloat lightShine[] = {20.0}; // Vbos GLuint *vbo; GLuint *normVbo; GLuint texVbo; GLuint indexVbo; // Flag normals float3 **flagNormals; // Triangle indices unsigned int *flagIndexArray; // Flag positions float4 **data_pointer; // Flag texture coordinates float2 *flagTexArray; // Texture image id GLuint flagTexId; int size = row * column; extern void verlet_simulation_step(struct Particle* pVector, float4 *data_pointer, GLuint vbo, float3 *flagNorms, GLuint nVbo, bool wind, int row, int column, int numCloth); void deleteVBO(int numCloth); void deleteTexVBO(); void deleteIndexVBO(); extern bool dsim; extern bool wind; extern GLuint shader; __device__ int getParticleInd(int x, int y, int row) { return y*row+x; } /*---------------------------------------------------------------------- free/clear/allocate simulation data ----------------------------------------------------------------------*/ void free_data ( void ) { for(int ii = 0; ii < numCloths; ii++) { 
hipFree(pVector[ii]); deleteVBO(ii); free(flagNormals[ii]); } free(flagNormals); glDeleteBuffers(1, &indexVbo); glDeleteBuffers(1, &texVbo); free(flagIndexArray); free(flagTexArray); } /*-------------------------------------------------------------------- Make Particles --------------------------------------------------------------------*/ __global__ void make_particles(struct Particle *pVector, float4 *data_pointer, float3 *flagNorms, int row, int column, int width, int height) { // //calculate the unique thread index int index = blockIdx.x * blockDim.x + threadIdx.x; // int i = index%row; int j = index/column; float3 pos = make_float3(width * (i/(float)row), -height * (j/(float)column), 0); if((j == 0 && i == 0) || (i == 0 && j == column-1)) pVector[getParticleInd(i,j,row)] = Particle(pos, 1, data_pointer, flagNorms, getParticleInd(i,j, row), false); else pVector[getParticleInd(i,j,row)] = Particle(pos, 1, data_pointer, flagNorms, getParticleInd(i,j, row), true); } // end make particles /*---------------------------------------------------------- Create VBO ----------------------------------------------------------*/ void createVBO(int clothNum) { // create buffer object glGenBuffers( 1, &(vbo[clothNum])); glBindBuffer( GL_ARRAY_BUFFER, vbo[clothNum]); // initialize buffer object unsigned int m_size = size * 4 * sizeof(float); glBufferData( GL_ARRAY_BUFFER, m_size, 0, GL_DYNAMIC_DRAW); // register buffer object with CUDA hipGLRegisterBufferObject(vbo[clothNum]); } /*---------------------------------------------------------- Delete VBO ----------------------------------------------------------*/ void deleteVBO(int clothNum) { glBindBuffer( 1, vbo[clothNum]); glDeleteBuffers( 1, &(vbo[clothNum])); glBindBuffer( 1, normVbo[clothNum]); glDeleteBuffers( 1, &(normVbo[clothNum])); hipGLUnregisterBufferObject(vbo[clothNum]); hipGLUnregisterBufferObject(normVbo[clothNum]); } /*-------------------------------------------------------------------- Make Flag Mesh out of 
particles --------------------------------------------------------------------*/ void make_flag_mesh( void ) { unsigned int currIndex = 0; float colFloat = (float)(column-1); float rowFloat = (float)(row-1); for(unsigned int ii = 0; ii < (size - column); ii++) { if( (ii+1) % column == 0 ) continue; flagIndexArray[currIndex + 0] = ii + 0; flagIndexArray[currIndex + 1] = ii + column; flagIndexArray[currIndex + 2] = ii + 1; currIndex += 3; } for(unsigned int ii = row; ii < size; ii++) { if( (ii+1) % column == 0 ) continue; flagIndexArray[currIndex + 0] = ii + 0; flagIndexArray[currIndex + 1] = ii + 1; flagIndexArray[currIndex + 2] = (ii + 1) - column; currIndex += 3; } for(unsigned int ii = 0; ii < size; ii++) { int currX = column - ii%column; int currY = (ii/column)%row; flagTexArray[ii] = make_float2((float)currX/colFloat, (float)(currY)/rowFloat); } } /*-------------------------------------------------------------------- Initialize System --------------------------------------------------------------------*/ void init_system(void) { vbo = (GLuint*)malloc(sizeof(GLuint) * numCloths); normVbo = (GLuint*)malloc(sizeof(GLuint) * numCloths); pVector = (Particle**)malloc(sizeof(Particle*) * numCloths); data_pointer = (float4**)malloc(sizeof(float4*) * numCloths); flagNormals = (float3**)malloc(sizeof(float3*) * numCloths); /* malloc cuda memory*/ for(int ii = 0; ii < numCloths; ii++) { hipMalloc( (void**)&(pVector[ii]), size * sizeof(struct Particle) ); /* initialize VBO */ glGenBuffers(1, &(vbo[ii])); glBindBuffer(GL_ARRAY_BUFFER, vbo[ii]); glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 4 * size, 0, GL_DYNAMIC_DRAW); hipGLRegisterBufferObject(vbo[ii]); glGenBuffers(1, &(normVbo[ii])); glBindBuffer(GL_ARRAY_BUFFER, normVbo[ii]); glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 3 * size, 0, GL_DYNAMIC_DRAW); hipGLRegisterBufferObject(normVbo[ii]); /* map vbo in cuda */ hipGLMapBufferObject__((void**)&(data_pointer[ii]), vbo[ii]); 
hipGLMapBufferObject__((void**)&(flagNormals[ii]), normVbo[ii]); /* create and copy */ int totalThreads = row * column; int nBlocks = totalThreads/threadsPerBlock; nBlocks += ((totalThreads % threadsPerBlock) > 0) ? 1 : 0; hipLaunchKernelGGL(( make_particles), dim3(nBlocks), dim3(threadsPerBlock), 0, 0, pVector[ii], data_pointer[ii], flagNormals[ii], row, column, width, height); // create particles hipDeviceSynchronize(); /* unmap vbo */ hipGLUnmapBufferObject(vbo[ii]); hipGLUnmapBufferObject(normVbo[ii]); } /****************************** * Flag texturing and meshing * ***************************/ flagIndexArray = (unsigned int*)malloc(sizeof(unsigned int) * numTriangles * 3); flagTexArray = (float2*)malloc(sizeof(float2) * size); // flagTexArray = (float*)malloc(sizeof(float) * numTriangles * 3 * 2); make_flag_mesh(); glGenBuffers(1, &indexVbo); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexVbo); glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * numTriangles * 3, flagIndexArray, GL_STATIC_DRAW); glGenBuffers(1, &texVbo); glBindBuffer(GL_ARRAY_BUFFER, texVbo); glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 2 * size, flagTexArray, GL_STATIC_DRAW); const char *flagTextureFilename = "Textures/american_flag.png"; int w, h; unsigned char *data = loadImageRGBA(flagTextureFilename, &w, &h); glGenTextures(1, &flagTexId); glActiveTexture(GL_TEXTURE0_ARB); glBindTexture(GL_TEXTURE_2D, flagTexId); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data); free(data); /****************************** * Lighting * ***************************/ // Enable depth testing glEnable(GL_DEPTH_TEST); glEnable(GL_NORMALIZE); // Enable lighting glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); // Set the light 
position and color glLightfv(GL_LIGHT0, GL_POSITION, lightPos); glLightfv(GL_LIGHT0, GL_SPECULAR, lightColor); } /*-------------------------------------------------------------------- Draw Particles --------------------------------------------------------------------*/ void draw_particles ( void ) { glEnable(GL_LIGHTING); glEnable(GL_TEXTURE_2D); glBindTexture(GL_TEXTURE_2D, flagTexId); glMaterialfv(GL_FRONT, GL_SPECULAR, lightSpecular); glMaterialfv(GL_FRONT, GL_DIFFUSE, lightDiffuse); glMaterialfv(GL_FRONT, GL_SHININESS, lightShine); // iterate over the cloth particles and draw their corresponding mesh for(int ii = 0; ii < numCloths; ii++) { glPushMatrix(); glColor3f(1.0, 1.0, 1.0); glTranslatef(ii*10, 0.0, 0.0); glBindBuffer(GL_ARRAY_BUFFER, vbo[ii]); glVertexPointer(4, GL_FLOAT, 0, (GLvoid*)((char*)NULL)); glBindBuffer(GL_ARRAY_BUFFER, texVbo); glTexCoordPointer(2, GL_FLOAT, 0, (GLvoid*)((char*)NULL)); glBindBuffer(GL_ARRAY_BUFFER, normVbo[ii]); glNormalPointer(GL_FLOAT, 0, (GLvoid*)((char*)NULL)); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexVbo); glEnableClientState(GL_VERTEX_ARRAY); glEnableClientState(GL_NORMAL_ARRAY); glEnableClientState(GL_TEXTURE_COORD_ARRAY); // Render flag mesh glDrawElements(GL_TRIANGLES, numTriangles * 3, GL_UNSIGNED_INT, (GLvoid*)((char*)NULL)); glBindBuffer(GL_ARRAY_BUFFER, 0); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); glDisableClientState(GL_VERTEX_ARRAY); glDisableClientState(GL_NORMAL_ARRAY); glDisableClientState(GL_TEXTURE_COORD_ARRAY); glPopMatrix(); } glDisable(GL_TEXTURE_2D); glDisable(GL_LIGHTING); } /*---------------------------------------------------------------------- relates mouse movements to tinker toy construction ----------------------------------------------------------------------*/ __global__ void remap_GUI(struct Particle *pVector, float4 *data_pointer, float3 *flagNorms) { //calculate the unique thread index int index = blockIdx.x * blockDim.x + threadIdx.x; // reset particles pVector[index].reset(); // reset 
vbo and texture normals data_pointer[index] = make_float4(pVector[index].m_ConstructPos, 1); flagNorms[index] = make_float3(0.0f, 0.0f, -1.0f); } void step_func ( ) { // iterate of the number of cloth for(int ii = 0; ii < numCloths; ii++) { if ( dsim ){ // simulate verlet_simulation_step(pVector[ii], data_pointer[ii], vbo[ii], flagNormals[ii], normVbo[ii], wind, row, column, ii); } else { // remap /* map vbo in cuda */ hipGLMapBufferObject__((void**)&(data_pointer[ii]), vbo[ii]); hipGLMapBufferObject__((void**)&(flagNormals[ii]), normVbo[ii]); int totalThreads = row * column; int nBlocks = totalThreads/threadsPerBlock; nBlocks += ((totalThreads % threadsPerBlock) > 0) ? 1 : 0; // launch kernel to remap hipLaunchKernelGGL(( remap_GUI), dim3(nBlocks), dim3(threadsPerBlock), 0, 0, pVector[ii], data_pointer[ii], flagNormals[ii]); hipDeviceSynchronize(); /* unmap vbo */ hipGLUnmapBufferObject(vbo[ii]); hipGLUnmapBufferObject(normVbo[ii]); } } }
309dd5e2b8c9c430bcb57a6f7dd263011bcc0774.cu
//
// setup.cu
// Cloth Simulation
//
// Created by Timothy Luciani on 2011-04-10.
// Modified by Mitch Luban on 2011-04-12
// Copyright 2011 __MyCompanyName__. All rights reserved.
//
// Host/device setup for a CUDA cloth (flag) simulation rendered through
// OpenGL VBOs. Particle positions and normals live in GL buffer objects
// that are mapped into CUDA address space, written by kernels, then
// unmapped and drawn.
//
// NOTE(review): the cudaGL* interop API used throughout this file
// (cudaGLRegisterBufferObject / cudaGLMapBufferObject / ...) and
// cudaThreadSynchronize are deprecated; modern CUDA uses
// cudaGraphicsGLRegisterBuffer / cudaGraphicsMapResources and
// cudaDeviceSynchronize — confirm against the toolkit in use.
#include "imageio.hh"
#include "Particle.hh"
#include "setup.hh"
#include <cutil.h>
#include <cuda_gl_interop.h>
#include <stdlib.h>
#include <stdio.h>
#include <cmath>

// Color indices (palette constants; not all are used in this file).
#define BLACK 0
#define RED 1
#define YELLOW 2
#define MAGENTA 3
#define GREEN 4
#define CYAN 5
#define BLUE 6
#define GREY 7
#define WHITE 8

#define MAXSAMPLES 100

extern int numCloths;

// Threads per CUDA block for all kernel launches in this file.
static const int threadsPerBlock = 64;

// Cloth grid dimensions (particles per row/column) and world-space extent.
int row = 40;
int column = 40;
// Two triangles per grid quad.
unsigned int numTriangles = (row-1)*(column-1)*2;
int width = 8;
int height = 4;

// Per-cloth device arrays of particles (one device allocation per cloth).
struct Particle** pVector;

// Lighting attributes
GLfloat lightPos[] = {3.0, 5.0, -4.0, 0.0};
GLfloat lightColor[] = {1.0, 1.0, 1.0, 1.0};
GLfloat lightSpecular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat lightDiffuse[] = {1.0, 1.0, 1.0, 1.0};
GLfloat lightShine[] = {20.0};

// Vbos: per-cloth position and normal buffers, plus shared texcoord/index buffers.
GLuint *vbo;
GLuint *normVbo;
GLuint texVbo;
GLuint indexVbo;

// Flag normals — per-cloth CUDA-mapped pointers into normVbo[i] (only valid
// while the buffer is mapped).
float3 **flagNormals;

// Triangle indices
unsigned int *flagIndexArray;

// Flag positions — per-cloth CUDA-mapped pointers into vbo[i] (only valid
// while the buffer is mapped).
float4 **data_pointer;

// Flag texture coordinates
float2 *flagTexArray;

// Texture image id
GLuint flagTexId;

// Total particles per cloth.
int size = row * column;

extern void verlet_simulation_step(struct Particle* pVector, float4 *data_pointer, GLuint vbo, float3 *flagNorms, GLuint nVbo, bool wind, int row, int column, int numCloth);
void deleteVBO(int numCloth);
void deleteTexVBO();
void deleteIndexVBO();
extern bool dsim;
extern bool wind;
extern GLuint shader;

// Flattens a 2D grid coordinate (x, y) to a linear particle index.
// NOTE(review): uses `row` as the stride; elsewhere the code indexes with
// `column` — consistent only because row == column here. Verify if the grid
// is ever made non-square.
__device__ int getParticleInd(int x, int y, int row) { return y*row+x; }

/*----------------------------------------------------------------------
free/clear/allocate simulation data
----------------------------------------------------------------------*/
void free_data ( void )
{
    for(int ii = 0; ii < numCloths; ii++)
    {
        cudaFree(pVector[ii]);
        deleteVBO(ii);
        // NOTE(review): flagNormals[ii] was last set by cudaGLMapBufferObject
        // (a device mapping that has since been unmapped); passing it to
        // free() looks invalid — verify. Also the host arrays vbo, normVbo,
        // pVector and data_pointer allocated in init_system are never freed.
        free(flagNormals[ii]);
    }
    free(flagNormals);
    glDeleteBuffers(1, &indexVbo);
    glDeleteBuffers(1, &texVbo);
    free(flagIndexArray);
    free(flagTexArray);
}

/*--------------------------------------------------------------------
					Make Particles
--------------------------------------------------------------------*/
// Initializes one particle per thread: computes its rest position on the
// cloth grid and constructs it in place in the device array. The two
// left-edge corner particles ((0,0) and (0,column-1)) are pinned
// (constructed with `false`, i.e. not movable).
// NOTE(review): there is no `index < row*column` guard; safe only while
// the launch size exactly matches the particle count — confirm.
__global__ void make_particles(struct Particle *pVector, float4 *data_pointer, float3 *flagNorms, int row, int column, int width, int height)
{
    // calculate the unique thread index
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    // grid coordinates of this particle
    int i = index%row;
    int j = index/column;

    // rest position spread across [0, width] x [-height, 0]
    float3 pos = make_float3(width * (i/(float)row), -height * (j/(float)column), 0);

    if((j == 0 && i == 0) || (i == 0 && j == column-1))
        pVector[getParticleInd(i,j,row)] = Particle(pos, 1, data_pointer, flagNorms, getParticleInd(i,j, row), false);
    else
        pVector[getParticleInd(i,j,row)] = Particle(pos, 1, data_pointer, flagNorms, getParticleInd(i,j, row), true);
} // end make particles

/*----------------------------------------------------------
					Create VBO
----------------------------------------------------------*/
// Creates, sizes (4 floats per particle) and CUDA-registers the position
// VBO for one cloth. NOTE(review): init_system performs this work itself
// and does not call this helper — possibly dead code.
void createVBO(int clothNum)
{
    // create buffer object
    glGenBuffers( 1, &(vbo[clothNum]));
    glBindBuffer( GL_ARRAY_BUFFER, vbo[clothNum]);

    // initialize buffer object
    unsigned int m_size = size * 4 * sizeof(float);
    glBufferData( GL_ARRAY_BUFFER, m_size, 0, GL_DYNAMIC_DRAW);

    // register buffer object with CUDA
    cudaGLRegisterBufferObject(vbo[clothNum]);
}

/*----------------------------------------------------------
					Delete VBO
----------------------------------------------------------*/
// Deletes and CUDA-unregisters one cloth's position and normal VBOs.
// NOTE(review): glBindBuffer's first argument should be a target enum
// (e.g. GL_ARRAY_BUFFER), not the literal 1; and the CUDA unregister
// calls happen AFTER glDeleteBuffers — unregistering before deletion is
// the documented order. Verify.
void deleteVBO(int clothNum)
{
    glBindBuffer( 1, vbo[clothNum]);
    glDeleteBuffers( 1, &(vbo[clothNum]));
    glBindBuffer( 1, normVbo[clothNum]);
    glDeleteBuffers( 1, &(normVbo[clothNum]));
    cudaGLUnregisterBufferObject(vbo[clothNum]);
    cudaGLUnregisterBufferObject(normVbo[clothNum]);
}

/*--------------------------------------------------------------------
				Make Flag Mesh out of particles
--------------------------------------------------------------------*/
// Fills flagIndexArray with two triangles per grid quad (skipping the
// last particle of each row so triangles never wrap across rows) and
// fills flagTexArray with per-particle texture coordinates.
void make_flag_mesh( void )
{
    unsigned int currIndex = 0;
    float colFloat = (float)(column-1);
    float rowFloat = (float)(row-1);

    // first triangle of each quad: (ii, ii+column, ii+1)
    for(unsigned int ii = 0; ii < (size - column); ii++)
    {
        if( (ii+1) % column == 0 )
            continue;  // skip row ends
        flagIndexArray[currIndex + 0] = ii + 0;
        flagIndexArray[currIndex + 1] = ii + column;
        flagIndexArray[currIndex + 2] = ii + 1;
        currIndex += 3;
    }

    // second triangle of each quad: (ii, ii+1, ii+1-column), starting from
    // the second row
    for(unsigned int ii = row; ii < size; ii++)
    {
        if( (ii+1) % column == 0 )
            continue;  // skip row ends
        flagIndexArray[currIndex + 0] = ii + 0;
        flagIndexArray[currIndex + 1] = ii + 1;
        flagIndexArray[currIndex + 2] = (ii + 1) - column;
        currIndex += 3;
    }

    // per-particle texture coordinates (s mirrored horizontally)
    // NOTE(review): currX ranges 1..column, so s = currX/(column-1) can
    // exceed 1.0 at the mirrored edge; relies on GL_CLAMP wrap — verify.
    for(unsigned int ii = 0; ii < size; ii++)
    {
        int currX = column - ii%column;
        int currY = (ii/column)%row;
        flagTexArray[ii] = make_float2((float)currX/colFloat, (float)(currY)/rowFloat);
    }
}

/*--------------------------------------------------------------------
					Initialize System
--------------------------------------------------------------------*/
// Allocates per-cloth device particle arrays and GL buffers, launches
// make_particles to populate them, builds the shared index/texcoord
// buffers, loads the flag texture, and enables fixed-function lighting.
void init_system(void)
{
    // per-cloth host-side handle arrays
    vbo = (GLuint*)malloc(sizeof(GLuint) * numCloths);
    normVbo = (GLuint*)malloc(sizeof(GLuint) * numCloths);
    pVector = (Particle**)malloc(sizeof(Particle*) * numCloths);
    data_pointer = (float4**)malloc(sizeof(float4*) * numCloths);
    flagNormals = (float3**)malloc(sizeof(float3*) * numCloths);

    /* malloc cuda memory*/
    for(int ii = 0; ii < numCloths; ii++)
    {
        cudaMalloc( (void**)&(pVector[ii]), size * sizeof(struct Particle) );

        /* initialize VBO */
        glGenBuffers(1, &(vbo[ii]));
        glBindBuffer(GL_ARRAY_BUFFER, vbo[ii]);
        glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 4 * size, 0, GL_DYNAMIC_DRAW);
        cudaGLRegisterBufferObject(vbo[ii]);

        glGenBuffers(1, &(normVbo[ii]));
        glBindBuffer(GL_ARRAY_BUFFER, normVbo[ii]);
        glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 3 * size, 0, GL_DYNAMIC_DRAW);
        cudaGLRegisterBufferObject(normVbo[ii]);

        /* map vbo in cuda */
        cudaGLMapBufferObject((void**)&(data_pointer[ii]), vbo[ii]);
        cudaGLMapBufferObject((void**)&(flagNormals[ii]), normVbo[ii]);

        /* create and copy */
        int totalThreads = row * column;
        int nBlocks = totalThreads/threadsPerBlock;
        nBlocks += ((totalThreads % threadsPerBlock) > 0) ? 1 : 0;  // ceil-div

        make_particles<<<nBlocks, threadsPerBlock>>>(pVector[ii], data_pointer[ii], flagNormals[ii], row, column, width, height); // create particles
        cudaThreadSynchronize();

        /* unmap vbo */
        cudaGLUnmapBufferObject(vbo[ii]);
        cudaGLUnmapBufferObject(normVbo[ii]);
    }

    /******************************
	 * Flag texturing and meshing *
	 ***************************/

    flagIndexArray = (unsigned int*)malloc(sizeof(unsigned int) * numTriangles * 3);
    flagTexArray = (float2*)malloc(sizeof(float2) * size);
    // flagTexArray = (float*)malloc(sizeof(float) * numTriangles * 3 * 2);
    make_flag_mesh();

    // shared triangle-index buffer
    glGenBuffers(1, &indexVbo);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexVbo);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * numTriangles * 3, flagIndexArray, GL_STATIC_DRAW);

    // shared texture-coordinate buffer
    glGenBuffers(1, &texVbo);
    glBindBuffer(GL_ARRAY_BUFFER, texVbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 2 * size, flagTexArray, GL_STATIC_DRAW);

    // load the flag texture image and upload it
    const char *flagTextureFilename = "Textures/american_flag.png";
    int w, h;
    unsigned char *data = loadImageRGBA(flagTextureFilename, &w, &h);
    // NOTE(review): loadImageRGBA's failure behavior is not visible here;
    // a NULL `data` would be passed straight to glTexImage2D — verify.

    glGenTextures(1, &flagTexId);
    glActiveTexture(GL_TEXTURE0_ARB);
    glBindTexture(GL_TEXTURE_2D, flagTexId);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
    free(data);

    /******************************
	 * Lighting *
	 ***************************/

    // Enable depth testing
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_NORMALIZE);

    // Enable lighting
    glEnable(GL_LIGHTING);
    glEnable(GL_LIGHT0);

    // Set the light position and color
    glLightfv(GL_LIGHT0, GL_POSITION, lightPos);
    glLightfv(GL_LIGHT0, GL_SPECULAR, lightColor);
}

/*--------------------------------------------------------------------
					Draw Particles
--------------------------------------------------------------------*/
// Draws every cloth as a textured, lit, indexed triangle mesh, sourcing
// positions/normals from each cloth's VBOs and offsetting cloth ii by
// 10 units along x.
void draw_particles ( void )
{
    glEnable(GL_LIGHTING);
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, flagTexId);

    glMaterialfv(GL_FRONT, GL_SPECULAR, lightSpecular);
    glMaterialfv(GL_FRONT, GL_DIFFUSE, lightDiffuse);
    glMaterialfv(GL_FRONT, GL_SHININESS, lightShine);

    // iterate over the cloth particles and draw their corresponding mesh
    for(int ii = 0; ii < numCloths; ii++)
    {
        glPushMatrix();
        glColor3f(1.0, 1.0, 1.0);
        glTranslatef(ii*10, 0.0, 0.0);

        // bind vertex, texcoord and normal sources
        glBindBuffer(GL_ARRAY_BUFFER, vbo[ii]);
        glVertexPointer(4, GL_FLOAT, 0, (GLvoid*)((char*)NULL));
        glBindBuffer(GL_ARRAY_BUFFER, texVbo);
        glTexCoordPointer(2, GL_FLOAT, 0, (GLvoid*)((char*)NULL));
        glBindBuffer(GL_ARRAY_BUFFER, normVbo[ii]);
        glNormalPointer(GL_FLOAT, 0, (GLvoid*)((char*)NULL));
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexVbo);

        glEnableClientState(GL_VERTEX_ARRAY);
        glEnableClientState(GL_NORMAL_ARRAY);
        glEnableClientState(GL_TEXTURE_COORD_ARRAY);

        // Render flag mesh
        glDrawElements(GL_TRIANGLES, numTriangles * 3, GL_UNSIGNED_INT, (GLvoid*)((char*)NULL));

        glBindBuffer(GL_ARRAY_BUFFER, 0);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

        glDisableClientState(GL_VERTEX_ARRAY);
        glDisableClientState(GL_NORMAL_ARRAY);
        glDisableClientState(GL_TEXTURE_COORD_ARRAY);
        glPopMatrix();
    }
    glDisable(GL_TEXTURE_2D);
    glDisable(GL_LIGHTING);
}

/*----------------------------------------------------------------------
relates mouse movements to tinker toy construction
----------------------------------------------------------------------*/
// Resets one particle per thread to its construction pose and rewrites the
// mapped position/normal buffers accordingly.
// NOTE(review): like make_particles, no bounds guard on `index` — safe only
// while the launch size matches the particle count.
__global__ void remap_GUI(struct Particle *pVector, float4 *data_pointer, float3 *flagNorms)
{
    //calculate the unique thread index
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    // reset particles
    pVector[index].reset();

    // reset vbo and texture normals
    data_pointer[index] = make_float4(pVector[index].m_ConstructPos, 1);
    flagNorms[index] = make_float3(0.0f, 0.0f, -1.0f);
}

// Per-frame step: either advances the simulation (dsim) or maps the VBOs
// and resets every cloth back to its construction pose via remap_GUI.
void step_func ( )
{
    // iterate of the number of cloth
    for(int ii = 0; ii < numCloths; ii++)
    {
        if ( dsim ){ // simulate
            verlet_simulation_step(pVector[ii], data_pointer[ii], vbo[ii], flagNormals[ii], normVbo[ii], wind, row, column, ii);
        } else { // remap

            /* map vbo in cuda */
            cudaGLMapBufferObject((void**)&(data_pointer[ii]), vbo[ii]);
            cudaGLMapBufferObject((void**)&(flagNormals[ii]), normVbo[ii]);

            int totalThreads = row * column;
            int nBlocks = totalThreads/threadsPerBlock;
            nBlocks += ((totalThreads % threadsPerBlock) > 0) ? 1 : 0;  // ceil-div

            // launch kernel to remap
            remap_GUI<<<nBlocks, threadsPerBlock>>>(pVector[ii], data_pointer[ii], flagNormals[ii]);
            cudaThreadSynchronize();

            /* unmap vbo */
            cudaGLUnmapBufferObject(vbo[ii]);
            cudaGLUnmapBufferObject(normVbo[ii]);
        }
    }
}
bbb01a84c4055eb9d531087a21633de86e9d515e.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"

namespace StreamCompaction {
namespace Efficient {
    using StreamCompaction::Common::PerformanceTimer;

    // Lazily-constructed singleton timer used to time GPU-side work.
    PerformanceTimer& timer()
    {
        static PerformanceTimer timer;
        return timer;
    }

    // Writes `fill` into start[n + threadIdx.x] for each launched thread.
    // Used to zero the padding tail of a buffer whose logical length n was
    // rounded up; launch a single block with exactly (padded - n) threads.
    __global__ void fillempty(int n, int *start, int fill)
    {
        int index = threadIdx.x;
        start[index + n] = fill;
    }

    // Work-efficient exclusive scan of n elements (n must be a power of two).
    // Single-block launch with n/2 threads, so n <= 2 * maxThreadsPerBlock.
    // Adapted from NVIDIA GPU Gems 3:
    // https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_pref01.html
    // `temp` is a device scratch buffer of at least n ints (the article used
    // shared memory; this version keeps the scratch in global memory).
    __global__ void prescan(int *g_odata, int *g_idata, int n, int *temp)
    {
        int thid = blockIdx.x * blockDim.x + threadIdx.x;
        int offset = 1;

        // Each thread loads two input elements.
        temp[2 * thid] = g_idata[2 * thid];
        temp[2 * thid + 1] = g_idata[2 * thid + 1];

        int d, ai, bi;
        // Up-sweep: build partial sums in place up the tree.
        for (d = n >> 1; d > 0; d >>= 1) {
            __syncthreads();
            if (thid < d) {
                ai = offset * (2 * thid + 1) - 1;
                bi = offset * (2 * thid + 2) - 1;
                temp[bi] += temp[ai];
            }
            offset *= 2;
        }

        if (thid == 0) {
            temp[n - 1] = 0;  // clear the last element (exclusive scan)
        }

        int t;
        // Down-sweep: traverse down the tree building the scan.
        for (d = 1; d < n; d *= 2) {
            offset >>= 1;
            __syncthreads();
            if (thid < d) {
                ai = offset * (2 * thid + 1) - 1;
                bi = offset * (2 * thid + 2) - 1;
                t = temp[ai];
                temp[ai] = temp[bi];
                temp[bi] += t;
            }
        }
        __syncthreads();

        // Write results back to device memory, two elements per thread.
        g_odata[2 * thid] = temp[2 * thid];
        g_odata[2 * thid + 1] = temp[2 * thid + 1];
    }

    /**
     * Performs prefix-sum (aka scan) on idata, storing the result into odata.
     *
     * Rounds n up to a power of two, zero-pads, runs the single-block
     * work-efficient prescan, and copies the (shifted, i.e. inclusive)
     * result back. Limited to padded sizes <= 2 * maxThreadsPerBlock.
     */
    void scan(int n, int *odata, const int *idata)
    {
        if (n <= 0) {
            return;  // nothing to scan
        }

        // Round n up to the next power of two (prescan requires it).
        int N;
        int count;
        count = ilog2(n);
        N = 1 << count;
        if (N < n) {
            N *= 2;
        }
        if (N < 2) {
            N = 2;  // fix: n == 1 previously launched prescan with 0 threads
        }

        // Device buffers; buffer4/temp carry 4 extra slots of zero padding.
        int *buffer3;
        int *buffer4;
        int *temp;
        hipMalloc((void**)&buffer3, N * sizeof(int));
        hipMalloc((void**)&buffer4, (N + 4) * sizeof(int));
        hipMalloc((void**)&temp, (N + 4) * sizeof(int));

        hipMemcpy(buffer3, idata, n * sizeof(int), hipMemcpyHostToDevice);
        hipMemcpy(buffer4, idata, n * sizeof(int), hipMemcpyHostToDevice);
        hipMemcpy(temp, idata, n * sizeof(int), hipMemcpyHostToDevice);

        // Zero the padding; guard against a zero-thread launch when N == n.
        if (N > n) {
            hipLaunchKernelGGL(fillempty, dim3(1), dim3(N - n), 0, 0, n, buffer3, 0);
        }
        hipLaunchKernelGGL(fillempty, dim3(1), dim3(4 + N - n), 0, 0, n, buffer4, 0);
        hipLaunchKernelGGL(fillempty, dim3(1), dim3(4 + N - n), 0, 0, n, temp, 0);

        // Time only the scan kernel itself.
        timer().startGpuTimer();
        hipLaunchKernelGGL(prescan, dim3(1), dim3(N / 2), 0, 0, buffer4, buffer3, N, temp);
        timer().endGpuTimer();

        // Copy only n results back: odata holds n ints, and the previous
        // N-element copy overflowed it whenever n was not a power of two.
        // Reading from buffer4 + 1 shifts the exclusive scan to inclusive.
        hipMemcpy(odata, buffer4 + 1, n * sizeof(int), hipMemcpyDeviceToHost);
        if (n >= 2) {
            // Recompute the last element on the host (the shifted copy's
            // final slot came from the zero padding). Guarded: n == 1 used
            // to read odata[-1].
            odata[n - 1] = odata[n - 2] + idata[n - 1];
        }

        // Release device buffers.
        hipFree(buffer3);
        hipFree(buffer4);
        hipFree(temp);
    }

    // Naive single-block inclusive scan with in-kernel ping-pong between the
    // two buffers. Not used by scan()/compact(); kept for reference.
    // NOTE(review): the final result's buffer depends on the parity of the
    // number of doubling steps — callers must account for it.
    __global__ void naive_sum2(int n, int* odata, int* idata)
    {
        int index = threadIdx.x;
        int i;
        int *ping;
        int *pong;
        int *swap;
        ping = idata;
        pong = odata;
        __syncthreads();
        for (i = 1; i < n; i *= 2) {
            if (index - i >= 0) {
                pong[index] = ping[index] + ping[index - i];
            } else {
                pong[index] = ping[index];
            }
            // Ping-Pong here!
            swap = ping;
            ping = pong;
            pong = swap;
            __syncthreads();
        }
    }

    /**
     * Performs stream compaction on idata, storing the result into odata.
     * All zeroes are discarded.
     *
     * Single-block kernel launches: requires n <= maxThreadsPerBlock.
     * The boolean scan runs on the host; odata is reused as scratch before
     * receiving the final compacted output.
     *
     * @param n The number of elements in idata.
     * @param odata The array into which to store elements.
     * @param idata The array of elements to compact.
     * @returns The number of elements remaining after compaction.
     */
    int compact(int n, int *odata, const int *idata)
    {
        if (n <= 0) {
            return 0;  // nothing to compact
        }

        int *data;
        hipMalloc((void**)&data, n * sizeof(int));
        hipMemcpy(data, idata, n * sizeof(int), hipMemcpyHostToDevice);

        int *out;
        hipMalloc((void**)&out, n * sizeof(int));
        hipMemcpy(out, idata, n * sizeof(int), hipMemcpyHostToDevice);

        int *bools;
        hipMalloc((void**)&bools, n * sizeof(int));
        int *index;
        hipMalloc((void**)&index, n * sizeof(int));

        timer().startGpuTimer();

        // Map each element to 0/1 depending on whether it is kept.
        hipLaunchKernelGGL(Common::kernMapToBoolean, dim3(1), dim3(n), 0, 0, n, bools, data);

        // Scan the boolean flags on the host; odata doubles as scratch.
        hipMemcpy(odata, bools, n * sizeof(int), hipMemcpyDeviceToHost);
        int i;
        int count = 0;
        for (i = 0; i < n; i++) {
            count += odata[i];
            odata[i] = count - 1;  // destination index for surviving elements
        }
        hipMemcpy(index, odata, n * sizeof(int), hipMemcpyHostToDevice);
        hipMemcpy(data, idata, n * sizeof(int), hipMemcpyHostToDevice);

        // Scatter the surviving elements to their destinations.
        hipLaunchKernelGGL(Common::kernScatter, dim3(1), dim3(n), 0, 0, n, out, data, bools, index);

        timer().endGpuTimer();

        // Copy the compacted data back to the CPU.
        hipMemcpy(odata, out, n * sizeof(int), hipMemcpyDeviceToHost);

        // The inclusive-scan-derived indices shift element 0; restore it
        // when it survives compaction.
        if (idata[0] != 0) {
            odata[0] = idata[0];
        }

        // fix: release the device buffers (they previously leaked).
        hipFree(data);
        hipFree(out);
        hipFree(bools);
        hipFree(index);

        return count;
    }
}
}
bbb01a84c4055eb9d531087a21633de86e9d515e.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"

namespace StreamCompaction {
namespace Efficient {
    using StreamCompaction::Common::PerformanceTimer;

    // Lazily-constructed singleton timer used to time GPU-side work.
    PerformanceTimer& timer()
    {
        static PerformanceTimer timer;
        return timer;
    }

    // Writes `fill` into start[n + threadIdx.x] for each launched thread.
    // Used to zero the padding tail of a buffer whose logical length n was
    // rounded up; launch a single block with exactly (padded - n) threads.
    __global__ void fillempty(int n, int *start, int fill)
    {
        int index = threadIdx.x;
        start[index + n] = fill;
    }

    // Work-efficient exclusive scan of n elements (n must be a power of two).
    // Single-block launch with n/2 threads, so n <= 2 * maxThreadsPerBlock.
    // Adapted from NVIDIA GPU Gems 3:
    // https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_pref01.html
    // `temp` is a device scratch buffer of at least n ints (the article used
    // shared memory; this version keeps the scratch in global memory).
    __global__ void prescan(int *g_odata, int *g_idata, int n, int *temp)
    {
        int thid = blockIdx.x * blockDim.x + threadIdx.x;
        int offset = 1;

        // Each thread loads two input elements.
        temp[2 * thid] = g_idata[2 * thid];
        temp[2 * thid + 1] = g_idata[2 * thid + 1];

        int d, ai, bi;
        // Up-sweep: build partial sums in place up the tree.
        for (d = n >> 1; d > 0; d >>= 1) {
            __syncthreads();
            if (thid < d) {
                ai = offset * (2 * thid + 1) - 1;
                bi = offset * (2 * thid + 2) - 1;
                temp[bi] += temp[ai];
            }
            offset *= 2;
        }

        if (thid == 0) {
            temp[n - 1] = 0;  // clear the last element (exclusive scan)
        }

        int t;
        // Down-sweep: traverse down the tree building the scan.
        for (d = 1; d < n; d *= 2) {
            offset >>= 1;
            __syncthreads();
            if (thid < d) {
                ai = offset * (2 * thid + 1) - 1;
                bi = offset * (2 * thid + 2) - 1;
                t = temp[ai];
                temp[ai] = temp[bi];
                temp[bi] += t;
            }
        }
        __syncthreads();

        // Write results back to device memory, two elements per thread.
        g_odata[2 * thid] = temp[2 * thid];
        g_odata[2 * thid + 1] = temp[2 * thid + 1];
    }

    /**
     * Performs prefix-sum (aka scan) on idata, storing the result into odata.
     *
     * Rounds n up to a power of two, zero-pads, runs the single-block
     * work-efficient prescan, and copies the (shifted, i.e. inclusive)
     * result back. Limited to padded sizes <= 2 * maxThreadsPerBlock.
     */
    void scan(int n, int *odata, const int *idata)
    {
        if (n <= 0) {
            return;  // nothing to scan
        }

        // Round n up to the next power of two (prescan requires it).
        int N;
        int count;
        count = ilog2(n);
        N = 1 << count;
        if (N < n) {
            N *= 2;
        }
        if (N < 2) {
            N = 2;  // fix: n == 1 previously launched prescan with 0 threads
        }

        // Device buffers; buffer4/temp carry 4 extra slots of zero padding.
        int *buffer3;
        int *buffer4;
        int *temp;
        cudaMalloc((void**)&buffer3, N * sizeof(int));
        cudaMalloc((void**)&buffer4, (N + 4) * sizeof(int));
        cudaMalloc((void**)&temp, (N + 4) * sizeof(int));

        cudaMemcpy(buffer3, idata, n * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(buffer4, idata, n * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(temp, idata, n * sizeof(int), cudaMemcpyHostToDevice);

        // Zero the padding; guard against a zero-thread launch when N == n.
        if (N > n) {
            fillempty<<<1, (N - n)>>>(n, buffer3, 0);
        }
        fillempty<<<1, (4 + N - n)>>>(n, buffer4, 0);
        fillempty<<<1, (4 + N - n)>>>(n, temp, 0);

        // Time only the scan kernel itself.
        timer().startGpuTimer();
        prescan<<<1, N / 2>>>(buffer4, buffer3, N, temp);
        timer().endGpuTimer();

        // Copy only n results back: odata holds n ints, and the previous
        // N-element copy overflowed it whenever n was not a power of two.
        // Reading from buffer4 + 1 shifts the exclusive scan to inclusive.
        cudaMemcpy(odata, buffer4 + 1, n * sizeof(int), cudaMemcpyDeviceToHost);
        if (n >= 2) {
            // Recompute the last element on the host (the shifted copy's
            // final slot came from the zero padding). Guarded: n == 1 used
            // to read odata[-1].
            odata[n - 1] = odata[n - 2] + idata[n - 1];
        }

        // Release device buffers.
        cudaFree(buffer3);
        cudaFree(buffer4);
        cudaFree(temp);
    }

    // Naive single-block inclusive scan with in-kernel ping-pong between the
    // two buffers. Not used by scan()/compact(); kept for reference.
    // NOTE(review): the final result's buffer depends on the parity of the
    // number of doubling steps — callers must account for it.
    __global__ void naive_sum2(int n, int* odata, int* idata)
    {
        int index = threadIdx.x;
        int i;
        int *ping;
        int *pong;
        int *swap;
        ping = idata;
        pong = odata;
        __syncthreads();
        for (i = 1; i < n; i *= 2) {
            if (index - i >= 0) {
                pong[index] = ping[index] + ping[index - i];
            } else {
                pong[index] = ping[index];
            }
            // Ping-Pong here!
            swap = ping;
            ping = pong;
            pong = swap;
            __syncthreads();
        }
    }

    /**
     * Performs stream compaction on idata, storing the result into odata.
     * All zeroes are discarded.
     *
     * Single-block kernel launches: requires n <= maxThreadsPerBlock.
     * The boolean scan runs on the host; odata is reused as scratch before
     * receiving the final compacted output.
     *
     * @param n The number of elements in idata.
     * @param odata The array into which to store elements.
     * @param idata The array of elements to compact.
     * @returns The number of elements remaining after compaction.
     */
    int compact(int n, int *odata, const int *idata)
    {
        if (n <= 0) {
            return 0;  // nothing to compact
        }

        int *data;
        cudaMalloc((void**)&data, n * sizeof(int));
        cudaMemcpy(data, idata, n * sizeof(int), cudaMemcpyHostToDevice);

        int *out;
        cudaMalloc((void**)&out, n * sizeof(int));
        cudaMemcpy(out, idata, n * sizeof(int), cudaMemcpyHostToDevice);

        int *bools;
        cudaMalloc((void**)&bools, n * sizeof(int));
        int *index;
        cudaMalloc((void**)&index, n * sizeof(int));

        timer().startGpuTimer();

        // Map each element to 0/1 depending on whether it is kept.
        Common::kernMapToBoolean<<<1, n>>>(n, bools, data);

        // Scan the boolean flags on the host; odata doubles as scratch.
        cudaMemcpy(odata, bools, n * sizeof(int), cudaMemcpyDeviceToHost);
        int i;
        int count = 0;
        for (i = 0; i < n; i++) {
            count += odata[i];
            odata[i] = count - 1;  // destination index for surviving elements
        }
        cudaMemcpy(index, odata, n * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(data, idata, n * sizeof(int), cudaMemcpyHostToDevice);

        // Scatter the surviving elements to their destinations.
        Common::kernScatter<<<1, n>>>(n, out, data, bools, index);

        timer().endGpuTimer();

        // Copy the compacted data back to the CPU.
        cudaMemcpy(odata, out, n * sizeof(int), cudaMemcpyDeviceToHost);

        // The inclusive-scan-derived indices shift element 0; restore it
        // when it survives compaction.
        if (idata[0] != 0) {
            odata[0] = idata[0];
        }

        // fix: release the device buffers (they previously leaked).
        cudaFree(data);
        cudaFree(out);
        cudaFree(bools);
        cudaFree(index);

        return count;
    }
}
}
769b2342ebf57f5ad54e708ea93e8ef72e3e663a.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar (thakkarv@gatech.edu). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, 
EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness 
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); 
hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_32x64x1_8x8_4x8_1x1) 
->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / 
Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) 
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using 
ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, 
precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using 
MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, 
MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_32x64x1_8x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // 
precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_64x32x1_8x8_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, 
N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * 
N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x64x1_8x8_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 1 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_64x32x1_8x8_8x4_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif 
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / 
Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; 
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, 
precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_32x64x1_8x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using 
MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, 
MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, 
// precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, 
N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * 
N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x256x8_16x64x1_4x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / 
Block: 2 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_32x32x1_8x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void 
BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  // NOTE(review): hipDeviceSynchronize()'s return value is ignored here (and in
  // every benchmark in this file); acceptable for benchmark-only code, but an
  // async kernel failure would surface only as bogus timings — confirm intended.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  // "Flop/s" counts 2*N^3 semiring operations per iteration (one addition-op
  // plus one multiplication-op per multiply-accumulate of an NxNxN GEMM).
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x256x8_32x64x1_8x8_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

// ----------------------------------------------------------------------------
// NOTE(review): the benchmarks below are mechanically generated variants of the
// same harness. Each one instantiates an SM50 SIMT semiring GEMM with
// addition = cuasr::minimum<float> and multiplication = cuasr::multiplies<float>
// (all three operands column-major), for one particular threadblock/warp tiling,
// and is compiled in or out via CUASR_BENCH_LEVEL. The long name suffix appears
// to encode: threadblock MxNxK _ warp tile _ elements-per-thread _
// threads-per-warp _ warps-per-block (derived from the shape comments below —
// confirm against the generator script).
// ----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
//   Threads / Warp: 8 x 4
//    Warps / Block: 2 x 4
//      Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
  using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_64x32x1_8x8_8x4_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
//   Threads / Warp: 4 x 8
//    Warps / Block: 4 x 2
//      Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
//   Threads / Warp: 4 x 8
//    Warps / Block: 4 x 2
//      Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//   Threads / Warp: 4 x 8
//    Warps / Block: 4 x 2
//      Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//   Threads / Warp: 8 x 4
//    Warps / Block: 4 x 2
//      Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
//   Threads / Warp: 4 x 8
//    Warps / Block: 4 x 2
//      Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_32x32x1_8x4_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
//   Threads / Warp: 4 x 8
//    Warps / Block: 4 x 2
//      Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_32x64x1_8x8_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
//   Threads / Warp: 8 x 4
//    Warps / Block: 4 x 2
//      Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x32x8_64x16x1_8x4_8x4_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
//   Threads / Warp: 8 x 4
//    Warps / Block: 4 x 2
//      Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x64x8_64x32x1_8x8_8x4_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
//   Threads / Warp: 4 x 8
//    Warps / Block: 4 x 4
//      Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
  using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
//   Threads / Warp: 4 x 8
//    Warps / Block: 4 x 4
//      Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
  using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
//   Threads / Warp: 8 x 4
//    Warps / Block: 4 x 4
//      Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
  using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
//   Threads / Warp: 4 x 8
//    Warps / Block: 4 x 4
//      Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//   Threads / Warp: 4 x 8
//    Warps / Block: 4 x 4
//      Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_16x32x1_4x4_4x8_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
//   Threads / Warp: 4 x 8
//    Warps / Block: 4 x 4
//      Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x256x8_16x64x1_4x8_4x8_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
//   Threads / Warp: 8 x 4
//    Warps / Block: 4 x 4
//      Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
  using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//   Threads / Warp: 8 x 4
//    Warps / Block: 4 x 4
//      Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_32x16x1_4x4_8x4_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
//   Threads / Warp: 4 x 8
//    Warps / Block: 4 x 4
//      Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_32x32x1_8x4_4x8_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
//   Threads / Warp: 8 x 4
//    Warps / Block: 4 x 4
//      Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x64x8_64x16x1_8x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
769b2342ebf57f5ad54e708ea93e8ef72e3e663a.cu
/*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar (thakkarv@gatech.edu). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, 
N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N 
* N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif 
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps 
/ Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void 
BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; 
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape 
= cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, 
precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using 
MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, 
MultOp,                                                             //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // Fail fast instead of reporting garbage timings if the kernel
    // launch or execution failed.
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_32x64x1_8x8_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Benchmarks an SM50 SIMT semi-ring GEMM (AddOp = minimum, MultOp = multiplies,
// i.e. min-times / "tropical" SRGEMM) on square N x N column-major operands.
// Each variant below differs only in its threadblock/warp tile configuration,
// documented in the per-variant header comment.
//
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) {
  // Problem size N comes from the registered benchmark range (256..4096).
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<64, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // Fail fast instead of reporting garbage timings on a CUDA error.
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  // 2*N^3 semi-ring ops per iteration (N multiplies + N mins per output element).
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_64x32x1_8x8_8x4_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x64x1_8x8_4x8_2x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 1
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<64, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_64x32x1_8x8_8x4_2x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_32x64x1_8x8_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<64, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<64, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_64x32x1_8x8_8x4_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 16, 16>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 32, 16>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 8, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; sync each iteration so the timing covers the kernel.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    if (cudaDeviceSynchronize() != cudaSuccess) {
      state.SkipWithError("cudaDeviceSynchronize failed during benchmark iteration");
      break;
    }
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x256x8_16x64x1_4x8_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4 | Threads / Warp: 8 x 4 | Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Times a single-precision SM50 SIMT SRGEMM over the (minimum, multiplies)
// semiring on N x N x N column-major operands; reports a Flop/s counter
// assuming 2*N^3 semiring operations per product.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  // Kernel configuration: scalar type, SIMT op class, SM50 tiling shapes.
  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // Harness owns the device operands; constructed once, outside timing.
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // Timed loop: one device SRGEMM per iteration; the synchronize charges the
  // device work to this iteration instead of letting launches queue up.
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_32x32x1_8x4_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 | Threads / Warp: 4 x 8 | Warps / Block: 2 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x256x8_32x64x1_8x8_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 | Threads / Warp: 8 x 4 | Warps / Block: 2 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<64, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_64x32x1_8x8_8x4_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2 | Threads / Warp: 4 x 8 | Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2 | Threads / Warp: 4 x 8 | Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4 | Threads / Warp: 4 x 8 | Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4 | Threads / Warp: 8 x 4 | Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4 | Threads / Warp: 4 x 8 | Warps / Block: 4 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_32x32x1_8x4_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 | Threads / Warp: 4 x 8 | Warps / Block: 4 x 2
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_32x64x1_8x8_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4 | Threads / Warp: 8 x 4 | Warps / Block: 4 x 2
// Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<64, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x32x8_64x16x1_8x4_8x4_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 | Threads / Warp: 8 x 4 | Warps / Block: 4 x 2
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<64, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x64x8_64x32x1_8x8_8x4_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2 | Threads / Warp: 4 x 8 | Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 16, 16>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4 | Threads / Warp: 4 x 8 | Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 32, 16>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2 | Threads / Warp: 8 x 4 | Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// SM50 SIMT (minimum, multiplies) SRGEMM benchmark; tile shapes per header.
static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 8, 16>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();  // attribute device work to this iteration
  }

  double flops = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps 
/ Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; 
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // 
precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nn_n_256x64x8_64x16x1_8x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
1299580bc4b3407a1ac176f298a2db92481b2442.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "initKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( initKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( initKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( initKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1299580bc4b3407a1ac176f298a2db92481b2442.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "initKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); initKernel<<<gridBlock,threadBlock>>>(); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { initKernel<<<gridBlock,threadBlock>>>(); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { initKernel<<<gridBlock,threadBlock>>>(); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1cd8317452e13102b88d1dc23bbd77d45e9a8fcd.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 by Contributors * \file transformer.cu * \brief GPU implementation of the operators used in Transformer */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime_api.h> #include <mxnet/base.h> #include "./transformer-inl.h" #include "../../common/cuda_utils.h" namespace mxnet { namespace op { // Approach in gemm_switch_fp32accum is coming from MLPerf v0.6 submission repository from NVIDIA // by https://github.com/kevinstephano template<typename DType> void CublasStridedBatchedGemm(mshadow::Stream<gpu>* s, bool transA, bool transB, int32_t m, int32_t n, int32_t k, float alpha, const DType* a, int32_t lda, int32_t strideA, const DType *b, int32_t ldb, int32_t strideB, float beta, DType *c, int32_t ldc, int32_t strideC, int32_t batchCount, hipblasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP) { #if TORCH_HIP_VERSION >= 9010 using namespace mxnet::common::cuda; CHECK_EQ(s->blas_handle_ownership_, mshadow::Stream<gpu>::OwnHandle) << "Must init CuBLAS handle in stream"; hipblasHandle_t blas_handle = mshadow::Stream<gpu>::GetBlasHandle(s); auto err = 
HIPBLAS_STATUS_SUCCESS; // TODO(cfujitsang): handle computation_precision err = hipblasGemmStridedBatchedEx( blas_handle, CublasTransposeOp(transA), CublasTransposeOp(transB), static_cast<int>(m), static_cast<int>(n), static_cast<int>(k), reinterpret_cast<void*>(&alpha), a, CublasType<DType>::kCudaFlag, static_cast<int>(lda), strideA, b, CublasType<DType>::kCudaFlag, static_cast<int>(ldb), strideB, reinterpret_cast<void*>(&beta), c, CublasType<DType>::kCudaFlag, static_cast<int>(ldc), strideC, static_cast<int>(batchCount), HIP_R_32F, algo); CHECK_EQ(err, HIPBLAS_STATUS_SUCCESS) << "Cublas gemmEx fail."; #else LOG(FATAL) << "Not implemented with CUDA < 9.1"; #endif } template<typename DType> void gemm_switch_fp32accum(mshadow::Stream<gpu>* s, bool transA, bool transB, int32_t m, int32_t n, int32_t k, float alpha, const DType *a, int32_t lda, int32_t strideA, const DType *b, int32_t ldb, int32_t strideB, float beta, DType *c, int32_t ldc, int32_t strideC, int32_t batchCount) { hipStream_t stream = mshadow::Stream<gpu>::GetStream(s); if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x7)) { CublasStridedBatchedGemm(s, transA, transB, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount, CUBLAS_GEMM_ALGO0_TENSOR_OP); } else { CublasStridedBatchedGemm(s, transA, transB, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } CHECK_CUDA_ERROR("Error at InterleavedMatMul"); } // TODO(cfujitsang): use scale as optional ? 
void InterleavedMatMulSelfAttQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries_keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_dim = inputs[0].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float beta = req[0] == kAddTo ? 1.f : 0.f; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, true, false, qkv_seq_len, qkv_seq_len, head_dim, scale, queries_keys_values + head_dim, lead_dim, batch_stride, queries_keys_values, lead_dim, batch_stride, beta, output, qkv_seq_len, qkv_seq_len * qkv_seq_len, attn_batches); }) } void BackwardInterleavedMatMulSelfAttQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries_keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const 
int32_t output_lin_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); const float beta = req[0] == kAddTo ? 1.f : 0.f; if (req[0] == kNullOp) return; if (req[0] == kWriteTo) { hipMemsetAsync(queries_keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } gemm_switch_fp32accum(s, false, false, head_dim, qkv_seq_len, qkv_seq_len, scale, queries_keys_values + head_dim, lead_dim, batch_stride, output_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads, lead_dim, batch_stride, attn_batches); gemm_switch_fp32accum(s, false, true, head_dim, qkv_seq_len, qkv_seq_len, scale, queries_keys_values, lead_dim, batch_stride, output_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads + head_dim, lead_dim, batch_stride, attn_batches); }) } void InterleavedMatMulSelfAttValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries_keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_dim = inputs[0].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; 
const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float alpha = 1.f; const float beta = req[0] == kAddTo ? 1.f : 0.f; if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, false, false, head_dim, qkv_seq_len, qkv_seq_len, alpha, queries_keys_values + 2 * head_dim, lead_dim, batch_stride, attention_maps, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, output, head_dim * attn_batches, head_dim, attn_batches); }) } void BackwardInterleavedMatMulSelfAttValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries_keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* attention_maps_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float alpha = 1.f; if (req[0] != kNullOp) { if (req[0] == kWriteTo) { hipMemsetAsync(queries_keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[0] == kAddTo ? 
1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, qkv_seq_len, qkv_seq_len, alpha, output_grads, head_dim * attn_batches, head_dim, attention_maps, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads + 2 * head_dim, lead_dim, batch_stride, attn_batches); } if (req[1] != kNullOp) { const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, true, false, qkv_seq_len, qkv_seq_len, head_dim, alpha, queries_keys_values + 2 * head_dim, lead_dim, batch_stride, output_grads, head_dim * attn_batches, head_dim, beta, attention_maps_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, attn_batches); } }) } void InterleavedMatMulEncDecQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t q_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_q_dim = inputs[0].shape_[2]; const int32_t kv_seq_len = inputs[1].shape_[0]; const int32_t output_lin_kv_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_q_dim; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim_q = attn_batches * head_dim; const int32_t lead_dim_kv = attn_batches * 2 * head_dim; const int32_t batch_stride_q = head_dim; const int32_t batch_stride_kv = head_dim * 2; const float beta = req[0] == kAddTo ? 
1.f : 0.f; const float scale = 1.f / sqrt(static_cast<float>(head_dim)); if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, true, false, kv_seq_len, q_seq_len, head_dim, scale, keys_values, lead_dim_kv, batch_stride_kv, queries, lead_dim_q, batch_stride_q, beta, output, kv_seq_len, kv_seq_len * q_seq_len, attn_batches); }) } void BackwardInterleavedMatMulEncDecQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* keys_values_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t q_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_q_dim = inputs[1].shape_[2]; const int32_t kv_seq_len = inputs[2].shape_[0]; const int32_t output_lin_kv_dim = inputs[2].shape_[2]; const int32_t embed_dim = output_lin_q_dim; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim_q = attn_batches * head_dim; const int32_t lead_dim_kv = attn_batches * 2 * head_dim; const int32_t batch_stride_q = head_dim; const int32_t batch_stride_kv = head_dim * 2; const float scale = 1.f / sqrt(static_cast<float>(head_dim)); if (req[0] != kNullOp) { const float beta = req[0] == kAddTo ? 
1.f : 0.f; gemm_switch_fp32accum(s, false, false, head_dim, q_seq_len, kv_seq_len, scale, keys_values, lead_dim_kv, batch_stride_kv, output_grads, kv_seq_len, kv_seq_len * q_seq_len, beta, queries_grads, lead_dim_q, batch_stride_q, attn_batches); } if (req[1] != kNullOp) { if (req[1] == kWriteTo) { hipMemsetAsync(keys_values_grads, 0, outputs[1].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, kv_seq_len, q_seq_len, scale, queries, lead_dim_q, batch_stride_q, output_grads, kv_seq_len, kv_seq_len * q_seq_len, beta, keys_values_grads, lead_dim_kv, batch_stride_kv, attn_batches); } }) } void InterleavedMatMulEncDecValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t kv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_kv_dim = inputs[0].shape_[2]; const int32_t attn_batches = inputs[1].shape_[0]; const int32_t q_seq_len = inputs[1].shape_[1]; const int32_t embed_dim = output_lin_kv_dim / 2; int32_t head_dim = embed_dim / params.heads; const int32_t lead_dim_kv = attn_batches * head_dim * 2; const int32_t batch_stride_kv = 2 * head_dim; const float alpha = 1.f; const float beta = req[0] == kAddTo ? 
1.f : 0.f; if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, false, false, head_dim, q_seq_len, kv_seq_len, alpha, keys_values + head_dim, lead_dim_kv, batch_stride_kv, attention_maps, kv_seq_len, kv_seq_len * q_seq_len, beta, output, head_dim * attn_batches, head_dim, attn_batches); }) } void BackwardInterleavedMatMulEncDecValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* attention_maps_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t kv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_kv_dim = inputs[1].shape_[2]; const int32_t attn_batches = inputs[2].shape_[0]; const int32_t q_seq_len = inputs[2].shape_[1]; const int32_t embed_dim = output_lin_kv_dim / 2; int32_t head_dim = embed_dim / params.heads; const int32_t lead_dim_kv = attn_batches * head_dim * 2; const int32_t batch_stride_kv = 2 * head_dim; const float alpha = 1.f; if (req[0] != kNullOp) { if (req[0] == kWriteTo) { hipMemsetAsync(keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[0] == kAddTo ? 
1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, kv_seq_len, q_seq_len, alpha, output_grads, head_dim * attn_batches, head_dim, attention_maps, kv_seq_len, kv_seq_len * q_seq_len, beta, keys_values_grads + head_dim, lead_dim_kv, batch_stride_kv, attn_batches); } if (req[1] != kNullOp) { const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, true, false, kv_seq_len, q_seq_len, head_dim, alpha, keys_values + head_dim, lead_dim_kv, batch_stride_kv, output_grads, head_dim * attn_batches, head_dim, beta, attention_maps_grads, kv_seq_len, kv_seq_len * q_seq_len, attn_batches); } }) } NNVM_REGISTER_OP(_contrib_interleaved_matmul_selfatt_qk) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulSelfAttQKGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_selfatt_valatt) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulSelfAttValAttGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_encdec_qk) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulEncDecQKGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_encdec_valatt) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulEncDecValAttGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_selfatt_qk) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulSelfAttQKGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_selfatt_valatt) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulSelfAttValAttGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_encdec_qk) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulEncDecQKGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_encdec_valatt) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulEncDecValAttGPU); // relu NNVM_REGISTER_OP(_contrib_div_sqrt_dim) .set_attr<FCompute>("FCompute<gpu>", DivSqrtDimForward_<gpu>); } // namespace op } // namespace mxnet
1cd8317452e13102b88d1dc23bbd77d45e9a8fcd.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 by Contributors * \file transformer.cu * \brief GPU implementation of the operators used in Transformer */ #include <cuda.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cuda_profiler_api.h> #include <mxnet/base.h> #include "./transformer-inl.h" #include "../../common/cuda_utils.h" namespace mxnet { namespace op { // Approach in gemm_switch_fp32accum is coming from MLPerf v0.6 submission repository from NVIDIA // by https://github.com/kevinstephano template<typename DType> void CublasStridedBatchedGemm(mshadow::Stream<gpu>* s, bool transA, bool transB, int32_t m, int32_t n, int32_t k, float alpha, const DType* a, int32_t lda, int32_t strideA, const DType *b, int32_t ldb, int32_t strideB, float beta, DType *c, int32_t ldc, int32_t strideC, int32_t batchCount, cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP) { #if CUDA_VERSION >= 9010 using namespace mxnet::common::cuda; CHECK_EQ(s->blas_handle_ownership_, mshadow::Stream<gpu>::OwnHandle) << "Must init CuBLAS handle in stream"; cublasHandle_t blas_handle = mshadow::Stream<gpu>::GetBlasHandle(s); auto err = CUBLAS_STATUS_SUCCESS; // TODO(cfujitsang): handle computation_precision err = 
cublasGemmStridedBatchedEx( blas_handle, CublasTransposeOp(transA), CublasTransposeOp(transB), static_cast<int>(m), static_cast<int>(n), static_cast<int>(k), reinterpret_cast<void*>(&alpha), a, CublasType<DType>::kCudaFlag, static_cast<int>(lda), strideA, b, CublasType<DType>::kCudaFlag, static_cast<int>(ldb), strideB, reinterpret_cast<void*>(&beta), c, CublasType<DType>::kCudaFlag, static_cast<int>(ldc), strideC, static_cast<int>(batchCount), CUDA_R_32F, algo); CHECK_EQ(err, CUBLAS_STATUS_SUCCESS) << "Cublas gemmEx fail."; #else LOG(FATAL) << "Not implemented with CUDA < 9.1"; #endif } template<typename DType> void gemm_switch_fp32accum(mshadow::Stream<gpu>* s, bool transA, bool transB, int32_t m, int32_t n, int32_t k, float alpha, const DType *a, int32_t lda, int32_t strideA, const DType *b, int32_t ldb, int32_t strideB, float beta, DType *c, int32_t ldc, int32_t strideC, int32_t batchCount) { cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s); if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x7)) { CublasStridedBatchedGemm(s, transA, transB, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount, CUBLAS_GEMM_ALGO0_TENSOR_OP); } else { CublasStridedBatchedGemm(s, transA, transB, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } CHECK_CUDA_ERROR("Error at InterleavedMatMul"); } // TODO(cfujitsang): use scale as optional ? 
void InterleavedMatMulSelfAttQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries_keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_dim = inputs[0].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float beta = req[0] == kAddTo ? 1.f : 0.f; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, true, false, qkv_seq_len, qkv_seq_len, head_dim, scale, queries_keys_values + head_dim, lead_dim, batch_stride, queries_keys_values, lead_dim, batch_stride, beta, output, qkv_seq_len, qkv_seq_len * qkv_seq_len, attn_batches); }) } void BackwardInterleavedMatMulSelfAttQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries_keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const 
int32_t output_lin_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); const float beta = req[0] == kAddTo ? 1.f : 0.f; if (req[0] == kNullOp) return; if (req[0] == kWriteTo) { cudaMemsetAsync(queries_keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } gemm_switch_fp32accum(s, false, false, head_dim, qkv_seq_len, qkv_seq_len, scale, queries_keys_values + head_dim, lead_dim, batch_stride, output_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads, lead_dim, batch_stride, attn_batches); gemm_switch_fp32accum(s, false, true, head_dim, qkv_seq_len, qkv_seq_len, scale, queries_keys_values, lead_dim, batch_stride, output_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads + head_dim, lead_dim, batch_stride, attn_batches); }) } void InterleavedMatMulSelfAttValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries_keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_dim = inputs[0].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; 
const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float alpha = 1.f; const float beta = req[0] == kAddTo ? 1.f : 0.f; if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, false, false, head_dim, qkv_seq_len, qkv_seq_len, alpha, queries_keys_values + 2 * head_dim, lead_dim, batch_stride, attention_maps, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, output, head_dim * attn_batches, head_dim, attn_batches); }) } void BackwardInterleavedMatMulSelfAttValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries_keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* attention_maps_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float alpha = 1.f; if (req[0] != kNullOp) { if (req[0] == kWriteTo) { cudaMemsetAsync(queries_keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[0] == kAddTo ? 
1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, qkv_seq_len, qkv_seq_len, alpha, output_grads, head_dim * attn_batches, head_dim, attention_maps, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads + 2 * head_dim, lead_dim, batch_stride, attn_batches); } if (req[1] != kNullOp) { const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, true, false, qkv_seq_len, qkv_seq_len, head_dim, alpha, queries_keys_values + 2 * head_dim, lead_dim, batch_stride, output_grads, head_dim * attn_batches, head_dim, beta, attention_maps_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, attn_batches); } }) } void InterleavedMatMulEncDecQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t q_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_q_dim = inputs[0].shape_[2]; const int32_t kv_seq_len = inputs[1].shape_[0]; const int32_t output_lin_kv_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_q_dim; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim_q = attn_batches * head_dim; const int32_t lead_dim_kv = attn_batches * 2 * head_dim; const int32_t batch_stride_q = head_dim; const int32_t batch_stride_kv = head_dim * 2; const float beta = req[0] == kAddTo ? 
1.f : 0.f; const float scale = 1.f / sqrt(static_cast<float>(head_dim)); if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, true, false, kv_seq_len, q_seq_len, head_dim, scale, keys_values, lead_dim_kv, batch_stride_kv, queries, lead_dim_q, batch_stride_q, beta, output, kv_seq_len, kv_seq_len * q_seq_len, attn_batches); }) } void BackwardInterleavedMatMulEncDecQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* keys_values_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t q_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_q_dim = inputs[1].shape_[2]; const int32_t kv_seq_len = inputs[2].shape_[0]; const int32_t output_lin_kv_dim = inputs[2].shape_[2]; const int32_t embed_dim = output_lin_q_dim; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim_q = attn_batches * head_dim; const int32_t lead_dim_kv = attn_batches * 2 * head_dim; const int32_t batch_stride_q = head_dim; const int32_t batch_stride_kv = head_dim * 2; const float scale = 1.f / sqrt(static_cast<float>(head_dim)); if (req[0] != kNullOp) { const float beta = req[0] == kAddTo ? 
1.f : 0.f; gemm_switch_fp32accum(s, false, false, head_dim, q_seq_len, kv_seq_len, scale, keys_values, lead_dim_kv, batch_stride_kv, output_grads, kv_seq_len, kv_seq_len * q_seq_len, beta, queries_grads, lead_dim_q, batch_stride_q, attn_batches); } if (req[1] != kNullOp) { if (req[1] == kWriteTo) { cudaMemsetAsync(keys_values_grads, 0, outputs[1].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, kv_seq_len, q_seq_len, scale, queries, lead_dim_q, batch_stride_q, output_grads, kv_seq_len, kv_seq_len * q_seq_len, beta, keys_values_grads, lead_dim_kv, batch_stride_kv, attn_batches); } }) } void InterleavedMatMulEncDecValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t kv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_kv_dim = inputs[0].shape_[2]; const int32_t attn_batches = inputs[1].shape_[0]; const int32_t q_seq_len = inputs[1].shape_[1]; const int32_t embed_dim = output_lin_kv_dim / 2; int32_t head_dim = embed_dim / params.heads; const int32_t lead_dim_kv = attn_batches * head_dim * 2; const int32_t batch_stride_kv = 2 * head_dim; const float alpha = 1.f; const float beta = req[0] == kAddTo ? 
1.f : 0.f; if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, false, false, head_dim, q_seq_len, kv_seq_len, alpha, keys_values + head_dim, lead_dim_kv, batch_stride_kv, attention_maps, kv_seq_len, kv_seq_len * q_seq_len, beta, output, head_dim * attn_batches, head_dim, attn_batches); }) } void BackwardInterleavedMatMulEncDecValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* attention_maps_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t kv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_kv_dim = inputs[1].shape_[2]; const int32_t attn_batches = inputs[2].shape_[0]; const int32_t q_seq_len = inputs[2].shape_[1]; const int32_t embed_dim = output_lin_kv_dim / 2; int32_t head_dim = embed_dim / params.heads; const int32_t lead_dim_kv = attn_batches * head_dim * 2; const int32_t batch_stride_kv = 2 * head_dim; const float alpha = 1.f; if (req[0] != kNullOp) { if (req[0] == kWriteTo) { cudaMemsetAsync(keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[0] == kAddTo ? 
1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, kv_seq_len, q_seq_len, alpha, output_grads, head_dim * attn_batches, head_dim, attention_maps, kv_seq_len, kv_seq_len * q_seq_len, beta, keys_values_grads + head_dim, lead_dim_kv, batch_stride_kv, attn_batches); } if (req[1] != kNullOp) { const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, true, false, kv_seq_len, q_seq_len, head_dim, alpha, keys_values + head_dim, lead_dim_kv, batch_stride_kv, output_grads, head_dim * attn_batches, head_dim, beta, attention_maps_grads, kv_seq_len, kv_seq_len * q_seq_len, attn_batches); } }) } NNVM_REGISTER_OP(_contrib_interleaved_matmul_selfatt_qk) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulSelfAttQKGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_selfatt_valatt) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulSelfAttValAttGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_encdec_qk) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulEncDecQKGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_encdec_valatt) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulEncDecValAttGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_selfatt_qk) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulSelfAttQKGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_selfatt_valatt) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulSelfAttValAttGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_encdec_qk) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulEncDecQKGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_encdec_valatt) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulEncDecValAttGPU); // relu NNVM_REGISTER_OP(_contrib_div_sqrt_dim) .set_attr<FCompute>("FCompute<gpu>", DivSqrtDimForward_<gpu>); } // namespace op } // namespace mxnet
ded4b2623e19094f74924fa745deb43cd2f7ca9c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <iostream> #include "timer.h" using namespace std; /* Utility function, use to do error checking. Use this function like this: checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t))); And to check the result of a kernel invocation: checkCudaCall(hipGetLastError()); */ static void checkCudaCall(hipError_t result) { if (result != hipSuccess) { cerr << "cuda error: " << hipGetErrorString(result) << endl; exit(1); } } __global__ void vectorTransformKernel(float* a, float* b, float* result) { // Get the thread id, which we can use as itterator in the array of results. int i = threadIdx.x + blockDim.x * blockIdx.x; // insert operation here for (int j=0; j<5; j++) { result[i] = result[i]+a[i]*b[i]; } } void vectorTransformCuda(int n, float* a, float* b, float* result) { int threadBlockSize = 256; // allocate the vectors on the GPU float* deviceA = NULL; checkCudaCall(hipMalloc((void **) &deviceA, n * sizeof(float))); if (deviceA == NULL) { cout << "could not allocate memory!" << endl; return; } float* deviceB = NULL; checkCudaCall(hipMalloc((void **) &deviceB, n * sizeof(float))); if (deviceB == NULL) { checkCudaCall(hipFree(deviceA)); cout << "could not allocate memory!" << endl; return; } float* deviceResult = NULL; checkCudaCall(hipMalloc((void **) &deviceResult, n * sizeof(float))); if (deviceResult == NULL) { checkCudaCall(hipFree(deviceA)); checkCudaCall(hipFree(deviceB)); cout << "could not allocate memory!" 
<< endl; return; } timer kernelTime1 = timer("kernelTime1"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(hipMemcpy(deviceA, a, n*sizeof(float), hipMemcpyHostToDevice)); checkCudaCall(hipMemcpy(deviceB, b, n*sizeof(float), hipMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); hipLaunchKernelGGL(( vectorTransformKernel), dim3(n/threadBlockSize), dim3(threadBlockSize), 0, 0, deviceA, deviceB, deviceResult); hipDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(hipGetLastError()); // copy result back memoryTime.start(); checkCudaCall(hipMemcpy(result, deviceResult, n * sizeof(float), hipMemcpyDeviceToHost)); checkCudaCall(hipMemcpy(b, deviceB, n * sizeof(float), hipMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(hipFree(deviceA)); checkCudaCall(hipFree(deviceB)); checkCudaCall(hipFree(deviceResult)); cout << "vector-transform (kernel): \t\t" << kernelTime1 << endl; cout << "vector-transform (memory): \t\t" << memoryTime << endl; } int vectorTransformSeq(int n, float* a, float* b, float* result) { int i,j; timer sequentialTime = timer("Sequential"); sequentialTime.start(); for (j=0; j<5; j++) { for (i=0; i<n; i++) { result[i] = result[i]+a[i]*b[i]; } } sequentialTime.stop(); cout << "vector-transform (sequential): \t\t" << sequentialTime << endl; } int main(int argc, char* argv[]) { int n = 655360; float* a = new float[n]; float* b = new float[n]; float* result = new float[n]; float* result_s = new float[n]; if (argc > 1) n = atoi(argv[1]); cout << "Iteratively transform vector A with vector B of " << n << " integer elements." << endl; // initialize the vectors. 
for(int i=0; i<n; i++) { a[i] = i; b[i] = 0.1*i; result[i]=0; result_s[i]=0; } vectorTransformSeq(n, a, b, result_s); vectorTransformCuda(n, a, b, result); // verify the resuls for(int i=0; i<n; i++) { // if(result[i] != n /*2*i*/) { if (result[i]!=result_s[i]) { cout << "error in results! Element " << i << " is " << result[i] << ", but should be " << result_s[i] << endl; exit(1); } } cout << "results OK!" << endl; delete[] a; delete[] b; delete[] result; return 0; }
ded4b2623e19094f74924fa745deb43cd2f7ca9c.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <iostream> #include "timer.h" using namespace std; /* Utility function, use to do error checking. Use this function like this: checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t))); And to check the result of a kernel invocation: checkCudaCall(cudaGetLastError()); */ static void checkCudaCall(cudaError_t result) { if (result != cudaSuccess) { cerr << "cuda error: " << cudaGetErrorString(result) << endl; exit(1); } } __global__ void vectorTransformKernel(float* a, float* b, float* result) { // Get the thread id, which we can use as itterator in the array of results. int i = threadIdx.x + blockDim.x * blockIdx.x; // insert operation here for (int j=0; j<5; j++) { result[i] = result[i]+a[i]*b[i]; } } void vectorTransformCuda(int n, float* a, float* b, float* result) { int threadBlockSize = 256; // allocate the vectors on the GPU float* deviceA = NULL; checkCudaCall(cudaMalloc((void **) &deviceA, n * sizeof(float))); if (deviceA == NULL) { cout << "could not allocate memory!" << endl; return; } float* deviceB = NULL; checkCudaCall(cudaMalloc((void **) &deviceB, n * sizeof(float))); if (deviceB == NULL) { checkCudaCall(cudaFree(deviceA)); cout << "could not allocate memory!" << endl; return; } float* deviceResult = NULL; checkCudaCall(cudaMalloc((void **) &deviceResult, n * sizeof(float))); if (deviceResult == NULL) { checkCudaCall(cudaFree(deviceA)); checkCudaCall(cudaFree(deviceB)); cout << "could not allocate memory!" 
<< endl; return; } timer kernelTime1 = timer("kernelTime1"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(cudaMemcpy(deviceA, a, n*sizeof(float), cudaMemcpyHostToDevice)); checkCudaCall(cudaMemcpy(deviceB, b, n*sizeof(float), cudaMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); vectorTransformKernel<<<n/threadBlockSize, threadBlockSize>>>(deviceA, deviceB, deviceResult); cudaDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(cudaGetLastError()); // copy result back memoryTime.start(); checkCudaCall(cudaMemcpy(result, deviceResult, n * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaCall(cudaMemcpy(b, deviceB, n * sizeof(float), cudaMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(cudaFree(deviceA)); checkCudaCall(cudaFree(deviceB)); checkCudaCall(cudaFree(deviceResult)); cout << "vector-transform (kernel): \t\t" << kernelTime1 << endl; cout << "vector-transform (memory): \t\t" << memoryTime << endl; } int vectorTransformSeq(int n, float* a, float* b, float* result) { int i,j; timer sequentialTime = timer("Sequential"); sequentialTime.start(); for (j=0; j<5; j++) { for (i=0; i<n; i++) { result[i] = result[i]+a[i]*b[i]; } } sequentialTime.stop(); cout << "vector-transform (sequential): \t\t" << sequentialTime << endl; } int main(int argc, char* argv[]) { int n = 655360; float* a = new float[n]; float* b = new float[n]; float* result = new float[n]; float* result_s = new float[n]; if (argc > 1) n = atoi(argv[1]); cout << "Iteratively transform vector A with vector B of " << n << " integer elements." << endl; // initialize the vectors. 
for(int i=0; i<n; i++) { a[i] = i; b[i] = 0.1*i; result[i]=0; result_s[i]=0; } vectorTransformSeq(n, a, b, result_s); vectorTransformCuda(n, a, b, result); // verify the resuls for(int i=0; i<n; i++) { // if(result[i] != n /*2*i*/) { if (result[i]!=result_s[i]) { cout << "error in results! Element " << i << " is " << result[i] << ", but should be " << result_s[i] << endl; exit(1); } } cout << "results OK!" << endl; delete[] a; delete[] b; delete[] result; return 0; }
e1ef9e8a121a8dde3d30ea44352f688dd75a2633.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. 
*/ /* -*- mode: c++ -*- */ #include "gg.h" #include "ggcuda.h" void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; #include "gen_hip.cuh" static const int __tb_ConnectedComp = TB_SIZE; static const int __tb_FirstItr_ConnectedComp = TB_SIZE; __global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_comp_current, uint32_t * p_comp_old) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_comp_current[src] = graph.node_data[src]; p_comp_old[src] = graph.node_data[src]; } } // FP: "8 -> 9; } __global__ void FirstItr_ConnectedComp(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_comp_current, uint32_t * p_comp_old) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_FirstItr_ConnectedComp; index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; 
__shared__ npsTy nps ; // FP: "5 -> 6; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; // FP: "6 -> 7; bool pop = src < __end; // FP: "7 -> 8; if (pop) { p_comp_old[src] = p_comp_current[src]; } // FP: "10 -> 11; // FP: "13 -> 14; struct NPInspector1 _np = {0,0,0,0,0,0}; // FP: "14 -> 15; __shared__ struct { index_type src; } _np_closure [TB_SIZE]; // FP: "15 -> 16; _np_closure[threadIdx.x].src = src; // FP: "16 -> 17; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } // FP: "19 -> 20; // FP: "20 -> 21; _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0; // FP: "21 -> 22; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); // FP: "22 -> 23; if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "25 -> 26; __syncthreads(); // FP: "26 -> 27; while (true) { // FP: "27 -> 28; if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } // FP: "30 -> 31; __syncthreads(); // FP: "31 -> 32; if (nps.tb.owner == MAX_TB_SIZE + 1) { // FP: "32 -> 33; __syncthreads(); // FP: "33 -> 34; break; } // FP: "35 -> 36; if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } // FP: "38 -> 39; __syncthreads(); // FP: "39 -> 40; int ns = nps.tb.start; int ne = nps.tb.size; // FP: "40 -> 41; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "43 -> 44; assert(nps.tb.src < __kernel_tb_size); src = _np_closure[nps.tb.src].src; // FP: "44 -> 45; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type jj; jj = ns +_np_j; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = 
p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } // FP: "57 -> 58; __syncthreads(); } // FP: "59 -> 60; // FP: "60 -> 61; { const int warpid = threadIdx.x / 32; // FP: "61 -> 62; const int _np_laneid = cub::LaneId(); // FP: "62 -> 63; while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); src = _np_closure[nps.warp.src[warpid]].src; for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type jj; jj = _np_w_start +_np_ii; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } } // FP: "85 -> 86; __syncthreads(); // FP: "86 -> 87; } // FP: "87 -> 88; __syncthreads(); // FP: "88 -> 89; _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; // FP: "89 -> 90; while (_np.work()) { // FP: "90 -> 91; int _np_i =0; // FP: "91 -> 92; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); // FP: "92 -> 93; __syncthreads(); // FP: "93 -> 94; // FP: "94 -> 95; for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type jj; assert(nps.fg.src[_np_i] < __kernel_tb_size); src = _np_closure[nps.fg.src[_np_i]].src; jj= nps.fg.itvalue[_np_i]; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } // FP: "108 -> 109; _np.execute_round_done(ITSIZE); // FP: "109 -> 110; 
__syncthreads(); } // FP: "111 -> 112; assert(threadIdx.x < __kernel_tb_size); src = _np_closure[threadIdx.x].src; } // FP: "113 -> 114; } __global__ void ConnectedComp(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_comp_current, uint32_t * p_comp_old, HGAccumulator<unsigned int> DGAccumulator_accum) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_ConnectedComp; __shared__ hipcub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts; index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; __shared__ npsTy nps ; // FP: "5 -> 6; // FP: "6 -> 7; DGAccumulator_accum.thread_entry(); // FP: "7 -> 8; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; // FP: "8 -> 9; bool pop = src < __end; // FP: "9 -> 10; if (pop) { if (p_comp_old[src] > p_comp_current[src]) { p_comp_old[src] = p_comp_current[src]; DGAccumulator_accum.reduce( 1); } else { pop = false; } } // FP: "15 -> 16; // FP: "18 -> 19; struct NPInspector1 _np = {0,0,0,0,0,0}; // FP: "19 -> 20; __shared__ struct { index_type src; } _np_closure [TB_SIZE]; // FP: "20 -> 21; _np_closure[threadIdx.x].src = src; // FP: "21 -> 22; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } // FP: "24 -> 25; // FP: "25 -> 26; _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? 
_np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0; // FP: "26 -> 27; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); // FP: "27 -> 28; if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "30 -> 31; __syncthreads(); // FP: "31 -> 32; while (true) { // FP: "32 -> 33; if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } // FP: "35 -> 36; __syncthreads(); // FP: "36 -> 37; if (nps.tb.owner == MAX_TB_SIZE + 1) { // FP: "37 -> 38; __syncthreads(); // FP: "38 -> 39; break; } // FP: "40 -> 41; if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } // FP: "43 -> 44; __syncthreads(); // FP: "44 -> 45; int ns = nps.tb.start; int ne = nps.tb.size; // FP: "45 -> 46; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "48 -> 49; assert(nps.tb.src < __kernel_tb_size); src = _np_closure[nps.tb.src].src; // FP: "49 -> 50; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type jj; jj = ns +_np_j; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } // FP: "62 -> 63; __syncthreads(); } // FP: "64 -> 65; // FP: "65 -> 66; { const int warpid = threadIdx.x / 32; // FP: "66 -> 67; const int _np_laneid = cub::LaneId(); // FP: "67 -> 68; while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); src = 
_np_closure[nps.warp.src[warpid]].src; for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type jj; jj = _np_w_start +_np_ii; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } } // FP: "90 -> 91; __syncthreads(); // FP: "91 -> 92; } // FP: "92 -> 93; __syncthreads(); // FP: "93 -> 94; _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; // FP: "94 -> 95; while (_np.work()) { // FP: "95 -> 96; int _np_i =0; // FP: "96 -> 97; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); // FP: "97 -> 98; __syncthreads(); // FP: "98 -> 99; // FP: "99 -> 100; for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type jj; assert(nps.fg.src[_np_i] < __kernel_tb_size); src = _np_closure[nps.fg.src[_np_i]].src; jj= nps.fg.itvalue[_np_i]; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } // FP: "113 -> 114; _np.execute_round_done(ITSIZE); // FP: "114 -> 115; __syncthreads(); } // FP: "116 -> 117; assert(threadIdx.x < __kernel_tb_size); src = _np_closure[threadIdx.x].src; } // FP: "119 -> 120; DGAccumulator_accum.thread_exit<hipcub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts); // FP: "120 -> 121; } __global__ void ConnectedCompSanityCheck(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_comp_current, HGAccumulator<uint64_t> DGAccumulator_accum) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; __shared__ hipcub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_accum_ts; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; DGAccumulator_accum.thread_entry(); // FP: "3 -> 4; src_end = __end; for (index_type src = __begin + tid; src < src_end; src 
+= nthreads) { bool pop = src < __end; if (pop) { if (p_comp_current[src] == graph.node_data[src]) { DGAccumulator_accum.reduce( 1); } } } // FP: "11 -> 12; DGAccumulator_accum.thread_exit<hipcub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_accum_ts); // FP: "12 -> 13; } void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( InitializeGraph) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->comp_current.data.gpu_wr_ptr(), ctx->comp_old.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeGraph_allNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->gg.nnodes, ctx); // FP: "2 -> 3; } void InitializeGraph_masterNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx); // FP: "2 -> 3; } void InitializeGraph_nodesWithEdges_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void FirstItr_ConnectedComp_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( FirstItr_ConnectedComp) , dim3(blocks), dim3(__tb_FirstItr_ConnectedComp), 0, 0, ctx->gg, __begin, __end, ctx->comp_current.data.gpu_wr_ptr(), ctx->comp_old.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void FirstItr_ConnectedComp_allNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; FirstItr_ConnectedComp_cuda(0, ctx->gg.nnodes, ctx); // FP: "2 -> 3; } void FirstItr_ConnectedComp_masterNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; FirstItr_ConnectedComp_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, 
ctx); // FP: "2 -> 3; } void FirstItr_ConnectedComp_nodesWithEdges_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; FirstItr_ConnectedComp_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void ConnectedComp_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<unsigned int> _DGAccumulator_accum; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1); // FP: "5 -> 6; // FP: "6 -> 7; *(DGAccumulator_accumval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr(); // FP: "8 -> 9; hipLaunchKernelGGL(( ConnectedComp) , dim3(blocks), dim3(__tb_ConnectedComp), 0, 0, ctx->gg, __begin, __end, ctx->comp_current.data.gpu_wr_ptr(), ctx->comp_old.data.gpu_wr_ptr(), _DGAccumulator_accum); // FP: "9 -> 10; check_cuda_kernel; // FP: "10 -> 11; DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr()); // FP: "11 -> 12; } void ConnectedComp_allNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedComp_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void ConnectedComp_masterNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedComp_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void ConnectedComp_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedComp_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void ConnectedCompSanityCheck_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<uint64_t> _DGAccumulator_accum; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, 
threads); // FP: "4 -> 5; Shared<uint64_t> DGAccumulator_accumval = Shared<uint64_t>(1); // FP: "5 -> 6; // FP: "6 -> 7; *(DGAccumulator_accumval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr(); // FP: "8 -> 9; hipLaunchKernelGGL(( ConnectedCompSanityCheck) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->comp_current.data.gpu_wr_ptr(), _DGAccumulator_accum); // FP: "9 -> 10; check_cuda_kernel; // FP: "10 -> 11; DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr()); // FP: "11 -> 12; } void ConnectedCompSanityCheck_allNodes_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedCompSanityCheck_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void ConnectedCompSanityCheck_masterNodes_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedCompSanityCheck_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void ConnectedCompSanityCheck_nodesWithEdges_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedCompSanityCheck_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx); // FP: "2 -> 3; }
e1ef9e8a121a8dde3d30ea44352f688dd75a2633.cu
/* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. 
*/ /* -*- mode: c++ -*- */ #include "gg.h" #include "ggcuda.h" void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; #include "gen_cuda.cuh" static const int __tb_ConnectedComp = TB_SIZE; static const int __tb_FirstItr_ConnectedComp = TB_SIZE; __global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_comp_current, uint32_t * p_comp_old) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_comp_current[src] = graph.node_data[src]; p_comp_old[src] = graph.node_data[src]; } } // FP: "8 -> 9; } __global__ void FirstItr_ConnectedComp(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_comp_current, uint32_t * p_comp_old) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_FirstItr_ConnectedComp; index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; 
__shared__ npsTy nps ; // FP: "5 -> 6; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; // FP: "6 -> 7; bool pop = src < __end; // FP: "7 -> 8; if (pop) { p_comp_old[src] = p_comp_current[src]; } // FP: "10 -> 11; // FP: "13 -> 14; struct NPInspector1 _np = {0,0,0,0,0,0}; // FP: "14 -> 15; __shared__ struct { index_type src; } _np_closure [TB_SIZE]; // FP: "15 -> 16; _np_closure[threadIdx.x].src = src; // FP: "16 -> 17; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } // FP: "19 -> 20; // FP: "20 -> 21; _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0; // FP: "21 -> 22; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); // FP: "22 -> 23; if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "25 -> 26; __syncthreads(); // FP: "26 -> 27; while (true) { // FP: "27 -> 28; if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } // FP: "30 -> 31; __syncthreads(); // FP: "31 -> 32; if (nps.tb.owner == MAX_TB_SIZE + 1) { // FP: "32 -> 33; __syncthreads(); // FP: "33 -> 34; break; } // FP: "35 -> 36; if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } // FP: "38 -> 39; __syncthreads(); // FP: "39 -> 40; int ns = nps.tb.start; int ne = nps.tb.size; // FP: "40 -> 41; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "43 -> 44; assert(nps.tb.src < __kernel_tb_size); src = _np_closure[nps.tb.src].src; // FP: "44 -> 45; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type jj; jj = ns +_np_j; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = 
p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } // FP: "57 -> 58; __syncthreads(); } // FP: "59 -> 60; // FP: "60 -> 61; { const int warpid = threadIdx.x / 32; // FP: "61 -> 62; const int _np_laneid = cub::LaneId(); // FP: "62 -> 63; while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); src = _np_closure[nps.warp.src[warpid]].src; for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type jj; jj = _np_w_start +_np_ii; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } } // FP: "85 -> 86; __syncthreads(); // FP: "86 -> 87; } // FP: "87 -> 88; __syncthreads(); // FP: "88 -> 89; _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; // FP: "89 -> 90; while (_np.work()) { // FP: "90 -> 91; int _np_i =0; // FP: "91 -> 92; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); // FP: "92 -> 93; __syncthreads(); // FP: "93 -> 94; // FP: "94 -> 95; for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type jj; assert(nps.fg.src[_np_i] < __kernel_tb_size); src = _np_closure[nps.fg.src[_np_i]].src; jj= nps.fg.itvalue[_np_i]; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } // FP: "108 -> 109; _np.execute_round_done(ITSIZE); // FP: "109 -> 110; 
__syncthreads(); } // FP: "111 -> 112; assert(threadIdx.x < __kernel_tb_size); src = _np_closure[threadIdx.x].src; } // FP: "113 -> 114; } __global__ void ConnectedComp(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_comp_current, uint32_t * p_comp_old, HGAccumulator<unsigned int> DGAccumulator_accum) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_ConnectedComp; __shared__ cub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts; index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; __shared__ npsTy nps ; // FP: "5 -> 6; // FP: "6 -> 7; DGAccumulator_accum.thread_entry(); // FP: "7 -> 8; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; // FP: "8 -> 9; bool pop = src < __end; // FP: "9 -> 10; if (pop) { if (p_comp_old[src] > p_comp_current[src]) { p_comp_old[src] = p_comp_current[src]; DGAccumulator_accum.reduce( 1); } else { pop = false; } } // FP: "15 -> 16; // FP: "18 -> 19; struct NPInspector1 _np = {0,0,0,0,0,0}; // FP: "19 -> 20; __shared__ struct { index_type src; } _np_closure [TB_SIZE]; // FP: "20 -> 21; _np_closure[threadIdx.x].src = src; // FP: "21 -> 22; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } // FP: "24 -> 25; // FP: "25 -> 26; _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? 
_np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0; // FP: "26 -> 27; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); // FP: "27 -> 28; if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "30 -> 31; __syncthreads(); // FP: "31 -> 32; while (true) { // FP: "32 -> 33; if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } // FP: "35 -> 36; __syncthreads(); // FP: "36 -> 37; if (nps.tb.owner == MAX_TB_SIZE + 1) { // FP: "37 -> 38; __syncthreads(); // FP: "38 -> 39; break; } // FP: "40 -> 41; if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } // FP: "43 -> 44; __syncthreads(); // FP: "44 -> 45; int ns = nps.tb.start; int ne = nps.tb.size; // FP: "45 -> 46; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "48 -> 49; assert(nps.tb.src < __kernel_tb_size); src = _np_closure[nps.tb.src].src; // FP: "49 -> 50; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type jj; jj = ns +_np_j; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } // FP: "62 -> 63; __syncthreads(); } // FP: "64 -> 65; // FP: "65 -> 66; { const int warpid = threadIdx.x / 32; // FP: "66 -> 67; const int _np_laneid = cub::LaneId(); // FP: "67 -> 68; while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); src = 
_np_closure[nps.warp.src[warpid]].src; for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type jj; jj = _np_w_start +_np_ii; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } } // FP: "90 -> 91; __syncthreads(); // FP: "91 -> 92; } // FP: "92 -> 93; __syncthreads(); // FP: "93 -> 94; _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; // FP: "94 -> 95; while (_np.work()) { // FP: "95 -> 96; int _np_i =0; // FP: "96 -> 97; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); // FP: "97 -> 98; __syncthreads(); // FP: "98 -> 99; // FP: "99 -> 100; for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type jj; assert(nps.fg.src[_np_i] < __kernel_tb_size); src = _np_closure[nps.fg.src[_np_i]].src; jj= nps.fg.itvalue[_np_i]; { index_type dst; uint32_t new_dist; uint32_t old_dist; dst = graph.getAbsDestination(jj); new_dist = p_comp_current[src]; old_dist = atomicTestMin(&p_comp_current[dst], new_dist); } } // FP: "113 -> 114; _np.execute_round_done(ITSIZE); // FP: "114 -> 115; __syncthreads(); } // FP: "116 -> 117; assert(threadIdx.x < __kernel_tb_size); src = _np_closure[threadIdx.x].src; } // FP: "119 -> 120; DGAccumulator_accum.thread_exit<cub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts); // FP: "120 -> 121; } __global__ void ConnectedCompSanityCheck(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_comp_current, HGAccumulator<uint64_t> DGAccumulator_accum) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; __shared__ cub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_accum_ts; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; DGAccumulator_accum.thread_entry(); // FP: "3 -> 4; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += 
nthreads) { bool pop = src < __end; if (pop) { if (p_comp_current[src] == graph.node_data[src]) { DGAccumulator_accum.reduce( 1); } } } // FP: "11 -> 12; DGAccumulator_accum.thread_exit<cub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_accum_ts); // FP: "12 -> 13; } void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; InitializeGraph <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->comp_current.data.gpu_wr_ptr(), ctx->comp_old.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeGraph_allNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->gg.nnodes, ctx); // FP: "2 -> 3; } void InitializeGraph_masterNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx); // FP: "2 -> 3; } void InitializeGraph_nodesWithEdges_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void FirstItr_ConnectedComp_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; FirstItr_ConnectedComp <<<blocks, __tb_FirstItr_ConnectedComp>>>(ctx->gg, __begin, __end, ctx->comp_current.data.gpu_wr_ptr(), ctx->comp_old.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void FirstItr_ConnectedComp_allNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; FirstItr_ConnectedComp_cuda(0, ctx->gg.nnodes, ctx); // FP: "2 -> 3; } void FirstItr_ConnectedComp_masterNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; FirstItr_ConnectedComp_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx); // FP: "2 -> 3; } void FirstItr_ConnectedComp_nodesWithEdges_cuda(struct 
CUDA_Context* ctx) { // FP: "1 -> 2; FirstItr_ConnectedComp_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void ConnectedComp_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<unsigned int> _DGAccumulator_accum; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1); // FP: "5 -> 6; // FP: "6 -> 7; *(DGAccumulator_accumval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr(); // FP: "8 -> 9; ConnectedComp <<<blocks, __tb_ConnectedComp>>>(ctx->gg, __begin, __end, ctx->comp_current.data.gpu_wr_ptr(), ctx->comp_old.data.gpu_wr_ptr(), _DGAccumulator_accum); // FP: "9 -> 10; check_cuda_kernel; // FP: "10 -> 11; DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr()); // FP: "11 -> 12; } void ConnectedComp_allNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedComp_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void ConnectedComp_masterNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedComp_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void ConnectedComp_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedComp_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void ConnectedCompSanityCheck_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<uint64_t> _DGAccumulator_accum; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<uint64_t> DGAccumulator_accumval = Shared<uint64_t>(1); // FP: "5 -> 6; // FP: "6 -> 
7; *(DGAccumulator_accumval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr(); // FP: "8 -> 9; ConnectedCompSanityCheck <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->comp_current.data.gpu_wr_ptr(), _DGAccumulator_accum); // FP: "9 -> 10; check_cuda_kernel; // FP: "10 -> 11; DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr()); // FP: "11 -> 12; } void ConnectedCompSanityCheck_allNodes_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedCompSanityCheck_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void ConnectedCompSanityCheck_masterNodes_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedCompSanityCheck_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void ConnectedCompSanityCheck_nodesWithEdges_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; ConnectedCompSanityCheck_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx); // FP: "2 -> 3; }
8f93eaf4a85bd4908d45ea1d07d566d0226739ea.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/Config.h> #include <ATen/NamedTensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/Parallel.h> #include <ATen/SparseTensorImpl.h> #include <ATen/SparseTensorUtils.h> #include <ATen/native/Resize.h> #include <hip/hip_runtime.h> #include <type_traits> #include <thrust/device_ptr.h> #include <thrust/for_each.h> #include <thrust/sequence.h> #include <THH/THHTensorMathPointwise.cuh> #include <THH/THHThrustAllocator.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPUtils.h> #include <hipsparse.h> #include <ATen/native/sparse/hip/SparseHIPBlas.cuh> #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/functional.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/iterator/discard_iterator.h> #if defined(__HIPCC__) && (CUSPARSE_VERSION >= 11000) #define IS_CUSPARSE11_AVAILABLE() 1 #else #define IS_CUSPARSE11_AVAILABLE() 0 #endif #if IS_CUSPARSE11_AVAILABLE() #include <hip/library_types.h> #endif namespace at { namespace native { namespace { using namespace at::sparse; Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) { Tensor csr = at::empty({dim + 1}, CUDA(kInt)); Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt)); rowIndicesInt.copy_(rowIndices); sparse::cuda::Xcoo2csr( rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>()); return csr; } int confirm_mult_size(const std::vector<int>& mat1_size, const std::vector<int>& mat2_size) { TORCH_CHECK( mat1_size[1] == mat2_size[0], "mat1 and mat2 shapes cannot be multiplied (", mat1_size[0], "x", mat1_size[1], " and ", mat2_size[0], "x", mat2_size[1], ")"); return mat1_size[1]; } void create_general_description_(hipsparseMatDescr_t& description_) { 
TORCH_CUDASPARSE_CHECK(hipsparseCreateMatDescr(&description_)); TORCH_CUDASPARSE_CHECK(hipsparseSetMatType(description_, HIPSPARSE_MATRIX_TYPE_GENERAL)); TORCH_CUDASPARSE_CHECK(hipsparseSetMatIndexBase(description_, HIPSPARSE_INDEX_BASE_ZERO)); } // csrMatrixRef is used to have a representation of a raw CSR matrix representation // comming from `sparse_sparse_matmul_cuda_kernel` function. // Moreover this implements a RAII guard for a cusparse descriptor template<class scalar_t> struct csrMatrixRef { int* csr_indices_{nullptr}; int* csr_pointers_{nullptr}; scalar_t* csr_values_{nullptr}; int nnz_{0}; std::vector<int> size_{}; #if IS_CUSPARSE11_AVAILABLE() hipsparseSpMatDescr_t description_{0}; #else hipsparseMatDescr_t description_{0}; #endif csrMatrixRef() { #if !IS_CUSPARSE11_AVAILABLE() create_general_description_(description_); #endif } csrMatrixRef( int* csr_indices, int* csr_pointers, scalar_t* csr_values, int nnz, const std::vector<int>& size) : csr_indices_{csr_indices}, csr_pointers_{csr_pointers}, csr_values_{csr_values}, nnz_{nnz}, size_{size} { #if IS_CUSPARSE11_AVAILABLE() hipDataType cuda_data_type; if ( std::is_same<float, scalar_t>::value ) { cuda_data_type = HIP_R_32F; } else if ( std::is_same<double, scalar_t>::value) { cuda_data_type = HIP_R_64F; } else { TORCH_CHECK(false, "Tensor types must be either float32 or float64"); } TORCH_CUDASPARSE_CHECK(hipsparseCreateCsr( &description_, this->size(0), this->size(1), this->nnz_, this->csr_pointers_, this->csr_indices_, this->csr_values_, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cuda_data_type)); #else create_general_description_(description_); #endif } ~csrMatrixRef() { #if IS_CUSPARSE11_AVAILABLE() hipsparseDestroySpMat(description_); #else hipsparseDestroyMatDescr(description_); #endif } int size(int index) const { return size_.at(index); } }; // csrOutput is used to represent the output for `CusparseMatrixMultiplyOp` // Note that `csrOutput` is different from 
`csrMatrixRef` and the purpose // of this was to have a materialized version of a CSR matrix. // Moreover this implements a RAII guard for a cusparse descriptor struct csrOutput { Tensor csr_indices_{}; Tensor csr_pointers_{}; at::Tensor csr_values_{}; int nnz_{0}; std::vector<int> size_; hipsparseMatDescr_t description_{0}; csrOutput(const std::vector<int> &size) : size_{size} { create_general_description_(description_); } ~csrOutput() { hipsparseDestroyMatDescr(description_); } int size(int index) const { return size_.at(index); } }; #if IS_CUSPARSE11_AVAILABLE() // RAII guard helps to support cuSparse 11 API for `A @ B` operation // This generic template exists because with cuSparse the `scalar_t` type could be a double or float template <class scalar_t> struct CusparseMatrixMultiplyOp { hipsparseSpGEMMDescr_t spgemmDesc; CusparseMatrixMultiplyOp() { static_assert(std::is_same<float, scalar_t>::value || std::is_same<double, scalar_t>::value, "cusparse csr sparse-sparse MM only supports data type of float and double."); // SpGEMM Computation TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_createDescr(&spgemmDesc)); } ~CusparseMatrixMultiplyOp() { // destroy matrix/vector descriptors hipsparseSpGEMM_destroyDescr(spgemmDesc); } csrOutput operator ()( const csrMatrixRef<scalar_t>& A, const csrMatrixRef<scalar_t>& B, Tensor& output_values, Tensor& output_indices) { const int A_num_rows = A.size(0); const int B_num_cols = B.size(1); hipDataType computeType; if ( std::is_same<float, scalar_t>::value ) { computeType = HIP_R_32F; } else if ( std::is_same<double, scalar_t>::value) { computeType = HIP_R_64F; } else { TORCH_CHECK(false, "Tensor types must be either float32 or float64"); } csrOutput out({A.size(0), B.size(1)}); out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt)); int* dC_csrOffsets = out.csr_pointers_.data_ptr<int>(); int* dC_columns = nullptr; scalar_t* dC_values = nullptr; scalar_t alpha = 1.0f; scalar_t beta = 0.0f; 
hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE; hipsparseOperation_t opB = HIPSPARSE_OPERATION_NON_TRANSPOSE; csrMatrixRef<scalar_t> C( nullptr, nullptr, nullptr, /*nnz*/0, {A_num_rows, B_num_cols} ); //-------------------------------------------------------------------------- // CUSPARSE APIs hipsparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle(); void *dBuffer1 = NULL, *dBuffer2 = NULL; size_t bufferSize1 = 0, bufferSize2 = 0; hipsparseSpMatDescr_t matA = A.description_; hipsparseSpMatDescr_t matB = B.description_; hipsparseSpMatDescr_t matC = C.description_; //-------------------------------------------------------------------------- // ask bufferSize1 bytes for external memory TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_workEstimation( handle, opA, opB, &alpha, matA, matB, &beta, matC, computeType, HIPSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize1, NULL)); auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); at::DataPtr dataPtr1 = allocator.allocate(bufferSize1); dBuffer1 = dataPtr1.get(); // inspect the matrices A and B to understand the memory requiremnent for // the next step TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_workEstimation( handle, opA, opB, &alpha, matA, matB, &beta, matC, computeType, HIPSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize1, dBuffer1)); // ask bufferSize2 bytes for external memory TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_compute( handle, opA, opB, &alpha, matA, matB, &beta, matC, computeType, HIPSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize2, NULL)); at::DataPtr dataPtr2 = allocator.allocate(bufferSize2); dBuffer2 = dataPtr2.get(); // compute the intermediate product of A * B TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_compute( handle, opA, opB, &alpha, matA, matB, &beta, matC, computeType, HIPSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize2, dBuffer2)); // get matrix C non-zero entries C_num_nnz1 int64_t C_num_rows1, C_num_cols1, C_num_nnz1; TORCH_CUDASPARSE_CHECK( hipsparseSpMatGetSize(matC, 
&C_num_rows1, &C_num_cols1, &C_num_nnz1)); // allocate matrix C // allocate C offsets out.nnz_ = C_num_nnz1; out.csr_indices_ = at::empty({out.nnz_}, output_indices.options().dtype(kInt)); out.csr_values_ = at::empty({out.nnz_}, output_values.options()); dC_columns = out.csr_indices_.data_ptr<int>(); dC_values = out.csr_values_.data_ptr<scalar_t>(); // update matC with the new pointers TORCH_CUDASPARSE_CHECK( hipsparseCsrSetPointers(matC, dC_csrOffsets, dC_columns, dC_values)); // copy the final products to the matrix C TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_copy( handle, opA, opB, &alpha, matA, matB, &beta, matC, computeType, HIPSPARSE_SPGEMM_DEFAULT, spgemmDesc)); return out; } }; template struct CusparseMatrixMultiplyOp<float>; template struct CusparseMatrixMultiplyOp<double>; #else // if not IS_CUSPARSE11_AVAILABLE() using DcsrMatrixRef = csrMatrixRef<double>; using ScsrMatrixRef = csrMatrixRef<float>; // RAII guard helps to support cuSparse 10 API for `A @ B` operation // This generic template exists because with cuSparse the `scalar_t` type could be a double or float template <class scalar_t> struct CusparseMatrixMultiplyOp { csrOutput operator()( const csrMatrixRef<scalar_t>& lhs, const csrMatrixRef<scalar_t>& rhs, Tensor &output_values, Tensor &output_indices) { TORCH_INTERNAL_ASSERT(false, "cusparse csr sparse-sparse MM only supports data type of float and double."); } }; // Specializacion for `A @ B` operation for double values with cuSparse template<> struct CusparseMatrixMultiplyOp<double> { csrgemm2Info_t gemm2Info_; CusparseMatrixMultiplyOp() { TORCH_CUDASPARSE_CHECK(hipsparseCreateCsrgemm2Info(&gemm2Info_)); } ~CusparseMatrixMultiplyOp() { hipsparseDestroyCsrgemm2Info(gemm2Info_); } csrOutput operator ()( const DcsrMatrixRef& lhs, const DcsrMatrixRef& rhs, Tensor &output_values, Tensor &output_indices) { double alpha = 1.0; DcsrMatrixRef empty; return Dgemm2(lhs, rhs, empty, &alpha, nullptr, output_values, output_indices); } csrOutput Dgemm2( const 
DcsrMatrixRef& A, const DcsrMatrixRef& B, const DcsrMatrixRef& C, const double* alpha, const double* beta, Tensor &output_values, Tensor &output_indices) { void* buffer_{nullptr}; hipsparseHandle_t cusparseHandle_ = at::cuda::getCurrentCUDASparseHandle(); TORCH_CUDASPARSE_CHECK(hipsparseSetPointerMode(cusparseHandle_, HIPSPARSE_POINTER_MODE_HOST)); csrOutput out({A.size(0), B.size(1)}); int innerSize = confirm_mult_size(A.size_, B.size_); out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt)); // Compute needed buffer size size_t new_bubber_sz; TORCH_CUDASPARSE_CHECK(hipsparseDcsrgemm2_bufferSizeExt( cusparseHandle_, out.size(0), out.size(1), innerSize, alpha, A.description_, A.nnz_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_pointers_, B.csr_indices_, beta, C.description_, C.nnz_, C.csr_pointers_, C.csr_indices_, gemm2Info_, &new_bubber_sz)); // (Re)allocate buffer if needed auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); at::DataPtr data_ptr = allocator.allocate(new_bubber_sz); buffer_ = data_ptr.get(); // Find the resulting non-zero pattern. 
TORCH_CUDASPARSE_CHECK(hipsparseXcsrgemm2Nnz( cusparseHandle_, out.size(0), out.size(1), innerSize, A.description_, A.nnz_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_pointers_, B.csr_indices_, C.description_, C.nnz_, C.csr_pointers_, C.csr_indices_, out.description_, out.csr_pointers_.data_ptr<int>(), &out.nnz_, gemm2Info_, buffer_)); out.csr_indices_ = at::empty({out.nnz_}, output_indices.options().dtype(kInt)); out.csr_values_ = at::empty({out.nnz_}, output_values.options()); // Perform the gemm2 operation for doubles // out = alpha A B + beta C TORCH_CUDASPARSE_CHECK(hipsparseDcsrgemm2( cusparseHandle_, out.size(0), out.size(1), innerSize, alpha, A.description_, A.nnz_, A.csr_values_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_values_, B.csr_pointers_, B.csr_indices_, beta, C.description_, C.nnz_, C.csr_values_, C.csr_pointers_, C.csr_indices_, out.description_, out.csr_values_.data_ptr<double>(), out.csr_pointers_.data_ptr<int>(), out.csr_indices_.data_ptr<int>(), gemm2Info_, buffer_)); return out; } }; // Specializacion for `A @ B` operation for float values with cuSparse template<> struct CusparseMatrixMultiplyOp<float> { csrgemm2Info_t gemm2Info_; CusparseMatrixMultiplyOp() { TORCH_CUDASPARSE_CHECK(hipsparseCreateCsrgemm2Info(&gemm2Info_)); } ~CusparseMatrixMultiplyOp() { hipsparseDestroyCsrgemm2Info(gemm2Info_); } csrOutput operator()( const ScsrMatrixRef& lhs, const ScsrMatrixRef& rhs, Tensor &output_values, Tensor &output_indices) { float alpha = 1.0; ScsrMatrixRef empty; return Sgemm2(lhs, rhs, empty, &alpha, nullptr, output_values, output_indices); } csrOutput Sgemm2( const ScsrMatrixRef& A, const ScsrMatrixRef& B, const ScsrMatrixRef& C, const float* alpha, const float* beta, Tensor &output_values, Tensor &output_indices) { void* buffer_{nullptr}; hipsparseHandle_t cusparseHandle_ = at::cuda::getCurrentCUDASparseHandle(); TORCH_CUDASPARSE_CHECK(hipsparseSetPointerMode(cusparseHandle_, 
HIPSPARSE_POINTER_MODE_HOST)); csrOutput out({A.size(0), B.size(1)}); int innerSize = confirm_mult_size(A.size_, B.size_); out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt)); // Compute needed buffer size size_t new_bubber_sz; TORCH_CUDASPARSE_CHECK(hipsparseScsrgemm2_bufferSizeExt( cusparseHandle_, out.size(0), out.size(1), innerSize, alpha, A.description_, A.nnz_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_pointers_, B.csr_indices_, beta, C.description_, C.nnz_, C.csr_pointers_, C.csr_indices_, gemm2Info_, &new_bubber_sz)); auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); at::DataPtr data_ptr = allocator.allocate(new_bubber_sz); buffer_ = data_ptr.get(); // Find the resulting non-zero pattern. TORCH_CUDASPARSE_CHECK(hipsparseXcsrgemm2Nnz( cusparseHandle_, out.size(0), out.size(1), innerSize, A.description_, A.nnz_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_pointers_, B.csr_indices_, C.description_, C.nnz_, C.csr_pointers_, C.csr_indices_, out.description_, out.csr_pointers_.data_ptr<int>(), &out.nnz_, gemm2Info_, buffer_)); out.csr_indices_ = at::empty({out.nnz_}, output_indices.options().dtype(kInt)); out.csr_values_ = at::empty({out.nnz_}, output_values.options()); // Perform the gemm2 operation for doubles // out = alpha A B + beta C TORCH_CUDASPARSE_CHECK(hipsparseScsrgemm2( cusparseHandle_, out.size(0), out.size(1), innerSize, alpha, A.description_, A.nnz_, A.csr_values_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_values_, B.csr_pointers_, B.csr_indices_, beta, C.description_, C.nnz_, C.csr_values_, C.csr_pointers_, C.csr_indices_, out.description_, out.csr_values_.data_ptr<float>(), out.csr_pointers_.data_ptr<int>(), out.csr_indices_.data_ptr<int>(), gemm2Info_, buffer_)); return out; } }; #endif // IS_CUSPARSE11_AVAILABLE() template <typename scalar_t> void sparse_sparse_matmul_cuda_kernel( Tensor& result, const Tensor& mat1, const 
Tensor& mat2) { static_assert(std::is_same<float, scalar_t>::value || std::is_same<double, scalar_t>::value, "sparse_sparse_matmul_cuda_kernel only supports float and double value types"); Tensor mat1_indices_ = mat1._indices().contiguous(); Tensor mat1_values = mat1._values().contiguous(); Tensor mat1_row_indices = mat1_indices_.select(0, 0); Tensor mat1_col_indices = mat1_indices_.select(0, 1); Tensor mat1_indptr = _to_csr_int(mat1_row_indices, mat1.size(0), mat1._nnz()); Tensor mat1_indices = at::empty( {mat1_col_indices.size(0)}, mat1_col_indices.options().dtype(kInt)); mat1_indices.copy_(mat1_col_indices); Tensor mat2_indices_ = mat2._indices().contiguous(); Tensor mat2_values = mat2._values().contiguous(); Tensor mat2_row_indices = mat2_indices_.select(0, 0); Tensor mat2_col_indices = mat2_indices_.select(0, 1); Tensor mat2_indptr = _to_csr_int(mat2_row_indices, mat2.size(0), mat2._nnz()); Tensor mat2_indices = at::empty({mat2_col_indices.size(0)}, mat2_col_indices.options().dtype(kInt)); mat2_indices.copy_(mat2_col_indices); auto m = mat1.size(0); auto k1 = mat1.size(1); auto k2 = mat2.size(0); auto n = mat2.size(1); TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k1 <= INT_MAX), "At the moment, hipsparseDcsrgemm2 only supports m, n, k, nnz with the bound [val] <= ", INT_MAX, ".", "If you need this, please file an issue on GitHub." 
); auto output_indices = result._indices(); auto output_values = result._values(); if ((k1 == 0 && k2 == 0) || (n == 0 && m == 0)) { output_indices.zero_(); output_values.zero_(); return; } csrMatrixRef<scalar_t> csr_mat1( mat1_indices.data_ptr<int>(), mat1_indptr.data_ptr<int>(), mat1_values.data_ptr<scalar_t>(), (int)mat1._nnz(), {(int)mat1.size(0), (int)mat1.size(1)}); csrMatrixRef<scalar_t> csr_mat2( mat2_indices.data_ptr<int>(), mat2_indptr.data_ptr<int>(), mat2_values.data_ptr<scalar_t>(), (int)mat2._nnz(), {(int)mat2.size(0), (int)mat2.size(1)}); // Sparse matrix multiplication CusparseMatrixMultiplyOp<scalar_t> op; csrOutput csr_output = op(csr_mat1, csr_mat2, output_values, output_indices); auto nnz = csr_output.nnz_; output_values.set_(csr_output.csr_values_); output_indices.resize_({2, nnz}); auto output_indices_accessor = output_indices.packed_accessor<int64_t, 2>(); auto csr_output_pointers_accessor = csr_output.csr_pointers_.packed_accessor<int, 1>(); auto csr_output_ind_accessor = csr_output.csr_indices_.packed_accessor<int, 1>(); auto major_dim = result.size(0); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Filling the COO row indices thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), thrust::make_counting_iterator(int64_t(major_dim)), [output_indices_accessor, csr_output_pointers_accessor, major_dim, nnz] __device__(int64_t i) { auto Ap = csr_output_pointers_accessor.data(); int64_t* indices_row = output_indices_accessor[0].data(); for (int jj = Ap[i]; jj < Ap[i + 1]; jj++) { indices_row[jj] = i; } }); // Filling the COO column indices thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), thrust::make_counting_iterator(int64_t(csr_output.nnz_)), [output_indices_accessor, csr_output_pointers_accessor, csr_output_ind_accessor, major_dim, nnz] __device__(int64_t i) { 
int64_t* indices_col = output_indices_accessor[1].data(); indices_col[i] = csr_output_ind_accessor[i]; }); } } // end anonymous namespace Tensor sparse_sparse_matmul_cuda(const Tensor& mat1_, const Tensor& mat2_) { TORCH_INTERNAL_ASSERT(mat1_.is_sparse()); TORCH_INTERNAL_ASSERT(mat2_.is_sparse()); TORCH_CHECK(mat1_.dim() == 2); TORCH_CHECK(mat2_.dim() == 2); TORCH_CHECK(mat1_.dense_dim() == 0, "sparse_mm: scalar values expected, mat1 got ", mat1_.dense_dim(), "D values"); TORCH_CHECK(mat2_.dense_dim() == 0, "sparse_mm: scalar values expected, mat2 got ", mat2_.dense_dim(), "D values"); TORCH_CHECK( mat1_.size(1) == mat2_.size(0), "mat1 and mat2 shapes cannot be multiplied (", mat1_.size(0), "x", mat1_.size(1), " and ", mat2_.size(0), "x", mat2_.size(1), ")"); TORCH_CHECK(mat1_.scalar_type() == mat2_.scalar_type(), "mat1 dtype ", mat1_.scalar_type(), " does not match mat2 dtype ", mat2_.scalar_type()); auto output = at::native::empty_like(mat1_); output.sparse_resize_and_clear_({mat1_.size(0), mat2_.size(1)}, mat1_.sparse_dim(), 0); AT_DISPATCH_FLOATING_TYPES(mat1_.scalar_type(), "sparse_matmul", [&] { sparse_sparse_matmul_cuda_kernel<scalar_t>(output, mat1_.coalesce(), mat2_.coalesce()); }); return output; } } // namespace native } // namespace at
8f93eaf4a85bd4908d45ea1d07d566d0226739ea.cu
#include <ATen/ATen.h> #include <ATen/Config.h> #include <ATen/NamedTensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/Parallel.h> #include <ATen/SparseTensorImpl.h> #include <ATen/SparseTensorUtils.h> #include <ATen/native/Resize.h> #include <cuda_runtime.h> #include <type_traits> #include <thrust/device_ptr.h> #include <thrust/for_each.h> #include <thrust/sequence.h> #include <THC/THCTensorMathPointwise.cuh> #include <THC/THCThrustAllocator.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAUtils.h> #include <cusparse.h> #include <ATen/native/sparse/cuda/SparseCUDABlas.cuh> #include <c10/cuda/CUDACachingAllocator.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/functional.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/iterator/discard_iterator.h> #if defined(__CUDACC__) && (CUSPARSE_VERSION >= 11000) #define IS_CUSPARSE11_AVAILABLE() 1 #else #define IS_CUSPARSE11_AVAILABLE() 0 #endif #if IS_CUSPARSE11_AVAILABLE() #include <library_types.h> #endif namespace at { namespace native { namespace { using namespace at::sparse; Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) { Tensor csr = at::empty({dim + 1}, CUDA(kInt)); Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt)); rowIndicesInt.copy_(rowIndices); sparse::cuda::Xcoo2csr( rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>()); return csr; } int confirm_mult_size(const std::vector<int>& mat1_size, const std::vector<int>& mat2_size) { TORCH_CHECK( mat1_size[1] == mat2_size[0], "mat1 and mat2 shapes cannot be multiplied (", mat1_size[0], "x", mat1_size[1], " and ", mat2_size[0], "x", mat2_size[1], ")"); return mat1_size[1]; } void create_general_description_(cusparseMatDescr_t& description_) { TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&description_)); TORCH_CUDASPARSE_CHECK(cusparseSetMatType(description_, 
CUSPARSE_MATRIX_TYPE_GENERAL)); TORCH_CUDASPARSE_CHECK(cusparseSetMatIndexBase(description_, CUSPARSE_INDEX_BASE_ZERO)); } // csrMatrixRef is used to have a representation of a raw CSR matrix representation // comming from `sparse_sparse_matmul_cuda_kernel` function. // Moreover this implements a RAII guard for a cusparse descriptor template<class scalar_t> struct csrMatrixRef { int* csr_indices_{nullptr}; int* csr_pointers_{nullptr}; scalar_t* csr_values_{nullptr}; int nnz_{0}; std::vector<int> size_{}; #if IS_CUSPARSE11_AVAILABLE() cusparseSpMatDescr_t description_{0}; #else cusparseMatDescr_t description_{0}; #endif csrMatrixRef() { #if !IS_CUSPARSE11_AVAILABLE() create_general_description_(description_); #endif } csrMatrixRef( int* csr_indices, int* csr_pointers, scalar_t* csr_values, int nnz, const std::vector<int>& size) : csr_indices_{csr_indices}, csr_pointers_{csr_pointers}, csr_values_{csr_values}, nnz_{nnz}, size_{size} { #if IS_CUSPARSE11_AVAILABLE() cudaDataType cuda_data_type; if ( std::is_same<float, scalar_t>::value ) { cuda_data_type = CUDA_R_32F; } else if ( std::is_same<double, scalar_t>::value) { cuda_data_type = CUDA_R_64F; } else { TORCH_CHECK(false, "Tensor types must be either float32 or float64"); } TORCH_CUDASPARSE_CHECK(cusparseCreateCsr( &description_, this->size(0), this->size(1), this->nnz_, this->csr_pointers_, this->csr_indices_, this->csr_values_, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cuda_data_type)); #else create_general_description_(description_); #endif } ~csrMatrixRef() { #if IS_CUSPARSE11_AVAILABLE() cusparseDestroySpMat(description_); #else cusparseDestroyMatDescr(description_); #endif } int size(int index) const { return size_.at(index); } }; // csrOutput is used to represent the output for `CusparseMatrixMultiplyOp` // Note that `csrOutput` is different from `csrMatrixRef` and the purpose // of this was to have a materialized version of a CSR matrix. 
// Moreover this implements a RAII guard for a cusparse descriptor struct csrOutput { Tensor csr_indices_{}; Tensor csr_pointers_{}; at::Tensor csr_values_{}; int nnz_{0}; std::vector<int> size_; cusparseMatDescr_t description_{0}; csrOutput(const std::vector<int> &size) : size_{size} { create_general_description_(description_); } ~csrOutput() { cusparseDestroyMatDescr(description_); } int size(int index) const { return size_.at(index); } }; #if IS_CUSPARSE11_AVAILABLE() // RAII guard helps to support cuSparse 11 API for `A @ B` operation // This generic template exists because with cuSparse the `scalar_t` type could be a double or float template <class scalar_t> struct CusparseMatrixMultiplyOp { cusparseSpGEMMDescr_t spgemmDesc; CusparseMatrixMultiplyOp() { static_assert(std::is_same<float, scalar_t>::value || std::is_same<double, scalar_t>::value, "cusparse csr sparse-sparse MM only supports data type of float and double."); // SpGEMM Computation TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&spgemmDesc)); } ~CusparseMatrixMultiplyOp() { // destroy matrix/vector descriptors cusparseSpGEMM_destroyDescr(spgemmDesc); } csrOutput operator ()( const csrMatrixRef<scalar_t>& A, const csrMatrixRef<scalar_t>& B, Tensor& output_values, Tensor& output_indices) { const int A_num_rows = A.size(0); const int B_num_cols = B.size(1); cudaDataType computeType; if ( std::is_same<float, scalar_t>::value ) { computeType = CUDA_R_32F; } else if ( std::is_same<double, scalar_t>::value) { computeType = CUDA_R_64F; } else { TORCH_CHECK(false, "Tensor types must be either float32 or float64"); } csrOutput out({A.size(0), B.size(1)}); out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt)); int* dC_csrOffsets = out.csr_pointers_.data_ptr<int>(); int* dC_columns = nullptr; scalar_t* dC_values = nullptr; scalar_t alpha = 1.0f; scalar_t beta = 0.0f; cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE; cusparseOperation_t opB = 
CUSPARSE_OPERATION_NON_TRANSPOSE; csrMatrixRef<scalar_t> C( nullptr, nullptr, nullptr, /*nnz*/0, {A_num_rows, B_num_cols} ); //-------------------------------------------------------------------------- // CUSPARSE APIs cusparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle(); void *dBuffer1 = NULL, *dBuffer2 = NULL; size_t bufferSize1 = 0, bufferSize2 = 0; cusparseSpMatDescr_t matA = A.description_; cusparseSpMatDescr_t matB = B.description_; cusparseSpMatDescr_t matC = C.description_; //-------------------------------------------------------------------------- // ask bufferSize1 bytes for external memory TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_workEstimation( handle, opA, opB, &alpha, matA, matB, &beta, matC, computeType, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize1, NULL)); auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); at::DataPtr dataPtr1 = allocator.allocate(bufferSize1); dBuffer1 = dataPtr1.get(); // inspect the matrices A and B to understand the memory requiremnent for // the next step TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_workEstimation( handle, opA, opB, &alpha, matA, matB, &beta, matC, computeType, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize1, dBuffer1)); // ask bufferSize2 bytes for external memory TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_compute( handle, opA, opB, &alpha, matA, matB, &beta, matC, computeType, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize2, NULL)); at::DataPtr dataPtr2 = allocator.allocate(bufferSize2); dBuffer2 = dataPtr2.get(); // compute the intermediate product of A * B TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_compute( handle, opA, opB, &alpha, matA, matB, &beta, matC, computeType, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize2, dBuffer2)); // get matrix C non-zero entries C_num_nnz1 int64_t C_num_rows1, C_num_cols1, C_num_nnz1; TORCH_CUDASPARSE_CHECK( cusparseSpMatGetSize(matC, &C_num_rows1, &C_num_cols1, &C_num_nnz1)); // allocate matrix C // allocate C offsets out.nnz_ = C_num_nnz1; out.csr_indices_ = 
at::empty({out.nnz_}, output_indices.options().dtype(kInt)); out.csr_values_ = at::empty({out.nnz_}, output_values.options()); dC_columns = out.csr_indices_.data_ptr<int>(); dC_values = out.csr_values_.data_ptr<scalar_t>(); // update matC with the new pointers TORCH_CUDASPARSE_CHECK( cusparseCsrSetPointers(matC, dC_csrOffsets, dC_columns, dC_values)); // copy the final products to the matrix C TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_copy( handle, opA, opB, &alpha, matA, matB, &beta, matC, computeType, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc)); return out; } }; template struct CusparseMatrixMultiplyOp<float>; template struct CusparseMatrixMultiplyOp<double>; #else // if not IS_CUSPARSE11_AVAILABLE() using DcsrMatrixRef = csrMatrixRef<double>; using ScsrMatrixRef = csrMatrixRef<float>; // RAII guard helps to support cuSparse 10 API for `A @ B` operation // This generic template exists because with cuSparse the `scalar_t` type could be a double or float template <class scalar_t> struct CusparseMatrixMultiplyOp { csrOutput operator()( const csrMatrixRef<scalar_t>& lhs, const csrMatrixRef<scalar_t>& rhs, Tensor &output_values, Tensor &output_indices) { TORCH_INTERNAL_ASSERT(false, "cusparse csr sparse-sparse MM only supports data type of float and double."); } }; // Specializacion for `A @ B` operation for double values with cuSparse template<> struct CusparseMatrixMultiplyOp<double> { csrgemm2Info_t gemm2Info_; CusparseMatrixMultiplyOp() { TORCH_CUDASPARSE_CHECK(cusparseCreateCsrgemm2Info(&gemm2Info_)); } ~CusparseMatrixMultiplyOp() { cusparseDestroyCsrgemm2Info(gemm2Info_); } csrOutput operator ()( const DcsrMatrixRef& lhs, const DcsrMatrixRef& rhs, Tensor &output_values, Tensor &output_indices) { double alpha = 1.0; DcsrMatrixRef empty; return Dgemm2(lhs, rhs, empty, &alpha, nullptr, output_values, output_indices); } csrOutput Dgemm2( const DcsrMatrixRef& A, const DcsrMatrixRef& B, const DcsrMatrixRef& C, const double* alpha, const double* beta, Tensor &output_values, 
Tensor &output_indices) { void* buffer_{nullptr}; cusparseHandle_t cusparseHandle_ = at::cuda::getCurrentCUDASparseHandle(); TORCH_CUDASPARSE_CHECK(cusparseSetPointerMode(cusparseHandle_, CUSPARSE_POINTER_MODE_HOST)); csrOutput out({A.size(0), B.size(1)}); int innerSize = confirm_mult_size(A.size_, B.size_); out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt)); // Compute needed buffer size size_t new_bubber_sz; TORCH_CUDASPARSE_CHECK(cusparseDcsrgemm2_bufferSizeExt( cusparseHandle_, out.size(0), out.size(1), innerSize, alpha, A.description_, A.nnz_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_pointers_, B.csr_indices_, beta, C.description_, C.nnz_, C.csr_pointers_, C.csr_indices_, gemm2Info_, &new_bubber_sz)); // (Re)allocate buffer if needed auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); at::DataPtr data_ptr = allocator.allocate(new_bubber_sz); buffer_ = data_ptr.get(); // Find the resulting non-zero pattern. TORCH_CUDASPARSE_CHECK(cusparseXcsrgemm2Nnz( cusparseHandle_, out.size(0), out.size(1), innerSize, A.description_, A.nnz_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_pointers_, B.csr_indices_, C.description_, C.nnz_, C.csr_pointers_, C.csr_indices_, out.description_, out.csr_pointers_.data_ptr<int>(), &out.nnz_, gemm2Info_, buffer_)); out.csr_indices_ = at::empty({out.nnz_}, output_indices.options().dtype(kInt)); out.csr_values_ = at::empty({out.nnz_}, output_values.options()); // Perform the gemm2 operation for doubles // out = alpha ∗ A ∗ B + beta ∗ C TORCH_CUDASPARSE_CHECK(cusparseDcsrgemm2( cusparseHandle_, out.size(0), out.size(1), innerSize, alpha, A.description_, A.nnz_, A.csr_values_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_values_, B.csr_pointers_, B.csr_indices_, beta, C.description_, C.nnz_, C.csr_values_, C.csr_pointers_, C.csr_indices_, out.description_, out.csr_values_.data_ptr<double>(), out.csr_pointers_.data_ptr<int>(), 
out.csr_indices_.data_ptr<int>(), gemm2Info_, buffer_)); return out; } }; // Specializacion for `A @ B` operation for float values with cuSparse template<> struct CusparseMatrixMultiplyOp<float> { csrgemm2Info_t gemm2Info_; CusparseMatrixMultiplyOp() { TORCH_CUDASPARSE_CHECK(cusparseCreateCsrgemm2Info(&gemm2Info_)); } ~CusparseMatrixMultiplyOp() { cusparseDestroyCsrgemm2Info(gemm2Info_); } csrOutput operator()( const ScsrMatrixRef& lhs, const ScsrMatrixRef& rhs, Tensor &output_values, Tensor &output_indices) { float alpha = 1.0; ScsrMatrixRef empty; return Sgemm2(lhs, rhs, empty, &alpha, nullptr, output_values, output_indices); } csrOutput Sgemm2( const ScsrMatrixRef& A, const ScsrMatrixRef& B, const ScsrMatrixRef& C, const float* alpha, const float* beta, Tensor &output_values, Tensor &output_indices) { void* buffer_{nullptr}; cusparseHandle_t cusparseHandle_ = at::cuda::getCurrentCUDASparseHandle(); TORCH_CUDASPARSE_CHECK(cusparseSetPointerMode(cusparseHandle_, CUSPARSE_POINTER_MODE_HOST)); csrOutput out({A.size(0), B.size(1)}); int innerSize = confirm_mult_size(A.size_, B.size_); out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt)); // Compute needed buffer size size_t new_bubber_sz; TORCH_CUDASPARSE_CHECK(cusparseScsrgemm2_bufferSizeExt( cusparseHandle_, out.size(0), out.size(1), innerSize, alpha, A.description_, A.nnz_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_pointers_, B.csr_indices_, beta, C.description_, C.nnz_, C.csr_pointers_, C.csr_indices_, gemm2Info_, &new_bubber_sz)); auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); at::DataPtr data_ptr = allocator.allocate(new_bubber_sz); buffer_ = data_ptr.get(); // Find the resulting non-zero pattern. 
TORCH_CUDASPARSE_CHECK(cusparseXcsrgemm2Nnz( cusparseHandle_, out.size(0), out.size(1), innerSize, A.description_, A.nnz_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_pointers_, B.csr_indices_, C.description_, C.nnz_, C.csr_pointers_, C.csr_indices_, out.description_, out.csr_pointers_.data_ptr<int>(), &out.nnz_, gemm2Info_, buffer_)); out.csr_indices_ = at::empty({out.nnz_}, output_indices.options().dtype(kInt)); out.csr_values_ = at::empty({out.nnz_}, output_values.options()); // Perform the gemm2 operation for doubles // out = alpha ∗ A ∗ B + beta ∗ C TORCH_CUDASPARSE_CHECK(cusparseScsrgemm2( cusparseHandle_, out.size(0), out.size(1), innerSize, alpha, A.description_, A.nnz_, A.csr_values_, A.csr_pointers_, A.csr_indices_, B.description_, B.nnz_, B.csr_values_, B.csr_pointers_, B.csr_indices_, beta, C.description_, C.nnz_, C.csr_values_, C.csr_pointers_, C.csr_indices_, out.description_, out.csr_values_.data_ptr<float>(), out.csr_pointers_.data_ptr<int>(), out.csr_indices_.data_ptr<int>(), gemm2Info_, buffer_)); return out; } }; #endif // IS_CUSPARSE11_AVAILABLE() template <typename scalar_t> void sparse_sparse_matmul_cuda_kernel( Tensor& result, const Tensor& mat1, const Tensor& mat2) { static_assert(std::is_same<float, scalar_t>::value || std::is_same<double, scalar_t>::value, "sparse_sparse_matmul_cuda_kernel only supports float and double value types"); Tensor mat1_indices_ = mat1._indices().contiguous(); Tensor mat1_values = mat1._values().contiguous(); Tensor mat1_row_indices = mat1_indices_.select(0, 0); Tensor mat1_col_indices = mat1_indices_.select(0, 1); Tensor mat1_indptr = _to_csr_int(mat1_row_indices, mat1.size(0), mat1._nnz()); Tensor mat1_indices = at::empty( {mat1_col_indices.size(0)}, mat1_col_indices.options().dtype(kInt)); mat1_indices.copy_(mat1_col_indices); Tensor mat2_indices_ = mat2._indices().contiguous(); Tensor mat2_values = mat2._values().contiguous(); Tensor mat2_row_indices = mat2_indices_.select(0, 0); Tensor 
mat2_col_indices = mat2_indices_.select(0, 1); Tensor mat2_indptr = _to_csr_int(mat2_row_indices, mat2.size(0), mat2._nnz()); Tensor mat2_indices = at::empty({mat2_col_indices.size(0)}, mat2_col_indices.options().dtype(kInt)); mat2_indices.copy_(mat2_col_indices); auto m = mat1.size(0); auto k1 = mat1.size(1); auto k2 = mat2.size(0); auto n = mat2.size(1); TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k1 <= INT_MAX), "At the moment, cusparseDcsrgemm2 only supports m, n, k, nnz with the bound [val] <= ", INT_MAX, ".", "If you need this, please file an issue on GitHub." ); auto output_indices = result._indices(); auto output_values = result._values(); if ((k1 == 0 && k2 == 0) || (n == 0 && m == 0)) { output_indices.zero_(); output_values.zero_(); return; } csrMatrixRef<scalar_t> csr_mat1( mat1_indices.data_ptr<int>(), mat1_indptr.data_ptr<int>(), mat1_values.data_ptr<scalar_t>(), (int)mat1._nnz(), {(int)mat1.size(0), (int)mat1.size(1)}); csrMatrixRef<scalar_t> csr_mat2( mat2_indices.data_ptr<int>(), mat2_indptr.data_ptr<int>(), mat2_values.data_ptr<scalar_t>(), (int)mat2._nnz(), {(int)mat2.size(0), (int)mat2.size(1)}); // Sparse matrix multiplication CusparseMatrixMultiplyOp<scalar_t> op; csrOutput csr_output = op(csr_mat1, csr_mat2, output_values, output_indices); auto nnz = csr_output.nnz_; output_values.set_(csr_output.csr_values_); output_indices.resize_({2, nnz}); auto output_indices_accessor = output_indices.packed_accessor<int64_t, 2>(); auto csr_output_pointers_accessor = csr_output.csr_pointers_.packed_accessor<int, 1>(); auto csr_output_ind_accessor = csr_output.csr_indices_.packed_accessor<int, 1>(); auto major_dim = result.size(0); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Filling the COO row indices thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), 
thrust::make_counting_iterator(int64_t(major_dim)), [output_indices_accessor, csr_output_pointers_accessor, major_dim, nnz] __device__(int64_t i) { auto Ap = csr_output_pointers_accessor.data(); int64_t* indices_row = output_indices_accessor[0].data(); for (int jj = Ap[i]; jj < Ap[i + 1]; jj++) { indices_row[jj] = i; } }); // Filling the COO column indices thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), thrust::make_counting_iterator(int64_t(csr_output.nnz_)), [output_indices_accessor, csr_output_pointers_accessor, csr_output_ind_accessor, major_dim, nnz] __device__(int64_t i) { int64_t* indices_col = output_indices_accessor[1].data(); indices_col[i] = csr_output_ind_accessor[i]; }); } } // end anonymous namespace Tensor sparse_sparse_matmul_cuda(const Tensor& mat1_, const Tensor& mat2_) { TORCH_INTERNAL_ASSERT(mat1_.is_sparse()); TORCH_INTERNAL_ASSERT(mat2_.is_sparse()); TORCH_CHECK(mat1_.dim() == 2); TORCH_CHECK(mat2_.dim() == 2); TORCH_CHECK(mat1_.dense_dim() == 0, "sparse_mm: scalar values expected, mat1 got ", mat1_.dense_dim(), "D values"); TORCH_CHECK(mat2_.dense_dim() == 0, "sparse_mm: scalar values expected, mat2 got ", mat2_.dense_dim(), "D values"); TORCH_CHECK( mat1_.size(1) == mat2_.size(0), "mat1 and mat2 shapes cannot be multiplied (", mat1_.size(0), "x", mat1_.size(1), " and ", mat2_.size(0), "x", mat2_.size(1), ")"); TORCH_CHECK(mat1_.scalar_type() == mat2_.scalar_type(), "mat1 dtype ", mat1_.scalar_type(), " does not match mat2 dtype ", mat2_.scalar_type()); auto output = at::native::empty_like(mat1_); output.sparse_resize_and_clear_({mat1_.size(0), mat2_.size(1)}, mat1_.sparse_dim(), 0); AT_DISPATCH_FLOATING_TYPES(mat1_.scalar_type(), "sparse_matmul", [&] { sparse_sparse_matmul_cuda_kernel<scalar_t>(output, mat1_.coalesce(), mat2_.coalesce()); }); return output; } } // namespace native } // namespace at
e0aa96526ababd86f0391800e5adcc75949ee445.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_allocator.h" #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/atomic/common.cuh" #include "core/providers/cuda/reduction/reduction_utils.cuh" #include "contrib_ops/cuda/math/isfinite.cuh" #include "orttraining/training_ops/cuda/optimizer/common.h" #include "orttraining/training_ops/cuda/optimizer/common.cuh" #include "orttraining/training_ops/cuda/optimizer/lamb.h" namespace onnxruntime { namespace cuda { template <typename T1, typename T2, typename T3> __device__ __forceinline__ void _LambComputeDirectionRule( const T1& g_scale, const T1& w, const T2& g, const T3& m1, const T3& m2, const float& alpha, const float& beta, const float& lambda, const float& epsilon, const float& alpha_correction, const float& beta_correction, T2& d, T3& m1_new, T3& m2_new) { // Actual gradient. The scale is a product of loss' scale and // global gradient norm (if the norm > 1). const T1 g_unscaled = T1(g) / g_scale; // A constant in Lamb's equation. const T1 one = T1(1.0f); // Update exponentially-averaged historical gradient const T1 m1_new_tmp = alpha * static_cast<T1>(m1) + (one - alpha) * g_unscaled; // Update exponentially-averaged historical squared gradient const T1 m2_new_tmp = beta * static_cast<T1>(m2) + (one - beta) * g_unscaled * g_unscaled; // Compute unbiased 1st-order momentom. // The value alpha_correction is usually (1-alpha^t), // where t is the number of executed training iterations. const T1 m1_new_tmp_corrected = m1_new_tmp / alpha_correction; // Compute unbiased 2nd-order momentom. // The value beta_correction is usually (1-beta^t), // where t is the number of executed training iterations. 
const T1 m2_new_tmp_corrected = m2_new_tmp / beta_correction; // Save regularized update direction to output. const T1 d_tmp = lambda * w + m1_new_tmp_corrected / (_Sqrt(m2_new_tmp_corrected) + epsilon); // Things are updated only if the direction is finite. if (IsFiniteScalar(d_tmp)) { d = d_tmp; m1_new = m1_new_tmp; m2_new = m2_new_tmp; } else { d = T2(0); m1_new = m1; m2_new = m2; } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void _LambComputeDirectionImpl( const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* g_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); _LambComputeDirectionRule( scale, weights[id], grads[id], moment_1[id], moment_2[id], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, update_direction[id], moment_1_out[id], moment_2_out[id]); } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambComputeDirection( hipStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _LambComputeDirectionImpl<T1, T2, T3, T_GRAD_NORM>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, max_norm, alpha_correction, 
beta_correction, update_direction, moment_1_out, moment_2_out, N); } #define SPECIALIZED_LAMB_COMPUTE_DIRECTION(T1, T2, T3, T_GRAD_NORM) \ template void LambComputeDirection( \ hipStream_t stream, \ const T1* weights, \ const T2* grads, \ const T3* moment_1, \ const T3* moment_2, \ const T1* loss_scale, \ const T_GRAD_NORM* grad_norm, \ float alpha, \ float beta, \ float lambda, \ float epsilon, \ float max_norm, \ float alpha_correction, \ float beta_correction, \ T2* weights_out, \ T3* moment_1_out, \ T3* moment_2_out, \ size_t count); SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, float, float, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(double, double, double, double) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __device__ __forceinline__ void _LambUpdateRule( const T1 eta, const float ratio_min, const float ratio_max, const T2 r_norm, const T2 w_norm, const T2 w, const T3 d, T2* w_new, T3* g_new, T_MIXED_PRECISION_FP* w_mixed_precision_new) { // Confidence coefficeint of this update. const T2 ratio = (w_norm != T2(0.0f) && r_norm != T2(0.0f)) ? T2(eta) * _Max(T2(ratio_min), _Min(T2(ratio_max), _Sqrt(w_norm / r_norm))) : T2(eta); // Compute delta using the saved update direction. 
const T2 delta = -ratio * T2(d); const T2 w_new_tmp = w + delta; if (IsFiniteScalar(w_new_tmp)) { if (g_new) { *g_new = T3(delta); } if (w_new) { *w_new = w_new_tmp; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w_new_tmp); } } } else { if (g_new) { *g_new = T3(0); } if (w_new) { *w_new = w; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w); } } } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void _LambUpdateImpl( const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, weights[id], update_direction[id], weights_out != nullptr ? weights_out + id : nullptr, gradients_out != nullptr ? gradients_out + id : nullptr, mixed_precision_weights_out != nullptr ? 
mixed_precision_weights_out + id : nullptr); } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambUpdate( hipStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _LambUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, eta, ratio_min, ratio_max, r_norm, w_norm, weights, update_direction, weights_out, gradients_out, mixed_precision_weights_out, N); } #define INSTANTIATE_LAMB_UPDATE(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambUpdate( \ hipStream_t stream, \ const T1* eta, \ const float ratio_min, \ const float ratio_max, \ const T2* r_norm, \ const T2* w_norm, \ const T2* weights, \ const T3* update_direction, \ T2* weights_out, \ T3* gradients_out, \ T_MIXED_PRECISION_FP* mixed_precision_weights_out, \ size_t count); INSTANTIATE_LAMB_UPDATE(float, float, float, half) INSTANTIATE_LAMB_UPDATE(double, double, double, half) INSTANTIATE_LAMB_UPDATE(half, float, half, half) INSTANTIATE_LAMB_UPDATE(float, float, half, half) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_UPDATE(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(float, float, nv_bfloat16, nv_bfloat16) #endif template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void LambMultiTensorComputeDirectionImpl( ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const 
float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T1* w = reinterpret_cast<const T1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; T2* g = reinterpret_cast<T2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; const T3* m1 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* m2 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T3* m1_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start; T3* m2_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start; const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); #pragma unroll for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambComputeDirectionRule( scale, w[i], g[i], m1[i], m2[i], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, g[i], m1_new[i], m2_new[i]); } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( hipStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction) { const int thread_count = ChunkGroup<6>::thread_count_per_block; const int block_count = chunk_group.chunk_count; hipLaunchKernelGGL(( LambMultiTensorComputeDirectionImpl<T1, T2, T3>), dim3(block_count), dim3(thread_count), 0, stream, chunk_group, loss_scale, g_norm, lambda, 
alpha, beta, epsilon, max_norm, alpha_correction, beta_correction); } #define INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(T1, T2, T3, T_GRAD_NORM) \ template void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( \ hipStream_t stream, \ ChunkGroup<6> chunk_group, \ const T1* loss_scale, \ const T_GRAD_NORM* g_norm, \ const float lambda, \ const float alpha, \ const float beta, \ const float epsilon, \ const float max_norm, \ const float alpha_correction, \ const float beta_correction); INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, float, float, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(double, double, double, double) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void LambMultiTensorUpdateImpl( ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T2* w_norm = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[0][group_index]); const T2* r_norm = 
reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[1][group_index]); const T2* w = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* d = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T2* w_new = chunk_group.tensor_ptrs[4][group_index] != nullptr ? reinterpret_cast<T2*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start : nullptr; T3* g_new = chunk_group.tensor_ptrs[5][group_index] != nullptr ? reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start : nullptr; T_MIXED_PRECISION_FP* w_mixed_precision_new = chunk_group.tensor_ptrs[6][group_index] != nullptr ? reinterpret_cast<T_MIXED_PRECISION_FP*>(chunk_group.tensor_ptrs[6][group_index]) + chunk_start : nullptr; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, w[i], d[i], w_new != nullptr ? w_new + i : nullptr, g_new != nullptr ? g_new + i : nullptr, w_mixed_precision_new != nullptr ? 
w_mixed_precision_new + i : nullptr); } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( hipStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int thread_count = ChunkGroup<7>::thread_count_per_block; const int block_count = chunk_group.chunk_count; hipLaunchKernelGGL(( LambMultiTensorUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP>), dim3(block_count), dim3(thread_count), 0, stream, chunk_group, eta, ratio_min, ratio_max); } #define INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( \ hipStream_t stream, \ ChunkGroup<7> chunk_group, \ const T1* eta, \ const float ratio_min, \ const float ratio_max); INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(half, float, half, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, half, half) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, nv_bfloat16, nv_bfloat16) #endif // w_buffer[i], d_buffer[i] is used to store the squared sum of all elements processed by the i-th block. 
// sync_range_and_lock is used for a well ordered reduction over blocks spanning the same tensor template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> __launch_bounds__(ChunkGroup<4>::thread_count_per_block) __global__ void LambMultiTensorReductionImpl( ChunkGroup<4> chunk_group, TOut1* w_buffer, TOut2* d_buffer, LambMultiTensorSyncRangeAndLock* sync_range_and_lock) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const TIn1* w = reinterpret_cast<const TIn1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; const TIn2* d = reinterpret_cast<const TIn2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; TOut1* w_norm = reinterpret_cast<TOut1*>(chunk_group.tensor_ptrs[2][group_index]); TOut2* d_norm = reinterpret_cast<TOut2*>(chunk_group.tensor_ptrs[3][group_index]); TBuf d_sum = TBuf(0.f); TBuf w_sum = TBuf(0.f); constexpr int load_count_per_thread = 4; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x * load_count_per_thread) { #pragma unroll for (int j = 0; j < load_count_per_thread; ++j) { const int index_in_chunk = i + j * blockDim.x; const int index_in_tensor = chunk_start + index_in_chunk; if (index_in_chunk < chunk_size && index_in_tensor < tensor_size) { const TBuf w_element = TBuf(w[index_in_chunk]); const TBuf d_element = TBuf(d[index_in_chunk]); w_sum += w_element * w_element; d_sum += d_element * d_element; } } } // Thread count in a block must be a multiple of GPU_WARP_SIZE. 
#pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { w_sum += WARP_SHFL_DOWN(w_sum, stride); d_sum += WARP_SHFL_DOWN(d_sum, stride); } const int warp_count_in_block = blockDim.x / GPU_WARP_SIZE; const int lid = threadIdx.x % GPU_WARP_SIZE; const int wid = threadIdx.x / GPU_WARP_SIZE; // Shape is 2 x warp_count_in_block. extern __shared__ unsigned char shared_memory_[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_); TBuf* w_shared_memory_ = shared_memory; TBuf* d_shared_memory_ = shared_memory + warp_count_in_block; if (lid == 0) { w_shared_memory_[wid] = w_sum; d_shared_memory_[wid] = d_sum; } __syncthreads(); #pragma unroll for (int stride = warp_count_in_block / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride) { w_shared_memory_[threadIdx.x] += w_shared_memory_[threadIdx.x + stride]; d_shared_memory_[threadIdx.x] += d_shared_memory_[threadIdx.x + stride]; } __syncthreads(); } // ascertain the range of blocks with the associated tensor // note: if non-ordered reduction is OK, then atomicAdd over blocks could suffice const int leading_block_in_tensor = sync_range_and_lock[group_index].leading_block; const int num_blocks_in_tensor = sync_range_and_lock[group_index].number_blocks; if (num_blocks_in_tensor == 1) { if (threadIdx.x == 0) { *w_norm = TOut1(w_shared_memory_[0]); *d_norm = TOut2(d_shared_memory_[0]); } return; } if (threadIdx.x == 0) { w_buffer[blockIdx.x] = w_shared_memory_[0]; d_buffer[blockIdx.x] = d_shared_memory_[0]; } __threadfence(); __syncthreads(); // use lock to determine if this is last block for given tensor __shared__ bool is_last_block_done; if (threadIdx.x == 0) { int* p_lock = &sync_range_and_lock[group_index].completed_blocks; int counter = atomicAdd(p_lock, 1); is_last_block_done = (counter == num_blocks_in_tensor - 1); } __syncthreads(); // only last block to finish for associated tensor enters below if (is_last_block_done) { const int pow2_bound = least_pow2_bound(num_blocks_in_tensor); 
int blockid = leading_block_in_tensor + threadIdx.x; for (int stride = pow2_bound / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride && threadIdx.x + stride < num_blocks_in_tensor) { w_buffer[blockid] += w_buffer[blockid + stride]; d_buffer[blockid] += d_buffer[blockid + stride]; } __syncthreads(); } if (threadIdx.x == 0) { *w_norm = TOut1(w_buffer[leading_block_in_tensor]); *d_norm = TOut2(d_buffer[leading_block_in_tensor]); } } } CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> compute_tensor_range_and_lock(ChunkGroup<4> chunk_group, const CudaKernel& kernel) { const int num_blocks = chunk_group.chunk_count; // sync_range_and_lock is a struct consisting of (start_block, num_blocks, lock) for each tensor // Note: Adding such info to chunk group causes overflow (unless max tensors is reduced) const int max_tensors = ChunkGroup<4>::max_tensor_group_count; LambMultiTensorSyncRangeAndLock initial = {0, 0, 0}; CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> sync_range_and_lock(&kernel, initial, max_tensors); for (int block_index = num_blocks - 1; block_index >= 0; block_index--) { int tensor_index = chunk_group.block_index_to_tensor_group_index[block_index]; auto& tensor_block_span = sync_range_and_lock.CpuPtr()[tensor_index]; tensor_block_span.leading_block = block_index; tensor_block_span.number_blocks++; } ORT_THROW_IF_ERROR(sync_range_and_lock.CopyToGpu()); return sync_range_and_lock; } template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(hipStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size) { // thread count per block. constexpr int thread_count = ChunkGroup<4>::thread_count_per_block; // shared memory's size per block. 
const int shared_memory_size = thread_count / GPU_WARP_SIZE * 2 * sizeof(TBuf); // Enforce assumptions used inside this reduction CUDA kernel. ORT_ENFORCE(thread_count % GPU_WARP_SIZE == 0); ORT_ENFORCE((thread_count & (thread_count - 1)) == 0); const int num_blocks = chunk_group.chunk_count; const size_t w_buffer_size = num_blocks * sizeof(TOut1); const size_t d_buffer_size = num_blocks * sizeof(TOut2); ORT_ENFORCE(w_buffer_size + d_buffer_size <= reduction_buffer_size); TOut1* w_buffer = reinterpret_cast<TOut1*>(reduction_buffer); TOut2* d_buffer = reinterpret_cast<TOut2*>(w_buffer + num_blocks); auto sync_range_and_lock = compute_tensor_range_and_lock(chunk_group, kernel); hipLaunchKernelGGL(( LambMultiTensorReductionImpl<TIn1, TIn2, TOut1, TOut2, TBuf>), dim3(chunk_group.chunk_count), dim3(thread_count), shared_memory_size, stream, chunk_group, w_buffer, d_buffer, sync_range_and_lock.GpuPtr()); } #define INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(TIn1, TIn2, TOut1, TOut2, TBuf) \ template void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(hipStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size); INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, float, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(double, double, double, double, double) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, half, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(half, half, half, half, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(nv_bfloat16, nv_bfloat16, nv_bfloat16, 
nv_bfloat16, float) #endif } // namespace cuda } // namespace onnxruntime
e0aa96526ababd86f0391800e5adcc75949ee445.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_allocator.h" #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/atomic/common.cuh" #include "core/providers/cuda/reduction/reduction_utils.cuh" #include "contrib_ops/cuda/math/isfinite.cuh" #include "orttraining/training_ops/cuda/optimizer/common.h" #include "orttraining/training_ops/cuda/optimizer/common.cuh" #include "orttraining/training_ops/cuda/optimizer/lamb.h" namespace onnxruntime { namespace cuda { template <typename T1, typename T2, typename T3> __device__ __forceinline__ void _LambComputeDirectionRule( const T1& g_scale, const T1& w, const T2& g, const T3& m1, const T3& m2, const float& alpha, const float& beta, const float& lambda, const float& epsilon, const float& alpha_correction, const float& beta_correction, T2& d, T3& m1_new, T3& m2_new) { // Actual gradient. The scale is a product of loss' scale and // global gradient norm (if the norm > 1). const T1 g_unscaled = T1(g) / g_scale; // A constant in Lamb's equation. const T1 one = T1(1.0f); // Update exponentially-averaged historical gradient const T1 m1_new_tmp = alpha * static_cast<T1>(m1) + (one - alpha) * g_unscaled; // Update exponentially-averaged historical squared gradient const T1 m2_new_tmp = beta * static_cast<T1>(m2) + (one - beta) * g_unscaled * g_unscaled; // Compute unbiased 1st-order momentom. // The value alpha_correction is usually (1-alpha^t), // where t is the number of executed training iterations. const T1 m1_new_tmp_corrected = m1_new_tmp / alpha_correction; // Compute unbiased 2nd-order momentom. // The value beta_correction is usually (1-beta^t), // where t is the number of executed training iterations. const T1 m2_new_tmp_corrected = m2_new_tmp / beta_correction; // Save regularized update direction to output. 
const T1 d_tmp = lambda * w + m1_new_tmp_corrected / (_Sqrt(m2_new_tmp_corrected) + epsilon); // Things are updated only if the direction is finite. if (IsFiniteScalar(d_tmp)) { d = d_tmp; m1_new = m1_new_tmp; m2_new = m2_new_tmp; } else { d = T2(0); m1_new = m1; m2_new = m2; } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void _LambComputeDirectionImpl( const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* g_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); _LambComputeDirectionRule( scale, weights[id], grads[id], moment_1[id], moment_2[id], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, update_direction[id], moment_1_out[id], moment_2_out[id]); } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambComputeDirection( cudaStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _LambComputeDirectionImpl<T1, T2, T3, T_GRAD_NORM><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, max_norm, alpha_correction, beta_correction, update_direction, moment_1_out, moment_2_out, N); } #define SPECIALIZED_LAMB_COMPUTE_DIRECTION(T1, T2, T3, T_GRAD_NORM) \ 
template void LambComputeDirection( \ cudaStream_t stream, \ const T1* weights, \ const T2* grads, \ const T3* moment_1, \ const T3* moment_2, \ const T1* loss_scale, \ const T_GRAD_NORM* grad_norm, \ float alpha, \ float beta, \ float lambda, \ float epsilon, \ float max_norm, \ float alpha_correction, \ float beta_correction, \ T2* weights_out, \ T3* moment_1_out, \ T3* moment_2_out, \ size_t count); SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, float, float, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(double, double, double, double) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __device__ __forceinline__ void _LambUpdateRule( const T1 eta, const float ratio_min, const float ratio_max, const T2 r_norm, const T2 w_norm, const T2 w, const T3 d, T2* w_new, T3* g_new, T_MIXED_PRECISION_FP* w_mixed_precision_new) { // Confidence coefficeint of this update. const T2 ratio = (w_norm != T2(0.0f) && r_norm != T2(0.0f)) ? T2(eta) * _Max(T2(ratio_min), _Min(T2(ratio_max), _Sqrt(w_norm / r_norm))) : T2(eta); // Compute delta using the saved update direction. 
const T2 delta = -ratio * T2(d); const T2 w_new_tmp = w + delta; if (IsFiniteScalar(w_new_tmp)) { if (g_new) { *g_new = T3(delta); } if (w_new) { *w_new = w_new_tmp; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w_new_tmp); } } } else { if (g_new) { *g_new = T3(0); } if (w_new) { *w_new = w; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w); } } } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void _LambUpdateImpl( const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, weights[id], update_direction[id], weights_out != nullptr ? weights_out + id : nullptr, gradients_out != nullptr ? gradients_out + id : nullptr, mixed_precision_weights_out != nullptr ? 
mixed_precision_weights_out + id : nullptr); } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambUpdate( cudaStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _LambUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( eta, ratio_min, ratio_max, r_norm, w_norm, weights, update_direction, weights_out, gradients_out, mixed_precision_weights_out, N); } #define INSTANTIATE_LAMB_UPDATE(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambUpdate( \ cudaStream_t stream, \ const T1* eta, \ const float ratio_min, \ const float ratio_max, \ const T2* r_norm, \ const T2* w_norm, \ const T2* weights, \ const T3* update_direction, \ T2* weights_out, \ T3* gradients_out, \ T_MIXED_PRECISION_FP* mixed_precision_weights_out, \ size_t count); INSTANTIATE_LAMB_UPDATE(float, float, float, half) INSTANTIATE_LAMB_UPDATE(double, double, double, half) INSTANTIATE_LAMB_UPDATE(half, float, half, half) INSTANTIATE_LAMB_UPDATE(float, float, half, half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_UPDATE(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(float, float, nv_bfloat16, nv_bfloat16) #endif template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void LambMultiTensorComputeDirectionImpl( ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const float beta, const float epsilon, 
const float max_norm, const float alpha_correction, const float beta_correction) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T1* w = reinterpret_cast<const T1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; T2* g = reinterpret_cast<T2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; const T3* m1 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* m2 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T3* m1_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start; T3* m2_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start; const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); #pragma unroll for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambComputeDirectionRule( scale, w[i], g[i], m1[i], m2[i], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, g[i], m1_new[i], m2_new[i]); } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( cudaStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction) { const int thread_count = ChunkGroup<6>::thread_count_per_block; const int block_count = chunk_group.chunk_count; LambMultiTensorComputeDirectionImpl<T1, T2, T3><<<block_count, thread_count, 0, stream>>>( chunk_group, loss_scale, g_norm, lambda, alpha, beta, epsilon, max_norm, alpha_correction, beta_correction); 
} #define INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(T1, T2, T3, T_GRAD_NORM) \ template void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( \ cudaStream_t stream, \ ChunkGroup<6> chunk_group, \ const T1* loss_scale, \ const T_GRAD_NORM* g_norm, \ const float lambda, \ const float alpha, \ const float beta, \ const float epsilon, \ const float max_norm, \ const float alpha_correction, \ const float beta_correction); INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, float, float, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(double, double, double, double) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void LambMultiTensorUpdateImpl( ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T2* w_norm = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[0][group_index]); const T2* r_norm = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[1][group_index]); const T2* w = 
reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* d = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T2* w_new = chunk_group.tensor_ptrs[4][group_index] != nullptr ? reinterpret_cast<T2*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start : nullptr; T3* g_new = chunk_group.tensor_ptrs[5][group_index] != nullptr ? reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start : nullptr; T_MIXED_PRECISION_FP* w_mixed_precision_new = chunk_group.tensor_ptrs[6][group_index] != nullptr ? reinterpret_cast<T_MIXED_PRECISION_FP*>(chunk_group.tensor_ptrs[6][group_index]) + chunk_start : nullptr; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, w[i], d[i], w_new != nullptr ? w_new + i : nullptr, g_new != nullptr ? g_new + i : nullptr, w_mixed_precision_new != nullptr ? w_mixed_precision_new + i : nullptr); } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( cudaStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int thread_count = ChunkGroup<7>::thread_count_per_block; const int block_count = chunk_group.chunk_count; LambMultiTensorUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP><<<block_count, thread_count, 0, stream>>>( chunk_group, eta, ratio_min, ratio_max); } #define INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( \ cudaStream_t stream, \ ChunkGroup<7> chunk_group, \ const T1* eta, \ const float ratio_min, \ const float ratio_max); INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, half) 
INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(half, float, half, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, half, half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, nv_bfloat16, nv_bfloat16) #endif // w_buffer[i], d_buffer[i] is used to store the squared sum of all elements processed by the i-th block. // sync_range_and_lock is used for a well ordered reduction over blocks spanning the same tensor template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> __launch_bounds__(ChunkGroup<4>::thread_count_per_block) __global__ void LambMultiTensorReductionImpl( ChunkGroup<4> chunk_group, TOut1* w_buffer, TOut2* d_buffer, LambMultiTensorSyncRangeAndLock* sync_range_and_lock) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const TIn1* w = reinterpret_cast<const TIn1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; const TIn2* d = reinterpret_cast<const TIn2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; TOut1* w_norm = reinterpret_cast<TOut1*>(chunk_group.tensor_ptrs[2][group_index]); TOut2* d_norm = reinterpret_cast<TOut2*>(chunk_group.tensor_ptrs[3][group_index]); TBuf d_sum = TBuf(0.f); TBuf w_sum = TBuf(0.f); constexpr int load_count_per_thread = 4; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x * load_count_per_thread) { #pragma unroll for (int j = 0; j < load_count_per_thread; ++j) { const 
int index_in_chunk = i + j * blockDim.x; const int index_in_tensor = chunk_start + index_in_chunk; if (index_in_chunk < chunk_size && index_in_tensor < tensor_size) { const TBuf w_element = TBuf(w[index_in_chunk]); const TBuf d_element = TBuf(d[index_in_chunk]); w_sum += w_element * w_element; d_sum += d_element * d_element; } } } // Thread count in a block must be a multiple of GPU_WARP_SIZE. #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { w_sum += WARP_SHFL_DOWN(w_sum, stride); d_sum += WARP_SHFL_DOWN(d_sum, stride); } const int warp_count_in_block = blockDim.x / GPU_WARP_SIZE; const int lid = threadIdx.x % GPU_WARP_SIZE; const int wid = threadIdx.x / GPU_WARP_SIZE; // Shape is 2 x warp_count_in_block. extern __shared__ unsigned char shared_memory_[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_); TBuf* w_shared_memory_ = shared_memory; TBuf* d_shared_memory_ = shared_memory + warp_count_in_block; if (lid == 0) { w_shared_memory_[wid] = w_sum; d_shared_memory_[wid] = d_sum; } __syncthreads(); #pragma unroll for (int stride = warp_count_in_block / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride) { w_shared_memory_[threadIdx.x] += w_shared_memory_[threadIdx.x + stride]; d_shared_memory_[threadIdx.x] += d_shared_memory_[threadIdx.x + stride]; } __syncthreads(); } // ascertain the range of blocks with the associated tensor // note: if non-ordered reduction is OK, then atomicAdd over blocks could suffice const int leading_block_in_tensor = sync_range_and_lock[group_index].leading_block; const int num_blocks_in_tensor = sync_range_and_lock[group_index].number_blocks; if (num_blocks_in_tensor == 1) { if (threadIdx.x == 0) { *w_norm = TOut1(w_shared_memory_[0]); *d_norm = TOut2(d_shared_memory_[0]); } return; } if (threadIdx.x == 0) { w_buffer[blockIdx.x] = w_shared_memory_[0]; d_buffer[blockIdx.x] = d_shared_memory_[0]; } __threadfence(); __syncthreads(); // use lock to determine if this is last block for given 
tensor __shared__ bool is_last_block_done; if (threadIdx.x == 0) { int* p_lock = &sync_range_and_lock[group_index].completed_blocks; int counter = atomicAdd(p_lock, 1); is_last_block_done = (counter == num_blocks_in_tensor - 1); } __syncthreads(); // only last block to finish for associated tensor enters below if (is_last_block_done) { const int pow2_bound = least_pow2_bound(num_blocks_in_tensor); int blockid = leading_block_in_tensor + threadIdx.x; for (int stride = pow2_bound / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride && threadIdx.x + stride < num_blocks_in_tensor) { w_buffer[blockid] += w_buffer[blockid + stride]; d_buffer[blockid] += d_buffer[blockid + stride]; } __syncthreads(); } if (threadIdx.x == 0) { *w_norm = TOut1(w_buffer[leading_block_in_tensor]); *d_norm = TOut2(d_buffer[leading_block_in_tensor]); } } } CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> compute_tensor_range_and_lock(ChunkGroup<4> chunk_group, const CudaKernel& kernel) { const int num_blocks = chunk_group.chunk_count; // sync_range_and_lock is a struct consisting of (start_block, num_blocks, lock) for each tensor // Note: Adding such info to chunk group causes overflow (unless max tensors is reduced) const int max_tensors = ChunkGroup<4>::max_tensor_group_count; LambMultiTensorSyncRangeAndLock initial = {0, 0, 0}; CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> sync_range_and_lock(&kernel, initial, max_tensors); for (int block_index = num_blocks - 1; block_index >= 0; block_index--) { int tensor_index = chunk_group.block_index_to_tensor_group_index[block_index]; auto& tensor_block_span = sync_range_and_lock.CpuPtr()[tensor_index]; tensor_block_span.leading_block = block_index; tensor_block_span.number_blocks++; } ORT_THROW_IF_ERROR(sync_range_and_lock.CopyToGpu()); return sync_range_and_lock; } template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, 
TBuf>::operator()(cudaStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size) { // thread count per block. constexpr int thread_count = ChunkGroup<4>::thread_count_per_block; // shared memory's size per block. const int shared_memory_size = thread_count / GPU_WARP_SIZE * 2 * sizeof(TBuf); // Enforce assumptions used inside this reduction CUDA kernel. ORT_ENFORCE(thread_count % GPU_WARP_SIZE == 0); ORT_ENFORCE((thread_count & (thread_count - 1)) == 0); const int num_blocks = chunk_group.chunk_count; const size_t w_buffer_size = num_blocks * sizeof(TOut1); const size_t d_buffer_size = num_blocks * sizeof(TOut2); ORT_ENFORCE(w_buffer_size + d_buffer_size <= reduction_buffer_size); TOut1* w_buffer = reinterpret_cast<TOut1*>(reduction_buffer); TOut2* d_buffer = reinterpret_cast<TOut2*>(w_buffer + num_blocks); auto sync_range_and_lock = compute_tensor_range_and_lock(chunk_group, kernel); LambMultiTensorReductionImpl<TIn1, TIn2, TOut1, TOut2, TBuf><<<chunk_group.chunk_count, thread_count, shared_memory_size, stream>>>( chunk_group, w_buffer, d_buffer, sync_range_and_lock.GpuPtr()); } #define INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(TIn1, TIn2, TOut1, TOut2, TBuf) \ template void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(cudaStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size); INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, float, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(double, double, double, double, double) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, half, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(half, half, half, half, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) 
INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(nv_bfloat16, nv_bfloat16, nv_bfloat16, nv_bfloat16, float) #endif } // namespace cuda } // namespace onnxruntime
07743a2b66b0a0c0f477e762506782988a0a40ee.hip
// !!! This is a file automatically generated by hipify!!! #include "kernel.h" #include <hip/hip_runtime.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <sys/resource.h> #define DIM 224 __global__ void maxVal(float *normM_c, long int image_size, float *d_projections){ __shared__ float val[1024]; unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x*2 + threadIdx.x; if(id < image_size){ if((id+blockDim.x) >= image_size){ val[tid] = normM_c[id]; } else{ if(normM_c[id]>normM_c[id + blockDim.x]){ val[tid]=normM_c[id]; } else{ val[tid]=normM_c[id + blockDim.x]; } } } else{ val[tid] = -1; } __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s>>=1){ if (tid < s){ if(val[tid]<=val[tid+s]){ val[tid]=val[tid+s]; } } __syncthreads(); } d_projections[blockIdx.x]=val[0]; __syncthreads(); } __global__ void maxValExtract(float *normM_c, float *normM1_c, long int image_size, float *d_projections, int *d_index, float a){ __shared__ int pos[2048]; __shared__ float val[2048]; unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x*2 * blockDim.x + threadIdx.x; float faux, faux2; faux = ((a - normM_c[id])/a); faux2 = ((a - normM_c[id + blockDim.x])/a); if(id < image_size && faux <= 1.0e-6){ val[tid] = normM1_c[id]; pos[tid] = id; } else{ val[tid] = -1; } if(id + blockDim.x < image_size && faux2 <= 1.0e-6){ val[tid + blockDim.x] = normM1_c[id + blockDim.x]; pos[tid + blockDim.x] = id + blockDim.x; } else{ val[tid + blockDim.x] = -1; } __syncthreads(); for (unsigned int s = blockDim.x; s > 0; s>>=1){ if (tid < s){ if(val[tid]<=val[tid+s]){ val[tid] = val[tid+s]; pos[tid] = pos[tid+s]; } } __syncthreads(); } d_projections[blockIdx.x]=val[0]; d_index[blockIdx.x]=(int)pos[0]; __syncthreads(); } __global__ void actualizacion(float *v_c, float *image_c, int bands, float *normM_c, long int image_size) { __shared__ float block_v[DIM]; int k, i; float faux = 0; int j = blockIdx.x * blockDim.x + 
threadIdx.x; if(blockDim.x < bands){ for(i = threadIdx.x; i < bands; i += blockDim.x){ block_v[i] = v_c[i]; } } else{ if(threadIdx.x < bands){ block_v[threadIdx.x] = v_c[threadIdx.x]; } } __syncthreads(); if (j < image_size){ faux = 0; for(k = 0; k < bands; k++){ faux += block_v[k] * image_c[k*image_size + j]; } normM_c[j] -= faux * faux; } } __global__ void normalizacion(float *image_c, int bands, long int image_size, float *normM_c, float *normM1_c) { long int j, i; float norm_val = 0, aux = 0, pixel = 0; i = blockIdx.x * blockDim.x + threadIdx.x; if (i < image_size){ for(j = 0; j < bands; j++){ norm_val += image_c[j*image_size + i]; } norm_val = 1.0/(norm_val + 1.0e-16); for(j = 0; j < bands; j++){ pixel = image_c[j*image_size + i] * norm_val; image_c[j*image_size + i] = pixel; aux += pixel * pixel; } normM_c[i] = aux; normM1_c[i] = aux; } } __global__ void calculateNormM(float *image_c, int bands, long int image_size, float *normM_c, float *normM1_c) { int k; int j = blockIdx.x * blockDim.x + threadIdx.x; if (j < image_size){ for(k = 0; k < bands; k++){ normM_c[j] += image_c[k*image_size + j] * image_c[k*image_size + j]; normM1_c[j] += image_c[k*image_size + j] * image_c[k*image_size + j]; } } } void checkCUDAError(const char *mensaje, hipError_t error){ if(error != hipSuccess){ printf("ERROR %d: %s (%s)\n", error, hipGetErrorString(error), mensaje); } } void reservarMemoria(int bands,long int image_size, float **v_c, float **image_c, float **normM_c, float **normM1_c, float **image, float **v, float **d_projections, int **d_index, float **h_projections, int **h_index, int globalSize_reduction){ hipError_t error; error = hipMalloc(v_c, bands*sizeof(float)); checkCUDAError("ERROR EN hipMalloc de v_c", error); error = hipMalloc(image_c, bands*image_size*sizeof(float)); checkCUDAError("ERROR EN hipMalloc de image_c", error); error = hipMalloc(normM_c, image_size*sizeof(float)); checkCUDAError("ERROR EN hipMalloc de normMc", error); error = hipMalloc(normM1_c, 
image_size*sizeof(float)); checkCUDAError("ERROR EN hipMalloc de normM1_c", error); error = hipHostMalloc(image, bands*image_size*sizeof(float), hipHostMallocDefault); checkCUDAError("ERROR EN hipHostMalloc de image", error); error = hipHostMalloc(v, image_size*sizeof(float), hipHostMallocDefault); checkCUDAError("ERROR EN hipHostMalloc de v", error); error = hipMalloc(d_projections, globalSize_reduction*sizeof(float)); checkCUDAError("ERROR EN hipMalloc de d_projections", error); error = hipMalloc(d_index, globalSize_reduction*sizeof(int)); checkCUDAError("ERROR EN hipMalloc de d_projections", error); error = hipHostMalloc(h_projections, globalSize_reduction*sizeof(float), hipHostMallocDefault); checkCUDAError("ERROR EN hipHostMalloc de d_projections", error); error = hipHostMalloc(h_index, globalSize_reduction*sizeof(int), hipHostMallocDefault); checkCUDAError("ERROR EN hipHostMalloc de d_projections", error); } void liberarMemoria(float *v_c, float *image_c, float *normM_c, float *image, float *normM1_c, float *v, float *d_projections, int *d_index, float *h_projections, int *h_index){ hipError_t error; error = hipFree(v_c); checkCUDAError("ERROR EN hipFree de v_c", error); error = hipFree(image_c); checkCUDAError("ERROR EN hipFree de image_c", error); error = hipFree(normM_c); checkCUDAError("ERROR EN hipFree de normM_c", error); error = hipHostFree(image); checkCUDAError("ERROR EN hipHostFree de image", error); error = hipFree(normM1_c); checkCUDAError("ERROR EN hipHostFree de normM1_c", error); error = hipHostFree(v); checkCUDAError("ERROR EN hipHostFree de v", error); error = hipFree(d_projections); checkCUDAError("ERROR EN hipFree de d_projections", error); error = hipFree(d_index); checkCUDAError("ERROR EN hipFree de d_index", error); error = hipHostFree(h_projections); checkCUDAError("ERROR EN hipHostFree de h_projections", error); error = hipHostFree(h_index); checkCUDAError("ERROR EN hipHostFree de h_index", error); } void selectDevice(){ int count; int 
i , device; hipDeviceProp_t prop; hipError_t error; hipGetDeviceCount(&count); for(i = 0; i < count; ++i){ hipGetDeviceProperties(&prop, i); printf("Device %d, con nombre: %s\n", i, prop.name); } printf("Select a device: "); scanf ("%d", &device); error = hipSetDevice(device); checkCUDAError("ERROR EN setDevice", error); } void actualizarNormM(float *v, int bands, long int image_size, int i, int rows, float *v_c, float *image_c, float *normM_c){ hipError_t error; int val = ceil((double)image_size/1024); error = hipMemcpy(v_c, v, bands*sizeof(float), hipMemcpyHostToDevice); checkCUDAError("ERROR EN cudamemcpy de v_c", error); dim3 dimBlock(1024); dim3 dimGrid(val); hipLaunchKernelGGL(( actualizacion), dim3(dimGrid),dim3(dimBlock), 0, 0, v_c, image_c, bands, normM_c, image_size); checkCUDAError("ERROR EN kernel actualizacin", hipGetLastError()); hipDeviceSynchronize(); } void normalizeImgC(float *image, long int image_size, int bands,float *image_c, int rows, float *normM_c, float *normM1_c){ hipError_t error; int val = ceil((double)image_size/1024); error = hipMemcpy(image_c, image, bands*image_size*sizeof(float), hipMemcpyHostToDevice); checkCUDAError("ERROR EN hipMemcpy de image_c", error); dim3 dimBlock(1024); dim3 dimGrid(val); hipLaunchKernelGGL(( normalizacion), dim3(dimGrid),dim3(dimBlock), 0, 0, image_c, bands, image_size, normM_c, normM1_c); checkCUDAError("ERROR EN kernel normalizacin", hipGetLastError()); hipDeviceSynchronize(); } void calculateNormM(float *image, long int image_size, int bands, int rows, float *image_c, float *normM_c, float *normM1_c){ hipError_t error; int val = ceil((double)image_size/1024); error = hipMemcpy(image_c, image, bands*image_size*sizeof(float), hipMemcpyHostToDevice); checkCUDAError("ERROR EN hipMemcpy de image_c", error); dim3 dimBlock(1024); dim3 dimGrid(val); hipLaunchKernelGGL(( calculateNormM), dim3(dimGrid),dim3(dimBlock), 0, 0, image_c, bands, image_size, normM_c, normM1_c); checkCUDAError("ERROR EN kernel 
calculateNormM", hipGetLastError()); hipDeviceSynchronize(); } void calculateMaxVal(int image_size, float *normM_c, float *d_projections, float *h_projections){ int val = ceil((double)image_size/2/1024); hipError_t error; dim3 dimBlock(1024); dim3 dimGrid(val); hipLaunchKernelGGL(( maxVal), dim3(dimGrid),dim3(dimBlock), 0, 0, normM_c, image_size, d_projections); checkCUDAError("ERROR EN kernel maxVal", hipGetLastError()); hipDeviceSynchronize(); error = hipMemcpy(h_projections, d_projections, val*sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("ERROR EN hipMemcpy de h_projections", error); } void calculateMaxValExtract_2(int image_size, float *normM_c, float *normM1_c, float *d_projections, float *h_projections, int *d_index, int *h_index, float a){ hipError_t error; int val = ceil((double)image_size/2/1024); dim3 dimBlock(1024); dim3 dimGrid(val); hipLaunchKernelGGL(( maxValExtract), dim3(dimGrid),dim3(dimBlock), 0, 0, normM_c, normM1_c, image_size, d_projections, d_index, a); checkCUDAError("ERROR EN kernel maxValExtract", hipGetLastError()); hipDeviceSynchronize(); error = hipMemcpy(h_projections, d_projections, val*sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("ERROR EN hipMemcpy de h_projections", error); error = hipMemcpy(h_index, d_index, val*sizeof(float), hipMemcpyDeviceToHost); checkCUDAError("ERROR EN hipMemcpy de h_index", error); }
07743a2b66b0a0c0f477e762506782988a0a40ee.cu
#include "kernel.h" #include <cuda.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <sys/resource.h> #define DIM 224 __global__ void maxVal(float *normM_c, long int image_size, float *d_projections){ __shared__ float val[1024]; unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x*2 + threadIdx.x; if(id < image_size){ if((id+blockDim.x) >= image_size){ val[tid] = normM_c[id]; } else{ if(normM_c[id]>normM_c[id + blockDim.x]){ val[tid]=normM_c[id]; } else{ val[tid]=normM_c[id + blockDim.x]; } } } else{ val[tid] = -1; } __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s>>=1){ if (tid < s){ if(val[tid]<=val[tid+s]){ val[tid]=val[tid+s]; } } __syncthreads(); } d_projections[blockIdx.x]=val[0]; __syncthreads(); } __global__ void maxValExtract(float *normM_c, float *normM1_c, long int image_size, float *d_projections, int *d_index, float a){ __shared__ int pos[2048]; __shared__ float val[2048]; unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x*2 * blockDim.x + threadIdx.x; float faux, faux2; faux = ((a - normM_c[id])/a); faux2 = ((a - normM_c[id + blockDim.x])/a); if(id < image_size && faux <= 1.0e-6){ val[tid] = normM1_c[id]; pos[tid] = id; } else{ val[tid] = -1; } if(id + blockDim.x < image_size && faux2 <= 1.0e-6){ val[tid + blockDim.x] = normM1_c[id + blockDim.x]; pos[tid + blockDim.x] = id + blockDim.x; } else{ val[tid + blockDim.x] = -1; } __syncthreads(); for (unsigned int s = blockDim.x; s > 0; s>>=1){ if (tid < s){ if(val[tid]<=val[tid+s]){ val[tid] = val[tid+s]; pos[tid] = pos[tid+s]; } } __syncthreads(); } d_projections[blockIdx.x]=val[0]; d_index[blockIdx.x]=(int)pos[0]; __syncthreads(); } __global__ void actualizacion(float *v_c, float *image_c, int bands, float *normM_c, long int image_size) { __shared__ float block_v[DIM]; int k, i; float faux = 0; int j = blockIdx.x * blockDim.x + threadIdx.x; if(blockDim.x < bands){ for(i = threadIdx.x; i < bands; i += 
blockDim.x){ block_v[i] = v_c[i]; } } else{ if(threadIdx.x < bands){ block_v[threadIdx.x] = v_c[threadIdx.x]; } } __syncthreads(); if (j < image_size){ faux = 0; for(k = 0; k < bands; k++){ faux += block_v[k] * image_c[k*image_size + j]; } normM_c[j] -= faux * faux; } } __global__ void normalizacion(float *image_c, int bands, long int image_size, float *normM_c, float *normM1_c) { long int j, i; float norm_val = 0, aux = 0, pixel = 0; i = blockIdx.x * blockDim.x + threadIdx.x; if (i < image_size){ for(j = 0; j < bands; j++){ norm_val += image_c[j*image_size + i]; } norm_val = 1.0/(norm_val + 1.0e-16); for(j = 0; j < bands; j++){ pixel = image_c[j*image_size + i] * norm_val; image_c[j*image_size + i] = pixel; aux += pixel * pixel; } normM_c[i] = aux; normM1_c[i] = aux; } } __global__ void calculateNormM(float *image_c, int bands, long int image_size, float *normM_c, float *normM1_c) { int k; int j = blockIdx.x * blockDim.x + threadIdx.x; if (j < image_size){ for(k = 0; k < bands; k++){ normM_c[j] += image_c[k*image_size + j] * image_c[k*image_size + j]; normM1_c[j] += image_c[k*image_size + j] * image_c[k*image_size + j]; } } } void checkCUDAError(const char *mensaje, cudaError_t error){ if(error != cudaSuccess){ printf("ERROR %d: %s (%s)\n", error, cudaGetErrorString(error), mensaje); } } void reservarMemoria(int bands,long int image_size, float **v_c, float **image_c, float **normM_c, float **normM1_c, float **image, float **v, float **d_projections, int **d_index, float **h_projections, int **h_index, int globalSize_reduction){ cudaError_t error; error = cudaMalloc(v_c, bands*sizeof(float)); checkCUDAError("ERROR EN cudaMalloc de v_c", error); error = cudaMalloc(image_c, bands*image_size*sizeof(float)); checkCUDAError("ERROR EN cudaMalloc de image_c", error); error = cudaMalloc(normM_c, image_size*sizeof(float)); checkCUDAError("ERROR EN cudaMalloc de normMc", error); error = cudaMalloc(normM1_c, image_size*sizeof(float)); checkCUDAError("ERROR EN cudaMalloc de 
normM1_c", error); error = cudaHostAlloc(image, bands*image_size*sizeof(float), cudaHostAllocDefault); checkCUDAError("ERROR EN cudaHostAlloc de image", error); error = cudaHostAlloc(v, image_size*sizeof(float), cudaHostAllocDefault); checkCUDAError("ERROR EN cudaHostAlloc de v", error); error = cudaMalloc(d_projections, globalSize_reduction*sizeof(float)); checkCUDAError("ERROR EN cudaMalloc de d_projections", error); error = cudaMalloc(d_index, globalSize_reduction*sizeof(int)); checkCUDAError("ERROR EN cudaMalloc de d_projections", error); error = cudaHostAlloc(h_projections, globalSize_reduction*sizeof(float), cudaHostAllocDefault); checkCUDAError("ERROR EN cudaHostAlloc de d_projections", error); error = cudaHostAlloc(h_index, globalSize_reduction*sizeof(int), cudaHostAllocDefault); checkCUDAError("ERROR EN cudaHostAlloc de d_projections", error); } void liberarMemoria(float *v_c, float *image_c, float *normM_c, float *image, float *normM1_c, float *v, float *d_projections, int *d_index, float *h_projections, int *h_index){ cudaError_t error; error = cudaFree(v_c); checkCUDAError("ERROR EN cudaFree de v_c", error); error = cudaFree(image_c); checkCUDAError("ERROR EN cudaFree de image_c", error); error = cudaFree(normM_c); checkCUDAError("ERROR EN cudaFree de normM_c", error); error = cudaFreeHost(image); checkCUDAError("ERROR EN cudaFreeHost de image", error); error = cudaFree(normM1_c); checkCUDAError("ERROR EN cudaFreeHost de normM1_c", error); error = cudaFreeHost(v); checkCUDAError("ERROR EN cudaFreeHost de v", error); error = cudaFree(d_projections); checkCUDAError("ERROR EN cudaFree de d_projections", error); error = cudaFree(d_index); checkCUDAError("ERROR EN cudaFree de d_index", error); error = cudaFreeHost(h_projections); checkCUDAError("ERROR EN cudaFreeHost de h_projections", error); error = cudaFreeHost(h_index); checkCUDAError("ERROR EN cudaFreeHost de h_index", error); } void selectDevice(){ int count; int i , device; cudaDeviceProp prop; 
cudaError_t error; cudaGetDeviceCount(&count); for(i = 0; i < count; ++i){ cudaGetDeviceProperties(&prop, i); printf("Device %d, con nombre: %s\n", i, prop.name); } printf("Select a device: "); scanf ("%d", &device); error = cudaSetDevice(device); checkCUDAError("ERROR EN setDevice", error); } void actualizarNormM(float *v, int bands, long int image_size, int i, int rows, float *v_c, float *image_c, float *normM_c){ cudaError_t error; int val = ceil((double)image_size/1024); error = cudaMemcpy(v_c, v, bands*sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("ERROR EN cudamemcpy de v_c", error); dim3 dimBlock(1024); dim3 dimGrid(val); actualizacion<<<dimGrid,dimBlock>>>(v_c, image_c, bands, normM_c, image_size); checkCUDAError("ERROR EN kernel actualización", cudaGetLastError()); cudaDeviceSynchronize(); } void normalizeImgC(float *image, long int image_size, int bands,float *image_c, int rows, float *normM_c, float *normM1_c){ cudaError_t error; int val = ceil((double)image_size/1024); error = cudaMemcpy(image_c, image, bands*image_size*sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("ERROR EN cudaMemcpy de image_c", error); dim3 dimBlock(1024); dim3 dimGrid(val); normalizacion<<<dimGrid,dimBlock>>>(image_c, bands, image_size, normM_c, normM1_c); checkCUDAError("ERROR EN kernel normalización", cudaGetLastError()); cudaDeviceSynchronize(); } void calculateNormM(float *image, long int image_size, int bands, int rows, float *image_c, float *normM_c, float *normM1_c){ cudaError_t error; int val = ceil((double)image_size/1024); error = cudaMemcpy(image_c, image, bands*image_size*sizeof(float), cudaMemcpyHostToDevice); checkCUDAError("ERROR EN cudaMemcpy de image_c", error); dim3 dimBlock(1024); dim3 dimGrid(val); calculateNormM<<<dimGrid,dimBlock>>>(image_c, bands, image_size, normM_c, normM1_c); checkCUDAError("ERROR EN kernel calculateNormM", cudaGetLastError()); cudaDeviceSynchronize(); } void calculateMaxVal(int image_size, float *normM_c, float 
*d_projections, float *h_projections){ int val = ceil((double)image_size/2/1024); cudaError_t error; dim3 dimBlock(1024); dim3 dimGrid(val); maxVal<<<dimGrid,dimBlock>>>(normM_c, image_size, d_projections); checkCUDAError("ERROR EN kernel maxVal", cudaGetLastError()); cudaDeviceSynchronize(); error = cudaMemcpy(h_projections, d_projections, val*sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("ERROR EN cudaMemcpy de h_projections", error); } void calculateMaxValExtract_2(int image_size, float *normM_c, float *normM1_c, float *d_projections, float *h_projections, int *d_index, int *h_index, float a){ cudaError_t error; int val = ceil((double)image_size/2/1024); dim3 dimBlock(1024); dim3 dimGrid(val); maxValExtract<<<dimGrid,dimBlock>>>(normM_c, normM1_c, image_size, d_projections, d_index, a); checkCUDAError("ERROR EN kernel maxValExtract", cudaGetLastError()); cudaDeviceSynchronize(); error = cudaMemcpy(h_projections, d_projections, val*sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("ERROR EN cudaMemcpy de h_projections", error); error = cudaMemcpy(h_index, d_index, val*sizeof(float), cudaMemcpyDeviceToHost); checkCUDAError("ERROR EN cudaMemcpy de h_index", error); }
3f10ba84bb6ee87f93b398ddbdec2de20d4c225f.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/search.hpp> #include <cudf/detail/stream_compaction.hpp> #include <cudf/detail/valid_if.cuh> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/stream_compaction.hpp> #include <rmm/thrust_rmm_allocator.h> #include <thrust/binary_search.h> namespace cudf { namespace dictionary { namespace detail { namespace { /** * @brief Type-dispatch functor for remapping the old indices to new values based on the new * key-set. * * The dispatch is based on the key type. * The output column is the new indices column for the new dictionary column. 
*/ struct dispatch_compute_indices { template <typename Element> typename std::enable_if_t<cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>> operator()(dictionary_column_view const& input, column_view const& new_keys, rmm::mr::device_memory_resource* mr, hipStream_t stream) { auto dictionary_view = column_device_view::create(input.parent(), stream); auto d_dictionary = *dictionary_view; auto dictionary_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), [d_dictionary] __device__(size_type idx) { if (d_dictionary.is_null(idx)) return Element{}; column_device_view d_keys = d_dictionary.child(1); size_type index = static_cast<size_type>(d_dictionary.element<dictionary32>(idx)); return d_keys.template element<Element>(index); }); auto new_keys_view = column_device_view::create(new_keys, stream); auto d_new_keys = *new_keys_view; auto keys_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), [d_new_keys] __device__(size_type idx) { return d_new_keys.template element<Element>(idx); }); auto result = make_numeric_column( data_type{type_id::INT32}, input.size(), mask_state::UNALLOCATED, stream, mr); auto d_result = result->mutable_view().data<int32_t>(); auto execpol = rmm::exec_policy(stream); thrust::lower_bound(execpol->on(stream), keys_itr, keys_itr + new_keys.size(), dictionary_itr, dictionary_itr + input.size(), d_result, thrust::less<Element>()); result->set_null_count(0); return result; } template <typename Element> typename std::enable_if_t<!cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>> operator()(dictionary_column_view const& input, column_view const& new_keys, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_FAIL("list_view dictionary set_keys not supported yet"); } }; } // namespace // std::unique_ptr<column> set_keys( dictionary_column_view const& dictionary_column, column_view const& new_keys, 
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0) { CUDF_EXPECTS(!new_keys.has_nulls(), "keys parameter must not have nulls"); auto keys = dictionary_column.keys(); CUDF_EXPECTS(keys.type() == new_keys.type(), "keys types must match"); // copy the keys -- use drop_duplicates to make sure they are sorted and unique auto table_keys = cudf::detail::drop_duplicates(table_view{{new_keys}}, std::vector<size_type>{0}, duplicate_keep_option::KEEP_FIRST, null_equality::EQUAL, mr, stream) ->release(); std::unique_ptr<column> keys_column(std::move(table_keys.front())); // compute the new nulls auto matches = cudf::detail::contains(keys, keys_column->view(), mr, stream); auto d_matches = matches->view().data<bool>(); auto d_indices = dictionary_column.indices().data<int32_t>(); auto d_null_mask = dictionary_column.null_mask(); auto new_nulls = cudf::detail::valid_if( thrust::make_counting_iterator<size_type>(dictionary_column.offset()), thrust::make_counting_iterator<size_type>(dictionary_column.offset() + dictionary_column.size()), [d_null_mask, d_indices, d_matches] __device__(size_type idx) { if (d_null_mask && !bit_is_set(d_null_mask, idx)) return false; return d_matches[d_indices[idx]]; }, stream, mr); // compute the new indices auto indices_column = type_dispatcher(keys_column->type(), dispatch_compute_indices{}, dictionary_column, keys_column->view(), mr, stream); // create column with keys_column and indices_column return make_dictionary_column(std::move(keys_column), std::move(indices_column), std::move(new_nulls.first), new_nulls.second); } } // namespace detail // external API std::unique_ptr<column> set_keys(dictionary_column_view const& dictionary_column, column_view const& keys, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::set_keys(dictionary_column, keys, mr); } } // namespace dictionary } // namespace cudf
3f10ba84bb6ee87f93b398ddbdec2de20d4c225f.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/search.hpp> #include <cudf/detail/stream_compaction.hpp> #include <cudf/detail/valid_if.cuh> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/stream_compaction.hpp> #include <rmm/thrust_rmm_allocator.h> #include <thrust/binary_search.h> namespace cudf { namespace dictionary { namespace detail { namespace { /** * @brief Type-dispatch functor for remapping the old indices to new values based on the new * key-set. * * The dispatch is based on the key type. * The output column is the new indices column for the new dictionary column. 
*/ struct dispatch_compute_indices { template <typename Element> typename std::enable_if_t<cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>> operator()(dictionary_column_view const& input, column_view const& new_keys, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { auto dictionary_view = column_device_view::create(input.parent(), stream); auto d_dictionary = *dictionary_view; auto dictionary_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), [d_dictionary] __device__(size_type idx) { if (d_dictionary.is_null(idx)) return Element{}; column_device_view d_keys = d_dictionary.child(1); size_type index = static_cast<size_type>(d_dictionary.element<dictionary32>(idx)); return d_keys.template element<Element>(index); }); auto new_keys_view = column_device_view::create(new_keys, stream); auto d_new_keys = *new_keys_view; auto keys_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), [d_new_keys] __device__(size_type idx) { return d_new_keys.template element<Element>(idx); }); auto result = make_numeric_column( data_type{type_id::INT32}, input.size(), mask_state::UNALLOCATED, stream, mr); auto d_result = result->mutable_view().data<int32_t>(); auto execpol = rmm::exec_policy(stream); thrust::lower_bound(execpol->on(stream), keys_itr, keys_itr + new_keys.size(), dictionary_itr, dictionary_itr + input.size(), d_result, thrust::less<Element>()); result->set_null_count(0); return result; } template <typename Element> typename std::enable_if_t<!cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>> operator()(dictionary_column_view const& input, column_view const& new_keys, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_FAIL("list_view dictionary set_keys not supported yet"); } }; } // namespace // std::unique_ptr<column> set_keys( dictionary_column_view const& dictionary_column, column_view const& new_keys, 
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0) { CUDF_EXPECTS(!new_keys.has_nulls(), "keys parameter must not have nulls"); auto keys = dictionary_column.keys(); CUDF_EXPECTS(keys.type() == new_keys.type(), "keys types must match"); // copy the keys -- use drop_duplicates to make sure they are sorted and unique auto table_keys = cudf::detail::drop_duplicates(table_view{{new_keys}}, std::vector<size_type>{0}, duplicate_keep_option::KEEP_FIRST, null_equality::EQUAL, mr, stream) ->release(); std::unique_ptr<column> keys_column(std::move(table_keys.front())); // compute the new nulls auto matches = cudf::detail::contains(keys, keys_column->view(), mr, stream); auto d_matches = matches->view().data<bool>(); auto d_indices = dictionary_column.indices().data<int32_t>(); auto d_null_mask = dictionary_column.null_mask(); auto new_nulls = cudf::detail::valid_if( thrust::make_counting_iterator<size_type>(dictionary_column.offset()), thrust::make_counting_iterator<size_type>(dictionary_column.offset() + dictionary_column.size()), [d_null_mask, d_indices, d_matches] __device__(size_type idx) { if (d_null_mask && !bit_is_set(d_null_mask, idx)) return false; return d_matches[d_indices[idx]]; }, stream, mr); // compute the new indices auto indices_column = type_dispatcher(keys_column->type(), dispatch_compute_indices{}, dictionary_column, keys_column->view(), mr, stream); // create column with keys_column and indices_column return make_dictionary_column(std::move(keys_column), std::move(indices_column), std::move(new_nulls.first), new_nulls.second); } } // namespace detail // external API std::unique_ptr<column> set_keys(dictionary_column_view const& dictionary_column, column_view const& keys, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::set_keys(dictionary_column, keys, mr); } } // namespace dictionary } // namespace cudf
ef1afe5f3b099b0a1a2dd20e195211a231584b22.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <ATen/hip/HIPApplyUtils.cuh> #include "cuda_helpers.h" #include <iostream> #include <vector> int const threadsPerBlock = sizeof(unsigned long long) * 8; template <typename T> __device__ inline float devIoU(T const* const a, T const* const b) { T left = max(a[0], b[0]), right = min(a[2], b[2]); T top = max(a[1], b[1]), bottom = min(a[3], b[3]); T width = max(right - left, (T)0), height = max(bottom - top, (T)0); T interS = width * height; T Sa = (a[2] - a[0]) * (a[3] - a[1]); T Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / (Sa + Sb - interS); } template <typename T> __global__ void nms_kernel( const int n_boxes, const float nms_overlap_thresh, const T* dev_boxes, unsigned long long* dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ T block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const T* cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == 
col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU<T>(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 5 tensor at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) { using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); at::hip::HIPGuardMasqueradingAsCUDA device_guard(boxes.device()); auto scores = boxes.select(1, 4); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); int boxes_num = boxes.size(0); const int col_blocks = at::cuda::ATenCeilDiv(boxes_num, threadsPerBlock); at::Tensor mask = at::empty({boxes_num * col_blocks}, boxes.options().dtype(at::kLong)); dim3 blocks(col_blocks, col_blocks); dim3 threads(threadsPerBlock); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( boxes_sorted.type(), "nms_kernel_cuda", [&] { hipLaunchKernelGGL(( nms_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream, boxes_num, nms_overlap_thresh, boxes_sorted.data<scalar_t>(), (unsigned long long*)mask.data<int64_t>()); }); at::Tensor mask_cpu = mask.to(at::kCPU); unsigned long long* mask_host = (unsigned long long*)mask_cpu.data<int64_t>(); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long* p = mask_host + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } 
AT_CUDA_CHECK(hipGetLastError()); return order_t .index({keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) .to(order_t.device(), keep.scalar_type())}); }
ef1afe5f3b099b0a1a2dd20e195211a231584b22.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include "cuda_helpers.h" #include <iostream> #include <vector> int const threadsPerBlock = sizeof(unsigned long long) * 8; template <typename T> __device__ inline float devIoU(T const* const a, T const* const b) { T left = max(a[0], b[0]), right = min(a[2], b[2]); T top = max(a[1], b[1]), bottom = min(a[3], b[3]); T width = max(right - left, (T)0), height = max(bottom - top, (T)0); T interS = width * height; T Sa = (a[2] - a[0]) * (a[3] - a[1]); T Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / (Sa + Sb - interS); } template <typename T> __global__ void nms_kernel( const int n_boxes, const float nms_overlap_thresh, const T* dev_boxes, unsigned long long* dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ T block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const T* cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU<T>(cur_box, block_boxes + 
i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 5 tensor at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) { using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); at::cuda::CUDAGuard device_guard(boxes.device()); auto scores = boxes.select(1, 4); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); int boxes_num = boxes.size(0); const int col_blocks = at::cuda::ATenCeilDiv(boxes_num, threadsPerBlock); at::Tensor mask = at::empty({boxes_num * col_blocks}, boxes.options().dtype(at::kLong)); dim3 blocks(col_blocks, col_blocks); dim3 threads(threadsPerBlock); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( boxes_sorted.type(), "nms_kernel_cuda", [&] { nms_kernel<scalar_t><<<blocks, threads, 0, stream>>>( boxes_num, nms_overlap_thresh, boxes_sorted.data<scalar_t>(), (unsigned long long*)mask.data<int64_t>()); }); at::Tensor mask_cpu = mask.to(at::kCPU); unsigned long long* mask_host = (unsigned long long*)mask_cpu.data<int64_t>(); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long* p = mask_host + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } AT_CUDA_CHECK(cudaGetLastError()); return order_t .index({keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) .to(order_t.device(), keep.scalar_type())}); }
b209f2d75b18a06e733dc509875e6e13f8312b2c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "symbols/NaN.cuh" // -1/target probability if target = 1.0, 0.0 otherwise __global__ void backwardLogisticLossKernel (int batchSize, int numberEntriesPerInstance, int numberIterations, float *predictions, float *targets, float *result) { // What's the first entry index within the instance that this thread should operate on? int startIndexWithinInstance = blockIdx.y * (blockDim.x * numberIterations) + threadIdx.x * numberIterations; // Continue if this index is smaller than the dimension of the instance. if(startIndexWithinInstance < numberEntriesPerInstance) { // What's the first entry index within the batch that this thread should operate on? int startIndexWithinBatch = blockIdx.x * numberEntriesPerInstance + startIndexWithinInstance; // Is the instance greater than the current batch size? if(blockIdx.x >= batchSize) { setToNan(result, startIndexWithinBatch, numberIterations); } else { for(int indexEntry = startIndexWithinBatch; indexEntry < startIndexWithinBatch + numberIterations; indexEntry++) { result[indexEntry] = targets[indexEntry] * -(1.0/predictions[indexEntry]); } } } }
b209f2d75b18a06e733dc509875e6e13f8312b2c.cu
#include "symbols/NaN.cuh" // -1/target probability if target = 1.0, 0.0 otherwise __global__ void backwardLogisticLossKernel (int batchSize, int numberEntriesPerInstance, int numberIterations, float *predictions, float *targets, float *result) { // What's the first entry index within the instance that this thread should operate on? int startIndexWithinInstance = blockIdx.y * (blockDim.x * numberIterations) + threadIdx.x * numberIterations; // Continue if this index is smaller than the dimension of the instance. if(startIndexWithinInstance < numberEntriesPerInstance) { // What's the first entry index within the batch that this thread should operate on? int startIndexWithinBatch = blockIdx.x * numberEntriesPerInstance + startIndexWithinInstance; // Is the instance greater than the current batch size? if(blockIdx.x >= batchSize) { setToNan(result, startIndexWithinBatch, numberIterations); } else { for(int indexEntry = startIndexWithinBatch; indexEntry < startIndexWithinBatch + numberIterations; indexEntry++) { result[indexEntry] = targets[indexEntry] * -(1.0/predictions[indexEntry]); } } } }
c24ef70e9eed38b8ea61451b0746b3a11c24739c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/for_each.h> #include <thrust/extrema.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <cudf/utilities/error.hpp> #include "nvstrings/NVStrings.h" #include "./NVStringsImpl.h" #include "../custring_view.cuh" #include "../util.h" // common token counter for all split methods struct token_counter { custring_view_array d_strings; char* d_delimiter; unsigned int dellen; int tokens; int* d_counts; // token_counter(custring_view_array dstrs, char* delim, unsigned int dlen, int t, int* counts) : d_strings(dstrs), d_delimiter(delim), dellen(dlen), tokens(t), d_counts(counts) {} __device__ void operator()(unsigned int idx) { custring_view* dstr = d_strings[idx]; if( dstr ) d_counts[idx] = dstr->split_size(d_delimiter,dellen,0,tokens); } }; // special-case token counter for whitespace delimiter // leading and trailing and duplicate delimiters are ignored struct whitespace_token_counter { custring_view_array d_strings; int tokens; int* d_counts; // count the 'words' only between non-whitespace characters whitespace_token_counter(custring_view_array dstrs, int t, int* counts) : d_strings(dstrs), tokens(t), d_counts(counts) {} __device__ void operator()(unsigned int idx) { 
custring_view* dstr = d_strings[idx]; if( !dstr ) return; int dcount = 0; bool spaces = true; custring_view::iterator itr = dstr->begin(); while( itr != dstr->end() ) { Char ch = *itr; if( spaces == (ch <= ' ') ) itr++; else { dcount += (int)spaces; spaces = !spaces; } } if( tokens && (dcount > tokens) ) dcount = tokens; if( dcount==0 ) dcount = 1; // always allow empty string d_counts[idx] = dcount; //printf("dcount=%d\n",dcount); } }; // // Coded form Pandas split algorithm as documented here: // https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.split.html#pandas.Series.str.split // // Example: // // import pandas as pd // pd_series = pd.Series(['', None, 'a_b', '_a_b_', '__aa__bb__', '_a__bbb___c', '_aa_b__ccc__']) // print(pd_series.str.split(pat='_', expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [, a, b, ] // 4 [, , aa, , bb, , ] // 5 [, a, , bbb, , , c] // 6 [, aa, b, , ccc, , ] // // print(pd_series.str.split(pat='_', n=1, expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [, a_b_] // 4 [, _aa__bb__] // 5 [, a__bbb___c] // 6 [, aa_b__ccc__] // // print(pd_series.str.split(pat='_', n=2, expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [, a, b_] // 4 [, , aa__bb__] // 5 [, a, _bbb___c] // 6 [, aa, b__ccc__] // // int NVStrings::split_record( const char* delimiter, int maxsplit, std::vector<NVStrings*>& results) { if( delimiter==0 ) return split_record(maxsplit,results); auto execpol = rmm::exec_policy(0); unsigned int dellen = (unsigned int)strlen(delimiter); char* d_delimiter = device_alloc<char>(dellen+1,0); CUDA_TRY( hipMemcpyAsync(d_delimiter,delimiter,dellen+1,hipMemcpyHostToDevice)) int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), 
thrust::make_counting_iterator<unsigned int>(0), count, token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); // build int arrays to hold each string's split size int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); rmm::device_vector<int> sizes(totalSizes,0), offsets(count,0), totals(count,0); thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); int* d_offsets = offsets.data().get(); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, dellen, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int* dsizes = d_sizes + d_offsets[idx]; int dcount = d_counts[idx]; d_totals[idx] = dstr->split_size(d_delimiter,dellen,dsizes,dcount); }); // hipDeviceSynchronize(); // now build an array of custring_views* arrays for each value int totalNewStrings = 0; thrust::host_vector<int> h_counts(counts); thrust::host_vector<int> h_totals(totals); thrust::host_vector<custring_view_array> h_splits(count,nullptr); thrust::host_vector<char*> h_buffers(count,nullptr); for( unsigned int idx=0; idx < count; ++idx ) { int splitCount = h_counts[idx]; if( splitCount==0 ) { results.push_back(0); continue; } NVStrings* splitResult = new NVStrings(splitCount); results.push_back(splitResult); h_splits[idx] = splitResult->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; totalNewStrings += splitCount; } // rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the splits and fill in the arrays thrust::for_each_n(execpol->on(0), 
thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, dellen, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int d_count = d_counts[idx]; if( d_count < 1 ) return; char* buffer = (char*)d_buffers[idx]; int* dsizes = d_sizes + d_offsets[idx]; custring_view_array d_strs = d_splits[idx]; for( int i=0; i < d_count; ++i ) { int size = ALIGN_SIZE(dsizes[i]); d_strs[i] = (custring_view*)buffer; buffer += size; } dstr->split(d_delimiter,dellen,d_count,d_strs); }); // printCudaError(hipDeviceSynchronize(),"nvs-split_record"); RMM_FREE(d_delimiter,0); return totalNewStrings; } // // Whitespace delimiter algorithm is very different. // It follows the Python str.split algorithm as defined in Pandas: https://docs.python.org/3/library/stdtypes.html#str.split // Paraphrased as follows (for null delimiter): // Runs of consecutive whitespace are regarded as a single separator, // and the result will contain no empty strings at the start orend if // the string has leading or trailing whitespace. // Also whitespace is not just space. // The algorithm below uses the shortcut (<=' ') to catch \t\r\n or any other control character. // The above statement does not account for maxplit as seen in the following examples where n=maxpslit. 
// // import pandas as pd // pd_series = pd.Series(['', None, 'a b', ' a b ', ' aa bb ', ' a bbb c', ' aa b ccc ']) // print(pd_series.str.split(pat=None, expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [a, b] // 4 [aa, bb] // 5 [a, bbb, c] // 6 [aa, b, ccc] // // print(pd_series.str.split(pat=None, n=1, expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [a, b ] // 4 [aa, bb ] // 5 [a, bbb c] // 6 [aa, b ccc ] // // print(pd_series.str.split(pat=None, n=2, expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [a, b] // 4 [aa, bb] // 5 [a, bbb, c] // 6 [aa, b, ccc ] // // Note: // - lack of empty strings // - trailing and leading characters are ignored (sometimes) // - multiple whitespace characters are ignored (sometimes) // int NVStrings::split_record( int maxsplit, std::vector<NVStrings*>& results) { auto execpol = rmm::exec_policy(0); int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, whitespace_token_counter(d_strings,tokens,d_counts)); //hipDeviceSynchronize(); // build int arrays to hold each string's split size int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); rmm::device_vector<int> sizes(totalSizes,0), offsets(count,0), totals(count,0); thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); int* d_offsets = offsets.data().get(); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, tokens, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; // null string int* dsizes = 
d_sizes + d_offsets[idx]; int dcount = d_counts[idx]; int bytes = 0, sidx = 0, spos = 0, nchars = dstr->chars_count(); //printf("tokens=%d,dcount=%d,nchars=%d\n",tokens,dcount,nchars); bool spaces = true; for( int pos=0; (pos < nchars) && (sidx < dcount); ++pos ) { Char ch = dstr->at(pos); if( spaces == (ch <= ' ') ) { if( spaces ) spos = pos+1; continue; } if( !spaces ) { if( (sidx+1)==tokens ) break; int size = dstr->substr_size(spos,pos-spos); dsizes[sidx++] = size; //printf("%d:pos=%d,spos=%d,size=%d\n",(sidx-1),pos,spos,size); bytes += ALIGN_SIZE(size); spos = pos + 1; } spaces = !spaces; } if( sidx < dcount ) { int size = 0; if( spos < nchars ) size = dstr->substr_size(spos,nchars-spos); else size = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); dsizes[sidx] = size; //printf("spos=%d,nchars=%d,size=%d\n",spos,nchars,size); bytes += ALIGN_SIZE(size); } //printf("bytes=%d\n",bytes); d_totals[idx] = bytes; }); // hipDeviceSynchronize(); // now build an array of custring_views* arrays for each value int totalNewStrings = 0; thrust::host_vector<int> h_counts(counts); thrust::host_vector<int> h_totals(totals); thrust::host_vector<custring_view_array> h_splits(count,nullptr); thrust::host_vector<char*> h_buffers(count,nullptr); for( unsigned int idx=0; idx < count; ++idx ) { int splitCount = h_counts[idx]; if( splitCount==0 ) { results.push_back(0); continue; } NVStrings* splitResult = new NVStrings(splitCount); results.push_back(splitResult); h_splits[idx] = splitResult->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; totalNewStrings += splitCount; } // rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the splits and fill in the arrays 
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, tokens, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; // null string int dcount = d_counts[idx]; char* buffer = (char*)d_buffers[idx]; int* dsizes = d_sizes + d_offsets[idx]; custring_view_array d_strs = d_splits[idx]; int emptysize = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); if( dcount==0 || dsizes[0]==emptysize ) { d_strs[0] = custring_view::create_from(buffer,buffer,0); return; // empty string } for( int i=0; i < dcount; ++i ) { int size = ALIGN_SIZE(dsizes[i]); d_strs[i] = (custring_view*)buffer; buffer += size; } int sidx = 0, spos = 0, nchars = dstr->chars_count(); //printf(">tokens=%d,dcount=%d,nchars=%d",tokens,dcount,nchars); bool spaces = true; for( int pos=0; (pos < nchars) && (sidx < dcount); ++pos ) { Char ch = dstr->at(pos); if( spaces == (ch <= ' ') ) { if( spaces ) spos = pos+1; continue; } if( !spaces ) { if( (sidx+1)==tokens ) break; d_strs[sidx] = dstr->substr(spos,pos-spos,1,(void*)d_strs[sidx]); //printf(">%d:pos=%d,spos=%d\n",sidx,pos,spos); ++sidx; spos = pos + 1; } spaces = !spaces; } if( (sidx < dcount) && (spos < nchars) ) { d_strs[sidx] = dstr->substr(spos,nchars-spos,1,(void*)d_strs[sidx]); //printf(">%d:spos=%d,nchars=%d\n",sidx,spos,nchars); } }); // printCudaError(hipDeviceSynchronize(),"nvs-split_record_ws"); return totalNewStrings; } // // This is just the split-from-the-right version of above. 
// int NVStrings::rsplit_record( const char* delimiter, int maxsplit, std::vector<NVStrings*>& results) { if( delimiter==0 ) return rsplit_record(maxsplit,results); auto execpol = rmm::exec_policy(0); unsigned int dellen = (unsigned int)strlen(delimiter); char* d_delimiter = device_alloc<char>(dellen+1,0); CUDA_TRY( hipMemcpyAsync(d_delimiter,delimiter,dellen+1,hipMemcpyHostToDevice)) int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); // build int arrays to hold each string's split size int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); rmm::device_vector<int> sizes(totalSizes,0), offsets(count,0), totals(count,0); thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); int* d_offsets = offsets.data().get(); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, dellen, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int dcount = d_counts[idx]; int* dsizes = d_sizes + d_offsets[idx]; d_totals[idx] = dstr->rsplit_size(d_delimiter,dellen,dsizes,dcount); }); hipDeviceSynchronize(); // now build an array of custring_views* arrays for each value int totalNewStrings = 0; thrust::host_vector<int> h_counts(counts); thrust::host_vector<int> h_totals(totals); thrust::host_vector<custring_view_array> h_splits(count,nullptr); thrust::host_vector<char*> h_buffers(count,nullptr); for( unsigned int idx=0; idx < count; 
++idx ) { int splitCount = h_counts[idx]; if( splitCount==0 ) { results.push_back(0); continue; } NVStrings* splitResult = new NVStrings(splitCount); results.push_back(splitResult); h_splits[idx] = splitResult->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; totalNewStrings += splitCount; } // rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the splits and fill in the arrays thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, dellen, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int d_count = d_counts[idx]; if( d_count < 1 ) return; char* buffer = (char*)d_buffers[idx]; int* dsizes = d_sizes + d_offsets[idx]; custring_view_array d_strs = d_splits[idx]; for( int i=0; i < d_count; ++i ) { d_strs[i] = (custring_view*)buffer; int size = ALIGN_SIZE(dsizes[i]); buffer += size; //printf("%d:%d=%d\n",(int)idx,i,size); } dstr->rsplit(d_delimiter,dellen,d_count,d_strs); }); // printCudaError(hipDeviceSynchronize(),"nvs-rsplit_record"); RMM_FREE(d_delimiter,0); return totalNewStrings; } // // And the whitespace-delimited version of rsplit_record // int NVStrings::rsplit_record( int maxsplit, std::vector<NVStrings*>& results) { auto execpol = rmm::exec_policy(0); int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned 
int>(0), count, whitespace_token_counter(d_strings,tokens,d_counts)); // build int arrays to hold each string's split size int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); rmm::device_vector<int> sizes(totalSizes,0), offsets(count,0), totals(count,0); thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); int* d_offsets = offsets.data().get(); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, tokens, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int* dsizes = d_sizes + d_offsets[idx]; int dcount = d_counts[idx]; int sidx = (dcount-1), nchars = dstr->chars_count(); int bytes = 0, epos = nchars; //printf("tokens=%d,dcount=%d,nchars=%d\n",tokens,dcount,nchars); bool spaces = true; for( int pos=nchars; (pos>0) && (sidx>=0); --pos ) { Char ch = dstr->at(pos-1); if( spaces == (ch <= ' ') ) { if( spaces ) epos = pos-1; continue; } if( !spaces ) { if( (dcount-sidx)==tokens ) break; int size = dstr->substr_size(pos,epos-pos); dsizes[sidx--] = size; //printf("%d:pos=%d,epos=%d,size=%d\n",(sidx+1),pos,epos,size); bytes += ALIGN_SIZE(size); epos = pos-1; } spaces = !spaces; } if( sidx==0 ) { int size = 0; if( epos > 0 ) size = dstr->substr_size(0,epos); else size = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); //printf("%d:epos=%d,size=%d\n",sidx,epos,size); dsizes[sidx] = size; bytes += ALIGN_SIZE(size); } //printf("bytes=%d\n",bytes); d_totals[idx] = bytes; }); hipDeviceSynchronize(); // now build an array of custring_views* arrays for each value int totalNewStrings = 0; thrust::host_vector<int> h_counts(counts); thrust::host_vector<int> h_totals(totals); thrust::host_vector<custring_view_array> h_splits(count,nullptr); thrust::host_vector<char*> h_buffers(count,nullptr); for( unsigned int idx=0; idx < 
count; ++idx ) { int splitCount = h_counts[idx]; if( splitCount==0 ) { results.push_back(0); continue; } NVStrings* splitResult = new NVStrings(splitCount); results.push_back(splitResult); h_splits[idx] = splitResult->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; totalNewStrings += splitCount; } // rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the splits and fill in the arrays thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, tokens, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int dcount = d_counts[idx]; char* buffer = (char*)d_buffers[idx]; int* dsizes = d_sizes + d_offsets[idx]; custring_view_array d_strs = d_splits[idx]; int emptysize = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); if( dcount==0 || dsizes[0]==emptysize ) { d_strs[0] = custring_view::create_from(buffer,buffer,0); return; // empty string } for( int i=0; i < dcount; ++i ) { int size = ALIGN_SIZE(dsizes[i]); d_strs[i] = (custring_view*)buffer; buffer += size; } int sidx = (dcount-1), nchars = dstr->chars_count(); int epos = nchars; //printf(">tokens=%d,dcount=%d,nchars=%d\n",tokens,dcount,nchars); bool spaces = true; for( int pos=nchars; (pos > 0) && (sidx >= 0); --pos ) { Char ch = dstr->at(pos-1); if( spaces == (ch <= ' ') ) { if( spaces ) epos = pos-1; continue; } if( !spaces ) { if( (dcount-sidx)==tokens ) break; d_strs[sidx] = dstr->substr(pos,epos-pos,1,(void*)d_strs[sidx]); //printf(">%d:pos=%d,epos=%d\n",sidx,pos,epos); --sidx; epos = pos-1; } spaces = !spaces; } if( (sidx>=0) && (epos > 0) ) { d_strs[sidx] = 
dstr->substr(0,epos,1,(void*)d_strs[sidx]); //printf(">%d:epos=%d\n",sidx,epos); } }); // printCudaError(hipDeviceSynchronize(),"nvs-rsplit_record_ws"); return totalNewStrings; } // // This will create new columns by splitting the array of strings vertically. // All the first tokens go in the first column, all the second tokens go in the second column, etc. // It is comparable to Pandas split with expand=True but the rows/columns are transposed. // Example: // import pandas as pd // pd_series = pd.Series(['', None, 'a_b', '_a_b_', '__aa__bb__', '_a__bbb___c', '_aa_b__ccc__']) // print(pd_series.str.split(pat='_', expand=True)) // 0 1 2 3 4 5 6 // 0 '' None None None None None None // 1 None None None None None None None // 2 a b None None None None None // 3 '' a b '' None None None // 4 '' '' aa '' bb '' '' // 5 '' a '' bbb '' '' c // 6 '' aa b '' ccc '' '' // // print(pd_series.str.split(pat='_', n=1, expand=True)) // 0 1 // 0 '' None // 1 None None // 2 a b // 3 '' a_b_ // 4 '' _aa__bb__ // 5 '' a__bbb___c // 6 '' aa_b__ccc__ // // print(pd_series.str.split(pat='_', n=2, expand=True)) // 0 1 2 // 0 '' None None // 1 None None None // 2 a b None // 3 '' a b_ // 4 '' aa__bb__ // 5 '' a _bbb___c // 6 '' aa b__ccc__ // unsigned int NVStrings::split( const char* delimiter, int maxsplit, std::vector<NVStrings*>& results) { if( delimiter==0 ) return split(maxsplit,results); auto execpol = rmm::exec_policy(0); unsigned int dellen = (unsigned int)strlen(delimiter); char* d_delimiter = device_alloc<char>(dellen+1,0); CUDA_TRY( hipMemcpyAsync(d_delimiter,delimiter,dellen+1,hipMemcpyHostToDevice)) int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned 
int>(0), count, token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); // boundary case: if no columns, return one null column (issue #119) if( columnsCount==0 ) results.push_back(new NVStrings(count)); // create each column for( int col=0; col < columnsCount; ++col ) { // first, build a vector of pair<char*,int>'s' for each column // each pair points to a string for this column for each row //st = GetTime(); rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count); thrust::pair<const char*,size_t>* d_indexes = indexes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, col, d_delimiter, dellen, d_counts, d_indexes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; d_indexes[idx].first = nullptr; // initialize to d_indexes[idx].second = 0; // null string if( !dstr ) return; // dcount already accounts for the maxsplit value int dcount = d_counts[idx]; if( col >= dcount ) return; // passed the end for this string // skip delimiters until we reach this column int dchars = custring_view::chars_in_string(d_delimiter,dellen); int spos = 0, nchars = dstr->chars_count(); int epos = nchars; for( int c=0; c < (dcount-1); ++c ) { epos = dstr->find(d_delimiter,dellen,spos); if( epos < 0 ) { epos = nchars; break; } if( c==col ) // found our column break; spos = epos + dchars; epos = nchars; } // this will be the string for this column if( spos < epos ) { spos = dstr->byte_offset_for(spos); // convert char pos epos = dstr->byte_offset_for(epos); // to byte offset d_indexes[idx].first = dstr->data() + spos; d_indexes[idx].second = (epos-spos); } else { // this will create empty string instead of null one d_indexes[idx].first = dstr->data(); } }); //hipError_t err = hipDeviceSynchronize(); //if( err != hipSuccess ) //{ // fprintf(stderr,"nvs-split(%s,%d), col=%d\n",delimiter,maxsplit,col); // 
printCudaError(err); //} // NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count); results.push_back(column); } // RMM_FREE(d_delimiter,0); return (unsigned int)results.size(); } // // This is the whitespace-delimiter version of the column split function. // Like the one above, it can be compared to Pandas split with expand=True but // with the rows/columns transposed. // // import pandas as pd // pd_series = pd.Series(['', None, 'a b', ' a b ', ' aa bb ', ' a bbb c', ' aa b ccc ']) // print(pd_series.str.split(pat=None, expand=True)) // 0 1 2 // 0 None None None // 1 None None None // 2 a b None // 3 a b None // 4 aa bb None // 5 a bbb c // 6 aa b ccc // // print(pd_series.str.split(pat=None, n=1, expand=True)) // 0 1 // 0 None None // 1 None None // 2 a b // 3 a b // 4 aa bb // 5 a bbb c // 6 aa b ccc // // print(pd_series.str.split(pat=None, n=2, expand=True)) // 0 1 2 // 0 None None None // 1 None None None // 2 a b None // 3 a b None // 4 aa bb None // 5 a bbb c // 6 aa b ccc // // Like the split_record method, there are no empty strings here. 
// unsigned int NVStrings::split( int maxsplit, std::vector<NVStrings*>& results) { auto execpol = rmm::exec_policy(0); int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, whitespace_token_counter(d_strings,tokens,d_counts)); int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); // boundary case: if no columns, return one null column (issue #119) if( columnsCount==0 ) results.push_back(new NVStrings(count)); // create each column for( int col=0; col < columnsCount; ++col ) { // first, build a vector of pair<char*,int>'s' for each column // each pair points to a string for this column for each row //st = GetTime(); rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count); thrust::pair<const char*,size_t>* d_indexes = indexes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, col, tokens, d_counts, d_indexes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; d_indexes[idx].first = nullptr; // initialize to d_indexes[idx].second = 0; // null string if( !dstr ) return; // null string int dcount = d_counts[idx]; if( col >= dcount ) return; int c = 0, nchars = dstr->chars_count(); int spos = 0, epos = nchars; //printf(">%d:tokens=%d,dcount=%d,nchars=%d\n",col,tokens,dcount,nchars); bool spaces = true; for( int pos=0; pos < nchars; ++pos ) { Char ch = dstr->at(pos); if( spaces == (ch <= ' ') ) { if( spaces ) spos = pos+1; else epos = pos+1; continue; } if( !spaces ) { epos = nchars; if( (c+1)==tokens ) break; epos = pos; if( c==col ) break; spos = pos+1; epos = nchars; ++c; } spaces = !spaces; } if( spos < 
epos ) { spos = dstr->byte_offset_for(spos); // convert char pos epos = dstr->byte_offset_for(epos); // to byte offset //printf(">%d:spos=%d,epos=%d\n",c,spos,epos); d_indexes[idx].first = dstr->data() + spos; d_indexes[idx].second = (epos-spos); } //else //{ no empty strings in split-column-whitespace // d_indexes[idx].first = dstr->data(); //} }); //hipError_t err = hipDeviceSynchronize(); //if( err != hipSuccess ) //{ // fprintf(stderr,"nvs-split-ws(%d), col=%d\n",maxsplit,col); // printCudaError(err); //} // NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count); results.push_back(column); } // return (unsigned int)results.size(); } // // The split-from-the-right version of split // unsigned int NVStrings::rsplit( const char* delimiter, int maxsplit, std::vector<NVStrings*>& results) { if( delimiter==0 ) return rsplit(maxsplit,results); auto execpol = rmm::exec_policy(0); unsigned int dellen = (unsigned int)strlen(delimiter); char* d_delimiter = device_alloc<char>(dellen+1,0); CUDA_TRY( hipMemcpyAsync(d_delimiter,delimiter,dellen+1,hipMemcpyHostToDevice)) int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); // boundary case: if no columns, return one null column (issue #119) if( columnsCount==0 ) results.push_back(new NVStrings(count)); // create each column for( int col = 0; col < columnsCount; ++col ) { // first, build a vector of pair<char*,int>'s' for each column // each pair points to a string for this column for each row 
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count); thrust::pair<const char*,size_t>* d_indexes = indexes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, col, d_delimiter, dellen, d_counts, d_indexes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; d_indexes[idx].first = nullptr; // initialize to d_indexes[idx].second = 0; // null string if( !dstr ) return; // dcount already accounts for the maxsplit value int dcount = d_counts[idx]; if( col >= dcount ) return; // passed the end for this string // skip delimiters until we reach this column int dchars = custring_view::chars_in_string(d_delimiter,dellen); int spos = 0, nchars = dstr->chars_count(); int epos = nchars, pos = dstr->size()-1; for( int c=(dcount-1); c > 0; --c ) { spos = dstr->rfind(d_delimiter,dellen,0,epos); if( spos < 0 ) { spos = 0; break; } if( c==col ) // found our column { spos += dchars; // do not include delimiter break; } epos = spos; spos = 0; } // this will be the string for this column if( spos < epos ) { spos = dstr->byte_offset_for(spos); // convert char pos epos = dstr->byte_offset_for(epos); // to byte offset d_indexes[idx].first = dstr->data() + spos; d_indexes[idx].second = (epos-spos); } else { // this will create empty string instead of null one d_indexes[idx].first = dstr->data(); } }); //hipError_t err = hipDeviceSynchronize(); //if( err != hipSuccess ) //{ // fprintf(stderr,"nvs-rsplit(%s,%d)\n",delimiter,maxsplit); // printCudaError(err); //} // NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count); results.push_back(column); } // RMM_FREE(d_delimiter,0); return (unsigned int)results.size(); } // // The whitespace-delimited version of rsplit. 
// unsigned int NVStrings::rsplit( int maxsplit, std::vector<NVStrings*>& results) { auto execpol = rmm::exec_policy(0); int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, whitespace_token_counter(d_strings,tokens,d_counts)); int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); // boundary case: if no columns, return one null column (issue #119) if( columnsCount==0 ) results.push_back(new NVStrings(count)); // create each column for( int col = 0; col < columnsCount; ++col ) { // first, build a vector of pair<char*,int>'s' for each column // each pair points to a string for this column for each row rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count); thrust::pair<const char*,size_t>* d_indexes = indexes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, col, columnsCount, tokens, d_counts, d_indexes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; d_indexes[idx].first = nullptr; // initialize to d_indexes[idx].second = 0; // null string if( !dstr ) return; // null string int dcount = d_counts[idx]; if( col >= dcount ) return; int c = (dcount-1), nchars = dstr->chars_count(); int spos = 0, epos = nchars; //printf(">%d:tokens=%d,dcount=%d,nchars=%d\n",col,tokens,dcount,nchars); bool spaces = true; for( int pos=nchars; pos > 0; --pos ) { Char ch = dstr->at(pos-1); if( spaces == (ch <= ' ') ) { if( spaces ) epos = pos-1; else spos = pos-1; continue; } if( !spaces ) { spos = 0; if( (columnsCount-c)==tokens ) break; spos = pos; if( c==col ) break; epos = pos-1; spos = 0; --c; } spaces = !spaces; } if( 
spos < epos ) { spos = dstr->byte_offset_for(spos); // convert char pos epos = dstr->byte_offset_for(epos); // to byte offset //printf(">%d:spos=%d,epos=%d\n",c,spos,epos); d_indexes[idx].first = dstr->data() + spos; d_indexes[idx].second = (epos-spos); } //else //{ no empty strings in rsplit column whitespace // d_indexes[idx].first = dstr->data(); //} }); //hipError_t err = hipDeviceSynchronize(); //if( err != hipSuccess ) //{ // fprintf(stderr,"nvs-rsplit-ws(%d)\n",maxsplit); // printCudaError(err); //} // NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count); results.push_back(column); } // return (unsigned int)results.size(); } // // Partition is split the string at the first occurrence of delimiter, and return 3 elements containing // the part before the delimiter, the delimiter itself, and the part after the delimiter. // If the delimiter is not found, return 3 elements containing the string itself, followed by two empty strings. // // >>> import pandas as pd // >>> strs = pd.Series(['hllo', None, 'a_bc_df', 'a__bc', '_ab_cd', 'ab_cd_']) // >>> strs.str.partition('_') // 0 1 2 // 0 hllo // 1 None None None // 2 a _ bc_df // 3 a _ _bc // 4 _ ab_cd // 5 ab _ cd_ // int NVStrings::partition( const char* delimiter, std::vector<NVStrings*>& results) { if( delimiter==0 ) return 0; unsigned int bytes = (unsigned int)strlen(delimiter); if( bytes==0 ) return 0; // just return original list? 
auto execpol = rmm::exec_policy(0); // copy delimiter to device char* d_delimiter = device_alloc<char>(bytes,0); CUDA_TRY( hipMemcpyAsync(d_delimiter,delimiter,bytes,hipMemcpyHostToDevice)) int d_asize = custring_view::alloc_size((char*)delimiter,bytes); d_asize = ALIGN_SIZE(d_asize); unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); // build int arrays to hold each string's partition sizes int totalSizes = 2 * count; rmm::device_vector<int> sizes(totalSizes,0), totals(count,0); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count, [d_strings, d_delimiter, bytes, d_asize, d_sizes, d_totals] __device__(size_t idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int* dsizes = &(d_sizes[idx*2]); d_totals[idx] = dstr->split_size(d_delimiter,bytes,dsizes,2) + d_asize; }); hipDeviceSynchronize(); // build an output array of custring_views* arrays for each value // there will always be 3 per string thrust::host_vector<int> h_totals(totals); thrust::host_vector<char*> h_buffers(count,nullptr); thrust::host_vector<custring_view_array> h_splits(count,nullptr); for( unsigned int idx=0; idx < count; ++idx ) { NVStrings* result = new NVStrings(3); results.push_back(result); h_splits[idx] = result->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); result->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; } rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the partition and fill in the arrays thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, bytes, d_buffers, d_sizes, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; 
if( !dstr ) return; char* buffer = (char*)d_buffers[idx]; int* dsizes = &(d_sizes[idx*2]); custring_view_array d_strs = d_splits[idx]; d_strs[0] = custring_view::create_from(buffer,0,0); buffer += ALIGN_SIZE(dsizes[0]); d_strs[1] = custring_view::create_from(buffer,0,0); buffer += ALIGN_SIZE(dsizes[1]); d_strs[2] = custring_view::create_from(buffer,0,0); // int dcount = dstr->rsplit_size(d_delimiter,bytes,0,2); dstr->split(d_delimiter,bytes,2,d_strs); if( dcount==2 ) { // insert delimiter element in the middle custring_view* tmp = d_strs[1]; d_strs[1] = custring_view::create_from(buffer,d_delimiter,bytes); d_strs[2] = tmp; } }); printCudaError(hipDeviceSynchronize(),"nvs-partition"); RMM_FREE(d_delimiter,0); return count; } // // This follows most of the same logic as partition above except that the delimiter // search starts from the end of the string. Also, if no delimiter is found the // resulting array includes two empty strings followed by the original string. // // >>> import pandas as pd // >>> strs = pd.Series(['hllo', None, 'a_bc_df', 'a__bc', '_ab_cd', 'ab_cd_']) // >>> strs.str.rpartition('_') // 0 1 2 // 0 hllo // 1 None None None // 2 a_bc _ df // 3 a_ _ bc // 4 _ab _ cd // 5 ab_cd _ // int NVStrings::rpartition( const char* delimiter, std::vector<NVStrings*>& results) { if( delimiter==0 ) return 0; unsigned int bytes = (unsigned int)strlen(delimiter); if( bytes==0 ) return 0; // just return original list? 
auto execpol = rmm::exec_policy(0); // copy delimiter to device char* d_delimiter = device_alloc<char>(bytes,0); CUDA_TRY( hipMemcpyAsync(d_delimiter,delimiter,bytes,hipMemcpyHostToDevice)) int d_asize = custring_view::alloc_size((char*)delimiter,bytes); d_asize = ALIGN_SIZE(d_asize); unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); // build int arrays to hold each string's partition sizes int totalSizes = 2 * count; rmm::device_vector<int> sizes(totalSizes,0), totals(count,0); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, bytes, d_asize, d_sizes, d_totals] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int* dsizes = &(d_sizes[idx*2]); //d_totals[idx] = dstr->rpartition_size(d_delimiter,bytes,dsizes); d_totals[idx] = dstr->rsplit_size(d_delimiter,bytes,dsizes,2) + d_asize; }); hipDeviceSynchronize(); // now build an output array of custring_views* arrays for each value // there will always be 3 per string thrust::host_vector<int> h_totals(totals); thrust::host_vector<char*> h_buffers(count,nullptr); thrust::host_vector<custring_view_array> h_splits(count,nullptr); for( unsigned int idx=0; idx < count; ++idx ) { NVStrings* result = new NVStrings(3); results.push_back(result); h_splits[idx] = result->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); result->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; } rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the partition and fill in the arrays thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, bytes, d_buffers, 
d_sizes, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; char* buffer = (char*)d_buffers[idx]; int* dsizes = &(d_sizes[idx*2]); custring_view_array d_strs = d_splits[idx]; d_strs[0] = custring_view::create_from(buffer,0,0); buffer += ALIGN_SIZE(dsizes[0]); d_strs[1] = custring_view::create_from(buffer,0,0); buffer += ALIGN_SIZE(dsizes[1]); d_strs[2] = custring_view::create_from(buffer,0,0); // int dcount = dstr->rsplit_size(d_delimiter,bytes,0,2); dstr->rsplit(d_delimiter,bytes,2,d_strs); // reorder elements if( dcount==1 ) { // if only one element, it goes on the end custring_view* tmp = d_strs[2]; d_strs[2] = d_strs[0]; d_strs[0] = tmp; } if( dcount==2 ) { // insert delimiter element in the middle custring_view* tmp = d_strs[1]; d_strs[1] = custring_view::create_from(buffer,d_delimiter,bytes); d_strs[2] = tmp; } }); printCudaError(hipDeviceSynchronize(),"nvs-rpartition"); RMM_FREE(d_delimiter,0); return count; }
c24ef70e9eed38b8ea61451b0746b3a11c24739c.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/for_each.h> #include <thrust/extrema.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <cudf/utilities/error.hpp> #include "nvstrings/NVStrings.h" #include "./NVStringsImpl.h" #include "../custring_view.cuh" #include "../util.h" // common token counter for all split methods struct token_counter { custring_view_array d_strings; char* d_delimiter; unsigned int dellen; int tokens; int* d_counts; // token_counter(custring_view_array dstrs, char* delim, unsigned int dlen, int t, int* counts) : d_strings(dstrs), d_delimiter(delim), dellen(dlen), tokens(t), d_counts(counts) {} __device__ void operator()(unsigned int idx) { custring_view* dstr = d_strings[idx]; if( dstr ) d_counts[idx] = dstr->split_size(d_delimiter,dellen,0,tokens); } }; // special-case token counter for whitespace delimiter // leading and trailing and duplicate delimiters are ignored struct whitespace_token_counter { custring_view_array d_strings; int tokens; int* d_counts; // count the 'words' only between non-whitespace characters whitespace_token_counter(custring_view_array dstrs, int t, int* counts) : d_strings(dstrs), tokens(t), d_counts(counts) {} __device__ void operator()(unsigned int idx) { custring_view* dstr = d_strings[idx]; if( !dstr ) return; int dcount = 0; 
bool spaces = true; custring_view::iterator itr = dstr->begin(); while( itr != dstr->end() ) { Char ch = *itr; if( spaces == (ch <= ' ') ) itr++; else { dcount += (int)spaces; spaces = !spaces; } } if( tokens && (dcount > tokens) ) dcount = tokens; if( dcount==0 ) dcount = 1; // always allow empty string d_counts[idx] = dcount; //printf("dcount=%d\n",dcount); } }; // // Coded form Pandas split algorithm as documented here: // https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.split.html#pandas.Series.str.split // // Example: // // import pandas as pd // pd_series = pd.Series(['', None, 'a_b', '_a_b_', '__aa__bb__', '_a__bbb___c', '_aa_b__ccc__']) // print(pd_series.str.split(pat='_', expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [, a, b, ] // 4 [, , aa, , bb, , ] // 5 [, a, , bbb, , , c] // 6 [, aa, b, , ccc, , ] // // print(pd_series.str.split(pat='_', n=1, expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [, a_b_] // 4 [, _aa__bb__] // 5 [, a__bbb___c] // 6 [, aa_b__ccc__] // // print(pd_series.str.split(pat='_', n=2, expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [, a, b_] // 4 [, , aa__bb__] // 5 [, a, _bbb___c] // 6 [, aa, b__ccc__] // // int NVStrings::split_record( const char* delimiter, int maxsplit, std::vector<NVStrings*>& results) { if( delimiter==0 ) return split_record(maxsplit,results); auto execpol = rmm::exec_policy(0); unsigned int dellen = (unsigned int)strlen(delimiter); char* d_delimiter = device_alloc<char>(dellen+1,0); CUDA_TRY( cudaMemcpyAsync(d_delimiter,delimiter,dellen+1,cudaMemcpyHostToDevice)) int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, 
token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); // build int arrays to hold each string's split size int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); rmm::device_vector<int> sizes(totalSizes,0), offsets(count,0), totals(count,0); thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); int* d_offsets = offsets.data().get(); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, dellen, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int* dsizes = d_sizes + d_offsets[idx]; int dcount = d_counts[idx]; d_totals[idx] = dstr->split_size(d_delimiter,dellen,dsizes,dcount); }); // cudaDeviceSynchronize(); // now build an array of custring_views* arrays for each value int totalNewStrings = 0; thrust::host_vector<int> h_counts(counts); thrust::host_vector<int> h_totals(totals); thrust::host_vector<custring_view_array> h_splits(count,nullptr); thrust::host_vector<char*> h_buffers(count,nullptr); for( unsigned int idx=0; idx < count; ++idx ) { int splitCount = h_counts[idx]; if( splitCount==0 ) { results.push_back(0); continue; } NVStrings* splitResult = new NVStrings(splitCount); results.push_back(splitResult); h_splits[idx] = splitResult->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; totalNewStrings += splitCount; } // rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the splits and fill in the arrays thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, 
d_delimiter, dellen, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int d_count = d_counts[idx]; if( d_count < 1 ) return; char* buffer = (char*)d_buffers[idx]; int* dsizes = d_sizes + d_offsets[idx]; custring_view_array d_strs = d_splits[idx]; for( int i=0; i < d_count; ++i ) { int size = ALIGN_SIZE(dsizes[i]); d_strs[i] = (custring_view*)buffer; buffer += size; } dstr->split(d_delimiter,dellen,d_count,d_strs); }); // printCudaError(cudaDeviceSynchronize(),"nvs-split_record"); RMM_FREE(d_delimiter,0); return totalNewStrings; } // // Whitespace delimiter algorithm is very different. // It follows the Python str.split algorithm as defined in Pandas: https://docs.python.org/3/library/stdtypes.html#str.split // Paraphrased as follows (for null delimiter): // Runs of consecutive whitespace are regarded as a single separator, // and the result will contain no empty strings at the start orend if // the string has leading or trailing whitespace. // Also whitespace is not just space. // The algorithm below uses the shortcut (<=' ') to catch \t\r\n or any other control character. // The above statement does not account for maxplit as seen in the following examples where n=maxpslit. 
// // import pandas as pd // pd_series = pd.Series(['', None, 'a b', ' a b ', ' aa bb ', ' a bbb c', ' aa b ccc ']) // print(pd_series.str.split(pat=None, expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [a, b] // 4 [aa, bb] // 5 [a, bbb, c] // 6 [aa, b, ccc] // // print(pd_series.str.split(pat=None, n=1, expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [a, b ] // 4 [aa, bb ] // 5 [a, bbb c] // 6 [aa, b ccc ] // // print(pd_series.str.split(pat=None, n=2, expand=False)) // 0 [] // 1 None // 2 [a, b] // 3 [a, b] // 4 [aa, bb] // 5 [a, bbb, c] // 6 [aa, b, ccc ] // // Note: // - lack of empty strings // - trailing and leading characters are ignored (sometimes) // - multiple whitespace characters are ignored (sometimes) // int NVStrings::split_record( int maxsplit, std::vector<NVStrings*>& results) { auto execpol = rmm::exec_policy(0); int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, whitespace_token_counter(d_strings,tokens,d_counts)); //cudaDeviceSynchronize(); // build int arrays to hold each string's split size int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); rmm::device_vector<int> sizes(totalSizes,0), offsets(count,0), totals(count,0); thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); int* d_offsets = offsets.data().get(); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, tokens, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; // null string int* dsizes = 
d_sizes + d_offsets[idx]; int dcount = d_counts[idx]; int bytes = 0, sidx = 0, spos = 0, nchars = dstr->chars_count(); //printf("tokens=%d,dcount=%d,nchars=%d\n",tokens,dcount,nchars); bool spaces = true; for( int pos=0; (pos < nchars) && (sidx < dcount); ++pos ) { Char ch = dstr->at(pos); if( spaces == (ch <= ' ') ) { if( spaces ) spos = pos+1; continue; } if( !spaces ) { if( (sidx+1)==tokens ) break; int size = dstr->substr_size(spos,pos-spos); dsizes[sidx++] = size; //printf("%d:pos=%d,spos=%d,size=%d\n",(sidx-1),pos,spos,size); bytes += ALIGN_SIZE(size); spos = pos + 1; } spaces = !spaces; } if( sidx < dcount ) { int size = 0; if( spos < nchars ) size = dstr->substr_size(spos,nchars-spos); else size = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); dsizes[sidx] = size; //printf("spos=%d,nchars=%d,size=%d\n",spos,nchars,size); bytes += ALIGN_SIZE(size); } //printf("bytes=%d\n",bytes); d_totals[idx] = bytes; }); // cudaDeviceSynchronize(); // now build an array of custring_views* arrays for each value int totalNewStrings = 0; thrust::host_vector<int> h_counts(counts); thrust::host_vector<int> h_totals(totals); thrust::host_vector<custring_view_array> h_splits(count,nullptr); thrust::host_vector<char*> h_buffers(count,nullptr); for( unsigned int idx=0; idx < count; ++idx ) { int splitCount = h_counts[idx]; if( splitCount==0 ) { results.push_back(0); continue; } NVStrings* splitResult = new NVStrings(splitCount); results.push_back(splitResult); h_splits[idx] = splitResult->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; totalNewStrings += splitCount; } // rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the splits and fill in the arrays 
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, tokens, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; // null string int dcount = d_counts[idx]; char* buffer = (char*)d_buffers[idx]; int* dsizes = d_sizes + d_offsets[idx]; custring_view_array d_strs = d_splits[idx]; int emptysize = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); if( dcount==0 || dsizes[0]==emptysize ) { d_strs[0] = custring_view::create_from(buffer,buffer,0); return; // empty string } for( int i=0; i < dcount; ++i ) { int size = ALIGN_SIZE(dsizes[i]); d_strs[i] = (custring_view*)buffer; buffer += size; } int sidx = 0, spos = 0, nchars = dstr->chars_count(); //printf(">tokens=%d,dcount=%d,nchars=%d",tokens,dcount,nchars); bool spaces = true; for( int pos=0; (pos < nchars) && (sidx < dcount); ++pos ) { Char ch = dstr->at(pos); if( spaces == (ch <= ' ') ) { if( spaces ) spos = pos+1; continue; } if( !spaces ) { if( (sidx+1)==tokens ) break; d_strs[sidx] = dstr->substr(spos,pos-spos,1,(void*)d_strs[sidx]); //printf(">%d:pos=%d,spos=%d\n",sidx,pos,spos); ++sidx; spos = pos + 1; } spaces = !spaces; } if( (sidx < dcount) && (spos < nchars) ) { d_strs[sidx] = dstr->substr(spos,nchars-spos,1,(void*)d_strs[sidx]); //printf(">%d:spos=%d,nchars=%d\n",sidx,spos,nchars); } }); // printCudaError(cudaDeviceSynchronize(),"nvs-split_record_ws"); return totalNewStrings; } // // This is just the split-from-the-right version of above. 
// int NVStrings::rsplit_record( const char* delimiter, int maxsplit, std::vector<NVStrings*>& results) { if( delimiter==0 ) return rsplit_record(maxsplit,results); auto execpol = rmm::exec_policy(0); unsigned int dellen = (unsigned int)strlen(delimiter); char* d_delimiter = device_alloc<char>(dellen+1,0); CUDA_TRY( cudaMemcpyAsync(d_delimiter,delimiter,dellen+1,cudaMemcpyHostToDevice)) int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); // build int arrays to hold each string's split size int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); rmm::device_vector<int> sizes(totalSizes,0), offsets(count,0), totals(count,0); thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); int* d_offsets = offsets.data().get(); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, dellen, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int dcount = d_counts[idx]; int* dsizes = d_sizes + d_offsets[idx]; d_totals[idx] = dstr->rsplit_size(d_delimiter,dellen,dsizes,dcount); }); cudaDeviceSynchronize(); // now build an array of custring_views* arrays for each value int totalNewStrings = 0; thrust::host_vector<int> h_counts(counts); thrust::host_vector<int> h_totals(totals); thrust::host_vector<custring_view_array> h_splits(count,nullptr); thrust::host_vector<char*> h_buffers(count,nullptr); for( unsigned int idx=0; idx < count; 
++idx ) { int splitCount = h_counts[idx]; if( splitCount==0 ) { results.push_back(0); continue; } NVStrings* splitResult = new NVStrings(splitCount); results.push_back(splitResult); h_splits[idx] = splitResult->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; totalNewStrings += splitCount; } // rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the splits and fill in the arrays thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, dellen, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int d_count = d_counts[idx]; if( d_count < 1 ) return; char* buffer = (char*)d_buffers[idx]; int* dsizes = d_sizes + d_offsets[idx]; custring_view_array d_strs = d_splits[idx]; for( int i=0; i < d_count; ++i ) { d_strs[i] = (custring_view*)buffer; int size = ALIGN_SIZE(dsizes[i]); buffer += size; //printf("%d:%d=%d\n",(int)idx,i,size); } dstr->rsplit(d_delimiter,dellen,d_count,d_strs); }); // printCudaError(cudaDeviceSynchronize(),"nvs-rsplit_record"); RMM_FREE(d_delimiter,0); return totalNewStrings; } // // And the whitespace-delimited version of rsplit_record // int NVStrings::rsplit_record( int maxsplit, std::vector<NVStrings*>& results) { auto execpol = rmm::exec_policy(0); int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned 
int>(0), count, whitespace_token_counter(d_strings,tokens,d_counts)); // build int arrays to hold each string's split size int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); rmm::device_vector<int> sizes(totalSizes,0), offsets(count,0), totals(count,0); thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); int* d_offsets = offsets.data().get(); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, tokens, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int* dsizes = d_sizes + d_offsets[idx]; int dcount = d_counts[idx]; int sidx = (dcount-1), nchars = dstr->chars_count(); int bytes = 0, epos = nchars; //printf("tokens=%d,dcount=%d,nchars=%d\n",tokens,dcount,nchars); bool spaces = true; for( int pos=nchars; (pos>0) && (sidx>=0); --pos ) { Char ch = dstr->at(pos-1); if( spaces == (ch <= ' ') ) { if( spaces ) epos = pos-1; continue; } if( !spaces ) { if( (dcount-sidx)==tokens ) break; int size = dstr->substr_size(pos,epos-pos); dsizes[sidx--] = size; //printf("%d:pos=%d,epos=%d,size=%d\n",(sidx+1),pos,epos,size); bytes += ALIGN_SIZE(size); epos = pos-1; } spaces = !spaces; } if( sidx==0 ) { int size = 0; if( epos > 0 ) size = dstr->substr_size(0,epos); else size = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); //printf("%d:epos=%d,size=%d\n",sidx,epos,size); dsizes[sidx] = size; bytes += ALIGN_SIZE(size); } //printf("bytes=%d\n",bytes); d_totals[idx] = bytes; }); cudaDeviceSynchronize(); // now build an array of custring_views* arrays for each value int totalNewStrings = 0; thrust::host_vector<int> h_counts(counts); thrust::host_vector<int> h_totals(totals); thrust::host_vector<custring_view_array> h_splits(count,nullptr); thrust::host_vector<char*> h_buffers(count,nullptr); for( unsigned int idx=0; idx < 
count; ++idx ) { int splitCount = h_counts[idx]; if( splitCount==0 ) { results.push_back(0); continue; } NVStrings* splitResult = new NVStrings(splitCount); results.push_back(splitResult); h_splits[idx] = splitResult->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; totalNewStrings += splitCount; } // rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the splits and fill in the arrays thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, tokens, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int dcount = d_counts[idx]; char* buffer = (char*)d_buffers[idx]; int* dsizes = d_sizes + d_offsets[idx]; custring_view_array d_strs = d_splits[idx]; int emptysize = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); if( dcount==0 || dsizes[0]==emptysize ) { d_strs[0] = custring_view::create_from(buffer,buffer,0); return; // empty string } for( int i=0; i < dcount; ++i ) { int size = ALIGN_SIZE(dsizes[i]); d_strs[i] = (custring_view*)buffer; buffer += size; } int sidx = (dcount-1), nchars = dstr->chars_count(); int epos = nchars; //printf(">tokens=%d,dcount=%d,nchars=%d\n",tokens,dcount,nchars); bool spaces = true; for( int pos=nchars; (pos > 0) && (sidx >= 0); --pos ) { Char ch = dstr->at(pos-1); if( spaces == (ch <= ' ') ) { if( spaces ) epos = pos-1; continue; } if( !spaces ) { if( (dcount-sidx)==tokens ) break; d_strs[sidx] = dstr->substr(pos,epos-pos,1,(void*)d_strs[sidx]); //printf(">%d:pos=%d,epos=%d\n",sidx,pos,epos); --sidx; epos = pos-1; } spaces = !spaces; } if( (sidx>=0) && (epos > 0) ) { d_strs[sidx] = 
dstr->substr(0,epos,1,(void*)d_strs[sidx]); //printf(">%d:epos=%d\n",sidx,epos); } }); // printCudaError(cudaDeviceSynchronize(),"nvs-rsplit_record_ws"); return totalNewStrings; } // // This will create new columns by splitting the array of strings vertically. // All the first tokens go in the first column, all the second tokens go in the second column, etc. // It is comparable to Pandas split with expand=True but the rows/columns are transposed. // Example: // import pandas as pd // pd_series = pd.Series(['', None, 'a_b', '_a_b_', '__aa__bb__', '_a__bbb___c', '_aa_b__ccc__']) // print(pd_series.str.split(pat='_', expand=True)) // 0 1 2 3 4 5 6 // 0 '' None None None None None None // 1 None None None None None None None // 2 a b None None None None None // 3 '' a b '' None None None // 4 '' '' aa '' bb '' '' // 5 '' a '' bbb '' '' c // 6 '' aa b '' ccc '' '' // // print(pd_series.str.split(pat='_', n=1, expand=True)) // 0 1 // 0 '' None // 1 None None // 2 a b // 3 '' a_b_ // 4 '' _aa__bb__ // 5 '' a__bbb___c // 6 '' aa_b__ccc__ // // print(pd_series.str.split(pat='_', n=2, expand=True)) // 0 1 2 // 0 '' None None // 1 None None None // 2 a b None // 3 '' a b_ // 4 '' aa__bb__ // 5 '' a _bbb___c // 6 '' aa b__ccc__ // unsigned int NVStrings::split( const char* delimiter, int maxsplit, std::vector<NVStrings*>& results) { if( delimiter==0 ) return split(maxsplit,results); auto execpol = rmm::exec_policy(0); unsigned int dellen = (unsigned int)strlen(delimiter); char* d_delimiter = device_alloc<char>(dellen+1,0); CUDA_TRY( cudaMemcpyAsync(d_delimiter,delimiter,dellen+1,cudaMemcpyHostToDevice)) int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned 
int>(0), count, token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); // boundary case: if no columns, return one null column (issue #119) if( columnsCount==0 ) results.push_back(new NVStrings(count)); // create each column for( int col=0; col < columnsCount; ++col ) { // first, build a vector of pair<char*,int>'s' for each column // each pair points to a string for this column for each row //st = GetTime(); rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count); thrust::pair<const char*,size_t>* d_indexes = indexes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, col, d_delimiter, dellen, d_counts, d_indexes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; d_indexes[idx].first = nullptr; // initialize to d_indexes[idx].second = 0; // null string if( !dstr ) return; // dcount already accounts for the maxsplit value int dcount = d_counts[idx]; if( col >= dcount ) return; // passed the end for this string // skip delimiters until we reach this column int dchars = custring_view::chars_in_string(d_delimiter,dellen); int spos = 0, nchars = dstr->chars_count(); int epos = nchars; for( int c=0; c < (dcount-1); ++c ) { epos = dstr->find(d_delimiter,dellen,spos); if( epos < 0 ) { epos = nchars; break; } if( c==col ) // found our column break; spos = epos + dchars; epos = nchars; } // this will be the string for this column if( spos < epos ) { spos = dstr->byte_offset_for(spos); // convert char pos epos = dstr->byte_offset_for(epos); // to byte offset d_indexes[idx].first = dstr->data() + spos; d_indexes[idx].second = (epos-spos); } else { // this will create empty string instead of null one d_indexes[idx].first = dstr->data(); } }); //cudaError_t err = cudaDeviceSynchronize(); //if( err != cudaSuccess ) //{ // fprintf(stderr,"nvs-split(%s,%d), col=%d\n",delimiter,maxsplit,col); 
// printCudaError(err); //} // NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count); results.push_back(column); } // RMM_FREE(d_delimiter,0); return (unsigned int)results.size(); } // // This is the whitespace-delimiter version of the column split function. // Like the one above, it can be compared to Pandas split with expand=True but // with the rows/columns transposed. // // import pandas as pd // pd_series = pd.Series(['', None, 'a b', ' a b ', ' aa bb ', ' a bbb c', ' aa b ccc ']) // print(pd_series.str.split(pat=None, expand=True)) // 0 1 2 // 0 None None None // 1 None None None // 2 a b None // 3 a b None // 4 aa bb None // 5 a bbb c // 6 aa b ccc // // print(pd_series.str.split(pat=None, n=1, expand=True)) // 0 1 // 0 None None // 1 None None // 2 a b // 3 a b // 4 aa bb // 5 a bbb c // 6 aa b ccc // // print(pd_series.str.split(pat=None, n=2, expand=True)) // 0 1 2 // 0 None None None // 1 None None None // 2 a b None // 3 a b None // 4 aa bb None // 5 a bbb c // 6 aa b ccc // // Like the split_record method, there are no empty strings here. 
// unsigned int NVStrings::split( int maxsplit, std::vector<NVStrings*>& results) { auto execpol = rmm::exec_policy(0); int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, whitespace_token_counter(d_strings,tokens,d_counts)); int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); // boundary case: if no columns, return one null column (issue #119) if( columnsCount==0 ) results.push_back(new NVStrings(count)); // create each column for( int col=0; col < columnsCount; ++col ) { // first, build a vector of pair<char*,int>'s' for each column // each pair points to a string for this column for each row //st = GetTime(); rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count); thrust::pair<const char*,size_t>* d_indexes = indexes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, col, tokens, d_counts, d_indexes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; d_indexes[idx].first = nullptr; // initialize to d_indexes[idx].second = 0; // null string if( !dstr ) return; // null string int dcount = d_counts[idx]; if( col >= dcount ) return; int c = 0, nchars = dstr->chars_count(); int spos = 0, epos = nchars; //printf(">%d:tokens=%d,dcount=%d,nchars=%d\n",col,tokens,dcount,nchars); bool spaces = true; for( int pos=0; pos < nchars; ++pos ) { Char ch = dstr->at(pos); if( spaces == (ch <= ' ') ) { if( spaces ) spos = pos+1; else epos = pos+1; continue; } if( !spaces ) { epos = nchars; if( (c+1)==tokens ) break; epos = pos; if( c==col ) break; spos = pos+1; epos = nchars; ++c; } spaces = !spaces; } if( spos < 
epos ) { spos = dstr->byte_offset_for(spos); // convert char pos epos = dstr->byte_offset_for(epos); // to byte offset //printf(">%d:spos=%d,epos=%d\n",c,spos,epos); d_indexes[idx].first = dstr->data() + spos; d_indexes[idx].second = (epos-spos); } //else //{ no empty strings in split-column-whitespace // d_indexes[idx].first = dstr->data(); //} }); //cudaError_t err = cudaDeviceSynchronize(); //if( err != cudaSuccess ) //{ // fprintf(stderr,"nvs-split-ws(%d), col=%d\n",maxsplit,col); // printCudaError(err); //} // NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count); results.push_back(column); } // return (unsigned int)results.size(); } // // The split-from-the-right version of split // unsigned int NVStrings::rsplit( const char* delimiter, int maxsplit, std::vector<NVStrings*>& results) { if( delimiter==0 ) return rsplit(maxsplit,results); auto execpol = rmm::exec_policy(0); unsigned int dellen = (unsigned int)strlen(delimiter); char* d_delimiter = device_alloc<char>(dellen+1,0); CUDA_TRY( cudaMemcpyAsync(d_delimiter,delimiter,dellen+1,cudaMemcpyHostToDevice)) int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); // boundary case: if no columns, return one null column (issue #119) if( columnsCount==0 ) results.push_back(new NVStrings(count)); // create each column for( int col = 0; col < columnsCount; ++col ) { // first, build a vector of pair<char*,int>'s' for each column // each pair points to a string for this column for each row 
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count); thrust::pair<const char*,size_t>* d_indexes = indexes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, col, d_delimiter, dellen, d_counts, d_indexes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; d_indexes[idx].first = nullptr; // initialize to d_indexes[idx].second = 0; // null string if( !dstr ) return; // dcount already accounts for the maxsplit value int dcount = d_counts[idx]; if( col >= dcount ) return; // passed the end for this string // skip delimiters until we reach this column int dchars = custring_view::chars_in_string(d_delimiter,dellen); int spos = 0, nchars = dstr->chars_count(); int epos = nchars, pos = dstr->size()-1; for( int c=(dcount-1); c > 0; --c ) { spos = dstr->rfind(d_delimiter,dellen,0,epos); if( spos < 0 ) { spos = 0; break; } if( c==col ) // found our column { spos += dchars; // do not include delimiter break; } epos = spos; spos = 0; } // this will be the string for this column if( spos < epos ) { spos = dstr->byte_offset_for(spos); // convert char pos epos = dstr->byte_offset_for(epos); // to byte offset d_indexes[idx].first = dstr->data() + spos; d_indexes[idx].second = (epos-spos); } else { // this will create empty string instead of null one d_indexes[idx].first = dstr->data(); } }); //cudaError_t err = cudaDeviceSynchronize(); //if( err != cudaSuccess ) //{ // fprintf(stderr,"nvs-rsplit(%s,%d)\n",delimiter,maxsplit); // printCudaError(err); //} // NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count); results.push_back(column); } // RMM_FREE(d_delimiter,0); return (unsigned int)results.size(); } // // The whitespace-delimited version of rsplit. 
// unsigned int NVStrings::rsplit( int maxsplit, std::vector<NVStrings*>& results) { auto execpol = rmm::exec_policy(0); int tokens = 0; if( maxsplit > 0 ) tokens = maxsplit + 1; // makes consistent with Pandas // need to count how many output strings per string unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, whitespace_token_counter(d_strings,tokens,d_counts)); int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); // boundary case: if no columns, return one null column (issue #119) if( columnsCount==0 ) results.push_back(new NVStrings(count)); // create each column for( int col = 0; col < columnsCount; ++col ) { // first, build a vector of pair<char*,int>'s' for each column // each pair points to a string for this column for each row rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count); thrust::pair<const char*,size_t>* d_indexes = indexes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, col, columnsCount, tokens, d_counts, d_indexes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; d_indexes[idx].first = nullptr; // initialize to d_indexes[idx].second = 0; // null string if( !dstr ) return; // null string int dcount = d_counts[idx]; if( col >= dcount ) return; int c = (dcount-1), nchars = dstr->chars_count(); int spos = 0, epos = nchars; //printf(">%d:tokens=%d,dcount=%d,nchars=%d\n",col,tokens,dcount,nchars); bool spaces = true; for( int pos=nchars; pos > 0; --pos ) { Char ch = dstr->at(pos-1); if( spaces == (ch <= ' ') ) { if( spaces ) epos = pos-1; else spos = pos-1; continue; } if( !spaces ) { spos = 0; if( (columnsCount-c)==tokens ) break; spos = pos; if( c==col ) break; epos = pos-1; spos = 0; --c; } spaces = !spaces; } if( 
spos < epos ) { spos = dstr->byte_offset_for(spos); // convert char pos epos = dstr->byte_offset_for(epos); // to byte offset //printf(">%d:spos=%d,epos=%d\n",c,spos,epos); d_indexes[idx].first = dstr->data() + spos; d_indexes[idx].second = (epos-spos); } //else //{ no empty strings in rsplit column whitespace // d_indexes[idx].first = dstr->data(); //} }); //cudaError_t err = cudaDeviceSynchronize(); //if( err != cudaSuccess ) //{ // fprintf(stderr,"nvs-rsplit-ws(%d)\n",maxsplit); // printCudaError(err); //} // NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count); results.push_back(column); } // return (unsigned int)results.size(); } // // Partition is split the string at the first occurrence of delimiter, and return 3 elements containing // the part before the delimiter, the delimiter itself, and the part after the delimiter. // If the delimiter is not found, return 3 elements containing the string itself, followed by two empty strings. // // >>> import pandas as pd // >>> strs = pd.Series(['héllo', None, 'a_bc_déf', 'a__bc', '_ab_cd', 'ab_cd_']) // >>> strs.str.partition('_') // 0 1 2 // 0 héllo // 1 None None None // 2 a _ bc_déf // 3 a _ _bc // 4 _ ab_cd // 5 ab _ cd_ // int NVStrings::partition( const char* delimiter, std::vector<NVStrings*>& results) { if( delimiter==0 ) return 0; unsigned int bytes = (unsigned int)strlen(delimiter); if( bytes==0 ) return 0; // just return original list? 
auto execpol = rmm::exec_policy(0); // copy delimiter to device char* d_delimiter = device_alloc<char>(bytes,0); CUDA_TRY( cudaMemcpyAsync(d_delimiter,delimiter,bytes,cudaMemcpyHostToDevice)) int d_asize = custring_view::alloc_size((char*)delimiter,bytes); d_asize = ALIGN_SIZE(d_asize); unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); // build int arrays to hold each string's partition sizes int totalSizes = 2 * count; rmm::device_vector<int> sizes(totalSizes,0), totals(count,0); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count, [d_strings, d_delimiter, bytes, d_asize, d_sizes, d_totals] __device__(size_t idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int* dsizes = &(d_sizes[idx*2]); d_totals[idx] = dstr->split_size(d_delimiter,bytes,dsizes,2) + d_asize; }); cudaDeviceSynchronize(); // build an output array of custring_views* arrays for each value // there will always be 3 per string thrust::host_vector<int> h_totals(totals); thrust::host_vector<char*> h_buffers(count,nullptr); thrust::host_vector<custring_view_array> h_splits(count,nullptr); for( unsigned int idx=0; idx < count; ++idx ) { NVStrings* result = new NVStrings(3); results.push_back(result); h_splits[idx] = result->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); result->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; } rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the partition and fill in the arrays thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, bytes, d_buffers, d_sizes, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; 
if( !dstr ) return; char* buffer = (char*)d_buffers[idx]; int* dsizes = &(d_sizes[idx*2]); custring_view_array d_strs = d_splits[idx]; d_strs[0] = custring_view::create_from(buffer,0,0); buffer += ALIGN_SIZE(dsizes[0]); d_strs[1] = custring_view::create_from(buffer,0,0); buffer += ALIGN_SIZE(dsizes[1]); d_strs[2] = custring_view::create_from(buffer,0,0); // int dcount = dstr->rsplit_size(d_delimiter,bytes,0,2); dstr->split(d_delimiter,bytes,2,d_strs); if( dcount==2 ) { // insert delimiter element in the middle custring_view* tmp = d_strs[1]; d_strs[1] = custring_view::create_from(buffer,d_delimiter,bytes); d_strs[2] = tmp; } }); printCudaError(cudaDeviceSynchronize(),"nvs-partition"); RMM_FREE(d_delimiter,0); return count; } // // This follows most of the same logic as partition above except that the delimiter // search starts from the end of the string. Also, if no delimiter is found the // resulting array includes two empty strings followed by the original string. // // >>> import pandas as pd // >>> strs = pd.Series(['héllo', None, 'a_bc_déf', 'a__bc', '_ab_cd', 'ab_cd_']) // >>> strs.str.rpartition('_') // 0 1 2 // 0 héllo // 1 None None None // 2 a_bc _ déf // 3 a_ _ bc // 4 _ab _ cd // 5 ab_cd _ // int NVStrings::rpartition( const char* delimiter, std::vector<NVStrings*>& results) { if( delimiter==0 ) return 0; unsigned int bytes = (unsigned int)strlen(delimiter); if( bytes==0 ) return 0; // just return original list? 
auto execpol = rmm::exec_policy(0); // copy delimiter to device char* d_delimiter = device_alloc<char>(bytes,0); CUDA_TRY( cudaMemcpyAsync(d_delimiter,delimiter,bytes,cudaMemcpyHostToDevice)) int d_asize = custring_view::alloc_size((char*)delimiter,bytes); d_asize = ALIGN_SIZE(d_asize); unsigned int count = size(); custring_view** d_strings = pImpl->getStringsPtr(); // build int arrays to hold each string's partition sizes int totalSizes = 2 * count; rmm::device_vector<int> sizes(totalSizes,0), totals(count,0); int* d_sizes = sizes.data().get(); int* d_totals = totals.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, bytes, d_asize, d_sizes, d_totals] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; int* dsizes = &(d_sizes[idx*2]); //d_totals[idx] = dstr->rpartition_size(d_delimiter,bytes,dsizes); d_totals[idx] = dstr->rsplit_size(d_delimiter,bytes,dsizes,2) + d_asize; }); cudaDeviceSynchronize(); // now build an output array of custring_views* arrays for each value // there will always be 3 per string thrust::host_vector<int> h_totals(totals); thrust::host_vector<char*> h_buffers(count,nullptr); thrust::host_vector<custring_view_array> h_splits(count,nullptr); for( unsigned int idx=0; idx < count; ++idx ) { NVStrings* result = new NVStrings(3); results.push_back(result); h_splits[idx] = result->pImpl->getStringsPtr(); int totalSize = h_totals[idx]; char* d_buffer = device_alloc<char>(totalSize,0); result->pImpl->setMemoryBuffer(d_buffer,totalSize); h_buffers[idx] = d_buffer; } rmm::device_vector<custring_view_array> splits(h_splits); custring_view_array* d_splits = splits.data().get(); rmm::device_vector<char*> buffers(h_buffers); char** d_buffers = buffers.data().get(); // do the partition and fill in the arrays thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_delimiter, bytes, d_buffers, 
d_sizes, d_splits] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; char* buffer = (char*)d_buffers[idx]; int* dsizes = &(d_sizes[idx*2]); custring_view_array d_strs = d_splits[idx]; d_strs[0] = custring_view::create_from(buffer,0,0); buffer += ALIGN_SIZE(dsizes[0]); d_strs[1] = custring_view::create_from(buffer,0,0); buffer += ALIGN_SIZE(dsizes[1]); d_strs[2] = custring_view::create_from(buffer,0,0); // int dcount = dstr->rsplit_size(d_delimiter,bytes,0,2); dstr->rsplit(d_delimiter,bytes,2,d_strs); // reorder elements if( dcount==1 ) { // if only one element, it goes on the end custring_view* tmp = d_strs[2]; d_strs[2] = d_strs[0]; d_strs[0] = tmp; } if( dcount==2 ) { // insert delimiter element in the middle custring_view* tmp = d_strs[1]; d_strs[1] = custring_view::create_from(buffer,d_delimiter,bytes); d_strs[2] = tmp; } }); printCudaError(cudaDeviceSynchronize(),"nvs-rpartition"); RMM_FREE(d_delimiter,0); return count; }
a38d4ac09e63954453274e40012c440e15c8b855.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file pyramid.cu * \brief Classes for computing image pyramid. * \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details * \license 3-clause BSD, see LICENSE for more details */ #include <iostream> #include <exception> #include <flowfilter/gpu/util.h> #include "spherepix/gpu/pyramid.h" #include "spherepix/gpu/device/pyramid_k.h" namespace spherepix { namespace gpu { ImagePyramid::ImagePyramid() { __configured = false; __inputImageSet = false; __levels = 0; } ImagePyramid::ImagePyramid(PixelationFace face): ImagePyramid() { __face = face; } ImagePyramid::ImagePyramid(PixelationFace face, PixelationFaceImage inputImage, const int levels): ImagePyramid(face) { setInputImage(inputImage); setLevels(levels); configure(); } ImagePyramid::~ImagePyramid() { // nothing to do } void ImagePyramid::configure() { if(!__inputImageSet) { std::cerr << "ERROR: ImageModel::configure(): input image has not been set" << std::endl; throw std::exception(); } int height = __inputImage.height(); int width = __inputImage.width(); int depth = __inputImage.depth(); PixelType pixtype = __inputImage.pixelType(); __block = dim3(32, 32, 1); __pyramidB1.resize(__levels -1); __gridB1.resize(__levels -1); __pyramidB2.resize(__levels); __gridB2.resize(__levels); __pyramidB2[0] = __inputImage; dim3 gb1(0,0,0); dim3 gb2(0,0,0); configureKernelGrid(height, width, __block, gb2); __gridB2[0] = gb2; // for levels 0 to H - 2 for(int h = 0; h < __levels -1; h ++) { // downsampling in beta-2 (column) width /= 2; PixelationFaceImage img_b1(height, width, depth, pixtype); img_b1.clear(); __pyramidB1[h] = img_b1; configureKernelGrid(height, width, __block, gb1); // __gridB1.push_back(gb1); __gridB1[h] = gb1; // downsampling in beta-1 (row) height /= 2; PixelationFaceImage img_b2(height, width, depth, pixtype); img_b2.clear(); __pyramidB2[h+1] = img_b2; configureKernelGrid(height, width, __block, gb2); 
__gridB2[h+1] = gb2; } __configured = true; } void ImagePyramid::compute() { startTiming(); if(!__configured) { std::cerr << "ERROR: ImagePyramid::compute() stage not configured." << std::endl; exit(-1); } if(__inputImage.pixelType() == UINT8) { for(int h = 0; h < __levels -1; h ++) { hipLaunchKernelGGL(( imageDownB1_k<unsigned char>) , dim3(__gridB1[h]), dim3(__block), 0, __stream, __pyramidB2[h].wrap<unsigned char>(), __pyramidB1[h].wrap<unsigned char>()); hipLaunchKernelGGL(( imageDownB2_k<unsigned char>) , dim3(__gridB2[h]), dim3(__block), 0, __stream, __pyramidB1[h].wrap<unsigned char>(), __pyramidB2[h +1].wrap<unsigned char>()); } } else if(__inputImage.pixelType() == FLOAT32) { for(int h = 0; h < __levels -1; h ++) { hipLaunchKernelGGL(( imageDownB1_k<float>) , dim3(__gridB1[h]), dim3(__block), 0, __stream, __pyramidB2[h].wrap<float>(), __pyramidB1[h].wrap<float>()); hipLaunchKernelGGL(( imageDownB2_k<float>) , dim3(__gridB2[h]), dim3(__block), 0, __stream, __pyramidB1[h].wrap<float>(), __pyramidB2[h +1].wrap<float>()); } } stopTiming(); } //######################### // Stage inputs //######################### void ImagePyramid::setInputImage(PixelationFaceImage inputImage) { const int height = inputImage.height(); const int width = inputImage.width(); if(height != __face.height() || width != __face.width()) { std::cerr << "ERROR: ImagePyramid::setInputImage(): input image shape different than pixelation face. 
" << "required: [" << __face.height() << ", " << __face.width() << "] passed: [" << height << ", " << width << "]" << std::endl; throw std::exception(); } if(inputImage.depth() != 1) { std::cerr << "ERROR: ImagePyramid::setInputImage(): input image should have depth 1, got: " << inputImage.depth() << std::endl; throw std::exception(); } __inputImage = inputImage; __inputImageSet = true; } //######################### // Stage outputs //######################### PixelationFaceImage ImagePyramid::getImage(const int level) { if(level < 0 || level >= __levels){ std::cerr << "ERROR: ImagePyramid::getImage(): level index out of range: " << level << std::endl; throw std::exception(); } return __pyramidB2[level]; } //######################### // Parameters //######################### void ImagePyramid::setLevels(const int levels) { if(levels <= 0) { std::cerr << "ERROR: ImagePyramid::setLevels(): " << "levels should be greater than zero: " << levels << std::endl; throw std::exception(); } __levels = levels; } int ImagePyramid::getLevels() const { return __levels; } } // namespace gpu } // namespace spherepix
a38d4ac09e63954453274e40012c440e15c8b855.cu
/** * \file pyramid.cu * \brief Classes for computing image pyramid. * \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details * \license 3-clause BSD, see LICENSE for more details */ #include <iostream> #include <exception> #include <flowfilter/gpu/util.h> #include "spherepix/gpu/pyramid.h" #include "spherepix/gpu/device/pyramid_k.h" namespace spherepix { namespace gpu { ImagePyramid::ImagePyramid() { __configured = false; __inputImageSet = false; __levels = 0; } ImagePyramid::ImagePyramid(PixelationFace face): ImagePyramid() { __face = face; } ImagePyramid::ImagePyramid(PixelationFace face, PixelationFaceImage inputImage, const int levels): ImagePyramid(face) { setInputImage(inputImage); setLevels(levels); configure(); } ImagePyramid::~ImagePyramid() { // nothing to do } void ImagePyramid::configure() { if(!__inputImageSet) { std::cerr << "ERROR: ImageModel::configure(): input image has not been set" << std::endl; throw std::exception(); } int height = __inputImage.height(); int width = __inputImage.width(); int depth = __inputImage.depth(); PixelType pixtype = __inputImage.pixelType(); __block = dim3(32, 32, 1); __pyramidB1.resize(__levels -1); __gridB1.resize(__levels -1); __pyramidB2.resize(__levels); __gridB2.resize(__levels); __pyramidB2[0] = __inputImage; dim3 gb1(0,0,0); dim3 gb2(0,0,0); configureKernelGrid(height, width, __block, gb2); __gridB2[0] = gb2; // for levels 0 to H - 2 for(int h = 0; h < __levels -1; h ++) { // downsampling in beta-2 (column) width /= 2; PixelationFaceImage img_b1(height, width, depth, pixtype); img_b1.clear(); __pyramidB1[h] = img_b1; configureKernelGrid(height, width, __block, gb1); // __gridB1.push_back(gb1); __gridB1[h] = gb1; // downsampling in beta-1 (row) height /= 2; PixelationFaceImage img_b2(height, width, depth, pixtype); img_b2.clear(); __pyramidB2[h+1] = img_b2; configureKernelGrid(height, width, __block, gb2); __gridB2[h+1] = gb2; } __configured = true; } void ImagePyramid::compute() { startTiming(); 
if(!__configured) { std::cerr << "ERROR: ImagePyramid::compute() stage not configured." << std::endl; exit(-1); } if(__inputImage.pixelType() == UINT8) { for(int h = 0; h < __levels -1; h ++) { imageDownB1_k<unsigned char> <<<__gridB1[h], __block, 0, __stream>>>( __pyramidB2[h].wrap<unsigned char>(), __pyramidB1[h].wrap<unsigned char>()); imageDownB2_k<unsigned char> <<<__gridB2[h], __block, 0, __stream>>>( __pyramidB1[h].wrap<unsigned char>(), __pyramidB2[h +1].wrap<unsigned char>()); } } else if(__inputImage.pixelType() == FLOAT32) { for(int h = 0; h < __levels -1; h ++) { imageDownB1_k<float> <<<__gridB1[h], __block, 0, __stream>>>( __pyramidB2[h].wrap<float>(), __pyramidB1[h].wrap<float>()); imageDownB2_k<float> <<<__gridB2[h], __block, 0, __stream>>>( __pyramidB1[h].wrap<float>(), __pyramidB2[h +1].wrap<float>()); } } stopTiming(); } //######################### // Stage inputs //######################### void ImagePyramid::setInputImage(PixelationFaceImage inputImage) { const int height = inputImage.height(); const int width = inputImage.width(); if(height != __face.height() || width != __face.width()) { std::cerr << "ERROR: ImagePyramid::setInputImage(): input image shape different than pixelation face. 
" << "required: [" << __face.height() << ", " << __face.width() << "] passed: [" << height << ", " << width << "]" << std::endl; throw std::exception(); } if(inputImage.depth() != 1) { std::cerr << "ERROR: ImagePyramid::setInputImage(): input image should have depth 1, got: " << inputImage.depth() << std::endl; throw std::exception(); } __inputImage = inputImage; __inputImageSet = true; } //######################### // Stage outputs //######################### PixelationFaceImage ImagePyramid::getImage(const int level) { if(level < 0 || level >= __levels){ std::cerr << "ERROR: ImagePyramid::getImage(): level index out of range: " << level << std::endl; throw std::exception(); } return __pyramidB2[level]; } //######################### // Parameters //######################### void ImagePyramid::setLevels(const int levels) { if(levels <= 0) { std::cerr << "ERROR: ImagePyramid::setLevels(): " << "levels should be greater than zero: " << levels << std::endl; throw std::exception(); } __levels = levels; } int ImagePyramid::getLevels() const { return __levels; } } // namespace gpu } // namespace spherepix
6536b535eba0e0b99f2b7f99a0ca59b37d1698a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; bools[index] = idata[index] ? 1 : 0; } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; if (bools[index]) { odata[indices[index]] = idata[index]; } } } }
6536b535eba0e0b99f2b7f99a0ca59b37d1698a5.cu
#include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; bools[index] = idata[index] ? 1 : 0; } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= n) return; if (bools[index]) { odata[indices[index]] = idata[index]; } } } }
d8d7eaefaa565579b6cedd03a969c2d72742be6a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "direct_conv.h" using namespace std; __global__ void pad_input(float* f_in, float* f_out, int H, int W, int D, int pad) { int col = blockIdx.x*blockDim.x+threadIdx.x; int row = blockIdx.y*blockDim.y+threadIdx.y; int dep = blockIdx.z*blockDim.z+threadIdx.z; int new_H = H+2*pad; int new_W = W+2*pad; int i = dep * new_H * new_W + col * new_W + row; int j = dep * H * W + (col - pad) *W+ (row - pad); if(col < new_H && row < new_W && dep < D) { if((col < pad || col > H+pad-1) || (row < pad || row > W+pad-1)) f_out[i] = 0; else f_out[i] = f_in[j]; } } /*parallelization code */ __global__ void direct_convolution(int input_channels, int input_height, int input_width, int out_channels, int kernel_height,int kernel_width, int padding, int stride, int H_out, int W_out, int W_grid, int tile_w, float* X, float* W_filter, float* Y) { int n , m , h , w , c , p , q; n = blockIdx.x; m = blockIdx.y; h = (blockIdx.z / W_grid)*tile_w + threadIdx.y; w = (blockIdx.z % W_grid)*tile_w + threadIdx.x; input_height = input_height+padding; input_width = input_width+padding; if(h<H_out && w<W_out) { float temp=0; for(c = 0; c < input_channels; c++) { for(p = 0; p < kernel_height; p++) { for(q = 0; q < kernel_width; q++) { temp = temp + X[ n*(input_channels*input_height*input_width) + c*(input_height*input_width) + (h*stride+p)*(input_width) + (w*stride+q)] * W_filter[ m*(input_channels*kernel_height*kernel_width) + c*(kernel_height*kernel_width) + p*(kernel_height) + q]; } } } Y[n*(out_channels*H_out*W_out) + m*(H_out*W_out) + h*(W_out) + w] = temp; } } /*forward pass function declared in direc_conv.hpp library*/ float* Direct::passforward(int out_channels, int input_channels, int kernel_height, int kernel_width, int padding, int stride, float* weights,int batchsize_of_data, int input_height, int input_width, float* input, float &conv_time, float& overhead_time) { if(kernel_height > input_height || 
kernel_width > input_width){ cout << "kernel size is too big " << endl; exit(EXIT_FAILURE); } conv_time = 0; overhead_time = 0; float milliseconds = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipError_t err = hipSuccess; /* The rest of the code assumes that padding = x means x/2 on either ends hence the modification */ padding = 2*padding; /* size of matrix with padding*/ int size_input_matrix = batchsize_of_data * input_channels * (input_height+padding) * (input_width+padding) * sizeof(float); // size of input matrix after padding /* size of kernel matrix */ int size_kernel_matrix = out_channels * input_channels * kernel_height * kernel_width * sizeof(float); // size of input matrix after padding /* calculating size of output matrix*/ int H_out = (input_height - kernel_height + padding + stride)/stride; int W_out = (input_width - kernel_width + padding + stride)/stride; int size_output_matrix = batchsize_of_data * out_channels * H_out * W_out * sizeof(float); /* allocating memory for input matrix with padding */ float *h_X = (float*)malloc(size_input_matrix); /* allocating memory for output matrix */ float *h_Y = (float*)malloc(size_output_matrix); /* memory allocation check*/ if (h_X == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } /* Padding */ int new_input_height = input_height + padding; int new_input_width = input_width + padding; float *pad_input_in = NULL; hipMalloc((void **)&pad_input_in, input_height * input_width * input_channels * sizeof(float)); float *pad_input_out = NULL; hipMalloc((void **)&pad_input_out, new_input_height * new_input_width * input_channels * sizeof(float)); dim3 threads1(1, 1, 1); dim3 grid1(new_input_height, new_input_width, input_channels); for(int i = 0; i < batchsize_of_data; i++) { hipMemcpy(pad_input_in, &input[i * input_channels * input_height * input_width], input_height * input_width * input_channels * sizeof(float) , hipMemcpyHostToDevice); 
hipEventRecord(start); hipLaunchKernelGGL(( pad_input), dim3(grid1),dim3(threads1), 0, 0, pad_input_in, pad_input_out, input_height, input_width, input_channels, padding/2); hipEventRecord(stop); err = hipGetLastError(); if(err!=hipSuccess) { fprintf(stderr, "Failed to launch pad input (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipMemcpy(&h_X[i * input_channels * new_input_height * new_input_width], pad_input_out, new_input_height * new_input_width * input_channels * sizeof(float), hipMemcpyDeviceToHost); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; } hipFree(pad_input_in); hipFree(pad_input_out); float *d_X, *d_Y, *d_W; /*allocating memory for padded matrix in the device*/ err = hipMalloc((void**)&d_X, size_input_matrix); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector d_X (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* copying padded matrix to device */ err = hipMemcpy(d_X , h_X , size_input_matrix , hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector h_X from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* Renaming the kernel weights pointer (input is in device memory) */ err = hipMalloc((void**)&d_W, size_kernel_matrix); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector d_W(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* copying kernel to device */ err = hipMemcpy(d_W , weights , size_kernel_matrix , hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector weights from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /*allocating memory for the output matrix*/ err = hipMalloc((void**)&d_Y, size_output_matrix); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector d_Y (error code %s)!\n", 
hipGetErrorString(err)); exit(EXIT_FAILURE); } /* making sure that 1024 threads isn't crossed*/ int tile_width = 2 , tile_height = 2; int w_grid = ceil((W_out*1.0) / tile_width); int h_grid = ceil((H_out*1.0) / tile_height); int temp = w_grid * h_grid; dim3 grid(batchsize_of_data , out_channels , temp); dim3 block(tile_width , tile_height , 1); hipEventRecord(start); /* calling the direct_convolution kernel */ hipLaunchKernelGGL(( direct_convolution), dim3(grid), dim3(block) , 0, 0, input_channels, input_height, input_width, out_channels, kernel_height, kernel_width, padding, stride, H_out, W_out, w_grid, tile_width, d_X, d_W, d_Y); hipEventRecord(stop); err = hipGetLastError(); /*checking if the device program is executed or not*/ if (err != hipSuccess) { fprintf(stderr, "Failed to launch reduce1 kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); conv_time += milliseconds; /* copying output matrix to host */ err = hipMemcpy(h_Y, d_Y, size_output_matrix, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy output vector from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* releasing all the device and host vectors */ err = hipFree(d_X); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector X (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_Y); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector Y (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_W); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector W (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /*releasing the memory*/ free(h_X); /*Return the CUDA Array*/ return h_Y; }
d8d7eaefaa565579b6cedd03a969c2d72742be6a.cu
#include "direct_conv.h" using namespace std; __global__ void pad_input(float* f_in, float* f_out, int H, int W, int D, int pad) { int col = blockIdx.x*blockDim.x+threadIdx.x; int row = blockIdx.y*blockDim.y+threadIdx.y; int dep = blockIdx.z*blockDim.z+threadIdx.z; int new_H = H+2*pad; int new_W = W+2*pad; int i = dep * new_H * new_W + col * new_W + row; int j = dep * H * W + (col - pad) *W+ (row - pad); if(col < new_H && row < new_W && dep < D) { if((col < pad || col > H+pad-1) || (row < pad || row > W+pad-1)) f_out[i] = 0; else f_out[i] = f_in[j]; } } /*parallelization code */ __global__ void direct_convolution(int input_channels, int input_height, int input_width, int out_channels, int kernel_height,int kernel_width, int padding, int stride, int H_out, int W_out, int W_grid, int tile_w, float* X, float* W_filter, float* Y) { int n , m , h , w , c , p , q; n = blockIdx.x; m = blockIdx.y; h = (blockIdx.z / W_grid)*tile_w + threadIdx.y; w = (blockIdx.z % W_grid)*tile_w + threadIdx.x; input_height = input_height+padding; input_width = input_width+padding; if(h<H_out && w<W_out) { float temp=0; for(c = 0; c < input_channels; c++) { for(p = 0; p < kernel_height; p++) { for(q = 0; q < kernel_width; q++) { temp = temp + X[ n*(input_channels*input_height*input_width) + c*(input_height*input_width) + (h*stride+p)*(input_width) + (w*stride+q)] * W_filter[ m*(input_channels*kernel_height*kernel_width) + c*(kernel_height*kernel_width) + p*(kernel_height) + q]; } } } Y[n*(out_channels*H_out*W_out) + m*(H_out*W_out) + h*(W_out) + w] = temp; } } /*forward pass function declared in direc_conv.hpp library*/ float* Direct::passforward(int out_channels, int input_channels, int kernel_height, int kernel_width, int padding, int stride, float* weights,int batchsize_of_data, int input_height, int input_width, float* input, float &conv_time, float& overhead_time) { if(kernel_height > input_height || kernel_width > input_width){ cout << "kernel size is too big " << endl; 
exit(EXIT_FAILURE); } conv_time = 0; overhead_time = 0; float milliseconds = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaError_t err = cudaSuccess; /* The rest of the code assumes that padding = x means x/2 on either ends hence the modification */ padding = 2*padding; /* size of matrix with padding*/ int size_input_matrix = batchsize_of_data * input_channels * (input_height+padding) * (input_width+padding) * sizeof(float); // size of input matrix after padding /* size of kernel matrix */ int size_kernel_matrix = out_channels * input_channels * kernel_height * kernel_width * sizeof(float); // size of input matrix after padding /* calculating size of output matrix*/ int H_out = (input_height - kernel_height + padding + stride)/stride; int W_out = (input_width - kernel_width + padding + stride)/stride; int size_output_matrix = batchsize_of_data * out_channels * H_out * W_out * sizeof(float); /* allocating memory for input matrix with padding */ float *h_X = (float*)malloc(size_input_matrix); /* allocating memory for output matrix */ float *h_Y = (float*)malloc(size_output_matrix); /* memory allocation check*/ if (h_X == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } /* Padding */ int new_input_height = input_height + padding; int new_input_width = input_width + padding; float *pad_input_in = NULL; cudaMalloc((void **)&pad_input_in, input_height * input_width * input_channels * sizeof(float)); float *pad_input_out = NULL; cudaMalloc((void **)&pad_input_out, new_input_height * new_input_width * input_channels * sizeof(float)); dim3 threads1(1, 1, 1); dim3 grid1(new_input_height, new_input_width, input_channels); for(int i = 0; i < batchsize_of_data; i++) { cudaMemcpy(pad_input_in, &input[i * input_channels * input_height * input_width], input_height * input_width * input_channels * sizeof(float) , cudaMemcpyHostToDevice); cudaEventRecord(start); pad_input<<<grid1,threads1>>>(pad_input_in, 
pad_input_out, input_height, input_width, input_channels, padding/2); cudaEventRecord(stop); err = cudaGetLastError(); if(err!=cudaSuccess) { fprintf(stderr, "Failed to launch pad input (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaMemcpy(&h_X[i * input_channels * new_input_height * new_input_width], pad_input_out, new_input_height * new_input_width * input_channels * sizeof(float), cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; } cudaFree(pad_input_in); cudaFree(pad_input_out); float *d_X, *d_Y, *d_W; /*allocating memory for padded matrix in the device*/ err = cudaMalloc((void**)&d_X, size_input_matrix); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector d_X (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /* copying padded matrix to device */ err = cudaMemcpy(d_X , h_X , size_input_matrix , cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector h_X from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /* Renaming the kernel weights pointer (input is in device memory) */ err = cudaMalloc((void**)&d_W, size_kernel_matrix); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector d_W(error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /* copying kernel to device */ err = cudaMemcpy(d_W , weights , size_kernel_matrix , cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector weights from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /*allocating memory for the output matrix*/ err = cudaMalloc((void**)&d_Y, size_output_matrix); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector d_Y (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /* making sure that 1024 threads isn't crossed*/ 
int tile_width = 2 , tile_height = 2; int w_grid = ceil((W_out*1.0) / tile_width); int h_grid = ceil((H_out*1.0) / tile_height); int temp = w_grid * h_grid; dim3 grid(batchsize_of_data , out_channels , temp); dim3 block(tile_width , tile_height , 1); cudaEventRecord(start); /* calling the direct_convolution kernel */ direct_convolution<<< grid, block >>>(input_channels, input_height, input_width, out_channels, kernel_height, kernel_width, padding, stride, H_out, W_out, w_grid, tile_width, d_X, d_W, d_Y); cudaEventRecord(stop); err = cudaGetLastError(); /*checking if the device program is executed or not*/ if (err != cudaSuccess) { fprintf(stderr, "Failed to launch reduce1 kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); conv_time += milliseconds; /* copying output matrix to host */ err = cudaMemcpy(h_Y, d_Y, size_output_matrix, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy output vector from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /* releasing all the device and host vectors */ err = cudaFree(d_X); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector X (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_Y); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector Y (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_W); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector W (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /*releasing the memory*/ free(h_X); /*Return the CUDA Array*/ return h_Y; }
a86c231fff38acb6812153129fd6d1a74603ec11.hip
// !!! This is a file automatically generated by hipify!!! #ifndef _CUDA_VTKCUDAVOLUMEMAPPER_RENDERALGO_H #define _CUDA_VTKCUDAVOLUMEMAPPER_RENDERALGO_H #include "CUDA_vtkCudaVolumeMapper_renderAlgo.h" #include <hip/hip_runtime.h> #include <iostream> #define BLOCK_DIM2D 16 //16 is optimal, 4 is the minimum and 16 is the maximum //execution parameters and general information __constant__ cudaVolumeInformation volInfo; __constant__ cudaRendererInformation renInfo; __constant__ cudaOutputImageInformation outInfo; __constant__ float dRandomRayOffsets[BLOCK_DIM2D*BLOCK_DIM2D]; //texture element information for the ZBuffer hipArray* ZBufferArray = 0; texture<float, 2, hipReadModeElementType> zbuffer_texture; #define bindSingle2DTexture( textureToBind, value) textureToBind.normalized = true; \ textureToBind.filterMode = hipFilterModePoint; \ textureToBind.addressMode[0] = hipAddressModeClamp; \ textureToBind.addressMode[1] = hipAddressModeClamp; \ hipBindTextureToArray(textureToBind, value, channelDesc); #define load2DArray(array, values, s, tr) if(array) hipFreeArray(array); \ hipMallocArray( &array, &channelDesc, s, s); \ cudaMemcpyToArrayAsync(array, 0, 0, values, sizeof(float)*s*s, \ hipMemcpyHostToDevice, tr); #define unloadArray(a) if(a); hipFreeArray(a); a = 0; //channel for loading input data and transfer functions hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float2>(); __device__ void CUDAkernel_FindKeyholeValues(float3 rayStart, float3 rayInc, float& numSteps, float& excludeStart, float& excludeEnd ) { __syncthreads(); const int numPlanes = renInfo.NumberOfKeyholePlanes; __syncthreads(); //create a rayEnd holder float3 oldRayStart = rayStart; float3 rayDir; rayDir.x = numSteps * rayInc.x; rayDir.y = numSteps * rayInc.y; rayDir.z = numSteps * rayInc.z; float3 rayEnd; rayEnd.x = rayStart.x + rayDir.x; rayEnd.y = rayStart.y + rayDir.y; rayEnd.z = rayStart.z + rayDir.z; //default to some 
safe values excludeStart = 1.0f; excludeEnd = -1.0f; // loop through all provided clipping planes if(!numPlanes) { return; } int flag = 0; for ( int i = 0; i < numPlanes; i++ ) { //refine the ray direction to account for any changes in starting or ending position rayDir.x = rayEnd.x - rayStart.x; rayDir.y = rayEnd.y - rayStart.y; rayDir.z = rayEnd.z - rayStart.z; //collect all the information about the current clipping plane float4 keyholePlane; __syncthreads(); keyholePlane.x = renInfo.KeyholePlanes[4*i]; keyholePlane.y = renInfo.KeyholePlanes[4*i+1]; keyholePlane.z = renInfo.KeyholePlanes[4*i+2]; keyholePlane.w = renInfo.KeyholePlanes[4*i+3]; __syncthreads(); const float dp = keyholePlane.x*rayDir.x + keyholePlane.y*rayDir.y + keyholePlane.z*rayDir.z; const float t = -(keyholePlane.x*rayStart.x + keyholePlane.y*rayStart.y + keyholePlane.z*rayStart.z + keyholePlane.w) / dp; const float point0 = rayStart.x + t*rayDir.x; const float point1 = rayStart.y + t*rayDir.y; const float point2 = rayStart.z + t*rayDir.z; //if the ray intersects the plane, set the start or end point to the intersection point if ( t > 0.0f && t < 1.0f ) { dp > 0.0f ? rayStart.x = point0 : rayEnd.x = point0; dp > 0.0f ? rayStart.y = point1 : rayEnd.y = point1; dp > 0.0f ? rayStart.z = point2 : rayEnd.z = point2; } //flag this ray if it is outside the plane entirely flag |= (dp > 0.0f && t > 1.0f); flag |= (dp < 0.0f && t < 0.0f); }//for rayStart.x -= oldRayStart.x; rayStart.y -= oldRayStart.y; rayStart.z -= oldRayStart.z; rayEnd.x -= oldRayStart.x; rayEnd.y -= oldRayStart.y; rayEnd.z -= oldRayStart.z; //if the ray is not inside the clipping planes, make the ray zero length float invRayLengthSquared = 1.0f / (rayInc.x*rayInc.x + rayInc.y*rayInc.y + rayInc.z*rayInc.z); excludeStart = flag ? -1.0f : (rayStart.x * rayInc.x + rayStart.y * rayInc.y + rayStart.z * rayInc.z ) * invRayLengthSquared; excludeEnd = flag ? 
-1.0f : (rayEnd.x * rayInc.x + rayEnd.y * rayInc.y + rayEnd.z * rayInc.z ) * invRayLengthSquared; } __device__ void CUDAkernel_ClipRayAgainstClippingPlanes(float3& rayStart, float3& rayEnd, float3& rayDir) { __syncthreads(); const int numPlanes = renInfo.NumberOfClippingPlanes; __syncthreads(); // loop through all 6 clipping planes if(!numPlanes) { return; } int flag = 0; #pragma unroll 1 for ( int i = 0; i < numPlanes; i++ ) { //refine the ray direction to account for any changes in starting or ending position rayDir.x = rayEnd.x - rayStart.x; rayDir.y = rayEnd.y - rayStart.y; rayDir.z = rayEnd.z - rayStart.z; //collect all the information about the current clipping plane float4 clippingPlane; __syncthreads(); clippingPlane.x = renInfo.ClippingPlanes[4*i]; clippingPlane.y = renInfo.ClippingPlanes[4*i+1]; clippingPlane.z = renInfo.ClippingPlanes[4*i+2]; clippingPlane.w = renInfo.ClippingPlanes[4*i+3]; __syncthreads(); const float dp = clippingPlane.x*rayDir.x + clippingPlane.y*rayDir.y + clippingPlane.z*rayDir.z; const float t = -(clippingPlane.x*rayStart.x + clippingPlane.y*rayStart.y + clippingPlane.z*rayStart.z + clippingPlane.w) / dp; const float point0 = rayStart.x + t*rayDir.x; const float point1 = rayStart.y + t*rayDir.y; const float point2 = rayStart.z + t*rayDir.z; //if the ray intersects the plane, set the start or end point to the intersection point if ( t > 0.0f && t < 1.0f ) { dp > 0.0f ? rayStart.x = point0 : rayEnd.x = point0; dp > 0.0f ? rayStart.y = point1 : rayEnd.y = point1; dp > 0.0f ? 
rayStart.z = point2 : rayEnd.z = point2; } //flag this ray if it is outside the plane entirely flag |= (dp > 0.0f && t > 1.0f); flag |= (dp < 0.0f && t < 0.0f); }//for //if the ray is not inside the clipping planes, make the ray zero length if(flag) { rayStart.x = rayEnd.x; rayStart.y = rayEnd.y; rayStart.z = rayEnd.z; } } __device__ void CUDAkernel_ClipRayAgainstVolume(float3& rayStart, float3& rayEnd, float3& rayDir) { //define the ray's length and direction to account for any changes in starting and ending position rayDir.x = rayEnd.x - rayStart.x; rayDir.y = rayEnd.y - rayStart.y; rayDir.z = rayEnd.z - rayStart.z; //collect the information about the bounds of the volume in voxels from the volume information __syncthreads(); const float bounds0 = volInfo.Bounds[0]+1.0f; const float bounds1 = volInfo.Bounds[1]-1.0f; const float bounds2 = volInfo.Bounds[2]+1.0f; const float bounds3 = volInfo.Bounds[3]-1.0f; const float bounds4 = volInfo.Bounds[4]+1.0f; const float bounds5 = volInfo.Bounds[5]-1.0f; __syncthreads(); float diffS; float diffE; //find the intersection of the ray and the volume (in the x direction) if (rayDir.x > 0.0f) { diffS = rayStart.x < bounds0 ? bounds0 - rayStart.x : 0.0f; diffE = rayEnd.x > bounds1 ? bounds1 - rayEnd.x : 0.0f; } else { diffS = rayStart.x > bounds1 ? bounds1 - rayStart.x : 0.0f; diffE = rayEnd.x < bounds0 ? bounds0 - rayEnd.x : 0.0f; } diffS /= rayDir.x; diffE /= rayDir.x; //crop the ray to fit the x direction if possible if(isfinite(diffS)) { rayStart.x += rayDir.x * diffS; rayStart.y += rayDir.y * diffS; rayStart.z += rayDir.z * diffS; rayEnd.x += rayDir.x * diffE; rayEnd.y += rayDir.y * diffE; rayEnd.z += rayDir.z * diffE; } //find the intersection of the ray and the volume (in the y direction) if(rayDir.y > 0.0f) { diffS = rayStart.y < bounds2 ? bounds2 - rayStart.y : 0.0f; diffE = rayEnd.y > bounds3 ? bounds3 - rayEnd.y : 0.0f; } else { diffS = rayStart.y > bounds3 ? bounds3 - rayStart.y : 0.0f; diffE = rayEnd.y < bounds2 ? 
bounds2 - rayEnd.y : 0.0f; } diffS /= rayDir.y; diffE /= rayDir.y; //crop the ray to fit the y direction if possible if(isfinite(diffS)) { rayStart.x += rayDir.x * diffS; rayStart.y += rayDir.y * diffS; rayStart.z += rayDir.z * diffS; rayEnd.x += rayDir.x * diffE; rayEnd.y += rayDir.y * diffE; rayEnd.z += rayDir.z * diffE; } //find the intersection of the ray and the volume (in the z direction) if(rayDir.z > 0.0f) { diffS = rayStart.z < bounds4 ? bounds4 - rayStart.z : 0.0f; diffE = rayEnd.z > bounds5 ? bounds5 - rayEnd.z : 0.0f; } else { diffS = rayStart.z > bounds5 ? bounds5 - rayStart.z : 0.0f; diffE = rayEnd.z < bounds4 ? bounds4 - rayEnd.z : 0.0f; } diffS /= rayDir.z; diffE /= rayDir.z; //crop the ray to fit the z direction if possible if(isfinite(diffS)) { rayStart.x += rayDir.x * diffS; rayStart.y += rayDir.y * diffS; rayStart.z += rayDir.z * diffS; rayEnd.x += rayDir.x * diffE; rayEnd.y += rayDir.y * diffE; rayEnd.z += rayDir.z * diffE; } // If the voxel still isn't inside the volume, then this ray // doesn't really intersect the volume, thus, make it all zero if (rayEnd.x > bounds1 + 1.0f || rayEnd.y > bounds3 + 1.0f || rayEnd.z > bounds5 + 1.0f || rayEnd.x < bounds0 - 1.0f || rayEnd.y < bounds2 - 1.0f || rayEnd.z < bounds4 - 1.0f|| rayStart.x > bounds1 + 1.0f || rayStart.y > bounds3 + 1.0f || rayStart.z > bounds5 + 1.0f || rayStart.x < bounds0 - 1.0f || rayStart.y < bounds2 - 1.0f || rayStart.z < bounds4 - 1.0f ) { rayStart = rayEnd; } //refine the ray's length and direction to reflect any changes in the starting and ending co-ordinates rayDir.x = rayEnd.x - rayStart.x; rayDir.y = rayEnd.y - rayStart.y; rayDir.z = rayEnd.z - rayStart.z; } __device__ void CUDAkernel_SetRayEnds(const int2& index, float3& rayStart, float3& rayDir, const int& outIndex) { //set the original estimates of the starting and ending co-ordinates in the co-ordinates of the view (not voxels) //note: viewRayZ = 0 for start and viewRayZ = 1 for end __syncthreads(); float viewRayX = 
outInfo.flipped ? ( ((float) index.x) / (float) outInfo.resolution.x ) : 1.0f - ( ((float) index.x) / (float) outInfo.resolution.x ); float viewRayY = ( ((float) index.y) / (float) outInfo.resolution.y ); __syncthreads(); float endDepth = tex2D(zbuffer_texture, 1.0f-viewRayX, viewRayY ); //multiply the start co-ordinate in the view by the view to voxels matrix to get the co-ordinate in voxels (NOT YET NORMALIZED) __syncthreads(); rayStart.x = viewRayX*renInfo.ViewToVoxelsMatrix[0] + viewRayY*renInfo.ViewToVoxelsMatrix[1] + renInfo.ViewToVoxelsMatrix[3]; rayStart.y = viewRayX*renInfo.ViewToVoxelsMatrix[4] + viewRayY*renInfo.ViewToVoxelsMatrix[5] + renInfo.ViewToVoxelsMatrix[7]; rayStart.z = viewRayX*renInfo.ViewToVoxelsMatrix[8] + viewRayY*renInfo.ViewToVoxelsMatrix[9] + renInfo.ViewToVoxelsMatrix[11]; float startNorm = viewRayX*renInfo.ViewToVoxelsMatrix[12] + viewRayY*renInfo.ViewToVoxelsMatrix[13] + renInfo.ViewToVoxelsMatrix[15]; //multiply the equivalent for the end ray, noting that much of the pre-normalized computation is the same as the start ray __syncthreads(); float3 rayEnd; float3 rayFull; rayEnd.x = rayStart.x + endDepth*renInfo.ViewToVoxelsMatrix[2]; rayEnd.y = rayStart.y + endDepth*renInfo.ViewToVoxelsMatrix[6]; rayEnd.z = rayStart.z + endDepth*renInfo.ViewToVoxelsMatrix[10]; float endNorm = startNorm + endDepth*renInfo.ViewToVoxelsMatrix[14]; __syncthreads(); rayFull.x = rayStart.x + renInfo.ViewToVoxelsMatrix[2]; rayFull.y = rayStart.y + renInfo.ViewToVoxelsMatrix[6]; rayFull.z = rayStart.z + renInfo.ViewToVoxelsMatrix[10]; float fullNorm = startNorm + renInfo.ViewToVoxelsMatrix[14]; __syncthreads(); //normalize (and ergo finish) the start ray's matrix multiplication rayStart.x /= startNorm; rayStart.y /= startNorm; rayStart.z /= startNorm; //normalize (and ergo finish) the end ray's matrix multiplication rayEnd.x /= endNorm; rayEnd.y /= endNorm; rayEnd.z /= endNorm; rayFull.x /= fullNorm; rayFull.y /= fullNorm; rayFull.z /= fullNorm; //put the 
maximum depth in the buffer float3 oldStart = rayStart; rayDir.x = rayFull.x - rayStart.x; rayDir.y = rayFull.y - rayStart.y; rayDir.z = rayFull.z - rayStart.z; __syncthreads(); float maxDepth = __fsqrt_rz( rayDir.x*rayDir.x + rayDir.y*rayDir.y + rayDir.z*rayDir.z ); outInfo.maxDepthBuffer[outIndex] = maxDepth; __syncthreads(); //refine the ray to only include areas that are both within the volume, and within the clipping planes of said volume //note that ClipRayAgainstVolume calculate the ray's correct length and direction and returns it in rayInc CUDAkernel_ClipRayAgainstClippingPlanes(rayStart, rayEnd, rayDir); CUDAkernel_ClipRayAgainstVolume(rayStart, rayEnd, rayDir); //put the maximum depth in the buffer __syncthreads(); float rayLength = __fsqrt_rz( rayDir.x*rayDir.x + rayDir.y*rayDir.y + rayDir.z*rayDir.z ); float minDepth = __fsqrt_rz( (rayStart.x-oldStart.x)*(rayStart.x-oldStart.x) + (rayStart.y-oldStart.y)*(rayStart.y-oldStart.y) + (rayStart.z-oldStart.z)*(rayStart.z-oldStart.z) ); outInfo.minDepthBuffer[outIndex] = (rayLength > 0.0f) ? 
minDepth : maxDepth; __syncthreads(); } __global__ void CUDAkernel_renderAlgo_formRays( ) { //index in the output image (2D) int2 index; index.x = blockDim.x * blockIdx.x + threadIdx.x; index.y = blockDim.y * blockIdx.y + threadIdx.y; //index in the output image (1D) int outindex = index.x + index.y * outInfo.resolution.x; float3 rayStart; //ray starting point float3 rayInc; // ray sample increment float numSteps; //maximum number of samples along this ray // Calculate the starting and ending points of the ray, as well as the direction vector CUDAkernel_SetRayEnds(index, rayStart, rayInc, outindex); //determine the maximum number of steps the ray should sample and determine the length of each step __syncthreads(); float3 spacing = volInfo.Spacing; float minSpacing = volInfo.MinSpacing; __syncthreads(); numSteps = __fsqrt_ru(rayInc.x*rayInc.x*spacing.x*spacing.x+ rayInc.y*rayInc.y*spacing.y*spacing.y+ rayInc.z*rayInc.z*spacing.z*spacing.z) / minSpacing; rayInc.x /= numSteps; rayInc.y /= numSteps; rayInc.z /= numSteps; //find the information regarding the exclusion area float excludeStart = 0.0; float excludeEnd = 0.0; CUDAkernel_FindKeyholeValues( rayStart, rayInc, numSteps, excludeStart, excludeEnd ); //write out data __syncthreads(); outInfo.rayStartX[outindex] = rayStart.x; __syncthreads(); outInfo.rayStartY[outindex] = rayStart.y; __syncthreads(); outInfo.rayStartZ[outindex] = rayStart.z; __syncthreads(); outInfo.rayIncX[outindex] = rayInc.x; __syncthreads(); outInfo.rayIncY[outindex] = rayInc.y; __syncthreads(); outInfo.rayIncZ[outindex] = rayInc.z; __syncthreads(); outInfo.numSteps[outindex] = numSteps; __syncthreads(); outInfo.excludeStart[outindex] = excludeStart; __syncthreads(); outInfo.excludeEnd[outindex] = excludeEnd; __syncthreads(); } __global__ void CUDAkernel_shadeAlgo_normBuffer( ) { int outIndex = threadIdx.x + blockDim.x * blockIdx.x; // index of result image float curr = outInfo.depthBuffer[outIndex]; float max = 
outInfo.maxDepthBuffer[outIndex]; float min = outInfo.minDepthBuffer[outIndex]; curr = (max > 0.0f) ? (curr + min) / max : 1.0f; outInfo.depthBuffer[outIndex] = curr; } __global__ void CUDAkernel_shadeAlgo_doCelShade( ) { //index in the output image int outindex = threadIdx.x + blockDim.x * blockIdx.x; // index of result image //get the depth information from the buffer and the colour information from the output image float2 depthDiffX; float2 depthDiffY; __syncthreads(); depthDiffY.y = outInfo.depthBuffer[outindex+outInfo.resolution.x]; __syncthreads(); depthDiffY.x = outInfo.depthBuffer[outindex]; __syncthreads(); depthDiffX.y = outInfo.depthBuffer[outindex+1]; __syncthreads(); depthDiffX.x = depthDiffY.x; //compute the gradient magnitude float gradMag = __fsqrt_rz( (depthDiffX.y - depthDiffX.x)*(depthDiffX.y - depthDiffX.x) + (depthDiffY.y - depthDiffY.x)*(depthDiffY.y - depthDiffY.x) ); //grab cel shading parameters __syncthreads(); float darkness = renInfo.celr; float a = renInfo.cela; float c = renInfo.celc; __syncthreads(); //multiply by the cel-shading factor gradMag = 1.0f - darkness * saturate( (gradMag - a) * c ); //grab distance shading parameters __syncthreads(); darkness = renInfo.disr; a = renInfo.disa; c = renInfo.disc; __syncthreads(); //multiply by the depth factor gradMag *= 1.0f - darkness * saturate( (depthDiffX.x - a) * c ); uchar4 colour; __syncthreads(); colour = outInfo.deviceOutputImage[outindex]; __syncthreads(); colour.x = gradMag * ((float) colour.x); colour.y = gradMag * ((float) colour.y); colour.z = gradMag * ((float) colour.z); __syncthreads(); outInfo.deviceOutputImage[outindex] = colour; } bool CUDA_vtkCudaVolumeMapper_renderAlgo_loadZBuffer(const float* zBuffer, const int zBufferSizeX, const int zBufferSizeY, hipStream_t* stream) { if(ZBufferArray) { hipFreeArray(ZBufferArray); } //load the zBuffer from the host to the array hipMallocArray(&ZBufferArray, &channelDesc, zBufferSizeX, zBufferSizeY); 
cudaMemcpyToArrayAsync(ZBufferArray, 0, 0, zBuffer, sizeof(float)*zBufferSizeX*zBufferSizeY, hipMemcpyHostToDevice, *stream); //define the texture parameters and bind the texture to the array zbuffer_texture.normalized = true; zbuffer_texture.filterMode = hipFilterModePoint; zbuffer_texture.addressMode[0] = hipAddressModeClamp; zbuffer_texture.addressMode[1] = hipAddressModeClamp; hipBindTextureToArray(zbuffer_texture, ZBufferArray, channelDesc); #ifdef DEBUG_VTKCUDAVISUALIZATION hipDeviceSynchronize(); std::cout << "Load Z-Buffer: " << hipGetErrorString( hipGetLastError() ) << std::endl; #endif return (hipGetLastError() == 0); } bool CUDA_vtkCudaVolumeMapper_renderAlgo_unloadZBuffer(hipStream_t* stream) { if(ZBufferArray) { hipFreeArray(ZBufferArray); } ZBufferArray = 0; #ifdef DEBUG_VTKCUDAVISUALIZATION hipDeviceSynchronize(); std::cout << "Unload Z-Buffer: " << hipGetErrorString( hipGetLastError() ) << std::endl; #endif return (hipGetLastError() == 0); } //load in a random 16x16 noise array to deartefact the image in real time bool CUDA_vtkCudaVolumeMapper_renderAlgo_loadrandomRayOffsets(const float* randomRayOffsets, hipStream_t* stream) { hipMemcpyToSymbolAsync(dRandomRayOffsets, randomRayOffsets, BLOCK_DIM2D*BLOCK_DIM2D*sizeof(float), 0, hipMemcpyHostToDevice, *stream); #ifdef DEBUG_VTKCUDAVISUALIZATION hipDeviceSynchronize(); std:cout << "Load ray offsets: " << hipGetErrorString( hipGetLastError() ) << std::endl; #endif return (hipGetLastError() == 0); } bool CUDA_vtkCudaVolumeMapper_renderAlgo_unloadrandomRayOffsets(hipStream_t* stream) { #ifdef DEBUG_VTKCUDAVISUALIZATION hipDeviceSynchronize(); std::cout << "Unload ray offsets: " << hipGetErrorString( hipGetLastError() ) << std:endl; #endif return (hipGetLastError() == 0); } template<typename T, typename S> __global__ void CUDAkernel_convertUnit( T* hostBuffer, S* deviceBuffer, int bufferSize ) { int index = threadIdx.x + blockDim.x * blockIdx.x; T value = hostBuffer[index]; if( index < bufferSize ) { 
deviceBuffer[index] = (S) value; } } #define CUDA_castBuffer_OptimalThreadSize 512 template<typename T, typename S> void CUDA_castBuffer(T* hostBuffer, S** deviceBuffer, int bufferSize) { //allocate required device memory buffers T* deviceBufferOrgType; S* deviceBufferNewType; hipMalloc( (void**) &deviceBufferOrgType, sizeof(T)*bufferSize ); hipMalloc( (void**) &deviceBufferNewType, sizeof(S)*bufferSize ); //copy Org buffer hipMemcpy(deviceBufferOrgType, hostBuffer, sizeof(T)*bufferSize, hipMemcpyHostToDevice ); //create size thread structure dim3 threads (CUDA_castBuffer_OptimalThreadSize, 1, 1); dim3 grid ( (bufferSize-1)/CUDA_castBuffer_OptimalThreadSize+1, 1, 1 ); //cast on GPU hipLaunchKernelGGL(( CUDAkernel_convertUnit<T,S>), dim3(grid),dim3(threads), 0, 0, deviceBufferOrgType,deviceBufferNewType,bufferSize); //deallocate buffer of type T and return new buffer hipFree( deviceBufferOrgType ); *deviceBuffer = deviceBufferNewType; } template<typename T> void CUDA_allocBuffer(T* hostBuffer, T** deviceBuffer, int bufferSize) { //allocate required device memory buffers hipMalloc( (void**) deviceBuffer, sizeof(T)*bufferSize ); //copy Org buffer hipMemcpy(*deviceBuffer, hostBuffer, sizeof(T)*bufferSize, hipMemcpyHostToDevice ); } template void CUDA_allocBuffer<float>(float* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<char,float>(char* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<unsigned char,float>(unsigned char* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<short,float>(short* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<unsigned short,float>(unsigned short* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<int,float>(int* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<unsigned int,float>(unsigned int* hostBuffer, float** deviceBuffer, int bufferSize); 
template void CUDA_castBuffer<long,float>(long* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<unsigned long,float>(unsigned long* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<float,float>(float* hostBuffer, float** deviceBuffer, int bufferSize); void CUDA_deallocateMemory(void* ptr) { hipFree(ptr); } // Because __constants__ can't be extern'd across compilation units, include these files into this compilation unit #include "CUDA_vtkCuda1DVolumeMapper_renderAlgo.cuh" #include "CUDA_vtkCudaDRRImageVolumeMapper_renderAlgo.cuh" #include "CUDA_vtkCudaDualImageVolumeMapper_renderAlgo.cuh" #include "CUDA_vtkCuda2DInExLogicVolumeMapper_renderAlgo.cuh" #include "CUDA_vtkCuda2DVolumeMapper_renderAlgo.cuh" #endif
a86c231fff38acb6812153129fd6d1a74603ec11.cu
#ifndef _CUDA_VTKCUDAVOLUMEMAPPER_RENDERALGO_H #define _CUDA_VTKCUDAVOLUMEMAPPER_RENDERALGO_H #include "CUDA_vtkCudaVolumeMapper_renderAlgo.h" #include <cuda.h> #include <iostream> #define BLOCK_DIM2D 16 //16 is optimal, 4 is the minimum and 16 is the maximum //execution parameters and general information __constant__ cudaVolumeInformation volInfo; __constant__ cudaRendererInformation renInfo; __constant__ cudaOutputImageInformation outInfo; __constant__ float dRandomRayOffsets[BLOCK_DIM2D*BLOCK_DIM2D]; //texture element information for the ZBuffer cudaArray* ZBufferArray = 0; texture<float, 2, cudaReadModeElementType> zbuffer_texture; #define bindSingle2DTexture( textureToBind, value) textureToBind.normalized = true; \ textureToBind.filterMode = cudaFilterModePoint; \ textureToBind.addressMode[0] = cudaAddressModeClamp; \ textureToBind.addressMode[1] = cudaAddressModeClamp; \ cudaBindTextureToArray(textureToBind, value, channelDesc); #define load2DArray(array, values, s, tr) if(array) cudaFreeArray(array); \ cudaMallocArray( &array, &channelDesc, s, s); \ cudaMemcpyToArrayAsync(array, 0, 0, values, sizeof(float)*s*s, \ cudaMemcpyHostToDevice, tr); #define unloadArray(a) if(a); cudaFreeArray(a); a = 0; //channel for loading input data and transfer functions cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float2>(); __device__ void CUDAkernel_FindKeyholeValues(float3 rayStart, float3 rayInc, float& numSteps, float& excludeStart, float& excludeEnd ) { __syncthreads(); const int numPlanes = renInfo.NumberOfKeyholePlanes; __syncthreads(); //create a rayEnd holder float3 oldRayStart = rayStart; float3 rayDir; rayDir.x = numSteps * rayInc.x; rayDir.y = numSteps * rayInc.y; rayDir.z = numSteps * rayInc.z; float3 rayEnd; rayEnd.x = rayStart.x + rayDir.x; rayEnd.y = rayStart.y + rayDir.y; rayEnd.z = rayStart.z + rayDir.z; //default to some safe values excludeStart = 1.0f; excludeEnd = -1.0f; // 
loop through all provided clipping planes if(!numPlanes) { return; } int flag = 0; for ( int i = 0; i < numPlanes; i++ ) { //refine the ray direction to account for any changes in starting or ending position rayDir.x = rayEnd.x - rayStart.x; rayDir.y = rayEnd.y - rayStart.y; rayDir.z = rayEnd.z - rayStart.z; //collect all the information about the current clipping plane float4 keyholePlane; __syncthreads(); keyholePlane.x = renInfo.KeyholePlanes[4*i]; keyholePlane.y = renInfo.KeyholePlanes[4*i+1]; keyholePlane.z = renInfo.KeyholePlanes[4*i+2]; keyholePlane.w = renInfo.KeyholePlanes[4*i+3]; __syncthreads(); const float dp = keyholePlane.x*rayDir.x + keyholePlane.y*rayDir.y + keyholePlane.z*rayDir.z; const float t = -(keyholePlane.x*rayStart.x + keyholePlane.y*rayStart.y + keyholePlane.z*rayStart.z + keyholePlane.w) / dp; const float point0 = rayStart.x + t*rayDir.x; const float point1 = rayStart.y + t*rayDir.y; const float point2 = rayStart.z + t*rayDir.z; //if the ray intersects the plane, set the start or end point to the intersection point if ( t > 0.0f && t < 1.0f ) { dp > 0.0f ? rayStart.x = point0 : rayEnd.x = point0; dp > 0.0f ? rayStart.y = point1 : rayEnd.y = point1; dp > 0.0f ? rayStart.z = point2 : rayEnd.z = point2; } //flag this ray if it is outside the plane entirely flag |= (dp > 0.0f && t > 1.0f); flag |= (dp < 0.0f && t < 0.0f); }//for rayStart.x -= oldRayStart.x; rayStart.y -= oldRayStart.y; rayStart.z -= oldRayStart.z; rayEnd.x -= oldRayStart.x; rayEnd.y -= oldRayStart.y; rayEnd.z -= oldRayStart.z; //if the ray is not inside the clipping planes, make the ray zero length float invRayLengthSquared = 1.0f / (rayInc.x*rayInc.x + rayInc.y*rayInc.y + rayInc.z*rayInc.z); excludeStart = flag ? -1.0f : (rayStart.x * rayInc.x + rayStart.y * rayInc.y + rayStart.z * rayInc.z ) * invRayLengthSquared; excludeEnd = flag ? 
-1.0f : (rayEnd.x * rayInc.x + rayEnd.y * rayInc.y + rayEnd.z * rayInc.z ) * invRayLengthSquared; } __device__ void CUDAkernel_ClipRayAgainstClippingPlanes(float3& rayStart, float3& rayEnd, float3& rayDir) { __syncthreads(); const int numPlanes = renInfo.NumberOfClippingPlanes; __syncthreads(); // loop through all 6 clipping planes if(!numPlanes) { return; } int flag = 0; #pragma unroll 1 for ( int i = 0; i < numPlanes; i++ ) { //refine the ray direction to account for any changes in starting or ending position rayDir.x = rayEnd.x - rayStart.x; rayDir.y = rayEnd.y - rayStart.y; rayDir.z = rayEnd.z - rayStart.z; //collect all the information about the current clipping plane float4 clippingPlane; __syncthreads(); clippingPlane.x = renInfo.ClippingPlanes[4*i]; clippingPlane.y = renInfo.ClippingPlanes[4*i+1]; clippingPlane.z = renInfo.ClippingPlanes[4*i+2]; clippingPlane.w = renInfo.ClippingPlanes[4*i+3]; __syncthreads(); const float dp = clippingPlane.x*rayDir.x + clippingPlane.y*rayDir.y + clippingPlane.z*rayDir.z; const float t = -(clippingPlane.x*rayStart.x + clippingPlane.y*rayStart.y + clippingPlane.z*rayStart.z + clippingPlane.w) / dp; const float point0 = rayStart.x + t*rayDir.x; const float point1 = rayStart.y + t*rayDir.y; const float point2 = rayStart.z + t*rayDir.z; //if the ray intersects the plane, set the start or end point to the intersection point if ( t > 0.0f && t < 1.0f ) { dp > 0.0f ? rayStart.x = point0 : rayEnd.x = point0; dp > 0.0f ? rayStart.y = point1 : rayEnd.y = point1; dp > 0.0f ? 
rayStart.z = point2 : rayEnd.z = point2; } //flag this ray if it is outside the plane entirely flag |= (dp > 0.0f && t > 1.0f); flag |= (dp < 0.0f && t < 0.0f); }//for //if the ray is not inside the clipping planes, make the ray zero length if(flag) { rayStart.x = rayEnd.x; rayStart.y = rayEnd.y; rayStart.z = rayEnd.z; } } __device__ void CUDAkernel_ClipRayAgainstVolume(float3& rayStart, float3& rayEnd, float3& rayDir) { //define the ray's length and direction to account for any changes in starting and ending position rayDir.x = rayEnd.x - rayStart.x; rayDir.y = rayEnd.y - rayStart.y; rayDir.z = rayEnd.z - rayStart.z; //collect the information about the bounds of the volume in voxels from the volume information __syncthreads(); const float bounds0 = volInfo.Bounds[0]+1.0f; const float bounds1 = volInfo.Bounds[1]-1.0f; const float bounds2 = volInfo.Bounds[2]+1.0f; const float bounds3 = volInfo.Bounds[3]-1.0f; const float bounds4 = volInfo.Bounds[4]+1.0f; const float bounds5 = volInfo.Bounds[5]-1.0f; __syncthreads(); float diffS; float diffE; //find the intersection of the ray and the volume (in the x direction) if (rayDir.x > 0.0f) { diffS = rayStart.x < bounds0 ? bounds0 - rayStart.x : 0.0f; diffE = rayEnd.x > bounds1 ? bounds1 - rayEnd.x : 0.0f; } else { diffS = rayStart.x > bounds1 ? bounds1 - rayStart.x : 0.0f; diffE = rayEnd.x < bounds0 ? bounds0 - rayEnd.x : 0.0f; } diffS /= rayDir.x; diffE /= rayDir.x; //crop the ray to fit the x direction if possible if(isfinite(diffS)) { rayStart.x += rayDir.x * diffS; rayStart.y += rayDir.y * diffS; rayStart.z += rayDir.z * diffS; rayEnd.x += rayDir.x * diffE; rayEnd.y += rayDir.y * diffE; rayEnd.z += rayDir.z * diffE; } //find the intersection of the ray and the volume (in the y direction) if(rayDir.y > 0.0f) { diffS = rayStart.y < bounds2 ? bounds2 - rayStart.y : 0.0f; diffE = rayEnd.y > bounds3 ? bounds3 - rayEnd.y : 0.0f; } else { diffS = rayStart.y > bounds3 ? bounds3 - rayStart.y : 0.0f; diffE = rayEnd.y < bounds2 ? 
bounds2 - rayEnd.y : 0.0f; } diffS /= rayDir.y; diffE /= rayDir.y; //crop the ray to fit the y direction if possible if(isfinite(diffS)) { rayStart.x += rayDir.x * diffS; rayStart.y += rayDir.y * diffS; rayStart.z += rayDir.z * diffS; rayEnd.x += rayDir.x * diffE; rayEnd.y += rayDir.y * diffE; rayEnd.z += rayDir.z * diffE; } //find the intersection of the ray and the volume (in the z direction) if(rayDir.z > 0.0f) { diffS = rayStart.z < bounds4 ? bounds4 - rayStart.z : 0.0f; diffE = rayEnd.z > bounds5 ? bounds5 - rayEnd.z : 0.0f; } else { diffS = rayStart.z > bounds5 ? bounds5 - rayStart.z : 0.0f; diffE = rayEnd.z < bounds4 ? bounds4 - rayEnd.z : 0.0f; } diffS /= rayDir.z; diffE /= rayDir.z; //crop the ray to fit the z direction if possible if(isfinite(diffS)) { rayStart.x += rayDir.x * diffS; rayStart.y += rayDir.y * diffS; rayStart.z += rayDir.z * diffS; rayEnd.x += rayDir.x * diffE; rayEnd.y += rayDir.y * diffE; rayEnd.z += rayDir.z * diffE; } // If the voxel still isn't inside the volume, then this ray // doesn't really intersect the volume, thus, make it all zero if (rayEnd.x > bounds1 + 1.0f || rayEnd.y > bounds3 + 1.0f || rayEnd.z > bounds5 + 1.0f || rayEnd.x < bounds0 - 1.0f || rayEnd.y < bounds2 - 1.0f || rayEnd.z < bounds4 - 1.0f|| rayStart.x > bounds1 + 1.0f || rayStart.y > bounds3 + 1.0f || rayStart.z > bounds5 + 1.0f || rayStart.x < bounds0 - 1.0f || rayStart.y < bounds2 - 1.0f || rayStart.z < bounds4 - 1.0f ) { rayStart = rayEnd; } //refine the ray's length and direction to reflect any changes in the starting and ending co-ordinates rayDir.x = rayEnd.x - rayStart.x; rayDir.y = rayEnd.y - rayStart.y; rayDir.z = rayEnd.z - rayStart.z; } __device__ void CUDAkernel_SetRayEnds(const int2& index, float3& rayStart, float3& rayDir, const int& outIndex) { //set the original estimates of the starting and ending co-ordinates in the co-ordinates of the view (not voxels) //note: viewRayZ = 0 for start and viewRayZ = 1 for end __syncthreads(); float viewRayX = 
outInfo.flipped ? ( ((float) index.x) / (float) outInfo.resolution.x ) : 1.0f - ( ((float) index.x) / (float) outInfo.resolution.x ); float viewRayY = ( ((float) index.y) / (float) outInfo.resolution.y ); __syncthreads(); float endDepth = tex2D(zbuffer_texture, 1.0f-viewRayX, viewRayY ); //multiply the start co-ordinate in the view by the view to voxels matrix to get the co-ordinate in voxels (NOT YET NORMALIZED) __syncthreads(); rayStart.x = viewRayX*renInfo.ViewToVoxelsMatrix[0] + viewRayY*renInfo.ViewToVoxelsMatrix[1] + renInfo.ViewToVoxelsMatrix[3]; rayStart.y = viewRayX*renInfo.ViewToVoxelsMatrix[4] + viewRayY*renInfo.ViewToVoxelsMatrix[5] + renInfo.ViewToVoxelsMatrix[7]; rayStart.z = viewRayX*renInfo.ViewToVoxelsMatrix[8] + viewRayY*renInfo.ViewToVoxelsMatrix[9] + renInfo.ViewToVoxelsMatrix[11]; float startNorm = viewRayX*renInfo.ViewToVoxelsMatrix[12] + viewRayY*renInfo.ViewToVoxelsMatrix[13] + renInfo.ViewToVoxelsMatrix[15]; //multiply the equivalent for the end ray, noting that much of the pre-normalized computation is the same as the start ray __syncthreads(); float3 rayEnd; float3 rayFull; rayEnd.x = rayStart.x + endDepth*renInfo.ViewToVoxelsMatrix[2]; rayEnd.y = rayStart.y + endDepth*renInfo.ViewToVoxelsMatrix[6]; rayEnd.z = rayStart.z + endDepth*renInfo.ViewToVoxelsMatrix[10]; float endNorm = startNorm + endDepth*renInfo.ViewToVoxelsMatrix[14]; __syncthreads(); rayFull.x = rayStart.x + renInfo.ViewToVoxelsMatrix[2]; rayFull.y = rayStart.y + renInfo.ViewToVoxelsMatrix[6]; rayFull.z = rayStart.z + renInfo.ViewToVoxelsMatrix[10]; float fullNorm = startNorm + renInfo.ViewToVoxelsMatrix[14]; __syncthreads(); //normalize (and ergo finish) the start ray's matrix multiplication rayStart.x /= startNorm; rayStart.y /= startNorm; rayStart.z /= startNorm; //normalize (and ergo finish) the end ray's matrix multiplication rayEnd.x /= endNorm; rayEnd.y /= endNorm; rayEnd.z /= endNorm; rayFull.x /= fullNorm; rayFull.y /= fullNorm; rayFull.z /= fullNorm; //put the 
maximum depth in the buffer float3 oldStart = rayStart; rayDir.x = rayFull.x - rayStart.x; rayDir.y = rayFull.y - rayStart.y; rayDir.z = rayFull.z - rayStart.z; __syncthreads(); float maxDepth = __fsqrt_rz( rayDir.x*rayDir.x + rayDir.y*rayDir.y + rayDir.z*rayDir.z ); outInfo.maxDepthBuffer[outIndex] = maxDepth; __syncthreads(); //refine the ray to only include areas that are both within the volume, and within the clipping planes of said volume //note that ClipRayAgainstVolume calculate the ray's correct length and direction and returns it in rayInc CUDAkernel_ClipRayAgainstClippingPlanes(rayStart, rayEnd, rayDir); CUDAkernel_ClipRayAgainstVolume(rayStart, rayEnd, rayDir); //put the maximum depth in the buffer __syncthreads(); float rayLength = __fsqrt_rz( rayDir.x*rayDir.x + rayDir.y*rayDir.y + rayDir.z*rayDir.z ); float minDepth = __fsqrt_rz( (rayStart.x-oldStart.x)*(rayStart.x-oldStart.x) + (rayStart.y-oldStart.y)*(rayStart.y-oldStart.y) + (rayStart.z-oldStart.z)*(rayStart.z-oldStart.z) ); outInfo.minDepthBuffer[outIndex] = (rayLength > 0.0f) ? 
minDepth : maxDepth; __syncthreads(); } __global__ void CUDAkernel_renderAlgo_formRays( ) { //index in the output image (2D) int2 index; index.x = blockDim.x * blockIdx.x + threadIdx.x; index.y = blockDim.y * blockIdx.y + threadIdx.y; //index in the output image (1D) int outindex = index.x + index.y * outInfo.resolution.x; float3 rayStart; //ray starting point float3 rayInc; // ray sample increment float numSteps; //maximum number of samples along this ray // Calculate the starting and ending points of the ray, as well as the direction vector CUDAkernel_SetRayEnds(index, rayStart, rayInc, outindex); //determine the maximum number of steps the ray should sample and determine the length of each step __syncthreads(); float3 spacing = volInfo.Spacing; float minSpacing = volInfo.MinSpacing; __syncthreads(); numSteps = __fsqrt_ru(rayInc.x*rayInc.x*spacing.x*spacing.x+ rayInc.y*rayInc.y*spacing.y*spacing.y+ rayInc.z*rayInc.z*spacing.z*spacing.z) / minSpacing; rayInc.x /= numSteps; rayInc.y /= numSteps; rayInc.z /= numSteps; //find the information regarding the exclusion area float excludeStart = 0.0; float excludeEnd = 0.0; CUDAkernel_FindKeyholeValues( rayStart, rayInc, numSteps, excludeStart, excludeEnd ); //write out data __syncthreads(); outInfo.rayStartX[outindex] = rayStart.x; __syncthreads(); outInfo.rayStartY[outindex] = rayStart.y; __syncthreads(); outInfo.rayStartZ[outindex] = rayStart.z; __syncthreads(); outInfo.rayIncX[outindex] = rayInc.x; __syncthreads(); outInfo.rayIncY[outindex] = rayInc.y; __syncthreads(); outInfo.rayIncZ[outindex] = rayInc.z; __syncthreads(); outInfo.numSteps[outindex] = numSteps; __syncthreads(); outInfo.excludeStart[outindex] = excludeStart; __syncthreads(); outInfo.excludeEnd[outindex] = excludeEnd; __syncthreads(); } __global__ void CUDAkernel_shadeAlgo_normBuffer( ) { int outIndex = threadIdx.x + blockDim.x * blockIdx.x; // index of result image float curr = outInfo.depthBuffer[outIndex]; float max = 
outInfo.maxDepthBuffer[outIndex]; float min = outInfo.minDepthBuffer[outIndex]; curr = (max > 0.0f) ? (curr + min) / max : 1.0f; outInfo.depthBuffer[outIndex] = curr; } __global__ void CUDAkernel_shadeAlgo_doCelShade( ) { //index in the output image int outindex = threadIdx.x + blockDim.x * blockIdx.x; // index of result image //get the depth information from the buffer and the colour information from the output image float2 depthDiffX; float2 depthDiffY; __syncthreads(); depthDiffY.y = outInfo.depthBuffer[outindex+outInfo.resolution.x]; __syncthreads(); depthDiffY.x = outInfo.depthBuffer[outindex]; __syncthreads(); depthDiffX.y = outInfo.depthBuffer[outindex+1]; __syncthreads(); depthDiffX.x = depthDiffY.x; //compute the gradient magnitude float gradMag = __fsqrt_rz( (depthDiffX.y - depthDiffX.x)*(depthDiffX.y - depthDiffX.x) + (depthDiffY.y - depthDiffY.x)*(depthDiffY.y - depthDiffY.x) ); //grab cel shading parameters __syncthreads(); float darkness = renInfo.celr; float a = renInfo.cela; float c = renInfo.celc; __syncthreads(); //multiply by the cel-shading factor gradMag = 1.0f - darkness * saturate( (gradMag - a) * c ); //grab distance shading parameters __syncthreads(); darkness = renInfo.disr; a = renInfo.disa; c = renInfo.disc; __syncthreads(); //multiply by the depth factor gradMag *= 1.0f - darkness * saturate( (depthDiffX.x - a) * c ); uchar4 colour; __syncthreads(); colour = outInfo.deviceOutputImage[outindex]; __syncthreads(); colour.x = gradMag * ((float) colour.x); colour.y = gradMag * ((float) colour.y); colour.z = gradMag * ((float) colour.z); __syncthreads(); outInfo.deviceOutputImage[outindex] = colour; } bool CUDA_vtkCudaVolumeMapper_renderAlgo_loadZBuffer(const float* zBuffer, const int zBufferSizeX, const int zBufferSizeY, cudaStream_t* stream) { if(ZBufferArray) { cudaFreeArray(ZBufferArray); } //load the zBuffer from the host to the array cudaMallocArray(&ZBufferArray, &channelDesc, zBufferSizeX, zBufferSizeY); 
cudaMemcpyToArrayAsync(ZBufferArray, 0, 0, zBuffer, sizeof(float)*zBufferSizeX*zBufferSizeY, cudaMemcpyHostToDevice, *stream); //define the texture parameters and bind the texture to the array zbuffer_texture.normalized = true; zbuffer_texture.filterMode = cudaFilterModePoint; zbuffer_texture.addressMode[0] = cudaAddressModeClamp; zbuffer_texture.addressMode[1] = cudaAddressModeClamp; cudaBindTextureToArray(zbuffer_texture, ZBufferArray, channelDesc); #ifdef DEBUG_VTKCUDAVISUALIZATION cudaThreadSynchronize(); std::cout << "Load Z-Buffer: " << cudaGetErrorString( cudaGetLastError() ) << std::endl; #endif return (cudaGetLastError() == 0); } bool CUDA_vtkCudaVolumeMapper_renderAlgo_unloadZBuffer(cudaStream_t* stream) { if(ZBufferArray) { cudaFreeArray(ZBufferArray); } ZBufferArray = 0; #ifdef DEBUG_VTKCUDAVISUALIZATION cudaThreadSynchronize(); std::cout << "Unload Z-Buffer: " << cudaGetErrorString( cudaGetLastError() ) << std::endl; #endif return (cudaGetLastError() == 0); } //load in a random 16x16 noise array to deartefact the image in real time bool CUDA_vtkCudaVolumeMapper_renderAlgo_loadrandomRayOffsets(const float* randomRayOffsets, cudaStream_t* stream) { cudaMemcpyToSymbolAsync(dRandomRayOffsets, randomRayOffsets, BLOCK_DIM2D*BLOCK_DIM2D*sizeof(float), 0, cudaMemcpyHostToDevice, *stream); #ifdef DEBUG_VTKCUDAVISUALIZATION cudaThreadSynchronize(); std:cout << "Load ray offsets: " << cudaGetErrorString( cudaGetLastError() ) << std::endl; #endif return (cudaGetLastError() == 0); } bool CUDA_vtkCudaVolumeMapper_renderAlgo_unloadrandomRayOffsets(cudaStream_t* stream) { #ifdef DEBUG_VTKCUDAVISUALIZATION cudaThreadSynchronize(); std::cout << "Unload ray offsets: " << cudaGetErrorString( cudaGetLastError() ) << std:endl; #endif return (cudaGetLastError() == 0); } template<typename T, typename S> __global__ void CUDAkernel_convertUnit( T* hostBuffer, S* deviceBuffer, int bufferSize ) { int index = threadIdx.x + blockDim.x * blockIdx.x; T value = hostBuffer[index]; if( 
index < bufferSize ) { deviceBuffer[index] = (S) value; } } #define CUDA_castBuffer_OptimalThreadSize 512 template<typename T, typename S> void CUDA_castBuffer(T* hostBuffer, S** deviceBuffer, int bufferSize) { //allocate required device memory buffers T* deviceBufferOrgType; S* deviceBufferNewType; cudaMalloc( (void**) &deviceBufferOrgType, sizeof(T)*bufferSize ); cudaMalloc( (void**) &deviceBufferNewType, sizeof(S)*bufferSize ); //copy Org buffer cudaMemcpy(deviceBufferOrgType, hostBuffer, sizeof(T)*bufferSize, cudaMemcpyHostToDevice ); //create size thread structure dim3 threads (CUDA_castBuffer_OptimalThreadSize, 1, 1); dim3 grid ( (bufferSize-1)/CUDA_castBuffer_OptimalThreadSize+1, 1, 1 ); //cast on GPU CUDAkernel_convertUnit<T,S><<<grid,threads>>>(deviceBufferOrgType,deviceBufferNewType,bufferSize); //deallocate buffer of type T and return new buffer cudaFree( deviceBufferOrgType ); *deviceBuffer = deviceBufferNewType; } template<typename T> void CUDA_allocBuffer(T* hostBuffer, T** deviceBuffer, int bufferSize) { //allocate required device memory buffers cudaMalloc( (void**) deviceBuffer, sizeof(T)*bufferSize ); //copy Org buffer cudaMemcpy(*deviceBuffer, hostBuffer, sizeof(T)*bufferSize, cudaMemcpyHostToDevice ); } template void CUDA_allocBuffer<float>(float* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<char,float>(char* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<unsigned char,float>(unsigned char* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<short,float>(short* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<unsigned short,float>(unsigned short* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<int,float>(int* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<unsigned int,float>(unsigned int* hostBuffer, float** deviceBuffer, int bufferSize); template void 
CUDA_castBuffer<long,float>(long* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<unsigned long,float>(unsigned long* hostBuffer, float** deviceBuffer, int bufferSize); template void CUDA_castBuffer<float,float>(float* hostBuffer, float** deviceBuffer, int bufferSize); void CUDA_deallocateMemory(void* ptr) { cudaFree(ptr); } // Because __constants__ can't be extern'd across compilation units, include these files into this compilation unit #include "CUDA_vtkCuda1DVolumeMapper_renderAlgo.cuh" #include "CUDA_vtkCudaDRRImageVolumeMapper_renderAlgo.cuh" #include "CUDA_vtkCudaDualImageVolumeMapper_renderAlgo.cuh" #include "CUDA_vtkCuda2DInExLogicVolumeMapper_renderAlgo.cuh" #include "CUDA_vtkCuda2DVolumeMapper_renderAlgo.cuh" #endif
8a1f0285a6e4c448ab927739fe054dad57238331.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlaqps2_gpu.cu normal z -> c, Tue Sep 2 12:38:15 2014 */ #include "common_magma.h" #include "commonblas_c.h" #define PRECISION_c // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /* --------------------------------------------------------------------------- */ /** Purpose ------- CLAQPS computes a step of QR factorization with column pivoting of a complex M-by-N matrix A by using Blas-3. It tries to factorize NB columns from A starting from the row OFFSET+1, and updates all of the matrix with Blas-3 xGEMM. In some cases, due to catastrophic cancellations, it cannot factorize NB columns. Hence, the actual number of factorized columns is returned in KB. Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 @param[in] offset INTEGER The number of rows of A that have been factorized in previous steps. @param[in] NB INTEGER The number of columns to factorize. @param[out] kb INTEGER The number of columns actually factorized. @param[in,out] A COMPLEX array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, block A(OFFSET+1:M,1:KB) is the triangular factor obtained and block A(1:OFFSET,1:N) has been accordingly pivoted, but no factorized. The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has been updated. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[in,out] jpvt INTEGER array, dimension (N) JPVT(I) = K <==> Column K of the full matrix A has been permuted into position I in AP. @param[out] tau COMPLEX array, dimension (KB) The scalar factors of the elementary reflectors. 
@param[in,out] VN1 REAL array, dimension (N) The vector with the partial column norms. @param[in,out] VN2 REAL array, dimension (N) The vector with the exact column norms. @param[in,out] AUXV COMPLEX array, dimension (NB) Auxiliar vector. @param[in,out] F COMPLEX array, dimension (LDF,NB) Matrix F' = L*Y'*A. @param[in] ldf INTEGER The leading dimension of the array F. LDF >= max(1,N). @ingroup magma_cgeqp3_aux ********************************************************************/ extern "C" magma_int_t magma_claqps2_gpu(magma_int_t m, magma_int_t n, magma_int_t offset, magma_int_t nb, magma_int_t *kb, magmaFloatComplex *A, magma_int_t lda, magma_int_t *jpvt, magmaFloatComplex *tau, float *vn1, float *vn2, magmaFloatComplex *auxv, magmaFloatComplex *F, magma_int_t ldf) { #define A(i, j) (A + (i) + (j)*(lda )) #define F(i, j) (F + (i) + (j)*(ldf )) magmaFloatComplex c_zero = MAGMA_C_MAKE( 0.,0.); magmaFloatComplex c_one = MAGMA_C_MAKE( 1.,0.); magmaFloatComplex c_neg_one = MAGMA_C_MAKE(-1.,0.); magma_int_t ione = 1; magma_int_t i__1, i__2; magma_int_t k, rk; magmaFloatComplex tauk; magma_int_t pvt, itemp; float tol3z; magmaFloatComplex *dAkk = auxv; auxv+=nb; float lsticc, *lsticcs; magma_smalloc( &lsticcs, 1+256*(n+255)/256 ); tol3z = magma_ssqrt( lapackf77_slamch("Epsilon")); lsticc = 0; k = 0; while( k < nb && lsticc == 0 ) { rk = offset + k; /* Determine ith pivot column and swap if necessary */ pvt = k - 1 + magma_isamax( n-k, &vn1[k], ione ); if (pvt != k) { magmablas_cswap( k+1, F(pvt,0), ldf, F(k,0), ldf); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[k]; jpvt[k] = itemp; #if (defined(PRECISION_d) || defined(PRECISION_z)) //magma_dswap( 1, &vn1[pvt], 1, &vn1[k], 1 ); //magma_dswap( 1, &vn2[pvt], 1, &vn2[k], 1 ); magma_dswap( 2, &vn1[pvt], n+offset, &vn1[k], n+offset); #else //magma_sswap( 1, &vn1[pvt], 1, &vn1[k], 1 ); //magma_sswap( 1, &vn2[pvt], 1, &vn2[k], 1 ); magma_sswap(2, &vn1[pvt], n+offset, &vn1[k], n+offset); #endif magmablas_cswap( m, A(0,pvt), ione, A(0, k), 
ione ); } /* Apply previous Householder reflectors to column K: A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. Optimization: multiply with beta=0; wait for vector and subtract */ if (k > 0) { magmablas_cgemv_conjv( m-rk, k, c_neg_one, A(rk, 0), lda, F(k, 0), ldf, c_one, A(rk, k), ione ); } /* Generate elementary reflector H(k). */ magma_clarfg_gpu(m-rk, A(rk, k), A(rk + 1, k), &tau[k], &vn1[k], &dAkk[k]); magma_csetvector( 1, &c_one, 1, A(rk, k), 1 ); /* Compute Kth column of F: Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */ if (k < n-1 || k > 0 ) magma_cgetvector( 1, &tau[k], 1, &tauk, 1 ); if (k < n-1) { magma_cgemv( MagmaConjTrans, m-rk, n-k-1, tauk, A( rk, k+1 ), lda, A( rk, k ), 1, c_zero, F( k+1, k ), 1 ); } /* Incremental updating of F: F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K). F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K) := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K) so, F is (updated A)*V */ if (k > 0) { /*z__1 = MAGMA_C_NEGATE( tauk ); magma_cgemv( MagmaConjTrans, m-rk, k, z__1, A(rk, 0), lda, A(rk, k), ione, c_zero, auxv, ione );*/ hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m-rk, A(rk, 0), lda, A(rk, k), auxv, tau+k); /* I think we only need stricly lower-triangular part */ magma_cgemv( MagmaNoTrans, n-k-1, k, c_one, F(k+1,0), ldf, auxv, ione, c_one, F(k+1,k), ione ); } /* Update the current row of A: A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */ if (k < n-1) { i__1 = n - k - 1; i__2 = k + 1; /* left-looking update of rows, * * since F=A'v with original A, so no right-looking */ magma_cgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2, c_neg_one, A(rk, 0 ), lda, F(k+1,0 ), ldf, c_one, A(rk, k+1), lda ); } /* Update partial column norms. 
*/ if (rk < min(m, n+offset)-1){ magmablas_scnrm2_row_check_adjust(n-k-1, tol3z, &vn1[k+1], &vn2[k+1], A(rk,k+1), lda, lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #else magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #endif } //*A(rk, k) = Akk; //magma_csetvector( 1, &Akk, 1, A(rk, k), 1 ); //magmablas_clacpy(MagmaUpperLower, 1, 1, dAkk, 1, A(rk, k), 1); ++k; } // restore the diagonals magma_ccopymatrix( 1, k, dAkk, 1, A(offset, 0), lda+1 ); // leave k as the last column done --k; *kb = k + 1; rk = offset + *kb - 1; /* Apply the block reflector to the rest of the matrix: A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */ if (*kb < min(n, m - offset)) { i__1 = m - rk - 1; i__2 = n - *kb; magma_cgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb, c_neg_one, A(rk+1, 0 ), lda, F(*kb, 0 ), ldf, c_one, A(rk+1, *kb), lda ); } /* Recomputation of difficult columns. */ if( lsticc > 0 ) { // printf( " -- recompute dnorms --\n" ); magmablas_scnrm2_check(m-rk-1, n-*kb, A(rk+1,*kb), lda, &vn1[*kb], lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_scopymatrix( n-*kb, 1, &vn1[*kb], n, &vn2[*kb], n); #else magma_scopymatrix( n-*kb, 1, &vn1[*kb], n, &vn2[*kb], n); #endif } magma_free(lsticcs); return MAGMA_SUCCESS; } /* magma_claqps */
8a1f0285a6e4c448ab927739fe054dad57238331.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlaqps2_gpu.cu normal z -> c, Tue Sep 2 12:38:15 2014 */ #include "common_magma.h" #include "commonblas_c.h" #define PRECISION_c // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /* --------------------------------------------------------------------------- */ /** Purpose ------- CLAQPS computes a step of QR factorization with column pivoting of a complex M-by-N matrix A by using Blas-3. It tries to factorize NB columns from A starting from the row OFFSET+1, and updates all of the matrix with Blas-3 xGEMM. In some cases, due to catastrophic cancellations, it cannot factorize NB columns. Hence, the actual number of factorized columns is returned in KB. Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 @param[in] offset INTEGER The number of rows of A that have been factorized in previous steps. @param[in] NB INTEGER The number of columns to factorize. @param[out] kb INTEGER The number of columns actually factorized. @param[in,out] A COMPLEX array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, block A(OFFSET+1:M,1:KB) is the triangular factor obtained and block A(1:OFFSET,1:N) has been accordingly pivoted, but no factorized. The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has been updated. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[in,out] jpvt INTEGER array, dimension (N) JPVT(I) = K <==> Column K of the full matrix A has been permuted into position I in AP. @param[out] tau COMPLEX array, dimension (KB) The scalar factors of the elementary reflectors. @param[in,out] VN1 REAL array, dimension (N) The vector with the partial column norms. 
@param[in,out] VN2 REAL array, dimension (N) The vector with the exact column norms. @param[in,out] AUXV COMPLEX array, dimension (NB) Auxiliar vector. @param[in,out] F COMPLEX array, dimension (LDF,NB) Matrix F' = L*Y'*A. @param[in] ldf INTEGER The leading dimension of the array F. LDF >= max(1,N). @ingroup magma_cgeqp3_aux ********************************************************************/ extern "C" magma_int_t magma_claqps2_gpu(magma_int_t m, magma_int_t n, magma_int_t offset, magma_int_t nb, magma_int_t *kb, magmaFloatComplex *A, magma_int_t lda, magma_int_t *jpvt, magmaFloatComplex *tau, float *vn1, float *vn2, magmaFloatComplex *auxv, magmaFloatComplex *F, magma_int_t ldf) { #define A(i, j) (A + (i) + (j)*(lda )) #define F(i, j) (F + (i) + (j)*(ldf )) magmaFloatComplex c_zero = MAGMA_C_MAKE( 0.,0.); magmaFloatComplex c_one = MAGMA_C_MAKE( 1.,0.); magmaFloatComplex c_neg_one = MAGMA_C_MAKE(-1.,0.); magma_int_t ione = 1; magma_int_t i__1, i__2; magma_int_t k, rk; magmaFloatComplex tauk; magma_int_t pvt, itemp; float tol3z; magmaFloatComplex *dAkk = auxv; auxv+=nb; float lsticc, *lsticcs; magma_smalloc( &lsticcs, 1+256*(n+255)/256 ); tol3z = magma_ssqrt( lapackf77_slamch("Epsilon")); lsticc = 0; k = 0; while( k < nb && lsticc == 0 ) { rk = offset + k; /* Determine ith pivot column and swap if necessary */ pvt = k - 1 + magma_isamax( n-k, &vn1[k], ione ); if (pvt != k) { magmablas_cswap( k+1, F(pvt,0), ldf, F(k,0), ldf); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[k]; jpvt[k] = itemp; #if (defined(PRECISION_d) || defined(PRECISION_z)) //magma_dswap( 1, &vn1[pvt], 1, &vn1[k], 1 ); //magma_dswap( 1, &vn2[pvt], 1, &vn2[k], 1 ); magma_dswap( 2, &vn1[pvt], n+offset, &vn1[k], n+offset); #else //magma_sswap( 1, &vn1[pvt], 1, &vn1[k], 1 ); //magma_sswap( 1, &vn2[pvt], 1, &vn2[k], 1 ); magma_sswap(2, &vn1[pvt], n+offset, &vn1[k], n+offset); #endif magmablas_cswap( m, A(0,pvt), ione, A(0, k), ione ); } /* Apply previous Householder reflectors to column K: A(RK:M,K) := A(RK:M,K) 
- A(RK:M,1:K-1)*F(K,1:K-1)'. Optimization: multiply with beta=0; wait for vector and subtract */ if (k > 0) { magmablas_cgemv_conjv( m-rk, k, c_neg_one, A(rk, 0), lda, F(k, 0), ldf, c_one, A(rk, k), ione ); } /* Generate elementary reflector H(k). */ magma_clarfg_gpu(m-rk, A(rk, k), A(rk + 1, k), &tau[k], &vn1[k], &dAkk[k]); magma_csetvector( 1, &c_one, 1, A(rk, k), 1 ); /* Compute Kth column of F: Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */ if (k < n-1 || k > 0 ) magma_cgetvector( 1, &tau[k], 1, &tauk, 1 ); if (k < n-1) { magma_cgemv( MagmaConjTrans, m-rk, n-k-1, tauk, A( rk, k+1 ), lda, A( rk, k ), 1, c_zero, F( k+1, k ), 1 ); } /* Incremental updating of F: F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K). F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K) := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K) so, F is (updated A)*V */ if (k > 0) { /*z__1 = MAGMA_C_NEGATE( tauk ); magma_cgemv( MagmaConjTrans, m-rk, k, z__1, A(rk, 0), lda, A(rk, k), ione, c_zero, auxv, ione );*/ magma_cgemv_kernel3<<< k, BLOCK_SIZE, 0, magma_stream >>>(m-rk, A(rk, 0), lda, A(rk, k), auxv, tau+k); /* I think we only need stricly lower-triangular part */ magma_cgemv( MagmaNoTrans, n-k-1, k, c_one, F(k+1,0), ldf, auxv, ione, c_one, F(k+1,k), ione ); } /* Update the current row of A: A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */ if (k < n-1) { i__1 = n - k - 1; i__2 = k + 1; /* left-looking update of rows, * * since F=A'v with original A, so no right-looking */ magma_cgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2, c_neg_one, A(rk, 0 ), lda, F(k+1,0 ), ldf, c_one, A(rk, k+1), lda ); } /* Update partial column norms. 
*/ if (rk < min(m, n+offset)-1){ magmablas_scnrm2_row_check_adjust(n-k-1, tol3z, &vn1[k+1], &vn2[k+1], A(rk,k+1), lda, lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #else magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #endif } //*A(rk, k) = Akk; //magma_csetvector( 1, &Akk, 1, A(rk, k), 1 ); //magmablas_clacpy(MagmaUpperLower, 1, 1, dAkk, 1, A(rk, k), 1); ++k; } // restore the diagonals magma_ccopymatrix( 1, k, dAkk, 1, A(offset, 0), lda+1 ); // leave k as the last column done --k; *kb = k + 1; rk = offset + *kb - 1; /* Apply the block reflector to the rest of the matrix: A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */ if (*kb < min(n, m - offset)) { i__1 = m - rk - 1; i__2 = n - *kb; magma_cgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb, c_neg_one, A(rk+1, 0 ), lda, F(*kb, 0 ), ldf, c_one, A(rk+1, *kb), lda ); } /* Recomputation of difficult columns. */ if( lsticc > 0 ) { // printf( " -- recompute dnorms --\n" ); magmablas_scnrm2_check(m-rk-1, n-*kb, A(rk+1,*kb), lda, &vn1[*kb], lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_scopymatrix( n-*kb, 1, &vn1[*kb], n, &vn2[*kb], n); #else magma_scopymatrix( n-*kb, 1, &vn1[*kb], n, &vn2[*kb], n); #endif } magma_free(lsticcs); return MAGMA_SUCCESS; } /* magma_claqps */
332772e9dfd192711bf5c816dbceb2b574be7981.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/datetime.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/null_mask.hpp> #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/thrust_rmm_allocator.h> namespace cudf { namespace datetime { namespace detail { enum class datetime_component { INVALID = 0, YEAR, MONTH, DAY, WEEKDAY, HOUR, MINUTE, SECOND, }; template <datetime_component Component> struct extract_component_operator { template <typename Timestamp> CUDA_DEVICE_CALLABLE int16_t operator()(Timestamp const ts) const { using namespace simt::std::chrono; auto days_since_epoch = floor<days>(ts); auto time_since_midnight = ts - days_since_epoch; if (time_since_midnight.count() < 0) { time_since_midnight += days(1); } auto hrs_ = duration_cast<hours>(time_since_midnight); auto mins_ = duration_cast<minutes>(time_since_midnight - hrs_); auto secs_ = duration_cast<seconds>(time_since_midnight - hrs_ - mins_); switch (Component) { case datetime_component::YEAR: return static_cast<int>(year_month_day(days_since_epoch).year()); case datetime_component::MONTH: return static_cast<unsigned>(year_month_day(days_since_epoch).month()); case 
datetime_component::DAY: return static_cast<unsigned>(year_month_day(days_since_epoch).day()); case datetime_component::WEEKDAY: return year_month_weekday(days_since_epoch).weekday().iso_encoding(); case datetime_component::HOUR: return hrs_.count(); case datetime_component::MINUTE: return mins_.count(); case datetime_component::SECOND: return secs_.count(); default: return 0; } } }; // Number of days until month indexed by leap year and month (0-based index) static __device__ int16_t const days_until_month[2][13] = { {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}, // For non leap years {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366} // For leap years }; CUDA_DEVICE_CALLABLE uint8_t days_in_month(simt::std::chrono::month mon, bool is_leap_year) { return days_until_month[is_leap_year][unsigned{mon}] - days_until_month[is_leap_year][unsigned{mon} - 1]; } // Round up the date to the last day of the month and return the // date only (without the time component) struct extract_last_day_of_month { template <typename Timestamp> CUDA_DEVICE_CALLABLE timestamp_D operator()(Timestamp const ts) const { using namespace simt::std::chrono; // IDEAL: does not work with CUDA10.0 due to nvcc compiler bug // cannot invoke ym_last_day.day() // const year_month_day orig_ymd(floor<days>(ts)); // const year_month_day_last ym_last_day(orig_ymd.year(), month_day_last(orig_ymd.month())); // return timestamp_D(sys_days(ym_last_day)); // Only has the days - time component is chopped off, which is what we want auto const days_since_epoch = floor<days>(ts); auto const date = year_month_day(days_since_epoch); auto const last_day = days_in_month(date.month(), date.year().is_leap()); return timestamp_D(days_since_epoch + days(last_day - static_cast<unsigned>(date.day()))); } }; // Extract the day number of the year present in the timestamp struct extract_day_num_of_year { template <typename Timestamp> CUDA_DEVICE_CALLABLE int16_t operator()(Timestamp const ts) const { 
using namespace simt::std::chrono; // Only has the days - time component is chopped off, which is what we want auto const days_since_epoch = floor<days>(ts); auto const date = year_month_day(days_since_epoch); return days_until_month[date.year().is_leap()][unsigned{date.month()} - 1] + unsigned{date.day()}; } }; // Apply the functor for every element/row in the input column to create the output column template <typename TransformFunctor, typename OutputColT> struct launch_functor { column_view input; mutable_column_view output; launch_functor(column_view inp, mutable_column_view out) : input(inp), output(out) {} template <typename Element> typename std::enable_if_t<!cudf::is_timestamp_t<Element>::value, void> operator()( hipStream_t stream) const { CUDF_FAIL("Cannot extract datetime component from non-timestamp column."); } template <typename Timestamp> typename std::enable_if_t<cudf::is_timestamp_t<Timestamp>::value, void> operator()( hipStream_t stream) const { thrust::transform(rmm::exec_policy(stream)->on(stream), input.begin<Timestamp>(), input.end<Timestamp>(), output.begin<OutputColT>(), TransformFunctor{}); } }; // Create an output column by applying the functor to every element from the input column template <typename TransformFunctor, cudf::type_id OutputColCudfT> std::unique_ptr<column> apply_datetime_op(column_view const& column, hipStream_t stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(is_timestamp(column.type()), "Column type should be timestamp"); auto size = column.size(); auto output_col_type = data_type{OutputColCudfT}; // Return an empty column if source column is empty if (size == 0) return make_empty_column(output_col_type); auto output = make_fixed_width_column( output_col_type, size, copy_bitmask(column, stream, mr), column.null_count(), stream, mr); auto launch = launch_functor<TransformFunctor, typename cudf::id_to_type_impl<OutputColCudfT>::type>{ column, static_cast<mutable_column_view>(*output)}; 
type_dispatcher(column.type(), launch, stream); return output; } struct add_calendrical_months_functor { column_view timestamp_column; column_view months_column; mutable_column_view output; add_calendrical_months_functor(column_view tsc, column_view mc, mutable_column_view out) : timestamp_column(tsc), months_column(mc), output(out) { } // std chrono implementation is copied here due to nvcc bug 2909685 // https://howardhinnant.github.io/date_algorithms.html#days_from_civil static CUDA_DEVICE_CALLABLE timestamp_D compute_sys_days(simt::std::chrono::year_month_day const& ymd) { const int yr = static_cast<int>(ymd.year()) - (ymd.month() <= simt::std::chrono::month{2}); const unsigned mth = static_cast<unsigned>(ymd.month()); const unsigned dy = static_cast<unsigned>(ymd.day()); const int era = (yr >= 0 ? yr : yr - 399) / 400; const unsigned yoe = static_cast<unsigned>(yr - era * 400); // [0, 399] const unsigned doy = (153 * (mth + (mth > 2 ? -3 : 9)) + 2) / 5 + dy - 1; // [0, 365] const unsigned doe = yoe * 365 + yoe / 4 - yoe / 100 + doy; // [0, 146096] return timestamp_D{duration_D{era * 146097 + static_cast<int>(doe) - 719468}}; } template <typename Element> typename std::enable_if_t<!cudf::is_timestamp_t<Element>::value, void> operator()( hipStream_t stream) const { CUDF_FAIL("Cannot extract datetime component from non-timestamp column."); } template <typename Timestamp> typename std::enable_if_t<cudf::is_timestamp_t<Timestamp>::value, void> operator()( hipStream_t stream) const { thrust::transform(rmm::exec_policy(stream)->on(stream), timestamp_column.begin<Timestamp>(), timestamp_column.end<Timestamp>(), months_column.begin<int16_t>(), output.begin<Timestamp>(), [] __device__(auto time_val, auto months_val) { using namespace simt::std::chrono; using duration_m = duration<int32_t, months::period>; // Get the days component from the input auto days_since_epoch = floor<days>(time_val); // Add the number of months year_month_day ymd{days_since_epoch}; ymd += 
duration_m{months_val}; // If the new date isn't valid, scale it back to the last day of the // month. // IDEAL: if (!ymd.ok()) ymd = ymd.year()/ymd.month()/last; auto month_days = days_in_month(ymd.month(), ymd.year().is_leap()); if (unsigned{ymd.day()} > month_days) ymd = ymd.year() / ymd.month() / day{month_days}; // Put back the time component to the date return // IDEAL: sys_days{ymd} + ... compute_sys_days(ymd) + (time_val - days_since_epoch); }); } }; std::unique_ptr<column> add_calendrical_months(column_view const& timestamp_column, column_view const& months_column, hipStream_t stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(is_timestamp(timestamp_column.type()), "Column type should be timestamp"); CUDF_EXPECTS(months_column.type() == data_type{type_id::INT16}, "Months column type should be INT16"); CUDF_EXPECTS(timestamp_column.size() == months_column.size(), "Timestamp and months column should be of the same size"); auto size = timestamp_column.size(); auto output_col_type = timestamp_column.type(); // Return an empty column if source column is empty if (size == 0) return make_empty_column(output_col_type); auto output_col_mask = bitmask_and(table_view({timestamp_column, months_column}), mr, stream); auto output = make_fixed_width_column( output_col_type, size, std::move(output_col_mask), cudf::UNKNOWN_NULL_COUNT, stream, mr); auto launch = add_calendrical_months_functor{ timestamp_column, months_column, static_cast<mutable_column_view>(*output)}; type_dispatcher(timestamp_column.type(), launch, stream); return output; } } // namespace detail std::unique_ptr<column> extract_year(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::YEAR>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_month(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return 
detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::MONTH>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_day(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::DAY>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_weekday(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::WEEKDAY>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_hour(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::HOUR>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_minute(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::MINUTE>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_second(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::SECOND>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> last_day_of_month(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op<detail::extract_last_day_of_month, cudf::type_id::TIMESTAMP_DAYS>(column, 0, mr); } std::unique_ptr<column> day_of_year(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op<detail::extract_day_num_of_year, cudf::type_id::INT16>( column, 0, mr); } std::unique_ptr<cudf::column> 
add_calendrical_months(cudf::column_view const& timestamp_column, cudf::column_view const& months_column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::add_calendrical_months(timestamp_column, months_column, 0, mr); } } // namespace datetime } // namespace cudf
332772e9dfd192711bf5c816dbceb2b574be7981.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/datetime.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/null_mask.hpp> #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/thrust_rmm_allocator.h> namespace cudf { namespace datetime { namespace detail { enum class datetime_component { INVALID = 0, YEAR, MONTH, DAY, WEEKDAY, HOUR, MINUTE, SECOND, }; template <datetime_component Component> struct extract_component_operator { template <typename Timestamp> CUDA_DEVICE_CALLABLE int16_t operator()(Timestamp const ts) const { using namespace simt::std::chrono; auto days_since_epoch = floor<days>(ts); auto time_since_midnight = ts - days_since_epoch; if (time_since_midnight.count() < 0) { time_since_midnight += days(1); } auto hrs_ = duration_cast<hours>(time_since_midnight); auto mins_ = duration_cast<minutes>(time_since_midnight - hrs_); auto secs_ = duration_cast<seconds>(time_since_midnight - hrs_ - mins_); switch (Component) { case datetime_component::YEAR: return static_cast<int>(year_month_day(days_since_epoch).year()); case datetime_component::MONTH: return static_cast<unsigned>(year_month_day(days_since_epoch).month()); case datetime_component::DAY: return 
static_cast<unsigned>(year_month_day(days_since_epoch).day()); case datetime_component::WEEKDAY: return year_month_weekday(days_since_epoch).weekday().iso_encoding(); case datetime_component::HOUR: return hrs_.count(); case datetime_component::MINUTE: return mins_.count(); case datetime_component::SECOND: return secs_.count(); default: return 0; } } }; // Number of days until month indexed by leap year and month (0-based index) static __device__ int16_t const days_until_month[2][13] = { {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}, // For non leap years {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366} // For leap years }; CUDA_DEVICE_CALLABLE uint8_t days_in_month(simt::std::chrono::month mon, bool is_leap_year) { return days_until_month[is_leap_year][unsigned{mon}] - days_until_month[is_leap_year][unsigned{mon} - 1]; } // Round up the date to the last day of the month and return the // date only (without the time component) struct extract_last_day_of_month { template <typename Timestamp> CUDA_DEVICE_CALLABLE timestamp_D operator()(Timestamp const ts) const { using namespace simt::std::chrono; // IDEAL: does not work with CUDA10.0 due to nvcc compiler bug // cannot invoke ym_last_day.day() // const year_month_day orig_ymd(floor<days>(ts)); // const year_month_day_last ym_last_day(orig_ymd.year(), month_day_last(orig_ymd.month())); // return timestamp_D(sys_days(ym_last_day)); // Only has the days - time component is chopped off, which is what we want auto const days_since_epoch = floor<days>(ts); auto const date = year_month_day(days_since_epoch); auto const last_day = days_in_month(date.month(), date.year().is_leap()); return timestamp_D(days_since_epoch + days(last_day - static_cast<unsigned>(date.day()))); } }; // Extract the day number of the year present in the timestamp struct extract_day_num_of_year { template <typename Timestamp> CUDA_DEVICE_CALLABLE int16_t operator()(Timestamp const ts) const { using namespace 
simt::std::chrono; // Only has the days - time component is chopped off, which is what we want auto const days_since_epoch = floor<days>(ts); auto const date = year_month_day(days_since_epoch); return days_until_month[date.year().is_leap()][unsigned{date.month()} - 1] + unsigned{date.day()}; } }; // Apply the functor for every element/row in the input column to create the output column template <typename TransformFunctor, typename OutputColT> struct launch_functor { column_view input; mutable_column_view output; launch_functor(column_view inp, mutable_column_view out) : input(inp), output(out) {} template <typename Element> typename std::enable_if_t<!cudf::is_timestamp_t<Element>::value, void> operator()( cudaStream_t stream) const { CUDF_FAIL("Cannot extract datetime component from non-timestamp column."); } template <typename Timestamp> typename std::enable_if_t<cudf::is_timestamp_t<Timestamp>::value, void> operator()( cudaStream_t stream) const { thrust::transform(rmm::exec_policy(stream)->on(stream), input.begin<Timestamp>(), input.end<Timestamp>(), output.begin<OutputColT>(), TransformFunctor{}); } }; // Create an output column by applying the functor to every element from the input column template <typename TransformFunctor, cudf::type_id OutputColCudfT> std::unique_ptr<column> apply_datetime_op(column_view const& column, cudaStream_t stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(is_timestamp(column.type()), "Column type should be timestamp"); auto size = column.size(); auto output_col_type = data_type{OutputColCudfT}; // Return an empty column if source column is empty if (size == 0) return make_empty_column(output_col_type); auto output = make_fixed_width_column( output_col_type, size, copy_bitmask(column, stream, mr), column.null_count(), stream, mr); auto launch = launch_functor<TransformFunctor, typename cudf::id_to_type_impl<OutputColCudfT>::type>{ column, static_cast<mutable_column_view>(*output)}; type_dispatcher(column.type(), launch, 
stream); return output; } struct add_calendrical_months_functor { column_view timestamp_column; column_view months_column; mutable_column_view output; add_calendrical_months_functor(column_view tsc, column_view mc, mutable_column_view out) : timestamp_column(tsc), months_column(mc), output(out) { } // std chrono implementation is copied here due to nvcc bug 2909685 // https://howardhinnant.github.io/date_algorithms.html#days_from_civil static CUDA_DEVICE_CALLABLE timestamp_D compute_sys_days(simt::std::chrono::year_month_day const& ymd) { const int yr = static_cast<int>(ymd.year()) - (ymd.month() <= simt::std::chrono::month{2}); const unsigned mth = static_cast<unsigned>(ymd.month()); const unsigned dy = static_cast<unsigned>(ymd.day()); const int era = (yr >= 0 ? yr : yr - 399) / 400; const unsigned yoe = static_cast<unsigned>(yr - era * 400); // [0, 399] const unsigned doy = (153 * (mth + (mth > 2 ? -3 : 9)) + 2) / 5 + dy - 1; // [0, 365] const unsigned doe = yoe * 365 + yoe / 4 - yoe / 100 + doy; // [0, 146096] return timestamp_D{duration_D{era * 146097 + static_cast<int>(doe) - 719468}}; } template <typename Element> typename std::enable_if_t<!cudf::is_timestamp_t<Element>::value, void> operator()( cudaStream_t stream) const { CUDF_FAIL("Cannot extract datetime component from non-timestamp column."); } template <typename Timestamp> typename std::enable_if_t<cudf::is_timestamp_t<Timestamp>::value, void> operator()( cudaStream_t stream) const { thrust::transform(rmm::exec_policy(stream)->on(stream), timestamp_column.begin<Timestamp>(), timestamp_column.end<Timestamp>(), months_column.begin<int16_t>(), output.begin<Timestamp>(), [] __device__(auto time_val, auto months_val) { using namespace simt::std::chrono; using duration_m = duration<int32_t, months::period>; // Get the days component from the input auto days_since_epoch = floor<days>(time_val); // Add the number of months year_month_day ymd{days_since_epoch}; ymd += duration_m{months_val}; // If the new date 
isn't valid, scale it back to the last day of the // month. // IDEAL: if (!ymd.ok()) ymd = ymd.year()/ymd.month()/last; auto month_days = days_in_month(ymd.month(), ymd.year().is_leap()); if (unsigned{ymd.day()} > month_days) ymd = ymd.year() / ymd.month() / day{month_days}; // Put back the time component to the date return // IDEAL: sys_days{ymd} + ... compute_sys_days(ymd) + (time_val - days_since_epoch); }); } }; std::unique_ptr<column> add_calendrical_months(column_view const& timestamp_column, column_view const& months_column, cudaStream_t stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(is_timestamp(timestamp_column.type()), "Column type should be timestamp"); CUDF_EXPECTS(months_column.type() == data_type{type_id::INT16}, "Months column type should be INT16"); CUDF_EXPECTS(timestamp_column.size() == months_column.size(), "Timestamp and months column should be of the same size"); auto size = timestamp_column.size(); auto output_col_type = timestamp_column.type(); // Return an empty column if source column is empty if (size == 0) return make_empty_column(output_col_type); auto output_col_mask = bitmask_and(table_view({timestamp_column, months_column}), mr, stream); auto output = make_fixed_width_column( output_col_type, size, std::move(output_col_mask), cudf::UNKNOWN_NULL_COUNT, stream, mr); auto launch = add_calendrical_months_functor{ timestamp_column, months_column, static_cast<mutable_column_view>(*output)}; type_dispatcher(timestamp_column.type(), launch, stream); return output; } } // namespace detail std::unique_ptr<column> extract_year(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::YEAR>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_month(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< 
detail::extract_component_operator<detail::datetime_component::MONTH>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_day(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::DAY>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_weekday(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::WEEKDAY>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_hour(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::HOUR>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_minute(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::MINUTE>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> extract_second(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op< detail::extract_component_operator<detail::datetime_component::SECOND>, cudf::type_id::INT16>(column, 0, mr); } std::unique_ptr<column> last_day_of_month(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op<detail::extract_last_day_of_month, cudf::type_id::TIMESTAMP_DAYS>(column, 0, mr); } std::unique_ptr<column> day_of_year(column_view const& column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::apply_datetime_op<detail::extract_day_num_of_year, cudf::type_id::INT16>( column, 0, mr); } std::unique_ptr<cudf::column> add_calendrical_months(cudf::column_view 
const& timestamp_column, cudf::column_view const& months_column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::add_calendrical_months(timestamp_column, months_column, 0, mr); } } // namespace datetime } // namespace cudf
ed3f46e77a402242c3fda34a51052c877bf053af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "histogram_utils.cuh" #include <hip/hip_cooperative_groups.h> #include <library/cpp/cuda/wrappers/arch.cuh> #include <library/cpp/cuda/wrappers/cub_include.h> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include _CUB_INCLUDE(cub/warp/warp_scan.cuh) using namespace cooperative_groups; namespace NKernel { __global__ void CopyHistogramsImpl(const ui32* leftLeaves, const ui32* rightLeaves, ui32 numStats, ui32 binFeaturesInHist, float* histograms) { const ui32 leftLeafId = __ldg(leftLeaves + blockIdx.y); const ui32 rightLeafId = __ldg(rightLeaves + blockIdx.y); ui32 i = blockIdx.x * blockDim.x + threadIdx.x; float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats; float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats; const ui32 histSize = binFeaturesInHist * numStats; while (i < histSize) { WriteThrough(dstHist + i, __ldg(srcHist + i)); i += gridDim.x * blockDim.x; } } void CopyHistograms(const ui32* leftLeaves, const ui32* rightLeaves, const ui32 leavesCount, ui32 numStats, ui32 binFeaturesInHist, float* histograms, TCudaStream stream ) { const ui32 histSize = numStats * binFeaturesInHist; ui32 blockSize = 256; dim3 numBlocks; numBlocks.z = 1; numBlocks.y = leavesCount; numBlocks.x = CeilDivide(histSize, blockSize); if (numBlocks.x) { hipLaunchKernelGGL(( CopyHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms); } } __global__ void CopyHistogramImpl(const ui32 leftLeafId, const ui32 rightLeafId, ui32 numStats, ui32 binFeaturesInHist, float* histograms) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats; float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats; const ui32 histSize = binFeaturesInHist * 
numStats; while (i < histSize) { WriteThrough(dstHist + i, __ldg(srcHist + i)); i += gridDim.x * blockDim.x; } } void CopyHistogram(const ui32 leftLeaves, const ui32 rightLeaves, ui32 numStats, ui32 binFeaturesInHist, float* histograms, TCudaStream stream ) { const ui32 histSize = numStats * binFeaturesInHist; ui32 blockSize = 256; dim3 numBlocks; numBlocks.z = 1; numBlocks.y = 1; numBlocks.x = CeilDivide(histSize, blockSize); if (numBlocks.x) { hipLaunchKernelGGL(( CopyHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms); } } //write histogram block to histograms __global__ void WriteReducesHistogramsImpl(int histBlockOffset, int binFeaturesInBlock, const ui32* histogramIds, const float* blockHistogram, const int binFeatureCount, float* dstHistogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int leafId = blockIdx.y; const int statId = blockIdx.z; const size_t statCount = gridDim.z; const int dstId = histogramIds[blockIdx.y]; if (binFeatureId < binFeaturesInBlock) { blockHistogram += binFeatureId; blockHistogram += binFeaturesInBlock * statId; blockHistogram += leafId * binFeaturesInBlock * statCount; const float val = __ldg(blockHistogram); dstHistogram += dstId * binFeatureCount * statCount; dstHistogram += statId * binFeatureCount; dstHistogram += histBlockOffset + binFeatureId; dstHistogram[0] = val; } } void WriteReducesHistograms(int blockOffset, int histBlockSize, const ui32* histogramIds, ui32 leafCount, ui32 statCount, const float* blockHistogram, const int binFeatureCount, float* dstHistogram, TCudaStream stream) { const int blockSize = 128; dim3 numBlocks; numBlocks.x = CeilDivide(histBlockSize, blockSize); numBlocks.y = leafCount; numBlocks.z = statCount; if (histBlockSize && leafCount && statCount) { hipLaunchKernelGGL(( WriteReducesHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, blockOffset, histBlockSize, histogramIds, blockHistogram, 
binFeatureCount, dstHistogram); } } //write histogram block to histograms __global__ void WriteReducesHistogramImpl(int histBlockOffset, int binFeaturesInBlock, const ui32 dstId, const float* blockHistogram, const int binFeatureCount, float* dstHistogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int leafId = 0; const int statId = blockIdx.z; const size_t statCount = gridDim.z; if (binFeatureId < binFeaturesInBlock) { blockHistogram += binFeatureId; blockHistogram += binFeaturesInBlock * statId; blockHistogram += leafId * binFeaturesInBlock * statCount; const float val = __ldg(blockHistogram); dstHistogram += dstId * binFeatureCount * statCount; dstHistogram += statId * binFeatureCount; dstHistogram += histBlockOffset + binFeatureId; dstHistogram[0] = val; } } void WriteReducesHistogram(int blockOffset, int histBlockSize, const ui32 histogramId, ui32 statCount, const float* blockHistogram, const int binFeatureCount, float* dstHistogram, TCudaStream stream) { const int blockSize = 128; dim3 numBlocks; numBlocks.x = CeilDivide(histBlockSize, blockSize); numBlocks.y = 1; numBlocks.z = statCount; if (histBlockSize && statCount) { hipLaunchKernelGGL(( WriteReducesHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, blockOffset, histBlockSize, histogramId, blockHistogram, binFeatureCount, dstHistogram); } } //write histogram block to histograms __global__ void ZeroHistogramsImpl(const ui32* histIds, const int binFeatureCount, float* dstHistogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int statId = blockIdx.z; const size_t statCount = gridDim.z; const int dstHist = histIds[blockIdx.y]; if (binFeatureId < binFeatureCount) { dstHistogram += dstHist * binFeatureCount * statCount; dstHistogram += statId * binFeatureCount; WriteThrough(dstHistogram + binFeatureId, 0.0f); } } void ZeroHistograms(const ui32* histIds, ui32 idsCount, ui32 statCount, const int binFeatureCount, float* dstHistogram, TCudaStream 
stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = CeilDivide(binFeatureCount, blockSize); numBlocks.y = idsCount; numBlocks.z = statCount; if (idsCount && statCount) { hipLaunchKernelGGL(( ZeroHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, histIds, binFeatureCount, dstHistogram); } } //write histogram block to histograms __global__ void ZeroHistogramImpl(const ui32 dstHist, const int binFeatureCount, float* dstHistogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int statId = blockIdx.z; const size_t statCount = gridDim.z; if (binFeatureId < binFeatureCount) { dstHistogram += dstHist * binFeatureCount * statCount; dstHistogram += statId * binFeatureCount; WriteThrough(dstHistogram + binFeatureId, 0.0f); } } void ZeroHistogram(const ui32 histId, ui32 statCount, const int binFeatureCount, float* dstHistogram, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = CeilDivide(binFeatureCount, blockSize); numBlocks.y = 1; numBlocks.z = statCount; if (statCount) { hipLaunchKernelGGL(( ZeroHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, histId, binFeatureCount, dstHistogram); } } //write histogram block to histograms __global__ void SubstractHistogramsImpl(const ui32* fromIds, const ui32* whatIds, const int binFeatureCount, float* histogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int fromId = __ldg(fromIds + blockIdx.y); const int whatId = __ldg(whatIds + blockIdx.y); const int statId = blockIdx.z; const size_t statCount = gridDim.z; histogram += binFeatureId; if (binFeatureId < binFeatureCount) { const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount; const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount; float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset); if (statId == 0) { newVal = max(newVal, 0.0f); } WriteThrough(histogram + fromOffset, newVal); } 
} void SubstractHistgorams(const ui32* fromIds, const ui32* whatIds, const int idsCount, const int statCount, const int binFeatureCount, float* dstHistogram, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = CeilDivide(binFeatureCount, blockSize); numBlocks.y = idsCount; numBlocks.z = statCount; if (idsCount && statCount) { hipLaunchKernelGGL(( SubstractHistogramsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, fromIds, whatIds, binFeatureCount, dstHistogram); } } //write histogram block to histograms __global__ void SubstractHistogramImpl(const ui32 fromId, const ui32 whatId, const int binFeatureCount, float* histogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int statId = blockIdx.z; const size_t statCount = gridDim.z; histogram += binFeatureId; if (binFeatureId < binFeatureCount) { const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount; const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount; float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset); if (statId == 0) { newVal = max(newVal, 0.0f); } WriteThrough(histogram + fromOffset, newVal); } } void SubstractHistgoram(const ui32 fromIds, const ui32 whatIds, const int statCount, const int binFeatureCount, float* dstHistogram, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = CeilDivide(binFeatureCount, blockSize); numBlocks.y = 1; numBlocks.z = statCount; if (statCount) { hipLaunchKernelGGL(( SubstractHistogramImpl), dim3(numBlocks), dim3(blockSize), 0, stream, fromIds, whatIds, binFeatureCount, dstHistogram); } } template <int BlockSize> __global__ void ScanHistogramsImpl(const TBinarizedFeature* features, int featureCount, const ui32* histIds, const int binFeatureCount, float* histograms) { const int featuresPerBlock = BlockSize / 32; using WarpScan = cub::WarpScan<double>; __shared__ typename WarpScan::TempStorage 
tempStorage[featuresPerBlock]; const int warpId = threadIdx.x / 32; const int threadIdInWarp = threadIdx.x & 31; const int featureId = blockIdx.x * featuresPerBlock + warpId; const int histId = histIds[blockIdx.y]; const int statId = blockIdx.z; const ui64 statCount = gridDim.z; if (featureId < featureCount) { features += featureId; const bool skipFeature = features->OneHotFeature || (features->Folds <= 1); if (!skipFeature) { histograms += histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex; const int folds = features->Folds; const int n = ((folds + 31) / 32) * 32; double prefixSum = 0; for (int binOffset = 0; binOffset < n; binOffset += 32) { const double val = (binOffset + threadIdInWarp) < folds ? histograms[(binOffset + threadIdInWarp)] : 0.0f; double sum = 0; __syncwarp(); WarpScan(tempStorage[warpId]).InclusiveSum(val, sum); __syncwarp(); sum += prefixSum; if ((binOffset + threadIdInWarp) < folds) { histograms[binOffset + threadIdInWarp] = sum; } if ((binOffset + 32) < n) { prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff); } } } } }; void ScanHistograms( const TBinarizedFeature* features, int fCount, const ui32* ids, const int idsCount, const int statCount, const int binFeatureCount, float* histograms, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = CeilDivide(fCount * 32, blockSize); numBlocks.y = idsCount; numBlocks.z = statCount; if (idsCount && statCount) { hipLaunchKernelGGL(( ScanHistogramsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, features, fCount, ids, binFeatureCount, histograms); } } template <int BlockSize> __global__ void ScanHistogramImpl(const TBinarizedFeature* features, int featureCount, ui32 histId, const int binFeatureCount, float* histograms) { const int featuresPerBlock = BlockSize / 32; using WarpScan = cub::WarpScan<double>; __shared__ typename WarpScan::TempStorage tempStorage[featuresPerBlock]; const int warpId = threadIdx.x / 32; 
const int threadIdInWarp = threadIdx.x & 31; const int featureId = blockIdx.x * featuresPerBlock + warpId; const int statId = blockIdx.z; const ui64 statCount = gridDim.z; if (featureId < featureCount) { features += featureId; const bool skipFeature = features->OneHotFeature || (features->Folds <= 1); if (!skipFeature) { histograms += histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex; const int folds = features->Folds; const int n = ((folds + 31) / 32) * 32; double prefixSum = 0; for (int binOffset = 0; binOffset < n; binOffset += 32) { const double val = (binOffset + threadIdInWarp) < folds ? histograms[(binOffset + threadIdInWarp)] : 0.0f; double sum = 0; __syncwarp(); WarpScan(tempStorage[warpId]).InclusiveSum(val, sum); __syncwarp(); sum += prefixSum; if ((binOffset + threadIdInWarp) < folds) { histograms[binOffset + threadIdInWarp] = sum; } if ((binOffset + 32) < n) { prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff); } } } } }; void ScanHistogram( const TBinarizedFeature* features, int fCount, ui32 id, const int statCount, const int binFeatureCount, float* histograms, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = CeilDivide(fCount * 32, blockSize); numBlocks.y = 1; numBlocks.z = statCount; if (statCount) { hipLaunchKernelGGL(( ScanHistogramImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, features, fCount, id, binFeatureCount, histograms); } } }
ed3f46e77a402242c3fda34a51052c877bf053af.cu
#include "histogram_utils.cuh" #include <cooperative_groups.h> #include <library/cpp/cuda/wrappers/arch.cuh> #include <library/cpp/cuda/wrappers/cub_include.h> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include _CUB_INCLUDE(cub/warp/warp_scan.cuh) using namespace cooperative_groups; namespace NKernel { __global__ void CopyHistogramsImpl(const ui32* leftLeaves, const ui32* rightLeaves, ui32 numStats, ui32 binFeaturesInHist, float* histograms) { const ui32 leftLeafId = __ldg(leftLeaves + blockIdx.y); const ui32 rightLeafId = __ldg(rightLeaves + blockIdx.y); ui32 i = blockIdx.x * blockDim.x + threadIdx.x; float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats; float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats; const ui32 histSize = binFeaturesInHist * numStats; while (i < histSize) { WriteThrough(dstHist + i, __ldg(srcHist + i)); i += gridDim.x * blockDim.x; } } void CopyHistograms(const ui32* leftLeaves, const ui32* rightLeaves, const ui32 leavesCount, ui32 numStats, ui32 binFeaturesInHist, float* histograms, TCudaStream stream ) { const ui32 histSize = numStats * binFeaturesInHist; ui32 blockSize = 256; dim3 numBlocks; numBlocks.z = 1; numBlocks.y = leavesCount; numBlocks.x = CeilDivide(histSize, blockSize); if (numBlocks.x) { CopyHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms); } } __global__ void CopyHistogramImpl(const ui32 leftLeafId, const ui32 rightLeafId, ui32 numStats, ui32 binFeaturesInHist, float* histograms) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; float* srcHist = histograms + leftLeafId * binFeaturesInHist * numStats; float* dstHist = histograms + rightLeafId * binFeaturesInHist * numStats; const ui32 histSize = binFeaturesInHist * numStats; while (i < histSize) { WriteThrough(dstHist + i, __ldg(srcHist + i)); i += gridDim.x * blockDim.x; } } void 
CopyHistogram(const ui32 leftLeaves, const ui32 rightLeaves, ui32 numStats, ui32 binFeaturesInHist, float* histograms, TCudaStream stream ) { const ui32 histSize = numStats * binFeaturesInHist; ui32 blockSize = 256; dim3 numBlocks; numBlocks.z = 1; numBlocks.y = 1; numBlocks.x = CeilDivide(histSize, blockSize); if (numBlocks.x) { CopyHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(leftLeaves, rightLeaves, numStats, binFeaturesInHist, histograms); } } //write histogram block to histograms __global__ void WriteReducesHistogramsImpl(int histBlockOffset, int binFeaturesInBlock, const ui32* histogramIds, const float* blockHistogram, const int binFeatureCount, float* dstHistogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int leafId = blockIdx.y; const int statId = blockIdx.z; const size_t statCount = gridDim.z; const int dstId = histogramIds[blockIdx.y]; if (binFeatureId < binFeaturesInBlock) { blockHistogram += binFeatureId; blockHistogram += binFeaturesInBlock * statId; blockHistogram += leafId * binFeaturesInBlock * statCount; const float val = __ldg(blockHistogram); dstHistogram += dstId * binFeatureCount * statCount; dstHistogram += statId * binFeatureCount; dstHistogram += histBlockOffset + binFeatureId; dstHistogram[0] = val; } } void WriteReducesHistograms(int blockOffset, int histBlockSize, const ui32* histogramIds, ui32 leafCount, ui32 statCount, const float* blockHistogram, const int binFeatureCount, float* dstHistogram, TCudaStream stream) { const int blockSize = 128; dim3 numBlocks; numBlocks.x = CeilDivide(histBlockSize, blockSize); numBlocks.y = leafCount; numBlocks.z = statCount; if (histBlockSize && leafCount && statCount) { WriteReducesHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(blockOffset, histBlockSize, histogramIds, blockHistogram, binFeatureCount, dstHistogram); } } //write histogram block to histograms __global__ void WriteReducesHistogramImpl(int histBlockOffset, int binFeaturesInBlock, const ui32 dstId, 
const float* blockHistogram, const int binFeatureCount, float* dstHistogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int leafId = 0; const int statId = blockIdx.z; const size_t statCount = gridDim.z; if (binFeatureId < binFeaturesInBlock) { blockHistogram += binFeatureId; blockHistogram += binFeaturesInBlock * statId; blockHistogram += leafId * binFeaturesInBlock * statCount; const float val = __ldg(blockHistogram); dstHistogram += dstId * binFeatureCount * statCount; dstHistogram += statId * binFeatureCount; dstHistogram += histBlockOffset + binFeatureId; dstHistogram[0] = val; } } void WriteReducesHistogram(int blockOffset, int histBlockSize, const ui32 histogramId, ui32 statCount, const float* blockHistogram, const int binFeatureCount, float* dstHistogram, TCudaStream stream) { const int blockSize = 128; dim3 numBlocks; numBlocks.x = CeilDivide(histBlockSize, blockSize); numBlocks.y = 1; numBlocks.z = statCount; if (histBlockSize && statCount) { WriteReducesHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(blockOffset, histBlockSize, histogramId, blockHistogram, binFeatureCount, dstHistogram); } } //write histogram block to histograms __global__ void ZeroHistogramsImpl(const ui32* histIds, const int binFeatureCount, float* dstHistogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int statId = blockIdx.z; const size_t statCount = gridDim.z; const int dstHist = histIds[blockIdx.y]; if (binFeatureId < binFeatureCount) { dstHistogram += dstHist * binFeatureCount * statCount; dstHistogram += statId * binFeatureCount; WriteThrough(dstHistogram + binFeatureId, 0.0f); } } void ZeroHistograms(const ui32* histIds, ui32 idsCount, ui32 statCount, const int binFeatureCount, float* dstHistogram, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = CeilDivide(binFeatureCount, blockSize); numBlocks.y = idsCount; numBlocks.z = statCount; if (idsCount && statCount) { 
ZeroHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(histIds, binFeatureCount, dstHistogram); } } //write histogram block to histograms __global__ void ZeroHistogramImpl(const ui32 dstHist, const int binFeatureCount, float* dstHistogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int statId = blockIdx.z; const size_t statCount = gridDim.z; if (binFeatureId < binFeatureCount) { dstHistogram += dstHist * binFeatureCount * statCount; dstHistogram += statId * binFeatureCount; WriteThrough(dstHistogram + binFeatureId, 0.0f); } } void ZeroHistogram(const ui32 histId, ui32 statCount, const int binFeatureCount, float* dstHistogram, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = CeilDivide(binFeatureCount, blockSize); numBlocks.y = 1; numBlocks.z = statCount; if (statCount) { ZeroHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(histId, binFeatureCount, dstHistogram); } } //write histogram block to histograms __global__ void SubstractHistogramsImpl(const ui32* fromIds, const ui32* whatIds, const int binFeatureCount, float* histogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int fromId = __ldg(fromIds + blockIdx.y); const int whatId = __ldg(whatIds + blockIdx.y); const int statId = blockIdx.z; const size_t statCount = gridDim.z; histogram += binFeatureId; if (binFeatureId < binFeatureCount) { const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount; const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount; float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset); if (statId == 0) { newVal = max(newVal, 0.0f); } WriteThrough(histogram + fromOffset, newVal); } } void SubstractHistgorams(const ui32* fromIds, const ui32* whatIds, const int idsCount, const int statCount, const int binFeatureCount, float* dstHistogram, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = 
CeilDivide(binFeatureCount, blockSize); numBlocks.y = idsCount; numBlocks.z = statCount; if (idsCount && statCount) { SubstractHistogramsImpl<<<numBlocks, blockSize, 0, stream>>>(fromIds, whatIds, binFeatureCount, dstHistogram); } } //write histogram block to histograms __global__ void SubstractHistogramImpl(const ui32 fromId, const ui32 whatId, const int binFeatureCount, float* histogram) { const int binFeatureId = blockIdx.x * blockDim.x + threadIdx.x; const int statId = blockIdx.z; const size_t statCount = gridDim.z; histogram += binFeatureId; if (binFeatureId < binFeatureCount) { const ui64 fromOffset = fromId * binFeatureCount * statCount + statId * binFeatureCount; const ui64 whatOffset = whatId * binFeatureCount * statCount + statId * binFeatureCount; float newVal = __ldg(histogram + fromOffset) - __ldg(histogram + whatOffset); if (statId == 0) { newVal = max(newVal, 0.0f); } WriteThrough(histogram + fromOffset, newVal); } } void SubstractHistgoram(const ui32 fromIds, const ui32 whatIds, const int statCount, const int binFeatureCount, float* dstHistogram, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = CeilDivide(binFeatureCount, blockSize); numBlocks.y = 1; numBlocks.z = statCount; if (statCount) { SubstractHistogramImpl<<<numBlocks, blockSize, 0, stream>>>(fromIds, whatIds, binFeatureCount, dstHistogram); } } template <int BlockSize> __global__ void ScanHistogramsImpl(const TBinarizedFeature* features, int featureCount, const ui32* histIds, const int binFeatureCount, float* histograms) { const int featuresPerBlock = BlockSize / 32; using WarpScan = cub::WarpScan<double>; __shared__ typename WarpScan::TempStorage tempStorage[featuresPerBlock]; const int warpId = threadIdx.x / 32; const int threadIdInWarp = threadIdx.x & 31; const int featureId = blockIdx.x * featuresPerBlock + warpId; const int histId = histIds[blockIdx.y]; const int statId = blockIdx.z; const ui64 statCount = gridDim.z; if (featureId < featureCount) { 
features += featureId; const bool skipFeature = features->OneHotFeature || (features->Folds <= 1); if (!skipFeature) { histograms += histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex; const int folds = features->Folds; const int n = ((folds + 31) / 32) * 32; double prefixSum = 0; for (int binOffset = 0; binOffset < n; binOffset += 32) { const double val = (binOffset + threadIdInWarp) < folds ? histograms[(binOffset + threadIdInWarp)] : 0.0f; double sum = 0; __syncwarp(); WarpScan(tempStorage[warpId]).InclusiveSum(val, sum); __syncwarp(); sum += prefixSum; if ((binOffset + threadIdInWarp) < folds) { histograms[binOffset + threadIdInWarp] = sum; } if ((binOffset + 32) < n) { prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff); } } } } }; void ScanHistograms( const TBinarizedFeature* features, int fCount, const ui32* ids, const int idsCount, const int statCount, const int binFeatureCount, float* histograms, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = CeilDivide(fCount * 32, blockSize); numBlocks.y = idsCount; numBlocks.z = statCount; if (idsCount && statCount) { ScanHistogramsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(features, fCount, ids, binFeatureCount, histograms); } } template <int BlockSize> __global__ void ScanHistogramImpl(const TBinarizedFeature* features, int featureCount, ui32 histId, const int binFeatureCount, float* histograms) { const int featuresPerBlock = BlockSize / 32; using WarpScan = cub::WarpScan<double>; __shared__ typename WarpScan::TempStorage tempStorage[featuresPerBlock]; const int warpId = threadIdx.x / 32; const int threadIdInWarp = threadIdx.x & 31; const int featureId = blockIdx.x * featuresPerBlock + warpId; const int statId = blockIdx.z; const ui64 statCount = gridDim.z; if (featureId < featureCount) { features += featureId; const bool skipFeature = features->OneHotFeature || (features->Folds <= 1); if (!skipFeature) { histograms += histId 
* binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex; const int folds = features->Folds; const int n = ((folds + 31) / 32) * 32; double prefixSum = 0; for (int binOffset = 0; binOffset < n; binOffset += 32) { const double val = (binOffset + threadIdInWarp) < folds ? histograms[(binOffset + threadIdInWarp)] : 0.0f; double sum = 0; __syncwarp(); WarpScan(tempStorage[warpId]).InclusiveSum(val, sum); __syncwarp(); sum += prefixSum; if ((binOffset + threadIdInWarp) < folds) { histograms[binOffset + threadIdInWarp] = sum; } if ((binOffset + 32) < n) { prefixSum = cub::ShuffleIndex<32, double>(sum, 31, 0xffffffff); } } } } }; void ScanHistogram( const TBinarizedFeature* features, int fCount, ui32 id, const int statCount, const int binFeatureCount, float* histograms, TCudaStream stream) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = CeilDivide(fCount * 32, blockSize); numBlocks.y = 1; numBlocks.z = statCount; if (statCount) { ScanHistogramImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(features, fCount, id, binFeatureCount, histograms); } } }
df21e7a518a7f2663a3cd03e0ce870dbe6caa6b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/distance.h> #include <thrust/binary_search.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/count.h> #include <thrust/for_each.h> #include <thrust/tabulate.h> #include <cub/device/device_copy.cuh> #include <cstdint> #include <random> #include "thrust/scan.h" #include <hiprand/hiprand.h> #include <nvbench_helper.cuh> class generator_t { public: generator_t(); ~generator_t(); template <typename T> void operator()(seed_t seed, thrust::device_vector<T> &data, bit_entropy entropy, T min = std::numeric_limits<T>::lowest(), T max = std::numeric_limits<T>::max()); template <typename T> thrust::device_vector<T> power_law_segment_offsets(seed_t seed, std::size_t total_elements, std::size_t total_segments); double *distribution(); hiprandGenerator_t &gen() { return m_gen; } double *prepare_random_generator(seed_t seed, std::size_t num_items); double *prepare_lognormal_random_generator(seed_t seed, std::size_t num_items); private: hiprandGenerator_t m_gen; thrust::device_vector<double> m_distribution; }; template <typename T> struct random_to_item_t { double m_min; double m_max; __host__ __device__ random_to_item_t(T min, T max) : m_min(static_cast<double>(min)) , m_max(static_cast<double>(max)) {} __host__ __device__ T operator()(double random_value) { return static_cast<T>((m_max - m_min) * random_value + m_min); } }; generator_t::generator_t() { hiprandCreateGenerator(&m_gen, HIPRAND_RNG_PSEUDO_DEFAULT); } generator_t::~generator_t() { hiprandDestroyGenerator(m_gen); } double *generator_t::distribution() { return thrust::raw_pointer_cast(m_distribution.data()); } double *generator_t::prepare_random_generator(seed_t seed, std::size_t num_items) { hiprandSetPseudoRandomGeneratorSeed(m_gen, 
seed.get()); m_distribution.resize(num_items); hiprandGenerateUniformDouble(m_gen, this->distribution(), num_items); return this->distribution(); } double *generator_t::prepare_lognormal_random_generator(seed_t seed, std::size_t num_segments) { hiprandSetPseudoRandomGeneratorSeed(m_gen, seed.get()); m_distribution.resize(num_segments); const double mean = 3.0; const double sigma = 1.2; hiprandGenerateLogNormalDouble(m_gen, this->distribution(), num_segments, mean, sigma); return this->distribution(); } template <class T> __global__ void and_kernel(T *d_in, T *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { d_in[i] = d_in[i] & d_tmp[i]; } } __global__ void and_kernel(float *d_in, float *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { std::uint32_t result = reinterpret_cast<std::uint32_t &>(d_in[i]) & reinterpret_cast<std::uint32_t &>(d_tmp[i]); d_in[i] = reinterpret_cast<float &>(result); } } __global__ void and_kernel(double *d_in, double *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { std::uint64_t result = reinterpret_cast<std::uint64_t &>(d_in[i]) & reinterpret_cast<std::uint64_t &>(d_tmp[i]); d_in[i] = reinterpret_cast<double &>(result); } } __global__ void and_kernel(complex *d_in, complex *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { double in_real = d_in[i].real(); double in_imag = d_in[i].imag(); double tmp_real = d_tmp[i].real(); double tmp_imag = d_tmp[i].imag(); std::uint64_t result_real = reinterpret_cast<std::uint64_t &>(in_real) & reinterpret_cast<std::uint64_t &>(tmp_real); std::uint64_t result_imag = reinterpret_cast<std::uint64_t &>(in_imag) & reinterpret_cast<std::uint64_t &>(tmp_imag); d_in[i].real(reinterpret_cast<double &>(result_real)); d_in[i].imag(reinterpret_cast<double &>(result_imag)); } } __global__ void set_real_kernel(complex *d_in, 
complex min, complex max, double *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { d_in[i].real(random_to_item_t<double>{min.real(), max.real()}(d_tmp[i])); } } __global__ void set_imag_kernel(complex *d_in, complex min, complex max, double *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { d_in[i].imag(random_to_item_t<double>{min.imag(), max.imag()}(d_tmp[i])); } } template <class T> struct lognormal_transformer_t { std::size_t total_elements; double sum; __device__ T operator()(double val) { return floor(val * total_elements / sum); } }; template <class T> __global__ void lognormal_adjust_kernel(T *segment_sizes, std::size_t diff) { const unsigned tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < diff) { segment_sizes[tid]++; } } template <class T> thrust::device_vector<T> generator_t::power_law_segment_offsets(seed_t seed, std::size_t total_elements, std::size_t total_segments) { thrust::device_vector<T> segment_sizes(total_segments + 1); prepare_lognormal_random_generator(seed, total_segments); if (thrust::count(m_distribution.begin(), m_distribution.end(), 0.0) == total_segments) { thrust::fill_n(m_distribution.begin(), total_segments, 1.0); } double sum = thrust::reduce(m_distribution.begin(), m_distribution.end()); thrust::transform(m_distribution.begin(), m_distribution.end(), segment_sizes.begin(), lognormal_transformer_t<T>{total_elements, sum}); const int diff = total_elements - thrust::reduce(segment_sizes.begin(), segment_sizes.end()); const int block_size = 256; const int grid_size = (std::abs(diff) + block_size - 1) / block_size; T *d_segment_sizes = thrust::raw_pointer_cast(segment_sizes.data()); hipLaunchKernelGGL(( lognormal_adjust_kernel<T>), dim3(grid_size), dim3(block_size), 0, 0, d_segment_sizes, diff); thrust::exclusive_scan(segment_sizes.begin(), segment_sizes.end(), segment_sizes.begin()); return segment_sizes; } template <class T> void 
generator_t::operator()(seed_t seed, thrust::device_vector<T> &data, bit_entropy entropy, T min, T max) { switch (entropy) { case bit_entropy::_1_000: { prepare_random_generator(seed, data.size()); thrust::transform(m_distribution.begin(), m_distribution.end(), data.begin(), random_to_item_t<T>(min, max)); return; } case bit_entropy::_0_000: { std::mt19937 rng; rng.seed(static_cast<std::mt19937::result_type>(seed.get())); std::uniform_real_distribution<float> dist(0.0f, 1.0f); T random_value = random_to_item_t<T>(min, max)(dist(rng)); thrust::fill(data.begin(), data.end(), random_value); return; } default: { prepare_random_generator(seed, data.size()); thrust::transform(m_distribution.begin(), m_distribution.end(), data.begin(), random_to_item_t<T>(min, max)); const int number_of_steps = static_cast<int>(entropy); thrust::device_vector<T> tmp(data.size()); const int threads_in_block = 256; const int blocks_in_grid = (data.size() + threads_in_block - 1) / threads_in_block; for (int i = 0; i < number_of_steps; i++, ++seed) { (*this)(seed, tmp, bit_entropy::_1_000, min, max); hipLaunchKernelGGL(( and_kernel), dim3(blocks_in_grid), dim3(threads_in_block), 0, 0, thrust::raw_pointer_cast(data.data()), thrust::raw_pointer_cast(tmp.data()), data.size()); hipStreamSynchronize(0); } return; } }; } template <> void generator_t::operator()(seed_t seed, thrust::device_vector<complex> &data, bit_entropy entropy, complex min, complex max) { const int threads_in_block = 256; const int blocks_in_grid = (data.size() + threads_in_block - 1) / threads_in_block; switch (entropy) { case bit_entropy::_1_000: { prepare_random_generator(seed, data.size()); ++seed; hipLaunchKernelGGL(( set_real_kernel), dim3(blocks_in_grid), dim3(threads_in_block), 0, 0, thrust::raw_pointer_cast(data.data()), min, max, thrust::raw_pointer_cast(m_distribution.data()), data.size()); prepare_random_generator(seed, data.size()); ++seed; hipLaunchKernelGGL(( set_imag_kernel), dim3(blocks_in_grid), 
dim3(threads_in_block), 0, 0, thrust::raw_pointer_cast(data.data()), min, max, thrust::raw_pointer_cast(m_distribution.data()), data.size()); return; } case bit_entropy::_0_000: { std::mt19937 rng; rng.seed(static_cast<std::mt19937::result_type>(seed.get())); std::uniform_real_distribution<float> dist(0.0f, 1.0f); double random_imag = random_to_item_t<double>(min.imag(), max.imag())(dist(rng)); double random_real = random_to_item_t<double>(min.imag(), max.imag())(dist(rng)); complex random_value(random_real, random_imag); thrust::fill(data.begin(), data.end(), random_value); return; } default: { prepare_random_generator(seed, data.size()); prepare_random_generator(seed, data.size()); ++seed; hipLaunchKernelGGL(( set_real_kernel), dim3(blocks_in_grid), dim3(threads_in_block), 0, 0, thrust::raw_pointer_cast(data.data()), min, max, thrust::raw_pointer_cast(m_distribution.data()), data.size()); prepare_random_generator(seed, data.size()); ++seed; hipLaunchKernelGGL(( set_imag_kernel), dim3(blocks_in_grid), dim3(threads_in_block), 0, 0, thrust::raw_pointer_cast(data.data()), min, max, thrust::raw_pointer_cast(m_distribution.data()), data.size()); const int number_of_steps = static_cast<int>(entropy); thrust::device_vector<complex> tmp(data.size()); for (int i = 0; i < number_of_steps; i++, ++seed) { (*this)(seed, tmp, bit_entropy::_1_000, min, max); hipLaunchKernelGGL(( and_kernel), dim3(blocks_in_grid), dim3(threads_in_block), 0, 0, thrust::raw_pointer_cast(data.data()), thrust::raw_pointer_cast(tmp.data()), data.size()); hipStreamSynchronize(0); } return; } }; } struct random_to_probability_t { double m_probability; __host__ __device__ bool operator()(double random_value) { return random_value < m_probability; } }; template <> void generator_t::operator()(seed_t seed, thrust::device_vector<bool> &data, bit_entropy entropy, bool /* min */, bool /* max */) { if (entropy == bit_entropy::_0_000) { thrust::fill(data.begin(), data.end(), false); } else if (entropy == 
bit_entropy::_1_000) { thrust::fill(data.begin(), data.end(), true); } else { prepare_random_generator(seed, data.size()); thrust::transform(m_distribution.begin(), m_distribution.end(), data.begin(), random_to_probability_t{entropy_to_probability(entropy)}); } } template <typename T> void gen(seed_t seed, thrust::device_vector<T> &data, bit_entropy entropy, T min, T max) { generator_t{}(seed, data, entropy, min, max); } #define INSTANTIATE_RND(TYPE) \ template void gen<TYPE>(seed_t, \ thrust::device_vector<TYPE> & data, \ bit_entropy, \ TYPE min, \ TYPE max) #define INSTANTIATE(TYPE) INSTANTIATE_RND(TYPE); INSTANTIATE(bool); INSTANTIATE(uint8_t); INSTANTIATE(uint16_t); INSTANTIATE(uint32_t); INSTANTIATE(uint64_t); INSTANTIATE(uint128_t); INSTANTIATE(int8_t); INSTANTIATE(int16_t); INSTANTIATE(int32_t); INSTANTIATE(int64_t); INSTANTIATE(int128_t); INSTANTIATE(float); INSTANTIATE(double); INSTANTIATE(complex); #undef INSTANTIATE #undef INSTANTIATE_RND template <typename T> thrust::device_vector<T> gen_power_law_offsets(seed_t seed, std::size_t total_elements, std::size_t total_segments) { return generator_t{}.power_law_segment_offsets<T>(seed, total_elements, total_segments); } #define INSTANTIATE(TYPE) \ template thrust::device_vector<TYPE> gen_power_law_offsets<TYPE>(seed_t, std::size_t, std::size_t) INSTANTIATE(uint32_t); INSTANTIATE(uint64_t); #undef INSTANTIATE template <class T> struct offset_to_iterator_t { T *base_it; __host__ __device__ __forceinline__ T* operator()(std::size_t offset) const { return base_it + offset; } }; template <class T> struct repeat_index_t { __host__ __device__ __forceinline__ thrust::constant_iterator<T> operator()(std::size_t i) { return thrust::constant_iterator<T>(static_cast<T>(i)); } }; struct offset_to_size_t { std::size_t *offsets = nullptr; __host__ __device__ __forceinline__ std::size_t operator()(std::size_t i) { return offsets[i + 1] - offsets[i]; } }; template <typename T> thrust::device_vector<T> 
gen_power_law_key_segments(seed_t seed, std::size_t total_elements, thrust::device_vector<std::size_t> &segment_offsets) { std::size_t total_segments = segment_offsets.size() - 1; thrust::device_vector<T> out(total_elements); std::size_t *d_offsets = thrust::raw_pointer_cast(segment_offsets.data()); T *d_out = thrust::raw_pointer_cast(out.data()); thrust::counting_iterator<int> iota(0); offset_to_iterator_t<T> dst_transform_op{d_out}; auto d_range_srcs = thrust::make_transform_iterator(iota, repeat_index_t<T>{}); auto d_range_dsts = thrust::make_transform_iterator(d_offsets, dst_transform_op); auto d_range_sizes = thrust::make_transform_iterator(iota, offset_to_size_t{d_offsets}); std::uint8_t *d_temp_storage = nullptr; std::size_t temp_storage_bytes = 0; cub::DeviceCopy::Batched(d_temp_storage, temp_storage_bytes, d_range_srcs, d_range_dsts, d_range_sizes, total_segments); thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); cub::DeviceCopy::Batched(d_temp_storage, temp_storage_bytes, d_range_srcs, d_range_dsts, d_range_sizes, total_segments); hipDeviceSynchronize(); return out; } template <typename T> thrust::device_vector<T> gen_power_law_key_segments(seed_t seed, std::size_t total_elements, std::size_t total_segments) { thrust::device_vector<std::size_t> segment_offsets = gen_power_law_offsets<std::size_t>(seed, total_elements, total_segments); return gen_power_law_key_segments<T>(seed, total_elements, segment_offsets); } #define INSTANTIATE(TYPE) \ template thrust::device_vector<TYPE> gen_power_law_key_segments<TYPE>(seed_t, \ std::size_t, \ std::size_t) INSTANTIATE(bool); INSTANTIATE(uint8_t); INSTANTIATE(uint16_t); INSTANTIATE(uint32_t); INSTANTIATE(uint64_t); INSTANTIATE(uint128_t); INSTANTIATE(int8_t); INSTANTIATE(int16_t); INSTANTIATE(int32_t); INSTANTIATE(int64_t); INSTANTIATE(int128_t); INSTANTIATE(float); INSTANTIATE(double); INSTANTIATE(complex); #undef INSTANTIATE 
template <class T> struct gt_t { T val; __device__ bool operator()(T x) { return x > val; } }; template <typename T> thrust::device_vector<T> gen_uniform_offsets(seed_t seed, T total_elements, T min_segment_size, T max_segment_size) { thrust::device_vector<T> segment_offsets(total_elements + 2); gen(seed, segment_offsets, bit_entropy::_1_000, min_segment_size, max_segment_size); segment_offsets[total_elements] = total_elements + 1; thrust::exclusive_scan(segment_offsets.begin(), segment_offsets.end(), segment_offsets.begin()); typename thrust::device_vector<T>::iterator iter = thrust::find_if(segment_offsets.begin(), segment_offsets.end(), gt_t<T>{total_elements}); *iter = total_elements; segment_offsets.erase(iter + 1, segment_offsets.end()); return segment_offsets; } #define INSTANTIATE(TYPE) \ template thrust::device_vector<TYPE> gen_uniform_offsets<TYPE>(seed_t, TYPE, TYPE, TYPE) INSTANTIATE(uint32_t); INSTANTIATE(uint64_t); #undef INSTANTIATE /** * @brief Generates a vector of random key segments. * * Not all parameter combinations can be satisfied. For instance, if the total * elements is less than the minimal segment size, the function will return a * vector with a single element that is outside of the requested range. * At most one segment can be out of the requested range. 
*/ template <typename T> thrust::device_vector<T> gen_uniform_key_segments(seed_t seed, std::size_t total_elements, std::size_t min_segment_size, std::size_t max_segment_size) { thrust::device_vector<std::size_t> segment_offsets = gen_uniform_offsets(seed, total_elements, min_segment_size, max_segment_size); return gen_power_law_key_segments<T>(seed, total_elements, segment_offsets); } #define INSTANTIATE(TYPE) \ template thrust::device_vector<TYPE> gen_uniform_key_segments<TYPE>(seed_t, \ std::size_t, \ std::size_t, \ std::size_t) INSTANTIATE(bool); INSTANTIATE(uint8_t); INSTANTIATE(uint16_t); INSTANTIATE(uint32_t); INSTANTIATE(uint64_t); INSTANTIATE(uint128_t); INSTANTIATE(int8_t); INSTANTIATE(int16_t); INSTANTIATE(int32_t); INSTANTIATE(int64_t); INSTANTIATE(int128_t); INSTANTIATE(float); INSTANTIATE(double); INSTANTIATE(complex); #undef INSTANTIATE
df21e7a518a7f2663a3cd03e0ce870dbe6caa6b9.cu
#include <thrust/distance.h> #include <thrust/binary_search.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/count.h> #include <thrust/for_each.h> #include <thrust/tabulate.h> #include <cub/device/device_copy.cuh> #include <cstdint> #include <random> #include "thrust/scan.h" #include <curand.h> #include <nvbench_helper.cuh> class generator_t { public: generator_t(); ~generator_t(); template <typename T> void operator()(seed_t seed, thrust::device_vector<T> &data, bit_entropy entropy, T min = std::numeric_limits<T>::lowest(), T max = std::numeric_limits<T>::max()); template <typename T> thrust::device_vector<T> power_law_segment_offsets(seed_t seed, std::size_t total_elements, std::size_t total_segments); double *distribution(); curandGenerator_t &gen() { return m_gen; } double *prepare_random_generator(seed_t seed, std::size_t num_items); double *prepare_lognormal_random_generator(seed_t seed, std::size_t num_items); private: curandGenerator_t m_gen; thrust::device_vector<double> m_distribution; }; template <typename T> struct random_to_item_t { double m_min; double m_max; __host__ __device__ random_to_item_t(T min, T max) : m_min(static_cast<double>(min)) , m_max(static_cast<double>(max)) {} __host__ __device__ T operator()(double random_value) { return static_cast<T>((m_max - m_min) * random_value + m_min); } }; generator_t::generator_t() { curandCreateGenerator(&m_gen, CURAND_RNG_PSEUDO_DEFAULT); } generator_t::~generator_t() { curandDestroyGenerator(m_gen); } double *generator_t::distribution() { return thrust::raw_pointer_cast(m_distribution.data()); } double *generator_t::prepare_random_generator(seed_t seed, std::size_t num_items) { curandSetPseudoRandomGeneratorSeed(m_gen, seed.get()); m_distribution.resize(num_items); curandGenerateUniformDouble(m_gen, this->distribution(), 
num_items); return this->distribution(); } double *generator_t::prepare_lognormal_random_generator(seed_t seed, std::size_t num_segments) { curandSetPseudoRandomGeneratorSeed(m_gen, seed.get()); m_distribution.resize(num_segments); const double mean = 3.0; const double sigma = 1.2; curandGenerateLogNormalDouble(m_gen, this->distribution(), num_segments, mean, sigma); return this->distribution(); } template <class T> __global__ void and_kernel(T *d_in, T *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { d_in[i] = d_in[i] & d_tmp[i]; } } __global__ void and_kernel(float *d_in, float *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { std::uint32_t result = reinterpret_cast<std::uint32_t &>(d_in[i]) & reinterpret_cast<std::uint32_t &>(d_tmp[i]); d_in[i] = reinterpret_cast<float &>(result); } } __global__ void and_kernel(double *d_in, double *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { std::uint64_t result = reinterpret_cast<std::uint64_t &>(d_in[i]) & reinterpret_cast<std::uint64_t &>(d_tmp[i]); d_in[i] = reinterpret_cast<double &>(result); } } __global__ void and_kernel(complex *d_in, complex *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { double in_real = d_in[i].real(); double in_imag = d_in[i].imag(); double tmp_real = d_tmp[i].real(); double tmp_imag = d_tmp[i].imag(); std::uint64_t result_real = reinterpret_cast<std::uint64_t &>(in_real) & reinterpret_cast<std::uint64_t &>(tmp_real); std::uint64_t result_imag = reinterpret_cast<std::uint64_t &>(in_imag) & reinterpret_cast<std::uint64_t &>(tmp_imag); d_in[i].real(reinterpret_cast<double &>(result_real)); d_in[i].imag(reinterpret_cast<double &>(result_imag)); } } __global__ void set_real_kernel(complex *d_in, complex min, complex max, double *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + 
threadIdx.x; if (i < n) { d_in[i].real(random_to_item_t<double>{min.real(), max.real()}(d_tmp[i])); } } __global__ void set_imag_kernel(complex *d_in, complex min, complex max, double *d_tmp, std::size_t n) { const std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { d_in[i].imag(random_to_item_t<double>{min.imag(), max.imag()}(d_tmp[i])); } } template <class T> struct lognormal_transformer_t { std::size_t total_elements; double sum; __device__ T operator()(double val) { return floor(val * total_elements / sum); } }; template <class T> __global__ void lognormal_adjust_kernel(T *segment_sizes, std::size_t diff) { const unsigned tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < diff) { segment_sizes[tid]++; } } template <class T> thrust::device_vector<T> generator_t::power_law_segment_offsets(seed_t seed, std::size_t total_elements, std::size_t total_segments) { thrust::device_vector<T> segment_sizes(total_segments + 1); prepare_lognormal_random_generator(seed, total_segments); if (thrust::count(m_distribution.begin(), m_distribution.end(), 0.0) == total_segments) { thrust::fill_n(m_distribution.begin(), total_segments, 1.0); } double sum = thrust::reduce(m_distribution.begin(), m_distribution.end()); thrust::transform(m_distribution.begin(), m_distribution.end(), segment_sizes.begin(), lognormal_transformer_t<T>{total_elements, sum}); const int diff = total_elements - thrust::reduce(segment_sizes.begin(), segment_sizes.end()); const int block_size = 256; const int grid_size = (std::abs(diff) + block_size - 1) / block_size; T *d_segment_sizes = thrust::raw_pointer_cast(segment_sizes.data()); lognormal_adjust_kernel<T><<<grid_size, block_size>>>(d_segment_sizes, diff); thrust::exclusive_scan(segment_sizes.begin(), segment_sizes.end(), segment_sizes.begin()); return segment_sizes; } template <class T> void generator_t::operator()(seed_t seed, thrust::device_vector<T> &data, bit_entropy entropy, T min, T max) { switch (entropy) { case 
bit_entropy::_1_000: { prepare_random_generator(seed, data.size()); thrust::transform(m_distribution.begin(), m_distribution.end(), data.begin(), random_to_item_t<T>(min, max)); return; } case bit_entropy::_0_000: { std::mt19937 rng; rng.seed(static_cast<std::mt19937::result_type>(seed.get())); std::uniform_real_distribution<float> dist(0.0f, 1.0f); T random_value = random_to_item_t<T>(min, max)(dist(rng)); thrust::fill(data.begin(), data.end(), random_value); return; } default: { prepare_random_generator(seed, data.size()); thrust::transform(m_distribution.begin(), m_distribution.end(), data.begin(), random_to_item_t<T>(min, max)); const int number_of_steps = static_cast<int>(entropy); thrust::device_vector<T> tmp(data.size()); const int threads_in_block = 256; const int blocks_in_grid = (data.size() + threads_in_block - 1) / threads_in_block; for (int i = 0; i < number_of_steps; i++, ++seed) { (*this)(seed, tmp, bit_entropy::_1_000, min, max); and_kernel<<<blocks_in_grid, threads_in_block>>>(thrust::raw_pointer_cast(data.data()), thrust::raw_pointer_cast(tmp.data()), data.size()); cudaStreamSynchronize(0); } return; } }; } template <> void generator_t::operator()(seed_t seed, thrust::device_vector<complex> &data, bit_entropy entropy, complex min, complex max) { const int threads_in_block = 256; const int blocks_in_grid = (data.size() + threads_in_block - 1) / threads_in_block; switch (entropy) { case bit_entropy::_1_000: { prepare_random_generator(seed, data.size()); ++seed; set_real_kernel<<<blocks_in_grid, threads_in_block>>>(thrust::raw_pointer_cast(data.data()), min, max, thrust::raw_pointer_cast(m_distribution.data()), data.size()); prepare_random_generator(seed, data.size()); ++seed; set_imag_kernel<<<blocks_in_grid, threads_in_block>>>(thrust::raw_pointer_cast(data.data()), min, max, thrust::raw_pointer_cast(m_distribution.data()), data.size()); return; } case bit_entropy::_0_000: { std::mt19937 rng; 
rng.seed(static_cast<std::mt19937::result_type>(seed.get())); std::uniform_real_distribution<float> dist(0.0f, 1.0f); double random_imag = random_to_item_t<double>(min.imag(), max.imag())(dist(rng)); double random_real = random_to_item_t<double>(min.imag(), max.imag())(dist(rng)); complex random_value(random_real, random_imag); thrust::fill(data.begin(), data.end(), random_value); return; } default: { prepare_random_generator(seed, data.size()); prepare_random_generator(seed, data.size()); ++seed; set_real_kernel<<<blocks_in_grid, threads_in_block>>>(thrust::raw_pointer_cast(data.data()), min, max, thrust::raw_pointer_cast(m_distribution.data()), data.size()); prepare_random_generator(seed, data.size()); ++seed; set_imag_kernel<<<blocks_in_grid, threads_in_block>>>(thrust::raw_pointer_cast(data.data()), min, max, thrust::raw_pointer_cast(m_distribution.data()), data.size()); const int number_of_steps = static_cast<int>(entropy); thrust::device_vector<complex> tmp(data.size()); for (int i = 0; i < number_of_steps; i++, ++seed) { (*this)(seed, tmp, bit_entropy::_1_000, min, max); and_kernel<<<blocks_in_grid, threads_in_block>>>(thrust::raw_pointer_cast(data.data()), thrust::raw_pointer_cast(tmp.data()), data.size()); cudaStreamSynchronize(0); } return; } }; } struct random_to_probability_t { double m_probability; __host__ __device__ bool operator()(double random_value) { return random_value < m_probability; } }; template <> void generator_t::operator()(seed_t seed, thrust::device_vector<bool> &data, bit_entropy entropy, bool /* min */, bool /* max */) { if (entropy == bit_entropy::_0_000) { thrust::fill(data.begin(), data.end(), false); } else if (entropy == bit_entropy::_1_000) { thrust::fill(data.begin(), data.end(), true); } else { prepare_random_generator(seed, data.size()); thrust::transform(m_distribution.begin(), m_distribution.end(), data.begin(), random_to_probability_t{entropy_to_probability(entropy)}); } } template <typename T> void gen(seed_t seed, 
thrust::device_vector<T> &data, bit_entropy entropy, T min, T max) { generator_t{}(seed, data, entropy, min, max); } #define INSTANTIATE_RND(TYPE) \ template void gen<TYPE>(seed_t, \ thrust::device_vector<TYPE> & data, \ bit_entropy, \ TYPE min, \ TYPE max) #define INSTANTIATE(TYPE) INSTANTIATE_RND(TYPE); INSTANTIATE(bool); INSTANTIATE(uint8_t); INSTANTIATE(uint16_t); INSTANTIATE(uint32_t); INSTANTIATE(uint64_t); INSTANTIATE(uint128_t); INSTANTIATE(int8_t); INSTANTIATE(int16_t); INSTANTIATE(int32_t); INSTANTIATE(int64_t); INSTANTIATE(int128_t); INSTANTIATE(float); INSTANTIATE(double); INSTANTIATE(complex); #undef INSTANTIATE #undef INSTANTIATE_RND template <typename T> thrust::device_vector<T> gen_power_law_offsets(seed_t seed, std::size_t total_elements, std::size_t total_segments) { return generator_t{}.power_law_segment_offsets<T>(seed, total_elements, total_segments); } #define INSTANTIATE(TYPE) \ template thrust::device_vector<TYPE> gen_power_law_offsets<TYPE>(seed_t, std::size_t, std::size_t) INSTANTIATE(uint32_t); INSTANTIATE(uint64_t); #undef INSTANTIATE template <class T> struct offset_to_iterator_t { T *base_it; __host__ __device__ __forceinline__ T* operator()(std::size_t offset) const { return base_it + offset; } }; template <class T> struct repeat_index_t { __host__ __device__ __forceinline__ thrust::constant_iterator<T> operator()(std::size_t i) { return thrust::constant_iterator<T>(static_cast<T>(i)); } }; struct offset_to_size_t { std::size_t *offsets = nullptr; __host__ __device__ __forceinline__ std::size_t operator()(std::size_t i) { return offsets[i + 1] - offsets[i]; } }; template <typename T> thrust::device_vector<T> gen_power_law_key_segments(seed_t seed, std::size_t total_elements, thrust::device_vector<std::size_t> &segment_offsets) { std::size_t total_segments = segment_offsets.size() - 1; thrust::device_vector<T> out(total_elements); std::size_t *d_offsets = thrust::raw_pointer_cast(segment_offsets.data()); T *d_out = 
thrust::raw_pointer_cast(out.data()); thrust::counting_iterator<int> iota(0); offset_to_iterator_t<T> dst_transform_op{d_out}; auto d_range_srcs = thrust::make_transform_iterator(iota, repeat_index_t<T>{}); auto d_range_dsts = thrust::make_transform_iterator(d_offsets, dst_transform_op); auto d_range_sizes = thrust::make_transform_iterator(iota, offset_to_size_t{d_offsets}); std::uint8_t *d_temp_storage = nullptr; std::size_t temp_storage_bytes = 0; cub::DeviceCopy::Batched(d_temp_storage, temp_storage_bytes, d_range_srcs, d_range_dsts, d_range_sizes, total_segments); thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); cub::DeviceCopy::Batched(d_temp_storage, temp_storage_bytes, d_range_srcs, d_range_dsts, d_range_sizes, total_segments); cudaDeviceSynchronize(); return out; } template <typename T> thrust::device_vector<T> gen_power_law_key_segments(seed_t seed, std::size_t total_elements, std::size_t total_segments) { thrust::device_vector<std::size_t> segment_offsets = gen_power_law_offsets<std::size_t>(seed, total_elements, total_segments); return gen_power_law_key_segments<T>(seed, total_elements, segment_offsets); } #define INSTANTIATE(TYPE) \ template thrust::device_vector<TYPE> gen_power_law_key_segments<TYPE>(seed_t, \ std::size_t, \ std::size_t) INSTANTIATE(bool); INSTANTIATE(uint8_t); INSTANTIATE(uint16_t); INSTANTIATE(uint32_t); INSTANTIATE(uint64_t); INSTANTIATE(uint128_t); INSTANTIATE(int8_t); INSTANTIATE(int16_t); INSTANTIATE(int32_t); INSTANTIATE(int64_t); INSTANTIATE(int128_t); INSTANTIATE(float); INSTANTIATE(double); INSTANTIATE(complex); #undef INSTANTIATE template <class T> struct gt_t { T val; __device__ bool operator()(T x) { return x > val; } }; template <typename T> thrust::device_vector<T> gen_uniform_offsets(seed_t seed, T total_elements, T min_segment_size, T max_segment_size) { thrust::device_vector<T> segment_offsets(total_elements + 2); gen(seed, 
segment_offsets, bit_entropy::_1_000, min_segment_size, max_segment_size); segment_offsets[total_elements] = total_elements + 1; thrust::exclusive_scan(segment_offsets.begin(), segment_offsets.end(), segment_offsets.begin()); typename thrust::device_vector<T>::iterator iter = thrust::find_if(segment_offsets.begin(), segment_offsets.end(), gt_t<T>{total_elements}); *iter = total_elements; segment_offsets.erase(iter + 1, segment_offsets.end()); return segment_offsets; } #define INSTANTIATE(TYPE) \ template thrust::device_vector<TYPE> gen_uniform_offsets<TYPE>(seed_t, TYPE, TYPE, TYPE) INSTANTIATE(uint32_t); INSTANTIATE(uint64_t); #undef INSTANTIATE /** * @brief Generates a vector of random key segments. * * Not all parameter combinations can be satisfied. For instance, if the total * elements is less than the minimal segment size, the function will return a * vector with a single element that is outside of the requested range. * At most one segment can be out of the requested range. */ template <typename T> thrust::device_vector<T> gen_uniform_key_segments(seed_t seed, std::size_t total_elements, std::size_t min_segment_size, std::size_t max_segment_size) { thrust::device_vector<std::size_t> segment_offsets = gen_uniform_offsets(seed, total_elements, min_segment_size, max_segment_size); return gen_power_law_key_segments<T>(seed, total_elements, segment_offsets); } #define INSTANTIATE(TYPE) \ template thrust::device_vector<TYPE> gen_uniform_key_segments<TYPE>(seed_t, \ std::size_t, \ std::size_t, \ std::size_t) INSTANTIATE(bool); INSTANTIATE(uint8_t); INSTANTIATE(uint16_t); INSTANTIATE(uint32_t); INSTANTIATE(uint64_t); INSTANTIATE(uint128_t); INSTANTIATE(int8_t); INSTANTIATE(int16_t); INSTANTIATE(int32_t); INSTANTIATE(int64_t); INSTANTIATE(int128_t); INSTANTIATE(float); INSTANTIATE(double); INSTANTIATE(complex); #undef INSTANTIATE
e307cc1d06ad9726afe37a781b545f13f7a476d9.hip
// !!! This is a file automatically generated by hipify!!! #include "Surface1D.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "CudaUtil.h" namespace Surf1D { __device__ inline void computeLocalCoordinates1d(const int vid, const int vidFeature, const MeshBaseDevice* mesh, Vec3f* xi1, Vec3f* xi2) { Vec3f normal = mesh->vertexNormals[vid]; if (mesh->isFlat) { normal = Vec3f(0, 0, 1); } // construct a random vector Vec3f randVec = mesh->vertexPoints[vidFeature] - mesh->vertexPoints[vid]; if (normal.cross(randVec).norm() < 10.f * EPSILON) { printf("computeLocalCoordinates: normal vector needs to be recalculated. normal: %.10g %.10g %.10g randVec: %.10g %.10g %.10g\n", normal.x, normal.y, normal.z, randVec.x, randVec.y, randVec.z); //assert(0); randVec = { normal[0], normal[2], -normal[1] }; } // compute tanget vectors Vec3f t1 = normal.cross(randVec); Vec3f t2 = normal.cross(t1); // normalize normal.normalize(); t1.normalize(); t2.normalize(); if (std::abs(t1.cross(t2).dot(normal) - 1) > 10.f * EPSILON) { printf("computeLocalCoordinates: computation of local coordinates failed for vertex %i\n", vid); assert(0); } *xi1 = t2; *xi2 = normal; } __device__ localSurface1d computeLocalSurface1d(const int vid, MeshBaseDevice* mesh) { Vec3f p_center = mesh->vertexPoints[vid]; // neighbors (also contains vh itself) int neighbors[MAX_NEIGHBORS]; int pos = 0; neighbors[pos++] = vid; int vidFeature = -1; // get a neighbor vertex of vid which is a feature vertex // collect one-ring for (auto it = mesh->he_ccw_order_begin(vid); it != mesh->he_ccw_order_end(vid); ++it) { int dst = it->targetVertex; if (mesh->isFeature(dst)) { neighbors[pos++] = dst; vidFeature = dst; } } // neighbors contains all feature neighbors and us if (vidFeature == -1) { printf("vidFeature is -1, vid %i\n", vid); assert(0); } Vec3f ei[2]; computeLocalCoordinates1d(vid, vidFeature, mesh, &ei[0], &ei[1]); //convert points to frenet basis & fitting function f(x,y) = ax -> (x)(a)T = z 
=^= Ax=b -> ATA x = ATb float ATA = 0; float ATb = 0; for (auto i = 0; i < pos; ++i) { Vec3f p_neighbor = mesh->vertexPoints[neighbors[i]]; Vec3f p = p_neighbor - p_center; const float u = ei[0].dot(p); const float v = ei[1].dot(p); ATb += u * u * v; ATA += u * u * u * u; } float abc = ATb / ATA; localSurface1d para; para.p0 = p_center; para.a = abc; para.ei[0] = ei[0]; para.ei[1] = ei[1]; //printf("vid %i p0 %f %f %f a %f e0 %f %f %f e1 %f %f %f\n", vid, XYZ(p_center),abc,XYZ(ei[0]), XYZ(ei[1])); return para; } __global__ void k_computeLocalSurfaces1d(MeshBaseDevice* mesh, ArrayView<localSurface1d> localSurfaces) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < mesh->nVerticesSurf; idx += blockDim.x * gridDim.x) { if (mesh->isFeature(idx)) { localSurfaces[idx] = computeLocalSurface1d(idx, mesh); } } } }
e307cc1d06ad9726afe37a781b545f13f7a476d9.cu
#include "Surface1D.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "CudaUtil.h" namespace Surf1D { __device__ inline void computeLocalCoordinates1d(const int vid, const int vidFeature, const MeshBaseDevice* mesh, Vec3f* xi1, Vec3f* xi2) { Vec3f normal = mesh->vertexNormals[vid]; if (mesh->isFlat) { normal = Vec3f(0, 0, 1); } // construct a random vector Vec3f randVec = mesh->vertexPoints[vidFeature] - mesh->vertexPoints[vid]; if (normal.cross(randVec).norm() < 10.f * EPSILON) { printf("computeLocalCoordinates: normal vector needs to be recalculated. normal: %.10g %.10g %.10g randVec: %.10g %.10g %.10g\n", normal.x, normal.y, normal.z, randVec.x, randVec.y, randVec.z); //assert(0); randVec = { normal[0], normal[2], -normal[1] }; } // compute tanget vectors Vec3f t1 = normal.cross(randVec); Vec3f t2 = normal.cross(t1); // normalize normal.normalize(); t1.normalize(); t2.normalize(); if (std::abs(t1.cross(t2).dot(normal) - 1) > 10.f * EPSILON) { printf("computeLocalCoordinates: computation of local coordinates failed for vertex %i\n", vid); assert(0); } *xi1 = t2; *xi2 = normal; } __device__ localSurface1d computeLocalSurface1d(const int vid, MeshBaseDevice* mesh) { Vec3f p_center = mesh->vertexPoints[vid]; // neighbors (also contains vh itself) int neighbors[MAX_NEIGHBORS]; int pos = 0; neighbors[pos++] = vid; int vidFeature = -1; // get a neighbor vertex of vid which is a feature vertex // collect one-ring for (auto it = mesh->he_ccw_order_begin(vid); it != mesh->he_ccw_order_end(vid); ++it) { int dst = it->targetVertex; if (mesh->isFeature(dst)) { neighbors[pos++] = dst; vidFeature = dst; } } // neighbors contains all feature neighbors and us if (vidFeature == -1) { printf("vidFeature is -1, vid %i\n", vid); assert(0); } Vec3f ei[2]; computeLocalCoordinates1d(vid, vidFeature, mesh, &ei[0], &ei[1]); //convert points to frenet basis & fitting function f(x,y) = ax -> (x)(a)T = z =^= Ax=b -> ATA x = ATb float ATA = 0; float ATb = 0; for 
(auto i = 0; i < pos; ++i) { Vec3f p_neighbor = mesh->vertexPoints[neighbors[i]]; Vec3f p = p_neighbor - p_center; const float u = ei[0].dot(p); const float v = ei[1].dot(p); ATb += u * u * v; ATA += u * u * u * u; } float abc = ATb / ATA; localSurface1d para; para.p0 = p_center; para.a = abc; para.ei[0] = ei[0]; para.ei[1] = ei[1]; //printf("vid %i p0 %f %f %f a %f e0 %f %f %f e1 %f %f %f\n", vid, XYZ(p_center),abc,XYZ(ei[0]), XYZ(ei[1])); return para; } __global__ void k_computeLocalSurfaces1d(MeshBaseDevice* mesh, ArrayView<localSurface1d> localSurfaces) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < mesh->nVerticesSurf; idx += blockDim.x * gridDim.x) { if (mesh->isFeature(idx)) { localSurfaces[idx] = computeLocalSurface1d(idx, mesh); } } } }
be4c36dbc970e9c8c25648a383d884cb9833b846.hip
// !!! This is a file automatically generated by hipify!!! //============================================================================ // Copyright (c) Kitware, Inc. // All rights reserved. // See LICENSE.txt for details. // // This software is distributed WITHOUT ANY WARRANTY; without even // the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the above copyright notice for more information. //============================================================================ #include <vtkm/cont/cuda/internal/testing/Testing.h> #include <vtkm/cont/ArrayHandle.h> #include <vtkm/cont/RuntimeDeviceTracker.h> #include <vtkm/cont/cuda/DeviceAdapterCuda.h> #include <vtkm/cont/cuda/ErrorCuda.h> #include <vtkm/cont/cuda/internal/CudaAllocator.h> #include <vtkm/cont/cuda/internal/testing/Testing.h> #include <hip/hip_runtime.h> using vtkm::cont::cuda::internal::CudaAllocator; namespace { template <typename ValueType> ValueType* AllocateManagedPointer(vtkm::Id numValues) { void* result; VTKM_CUDA_CALL(hipMallocManaged(&result, static_cast<size_t>(numValues) * sizeof(ValueType))); // Some sanity checks: VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(result), "Allocated pointer is not a device pointer."); VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(result), "Allocated pointer is not managed."); return static_cast<ValueType*>(result); } template <typename ValueType> ValueType* AllocateDevicePointer(vtkm::Id numValues) { void* result; VTKM_CUDA_CALL(hipMalloc(&result, static_cast<size_t>(numValues) * sizeof(ValueType))); // Some sanity checks: VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(result), "Allocated pointer is not a device pointer."); VTKM_TEST_ASSERT(!CudaAllocator::IsManagedPointer(result), "Allocated pointer is managed."); return static_cast<ValueType*>(result); } template <typename ValueType> vtkm::cont::ArrayHandle<ValueType> CreateArrayHandle(vtkm::Id numValues, bool managed) { ValueType* ptr = managed ? 
AllocateManagedPointer<ValueType>(numValues) : AllocateDevicePointer<ValueType>(numValues); return vtkm::cont::make_ArrayHandle(ptr, numValues); } template <typename ValueType> void TestPrepareForInput(bool managed) { vtkm::cont::ArrayHandle<ValueType> handle = CreateArrayHandle<ValueType>(32, managed); handle.PrepareForInput(vtkm::cont::DeviceAdapterTagCuda()); void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after PrepareForInput."); VTKM_TEST_ASSERT(execArray != nullptr, "No execution array after PrepareForInput."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(execArray), "PrepareForInput execution array not device pointer."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "PrepareForInput control array not device pointer."); if (managed) { VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(execArray), "PrepareForInput execution array unmanaged."); VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(contArray), "PrepareForInput control array unmanaged."); } VTKM_TEST_ASSERT(execArray == contArray, "PrepareForInput managed arrays not shared."); } template <typename ValueType> void TestPrepareForInPlace(bool managed) { vtkm::cont::ArrayHandle<ValueType> handle = CreateArrayHandle<ValueType>(32, managed); handle.PrepareForInPlace(vtkm::cont::DeviceAdapterTagCuda()); void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after PrepareForInPlace."); VTKM_TEST_ASSERT(execArray != nullptr, "No execution array after PrepareForInPlace."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(execArray), "PrepareForInPlace execution array not device pointer."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "PrepareForInPlace control array not device pointer."); if (managed) { 
VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(execArray), "PrepareForInPlace execution array unmanaged."); VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(contArray), "PrepareForInPlace control array unmanaged."); } VTKM_TEST_ASSERT(execArray == contArray, "PrepareForInPlace managed arrays not shared."); } template <typename ValueType> void TestPrepareForOutput(bool managed) { // Should reuse a managed control pointer if buffer is large enough. vtkm::cont::ArrayHandle<ValueType> handle = CreateArrayHandle<ValueType>(32, managed); handle.PrepareForOutput(32, vtkm::cont::DeviceAdapterTagCuda()); void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after PrepareForOutput."); VTKM_TEST_ASSERT(execArray != nullptr, "No execution array after PrepareForOutput."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(execArray), "PrepareForOutput execution array not device pointer."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "PrepareForOutput control array not device pointer."); if (managed) { VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(execArray), "PrepareForOutput execution array unmanaged."); VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(contArray), "PrepareForOutput control array unmanaged."); } VTKM_TEST_ASSERT(execArray == contArray, "PrepareForOutput managed arrays not shared."); } template <typename ValueType> void TestReleaseResourcesExecution(bool managed) { vtkm::cont::ArrayHandle<ValueType> handle = CreateArrayHandle<ValueType>(32, managed); handle.PrepareForInput(vtkm::cont::DeviceAdapterTagCuda()); void* origArray = handle.Internals->ExecutionArray; handle.ReleaseResourcesExecution(); void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after ReleaseResourcesExecution."); 
VTKM_TEST_ASSERT(execArray == nullptr, "Execution array not cleared after ReleaseResourcesExecution."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "ReleaseResourcesExecution control array not device pointer."); if (managed) { VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(contArray), "ReleaseResourcesExecution control array unmanaged."); } VTKM_TEST_ASSERT(origArray == contArray, "Control array changed after ReleaseResourcesExecution."); } template <typename ValueType> void TestRoundTrip(bool managed) { vtkm::cont::ArrayHandle<ValueType> handle = CreateArrayHandle<ValueType>(32, managed); handle.PrepareForOutput(32, vtkm::cont::DeviceAdapterTagCuda()); void* origContArray = handle.Internals->ControlArray->GetBasePointer(); { void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after PrepareForOutput."); VTKM_TEST_ASSERT(execArray != nullptr, "No execution array after PrepareForOutput."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(execArray), "PrepareForOutput execution array not device pointer."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "PrepareForOutput control array not device pointer."); if (managed) { VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(execArray), "PrepareForOutput execution array unmanaged."); VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(contArray), "PrepareForOutput control array unmanaged."); } VTKM_TEST_ASSERT(execArray == contArray, "PrepareForOutput managed arrays not shared."); } try { handle.GetPortalControl(); } catch (vtkm::cont::ErrorBadValue&) { if (managed) { throw; // Exception is unexpected } // If !managed, this exception is intentional to indicate that the control // array is a non-managed device pointer, and thus unaccessible from the // control environment. Return because we've already validated correct // behavior by catching this exception. 
return; } if (!managed) { VTKM_TEST_FAIL("Expected exception not thrown."); } { void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after GetPortalConst."); VTKM_TEST_ASSERT(execArray == nullptr, "Execution array not cleared after GetPortalConst."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "GetPortalConst control array not device pointer."); VTKM_TEST_ASSERT(origContArray == contArray, "GetPortalConst changed control array."); } } template <typename ValueType> void DoTests() { TestPrepareForInput<ValueType>(false); TestPrepareForInPlace<ValueType>(false); TestPrepareForOutput<ValueType>(false); TestReleaseResourcesExecution<ValueType>(false); TestRoundTrip<ValueType>(false); // If this device does not support managed memory, skip the managed tests. if (!CudaAllocator::UsingManagedMemory()) { std::cerr << "Skipping some tests -- device does not support managed memory.\n"; } else { TestPrepareForInput<ValueType>(true); TestPrepareForInPlace<ValueType>(true); TestPrepareForOutput<ValueType>(true); TestReleaseResourcesExecution<ValueType>(true); TestRoundTrip<ValueType>(true); } } struct ArgToTemplateType { template <typename ValueType> void operator()(ValueType) const { DoTests<ValueType>(); } }; void Launch() { using Types = vtkm::ListTagBase<vtkm::UInt8, vtkm::Vec<vtkm::UInt8, 3>, vtkm::Float32, vtkm::Vec<vtkm::Float32, 4>, vtkm::Float64, vtkm::Vec<vtkm::Float64, 4>>; vtkm::testing::Testing::TryTypes(ArgToTemplateType(), Types()); } } // end anon namespace int UnitTestCudaShareUserProvidedManagedMemory(int argc, char* argv[]) { auto& tracker = vtkm::cont::GetRuntimeDeviceTracker(); tracker.ForceDevice(vtkm::cont::DeviceAdapterTagCuda{}); int ret = vtkm::cont::testing::Testing::Run(Launch, argc, argv); return vtkm::cont::cuda::internal::Testing::CheckCudaBeforeExit(ret); }
be4c36dbc970e9c8c25648a383d884cb9833b846.cu
//============================================================================ // Copyright (c) Kitware, Inc. // All rights reserved. // See LICENSE.txt for details. // // This software is distributed WITHOUT ANY WARRANTY; without even // the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the above copyright notice for more information. //============================================================================ #include <vtkm/cont/cuda/internal/testing/Testing.h> #include <vtkm/cont/ArrayHandle.h> #include <vtkm/cont/RuntimeDeviceTracker.h> #include <vtkm/cont/cuda/DeviceAdapterCuda.h> #include <vtkm/cont/cuda/ErrorCuda.h> #include <vtkm/cont/cuda/internal/CudaAllocator.h> #include <vtkm/cont/cuda/internal/testing/Testing.h> #include <cuda_runtime.h> using vtkm::cont::cuda::internal::CudaAllocator; namespace { template <typename ValueType> ValueType* AllocateManagedPointer(vtkm::Id numValues) { void* result; VTKM_CUDA_CALL(cudaMallocManaged(&result, static_cast<size_t>(numValues) * sizeof(ValueType))); // Some sanity checks: VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(result), "Allocated pointer is not a device pointer."); VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(result), "Allocated pointer is not managed."); return static_cast<ValueType*>(result); } template <typename ValueType> ValueType* AllocateDevicePointer(vtkm::Id numValues) { void* result; VTKM_CUDA_CALL(cudaMalloc(&result, static_cast<size_t>(numValues) * sizeof(ValueType))); // Some sanity checks: VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(result), "Allocated pointer is not a device pointer."); VTKM_TEST_ASSERT(!CudaAllocator::IsManagedPointer(result), "Allocated pointer is managed."); return static_cast<ValueType*>(result); } template <typename ValueType> vtkm::cont::ArrayHandle<ValueType> CreateArrayHandle(vtkm::Id numValues, bool managed) { ValueType* ptr = managed ? 
AllocateManagedPointer<ValueType>(numValues) : AllocateDevicePointer<ValueType>(numValues); return vtkm::cont::make_ArrayHandle(ptr, numValues); } template <typename ValueType> void TestPrepareForInput(bool managed) { vtkm::cont::ArrayHandle<ValueType> handle = CreateArrayHandle<ValueType>(32, managed); handle.PrepareForInput(vtkm::cont::DeviceAdapterTagCuda()); void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after PrepareForInput."); VTKM_TEST_ASSERT(execArray != nullptr, "No execution array after PrepareForInput."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(execArray), "PrepareForInput execution array not device pointer."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "PrepareForInput control array not device pointer."); if (managed) { VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(execArray), "PrepareForInput execution array unmanaged."); VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(contArray), "PrepareForInput control array unmanaged."); } VTKM_TEST_ASSERT(execArray == contArray, "PrepareForInput managed arrays not shared."); } template <typename ValueType> void TestPrepareForInPlace(bool managed) { vtkm::cont::ArrayHandle<ValueType> handle = CreateArrayHandle<ValueType>(32, managed); handle.PrepareForInPlace(vtkm::cont::DeviceAdapterTagCuda()); void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after PrepareForInPlace."); VTKM_TEST_ASSERT(execArray != nullptr, "No execution array after PrepareForInPlace."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(execArray), "PrepareForInPlace execution array not device pointer."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "PrepareForInPlace control array not device pointer."); if (managed) { 
VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(execArray), "PrepareForInPlace execution array unmanaged."); VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(contArray), "PrepareForInPlace control array unmanaged."); } VTKM_TEST_ASSERT(execArray == contArray, "PrepareForInPlace managed arrays not shared."); } template <typename ValueType> void TestPrepareForOutput(bool managed) { // Should reuse a managed control pointer if buffer is large enough. vtkm::cont::ArrayHandle<ValueType> handle = CreateArrayHandle<ValueType>(32, managed); handle.PrepareForOutput(32, vtkm::cont::DeviceAdapterTagCuda()); void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after PrepareForOutput."); VTKM_TEST_ASSERT(execArray != nullptr, "No execution array after PrepareForOutput."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(execArray), "PrepareForOutput execution array not device pointer."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "PrepareForOutput control array not device pointer."); if (managed) { VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(execArray), "PrepareForOutput execution array unmanaged."); VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(contArray), "PrepareForOutput control array unmanaged."); } VTKM_TEST_ASSERT(execArray == contArray, "PrepareForOutput managed arrays not shared."); } template <typename ValueType> void TestReleaseResourcesExecution(bool managed) { vtkm::cont::ArrayHandle<ValueType> handle = CreateArrayHandle<ValueType>(32, managed); handle.PrepareForInput(vtkm::cont::DeviceAdapterTagCuda()); void* origArray = handle.Internals->ExecutionArray; handle.ReleaseResourcesExecution(); void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after ReleaseResourcesExecution."); 
VTKM_TEST_ASSERT(execArray == nullptr, "Execution array not cleared after ReleaseResourcesExecution."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "ReleaseResourcesExecution control array not device pointer."); if (managed) { VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(contArray), "ReleaseResourcesExecution control array unmanaged."); } VTKM_TEST_ASSERT(origArray == contArray, "Control array changed after ReleaseResourcesExecution."); } template <typename ValueType> void TestRoundTrip(bool managed) { vtkm::cont::ArrayHandle<ValueType> handle = CreateArrayHandle<ValueType>(32, managed); handle.PrepareForOutput(32, vtkm::cont::DeviceAdapterTagCuda()); void* origContArray = handle.Internals->ControlArray->GetBasePointer(); { void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after PrepareForOutput."); VTKM_TEST_ASSERT(execArray != nullptr, "No execution array after PrepareForOutput."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(execArray), "PrepareForOutput execution array not device pointer."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "PrepareForOutput control array not device pointer."); if (managed) { VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(execArray), "PrepareForOutput execution array unmanaged."); VTKM_TEST_ASSERT(CudaAllocator::IsManagedPointer(contArray), "PrepareForOutput control array unmanaged."); } VTKM_TEST_ASSERT(execArray == contArray, "PrepareForOutput managed arrays not shared."); } try { handle.GetPortalControl(); } catch (vtkm::cont::ErrorBadValue&) { if (managed) { throw; // Exception is unexpected } // If !managed, this exception is intentional to indicate that the control // array is a non-managed device pointer, and thus unaccessible from the // control environment. Return because we've already validated correct // behavior by catching this exception. 
return; } if (!managed) { VTKM_TEST_FAIL("Expected exception not thrown."); } { void* contArray = handle.Internals->ControlArray->GetBasePointer(); void* execArray = handle.Internals->ExecutionArray; VTKM_TEST_ASSERT(contArray != nullptr, "No control array after GetPortalConst."); VTKM_TEST_ASSERT(execArray == nullptr, "Execution array not cleared after GetPortalConst."); VTKM_TEST_ASSERT(CudaAllocator::IsDevicePointer(contArray), "GetPortalConst control array not device pointer."); VTKM_TEST_ASSERT(origContArray == contArray, "GetPortalConst changed control array."); } } template <typename ValueType> void DoTests() { TestPrepareForInput<ValueType>(false); TestPrepareForInPlace<ValueType>(false); TestPrepareForOutput<ValueType>(false); TestReleaseResourcesExecution<ValueType>(false); TestRoundTrip<ValueType>(false); // If this device does not support managed memory, skip the managed tests. if (!CudaAllocator::UsingManagedMemory()) { std::cerr << "Skipping some tests -- device does not support managed memory.\n"; } else { TestPrepareForInput<ValueType>(true); TestPrepareForInPlace<ValueType>(true); TestPrepareForOutput<ValueType>(true); TestReleaseResourcesExecution<ValueType>(true); TestRoundTrip<ValueType>(true); } } struct ArgToTemplateType { template <typename ValueType> void operator()(ValueType) const { DoTests<ValueType>(); } }; void Launch() { using Types = vtkm::ListTagBase<vtkm::UInt8, vtkm::Vec<vtkm::UInt8, 3>, vtkm::Float32, vtkm::Vec<vtkm::Float32, 4>, vtkm::Float64, vtkm::Vec<vtkm::Float64, 4>>; vtkm::testing::Testing::TryTypes(ArgToTemplateType(), Types()); } } // end anon namespace int UnitTestCudaShareUserProvidedManagedMemory(int argc, char* argv[]) { auto& tracker = vtkm::cont::GetRuntimeDeviceTracker(); tracker.ForceDevice(vtkm::cont::DeviceAdapterTagCuda{}); int ret = vtkm::cont::testing::Testing::Run(Launch, argc, argv); return vtkm::cont::cuda::internal::Testing::CheckCudaBeforeExit(ret); }
aa2c6984cc8b297f017a1e6e06150b714b97930a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_header.h" #include <algorithm> #include <vector> #define NB_POINTS 21 #define NB_RUN 5 #define INF(A, B) (A.x < B.x || (A.x == B.x && A.y <= B.y)) #define EQ(A, B) (A.x == B.x && A.y == B.y) #define INFEQ(A, B) (INF(A, B) || EQ(A, B)) __device__ int A_diag[NB_POINTS]; __device__ int B_diag[NB_POINTS]; struct Point { int x, y; //Just to keep the comparison nearby bool operator <(const Point &p) const { return x < p.x || (x == p.x && y < p.y); } }; int cross(const Point &O, const Point &A, const Point &B) { return (A.x - O.x) * (B.y - O.y) - (A.y - O.y) * (B.x - O.x); } Point* generate_hull(int length) { Point* P = (Point*)malloc(length * sizeof(Point)); for (int i = 0; i < length; ++i) { P[i].x = rand() % length; P[i].y = rand() % length; } return P; } bool contains(Point* P, int length, int x, int y) { for (int i = 0; i < length; ++i) { if (P[i].x == x && P[i].y == y) return true; } return false; } void print_number(int i) { if ((i + 1) < 10) { printf("0%d ", i + 1); } else { printf("%d ", i + 1); } } void print_points(Point* P, int size, int P_length) { printf(" "); for (int i = 0; i < size; ++i) { print_number(i); } printf("\n---"); for (int i = 0; i < size; ++i) { printf("---"); } printf("\n"); for (int i = 0; i < size; ++i) { print_number(i); printf("|"); for (int j = 0; j < size; ++j) { if (contains(P, P_length, i, j)) { printf("+ "); } else { printf(" "); } } printf("\n"); } } /** Find the intersection between one diagonal and the merge path. 
*/ __global__ void pathBig_k(Point *A, int length_A, Point *B, int length_B, int start_diag) { int nb_threads = gridDim.x * blockDim.x; int i = threadIdx.x + blockIdx.x * blockDim.x; int index_diag = (i + start_diag) * (length_A + length_B) / nb_threads; duo K, P, Q; if (i > length_A) { K.x = i - length_A; K.y = length_A; P.x = length_A; P.y = i - length_A; } else { K.x = 0; K.y = i; P.x = i; P.y = 0; } while (true) { int offset = abs(K.y - P.y) / 2; Q.x = K.x + offset; Q.y = K.y - offset; //if (Q.y >= 0 && Q.x <= length_B && (Q.y == length_A || Q.x == 0 || A[Q.y] > B[Q.x - 1])) { if (Q.y >= 0 && Q.x <= length_B && (Q.y == length_A || Q.x == 0 || INF(B[Q.x - 1], A[Q.y]))) { //if (Q.x == length_B || Q.y == 0 || A[Q.y - 1] <= B[Q.x]) { if (Q.x == length_B || Q.y == 0 || INFEQ(A[Q.y - 1], B[Q.x])) { A_diag[index_diag] = Q.y; B_diag[index_diag] = Q.x; break; } else { K.x = Q.x + 1; K.y = Q.y - 1; } } else { P.x = Q.x - 1; P.y = Q.y + 1; } } } __global__ void mergeBig_k(Point *A, int length_A, Point *B, int length_B, Point* M, int start_diag) { int i = threadIdx.x + blockIdx.x * blockDim.x; int nb_threads = gridDim.x * blockDim.x; int length = (length_A + length_B) / nb_threads; int start_M = i * (length_A + length_B) / nb_threads; for (int k = 0; k < length; ++k) { int i = start_diag + start_M + k; //if (A_diag[i] < length_A && (B_diag[i] == length_B || A[A_diag[i]] <= B[B_diag[i]])) { if (A_diag[i] < length_A && (B_diag[i] == length_B || INFEQ(A[A_diag[i]], B[B_diag[i]]))) { M[start_M + k] = A[A_diag[i]]; } else { M[start_M + k] = B[B_diag[i]]; } } } void mergeSortGPU(Point *M, int length) { Point *M_dev, *M_dev_copy; int mergeSize = 2; testCUDA(hipMalloc((void**)&M_dev, length * sizeof(Point))); testCUDA(hipMalloc((void**)&M_dev_copy, length * sizeof(Point))); testCUDA(hipMemcpy(M_dev, M, length * sizeof(Point), hipMemcpyHostToDevice)); while (mergeSize <= pow(2, ceil(log2(length)))) { testCUDA(hipMemcpy(M_dev_copy, M_dev, length * sizeof(Point), 
hipMemcpyDeviceToDevice)); for (int k = 0; k < ((length + mergeSize - 1) / mergeSize); ++k) { if (k < (length / mergeSize)) pathBig_k << <1, mergeSize >> > (M_dev + k * mergeSize, mergeSize / 2, M_dev + (2 * k + 1)*(mergeSize / 2), mergeSize / 2, mergeSize*k); else { // k==length/mergeSize int mergeSizeLast = length % mergeSize; if (mergeSizeLast > mergeSize / 2) { pathBig_k << <1, mergeSizeLast >> > (M_dev + k * mergeSize, mergeSize / 2, M_dev + k * mergeSize + mergeSize / 2, mergeSizeLast - (mergeSize / 2), mergeSize*k); } } } testCUDA(hipDeviceSynchronize()); for (int k = 0; k < ((length + mergeSize - 1) / mergeSize); ++k) { if (k < (length / mergeSize)) mergeBig_k << <1, mergeSize >> > (M_dev_copy + k * mergeSize, mergeSize / 2, M_dev_copy + (2 * k + 1)*(mergeSize / 2), mergeSize / 2, M_dev + k * mergeSize, mergeSize*k); else { // k==length/mergeSize int mergeSizeLast = length % mergeSize; if (mergeSizeLast > mergeSize / 2) { mergeBig_k << <1, mergeSizeLast >> > (M_dev_copy + k * mergeSize, mergeSize / 2, M_dev_copy + k * mergeSize + mergeSize / 2, mergeSizeLast - (mergeSize / 2), M_dev + k * mergeSize, mergeSize*k); } } } testCUDA(hipDeviceSynchronize()); mergeSize *= 2; } testCUDA(hipMemcpy(M, M_dev, length * sizeof(Point), hipMemcpyDeviceToHost)); testCUDA(hipFree(M_dev)); testCUDA(hipFree(M_dev_copy)); } Point* convex_hull(Point* P, int n, int *h_length) { int k = 0; // Sort points lexicographically mergeSortGPU(P, n); Point* H = (Point*)malloc(n * sizeof(Point)); // Build lower hull for (int i = 0; i < n; ++i) { while (k >= 2 && cross(H[k - 2], H[k - 1], P[i]) <= 0) k--; H[k++] = P[i]; } // Build upper hull for (int i = n - 1, t = k + 1; i > 0; --i) { while (k >= t && cross(H[k - 2], H[k - 1], P[i - 1]) <= 0) k--; H[k++] = P[i - 1]; } *h_length = k; return H; } int main(int argc, char *argv[]) { // initialize random seed srand(time(0)); for (int i = 0; i < NB_RUN; ++i) { int h_length = 0; Point* P = generate_hull(NB_POINTS); printf("The set of points we 
want to get the convex hull of is : \n"); print_points(P, NB_POINTS, NB_POINTS); Point* H = convex_hull(P, NB_POINTS, &h_length); printf("The convex hull associated is : \n"); print_points(H, NB_POINTS, h_length); free(H); free(P); } }
aa2c6984cc8b297f017a1e6e06150b714b97930a.cu
#include "cuda_header.h" #include <algorithm> #include <vector> #define NB_POINTS 21 #define NB_RUN 5 #define INF(A, B) (A.x < B.x || (A.x == B.x && A.y <= B.y)) #define EQ(A, B) (A.x == B.x && A.y == B.y) #define INFEQ(A, B) (INF(A, B) || EQ(A, B)) __device__ int A_diag[NB_POINTS]; __device__ int B_diag[NB_POINTS]; struct Point { int x, y; //Just to keep the comparison nearby bool operator <(const Point &p) const { return x < p.x || (x == p.x && y < p.y); } }; int cross(const Point &O, const Point &A, const Point &B) { return (A.x - O.x) * (B.y - O.y) - (A.y - O.y) * (B.x - O.x); } Point* generate_hull(int length) { Point* P = (Point*)malloc(length * sizeof(Point)); for (int i = 0; i < length; ++i) { P[i].x = rand() % length; P[i].y = rand() % length; } return P; } bool contains(Point* P, int length, int x, int y) { for (int i = 0; i < length; ++i) { if (P[i].x == x && P[i].y == y) return true; } return false; } void print_number(int i) { if ((i + 1) < 10) { printf("0%d ", i + 1); } else { printf("%d ", i + 1); } } void print_points(Point* P, int size, int P_length) { printf(" "); for (int i = 0; i < size; ++i) { print_number(i); } printf("\n---"); for (int i = 0; i < size; ++i) { printf("---"); } printf("\n"); for (int i = 0; i < size; ++i) { print_number(i); printf("|"); for (int j = 0; j < size; ++j) { if (contains(P, P_length, i, j)) { printf("+ "); } else { printf(" "); } } printf("\n"); } } /** Find the intersection between one diagonal and the merge path. 
*/ __global__ void pathBig_k(Point *A, int length_A, Point *B, int length_B, int start_diag) { int nb_threads = gridDim.x * blockDim.x; int i = threadIdx.x + blockIdx.x * blockDim.x; int index_diag = (i + start_diag) * (length_A + length_B) / nb_threads; duo K, P, Q; if (i > length_A) { K.x = i - length_A; K.y = length_A; P.x = length_A; P.y = i - length_A; } else { K.x = 0; K.y = i; P.x = i; P.y = 0; } while (true) { int offset = abs(K.y - P.y) / 2; Q.x = K.x + offset; Q.y = K.y - offset; //if (Q.y >= 0 && Q.x <= length_B && (Q.y == length_A || Q.x == 0 || A[Q.y] > B[Q.x - 1])) { if (Q.y >= 0 && Q.x <= length_B && (Q.y == length_A || Q.x == 0 || INF(B[Q.x - 1], A[Q.y]))) { //if (Q.x == length_B || Q.y == 0 || A[Q.y - 1] <= B[Q.x]) { if (Q.x == length_B || Q.y == 0 || INFEQ(A[Q.y - 1], B[Q.x])) { A_diag[index_diag] = Q.y; B_diag[index_diag] = Q.x; break; } else { K.x = Q.x + 1; K.y = Q.y - 1; } } else { P.x = Q.x - 1; P.y = Q.y + 1; } } } __global__ void mergeBig_k(Point *A, int length_A, Point *B, int length_B, Point* M, int start_diag) { int i = threadIdx.x + blockIdx.x * blockDim.x; int nb_threads = gridDim.x * blockDim.x; int length = (length_A + length_B) / nb_threads; int start_M = i * (length_A + length_B) / nb_threads; for (int k = 0; k < length; ++k) { int i = start_diag + start_M + k; //if (A_diag[i] < length_A && (B_diag[i] == length_B || A[A_diag[i]] <= B[B_diag[i]])) { if (A_diag[i] < length_A && (B_diag[i] == length_B || INFEQ(A[A_diag[i]], B[B_diag[i]]))) { M[start_M + k] = A[A_diag[i]]; } else { M[start_M + k] = B[B_diag[i]]; } } } void mergeSortGPU(Point *M, int length) { Point *M_dev, *M_dev_copy; int mergeSize = 2; testCUDA(cudaMalloc((void**)&M_dev, length * sizeof(Point))); testCUDA(cudaMalloc((void**)&M_dev_copy, length * sizeof(Point))); testCUDA(cudaMemcpy(M_dev, M, length * sizeof(Point), cudaMemcpyHostToDevice)); while (mergeSize <= pow(2, ceil(log2(length)))) { testCUDA(cudaMemcpy(M_dev_copy, M_dev, length * sizeof(Point), 
cudaMemcpyDeviceToDevice)); for (int k = 0; k < ((length + mergeSize - 1) / mergeSize); ++k) { if (k < (length / mergeSize)) pathBig_k << <1, mergeSize >> > (M_dev + k * mergeSize, mergeSize / 2, M_dev + (2 * k + 1)*(mergeSize / 2), mergeSize / 2, mergeSize*k); else { // k==length/mergeSize int mergeSizeLast = length % mergeSize; if (mergeSizeLast > mergeSize / 2) { pathBig_k << <1, mergeSizeLast >> > (M_dev + k * mergeSize, mergeSize / 2, M_dev + k * mergeSize + mergeSize / 2, mergeSizeLast - (mergeSize / 2), mergeSize*k); } } } testCUDA(cudaDeviceSynchronize()); for (int k = 0; k < ((length + mergeSize - 1) / mergeSize); ++k) { if (k < (length / mergeSize)) mergeBig_k << <1, mergeSize >> > (M_dev_copy + k * mergeSize, mergeSize / 2, M_dev_copy + (2 * k + 1)*(mergeSize / 2), mergeSize / 2, M_dev + k * mergeSize, mergeSize*k); else { // k==length/mergeSize int mergeSizeLast = length % mergeSize; if (mergeSizeLast > mergeSize / 2) { mergeBig_k << <1, mergeSizeLast >> > (M_dev_copy + k * mergeSize, mergeSize / 2, M_dev_copy + k * mergeSize + mergeSize / 2, mergeSizeLast - (mergeSize / 2), M_dev + k * mergeSize, mergeSize*k); } } } testCUDA(cudaDeviceSynchronize()); mergeSize *= 2; } testCUDA(cudaMemcpy(M, M_dev, length * sizeof(Point), cudaMemcpyDeviceToHost)); testCUDA(cudaFree(M_dev)); testCUDA(cudaFree(M_dev_copy)); } Point* convex_hull(Point* P, int n, int *h_length) { int k = 0; // Sort points lexicographically mergeSortGPU(P, n); Point* H = (Point*)malloc(n * sizeof(Point)); // Build lower hull for (int i = 0; i < n; ++i) { while (k >= 2 && cross(H[k - 2], H[k - 1], P[i]) <= 0) k--; H[k++] = P[i]; } // Build upper hull for (int i = n - 1, t = k + 1; i > 0; --i) { while (k >= t && cross(H[k - 2], H[k - 1], P[i - 1]) <= 0) k--; H[k++] = P[i - 1]; } *h_length = k; return H; } int main(int argc, char *argv[]) { // initialize random seed srand(time(0)); for (int i = 0; i < NB_RUN; ++i) { int h_length = 0; Point* P = generate_hull(NB_POINTS); printf("The set of 
points we want to get the convex hull of is : \n"); print_points(P, NB_POINTS, NB_POINTS); Point* H = convex_hull(P, NB_POINTS, &h_length); printf("The convex hull associated is : \n"); print_points(H, NB_POINTS, h_length); free(H); free(P); } }
69741a59c4d1ce6a28e119e2fdecf5e094a9e55b.hip
// !!! This is a file automatically generated by hipify!!! /////////////////////////////////////////////////////////// // SolverGates.cpp // Implementation of the Class SolverGates // Created on: 27-Dec-2013 7:57:50 PM // Original author: Saeed Shariati /////////////////////////////////////////////////////////// #include "SolverGates.h" #include <assert.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hip/hip_runtime.h> #include <math.h> using namespace pn2s::models; #define SINGULARITY 1.0e-6 //A mask to check INSTANT variable in the channel #define INSTANT_X 1 #define INSTANT_Y 2 #define INSTANT_Z 4 #define IS_SECOND_GATE 8 #define NUMBER_OF_MULTI_PROCESSOR 8 #define PARAM_SIZE 13 SolverGates::SolverGates(): _stream(0), _Vm(0) { } SolverGates::~SolverGates() { } size_t SolverGates::AllocateMemory(models::ModelStatistic& s, hipStream_t stream) { _m_statistic = s; _stream = stream; if(_m_statistic.nGates <= 0) return 0; _nCompt = s.nCompts_per_model; size_t val = 0; val += _ch_currents_gk.AllocateMemory(_m_statistic.nChannels_all); val += _ch_currents_gkek.AllocateMemory(_m_statistic.nChannels_all); val += _state.AllocateMemory(_m_statistic.nGates, 0); val += _gk.AllocateMemory(_m_statistic.nChannels_all, 0); //Channel currents //Indices val += _comptIndex.AllocateMemory(_m_statistic.nGates, 0); val += _channelIndex.AllocateMemory(_m_statistic.nGates, 0); val += _gateIndex.AllocateMemory(_m_statistic.nGates, 0); //Constant values val += _ek.AllocateMemory(_m_statistic.nGates, 0); val += _gbar.AllocateMemory(_m_statistic.nGates, 0); val += _power.AllocateMemory(_m_statistic.nGates, 0); val += _params.AllocateMemory(_m_statistic.nGates, 0); val += _params_div_min_max.AllocateMemory(_m_statistic.nGates, 0); int threadSize = min(max((int)_m_statistic.nChannels_all/8,16), 32); _threads=dim3(2,threadSize, 1); _blocks=dim3(max((int)(ceil((double)_m_statistic.nChannels_all / _threads.y)),1), 1); return val; } void SolverGates::PrepareSolver(PField<TYPE_>* Vm, 
PField<TYPE_>* hm, PField<TYPE_>* rhs) { if(_m_statistic.nGates) { _ch_currents_gk.Host2Device_Async(_stream); _ch_currents_gkek.Host2Device_Async(_stream); _state.Host2Device_Async(_stream); _gk.Host2Device_Async(_stream); _comptIndex.Host2Device_Async(_stream); _channelIndex.Host2Device_Async(_stream); _gateIndex.Host2Device_Async(_stream); _ek.Host2Device_Async(_stream); _gbar.Host2Device_Async(_stream); _power.Host2Device_Async(_stream); _params.Host2Device_Async(_stream); _params_div_min_max.Host2Device_Async(_stream); _Vm = Vm; _rhs = rhs; _hm = hm; _threads=dim3(32*3); _blocks=dim3(ceil(_m_statistic.nGates / (double)_threads.x)); } } /** * KERNELS */ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } __global__ void advanceGates( TYPE_* state, TYPE_* gk, TYPE_* current_gk, TYPE_* current_gk_ek, TYPE_* power, pn2s::models::GateParams* params, TYPE3_* div_min_max, TYPE_* gbar, TYPE_* ek, int* comptIndex, int* channelIndex, int* gateIndex, TYPE_* v, TYPE_* rhs, size_t size, TYPE_ dt) { extern __shared__ TYPE2_ data[]; TYPE_ temp, temp2, A, B; int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; int fi = gateIndex[idx]; if ( power[idx] > 0.0 ) { TYPE_ x = v[comptIndex[idx]]; temp = div_min_max[idx].y; temp2 = div_min_max[idx].z; // Calculate new states TYPE_ dx = ( temp2 - temp ) / div_min_max[idx].x; // Check boundaries x = fmax(temp, x); x = fmin(temp2, x); if ( fabs(params[idx].p[PARAMS_A_F]) < SINGULARITY ) { temp = 0.0; A = 0.0; } else { temp2 = params[idx].p[PARAMS_A_C] + exp( ( x + params[idx].p[PARAMS_A_D] ) / params[idx].p[PARAMS_A_F] ); if ( fabs( temp2 ) < SINGULARITY ) { temp2 = params[idx].p[PARAMS_A_C] + exp( ( x 
+ dx/10.0 + params[idx].p[PARAMS_A_D] ) / params[idx].p[PARAMS_A_F] ); temp = ( params[idx].p[PARAMS_A_A] + params[idx].p[PARAMS_A_B] * (x + dx/10 ) ) / temp2; temp2 = params[idx].p[PARAMS_A_C] + exp( ( x - dx/10.0 + params[idx].p[PARAMS_A_D] ) / params[idx].p[PARAMS_A_F] ); temp += ( params[idx].p[PARAMS_A_A] + params[idx].p[1] * (x - dx/10 ) ) / temp2; temp /= 2.0; A = temp; } else { temp = ( params[idx].p[PARAMS_A_A] + params[idx].p[PARAMS_A_B] * x) / temp2; A = temp; } } if ( fabs( params[idx].p[PARAMS_B_F] ) < SINGULARITY ) { B = 0.0; } else { temp2 = params[idx].p[7] + exp( ( x + params[idx].p[8] ) / params[idx].p[9] ); if ( fabs( temp2 ) < SINGULARITY ) { temp2 = params[idx].p[7] + exp( ( x + dx/10.0 + params[idx].p[8] ) / params[idx].p[9] ); temp = (params[idx].p[5] + params[idx].p[6] * (x + dx/10) ) / temp2; temp2 = params[idx].p[7] + exp( ( x - dx/10.0 + params[idx].p[8] ) / params[idx].p[9] ); temp += (params[idx].p[5] + params[idx].p[6] * (x - dx/10) ) / temp2; temp /= 2.0; B = temp; } else { B = (params[idx].p[5] + params[idx].p[6] * x ) / temp2; } } if ( fabs( temp2 ) > SINGULARITY ) B += temp; temp2 = state[idx]; temp = 1.0 + dt / 2.0 * B; //new value for temp state[idx] = ( temp2 * ( 2.0 - temp ) + dt * A ) / temp; __syncthreads(); //Update channels characteristics data[threadIdx.x].x = temp2; if (power[idx] > 1) { data[threadIdx.x].x *= temp2; if (power[idx] > 2) { data[threadIdx.x].x *= temp2; if (power[idx] > 3) { data[threadIdx.x].x *= temp2; if (power[idx] > 4) { data[threadIdx.x].x = pow( temp2, power[idx]); } } } } __syncthreads(); unsigned char bitmap = 1; if(fi & bitmap) { data[threadIdx.x-1].x *= data[threadIdx.x].x; data[threadIdx.x].x = 0; } __syncthreads(); data[threadIdx.x].x = gbar[idx] *data[threadIdx.x].x; data[threadIdx.x].y = ek[idx] *data[threadIdx.x].x; if(!(fi & bitmap)) { current_gk[channelIndex[idx]] = data[threadIdx.x].x; // int cidx = comptIndex[idx]; // current_gk[cidx] = 0; // current_gk_ek[cidx] = 0; // 
atomicAdd(current_gk + cidx, data[threadIdx.x].x); // atomicAdd(current_gk_ek + cidx, data[threadIdx.x].y); } } } double SolverGates::Input() { return 0; } double SolverGates::Process() { clock_t start_time = clock(); if(_m_statistic.nGates > 0) { int smem_size = (sizeof(TYPE2_) * _threads.x); hipLaunchKernelGGL(( advanceGates) , dim3(_blocks), dim3(_threads), smem_size, _stream, _state.device, _gk.device, _ch_currents_gk.device, _ch_currents_gkek.device, _power.device, _params.device, _params_div_min_max.device, _gbar.device, _ek.device, _comptIndex.device, _channelIndex.device, _gateIndex.device, _Vm->device, _rhs->device, _m_statistic.nGates, _m_statistic.dt); assert(hipSuccess == hipGetLastError()); } double elapsed_time = ( std::clock() - start_time ); // cout << "GATE: " << elapsed_time << endl << flush; return elapsed_time; } double SolverGates::Output() { clock_t start_time = clock(); // _ch_currents_gk_ek.Device2Host_Async(_stream); return std::clock() - start_time ; } /** * Set/Get methods */ void SolverGates::SetGateParams(int index, vector<double>& params) { for (int i = 0; i < min((int)params.size(),13); ++i) _params[index].p[i] = (TYPE_)params[i]; _params_div_min_max[index].x = (TYPE_)params[PARAMS_DIV]; _params_div_min_max[index].y = (TYPE_)params[PARAMS_MIN]; _params_div_min_max[index].z = (TYPE_)params[PARAMS_MAX]; } void SolverGates::SetValue(int index, FIELD::GATE field, TYPE_ value) { switch(field) { case FIELD::GATE_CH_GBAR: _gbar[index] = value; break; case FIELD::GATE_CH_GK: _gk[index] = value; _ch_currents_gk[_channelIndex[index]] = value; break; case FIELD::GATE_CH_EK: _ek[index] = value; _ch_currents_gkek[_channelIndex[index]] = value; break; case FIELD::GATE_POWER: _power[index] = (unsigned char)value; break; case FIELD::GATE_STATE: _state[index] = value; break; case FIELD::GATE_COMPONENT_INDEX: _comptIndex[index] = (int)value; break; case FIELD::GATE_CHANNEL_INDEX: _channelIndex[index] = (int)value; break; case FIELD::GATE_INDEX: 
_gateIndex[index] = (int)value; break; } } TYPE_ SolverGates::GetValue(int index, FIELD::GATE field) { // switch(field) // { // case FIELD::CH_GBAR: // return _gbar[index]; // case FIELD::CH_X_POWER: // return _xPower[index]; // case FIELD::CH_Y_POWER: // return _yPower[index]; // case FIELD::CH_Z_POWER: // return _zPower[index]; // } return 0; }
69741a59c4d1ce6a28e119e2fdecf5e094a9e55b.cu
/////////////////////////////////////////////////////////// // SolverGates.cpp // Implementation of the Class SolverGates // Created on: 27-Dec-2013 7:57:50 PM // Original author: Saeed Shariati /////////////////////////////////////////////////////////// #include "SolverGates.h" #include <assert.h> #include <cuda_runtime.h> #include <cublas_v2.h> #include <cuda.h> #include <math.h> using namespace pn2s::models; #define SINGULARITY 1.0e-6 //A mask to check INSTANT variable in the channel #define INSTANT_X 1 #define INSTANT_Y 2 #define INSTANT_Z 4 #define IS_SECOND_GATE 8 #define NUMBER_OF_MULTI_PROCESSOR 8 #define PARAM_SIZE 13 SolverGates::SolverGates(): _stream(0), _Vm(0) { } SolverGates::~SolverGates() { } size_t SolverGates::AllocateMemory(models::ModelStatistic& s, cudaStream_t stream) { _m_statistic = s; _stream = stream; if(_m_statistic.nGates <= 0) return 0; _nCompt = s.nCompts_per_model; size_t val = 0; val += _ch_currents_gk.AllocateMemory(_m_statistic.nChannels_all); val += _ch_currents_gkek.AllocateMemory(_m_statistic.nChannels_all); val += _state.AllocateMemory(_m_statistic.nGates, 0); val += _gk.AllocateMemory(_m_statistic.nChannels_all, 0); //Channel currents //Indices val += _comptIndex.AllocateMemory(_m_statistic.nGates, 0); val += _channelIndex.AllocateMemory(_m_statistic.nGates, 0); val += _gateIndex.AllocateMemory(_m_statistic.nGates, 0); //Constant values val += _ek.AllocateMemory(_m_statistic.nGates, 0); val += _gbar.AllocateMemory(_m_statistic.nGates, 0); val += _power.AllocateMemory(_m_statistic.nGates, 0); val += _params.AllocateMemory(_m_statistic.nGates, 0); val += _params_div_min_max.AllocateMemory(_m_statistic.nGates, 0); int threadSize = min(max((int)_m_statistic.nChannels_all/8,16), 32); _threads=dim3(2,threadSize, 1); _blocks=dim3(max((int)(ceil((double)_m_statistic.nChannels_all / _threads.y)),1), 1); return val; } void SolverGates::PrepareSolver(PField<TYPE_>* Vm, PField<TYPE_>* hm, PField<TYPE_>* rhs) { if(_m_statistic.nGates) { 
_ch_currents_gk.Host2Device_Async(_stream); _ch_currents_gkek.Host2Device_Async(_stream); _state.Host2Device_Async(_stream); _gk.Host2Device_Async(_stream); _comptIndex.Host2Device_Async(_stream); _channelIndex.Host2Device_Async(_stream); _gateIndex.Host2Device_Async(_stream); _ek.Host2Device_Async(_stream); _gbar.Host2Device_Async(_stream); _power.Host2Device_Async(_stream); _params.Host2Device_Async(_stream); _params_div_min_max.Host2Device_Async(_stream); _Vm = Vm; _rhs = rhs; _hm = hm; _threads=dim3(32*3); _blocks=dim3(ceil(_m_statistic.nGates / (double)_threads.x)); } } /** * KERNELS */ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } __global__ void advanceGates( TYPE_* state, TYPE_* gk, TYPE_* current_gk, TYPE_* current_gk_ek, TYPE_* power, pn2s::models::GateParams* params, TYPE3_* div_min_max, TYPE_* gbar, TYPE_* ek, int* comptIndex, int* channelIndex, int* gateIndex, TYPE_* v, TYPE_* rhs, size_t size, TYPE_ dt) { extern __shared__ TYPE2_ data[]; TYPE_ temp, temp2, A, B; int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; int fi = gateIndex[idx]; if ( power[idx] > 0.0 ) { TYPE_ x = v[comptIndex[idx]]; temp = div_min_max[idx].y; temp2 = div_min_max[idx].z; // Calculate new states TYPE_ dx = ( temp2 - temp ) / div_min_max[idx].x; // Check boundaries x = fmax(temp, x); x = fmin(temp2, x); if ( fabs(params[idx].p[PARAMS_A_F]) < SINGULARITY ) { temp = 0.0; A = 0.0; } else { temp2 = params[idx].p[PARAMS_A_C] + exp( ( x + params[idx].p[PARAMS_A_D] ) / params[idx].p[PARAMS_A_F] ); if ( fabs( temp2 ) < SINGULARITY ) { temp2 = params[idx].p[PARAMS_A_C] + exp( ( x + dx/10.0 + params[idx].p[PARAMS_A_D] ) / params[idx].p[PARAMS_A_F] 
); temp = ( params[idx].p[PARAMS_A_A] + params[idx].p[PARAMS_A_B] * (x + dx/10 ) ) / temp2; temp2 = params[idx].p[PARAMS_A_C] + exp( ( x - dx/10.0 + params[idx].p[PARAMS_A_D] ) / params[idx].p[PARAMS_A_F] ); temp += ( params[idx].p[PARAMS_A_A] + params[idx].p[1] * (x - dx/10 ) ) / temp2; temp /= 2.0; A = temp; } else { temp = ( params[idx].p[PARAMS_A_A] + params[idx].p[PARAMS_A_B] * x) / temp2; A = temp; } } if ( fabs( params[idx].p[PARAMS_B_F] ) < SINGULARITY ) { B = 0.0; } else { temp2 = params[idx].p[7] + exp( ( x + params[idx].p[8] ) / params[idx].p[9] ); if ( fabs( temp2 ) < SINGULARITY ) { temp2 = params[idx].p[7] + exp( ( x + dx/10.0 + params[idx].p[8] ) / params[idx].p[9] ); temp = (params[idx].p[5] + params[idx].p[6] * (x + dx/10) ) / temp2; temp2 = params[idx].p[7] + exp( ( x - dx/10.0 + params[idx].p[8] ) / params[idx].p[9] ); temp += (params[idx].p[5] + params[idx].p[6] * (x - dx/10) ) / temp2; temp /= 2.0; B = temp; } else { B = (params[idx].p[5] + params[idx].p[6] * x ) / temp2; } } if ( fabs( temp2 ) > SINGULARITY ) B += temp; temp2 = state[idx]; temp = 1.0 + dt / 2.0 * B; //new value for temp state[idx] = ( temp2 * ( 2.0 - temp ) + dt * A ) / temp; __syncthreads(); //Update channels characteristics data[threadIdx.x].x = temp2; if (power[idx] > 1) { data[threadIdx.x].x *= temp2; if (power[idx] > 2) { data[threadIdx.x].x *= temp2; if (power[idx] > 3) { data[threadIdx.x].x *= temp2; if (power[idx] > 4) { data[threadIdx.x].x = pow( temp2, power[idx]); } } } } __syncthreads(); unsigned char bitmap = 1; if(fi & bitmap) { data[threadIdx.x-1].x *= data[threadIdx.x].x; data[threadIdx.x].x = 0; } __syncthreads(); data[threadIdx.x].x = gbar[idx] *data[threadIdx.x].x; data[threadIdx.x].y = ek[idx] *data[threadIdx.x].x; if(!(fi & bitmap)) { current_gk[channelIndex[idx]] = data[threadIdx.x].x; // int cidx = comptIndex[idx]; // current_gk[cidx] = 0; // current_gk_ek[cidx] = 0; // atomicAdd(current_gk + cidx, data[threadIdx.x].x); // atomicAdd(current_gk_ek + cidx, 
data[threadIdx.x].y); } } } double SolverGates::Input() { return 0; } double SolverGates::Process() { clock_t start_time = clock(); if(_m_statistic.nGates > 0) { int smem_size = (sizeof(TYPE2_) * _threads.x); advanceGates <<<_blocks, _threads, smem_size, _stream>>> ( _state.device, _gk.device, _ch_currents_gk.device, _ch_currents_gkek.device, _power.device, _params.device, _params_div_min_max.device, _gbar.device, _ek.device, _comptIndex.device, _channelIndex.device, _gateIndex.device, _Vm->device, _rhs->device, _m_statistic.nGates, _m_statistic.dt); assert(cudaSuccess == cudaGetLastError()); } double elapsed_time = ( std::clock() - start_time ); // cout << "GATE: " << elapsed_time << endl << flush; return elapsed_time; } double SolverGates::Output() { clock_t start_time = clock(); // _ch_currents_gk_ek.Device2Host_Async(_stream); return std::clock() - start_time ; } /** * Set/Get methods */ void SolverGates::SetGateParams(int index, vector<double>& params) { for (int i = 0; i < min((int)params.size(),13); ++i) _params[index].p[i] = (TYPE_)params[i]; _params_div_min_max[index].x = (TYPE_)params[PARAMS_DIV]; _params_div_min_max[index].y = (TYPE_)params[PARAMS_MIN]; _params_div_min_max[index].z = (TYPE_)params[PARAMS_MAX]; } void SolverGates::SetValue(int index, FIELD::GATE field, TYPE_ value) { switch(field) { case FIELD::GATE_CH_GBAR: _gbar[index] = value; break; case FIELD::GATE_CH_GK: _gk[index] = value; _ch_currents_gk[_channelIndex[index]] = value; break; case FIELD::GATE_CH_EK: _ek[index] = value; _ch_currents_gkek[_channelIndex[index]] = value; break; case FIELD::GATE_POWER: _power[index] = (unsigned char)value; break; case FIELD::GATE_STATE: _state[index] = value; break; case FIELD::GATE_COMPONENT_INDEX: _comptIndex[index] = (int)value; break; case FIELD::GATE_CHANNEL_INDEX: _channelIndex[index] = (int)value; break; case FIELD::GATE_INDEX: _gateIndex[index] = (int)value; break; } } TYPE_ SolverGates::GetValue(int index, FIELD::GATE field) { // switch(field) 
// { // case FIELD::CH_GBAR: // return _gbar[index]; // case FIELD::CH_X_POWER: // return _xPower[index]; // case FIELD::CH_Y_POWER: // return _yPower[index]; // case FIELD::CH_Z_POWER: // return _zPower[index]; // } return 0; }
91d85407db160292dd8104fd251e9128028cc980.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "math_functions.h" #include "math_constants.h" #include <stdio.h> #include <stdlib.h> #include <stdexcept> #include "kernel_hip.cuh" #include "SettingsWrapper.h" #define USE_SHARED #define cudaErrorCheck(...) { cudaErrorCheckingFunction((__VA_ARGS__), __FILE__, __LINE__); } #define blockSize ((int) 64) #define blocksToLoad ((int) 2) static Particle *plist; static int numpart; static Particle *device_plist; static Vector3 *host_pos; static inline void cudaErrorCheckingFunction(hipError_t error, const char* file, int line, bool abort = true) { if (error != hipSuccess) { fprintf(stderr, "Cuda error: %s %s %d\n", hipGetErrorString(error), file, line); //throw std::runtime_error("CUDA error"); } } static __device__ void setColour(Particle& p, int index, Vector3* outColor) { float magVel = magVector3(&p.v); //outColor[idx] = {fminf(1.0, 0.5/minDist) ,0.1, fminf(1.0f, minDist/0.5)}; float r = fmaxf(fminf(magVel * 7, 1.0f), 0.1f); float g = fmaxf(fminf(magVel * 7 - 0.5, 1.0f), 0.1f); float b = fmaxf(fminf(magVel * 7 - 0.75, 1.0f), 0.1f); outColor[index] = { r, g, b }; } static __global__ void gpu_doStepWithShared(Vector3 *outPos, Vector3 *outColor, Particle * nplist, int numP, float k0, float G, float timeStep) { int idx = blockIdx.x * blockDim.x + threadIdx.x; Vector3 newPos, newV; __shared__ Particle plist[blockSize*blocksToLoad]; Particle self = nplist[idx]; Vector3 force = { 0.0f,0.0f,0.0f }; /* e.g., Block size = 8, sizeof(nplist) = 4*8 = 32, blocksToLoad = 2 Blocks: 0 1 2 3 |--------|--------|--------|--------| Each block will have 2 blocks loaded into shared memory. 
Block 1 threads will load on the second iteration: T0: nplist[16] and nplist[24] T1: 17 25 T2: 18 26 T3: 19 27 T4: 20 28 T5: 21 29 T6: 22 30 T7: 23 31 T2 loads from: nplist: 0 1 2 3 |--------|--------|--X-----|--Y-----| | | V V into plist: |--Z----- --W-----| */ //blockDim.x*blockSize; for (int block = 0; block < numP; block += blockSize * blocksToLoad) { //For every section of blocks to load into plist for (int i = 0; i < blocksToLoad; i++) { int offset = threadIdx.x + i * blockSize; plist[offset] = nplist[block + offset]; } __syncthreads(); for (int i = 0; i < blockSize*blocksToLoad; i++) { Vector3 r; copyVector3(&r, &self.pos); subVector3(&r, &plist[i].pos); float magR = magVector3(&r) + 0.05; float scaleFactor = -plist[i].m / (magR*magR*magR); Vector3 newForce = { 0.0f, 0.0f ,0.0f }; copyVector3(&newForce, &r); scaleVector3(&newForce, G*scaleFactor); addVector3(&force, &newForce); } } newV.x = self.v.x + force.x*timeStep / self.m; newV.y = self.v.y + force.y*timeStep / self.m; newV.z = self.v.z + force.z*timeStep / self.m; float magNewV = magVector3(&newV); if (magNewV > 25) { normVector3(&newV); newV.x *= 25; newV.y *= 25; newV.z *= 25; } newPos.x = self.pos.x + timeStep * (self.v.x + newV.x)*0.5; newPos.y = self.pos.y + timeStep * (self.v.y + newV.y)*0.5; newPos.z = self.pos.z + timeStep * (self.v.z + newV.z)*0.5; if (self.isStationary) { copyVector3(&newPos, &self.pos); copyVector3(&newV, &self.v); } setColour(self, idx, outColor); copyVector3(&outPos[idx], &self.pos); copyVector3(&nplist[idx].pos, &newPos); copyVector3(&nplist[idx].v, &newV); } static __global__ void loadOutputs(Vector3 * pos, Vector3 * colour, Particle * nplist) { int idx = blockIdx.x * blockDim.x + threadIdx.x; setColour(nplist[idx], idx, colour); copyVector3(&pos[idx], &nplist[idx].pos); } void DeviceFunctions::loadData(Vector3 * pos, Vector3 * colour) { dim3 blocks(numpart / blockSize, 1, 1); dim3 threadsPerBlock(blockSize, 1, 1); hipLaunchKernelGGL(( loadOutputs) , dim3(blocks), 
dim3(threadsPerBlock) , 0, 0, pos, colour, device_plist); cudaErrorCheck(hipGetLastError()); hipDeviceSynchronize(); } Particle * DeviceFunctions::getPlist() { return plist; } Vector3* DeviceFunctions::getParticlePos(Vector3 *pos) { hipMemcpy(host_pos, pos, sizeof(Vector3)*numpart, hipMemcpyDeviceToHost); return host_pos; } void DeviceFunctions::doStep(float timestep, Vector3 *pos, Vector3 *colour) { dim3 blocks(numpart / blockSize, 1, 1); dim3 threadsPerBlock(blockSize, 1, 1); hipDeviceSynchronize(); hipLaunchKernelGGL(( gpu_doStepWithShared) , dim3(blocks), dim3(threadsPerBlock) , 0, 0, pos, colour, device_plist, numpart, 1.5, 0.0001, timestep); cudaErrorCheck(hipGetLastError()); } static float randFloat() { return (float)((double) rand() / (double)RAND_MAX); } static float randRange(float a, float b) { return ((float)rand() / RAND_MAX)*(b - a) + a; } static float randRange(std::array<float, 2> ab) { return ((float)rand() / RAND_MAX)*(ab[1] - ab[0]) + ab[0]; } static float randGauss() { float u1 = randFloat(), u2 = randFloat(); return sqrtf(-2*logf(u1))*cosf(2*CUDART_PI*u2); } static std::array<float,2> randGauss2() { float u1 = randFloat(), u2 = randFloat(); return { sqrtf(-2*logf(u1))*cosf(2*CUDART_PI*u2), sqrtf(-2*logf(u1))*sinf(2*CUDART_PI*u2) }; } float magnitude(const std::array<float, 3>& a) { return sqrtf(a[0]*a[0] + a[1]*a[1] + a[2]*a[2]); } void sampleFromSpawn(std::array<float,3>& z_prev, float& p_prev) { SettingsWrapper &sw = SettingsWrapper::get(); const int samples = sw.Spawn.SpawnFunc_Samples; const float sigma = sw.Spawn.SpawnFunc_SigmaQ; for (int i = 0; i < samples; i++) { float p_prop; std::array<float, 3> z_prop; float u; do { z_prop = { sigma*randGauss() + z_prev[0], sigma*randGauss() + z_prev[1], sigma*randGauss() + z_prev[2] }; p_prop = sw.Spawn.SpawnFunc( z_prop[0], z_prop[1], z_prop[2], magnitude(z_prop), atan2f(z_prop[2], z_prop[0]), //Azimuth atan2f(z_prop[1], z_prop[0]*z_prop[0] + z_prop[2]*z_prop[2]), //Altitude randFloat()); u = 
randFloat(); } while (abs(p_prop) / p_prev <= u); if (p_prev > p_prop) { printf("%f %f\n", p_prev, p_prop); } p_prev = abs(p_prop); z_prev = z_prop; } } void spawnParticles() { SettingsWrapper &sw = SettingsWrapper::get(); const int sd = sw.Spawn.spawn_distr; std::array<float, 3> z_cur{ 0,0,0 }; float p_cur; if (sd == Spawn_Distr::USER_DEFINED) { p_cur = sw.Spawn.SpawnFunc( z_cur[0], z_cur[1], z_cur[2], magnitude(z_cur), atan2f(z_cur[2], z_cur[0]), //Azimuth atan2f(z_cur[1], z_cur[0] * z_cur[0] + z_cur[2] * z_cur[2]), //Altitude randFloat()); } for (int i = 0; i < numpart; i++) { plist[i].pos = { randRange(sw.Spawn.paramX),randRange(sw.Spawn.paramY),randRange(sw.Spawn.paramZ) }; switch (sd) { case Spawn_Distr::GAUSS: { float x = randGauss()*sw.Spawn.paramX[1] + sw.Spawn.paramX[0]; float y = randGauss()*sw.Spawn.paramY[1] + sw.Spawn.paramY[0]; float z = randGauss()*sw.Spawn.paramZ[1] + sw.Spawn.paramZ[0]; plist[i].pos = { x, y, z }; break; } case Spawn_Distr::RING: { float theta1 = randRange(0, 2 * CUDART_PI); float r2 = randFloat()*sw.Spawn.paramY[1]; float theta2 = randRange(0, 2 * CUDART_PI); float x = sw.Spawn.paramX[1] * cosf(theta1) + r2 * cosf(theta2)*cosf(theta1); float z = sw.Spawn.paramZ[1] * sinf(theta1) + r2 * cosf(theta2)*sinf(theta1); float y = r2 * sinf(theta2); plist[i].pos = { x, y, z }; break; } case Spawn_Distr::UNIFORM: { plist[i].pos = { randRange(sw.Spawn.paramX),randRange(sw.Spawn.paramY),randRange(sw.Spawn.paramZ) }; break; } case Spawn_Distr::USER_DEFINED: { sampleFromSpawn(z_cur, p_cur); copyVector3(&(plist[i].pos), z_cur); break; } default: plist[i].pos = { randRange(-5,5),randRange(-5,5),randRange(-5, 5) }; break; } plist[i].v = { 0.0, 0.0, 0.0 }; plist[i].m = randFloat() / numpart; plist[i].isStationary = 0; } } void initializeVelocity() { SettingsWrapper &sw = SettingsWrapper::get(); for (int i = 0; i < numpart; i++) { float mag = magVector3(&plist[i].pos); Vector3 &pos = plist[i].pos; Vector3 ortho = { -pos.z, 0.0, pos.x }; 
normVector3(&ortho); float scale; switch (sw.Spawn.angularMomentum) { case AngularMomentum_Distr::NONE: scale = 0; break; case AngularMomentum_Distr::MAG_SQ: scale = mag * mag; break; case AngularMomentum_Distr::MAG: scale = mag; break; case AngularMomentum_Distr::INV_MAG_SQ: scale = 1.0f / (mag*mag); break; case AngularMomentum_Distr::INV_MAG: scale = 1.0f / mag; break; case AngularMomentum_Distr::UNIFORM: scale = randFloat(); break; case AngularMomentum_Distr::GAUSS: scale = 0; break; case AngularMomentum_Distr::USER_DEFINED: scale = 1; copyVector3(&ortho, sw.Spawn.VelocityFunc( pos.x, pos.y, pos.z, mag, atan2f(pos.z, pos.x), //Azimuth atan2f(pos.y, pos.x*pos.x + pos.z*pos.z), //Altitude randFloat() )); break; default: scale = 0; break; } scaleVector3(&ortho, scale*sw.Spawn.initialAngularMomentumCoefficent); copyVector3(&plist[i].v, &ortho); } } int DeviceFunctions::setup(int num) { numpart = ((num - 1) / (blockSize*blocksToLoad) + 1)*blockSize*blocksToLoad; static bool firstRun = true; if(firstRun) cudaErrorCheck(hipSetDeviceFlags(hipDeviceScheduleBlockingSync)); //printf("Startup...\n"); //printf("Given %d particles, rounding to %d - the nearest multiple of blocksize*blocksToLoad (%d * %d)\n", num, numpart, blockSize, blocksToLoad); //printf("sizeof(Particle) = %d\n", sizeof(Particle)); plist = (Particle*)malloc(sizeof(Particle)*numpart); host_pos = (Vector3*)malloc(sizeof(Vector3)*numpart); spawnParticles(); initializeVelocity(); cudaErrorCheck(hipMalloc(&device_plist, sizeof(Particle)*numpart)); cudaErrorCheck(hipMemcpy(device_plist, plist, sizeof(Particle)*numpart, hipMemcpyHostToDevice)); cudaErrorCheck(hipDeviceSynchronize()); firstRun = false; return numpart; } #undef blockSize int DeviceFunctions::shutdown() { // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. 
cudaErrorCheck(hipFree(device_plist)); device_plist = nullptr; free(plist); free(host_pos); return 0; }
91d85407db160292dd8104fd251e9128028cc980.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "math_functions.h" #include "math_constants.h" #include <stdio.h> #include <stdlib.h> #include <stdexcept> #include "kernel.cuh" #include "SettingsWrapper.h" #define USE_SHARED #define cudaErrorCheck(...) { cudaErrorCheckingFunction((__VA_ARGS__), __FILE__, __LINE__); } #define blockSize ((int) 64) #define blocksToLoad ((int) 2) static Particle *plist; static int numpart; static Particle *device_plist; static Vector3 *host_pos; static inline void cudaErrorCheckingFunction(cudaError_t error, const char* file, int line, bool abort = true) { if (error != cudaSuccess) { fprintf(stderr, "Cuda error: %s %s %d\n", cudaGetErrorString(error), file, line); //throw std::runtime_error("CUDA error"); } } static __device__ void setColour(Particle& p, int index, Vector3* outColor) { float magVel = magVector3(&p.v); //outColor[idx] = {fminf(1.0, 0.5/minDist) ,0.1, fminf(1.0f, minDist/0.5)}; float r = fmaxf(fminf(magVel * 7, 1.0f), 0.1f); float g = fmaxf(fminf(magVel * 7 - 0.5, 1.0f), 0.1f); float b = fmaxf(fminf(magVel * 7 - 0.75, 1.0f), 0.1f); outColor[index] = { r, g, b }; } static __global__ void gpu_doStepWithShared(Vector3 *outPos, Vector3 *outColor, Particle * nplist, int numP, float k0, float G, float timeStep) { int idx = blockIdx.x * blockDim.x + threadIdx.x; Vector3 newPos, newV; __shared__ Particle plist[blockSize*blocksToLoad]; Particle self = nplist[idx]; Vector3 force = { 0.0f,0.0f,0.0f }; /* e.g., Block size = 8, sizeof(nplist) = 4*8 = 32, blocksToLoad = 2 Blocks: 0 1 2 3 |--------|--------|--------|--------| Each block will have 2 blocks loaded into shared memory. 
Block 1 threads will load on the second iteration: T0: nplist[16] and nplist[24] T1: 17 25 T2: 18 26 T3: 19 27 T4: 20 28 T5: 21 29 T6: 22 30 T7: 23 31 T2 loads from: nplist: 0 1 2 3 |--------|--------|--X-----|--Y-----| | | V V into plist: |--Z----- --W-----| */ //blockDim.x*blockSize; for (int block = 0; block < numP; block += blockSize * blocksToLoad) { //For every section of blocks to load into plist for (int i = 0; i < blocksToLoad; i++) { int offset = threadIdx.x + i * blockSize; plist[offset] = nplist[block + offset]; } __syncthreads(); for (int i = 0; i < blockSize*blocksToLoad; i++) { Vector3 r; copyVector3(&r, &self.pos); subVector3(&r, &plist[i].pos); float magR = magVector3(&r) + 0.05; float scaleFactor = -plist[i].m / (magR*magR*magR); Vector3 newForce = { 0.0f, 0.0f ,0.0f }; copyVector3(&newForce, &r); scaleVector3(&newForce, G*scaleFactor); addVector3(&force, &newForce); } } newV.x = self.v.x + force.x*timeStep / self.m; newV.y = self.v.y + force.y*timeStep / self.m; newV.z = self.v.z + force.z*timeStep / self.m; float magNewV = magVector3(&newV); if (magNewV > 25) { normVector3(&newV); newV.x *= 25; newV.y *= 25; newV.z *= 25; } newPos.x = self.pos.x + timeStep * (self.v.x + newV.x)*0.5; newPos.y = self.pos.y + timeStep * (self.v.y + newV.y)*0.5; newPos.z = self.pos.z + timeStep * (self.v.z + newV.z)*0.5; if (self.isStationary) { copyVector3(&newPos, &self.pos); copyVector3(&newV, &self.v); } setColour(self, idx, outColor); copyVector3(&outPos[idx], &self.pos); copyVector3(&nplist[idx].pos, &newPos); copyVector3(&nplist[idx].v, &newV); } static __global__ void loadOutputs(Vector3 * pos, Vector3 * colour, Particle * nplist) { int idx = blockIdx.x * blockDim.x + threadIdx.x; setColour(nplist[idx], idx, colour); copyVector3(&pos[idx], &nplist[idx].pos); } void DeviceFunctions::loadData(Vector3 * pos, Vector3 * colour) { dim3 blocks(numpart / blockSize, 1, 1); dim3 threadsPerBlock(blockSize, 1, 1); loadOutputs <<< blocks, threadsPerBlock >>> (pos, 
colour, device_plist); cudaErrorCheck(cudaGetLastError()); cudaDeviceSynchronize(); } Particle * DeviceFunctions::getPlist() { return plist; } Vector3* DeviceFunctions::getParticlePos(Vector3 *pos) { cudaMemcpy(host_pos, pos, sizeof(Vector3)*numpart, cudaMemcpyDeviceToHost); return host_pos; } void DeviceFunctions::doStep(float timestep, Vector3 *pos, Vector3 *colour) { dim3 blocks(numpart / blockSize, 1, 1); dim3 threadsPerBlock(blockSize, 1, 1); cudaDeviceSynchronize(); gpu_doStepWithShared <<< blocks, threadsPerBlock >>> (pos, colour, device_plist, numpart, 1.5, 0.0001, timestep); cudaErrorCheck(cudaGetLastError()); } static float randFloat() { return (float)((double) rand() / (double)RAND_MAX); } static float randRange(float a, float b) { return ((float)rand() / RAND_MAX)*(b - a) + a; } static float randRange(std::array<float, 2> ab) { return ((float)rand() / RAND_MAX)*(ab[1] - ab[0]) + ab[0]; } static float randGauss() { float u1 = randFloat(), u2 = randFloat(); return sqrtf(-2*logf(u1))*cosf(2*CUDART_PI*u2); } static std::array<float,2> randGauss2() { float u1 = randFloat(), u2 = randFloat(); return { sqrtf(-2*logf(u1))*cosf(2*CUDART_PI*u2), sqrtf(-2*logf(u1))*sinf(2*CUDART_PI*u2) }; } float magnitude(const std::array<float, 3>& a) { return sqrtf(a[0]*a[0] + a[1]*a[1] + a[2]*a[2]); } void sampleFromSpawn(std::array<float,3>& z_prev, float& p_prev) { SettingsWrapper &sw = SettingsWrapper::get(); const int samples = sw.Spawn.SpawnFunc_Samples; const float sigma = sw.Spawn.SpawnFunc_SigmaQ; for (int i = 0; i < samples; i++) { float p_prop; std::array<float, 3> z_prop; float u; do { z_prop = { sigma*randGauss() + z_prev[0], sigma*randGauss() + z_prev[1], sigma*randGauss() + z_prev[2] }; p_prop = sw.Spawn.SpawnFunc( z_prop[0], z_prop[1], z_prop[2], magnitude(z_prop), atan2f(z_prop[2], z_prop[0]), //Azimuth atan2f(z_prop[1], z_prop[0]*z_prop[0] + z_prop[2]*z_prop[2]), //Altitude randFloat()); u = randFloat(); } while (abs(p_prop) / p_prev <= u); if (p_prev > 
p_prop) { printf("%f %f\n", p_prev, p_prop); } p_prev = abs(p_prop); z_prev = z_prop; } } void spawnParticles() { SettingsWrapper &sw = SettingsWrapper::get(); const int sd = sw.Spawn.spawn_distr; std::array<float, 3> z_cur{ 0,0,0 }; float p_cur; if (sd == Spawn_Distr::USER_DEFINED) { p_cur = sw.Spawn.SpawnFunc( z_cur[0], z_cur[1], z_cur[2], magnitude(z_cur), atan2f(z_cur[2], z_cur[0]), //Azimuth atan2f(z_cur[1], z_cur[0] * z_cur[0] + z_cur[2] * z_cur[2]), //Altitude randFloat()); } for (int i = 0; i < numpart; i++) { plist[i].pos = { randRange(sw.Spawn.paramX),randRange(sw.Spawn.paramY),randRange(sw.Spawn.paramZ) }; switch (sd) { case Spawn_Distr::GAUSS: { float x = randGauss()*sw.Spawn.paramX[1] + sw.Spawn.paramX[0]; float y = randGauss()*sw.Spawn.paramY[1] + sw.Spawn.paramY[0]; float z = randGauss()*sw.Spawn.paramZ[1] + sw.Spawn.paramZ[0]; plist[i].pos = { x, y, z }; break; } case Spawn_Distr::RING: { float theta1 = randRange(0, 2 * CUDART_PI); float r2 = randFloat()*sw.Spawn.paramY[1]; float theta2 = randRange(0, 2 * CUDART_PI); float x = sw.Spawn.paramX[1] * cosf(theta1) + r2 * cosf(theta2)*cosf(theta1); float z = sw.Spawn.paramZ[1] * sinf(theta1) + r2 * cosf(theta2)*sinf(theta1); float y = r2 * sinf(theta2); plist[i].pos = { x, y, z }; break; } case Spawn_Distr::UNIFORM: { plist[i].pos = { randRange(sw.Spawn.paramX),randRange(sw.Spawn.paramY),randRange(sw.Spawn.paramZ) }; break; } case Spawn_Distr::USER_DEFINED: { sampleFromSpawn(z_cur, p_cur); copyVector3(&(plist[i].pos), z_cur); break; } default: plist[i].pos = { randRange(-5,5),randRange(-5,5),randRange(-5, 5) }; break; } plist[i].v = { 0.0, 0.0, 0.0 }; plist[i].m = randFloat() / numpart; plist[i].isStationary = 0; } } void initializeVelocity() { SettingsWrapper &sw = SettingsWrapper::get(); for (int i = 0; i < numpart; i++) { float mag = magVector3(&plist[i].pos); Vector3 &pos = plist[i].pos; Vector3 ortho = { -pos.z, 0.0, pos.x }; normVector3(&ortho); float scale; switch (sw.Spawn.angularMomentum) { case 
AngularMomentum_Distr::NONE: scale = 0; break; case AngularMomentum_Distr::MAG_SQ: scale = mag * mag; break; case AngularMomentum_Distr::MAG: scale = mag; break; case AngularMomentum_Distr::INV_MAG_SQ: scale = 1.0f / (mag*mag); break; case AngularMomentum_Distr::INV_MAG: scale = 1.0f / mag; break; case AngularMomentum_Distr::UNIFORM: scale = randFloat(); break; case AngularMomentum_Distr::GAUSS: scale = 0; break; case AngularMomentum_Distr::USER_DEFINED: scale = 1; copyVector3(&ortho, sw.Spawn.VelocityFunc( pos.x, pos.y, pos.z, mag, atan2f(pos.z, pos.x), //Azimuth atan2f(pos.y, pos.x*pos.x + pos.z*pos.z), //Altitude randFloat() )); break; default: scale = 0; break; } scaleVector3(&ortho, scale*sw.Spawn.initialAngularMomentumCoefficent); copyVector3(&plist[i].v, &ortho); } } int DeviceFunctions::setup(int num) { numpart = ((num - 1) / (blockSize*blocksToLoad) + 1)*blockSize*blocksToLoad; static bool firstRun = true; if(firstRun) cudaErrorCheck(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync)); //printf("Startup...\n"); //printf("Given %d particles, rounding to %d - the nearest multiple of blocksize*blocksToLoad (%d * %d)\n", num, numpart, blockSize, blocksToLoad); //printf("sizeof(Particle) = %d\n", sizeof(Particle)); plist = (Particle*)malloc(sizeof(Particle)*numpart); host_pos = (Vector3*)malloc(sizeof(Vector3)*numpart); spawnParticles(); initializeVelocity(); cudaErrorCheck(cudaMalloc(&device_plist, sizeof(Particle)*numpart)); cudaErrorCheck(cudaMemcpy(device_plist, plist, sizeof(Particle)*numpart, cudaMemcpyHostToDevice)); cudaErrorCheck(cudaDeviceSynchronize()); firstRun = false; return numpart; } #undef blockSize int DeviceFunctions::shutdown() { // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaErrorCheck(cudaFree(device_plist)); device_plist = nullptr; free(plist); free(host_pos); return 0; }
8cb459fc95798d4a42725fda1312f9e6116455b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/polygamma_impl.cuh" #include <limits> #include "include/hip/hip_fp16.h" #include "plugin/device/cpu/kernel/nnacl/op_base.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/elementwise/elementswise_pub_impl.cuh" constexpr size_t kThreadsPerBlock = 256; template <typename T> inline __device__ T trigamma(const T input) { double sign = +1; double result = 0; constexpr double PI = static_cast<double>(3.141592653589793238462643383279502); auto y = static_cast<double>(input); if (y < 0.5) { sign = -1; const double sin_pi_x = sin(PI * y); result -= (PI * PI) / (sin_pi_x * sin_pi_x); y = 1 - y; } for (int i = 0; i < 6; ++i) { result += 1 / (y * y); y += 1; } const double ixx = 1 / (y * y); result += (1 + 1 / (2 * y) + ixx * (1. / 6 - ixx * (1. / 30 - ixx * (1. 
/ 42)))) / y; return static_cast<T>(sign * result); } template <> inline __device__ float trigamma(const float input) { float sign = +1; float result = 0; constexpr float PI = static_cast<float>(3.141592653589793238462643383279502); auto y = static_cast<float>(input); if (y < 0.5f) { sign = -1; const float sin_pi_x = sinf(PI * y); result -= (PI * PI) / (sin_pi_x * sin_pi_x); y = 1 - y; } for (int i = 0; i < 6; ++i) { result += 1 / (y * y); y += 1; } const float ixx = 1 / (y * y); result += (1 + 1 / (2*y) + ixx * (1.f/6 - ixx * (1.f/30 - ixx * (1.f/42)))) / y; return sign * result; } template <typename T> inline __device__ T calc_polygamma(const int64_t y, const T input) { auto poly_n = y; auto zeta_n = static_cast<double>(poly_n); constexpr double one = static_cast<double>(1.0); double poly_result = ((poly_n % 2) ? one : -one) * exp(lgamma(zeta_n + one)); double p = zeta_n + one; double q = static_cast<double>(input); const double MACHEP = static_cast<double>(1.11022302462515654042E-16); constexpr double zero = static_cast<double>(0.0); constexpr double half = static_cast<double>(0.5); static const double A[] = { 12.0, -720.0, 30240.0, -1209600.0, 47900160.0, -1.8924375803183791606e9, /*1.307674368e12/691*/ 7.47242496e10, -2.950130727918164224e12, /*1.067062284288e16/3617*/ 1.1646782814350067249e14, /*5.109094217170944e18/43867*/ -4.5979787224074726105e15, /*8.028576626982912e20/174611*/ 1.8152105401943546773e17, /*1.5511210043330985984e23/854513*/ -7.1661652561756670113e18 /*1.6938241367317436694528e27/236364091*/ }; int i = 0; double a, b, k, s, t, w; if (p == one) { return static_cast<T>(std::numeric_limits<double>::infinity() * poly_result); } if (p < one) { return static_cast<T>(std::numeric_limits<double>::quiet_NaN() * poly_result); } if (q <= zero) { if (q == floor(q)) { return static_cast<T>(std::numeric_limits<double>::infinity() * poly_result); } if (p != floor(p)) { return static_cast<T>(std::numeric_limits<double>::quiet_NaN() * poly_result); } } s = 
pow(q, -p); a = q; i = 0; b = zero; while ((i < 9) || (a <= static_cast<double>(9.0))) { i += 1; a += one; b = pow(a, -p); s += b; if ((-MACHEP * s < b) && (b < MACHEP * s)) { return static_cast<T>(s * poly_result); } } w = a; s += b * w / (p - one); s -= half * b; a = one; k = zero; for (int i = 0; i < 12; i++) { a *= p + k; b /= w; t = a * b / A[i]; s = s + t; t = fabs(t / s); if (t < MACHEP) { return static_cast<T>(s * poly_result); } k += one; a *= p + k; b /= w; k += one; } return static_cast<T>(s * poly_result); } template <> inline __device__ float calc_polygamma(const int64_t y, const float input) { auto poly_n = y; auto zeta_n = static_cast<float>(poly_n); constexpr float one = static_cast<float>(1.0); float poly_result = ((poly_n % static_cast<int64_t>(2)) ? one : -one) * exp(lgammaf(zeta_n + one)); float p = zeta_n + one; float q = static_cast<float>(input); constexpr float MACHEP = static_cast<float>(1.11022302462515654042E-16); constexpr float zero = static_cast<float>(0.0); constexpr float half = static_cast<float>(0.5); static const float A[] = { 12.0, -720.0, 30240.0, -1209600.0, 47900160.0, -1.8924375803183791606e9, /*1.307674368e12/691*/ 7.47242496e10, -2.950130727918164224e12, /*1.067062284288e16/3617*/ 1.1646782814350067249e14, /*5.109094217170944e18/43867*/ -4.5979787224074726105e15, /*8.028576626982912e20/174611*/ 1.8152105401943546773e17, /*1.5511210043330985984e23/854513*/ -7.1661652561756670113e18 /*1.6938241367317436694528e27/236364091*/ }; int i = 0; float a, b, k, s, t, w; if (p == one) { return std::numeric_limits<float>::infinity() * poly_result; } if (p < one) { return std::numeric_limits<float>::quiet_NaN() * poly_result; } if (q <= zero) { if (q == floor(q)) { return std::numeric_limits<float>::infinity() * poly_result; } if (p != floor(p)) { return std::numeric_limits<float>::quiet_NaN() * poly_result; } } s = pow(q, -p); a = q; i = 0; b = zero; while ((i < 9) || (a <= static_cast<float>(9.0))) { i += 1; a += one; b = pow(a, -p); s 
+= b; if ((-MACHEP * s < b) && (b < MACHEP * s)) { return s * poly_result; } } w = a; s += b * w / (p - one); s -= half * b; a = one; k = zero; for (int i = 0; i < 12; i++) { a *= p + k; b /= w; t = a * b / A[i]; s = s + t; t = fabs(t / s); if (t < MACHEP) { return s * poly_result; } k += one; a *= p + k; b /= w; k += one; } return s * poly_result; } template <uint vec_size, typename T> __device__ __forceinline__ void VectorizedCallTri(const T *input, T *output, uint offset) { uint tid = threadIdx.x; using VecT = cuda::elementwise::AlignVec<T, vec_size>; auto vec_input = reinterpret_cast<const VecT *>(input + offset); auto vec_output = reinterpret_cast<VecT *>(output + offset); VecT cache = vec_input[tid]; VecT out1{0}; #pragma unroll for (uint j = 0; j < vec_size; j++) { auto output_pair = trigamma(cache.elements_[j]); out1.elements_[j] = output_pair; } vec_output[tid] = out1; } template <uint vec_size, typename T> __device__ __forceinline__ void NormalCallTri(const T *input, T *output, uint offset, uint remaining) { uint loop = UP_DIV(remaining, vec_size); for (uint i = threadIdx.x; i < loop; i += blockDim.x) { #pragma unroll for (uint j = 0; j < vec_size; j++) { uint index = i * vec_size + j; if (index >= remaining) { return; } index += offset; auto output_pair = trigamma(input[index]); output[index] = output_pair; } } } template <uint vec_size, typename T1, typename T2> __device__ __forceinline__ void VectorizedCall(const T1 *a, const T2 *input, T2 *output, uint offset) { uint tid = threadIdx.x; using VecT = cuda::elementwise::AlignVec<T2, vec_size>; auto a_int = static_cast<int64_t>(a[0]); auto vec_input = reinterpret_cast<const VecT *>(input + offset); auto vec_output = reinterpret_cast<VecT *>(output + offset); VecT cache = vec_input[tid]; VecT out1{0}; #pragma unroll for (uint j = 0; j < vec_size; j++) { auto output_pair = calc_polygamma(a_int, cache.elements_[j]); out1.elements_[j] = output_pair; } vec_output[tid] = out1; } template <uint vec_size, 
typename T1, typename T2> __device__ __forceinline__ void NormalCall(const T1 *a, const T2 *input, T2 *output, uint offset, uint remaining) { uint loop = UP_DIV(remaining, vec_size); auto a_int = static_cast<int64_t>(a[0]); for (uint i = threadIdx.x; i < loop; i += blockDim.x) { #pragma unroll for (uint j = 0; j < vec_size; j++) { uint index = i * vec_size + j; if (index >= remaining) { return; } index += offset; auto output_pair = calc_polygamma(a_int, input[index]); output[index] = output_pair; } } } template <uint vec_size, typename T1, typename T2> __global__ void CalPolygammaKernel(size_t num_count, const T1 *a, const T2 *input, T2 *output) { auto y = a[0]; uint elements_per_block = kThreadsPerBlock * vec_size; if (y == T1(1)) { for (uint offset = elements_per_block * blockIdx.x; offset < num_count; offset += elements_per_block * gridDim.x) { uint remaining = num_count - offset; if (remaining < elements_per_block) { NormalCallTri<vec_size, T2>(input, output, offset, remaining); } else { VectorizedCallTri<vec_size, T2>(input, output, offset); } } } else if (y > T1(1)) { for (uint offset = elements_per_block * blockIdx.x; offset < num_count; offset += elements_per_block * gridDim.x) { uint remaining = num_count - offset; if (remaining < elements_per_block) { NormalCall<vec_size, T1, T2>(a, input, output, offset, remaining); } else { VectorizedCall<vec_size, T1, T2>(a, input, output, offset); } } } return; } template <typename T1, typename T2> void CalPolygamma(const size_t num_count, const T1 *a, const T2 *input, T2 *output, const uint32_t &device_id, hipStream_t cuda_stream) { constexpr size_t vec_size = cuda::elementwise::VecSize<T2>(); const size_t block_x = kThreadsPerBlock < num_count ? 
kThreadsPerBlock : num_count; const size_t elements_per_block = kThreadsPerBlock * vec_size; const size_t grid_x = UP_DIV(num_count, elements_per_block); hipLaunchKernelGGL(( CalPolygammaKernel<vec_size, T1, T2>), dim3(grid_x), dim3(block_x), 0, cuda_stream, num_count, a, input, output); } template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int32_t *a, const float *input, float *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int32_t *a, const double *input, double *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int32_t *a, const half *input, half *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int64_t *a, const float *input, float *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int64_t *a, const double *input, double *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int64_t *a, const half *input, half *output, const uint32_t &device_id, hipStream_t cuda_stream);
8cb459fc95798d4a42725fda1312f9e6116455b4.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/polygamma_impl.cuh" #include <limits> #include "include/cuda_fp16.h" #include "plugin/device/cpu/kernel/nnacl/op_base.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/elementwise/elementswise_pub_impl.cuh" constexpr size_t kThreadsPerBlock = 256; template <typename T> inline __device__ T trigamma(const T input) { double sign = +1; double result = 0; constexpr double PI = static_cast<double>(3.141592653589793238462643383279502); auto y = static_cast<double>(input); if (y < 0.5) { sign = -1; const double sin_pi_x = sin(PI * y); result -= (PI * PI) / (sin_pi_x * sin_pi_x); y = 1 - y; } for (int i = 0; i < 6; ++i) { result += 1 / (y * y); y += 1; } const double ixx = 1 / (y * y); result += (1 + 1 / (2 * y) + ixx * (1. / 6 - ixx * (1. / 30 - ixx * (1. 
/ 42)))) / y; return static_cast<T>(sign * result); } template <> inline __device__ float trigamma(const float input) { float sign = +1; float result = 0; constexpr float PI = static_cast<float>(3.141592653589793238462643383279502); auto y = static_cast<float>(input); if (y < 0.5f) { sign = -1; const float sin_pi_x = sinf(PI * y); result -= (PI * PI) / (sin_pi_x * sin_pi_x); y = 1 - y; } for (int i = 0; i < 6; ++i) { result += 1 / (y * y); y += 1; } const float ixx = 1 / (y * y); result += (1 + 1 / (2*y) + ixx * (1.f/6 - ixx * (1.f/30 - ixx * (1.f/42)))) / y; return sign * result; } template <typename T> inline __device__ T calc_polygamma(const int64_t y, const T input) { auto poly_n = y; auto zeta_n = static_cast<double>(poly_n); constexpr double one = static_cast<double>(1.0); double poly_result = ((poly_n % 2) ? one : -one) * exp(lgamma(zeta_n + one)); double p = zeta_n + one; double q = static_cast<double>(input); const double MACHEP = static_cast<double>(1.11022302462515654042E-16); constexpr double zero = static_cast<double>(0.0); constexpr double half = static_cast<double>(0.5); static const double A[] = { 12.0, -720.0, 30240.0, -1209600.0, 47900160.0, -1.8924375803183791606e9, /*1.307674368e12/691*/ 7.47242496e10, -2.950130727918164224e12, /*1.067062284288e16/3617*/ 1.1646782814350067249e14, /*5.109094217170944e18/43867*/ -4.5979787224074726105e15, /*8.028576626982912e20/174611*/ 1.8152105401943546773e17, /*1.5511210043330985984e23/854513*/ -7.1661652561756670113e18 /*1.6938241367317436694528e27/236364091*/ }; int i = 0; double a, b, k, s, t, w; if (p == one) { return static_cast<T>(std::numeric_limits<double>::infinity() * poly_result); } if (p < one) { return static_cast<T>(std::numeric_limits<double>::quiet_NaN() * poly_result); } if (q <= zero) { if (q == floor(q)) { return static_cast<T>(std::numeric_limits<double>::infinity() * poly_result); } if (p != floor(p)) { return static_cast<T>(std::numeric_limits<double>::quiet_NaN() * poly_result); } } s = 
pow(q, -p); a = q; i = 0; b = zero; while ((i < 9) || (a <= static_cast<double>(9.0))) { i += 1; a += one; b = pow(a, -p); s += b; if ((-MACHEP * s < b) && (b < MACHEP * s)) { return static_cast<T>(s * poly_result); } } w = a; s += b * w / (p - one); s -= half * b; a = one; k = zero; for (int i = 0; i < 12; i++) { a *= p + k; b /= w; t = a * b / A[i]; s = s + t; t = fabs(t / s); if (t < MACHEP) { return static_cast<T>(s * poly_result); } k += one; a *= p + k; b /= w; k += one; } return static_cast<T>(s * poly_result); } template <> inline __device__ float calc_polygamma(const int64_t y, const float input) { auto poly_n = y; auto zeta_n = static_cast<float>(poly_n); constexpr float one = static_cast<float>(1.0); float poly_result = ((poly_n % static_cast<int64_t>(2)) ? one : -one) * exp(lgammaf(zeta_n + one)); float p = zeta_n + one; float q = static_cast<float>(input); constexpr float MACHEP = static_cast<float>(1.11022302462515654042E-16); constexpr float zero = static_cast<float>(0.0); constexpr float half = static_cast<float>(0.5); static const float A[] = { 12.0, -720.0, 30240.0, -1209600.0, 47900160.0, -1.8924375803183791606e9, /*1.307674368e12/691*/ 7.47242496e10, -2.950130727918164224e12, /*1.067062284288e16/3617*/ 1.1646782814350067249e14, /*5.109094217170944e18/43867*/ -4.5979787224074726105e15, /*8.028576626982912e20/174611*/ 1.8152105401943546773e17, /*1.5511210043330985984e23/854513*/ -7.1661652561756670113e18 /*1.6938241367317436694528e27/236364091*/ }; int i = 0; float a, b, k, s, t, w; if (p == one) { return std::numeric_limits<float>::infinity() * poly_result; } if (p < one) { return std::numeric_limits<float>::quiet_NaN() * poly_result; } if (q <= zero) { if (q == floor(q)) { return std::numeric_limits<float>::infinity() * poly_result; } if (p != floor(p)) { return std::numeric_limits<float>::quiet_NaN() * poly_result; } } s = pow(q, -p); a = q; i = 0; b = zero; while ((i < 9) || (a <= static_cast<float>(9.0))) { i += 1; a += one; b = pow(a, -p); s 
+= b; if ((-MACHEP * s < b) && (b < MACHEP * s)) { return s * poly_result; } } w = a; s += b * w / (p - one); s -= half * b; a = one; k = zero; for (int i = 0; i < 12; i++) { a *= p + k; b /= w; t = a * b / A[i]; s = s + t; t = fabs(t / s); if (t < MACHEP) { return s * poly_result; } k += one; a *= p + k; b /= w; k += one; } return s * poly_result; } template <uint vec_size, typename T> __device__ __forceinline__ void VectorizedCallTri(const T *input, T *output, uint offset) { uint tid = threadIdx.x; using VecT = cuda::elementwise::AlignVec<T, vec_size>; auto vec_input = reinterpret_cast<const VecT *>(input + offset); auto vec_output = reinterpret_cast<VecT *>(output + offset); VecT cache = vec_input[tid]; VecT out1{0}; #pragma unroll for (uint j = 0; j < vec_size; j++) { auto output_pair = trigamma(cache.elements_[j]); out1.elements_[j] = output_pair; } vec_output[tid] = out1; } template <uint vec_size, typename T> __device__ __forceinline__ void NormalCallTri(const T *input, T *output, uint offset, uint remaining) { uint loop = UP_DIV(remaining, vec_size); for (uint i = threadIdx.x; i < loop; i += blockDim.x) { #pragma unroll for (uint j = 0; j < vec_size; j++) { uint index = i * vec_size + j; if (index >= remaining) { return; } index += offset; auto output_pair = trigamma(input[index]); output[index] = output_pair; } } } template <uint vec_size, typename T1, typename T2> __device__ __forceinline__ void VectorizedCall(const T1 *a, const T2 *input, T2 *output, uint offset) { uint tid = threadIdx.x; using VecT = cuda::elementwise::AlignVec<T2, vec_size>; auto a_int = static_cast<int64_t>(a[0]); auto vec_input = reinterpret_cast<const VecT *>(input + offset); auto vec_output = reinterpret_cast<VecT *>(output + offset); VecT cache = vec_input[tid]; VecT out1{0}; #pragma unroll for (uint j = 0; j < vec_size; j++) { auto output_pair = calc_polygamma(a_int, cache.elements_[j]); out1.elements_[j] = output_pair; } vec_output[tid] = out1; } template <uint vec_size, 
typename T1, typename T2> __device__ __forceinline__ void NormalCall(const T1 *a, const T2 *input, T2 *output, uint offset, uint remaining) { uint loop = UP_DIV(remaining, vec_size); auto a_int = static_cast<int64_t>(a[0]); for (uint i = threadIdx.x; i < loop; i += blockDim.x) { #pragma unroll for (uint j = 0; j < vec_size; j++) { uint index = i * vec_size + j; if (index >= remaining) { return; } index += offset; auto output_pair = calc_polygamma(a_int, input[index]); output[index] = output_pair; } } } template <uint vec_size, typename T1, typename T2> __global__ void CalPolygammaKernel(size_t num_count, const T1 *a, const T2 *input, T2 *output) { auto y = a[0]; uint elements_per_block = kThreadsPerBlock * vec_size; if (y == T1(1)) { for (uint offset = elements_per_block * blockIdx.x; offset < num_count; offset += elements_per_block * gridDim.x) { uint remaining = num_count - offset; if (remaining < elements_per_block) { NormalCallTri<vec_size, T2>(input, output, offset, remaining); } else { VectorizedCallTri<vec_size, T2>(input, output, offset); } } } else if (y > T1(1)) { for (uint offset = elements_per_block * blockIdx.x; offset < num_count; offset += elements_per_block * gridDim.x) { uint remaining = num_count - offset; if (remaining < elements_per_block) { NormalCall<vec_size, T1, T2>(a, input, output, offset, remaining); } else { VectorizedCall<vec_size, T1, T2>(a, input, output, offset); } } } return; } template <typename T1, typename T2> void CalPolygamma(const size_t num_count, const T1 *a, const T2 *input, T2 *output, const uint32_t &device_id, cudaStream_t cuda_stream) { constexpr size_t vec_size = cuda::elementwise::VecSize<T2>(); const size_t block_x = kThreadsPerBlock < num_count ? 
kThreadsPerBlock : num_count; const size_t elements_per_block = kThreadsPerBlock * vec_size; const size_t grid_x = UP_DIV(num_count, elements_per_block); CalPolygammaKernel<vec_size, T1, T2><<<grid_x, block_x, 0, cuda_stream>>>(num_count, a, input, output); } template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int32_t *a, const float *input, float *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int32_t *a, const double *input, double *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int32_t *a, const half *input, half *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int64_t *a, const float *input, float *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int64_t *a, const double *input, double *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPolygamma(const size_t num_count, const int64_t *a, const half *input, half *output, const uint32_t &device_id, cudaStream_t cuda_stream);
02cc13f3f76a1ece71e4b894ab07b9b7f44e850b.hip
// !!! This is a file automatically generated by hipify!!! /** * * bashCGPU/CUDA * https://suzukiiichiro.github.io/search/?keyword= * -arch=sm_13 or -arch=sm_61 CPU $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -r CPU $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -c GPU $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -g GPU GPU $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -n N: Total Unique dd:hh:mm:ss.ms 4: 2 0 00:00:00:00.15 5: 10 0 00:00:00:00.00 6: 4 0 00:00:00:00.00 7: 40 0 00:00:00:00.00 8: 92 0 00:00:00:00.00 9: 352 0 00:00:00:00.00 10: 724 0 00:00:00:00.00 11: 2680 0 00:00:00:00.00 12: 14200 0 00:00:00:00.00 13: 73712 0 00:00:00:00.00 14: 365596 0 00:00:00:00.04 15: 2279184 0 00:00:00:00.21 16: 14772512 0 00:00:00:02.05 17: 95815104 0 00:00:00:19.56 18: 666090624 0 00:00:03:15.21 kLayer_nodeLayer GPUleft,right,down kLayer_nodeLayer(size,4) 244nqueenleft,down,rightnodes nodes push_back nodes3left,dwon,right [0]left[1]down[2]right bitmap_build_nodeLayer int numSolutions = nodes.size() / 6; 31/3 nodes21/6 solutions += 2*hostSolutions[i]; // Symmetry GPUTOTAL2 dim_nodeLayer GPU bitmap_solve_nodeLayercounter(+1)solutions solutionsGPU bitmap_solve_ndoeLayer down==mask 1 down1 masksize1 n811111111 down11mask */ #include <iostream> #include <vector> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <string.h> #include <time.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #define THREAD_NUM 96 #define MAX 27 // //#define UINT64_C(c) c ## ULL // // unsigned long TOTAL=0; unsigned long UNIQUE=0; // void bitmap_NR(unsigned int size,int row) { unsigned int mask=(1<<size)-1; unsigned int bitmap[size]; unsigned int bit=0; unsigned int left[size]; unsigned int down[size]; unsigned int right[size]; left[0]=0; down[0]=0; right[0]=0; bitmap[row]=mask; while(row>-1){ if(bitmap[row]>0){ bit=-bitmap[row]&bitmap[row];// bitmap[row]=bitmap[row]^bit;// 
if(row==(size-1)){ TOTAL++; row--; }else{ unsigned int n=row++; left[row]=(left[n]|bit)<<1; down[row]=down[n]|bit; right[row]=(right[n]|bit)>>1; // bitmap[row]=mask&~(left[row]|down[row]|right[row]); } }else{ row--; } }//end while } // void bitmap_R(unsigned int size,unsigned int row,unsigned int left,unsigned int down, unsigned int right) { unsigned int mask=(1<<size)-1; unsigned int bit=0; if(row==size){ TOTAL++; }else{ // for(unsigned int bitmap=mask&~(left|down|right);bitmap;bitmap=bitmap&~bit){ bit=bitmap&-bitmap; bitmap_R(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1); } } } // __host__ __device__ long bitmap_solve_nodeLayer(int size,long left,long down,long right) { long mask=(1<<size)-1; long counter = 0; if (down==mask) { // down return 1; } long bit=0; for(long bitmap=mask&~(left|down|right);bitmap;bitmap^=bit){ bit=-bitmap&bitmap; counter += bitmap_solve_nodeLayer(size,(left|bit)>>1,(down|bit),(right|bit)<< 1); } return counter; } // i i __global__ void dim_nodeLayer(int size,long* nodes, long* solutions, int numElements) { int i=blockDim.x * blockIdx.x + threadIdx.x; if(i<numElements){ solutions[i]=bitmap_solve_nodeLayer(size,nodes[3 * i],nodes[3 * i + 1],nodes[3 * i + 2]); } } // 0bit int countBits_nodeLayer(long n) { int counter = 0; while (n){ n &= (n - 1); // counter++; } return counter; } // k long kLayer_nodeLayer(int size,std::vector<long>& nodes, int k, long left, long down, long right) { long counter=0; long mask=(1<<size)-1; // down if (countBits_nodeLayer(down) == k) { nodes.push_back(left); nodes.push_back(down); nodes.push_back(right); return 1; } long bit=0; for(long bitmap=mask&~(left|down|right);bitmap;bitmap^=bit){ bit=-bitmap&bitmap; // counter+=kLayer_nodeLayer(size,nodes,k,(left|bit)>>1,(down|bit),(right|bit)<<1); } return counter; } // k std::vector<long> kLayer_nodeLayer(int size,int k) { std::vector<long> nodes{}; kLayer_nodeLayer(size,nodes, k, 0, 0, 0); return nodes; } // GPU void bitmap_build_nodeLayer(int size) { // 3 //3 
// 2 // 4N169844 std::vector<long> nodes = kLayer_nodeLayer(size,4); // // size_t nodeSize = nodes.size() * sizeof(long); long* hostNodes = (long*)malloc(nodeSize); hostNodes = &nodes[0]; long* deviceNodes = NULL; hipMalloc((void**)&deviceNodes, nodeSize); hipMemcpy(deviceNodes, hostNodes, nodeSize, hipMemcpyHostToDevice); // long* deviceSolutions = NULL; // 3 int numSolutions = nodes.size() / 6; size_t solutionSize = numSolutions * sizeof(long); hipMalloc((void**)&deviceSolutions, solutionSize); // CUDA int threadsPerBlock = 256; int blocksPerGrid = (numSolutions + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( dim_nodeLayer) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, size,deviceNodes, deviceSolutions, numSolutions); // long* hostSolutions = (long*)malloc(solutionSize); hipMemcpy(hostSolutions, deviceSolutions, solutionSize, hipMemcpyDeviceToHost); // long solutions = 0; for (long i = 0; i < numSolutions; i++) { solutions += 2*hostSolutions[i]; // Symmetry } // TOTAL=solutions; } // CUDA bool InitCUDA() { int count; hipGetDeviceCount(&count); if(count==0){fprintf(stderr,"There is no device.\n");return false;} int i; for(i=0;i<count;i++){ struct hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} } } if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;} hipSetDevice(i); return true; } // int main(int argc,char** argv) { bool cpu=false,cpur=false,gpu=false,gpuNodeLayer=false; int argstart=2; if(argc>=2&&argv[1][0]=='-'){ if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;} else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;} else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuNodeLayer=true;} else{ gpuNodeLayer=true; } //gpu argstart=2; } if(argc<argstart){ printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]); printf(" -r: CPU \n"); printf(" -c: CPU \n"); printf(" -g: GPU 
\n"); printf(" -n: GPU \n"); } if(cpur){ printf("\n\n \n"); } else if(cpu){ printf("\n\n \n"); } else if(gpu){ printf("\n\n GPU\n"); } else if(gpuNodeLayer){ printf("\n\n GPU \n"); } if(cpu||cpur) { int min=4; int targetN=17; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int size=min;size<=targetN;size++){ TOTAL=UNIQUE=0; gettimeofday(&t0, NULL);// if(cpur){ // bitmap_R(size,0,0,0,0); } if(cpu){ // bitmap_NR(size,0); } // gettimeofday(&t1, NULL);// int ss;int ms;int dd; if(t1.tv_usec<t0.tv_usec) { dd=(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; }else { dd=(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%16ld%17ld%12.2d:%02d:%02d:%02d.%02d\n", size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); } //end for }//end if if(gpu||gpuNodeLayer) { if(!InitCUDA()){return 0;} /* int steps=24576; */ int min=4; int targetN=21; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int size=min;size<=targetN;size++){ gettimeofday(&t0,NULL); // if(gpu){ TOTAL=UNIQUE=0; TOTAL=bitmap_solve_nodeLayer(size,0,0,0); // }else if(gpuNodeLayer){ TOTAL=UNIQUE=0; bitmap_build_nodeLayer(size); // } gettimeofday(&t1,NULL); // int ss;int ms;int dd; if (t1.tv_usec<t0.tv_usec) { dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; } else { dd=(int)(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); }//end for }//end if return 0; }
02cc13f3f76a1ece71e4b894ab07b9b7f44e850b.cu
/** * * bash版ビットマップのC言語版のGPU/CUDA移植版 * 詳しい説明はこちらをどうぞ https://suzukiiichiro.github.io/search/?keyword=Nクイーン問題 * アーキテクチャの指定(なくても問題なし、あれば高速) -arch=sm_13 or -arch=sm_61 CPUの再帰での実行 $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -r CPUの非再帰での実行 $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -c GPUのシングルスレッド $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -g GPUのマルチスレッド ビットマップ GPUノードレイヤー $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -n N: Total Unique dd:hh:mm:ss.ms 4: 2 0 00:00:00:00.15 5: 10 0 00:00:00:00.00 6: 4 0 00:00:00:00.00 7: 40 0 00:00:00:00.00 8: 92 0 00:00:00:00.00 9: 352 0 00:00:00:00.00 10: 724 0 00:00:00:00.00 11: 2680 0 00:00:00:00.00 12: 14200 0 00:00:00:00.00 13: 73712 0 00:00:00:00.00 14: 365596 0 00:00:00:00.04 15: 2279184 0 00:00:00:00.21 16: 14772512 0 00:00:00:02.05 17: 95815104 0 00:00:00:19.56 18: 666090624 0 00:00:03:15.21 コメント追加 ・kLayer_nodeLayer GPUで並列実行するためのleft,right,downを作成する kLayer_nodeLayer(size,4) 第2引数の4は4行目までnqueenを実行し、それまでのleft,down,rightをnodes配列に格納する nodesはベクター配列で構造体でもなんでも格納できる push_backで追加。 nodes配列は3個で1セットleft,dwon,rightの情報を同じ配列に格納する [0]left[1]down[2]right ・bitmap_build_nodeLayer int numSolutions = nodes.size() / 6; 3個で1セットなので/3 さらにnodesの2分の1だけ実行すればミラーになるので/6 solutions += 2*hostSolutions[i]; // Symmetry GPUごとのTOTALを集計している。ミラー分最後に2倍する ・dim_nodeLayer GPU並列処理 bitmap_solve_nodeLayerを再帰呼び出しし、counter(最終行までクイーンを置けると+1)をsolutionsに格納する solutionsは配列でGPUのステップ数分ある ・bitmap_solve_ndoeLayer down==maskが最終行までクイーンを置けた状態 ビットだとクイーンを置けない場所に1が立つ downだとクイーンを置いた場所に1が立つ maskは、size分1が立っているもの n8だと11111111 downはクイーンが配置されるたびに配置された列に1が立って行くので最終行までクイーンを置くと全列に1が立った状態になりmaskと同じ内容になる */ #include <iostream> #include <vector> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <string.h> #include <time.h> #include <sys/time.h> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #define THREAD_NUM 96 #define MAX 27 // システムによって以下のマクロが必要であればコメントを外してください。 //#define UINT64_C(c) c ## ULL // // 
グローバル変数 unsigned long TOTAL=0; unsigned long UNIQUE=0; // ビットマップ 非再帰版 void bitmap_NR(unsigned int size,int row) { unsigned int mask=(1<<size)-1; unsigned int bitmap[size]; unsigned int bit=0; unsigned int left[size]; unsigned int down[size]; unsigned int right[size]; left[0]=0; down[0]=0; right[0]=0; bitmap[row]=mask; while(row>-1){ if(bitmap[row]>0){ bit=-bitmap[row]&bitmap[row];//一番右のビットを取り出す bitmap[row]=bitmap[row]^bit;//配置可能なパターンが一つずつ取り出される if(row==(size-1)){ TOTAL++; row--; }else{ unsigned int n=row++; left[row]=(left[n]|bit)<<1; down[row]=down[n]|bit; right[row]=(right[n]|bit)>>1; //クイーンが配置可能な位置を表す bitmap[row]=mask&~(left[row]|down[row]|right[row]); } }else{ row--; } }//end while } // ビットマップ 再帰版 void bitmap_R(unsigned int size,unsigned int row,unsigned int left,unsigned int down, unsigned int right) { unsigned int mask=(1<<size)-1; unsigned int bit=0; if(row==size){ TOTAL++; }else{ // クイーンが配置可能な位置を表す for(unsigned int bitmap=mask&~(left|down|right);bitmap;bitmap=bitmap&~bit){ bit=bitmap&-bitmap; bitmap_R(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1); } } } // クイーンの効きを判定して解を返す __host__ __device__ long bitmap_solve_nodeLayer(int size,long left,long down,long right) { long mask=(1<<size)-1; long counter = 0; if (down==mask) { // downがすべて専有され解が見つかる return 1; } long bit=0; for(long bitmap=mask&~(left|down|right);bitmap;bitmap^=bit){ bit=-bitmap&bitmap; counter += bitmap_solve_nodeLayer(size,(left|bit)>>1,(down|bit),(right|bit)<< 1); } return counter; } // i 番目のメンバを i 番目の部分木の解で埋める __global__ void dim_nodeLayer(int size,long* nodes, long* solutions, int numElements) { int i=blockDim.x * blockIdx.x + threadIdx.x; if(i<numElements){ solutions[i]=bitmap_solve_nodeLayer(size,nodes[3 * i],nodes[3 * i + 1],nodes[3 * i + 2]); } } // 0以外のbitをカウント int countBits_nodeLayer(long n) { int counter = 0; while (n){ n &= (n - 1); // 右端のゼロ以外の数字を削除 counter++; } return counter; } // ノードをk番目のレイヤーのノードで埋める long kLayer_nodeLayer(int size,std::vector<long>& nodes, int k, long left, long 
down, long right) { long counter=0; long mask=(1<<size)-1; // すべてのdownが埋まったら、解決策を見つけたことになる。 if (countBits_nodeLayer(down) == k) { nodes.push_back(left); nodes.push_back(down); nodes.push_back(right); return 1; } long bit=0; for(long bitmap=mask&~(left|down|right);bitmap;bitmap^=bit){ bit=-bitmap&bitmap; // 解を加えて対角線をずらす counter+=kLayer_nodeLayer(size,nodes,k,(left|bit)>>1,(down|bit),(right|bit)<<1); } return counter; } // k 番目のレイヤのすべてのノードを含むベクトルを返す。 std::vector<long> kLayer_nodeLayer(int size,int k) { std::vector<long> nodes{}; kLayer_nodeLayer(size,nodes, k, 0, 0, 0); return nodes; } // 【GPU ビットマップ】ノードレイヤーの作成 void bitmap_build_nodeLayer(int size) { // ツリーの3番目のレイヤーにあるノード //(それぞれ連続する3つの数字でエンコードされる)のベクトル。 // レイヤー2以降はノードの数が均等なので、対称性を利用できる。 // レイヤ4には十分なノードがある(N16の場合、9844)。 std::vector<long> nodes = kLayer_nodeLayer(size,4); // デバイスにはクラスがないので、 // 最初の要素を指定してからデバイスにコピーする。 size_t nodeSize = nodes.size() * sizeof(long); long* hostNodes = (long*)malloc(nodeSize); hostNodes = &nodes[0]; long* deviceNodes = NULL; cudaMalloc((void**)&deviceNodes, nodeSize); cudaMemcpy(deviceNodes, hostNodes, nodeSize, cudaMemcpyHostToDevice); // デバイス出力の割り当て long* deviceSolutions = NULL; // 必要なのはノードの半分だけで、各ノードは3つの整数で符号化される。 int numSolutions = nodes.size() / 6; size_t solutionSize = numSolutions * sizeof(long); cudaMalloc((void**)&deviceSolutions, solutionSize); // CUDAカーネルを起動する。 int threadsPerBlock = 256; int blocksPerGrid = (numSolutions + threadsPerBlock - 1) / threadsPerBlock; dim_nodeLayer <<<blocksPerGrid, threadsPerBlock >>> (size,deviceNodes, deviceSolutions, numSolutions); // 結果をホストにコピー long* hostSolutions = (long*)malloc(solutionSize); cudaMemcpy(hostSolutions, deviceSolutions, solutionSize, cudaMemcpyDeviceToHost); // 部分解を加算し、結果を表示する。 long solutions = 0; for (long i = 0; i < numSolutions; i++) { solutions += 2*hostSolutions[i]; // Symmetry } // 出力 TOTAL=solutions; } // CUDA 初期化 bool InitCUDA() { int count; cudaGetDeviceCount(&count); if(count==0){fprintf(stderr,"There is no 
device.\n");return false;} int i; for(i=0;i<count;i++){ struct cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} } } if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;} cudaSetDevice(i); return true; } //メイン int main(int argc,char** argv) { bool cpu=false,cpur=false,gpu=false,gpuNodeLayer=false; int argstart=2; if(argc>=2&&argv[1][0]=='-'){ if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;} else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;} else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuNodeLayer=true;} else{ gpuNodeLayer=true; } //デフォルトをgpuとする argstart=2; } if(argc<argstart){ printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]); printf(" -r: CPU 再帰\n"); printf(" -c: CPU 非再帰\n"); printf(" -g: GPU 再帰\n"); printf(" -n: GPU ノードレイヤー\n"); } if(cpur){ printf("\n\nビットマップ 再帰 \n"); } else if(cpu){ printf("\n\nビットマップ 非再帰 \n"); } else if(gpu){ printf("\n\nビットマップ GPU\n"); } else if(gpuNodeLayer){ printf("\n\nビットマップ GPUノードレイヤー \n"); } if(cpu||cpur) { int min=4; int targetN=17; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int size=min;size<=targetN;size++){ TOTAL=UNIQUE=0; gettimeofday(&t0, NULL);//計測開始 if(cpur){ //再帰 bitmap_R(size,0,0,0,0); } if(cpu){ //非再帰 bitmap_NR(size,0); } // gettimeofday(&t1, NULL);//計測終了 int ss;int ms;int dd; if(t1.tv_usec<t0.tv_usec) { dd=(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; }else { dd=(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%16ld%17ld%12.2d:%02d:%02d:%02d.%02d\n", size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); } //end for }//end if if(gpu||gpuNodeLayer) { if(!InitCUDA()){return 0;} /* int steps=24576; */ int min=4; int targetN=21; 
struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(int size=min;size<=targetN;size++){ gettimeofday(&t0,NULL); // 計測開始 if(gpu){ TOTAL=UNIQUE=0; TOTAL=bitmap_solve_nodeLayer(size,0,0,0); //ビットマップ }else if(gpuNodeLayer){ TOTAL=UNIQUE=0; bitmap_build_nodeLayer(size); // ビットマップ } gettimeofday(&t1,NULL); // 計測終了 int ss;int ms;int dd; if (t1.tv_usec<t0.tv_usec) { dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; } else { dd=(int)(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if int hh=ss/3600; int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); }//end for }//end if return 0; }
667a2598367084bc488dec53972350cc356e8fed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common_hip_defs.cuh" #include "common_cuda_funcs.cuh" namespace { template <typename scalar_t> __global__ void wb_cuda_scale_calc_kernel( const scalar_t* __restrict__ input, scalar_t* __restrict__ scale_output, scalar_t* __restrict__ dev_tmp, int* __restrict__ dev_last_block_counter, const int64_t total_elements_count) { const uint16_t tidx = threadIdx.x; const uint32_t bidx = blockIdx.x; const uint64_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x; scalar_t sum = 0; for (int i = gtidx; i < total_elements_count; i += grid_size) { sum += abs(*(input + i)); } sum /= total_elements_count; __shared__ scalar_t sh_mem[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_mem, sum, tidx, bidx, dev_tmp, dev_last_block_counter, scale_output, gridDim.x); } template <typename scalar_t> __global__ void wb_cuda_binarize_kernel( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, const int64_t scale_count, const int64_t elements_per_scale, const int64_t size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int64_t scale_idx = static_cast<int64_t>(idx / elements_per_scale) % scale_count; scalar_t scale_element = *(scale + scale_idx); *(output + idx) = (*(input + idx) > 0) ? 
scale_element : -scale_element; } } template <typename scalar_t> __global__ void ab_cuda_forward_kernel( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, const scalar_t* __restrict__ thresholds, const int64_t threshold_count, const int64_t contiguous_elements_per_threshold, const int64_t size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int64_t threshold_idx = static_cast<int64_t>(idx / contiguous_elements_per_threshold) % threshold_count; scalar_t threshold_element = (*(thresholds + threshold_idx)) * (*scale); *(output + idx) = (*(input + idx) > threshold_element) ? (*scale) : static_cast<scalar_t>(0.0); } } template <typename scalar_t> __global__ void ab_cuda_grad_input_kernel( scalar_t* __restrict__ grad_input, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, const int64_t size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { const scalar_t input_element = *(input + idx); *(grad_input + idx) = (input_element > 0 && input_element < *scale) ? *(grad_output + idx) : static_cast<scalar_t>(0.0); } } template <typename scalar_t> __global__ void ab_cuda_grad_scale_kernel( scalar_t* __restrict__ grad_scale, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, scalar_t* __restrict__ dev_tmp, int* __restrict__ dev_last_block_counter, const int64_t total_elements_count) { const uint16_t tidx = threadIdx.x; const uint32_t bidx = blockIdx.x; const uint32_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x; scalar_t sum = 0; for (int i = gtidx; i < total_elements_count; i += grid_size) { scalar_t err_element = (*(output + i) - *(input + i)) / *scale; scalar_t grad_element = *(grad_output + i); sum += (*(input + i) < *scale) ? 
err_element * grad_element : grad_element; } __shared__ scalar_t sh_mem[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_mem, sum, tidx, bidx, dev_tmp, dev_last_block_counter, grad_scale, gridDim.x); } template <typename scalar_t> __global__ void ab_cuda_grad_thresholds_kernel( scalar_t* __restrict__ grad_thresholds, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, scalar_t* __restrict__ dev_tmp, int* __restrict__ dev_last_block_counter, int64_t total_elements_per_threshold, int64_t contiguous_elements_per_threshold, int64_t threshold_count, int64_t channel_offset) { const uint16_t tidx = threadIdx.x; const uint32_t bidx = blockIdx.x; const uint32_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x; scalar_t sum = 0; for (int i = gtidx; i < total_elements_per_threshold; i += grid_size) { // i is the global thread index - need to calculate the input array index // that belongs to a specific scale index from i. 
Will do this by treating i // as the index in a non-existing array where input values belonging to a single // scale have a contiguous block layout, but will recalculate actual index into the // input/output array based on the fact that the values belonging to a single scale // in reality have interleaved block layout, with a spacing between the blocks // equal to channel_offset int actual_idx = (i / contiguous_elements_per_threshold) * channel_offset + (i % contiguous_elements_per_threshold); scalar_t input_element = *(input + actual_idx); if (input_element < *scale && input_element > 0) { sum += -*(grad_output + actual_idx); } } __shared__ scalar_t sh_mem[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_mem, sum, tidx, bidx, dev_tmp, dev_last_block_counter, grad_thresholds, gridDim.x); } } at::Tensor wb_cuda_forward( at::Tensor input, bool per_channel) { const auto quantized_elements_count = input.numel(); int64_t elements_per_scale = 0; int64_t scale_count = per_channel ? 
input.size(0) : 1; int64_t input_elements_count = input.numel(); auto scale = at::zeros({scale_count}, input.options()); elements_per_scale = input_elements_count / input.size(0); auto grid_size = ::min(GET_BLOCKS(elements_per_scale), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE); auto dev_tmp = at::empty({grid_size}, input.options()); auto dev_last_block_counter = at::zeros({1}, at::device(input.options().device()).dtype(at::kInt)); auto output = at::empty_like(input); for (int ch_idx = 0; ch_idx < scale_count; ch_idx++) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "wb_cuda_forward_scale", ([&] { hipLaunchKernelGGL(( wb_cuda_scale_calc_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data<scalar_t>() + ch_idx * elements_per_scale, scale.data<scalar_t>() + ch_idx, dev_tmp.data<scalar_t>(), dev_last_block_counter.data<int>(), elements_per_scale); })); dev_tmp.fill_(0.0); dev_last_block_counter.fill_(0); } AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "wb_cuda_forward_binarize", ([&] { hipLaunchKernelGGL(( wb_cuda_binarize_kernel<scalar_t>), dim3(GET_BLOCKS(input_elements_count)), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output.data<scalar_t>(), input.data<scalar_t>(), scale.data<scalar_t>(), scale_count, elements_per_scale, input_elements_count ); })); return output; } at::Tensor ab_cuda_forward( at::Tensor input, at::Tensor scale, at::Tensor thresholds) { const auto quantized_elements_count = input.numel(); int64_t input_elements_count = input.numel(); int64_t threshold_count = thresholds.numel(); TORCH_CHECK(input.size(1) == threshold_count, "Threshold count is not equal to activations channel count"); int64_t contiguous_elements_per_threshold = input_elements_count / input.size(0) / input.size(1); auto output = at::empty_like(input); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_forward", ([&] { 
hipLaunchKernelGGL(( ab_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(input_elements_count)), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output.data<scalar_t>(), input.data<scalar_t>(), scale.data<scalar_t>(), thresholds.data<scalar_t>(), threshold_count, contiguous_elements_per_threshold, input_elements_count ); })); return output; } std::vector<at::Tensor> ab_cuda_backward( at::Tensor grad_output, at::Tensor input, at::Tensor scale, at::Tensor output) { int64_t input_elements_count = input.numel(); int64_t threshold_count = input.size(1); int64_t channel_offset = input.numel() / input.size(0); std::vector<int64_t> threshold_shape(input.dim()); for (int64_t dim_idx = 0; dim_idx < input.dim(); dim_idx++) { if (dim_idx != 1) { threshold_shape[dim_idx] = 1; } else { threshold_shape[dim_idx] = input.size(dim_idx); } } auto grad_input = at::empty_like(input); auto grad_scale = at::empty_like(scale); auto grad_thresholds = at::empty(threshold_shape, input.options()); int64_t total_elements_per_threshold = input.numel() / threshold_count; int64_t contiguous_elements_per_threshold = input_elements_count / input.size(0) / input.size(1); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_backward", ([&] { hipLaunchKernelGGL(( ab_cuda_grad_input_kernel<scalar_t>), dim3(GET_BLOCKS(input_elements_count)), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data<scalar_t>(), grad_output.data<scalar_t>(), input.data<scalar_t>(), scale.data<scalar_t>(), input_elements_count ); })); auto grid_size = ::min(GET_BLOCKS(input_elements_count), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE); auto dev_tmp = at::empty({grid_size}, grad_output.options()); auto dev_last_block_counter = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_backward", ([&] { hipLaunchKernelGGL(( 
ab_cuda_grad_scale_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_scale.data<scalar_t>(), grad_output.data<scalar_t>(), output.data<scalar_t>(), input.data<scalar_t>(), scale.data<scalar_t>(), dev_tmp.data<scalar_t>(), dev_last_block_counter.data<int>(), input_elements_count); })); grid_size = ::min(GET_BLOCKS(total_elements_per_threshold), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE); dev_tmp = at::empty({grid_size}, grad_output.options()); dev_last_block_counter = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); // Same concept as for per activation channel quantization for (int64_t ch_idx = 0; ch_idx < threshold_count; ch_idx++) { auto init_element_offset = contiguous_elements_per_threshold * ch_idx; AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_backward", ([&] { hipLaunchKernelGGL(( ab_cuda_grad_thresholds_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_thresholds.data<scalar_t>() + ch_idx, grad_output.data<scalar_t>() + init_element_offset, input.data<scalar_t>() + init_element_offset, scale.data<scalar_t>(), dev_tmp.data<scalar_t>(), dev_last_block_counter.data<int>(), total_elements_per_threshold, contiguous_elements_per_threshold, threshold_count, channel_offset); })); dev_tmp.fill_(0.0); dev_last_block_counter.fill_(0); } return {grad_input, grad_scale, grad_thresholds}; }
667a2598367084bc488dec53972350cc356e8fed.cu
#include "common_cuda_defs.cuh" #include "common_cuda_funcs.cuh" namespace { template <typename scalar_t> __global__ void wb_cuda_scale_calc_kernel( const scalar_t* __restrict__ input, scalar_t* __restrict__ scale_output, scalar_t* __restrict__ dev_tmp, int* __restrict__ dev_last_block_counter, const int64_t total_elements_count) { const uint16_t tidx = threadIdx.x; const uint32_t bidx = blockIdx.x; const uint64_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x; scalar_t sum = 0; for (int i = gtidx; i < total_elements_count; i += grid_size) { sum += abs(*(input + i)); } sum /= total_elements_count; __shared__ scalar_t sh_mem[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_mem, sum, tidx, bidx, dev_tmp, dev_last_block_counter, scale_output, gridDim.x); } template <typename scalar_t> __global__ void wb_cuda_binarize_kernel( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, const int64_t scale_count, const int64_t elements_per_scale, const int64_t size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int64_t scale_idx = static_cast<int64_t>(idx / elements_per_scale) % scale_count; scalar_t scale_element = *(scale + scale_idx); *(output + idx) = (*(input + idx) > 0) ? 
scale_element : -scale_element; } } template <typename scalar_t> __global__ void ab_cuda_forward_kernel( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, const scalar_t* __restrict__ thresholds, const int64_t threshold_count, const int64_t contiguous_elements_per_threshold, const int64_t size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int64_t threshold_idx = static_cast<int64_t>(idx / contiguous_elements_per_threshold) % threshold_count; scalar_t threshold_element = (*(thresholds + threshold_idx)) * (*scale); *(output + idx) = (*(input + idx) > threshold_element) ? (*scale) : static_cast<scalar_t>(0.0); } } template <typename scalar_t> __global__ void ab_cuda_grad_input_kernel( scalar_t* __restrict__ grad_input, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, const int64_t size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { const scalar_t input_element = *(input + idx); *(grad_input + idx) = (input_element > 0 && input_element < *scale) ? *(grad_output + idx) : static_cast<scalar_t>(0.0); } } template <typename scalar_t> __global__ void ab_cuda_grad_scale_kernel( scalar_t* __restrict__ grad_scale, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, scalar_t* __restrict__ dev_tmp, int* __restrict__ dev_last_block_counter, const int64_t total_elements_count) { const uint16_t tidx = threadIdx.x; const uint32_t bidx = blockIdx.x; const uint32_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x; scalar_t sum = 0; for (int i = gtidx; i < total_elements_count; i += grid_size) { scalar_t err_element = (*(output + i) - *(input + i)) / *scale; scalar_t grad_element = *(grad_output + i); sum += (*(input + i) < *scale) ? 
err_element * grad_element : grad_element; } __shared__ scalar_t sh_mem[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_mem, sum, tidx, bidx, dev_tmp, dev_last_block_counter, grad_scale, gridDim.x); } template <typename scalar_t> __global__ void ab_cuda_grad_thresholds_kernel( scalar_t* __restrict__ grad_thresholds, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, scalar_t* __restrict__ dev_tmp, int* __restrict__ dev_last_block_counter, int64_t total_elements_per_threshold, int64_t contiguous_elements_per_threshold, int64_t threshold_count, int64_t channel_offset) { const uint16_t tidx = threadIdx.x; const uint32_t bidx = blockIdx.x; const uint32_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x; scalar_t sum = 0; for (int i = gtidx; i < total_elements_per_threshold; i += grid_size) { // i is the global thread index - need to calculate the input array index // that belongs to a specific scale index from i. 
Will do this by treating i // as the index in a non-existing array where input values belonging to a single // scale have a contiguous block layout, but will recalculate actual index into the // input/output array based on the fact that the values belonging to a single scale // in reality have interleaved block layout, with a spacing between the blocks // equal to channel_offset int actual_idx = (i / contiguous_elements_per_threshold) * channel_offset + (i % contiguous_elements_per_threshold); scalar_t input_element = *(input + actual_idx); if (input_element < *scale && input_element > 0) { sum += -*(grad_output + actual_idx); } } __shared__ scalar_t sh_mem[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_mem, sum, tidx, bidx, dev_tmp, dev_last_block_counter, grad_thresholds, gridDim.x); } } at::Tensor wb_cuda_forward( at::Tensor input, bool per_channel) { const auto quantized_elements_count = input.numel(); int64_t elements_per_scale = 0; int64_t scale_count = per_channel ? 
input.size(0) : 1; int64_t input_elements_count = input.numel(); auto scale = at::zeros({scale_count}, input.options()); elements_per_scale = input_elements_count / input.size(0); auto grid_size = std::min(GET_BLOCKS(elements_per_scale), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE); auto dev_tmp = at::empty({grid_size}, input.options()); auto dev_last_block_counter = at::zeros({1}, at::device(input.options().device()).dtype(at::kInt)); auto output = at::empty_like(input); for (int ch_idx = 0; ch_idx < scale_count; ch_idx++) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "wb_cuda_forward_scale", ([&] { wb_cuda_scale_calc_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( input.data<scalar_t>() + ch_idx * elements_per_scale, scale.data<scalar_t>() + ch_idx, dev_tmp.data<scalar_t>(), dev_last_block_counter.data<int>(), elements_per_scale); })); dev_tmp.fill_(0.0); dev_last_block_counter.fill_(0); } AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "wb_cuda_forward_binarize", ([&] { wb_cuda_binarize_kernel<scalar_t><<<GET_BLOCKS(input_elements_count), CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( output.data<scalar_t>(), input.data<scalar_t>(), scale.data<scalar_t>(), scale_count, elements_per_scale, input_elements_count ); })); return output; } at::Tensor ab_cuda_forward( at::Tensor input, at::Tensor scale, at::Tensor thresholds) { const auto quantized_elements_count = input.numel(); int64_t input_elements_count = input.numel(); int64_t threshold_count = thresholds.numel(); TORCH_CHECK(input.size(1) == threshold_count, "Threshold count is not equal to activations channel count"); int64_t contiguous_elements_per_threshold = input_elements_count / input.size(0) / input.size(1); auto output = at::empty_like(input); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_forward", ([&] { ab_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(input_elements_count), CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, 
at::cuda::getCurrentCUDAStream()>>>( output.data<scalar_t>(), input.data<scalar_t>(), scale.data<scalar_t>(), thresholds.data<scalar_t>(), threshold_count, contiguous_elements_per_threshold, input_elements_count ); })); return output; } std::vector<at::Tensor> ab_cuda_backward( at::Tensor grad_output, at::Tensor input, at::Tensor scale, at::Tensor output) { int64_t input_elements_count = input.numel(); int64_t threshold_count = input.size(1); int64_t channel_offset = input.numel() / input.size(0); std::vector<int64_t> threshold_shape(input.dim()); for (int64_t dim_idx = 0; dim_idx < input.dim(); dim_idx++) { if (dim_idx != 1) { threshold_shape[dim_idx] = 1; } else { threshold_shape[dim_idx] = input.size(dim_idx); } } auto grad_input = at::empty_like(input); auto grad_scale = at::empty_like(scale); auto grad_thresholds = at::empty(threshold_shape, input.options()); int64_t total_elements_per_threshold = input.numel() / threshold_count; int64_t contiguous_elements_per_threshold = input_elements_count / input.size(0) / input.size(1); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_backward", ([&] { ab_cuda_grad_input_kernel<scalar_t><<<GET_BLOCKS(input_elements_count), CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data<scalar_t>(), grad_output.data<scalar_t>(), input.data<scalar_t>(), scale.data<scalar_t>(), input_elements_count ); })); auto grid_size = std::min(GET_BLOCKS(input_elements_count), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE); auto dev_tmp = at::empty({grid_size}, grad_output.options()); auto dev_last_block_counter = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_backward", ([&] { ab_cuda_grad_scale_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( grad_scale.data<scalar_t>(), grad_output.data<scalar_t>(), output.data<scalar_t>(), input.data<scalar_t>(), 
scale.data<scalar_t>(), dev_tmp.data<scalar_t>(), dev_last_block_counter.data<int>(), input_elements_count); })); grid_size = std::min(GET_BLOCKS(total_elements_per_threshold), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE); dev_tmp = at::empty({grid_size}, grad_output.options()); dev_last_block_counter = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); // Same concept as for per activation channel quantization for (int64_t ch_idx = 0; ch_idx < threshold_count; ch_idx++) { auto init_element_offset = contiguous_elements_per_threshold * ch_idx; AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_backward", ([&] { ab_cuda_grad_thresholds_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( grad_thresholds.data<scalar_t>() + ch_idx, grad_output.data<scalar_t>() + init_element_offset, input.data<scalar_t>() + init_element_offset, scale.data<scalar_t>(), dev_tmp.data<scalar_t>(), dev_last_block_counter.data<int>(), total_elements_per_threshold, contiguous_elements_per_threshold, threshold_count, channel_offset); })); dev_tmp.fill_(0.0); dev_last_block_counter.fill_(0); } return {grad_input, grad_scale, grad_thresholds}; }
6d0938181234eff7c2f79734a5871fe46c5c6392.hip
// !!! This is a file automatically generated by hipify!!! /** * File: ga.cu * Author: Jeanhwea * Email: hujinghui@buaa.edu.cn */ #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <helper_cuda.h> #include "helper.cuh" #include "ga.cuh" __device__ size_t d_npop; __device__ size_t d_ngen; // chromosome for [sz_taks * d_npop*2] int * h_chrm; /************************************************************************/ /* hash value for each person */ /************************************************************************/ unsigned long * h_hashv; /************************************************************************/ /* you can get fitness value like this: */ /* h_fitv[ itask-1 ]]; */ /************************************************************************/ float * h_fitv; /************************************************************************/ /* ordering of each person in our population */ /************************************************************************/ size_t * h_order; __device__ size_t * d_order; // host data for display result int * chrm; unsigned long * hashv; float * fitv; size_t * order; void cuGaEvolve() { StopWatchInterface * timer = NULL; float elapse_time_inMs = 0.0f; hipEvent_t start, stop; // Choose which GPU to run on, change this on a multi-GPU system. checkCudaErrors(hipSetDevice(0)); // Allocate GPU buffer allocMemOnDevice(); // transfer data to GPU moveDataToDevice(); if (ntask > MAX_CHRM_LEN) { fprintf(stderr, "ntask = %d (> MAX_CHRM_LEN)\n", ntask); exit(1); } gaAllocMem(); // starting timer ... sdkCreateTimer(&timer); checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); checkCudaErrors(hipDeviceSynchronize()); sdkStartTimer(&timer); checkCudaErrors(hipEventRecord(start, 0)); // Launch a kernel on the GPU with one thread for each element. 
gaEvolve(npop, ngen); checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&timer); checkCudaErrors(hipEventElapsedTime(&elapse_time_inMs, start, stop)); elapse_time_inMs = sdkGetTimerValue(&timer); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); // Check for any errors launching the kernel checkCudaErrors(hipGetLastError()); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. checkCudaErrors(hipDeviceSynchronize()); //printf("total time in GPU = %f ms\n", elapse_time_inMs); printf("%f ms\n", elapse_time_inMs); gaFreeMem(); freeMemOnDevice(); } __global__ void gaSetPara(size_t npop, size_t ngen, size_t * h_order) { d_npop = npop; d_ngen = ngen; d_order = h_order; } void gaAllocMem() { size_t m_size; // chromosome attribution of a person m_size = 2 * npop * ntask * sizeof(int); checkCudaErrors(hipMalloc((void **)&h_chrm, m_size)); // hash value attribution of a person m_size = 2 * npop * sizeof(unsigned long); checkCudaErrors(hipMalloc((void **)&h_hashv, m_size)); // fitness value attribution of a person m_size = 2 * npop * sizeof(float); checkCudaErrors(hipMalloc((void **)&h_fitv, m_size)); // ordering after each selection m_size = 2 * npop * sizeof(size_t); checkCudaErrors(hipMalloc((void **)&h_order, m_size)); chrm = (int *) calloc(2 * npop * ntask, sizeof(int)); assert(chrm != 0); hashv = (unsigned long *) calloc(2 * npop, sizeof(unsigned long)); assert(hashv != 0); fitv = (float *) calloc(2 * npop, sizeof(float)); assert(fitv != 0); order = (size_t *) calloc(2 * npop, sizeof(size_t)); assert(order != 0); } void gaFreeMem() { checkCudaErrors(hipFree(h_chrm)); checkCudaErrors(hipFree(h_hashv)); checkCudaErrors(hipFree(h_fitv)); checkCudaErrors(hipFree(h_order)); free(chrm); free(hashv); free(fitv); free(order); } static void dbPrintPerson(int * person, size_t n, char * tag, FILE * out) { size_t i; fprintf(out, "%s : ", tag); for 
(i = 0; i < n; i++) { fprintf(out, "%d", person[i]); if (i < n-1) { fprintf(out, "->"); } else { fprintf(out, "\n"); } } } void dbDisplayWorld(FILE * out) { size_t i; checkCudaErrors(hipMemcpy(chrm, h_chrm, 2*npop * ntask * sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(hashv, h_hashv, 2*npop * sizeof(unsigned long), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(fitv, h_fitv, 2*npop * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipDeviceSynchronize()); fprintf(out, "parent----\n"); for (i = 0; i < npop; i++) {; char tag[100]; sprintf(tag, "i%04d\th%08u\tf%f\t",i, hashv[order[i]], fitv[order[i]]); dbPrintPerson(chrm+ntask*order[i], ntask, tag, out); } fprintf(out, "children----\n"); for (i = npop; i < 2*npop; i++) {; char tag[100]; sprintf(tag, "i%04d\th%08u\tf%f\t",i, hashv[order[i]], fitv[order[i]]); dbPrintPerson(chrm+ntask*order[i], ntask, tag, out); } } void dbPrintResult(FILE * out) { size_t i, j; checkCudaErrors(hipMemcpy(chrm, h_chrm, 2*npop * ntask * sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipDeviceSynchronize()); for (i = 0; i < 3; i++) { int * person = chrm+ntask*order[i]; for (j = 0; j < ntask; j++) { fprintf(out, "%d%c", person[j] ,j==(ntask-1)? 
'\n': ' '); } } } void gaEvolve(size_t npop, size_t ngen) { size_t i; FILE * fd_info, * fd_resu, * fd_chom; fd_info = fopen("output.txt", "w"); fd_resu = fopen("result.txt", "w"); fd_chom = fopen("chomes.txt", "w"); assert(fd_info != 0); hipLaunchKernelGGL(( gaSetPara), dim3(1), dim3(1), 0, 0, npop, ngen, h_order); size_t msize_occupy; msize_occupy = npop * nreso * sizeof(float); if (msize_occupy > MAX_SHARED_MEM) { fprintf(stderr, "msize_occupy = %d (> MAX_SHARED_MEM(%d))\n", msize_occupy, MAX_SHARED_MEM); exit(1); } for (i = 0; i < 2*npop; i++) { order[i] = i; } checkCudaErrors(hipMemcpy(h_order, order, 2*npop * sizeof(size_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gaInit), dim3(1), dim3(npop), msize_occupy, 0, h_chrm, h_hashv, h_fitv); dbDisplayWorld(fd_chom); for (i = 0; i < ngen; ++i) { fprintf(fd_chom,"%d generation----------------------\n", i+1); hipLaunchKernelGGL(( gaCrossover), dim3(1), dim3(npop/2), msize_occupy, 0, h_chrm, h_hashv, h_fitv); hipLaunchKernelGGL(( gaMutation), dim3(1), dim3(npop), msize_occupy, 0, h_chrm, h_hashv, h_fitv); gaSelection(); gaStatistics(fd_info); dbDisplayWorld(fd_chom); checkCudaErrors(hipDeviceSynchronize()); } dbDisplayWorld(fd_chom); dbPrintResult(fd_resu); fclose(fd_info); fclose(fd_resu); fclose(fd_chom); } /************************************************************************/ /* Initialize a person */ /************************************************************************/ __global__ void gaInit(int * h_chrm, unsigned long * h_hashv, float * h_fitv) { int * person; size_t tid = threadIdx.x; extern __shared__ float sh_occupys[]; float * occupy; person = h_chrm + d_ntask * d_order[tid]; occupy = sh_occupys + tid * d_nreso; size_t i; for (i = 0; i < d_ntask; i++) { person[i] = i+1; } size_t a, b; for (i = 0; i < d_ntask; i++) { a = randInt(0, d_ntask-1); b = i; if (a > b) { int tmp; tmp=a; a=b; b=tmp; } swapBits(a, b, person); } h_hashv[d_order[tid]] = hashfunc(person, d_ntask); h_fitv[d_order[tid]] = 
gaObject(person, occupy); // printf("%d %08u %f\n", d_order[tid], h_hashv[d_order[tid]], h_fitv[d_order[tid]]); __syncthreads(); } __global__ void gaCrossover(int * h_chrm, unsigned long * h_hashv, float * h_fitv) { int * dad, * mom, * bro, * sis, * person; size_t a, b, tid; size_t j, k; bool needCrossover; float * occupy; extern __shared__ float sh_occupys[]; tid = threadIdx.x; occupy = sh_occupys + tid * d_nreso; needCrossover = true; while (needCrossover) { a = randInt(0, d_npop-1); b = randInt(0, d_npop-1); dad = h_chrm + d_ntask*d_order[a]; mom = h_chrm + d_ntask*d_order[b]; bro = h_chrm + d_ntask*d_order[d_npop+2*tid]; sis = h_chrm + d_ntask*d_order[d_npop+2*tid+1]; crossover(dad, mom, bro, sis); if (!check(bro)) { fixPerson(bro); } if (!check(sis)) { fixPerson(sis); } unsigned long bro_hash, sis_hash; bro_hash = hashfunc(bro, d_ntask); sis_hash = hashfunc(sis, d_ntask); h_hashv[d_order[d_npop+2*tid]] = bro_hash; h_hashv[d_order[d_npop+2*tid+1]] = sis_hash; needCrossover = false; for (j = 0; j < d_npop; j++) { // pick j-th person (parent) person = h_chrm + d_ntask*d_order[j]; // check for brother if (bro_hash == h_hashv[d_order[j]]) { for (k = 0; k < d_ntask; k++) { if (bro[k] != person[k]) break; } if (k == d_ntask) { // need re-crossover needCrossover = true; break; } } // check for sister if (sis_hash == h_hashv[d_order[j]]) { for (k = 0; k < d_ntask; k++) { if (sis[k] != person[k]) break; } if (k == d_ntask) { // need re-crossover needCrossover = true; break; } } } __syncthreads(); } if (!needCrossover) { h_fitv[d_order[d_npop+2*tid]] = gaObject(bro, occupy); h_fitv[d_order[d_npop+2*tid+1]] = gaObject(sis, occupy); } __syncthreads(); } /************************************************************************/ /* ordering-based two points crossover */ /************************************************************************/ __device__ void crossover(int * dad, int * mom, int * bro, int * sis) { size_t i, j, k, a, b; int dad_new[MAX_CHRM_LEN], 
mom_new[MAX_CHRM_LEN]; a = randInt(0, d_ntask-1); b = randInt(0, d_ntask-1); if (a > b) { size_t tmp; tmp=a; a=b; b=tmp; } for (i = 0; i < d_ntask; i++) { dad_new[i] = dad[i]; mom_new[i] = mom[i]; bro[i] = 0; sis[i] = 0; } // copy selected continuous region first (part1) for (i = a; i <= b; i++) { bro[i] = mom[i]; sis[i] = dad[i]; } // remove duplicated items for (k = 0; k < d_ntask; k++) { for (i = a; i <= b; i++) { if (dad_new[k] == mom[i]) { dad_new[k] = 0; break; } } for (i = a; i <= b; i++) { if (mom_new[k] == dad[i]) { mom_new[k] = 0; break; } } } // copy remainder region (part2) i = j = 0; for (k = 0; k < d_ntask; k++) { if (bro[k] == 0) { for (; i < d_ntask; i++) { if (dad_new[i] != 0) { bro[k] = dad_new[i++]; break; } } } if (sis[k] == 0) { for (; j < d_ntask; j++) { if (mom_new[j] != 0) { sis[k] = mom_new[j++]; break; } } } } } __global__ void gaMutation(int * h_chrm, unsigned long * h_hashv, float * h_fitv) { int * person; size_t tid; float * occupy; extern __shared__ float sh_occupys[]; tid = threadIdx.x; occupy = sh_occupys + tid * d_nreso; if (randProb() < PROB_MUTATION) { // mutate n-th parent person = h_chrm + d_ntask*d_order[tid]; mutation(person); h_hashv[d_order[tid]] = hashfunc(person, d_ntask); h_fitv[d_order[tid]] = gaObject(person, occupy); } if (randProb() < PROB_MUTATION) { // mutate n-th child person = h_chrm + d_ntask*d_order[tid+d_npop]; mutation(person); h_hashv[d_order[tid+d_npop]] = hashfunc(person, d_ntask); h_fitv[d_order[tid+d_npop]] = gaObject(person, occupy); } __syncthreads(); } /************************************************************************/ /* two points swap mutation */ /************************************************************************/ __device__ void mutation(int * person) { size_t a, b; a = randInt(0, d_ntask-1); b = randInt(0, d_ntask-1); if (a > b) { size_t tmp; tmp=a; a=b; b=tmp; } swapBits(a, b, person); } /************************************************************************/ /* calculate fitness 
value, and move the bests to the parent of next */ /* generation. */ /************************************************************************/ void gaSelection() { // copy fitness value form device checkCudaErrors(hipMemcpy(fitv, h_fitv, 2*npop * sizeof(float), hipMemcpyDeviceToHost)); // sort individual by fitness value qsort(order, 2*npop, sizeof(size_t), fitvalueCompare); // transfer the order after sorting checkCudaErrors(hipMemcpy(h_order, order, 2*npop * sizeof(size_t), hipMemcpyHostToDevice)); } /************************************************************************/ /* Statistics of some important information. */ /************************************************************************/ void gaStatistics(FILE * out) { size_t i; for (i = 0; i < npop; i++) { fprintf(out, "%f%c", fitv[order[i]], i==npop-1 ? '\n': ' '); } } /****************************************************************************/ /* return true, if a-th task swap with b-th task; otherwise, return false. */ /****************************************************************************/ __device__ bool swapBits(size_t a, size_t b, int * person) { bool ret = true; // notice that, a < b if (a >= b) { ret = false; } else { size_t i, a_itask, b_itask, k_itask; a_itask = person[a]; b_itask = person[b]; for (i = a; i <= b; i++) { k_itask = person[i]; if ( (i!=a) && IS_DEPEND(a_itask, k_itask) ){ ret = false; break; } if ( (i!=b) && IS_DEPEND(k_itask, b_itask) ) { ret = false; break; } } } if (ret) { int tmp; tmp=person[a]; person[a]=person[b]; person[b]=tmp; } return ret; } #define HASH_SHIFT (3) #define HASH_SIZE (19921104) __device__ unsigned long hashfunc(int * person, size_t num) { unsigned long hash_value; hash_value = 0; for (size_t i = 0; i < num; i++) { hash_value = (((unsigned long)person[i] + hash_value) << HASH_SHIFT ) % HASH_SIZE; } return hash_value; } __device__ float gaObject(int * person, float * occupy) { float score; if (check(person)) { scheFCFS(person, occupy); score = 
getMaxTotalOccupy(occupy); if (score == 0.0f) { score = INF_DURATION; } } else { score = INF_DURATION; } return score; } /************************************************************************/ /* feasibility check for <chromo_id>-th chromosome. */ /* return true, if pass; otherwise, return false */ /************************************************************************/ __device__ bool check(int * person) { size_t i, j; for (i = 0; i < d_ntask; i++) { for (j = i+1; j < d_ntask; j++) { int i_itask, j_itask; i_itask = person[i]; j_itask = person[j]; if (IS_DEPEND(j_itask, i_itask)) { // printf("failed depend %d -> %d\n", j_itask, i_itask); return false; } } } return true; } /************************************************************************/ /* scheduler, implement FCFS (first come, first service). */ /************************************************************************/ __device__ void scheFCFS(int * person, float * occupy) { size_t i, r, itask; // set temporary data struct as 0 clearResouceOccupy(occupy); for (i = 0; i < d_ntask; i++) { itask = person[i]; float dura = DURATION(itask); size_t min_id = 0; float min_occ, occ; for (r = 1; r <= d_nreso; r++) { // search all resources if (IS_ASSIGN(itask,r)) { if (min_id == 0) { min_occ = getTotalOccupy(r, occupy); min_id = r; } else { occ = getTotalOccupy(r, occupy); if (occ < min_occ) { min_occ = occ; min_id = r; } } } } if (min_id > 0) { allocResouce(min_id, dura, occupy); } else { allocResouce(1, dura, occupy); } } } /************************************************************************/ /* move a person[ele] several steps forward */ /************************************************************************/ __device__ void personMoveForward(int * person, size_t ele, size_t step) { int tmp; size_t i; tmp = person[ele]; for (i = ele; i < ele + step; i++) { person[i] = person[i+1]; } person[ele+step] = tmp; } __device__ void fixPerson(int * person) { size_t i, j, step; i = 0; while (i < d_ntask) { // 
FOR all tasks listed in person array // Number of steps to move elements forward? step = 0; for (j = i+1; j < d_ntask; j++) { if (IS_DEPEND(person[j], person[i])) step = j-i; } if (step > 0) { personMoveForward(person, i, step); } else { // if no use to move, then i++ i++; } } } static int fitvalueCompare(const void *a, const void *b) { return (fitv[(*(size_t *)a)] > fitv[(*(size_t *)b)]) ? 1: -1; }
6d0938181234eff7c2f79734a5871fe46c5c6392.cu
/** * File: ga.cu * Author: Jeanhwea * Email: hujinghui@buaa.edu.cn */ #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <helper_cuda.h> #include "helper.cuh" #include "ga.cuh" __device__ size_t d_npop; __device__ size_t d_ngen; // chromosome for [sz_taks * d_npop*2] int * h_chrm; /************************************************************************/ /* hash value for each person */ /************************************************************************/ unsigned long * h_hashv; /************************************************************************/ /* you can get fitness value like this: */ /* h_fitv[ itask-1 ]]; */ /************************************************************************/ float * h_fitv; /************************************************************************/ /* ordering of each person in our population */ /************************************************************************/ size_t * h_order; __device__ size_t * d_order; // host data for display result int * chrm; unsigned long * hashv; float * fitv; size_t * order; void cuGaEvolve() { StopWatchInterface * timer = NULL; float elapse_time_inMs = 0.0f; cudaEvent_t start, stop; // Choose which GPU to run on, change this on a multi-GPU system. checkCudaErrors(cudaSetDevice(0)); // Allocate GPU buffer allocMemOnDevice(); // transfer data to GPU moveDataToDevice(); if (ntask > MAX_CHRM_LEN) { fprintf(stderr, "ntask = %d (> MAX_CHRM_LEN)\n", ntask); exit(1); } gaAllocMem(); // starting timer ... sdkCreateTimer(&timer); checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaThreadSynchronize()); sdkStartTimer(&timer); checkCudaErrors(cudaEventRecord(start, 0)); // Launch a kernel on the GPU with one thread for each element. 
gaEvolve(npop, ngen); checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&timer); checkCudaErrors(cudaEventElapsedTime(&elapse_time_inMs, start, stop)); elapse_time_inMs = sdkGetTimerValue(&timer); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); // Check for any errors launching the kernel checkCudaErrors(cudaGetLastError()); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. checkCudaErrors(cudaDeviceSynchronize()); //printf("total time in GPU = %f ms\n", elapse_time_inMs); printf("%f ms\n", elapse_time_inMs); gaFreeMem(); freeMemOnDevice(); } __global__ void gaSetPara(size_t npop, size_t ngen, size_t * h_order) { d_npop = npop; d_ngen = ngen; d_order = h_order; } void gaAllocMem() { size_t m_size; // chromosome attribution of a person m_size = 2 * npop * ntask * sizeof(int); checkCudaErrors(cudaMalloc((void **)&h_chrm, m_size)); // hash value attribution of a person m_size = 2 * npop * sizeof(unsigned long); checkCudaErrors(cudaMalloc((void **)&h_hashv, m_size)); // fitness value attribution of a person m_size = 2 * npop * sizeof(float); checkCudaErrors(cudaMalloc((void **)&h_fitv, m_size)); // ordering after each selection m_size = 2 * npop * sizeof(size_t); checkCudaErrors(cudaMalloc((void **)&h_order, m_size)); chrm = (int *) calloc(2 * npop * ntask, sizeof(int)); assert(chrm != 0); hashv = (unsigned long *) calloc(2 * npop, sizeof(unsigned long)); assert(hashv != 0); fitv = (float *) calloc(2 * npop, sizeof(float)); assert(fitv != 0); order = (size_t *) calloc(2 * npop, sizeof(size_t)); assert(order != 0); } void gaFreeMem() { checkCudaErrors(cudaFree(h_chrm)); checkCudaErrors(cudaFree(h_hashv)); checkCudaErrors(cudaFree(h_fitv)); checkCudaErrors(cudaFree(h_order)); free(chrm); free(hashv); free(fitv); free(order); } static void dbPrintPerson(int * person, size_t n, char * tag, FILE * out) { size_t i; fprintf(out, 
"%s : ", tag); for (i = 0; i < n; i++) { fprintf(out, "%d", person[i]); if (i < n-1) { fprintf(out, "->"); } else { fprintf(out, "\n"); } } } void dbDisplayWorld(FILE * out) { size_t i; checkCudaErrors(cudaMemcpy(chrm, h_chrm, 2*npop * ntask * sizeof(int), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(hashv, h_hashv, 2*npop * sizeof(unsigned long), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(fitv, h_fitv, 2*npop * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaDeviceSynchronize()); fprintf(out, "parent----\n"); for (i = 0; i < npop; i++) {; char tag[100]; sprintf(tag, "i%04d\th%08u\tf%f\t",i, hashv[order[i]], fitv[order[i]]); dbPrintPerson(chrm+ntask*order[i], ntask, tag, out); } fprintf(out, "children----\n"); for (i = npop; i < 2*npop; i++) {; char tag[100]; sprintf(tag, "i%04d\th%08u\tf%f\t",i, hashv[order[i]], fitv[order[i]]); dbPrintPerson(chrm+ntask*order[i], ntask, tag, out); } } void dbPrintResult(FILE * out) { size_t i, j; checkCudaErrors(cudaMemcpy(chrm, h_chrm, 2*npop * ntask * sizeof(int), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaDeviceSynchronize()); for (i = 0; i < 3; i++) { int * person = chrm+ntask*order[i]; for (j = 0; j < ntask; j++) { fprintf(out, "%d%c", person[j] ,j==(ntask-1)? 
'\n': ' '); } } } void gaEvolve(size_t npop, size_t ngen) { size_t i; FILE * fd_info, * fd_resu, * fd_chom; fd_info = fopen("output.txt", "w"); fd_resu = fopen("result.txt", "w"); fd_chom = fopen("chomes.txt", "w"); assert(fd_info != 0); gaSetPara<<<1, 1>>>(npop, ngen, h_order); size_t msize_occupy; msize_occupy = npop * nreso * sizeof(float); if (msize_occupy > MAX_SHARED_MEM) { fprintf(stderr, "msize_occupy = %d (> MAX_SHARED_MEM(%d))\n", msize_occupy, MAX_SHARED_MEM); exit(1); } for (i = 0; i < 2*npop; i++) { order[i] = i; } checkCudaErrors(cudaMemcpy(h_order, order, 2*npop * sizeof(size_t), cudaMemcpyHostToDevice)); gaInit<<<1, npop, msize_occupy>>>(h_chrm, h_hashv, h_fitv); dbDisplayWorld(fd_chom); for (i = 0; i < ngen; ++i) { fprintf(fd_chom,"%d generation----------------------\n", i+1); gaCrossover<<<1, npop/2, msize_occupy>>>(h_chrm, h_hashv, h_fitv); gaMutation<<<1, npop, msize_occupy>>>(h_chrm, h_hashv, h_fitv); gaSelection(); gaStatistics(fd_info); dbDisplayWorld(fd_chom); checkCudaErrors(cudaDeviceSynchronize()); } dbDisplayWorld(fd_chom); dbPrintResult(fd_resu); fclose(fd_info); fclose(fd_resu); fclose(fd_chom); } /************************************************************************/ /* Initialize a person */ /************************************************************************/ __global__ void gaInit(int * h_chrm, unsigned long * h_hashv, float * h_fitv) { int * person; size_t tid = threadIdx.x; extern __shared__ float sh_occupys[]; float * occupy; person = h_chrm + d_ntask * d_order[tid]; occupy = sh_occupys + tid * d_nreso; size_t i; for (i = 0; i < d_ntask; i++) { person[i] = i+1; } size_t a, b; for (i = 0; i < d_ntask; i++) { a = randInt(0, d_ntask-1); b = i; if (a > b) { int tmp; tmp=a; a=b; b=tmp; } swapBits(a, b, person); } h_hashv[d_order[tid]] = hashfunc(person, d_ntask); h_fitv[d_order[tid]] = gaObject(person, occupy); // printf("%d %08u %f\n", d_order[tid], h_hashv[d_order[tid]], h_fitv[d_order[tid]]); __syncthreads(); } __global__ 
void gaCrossover(int * h_chrm, unsigned long * h_hashv, float * h_fitv) { int * dad, * mom, * bro, * sis, * person; size_t a, b, tid; size_t j, k; bool needCrossover; float * occupy; extern __shared__ float sh_occupys[]; tid = threadIdx.x; occupy = sh_occupys + tid * d_nreso; needCrossover = true; while (needCrossover) { a = randInt(0, d_npop-1); b = randInt(0, d_npop-1); dad = h_chrm + d_ntask*d_order[a]; mom = h_chrm + d_ntask*d_order[b]; bro = h_chrm + d_ntask*d_order[d_npop+2*tid]; sis = h_chrm + d_ntask*d_order[d_npop+2*tid+1]; crossover(dad, mom, bro, sis); if (!check(bro)) { fixPerson(bro); } if (!check(sis)) { fixPerson(sis); } unsigned long bro_hash, sis_hash; bro_hash = hashfunc(bro, d_ntask); sis_hash = hashfunc(sis, d_ntask); h_hashv[d_order[d_npop+2*tid]] = bro_hash; h_hashv[d_order[d_npop+2*tid+1]] = sis_hash; needCrossover = false; for (j = 0; j < d_npop; j++) { // pick j-th person (parent) person = h_chrm + d_ntask*d_order[j]; // check for brother if (bro_hash == h_hashv[d_order[j]]) { for (k = 0; k < d_ntask; k++) { if (bro[k] != person[k]) break; } if (k == d_ntask) { // need re-crossover needCrossover = true; break; } } // check for sister if (sis_hash == h_hashv[d_order[j]]) { for (k = 0; k < d_ntask; k++) { if (sis[k] != person[k]) break; } if (k == d_ntask) { // need re-crossover needCrossover = true; break; } } } __syncthreads(); } if (!needCrossover) { h_fitv[d_order[d_npop+2*tid]] = gaObject(bro, occupy); h_fitv[d_order[d_npop+2*tid+1]] = gaObject(sis, occupy); } __syncthreads(); } /************************************************************************/ /* ordering-based two points crossover */ /************************************************************************/ __device__ void crossover(int * dad, int * mom, int * bro, int * sis) { size_t i, j, k, a, b; int dad_new[MAX_CHRM_LEN], mom_new[MAX_CHRM_LEN]; a = randInt(0, d_ntask-1); b = randInt(0, d_ntask-1); if (a > b) { size_t tmp; tmp=a; a=b; b=tmp; } for (i = 0; i < d_ntask; i++) { 
dad_new[i] = dad[i]; mom_new[i] = mom[i]; bro[i] = 0; sis[i] = 0; } // copy selected continuous region first (part1) for (i = a; i <= b; i++) { bro[i] = mom[i]; sis[i] = dad[i]; } // remove duplicated items for (k = 0; k < d_ntask; k++) { for (i = a; i <= b; i++) { if (dad_new[k] == mom[i]) { dad_new[k] = 0; break; } } for (i = a; i <= b; i++) { if (mom_new[k] == dad[i]) { mom_new[k] = 0; break; } } } // copy remainder region (part2) i = j = 0; for (k = 0; k < d_ntask; k++) { if (bro[k] == 0) { for (; i < d_ntask; i++) { if (dad_new[i] != 0) { bro[k] = dad_new[i++]; break; } } } if (sis[k] == 0) { for (; j < d_ntask; j++) { if (mom_new[j] != 0) { sis[k] = mom_new[j++]; break; } } } } } __global__ void gaMutation(int * h_chrm, unsigned long * h_hashv, float * h_fitv) { int * person; size_t tid; float * occupy; extern __shared__ float sh_occupys[]; tid = threadIdx.x; occupy = sh_occupys + tid * d_nreso; if (randProb() < PROB_MUTATION) { // mutate n-th parent person = h_chrm + d_ntask*d_order[tid]; mutation(person); h_hashv[d_order[tid]] = hashfunc(person, d_ntask); h_fitv[d_order[tid]] = gaObject(person, occupy); } if (randProb() < PROB_MUTATION) { // mutate n-th child person = h_chrm + d_ntask*d_order[tid+d_npop]; mutation(person); h_hashv[d_order[tid+d_npop]] = hashfunc(person, d_ntask); h_fitv[d_order[tid+d_npop]] = gaObject(person, occupy); } __syncthreads(); } /************************************************************************/ /* two points swap mutation */ /************************************************************************/ __device__ void mutation(int * person) { size_t a, b; a = randInt(0, d_ntask-1); b = randInt(0, d_ntask-1); if (a > b) { size_t tmp; tmp=a; a=b; b=tmp; } swapBits(a, b, person); } /************************************************************************/ /* calculate fitness value, and move the bests to the parent of next */ /* generation. 
*/ /************************************************************************/ void gaSelection() { // copy fitness value form device checkCudaErrors(cudaMemcpy(fitv, h_fitv, 2*npop * sizeof(float), cudaMemcpyDeviceToHost)); // sort individual by fitness value qsort(order, 2*npop, sizeof(size_t), fitvalueCompare); // transfer the order after sorting checkCudaErrors(cudaMemcpy(h_order, order, 2*npop * sizeof(size_t), cudaMemcpyHostToDevice)); } /************************************************************************/ /* Statistics of some important information. */ /************************************************************************/ void gaStatistics(FILE * out) { size_t i; for (i = 0; i < npop; i++) { fprintf(out, "%f%c", fitv[order[i]], i==npop-1 ? '\n': ' '); } } /****************************************************************************/ /* return true, if a-th task swap with b-th task; otherwise, return false. */ /****************************************************************************/ __device__ bool swapBits(size_t a, size_t b, int * person) { bool ret = true; // notice that, a < b if (a >= b) { ret = false; } else { size_t i, a_itask, b_itask, k_itask; a_itask = person[a]; b_itask = person[b]; for (i = a; i <= b; i++) { k_itask = person[i]; if ( (i!=a) && IS_DEPEND(a_itask, k_itask) ){ ret = false; break; } if ( (i!=b) && IS_DEPEND(k_itask, b_itask) ) { ret = false; break; } } } if (ret) { int tmp; tmp=person[a]; person[a]=person[b]; person[b]=tmp; } return ret; } #define HASH_SHIFT (3) #define HASH_SIZE (19921104) __device__ unsigned long hashfunc(int * person, size_t num) { unsigned long hash_value; hash_value = 0; for (size_t i = 0; i < num; i++) { hash_value = (((unsigned long)person[i] + hash_value) << HASH_SHIFT ) % HASH_SIZE; } return hash_value; } __device__ float gaObject(int * person, float * occupy) { float score; if (check(person)) { scheFCFS(person, occupy); score = getMaxTotalOccupy(occupy); if (score == 0.0f) { score = 
INF_DURATION; } } else { score = INF_DURATION; } return score; } /************************************************************************/ /* feasibility check for <chromo_id>-th chromosome. */ /* return true, if pass; otherwise, return false */ /************************************************************************/ __device__ bool check(int * person) { size_t i, j; for (i = 0; i < d_ntask; i++) { for (j = i+1; j < d_ntask; j++) { int i_itask, j_itask; i_itask = person[i]; j_itask = person[j]; if (IS_DEPEND(j_itask, i_itask)) { // printf("failed depend %d -> %d\n", j_itask, i_itask); return false; } } } return true; } /************************************************************************/ /* scheduler, implement FCFS (first come, first service). */ /************************************************************************/ __device__ void scheFCFS(int * person, float * occupy) { size_t i, r, itask; // set temporary data struct as 0 clearResouceOccupy(occupy); for (i = 0; i < d_ntask; i++) { itask = person[i]; float dura = DURATION(itask); size_t min_id = 0; float min_occ, occ; for (r = 1; r <= d_nreso; r++) { // search all resources if (IS_ASSIGN(itask,r)) { if (min_id == 0) { min_occ = getTotalOccupy(r, occupy); min_id = r; } else { occ = getTotalOccupy(r, occupy); if (occ < min_occ) { min_occ = occ; min_id = r; } } } } if (min_id > 0) { allocResouce(min_id, dura, occupy); } else { allocResouce(1, dura, occupy); } } } /************************************************************************/ /* move a person[ele] several steps forward */ /************************************************************************/ __device__ void personMoveForward(int * person, size_t ele, size_t step) { int tmp; size_t i; tmp = person[ele]; for (i = ele; i < ele + step; i++) { person[i] = person[i+1]; } person[ele+step] = tmp; } __device__ void fixPerson(int * person) { size_t i, j, step; i = 0; while (i < d_ntask) { // FOR all tasks listed in person array // Number of steps 
to move elements forward? step = 0; for (j = i+1; j < d_ntask; j++) { if (IS_DEPEND(person[j], person[i])) step = j-i; } if (step > 0) { personMoveForward(person, i, step); } else { // if no use to move, then i++ i++; } } } static int fitvalueCompare(const void *a, const void *b) { return (fitv[(*(size_t *)a)] > fitv[(*(size_t *)b)]) ? 1: -1; }
b55d4a963a7508169e4d2c76201dee2654104d9f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
#_*_coding : UTF-8_*_
# Code writer: Weiguang.Zhao
# Writing time: 2021/10/3 7:09
# File Name: AdjacentPoint
# IDE: CLion
*/
#include <stdio.h>
#include <stdlib.h>
#include "cuda_config.h"
#include "AdjacentPoint.h"

// Error-checking wrapper for HIP runtime calls.
// https://stackoverflow.com/a/14038590
#define CUDA_ERR_CHK(code) { cuda_err_chk((code), __FILE__, __LINE__); }
inline void cuda_err_chk(hipError_t code, const char *file, int line, bool abort = true)
{
    if (code != hipSuccess) {
        fprintf(stderr, "\tCUDA ERROR: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// For each point, count how many OTHER points lie within *eps_d (Euclidean
// distance, inclusive). One thread per point; each thread scans all N points,
// so the kernel is O(N) per thread / O(N^2) overall.
__global__ void checkNearPoints_cuda(int *point_num_d, float *xyz_d, float *eps_d, int *ptsCnt_d)
{
    int th_index = blockIdx.x*blockDim.x + threadIdx.x;
    if (th_index >= *point_num_d) return;

    const int   n   = *point_num_d;   // hoist scalar loads out of the loop
    const float eps = *eps_d;
    const float o_x = xyz_d[th_index * 3 + 0];
    const float o_y = xyz_d[th_index * 3 + 1];
    const float o_z = xyz_d[th_index * 3 + 2];

    // Accumulate in a register and write the result once, instead of the
    // original read-modify-write of global memory on every neighbour hit.
    int count = 0;
    for (int k = 0; k < n; k++) {
        if (th_index == k) continue;
        float dx = xyz_d[k * 3 + 0] - o_x;
        float dy = xyz_d[k * 3 + 1] - o_y;
        float dz = xyz_d[k * 3 + 2] - o_z;
        // sqrtf: single-precision sqrt (original used double-precision sqrt
        // on a float expression)
        float l2 = sqrtf(dx*dx + dy*dy + dz*dz);
        if (l2 <= eps) {
            count = count + 1;
        }
    }
    ptsCnt_d[th_index] = count;
}

// Host driver: uploads the point cloud, launches the neighbour-count kernel,
// copies the counts back and prints one line per point.
//   point_num : number of points
//   xyz       : host array of point_num * 3 floats (x, y, z interleaved)
//   radius    : neighbourhood radius (eps threshold)
void checkNearPoints(const int point_num, float xyz[], const float radius)
{
    dim3 blocks(DIVUP(point_num, THREADS_PER_BLOCK));
    dim3 threads(THREADS_PER_BLOCK);

    int *ptsCnt_h = (int *)malloc(point_num * sizeof(int));
    if (ptsCnt_h == NULL) {
        fprintf(stderr, "\thost malloc failed\n");
        return;
    }

    // device buffers
    int   *point_num_d;
    float *xyz_d;
    float *radius_d;
    int   *ptsCnt_d;   // marks the number of adjacent points per input point
    CUDA_ERR_CHK( hipMalloc((void **) &point_num_d, sizeof(int)));
    CUDA_ERR_CHK(hipMalloc((void **) &xyz_d, 3*point_num*sizeof(float)));
    CUDA_ERR_CHK(hipMalloc((void **) &radius_d, sizeof(float)));
    CUDA_ERR_CHK(hipMalloc((void **) &ptsCnt_d, point_num*sizeof(int)));

    // copy host to device
    CUDA_ERR_CHK(hipMemcpy(point_num_d, &point_num, sizeof(int), hipMemcpyHostToDevice));
    CUDA_ERR_CHK(hipMemcpy(xyz_d, xyz, 3*point_num*sizeof(float), hipMemcpyHostToDevice));
    CUDA_ERR_CHK(hipMemcpy(radius_d, &radius, sizeof(float), hipMemcpyHostToDevice));

    // start device kernel; check for launch-configuration errors, which a
    // bare launch never reports
    hipLaunchKernelGGL(( checkNearPoints_cuda), dim3(blocks), dim3(threads), 0, 0, point_num_d, xyz_d, radius_d, ptsCnt_d);
    CUDA_ERR_CHK(hipGetLastError());

    // copy device to host (fixed: element type is int, not float — same
    // width, but the wrong sizeof was a latent bug)
    CUDA_ERR_CHK(hipMemcpy(ptsCnt_h, ptsCnt_d, point_num*sizeof(int), hipMemcpyDeviceToHost));

    // release gpu ram
    CUDA_ERR_CHK(hipFree(point_num_d));
    CUDA_ERR_CHK(hipFree(xyz_d));
    CUDA_ERR_CHK(hipFree(radius_d));
    CUDA_ERR_CHK(hipFree(ptsCnt_d));

    for (int i = 0; i < point_num; i++) {
        printf("index: %d adjacent point number: %d \n", i, ptsCnt_h[i]);
    }
    free(ptsCnt_h);   // was leaked in the original
}
b55d4a963a7508169e4d2c76201dee2654104d9f.cu
/*
#_*_coding : UTF-8_*_
# Code writer: Weiguang.Zhao
# Writing time: 2021/10/3 7:09 p.m.
# File Name: AdjacentPoint
# IDE: CLion
*/
#include <stdio.h>
#include <stdlib.h>
#include "cuda_config.h"
#include "AdjacentPoint.h"

// Error-checking wrapper for CUDA runtime calls.
// https://stackoverflow.com/a/14038590
#define CUDA_ERR_CHK(code) { cuda_err_chk((code), __FILE__, __LINE__); }
inline void cuda_err_chk(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "\tCUDA ERROR: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// For each point, count how many OTHER points lie within *eps_d (Euclidean
// distance, inclusive). One thread per point; each thread scans all N points,
// so the kernel is O(N) per thread / O(N^2) overall.
__global__ void checkNearPoints_cuda(int *point_num_d, float *xyz_d, float *eps_d, int *ptsCnt_d)
{
    int th_index = blockIdx.x*blockDim.x + threadIdx.x;
    if (th_index >= *point_num_d) return;

    const int   n   = *point_num_d;   // hoist scalar loads out of the loop
    const float eps = *eps_d;
    const float o_x = xyz_d[th_index * 3 + 0];
    const float o_y = xyz_d[th_index * 3 + 1];
    const float o_z = xyz_d[th_index * 3 + 2];

    // Accumulate in a register and write the result once, instead of the
    // original read-modify-write of global memory on every neighbour hit.
    int count = 0;
    for (int k = 0; k < n; k++) {
        if (th_index == k) continue;
        float dx = xyz_d[k * 3 + 0] - o_x;
        float dy = xyz_d[k * 3 + 1] - o_y;
        float dz = xyz_d[k * 3 + 2] - o_z;
        // sqrtf: single-precision sqrt (original used double-precision sqrt
        // on a float expression)
        float l2 = sqrtf(dx*dx + dy*dy + dz*dz);
        if (l2 <= eps) {
            count = count + 1;
        }
    }
    ptsCnt_d[th_index] = count;
}

// Host driver: uploads the point cloud, launches the neighbour-count kernel,
// copies the counts back and prints one line per point.
//   point_num : number of points
//   xyz       : host array of point_num * 3 floats (x, y, z interleaved)
//   radius    : neighbourhood radius (eps threshold)
void checkNearPoints(const int point_num, float xyz[], const float radius)
{
    dim3 blocks(DIVUP(point_num, THREADS_PER_BLOCK));
    dim3 threads(THREADS_PER_BLOCK);

    int *ptsCnt_h = (int *)malloc(point_num * sizeof(int));
    if (ptsCnt_h == NULL) {
        fprintf(stderr, "\thost malloc failed\n");
        return;
    }

    // device buffers
    int   *point_num_d;
    float *xyz_d;
    float *radius_d;
    int   *ptsCnt_d;   // marks the number of adjacent points per input point
    CUDA_ERR_CHK( cudaMalloc((void **) &point_num_d, sizeof(int)));
    CUDA_ERR_CHK(cudaMalloc((void **) &xyz_d, 3*point_num*sizeof(float)));
    CUDA_ERR_CHK(cudaMalloc((void **) &radius_d, sizeof(float)));
    CUDA_ERR_CHK(cudaMalloc((void **) &ptsCnt_d, point_num*sizeof(int)));

    // copy host to device
    CUDA_ERR_CHK(cudaMemcpy(point_num_d, &point_num, sizeof(int), cudaMemcpyHostToDevice));
    CUDA_ERR_CHK(cudaMemcpy(xyz_d, xyz, 3*point_num*sizeof(float), cudaMemcpyHostToDevice));
    CUDA_ERR_CHK(cudaMemcpy(radius_d, &radius, sizeof(float), cudaMemcpyHostToDevice));

    // start device kernel; check for launch-configuration errors, which a
    // bare launch never reports
    checkNearPoints_cuda<<<blocks, threads>>>(point_num_d, xyz_d, radius_d, ptsCnt_d);
    CUDA_ERR_CHK(cudaGetLastError());

    // copy device to host (fixed: element type is int, not float — same
    // width, but the wrong sizeof was a latent bug)
    CUDA_ERR_CHK(cudaMemcpy(ptsCnt_h, ptsCnt_d, point_num*sizeof(int), cudaMemcpyDeviceToHost));

    // release gpu ram
    CUDA_ERR_CHK(cudaFree(point_num_d));
    CUDA_ERR_CHK(cudaFree(xyz_d));
    CUDA_ERR_CHK(cudaFree(radius_d));
    CUDA_ERR_CHK(cudaFree(ptsCnt_d));

    for (int i = 0; i < point_num; i++) {
        printf("index: %d adjacent point number: %d \n", i, ptsCnt_h[i]);
    }
    free(ptsCnt_h);   // was leaked in the original
}
fe6f8b561653db8bf2741cf0d0d7d0dcda1c0cab.hip
// !!! This is a file automatically generated by hipify!!!
#include "CUDAmatrixVector.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// Fixed: backslashes in #include paths are not portable; use forward slashes.
#include <Eigen/Eigen>
#include <Eigen/Dense>

//Eigen + CUDA
//https://stackoverflow.com/questions/23802209/how-to-work-with-eigen-in-cuda-kernels/41120980#41120980

/*
__global__ void cu_dot(Eigen::Vector3f *v1, Eigen::Vector3f *v2, double *out, size_t N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
    {
        out[idx] = v1[idx].dot(v2[idx]);
    }
    return;
}
*/

/*
__global__ void cu_getOffsetVector(Eigen::MatrixXf *basis, Eigen::VectorXf *variance, Eigen::VectorXf *sample, Eigen::VectorXf *out, size_t N)
{
    return;
}
*/

// Debug helper kernel: prints a marker from the device.
__global__ void outputHelper(){
    printf("TEST!!!");
}

// Computes out = basisMatrix * (varianceVector .* sampleVector), one thread
// per output row. basisMatrix is indexed as column-major (numVerts rows,
// numVariance cols) — column v starts at v*numVerts — matching Eigen's
// default storage order on the host.
// Thread 0 prints the first few inputs for debugging only.
__global__ void cu_getOffsetVector(int numVerts, int numVariance, float* basisMatrix, float* varianceVector, float* sampleVector, float* outOffsetVector)
{
    int i_thread = blockIdx.x*blockDim.x + threadIdx.x; //Result vector will have size of outOffsetVector
    if (i_thread >= numVerts) {
        return;
    }
    if (i_thread == 0) {
        printf("basisMatrix: %f , %f, %f \n", basisMatrix[0], basisMatrix[numVerts], basisMatrix[2 * numVerts]);
        printf("varianceVector: %f , %f, %f \n", varianceVector[0], varianceVector[1], varianceVector[2]);
        printf("sampleVector: %f , %f, %f \n", sampleVector[0], sampleVector[1], sampleVector[2]);
    }
    //Make sure the result row is 0.0 before we add the summs
    outOffsetVector[i_thread] = 0.0f;
    for (int v = 0; v < numVariance; v++) {
        int matrixIndex = v * numVerts + i_thread;
        outOffsetVector[i_thread] += basisMatrix[matrixIndex] * (varianceVector[v] * sampleVector[v]);
    }
    return;
}

// Host-side offset computation. Currently only validates dimensions; the
// actual computation is not implemented yet.
void getOffsetVector(const Eigen::MatrixXf &basis, const Eigen::MatrixXf &variance, const Eigen::MatrixXf &sample, Eigen::MatrixXf &out)
{
    if (basis.cols() != variance.size() || variance.size() != sample.size()) {
        printf("Dimension missmatch!");
        return;
    }
}

// Smoke test: builds a small Eigen basis/variance/sample, runs
// cu_getOffsetVector on the GPU and prints the result.
void runEigenCudaTest01()
{
    Eigen::Matrix2f m;
    m << 1.0f, 2.0f, 3.0f, 4.0f; //initializes row-wise (left to right)
    float* m_data = m.data();
    for (int i = 0; i < 4; i++) {
        std::cout << m_data[i] << std::endl; //acesses column wise 1,3,2,4 (top-down)
    }

    const int varianceSize = 3;
    const int verticesSize = 5;

    Eigen::Matrix<float, verticesSize, varianceSize> basisMat;
    basisMat << 1.0f, 2.0f, 3.0f,
                1.0f, 2.0f, 3.0f,
                1.0f, 2.0f, 3.0f,
                1.0f, 2.0f, 3.0f,
                1.0f, 2.0f, 3.0f;
    Eigen::Matrix<float, varianceSize, 1> varianceVec;
    varianceVec << 1.0f, 2.0f, 3.0f;
    Eigen::Matrix<float, varianceSize, 1> sampleVec;
    sampleVec << 2.0f, 2.0f, 2.0f;
    Eigen::Matrix<float, 1, verticesSize> resultVec;
    resultVec.setZero();

    // Fixed: the original `float* a, float* b, ...` repeated the type inside
    // one declarator list, which does not compile in C++.
    float *d_basisMatrix, *d_varianceVector, *d_sampleVector, *d_outOffsetVector;
    hipMalloc(&d_basisMatrix, varianceSize * verticesSize * sizeof(float));
    hipMalloc(&d_varianceVector, varianceSize * sizeof(float));
    hipMalloc(&d_sampleVector, varianceSize * sizeof(float));
    hipMalloc(&d_outOffsetVector, verticesSize * sizeof(float));

    hipMemcpy(d_basisMatrix, basisMat.data(), varianceSize * verticesSize * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_varianceVector, varianceVec.data(), varianceSize * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_sampleVector, sampleVec.data(), varianceSize * sizeof(float), hipMemcpyHostToDevice);

    const int problemSize = verticesSize;
    const int blockSize = 256;
    const int numBlocks = int((problemSize + blockSize - 1) / blockSize); //ceil-div: exact when problemSize divides evenly

    std::cout << "Starting Kernel" << std::endl;
    hipLaunchKernelGGL(( cu_getOffsetVector) , dim3(numBlocks), dim3(blockSize), 0, 0, verticesSize, varianceSize, d_basisMatrix, d_varianceVector, d_sampleVector, d_outOffsetVector);
    std::cout << "Finished Kernel" << std::endl;

    float *resultPointer = new float[verticesSize];
    hipMemcpy(resultPointer, d_outOffsetVector, verticesSize * sizeof(float), hipMemcpyDeviceToHost);

    std::cout << "Resultat: " << std::endl;
    for (int i = 0; i < verticesSize; i++) {
        std::cout << resultPointer[i] << std::endl;
    }

    // Fixed: release device buffers and the host result array (all were
    // leaked in the original).
    hipFree(d_basisMatrix);
    hipFree(d_varianceVector);
    hipFree(d_sampleVector);
    hipFree(d_outOffsetVector);
    delete[] resultPointer;

    //https://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html
    //See: How to write generic, but non-templated function?
    //getOffsetVector(basisMat, varianceVec, sampleVec, resultVec);
    return;
}
fe6f8b561653db8bf2741cf0d0d7d0dcda1c0cab.cu
#include "CUDAmatrixVector.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// Fixed: backslashes in #include paths are not portable; use forward slashes.
#include <Eigen/Eigen>
#include <Eigen/Dense>

//Eigen + CUDA
//https://stackoverflow.com/questions/23802209/how-to-work-with-eigen-in-cuda-kernels/41120980#41120980

/*
__global__ void cu_dot(Eigen::Vector3f *v1, Eigen::Vector3f *v2, double *out, size_t N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
    {
        out[idx] = v1[idx].dot(v2[idx]);
    }
    return;
}
*/

/*
__global__ void cu_getOffsetVector(Eigen::MatrixXf *basis, Eigen::VectorXf *variance, Eigen::VectorXf *sample, Eigen::VectorXf *out, size_t N)
{
    return;
}
*/

// Debug helper kernel: prints a marker from the device.
__global__ void outputHelper(){
    printf("TEST!!!");
}

// Computes out = basisMatrix * (varianceVector .* sampleVector), one thread
// per output row. basisMatrix is indexed as column-major (numVerts rows,
// numVariance cols) — column v starts at v*numVerts — matching Eigen's
// default storage order on the host.
// Thread 0 prints the first few inputs for debugging only.
__global__ void cu_getOffsetVector(int numVerts, int numVariance, float* basisMatrix, float* varianceVector, float* sampleVector, float* outOffsetVector)
{
    int i_thread = blockIdx.x*blockDim.x + threadIdx.x; //Result vector will have size of outOffsetVector
    if (i_thread >= numVerts) {
        return;
    }
    if (i_thread == 0) {
        printf("basisMatrix: %f , %f, %f \n", basisMatrix[0], basisMatrix[numVerts], basisMatrix[2 * numVerts]);
        printf("varianceVector: %f , %f, %f \n", varianceVector[0], varianceVector[1], varianceVector[2]);
        printf("sampleVector: %f , %f, %f \n", sampleVector[0], sampleVector[1], sampleVector[2]);
    }
    //Make sure the result row is 0.0 before we add the summs
    outOffsetVector[i_thread] = 0.0f;
    for (int v = 0; v < numVariance; v++) {
        int matrixIndex = v * numVerts + i_thread;
        outOffsetVector[i_thread] += basisMatrix[matrixIndex] * (varianceVector[v] * sampleVector[v]);
    }
    return;
}

// Host-side offset computation. Currently only validates dimensions; the
// actual computation is not implemented yet.
void getOffsetVector(const Eigen::MatrixXf &basis, const Eigen::MatrixXf &variance, const Eigen::MatrixXf &sample, Eigen::MatrixXf &out)
{
    if (basis.cols() != variance.size() || variance.size() != sample.size()) {
        printf("Dimension missmatch!");
        return;
    }
}

// Smoke test: builds a small Eigen basis/variance/sample, runs
// cu_getOffsetVector on the GPU and prints the result.
void runEigenCudaTest01()
{
    Eigen::Matrix2f m;
    m << 1.0f, 2.0f, 3.0f, 4.0f; //initializes row-wise (left to right)
    float* m_data = m.data();
    for (int i = 0; i < 4; i++) {
        std::cout << m_data[i] << std::endl; //acesses column wise 1,3,2,4 (top-down)
    }

    const int varianceSize = 3;
    const int verticesSize = 5;

    Eigen::Matrix<float, verticesSize, varianceSize> basisMat;
    basisMat << 1.0f, 2.0f, 3.0f,
                1.0f, 2.0f, 3.0f,
                1.0f, 2.0f, 3.0f,
                1.0f, 2.0f, 3.0f,
                1.0f, 2.0f, 3.0f;
    Eigen::Matrix<float, varianceSize, 1> varianceVec;
    varianceVec << 1.0f, 2.0f, 3.0f;
    Eigen::Matrix<float, varianceSize, 1> sampleVec;
    sampleVec << 2.0f, 2.0f, 2.0f;
    Eigen::Matrix<float, 1, verticesSize> resultVec;
    resultVec.setZero();

    // Fixed: the original `float* a, float* b, ...` repeated the type inside
    // one declarator list, which does not compile in C++.
    float *d_basisMatrix, *d_varianceVector, *d_sampleVector, *d_outOffsetVector;
    cudaMalloc(&d_basisMatrix, varianceSize * verticesSize * sizeof(float));
    cudaMalloc(&d_varianceVector, varianceSize * sizeof(float));
    cudaMalloc(&d_sampleVector, varianceSize * sizeof(float));
    cudaMalloc(&d_outOffsetVector, verticesSize * sizeof(float));

    cudaMemcpy(d_basisMatrix, basisMat.data(), varianceSize * verticesSize * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_varianceVector, varianceVec.data(), varianceSize * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_sampleVector, sampleVec.data(), varianceSize * sizeof(float), cudaMemcpyHostToDevice);

    const int problemSize = verticesSize;
    const int blockSize = 256;
    const int numBlocks = int((problemSize + blockSize - 1) / blockSize); //ceil-div: exact when problemSize divides evenly

    std::cout << "Starting Kernel" << std::endl;
    cu_getOffsetVector <<<numBlocks, blockSize>>> (verticesSize, varianceSize, d_basisMatrix, d_varianceVector, d_sampleVector, d_outOffsetVector);
    std::cout << "Finished Kernel" << std::endl;

    float *resultPointer = new float[verticesSize];
    cudaMemcpy(resultPointer, d_outOffsetVector, verticesSize * sizeof(float), cudaMemcpyDeviceToHost);

    std::cout << "Resultat: " << std::endl;
    for (int i = 0; i < verticesSize; i++) {
        std::cout << resultPointer[i] << std::endl;
    }

    // Fixed: release device buffers and the host result array (all were
    // leaked in the original).
    cudaFree(d_basisMatrix);
    cudaFree(d_varianceVector);
    cudaFree(d_sampleVector);
    cudaFree(d_outOffsetVector);
    delete[] resultPointer;

    //https://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html
    //See: How to write generic, but non-templated function?
    //getOffsetVector(basisMat, varianceVec, sampleVec, resultVec);
    return;
}
6fee39b4fb629af8324bddf264f2b265be41e41d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Element-wise vector add: y[i] = x[i] + y[i] for i in [0, n).
// Thread t handles indices t, t+blockDim.x, t+2*blockDim.x, ...
// Note: the kernel never reads blockIdx, so launching more than one block
// would make every block repeat the same work.
__global__ void add(int n, float *x, float *y)
{
    int idx = threadIdx.x;
    while (idx < n) {
        y[idx] = x[idx] + y[idx];
        idx += blockDim.x;
    }
}
6fee39b4fb629af8324bddf264f2b265be41e41d.cu
#include "includes.h"

// Element-wise vector add: y[i] = x[i] + y[i] for i in [0, n).
// Thread t handles indices t, t+blockDim.x, t+2*blockDim.x, ...
// Note: the kernel never reads blockIdx, so launching more than one block
// would make every block repeat the same work.
__global__ void add(int n, float *x, float *y)
{
    int idx = threadIdx.x;
    while (idx < n) {
        y[idx] = x[idx] + y[idx];
        idx += blockDim.x;
    }
}
304f0fc1da3d3b1e3a1bfebc07ff5e65cc489079.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> c d s @author Peng Du @author Tingxing Dong @author Mark Gates This file implements upper case, and is called by ztrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "common_magma.h" #include "ztrtri.h" /* This inverts the diagonal IB by IB inner blocks of A, and stores the results in d_dinvA. Each thread block with IB threads does one inner block. Each thread deals with one row of the inner block. */ __global__ void ztrtri_diag_kernel_upper( magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA) { int tx = threadIdx.x; int bx = blockIdx.x; int blk_ind = bx*IB; //int ind = blk_ind + tx; A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind) // TODO sB should be [IB][IB+1] to avoid bank conflicts, right? __shared__ magmaDoubleComplex sB[IB*IB]; magmaDoubleComplex y_tx; // load upper triangle of inner block of A; zero lower triangle & outside matrix #pragma unroll for( int j=0; j < IB; j++ ) { if (tx <= j && blk_ind + j < n) { sB[tx + j*IB] = A[tx + j*lda]; } else { sB[tx + j*IB] = MAGMA_Z_ZERO; } } __syncthreads(); // invert the diagonal if (diag == MagmaUnit) { sB[tx + tx*IB] = MAGMA_Z_ONE; } else { if ( sB[tx + tx*IB] == MAGMA_Z_ZERO ) { // singular or outside matrix sB[tx + tx*IB] = MAGMA_Z_ONE; } else { sB[tx + tx*IB] = MAGMA_Z_ONE / sB[tx + tx*IB]; } } // compute elements 0:j-1 of j-th column. 
for( int j=1; j < IB; j++ ) { if ( tx < j ) { // trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j) // each thread sums one element, y[tx] y_tx = MAGMA_Z_ZERO; #pragma unroll for( int k=0; k < j; k++ ) y_tx += sB[tx + k*IB] * sB[k + j*IB]; // scal: sB(0:j-1, j) = -sB(j,j) * y sB[tx + j*IB] = -sB[j + j*IB] * y_tx; } __syncthreads(); } // go to the (bx / ib_per_NB) outer NB*NB block, // then the (bx % ib_per_NB) inner IB*IB block inside that. int ib_per_NB = NB/IB; d_dinvA += (bx / ib_per_NB)*NB*NB + (bx % ib_per_NB)*(NB*IB + IB); // write result #pragma unroll for( int j=0; j < IB; j++ ) { d_dinvA[tx + j*NB] = sB[tx + j*IB]; } } /* Let A be an NB*NB upper triangular matrix, and B its inverse. Then the block decomposition [ A11 A12 ] * [ B11 B12 ] = [ I 0 ] [ 0 A22 ] [ 0 B22 ] [ 0 I ] yields A11*B11 = I ==> B11 = A11^{-1}, A22*B22 = I ==> B22 = A22^{-1}, A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22. ztrtri_diag_kernel inverts A11 and A22. triple_zgemm16 routines multiply: part 1: B12 = A12 * B22, part 2: B12 = -B11 * B12. At this level, inner block is jb=16, with one 4x4 thread block per inner block. Each submatrix Aij and Bij is jb x jb. The submatrix dimension is multiplied by 2 at each level, so the next level is jb*2 = 32. A "page" is the next bigger block, here jb*2=32, [ B11 B12 ] which contains [ 0 B22 ]. Outer blocks are NB x NB. A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on the bottom. Unfortunately, this means checking every single reference. We could easily verify that A12 is full, and select between a fast version without checks and a slow version with checks. B is stored in workspace that is a full multiple of NB x NB; no checks needed. We split this into part1 & part2 to synchronize all blocks and make sure that writes to B12 are observed by all blocks. 
*/ /* * B12 = A12 * B22 */ __global__ void triple_zgemm16_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm16_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) 
inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; // shadows lda argument int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm32_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm32_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // 
then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm64_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm64_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) 
inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm_above64_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1. // Each section needs all of the B matrix, so C cannot overwrite B. // Therefore, store B21 temporarily in the previously unused B12 matrix // (i.e., above diagonal), then in part 3, zero out B12. // // Kernels with jb <= 64 don't have this problem, because only the // NT x 16 section of C that overwrites the same section of B depends // on that section of B. // // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B12; write to B21 temp location A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm_above64_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % 
pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 B = d_dinvA + jb; // B12, read from B21 temp location C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * zero out B21 temp location */ __global__ void triple_zgemm_above64_part3_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part three---------------------------// { // zero out B21 temp location magmaDoubleComplex *B21; int ldb = NB; B21 = d_dinvA + jb; B21 += ibx + id + iby*ldb; #pragma unroll for( int i = 0; i < 16; i++ ) { B21[i*ldb] = MAGMA_Z_ZERO; } } }
304f0fc1da3d3b1e3a1bfebc07ff5e65cc489079.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> c d s @author Peng Du @author Tingxing Dong @author Mark Gates This file implements upper case, and is called by ztrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "common_magma.h" #include "ztrtri.h" /* This inverts the diagonal IB by IB inner blocks of A, and stores the results in d_dinvA. Each thread block with IB threads does one inner block. Each thread deals with one row of the inner block. */ __global__ void ztrtri_diag_kernel_upper( magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA) { int tx = threadIdx.x; int bx = blockIdx.x; int blk_ind = bx*IB; //int ind = blk_ind + tx; A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind) // TODO sB should be [IB][IB+1] to avoid bank conflicts, right? __shared__ magmaDoubleComplex sB[IB*IB]; magmaDoubleComplex y_tx; // load upper triangle of inner block of A; zero lower triangle & outside matrix #pragma unroll for( int j=0; j < IB; j++ ) { if (tx <= j && blk_ind + j < n) { sB[tx + j*IB] = A[tx + j*lda]; } else { sB[tx + j*IB] = MAGMA_Z_ZERO; } } __syncthreads(); // invert the diagonal if (diag == MagmaUnit) { sB[tx + tx*IB] = MAGMA_Z_ONE; } else { if ( sB[tx + tx*IB] == MAGMA_Z_ZERO ) { // singular or outside matrix sB[tx + tx*IB] = MAGMA_Z_ONE; } else { sB[tx + tx*IB] = MAGMA_Z_ONE / sB[tx + tx*IB]; } } // compute elements 0:j-1 of j-th column. 
for( int j=1; j < IB; j++ ) { if ( tx < j ) { // trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j) // each thread sums one element, y[tx] y_tx = MAGMA_Z_ZERO; #pragma unroll for( int k=0; k < j; k++ ) y_tx += sB[tx + k*IB] * sB[k + j*IB]; // scal: sB(0:j-1, j) = -sB(j,j) * y sB[tx + j*IB] = -sB[j + j*IB] * y_tx; } __syncthreads(); } // go to the (bx / ib_per_NB) outer NB*NB block, // then the (bx % ib_per_NB) inner IB*IB block inside that. int ib_per_NB = NB/IB; d_dinvA += (bx / ib_per_NB)*NB*NB + (bx % ib_per_NB)*(NB*IB + IB); // write result #pragma unroll for( int j=0; j < IB; j++ ) { d_dinvA[tx + j*NB] = sB[tx + j*IB]; } } /* Let A be an NB*NB upper triangular matrix, and B its inverse. Then the block decomposition [ A11 A12 ] * [ B11 B12 ] = [ I 0 ] [ 0 A22 ] [ 0 B22 ] [ 0 I ] yields A11*B11 = I ==> B11 = A11^{-1}, A22*B22 = I ==> B22 = A22^{-1}, A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22. ztrtri_diag_kernel inverts A11 and A22. triple_zgemm16 routines multiply: part 1: B12 = A12 * B22, part 2: B12 = -B11 * B12. At this level, inner block is jb=16, with one 4x4 thread block per inner block. Each submatrix Aij and Bij is jb x jb. The submatrix dimension is multiplied by 2 at each level, so the next level is jb*2 = 32. A "page" is the next bigger block, here jb*2=32, [ B11 B12 ] which contains [ 0 B22 ]. Outer blocks are NB x NB. A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on the bottom. Unfortunately, this means checking every single reference. We could easily verify that A12 is full, and select between a fast version without checks and a slow version with checks. B is stored in workspace that is a full multiple of NB x NB; no checks needed. We split this into part1 & part2 to synchronize all blocks and make sure that writes to B12 are observed by all blocks. 
*/ /* * B12 = A12 * B22 */ __global__ void triple_zgemm16_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm16_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) 
inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; // shadows lda argument int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm32_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm32_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // 
then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm64_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm64_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) 
inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm_above64_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1. // Each section needs all of the B matrix, so C cannot overwrite B. // Therefore, store B21 temporarily in the previously unused B12 matrix // (i.e., above diagonal), then in part 3, zero out B12. // // Kernels with jb <= 64 don't have this problem, because only the // NT x 16 section of C that overwrites the same section of B depends // on that section of B. // // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B12; write to B21 temp location A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm_above64_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % 
pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 B = d_dinvA + jb; // B12, read from B21 temp location C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4] = {0, 0, 0, 0}; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; zaxpy16( rA[0], &sB[12][0], rC ); zaxpy16( rA[1], &sB[13][0], rC ); zaxpy16( rA[2], &sB[14][0], rC ); zaxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * zero out B21 temp location */ __global__ void triple_zgemm_above64_part3_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. 
int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part three---------------------------// { // zero out B21 temp location magmaDoubleComplex *B21; int ldb = NB; B21 = d_dinvA + jb; B21 += ibx + id + iby*ldb; #pragma unroll for( int i = 0; i < 16; i++ ) { B21[i*ldb] = MAGMA_Z_ZERO; } } }
4cb91cc2c9a5a11947dd41bcce923d1496647618.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define THREADS 5 #define BLOCKS 1 __global__ void testFunction(float *dev_a) { int thread = threadIdx.x; if(thread == 0) { printf("dev[%d] = %.2f;\n", thread, dev_a[thread+2]); int c = 0; if (thread == 0) { int dev_b[2]; dev_b[0] = 0; dev_b[1] = 1; c = dev_b[0] + dev_b[1]; } if (thread == 1) { int dev_b[3]; dev_b[0] = 0; dev_b[1] = 1; dev_b[2] = 2; c = dev_b[0] + dev_b[1] + dev_b[2]; } if (thread == 2) { int dev_b[4]; dev_b[0] = 0; dev_b[1] = 1; dev_b[2] = 2; dev_b[3] = 3; c = dev_b[0] + dev_b[1] + dev_b[2] + dev_b[3]; } printf("c = %d;", c); } } int main() { float a[THREADS] = { 1, 2, 3, 4, 5 }; printf("BEFORE START 1\n"); for(int i = 0; i<THREADS; i++) { printf("a[%d] = %.2f; ", i, a[i]); } printf("\nBEFORE END 1\n"); float *dev_a; hipMalloc((void**)&dev_a, THREADS*sizeof(float)); hipMemcpy(dev_a, a, THREADS*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( testFunction), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_a); hipFree(dev_a); printf("\nafter kernel.\n"); return 0; }
4cb91cc2c9a5a11947dd41bcce923d1496647618.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda.h" #include <stdio.h> #include <stdlib.h> #define THREADS 5 #define BLOCKS 1 __global__ void testFunction(float *dev_a) { int thread = threadIdx.x; if(thread == 0) { printf("dev[%d] = %.2f;\n", thread, dev_a[thread+2]); int c = 0; if (thread == 0) { int dev_b[2]; dev_b[0] = 0; dev_b[1] = 1; c = dev_b[0] + dev_b[1]; } if (thread == 1) { int dev_b[3]; dev_b[0] = 0; dev_b[1] = 1; dev_b[2] = 2; c = dev_b[0] + dev_b[1] + dev_b[2]; } if (thread == 2) { int dev_b[4]; dev_b[0] = 0; dev_b[1] = 1; dev_b[2] = 2; dev_b[3] = 3; c = dev_b[0] + dev_b[1] + dev_b[2] + dev_b[3]; } printf("c = %d;", c); } } int main() { float a[THREADS] = { 1, 2, 3, 4, 5 }; printf("BEFORE START 1\n"); for(int i = 0; i<THREADS; i++) { printf("a[%d] = %.2f; ", i, a[i]); } printf("\nBEFORE END 1\n"); float *dev_a; cudaMalloc((void**)&dev_a, THREADS*sizeof(float)); cudaMemcpy(dev_a, a, THREADS*sizeof(float), cudaMemcpyHostToDevice); testFunction<<<BLOCKS, THREADS>>>(dev_a); cudaFree(dev_a); printf("\nafter kernel.\n"); return 0; }
79647a49eed6ff87c99b42eeea034ddda9f75383.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. 
*/ /* This example demonstrates how to use the CUBLAS library * by scaling an array of floating-point values on the device * and comparing the result to the same operation performed * on the host. */ /* Includes, system */ #include <stdio.h> #include <stdlib.h> #include <string.h> /* Includes, cuda */ #include <hip/hip_runtime.h> #include <rocblas.h> #include <helper_cuda.h> #include "kernel.h" /* Matrix size */ #define N (275) /* Host implementation of a simple version of sgemm */ /* Main */ int main(int argc, char **argv) { hipblasStatus_t status; float *h_A; float *h_B; float *h_C; float *h_C_ref; float *d_A = 0; float *d_B = 0; float *d_C = 0; float alpha = 1.0f; float beta = 0.0f; int n2 = N * N; int i; float error_norm; float ref_norm; float diff; hipblasHandle_t handle; int dev = findCudaDevice(argc, (const char **) argv); if (dev == -1) { return EXIT_FAILURE; } /* Initialize CUBLAS */ printf("simpleCUBLAS test running..\n"); status = hipblasCreate(&handle); h_A = (float*) malloc(3*2*sizeof(float)); h_B = (float*) malloc(2*3*sizeof(float)); h_C = (float*) malloc(3*3*sizeof(float)); for(int i = 0; i < 6; i++) { h_A[i] = 1; h_B[i] = 1; } h_A[0] = 2; h_A[5] = 2; h_A[3] = 3; for(int i = 0; i < 6; i++) { printf("%f \t", h_A[i]); if((i+1)%2 == 0) { printf("\n\r"); } } printf("\n\n\r"); for(int i = 0; i < 6; i++) { printf("%f \t", h_B[i]); if((i+1)%3 == 0) { printf("\n\r"); } } printf("\n\n\r"); hipError_t cuda_error = hipSuccess; cuda_error = hipMalloc((void**)&d_A, 24); cuda_error = hipMalloc((void**)&d_B, 2*3*sizeof(float)); cuda_error = hipMalloc((void**)&d_C, 3*3*sizeof(float)); hipMemcpy(d_A, h_A, 3*2* sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, 2*3*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_C, h_C, 3*3*sizeof(float), hipMemcpyHostToDevice); // hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 3, 3, 2, // (const float*) &alpha, (const float*) d_A, 3, // (const float*) d_B, 2, (const float*) &beta, // d_C, 3); hipLaunchKernelGGL(( 
simple_sgemm), dim3(1),dim3(10), 0, 0, 6, alpha, d_A, d_B, beta, d_C); hipMemcpy(h_C, d_C, 3*3*sizeof(float), hipMemcpyDeviceToHost); for(int i = 0; i < 9; i++) { printf("%f \t", h_C[i]); if((i+1)%3 == 0) { printf("\n\r"); } } hipblasDestroy(handle); }
79647a49eed6ff87c99b42eeea034ddda9f75383.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* This example demonstrates how to use the CUBLAS library * by scaling an array of floating-point values on the device * and comparing the result to the same operation performed * on the host. 
*/ /* Includes, system */ #include <stdio.h> #include <stdlib.h> #include <string.h> /* Includes, cuda */ #include <cuda_runtime.h> #include <cublas_v2.h> #include <helper_cuda.h> #include "kernel.h" /* Matrix size */ #define N (275) /* Host implementation of a simple version of sgemm */ /* Main */ int main(int argc, char **argv) { cublasStatus_t status; float *h_A; float *h_B; float *h_C; float *h_C_ref; float *d_A = 0; float *d_B = 0; float *d_C = 0; float alpha = 1.0f; float beta = 0.0f; int n2 = N * N; int i; float error_norm; float ref_norm; float diff; cublasHandle_t handle; int dev = findCudaDevice(argc, (const char **) argv); if (dev == -1) { return EXIT_FAILURE; } /* Initialize CUBLAS */ printf("simpleCUBLAS test running..\n"); status = cublasCreate(&handle); h_A = (float*) malloc(3*2*sizeof(float)); h_B = (float*) malloc(2*3*sizeof(float)); h_C = (float*) malloc(3*3*sizeof(float)); for(int i = 0; i < 6; i++) { h_A[i] = 1; h_B[i] = 1; } h_A[0] = 2; h_A[5] = 2; h_A[3] = 3; for(int i = 0; i < 6; i++) { printf("%f \t", h_A[i]); if((i+1)%2 == 0) { printf("\n\r"); } } printf("\n\n\r"); for(int i = 0; i < 6; i++) { printf("%f \t", h_B[i]); if((i+1)%3 == 0) { printf("\n\r"); } } printf("\n\n\r"); cudaError_t cuda_error = cudaSuccess; cuda_error = cudaMalloc((void**)&d_A, 24); cuda_error = cudaMalloc((void**)&d_B, 2*3*sizeof(float)); cuda_error = cudaMalloc((void**)&d_C, 3*3*sizeof(float)); cudaMemcpy(d_A, h_A, 3*2* sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, 2*3*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_C, h_C, 3*3*sizeof(float), cudaMemcpyHostToDevice); // cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 3, 3, 2, // (const float*) &alpha, (const float*) d_A, 3, // (const float*) d_B, 2, (const float*) &beta, // d_C, 3); simple_sgemm<<<1,10>>>(6, alpha, d_A, d_B, beta, d_C); cudaMemcpy(h_C, d_C, 3*3*sizeof(float), cudaMemcpyDeviceToHost); for(int i = 0; i < 9; i++) { printf("%f \t", h_C[i]); if((i+1)%3 == 0) { printf("\n\r"); } } 
cublasDestroy(handle); }
0cfe7e4a9a7e9019c8404385d2d6d93d3d1d7892.hip
// !!! This is a file automatically generated by hipify!!! #include "light_transport_common.cuh" namespace VLR { struct DebugRenderingPayload { KernelRNG rng; WavelengthSamples wls; SampledSpectrum value; }; rtDeclareVariable(DebugRenderingPayload, sm_debugPayload, rtPayload, ); rtDeclareVariable(DebugRenderingAttribute, pv_debugRenderingAttribute, , ); // Context-scope Variables rtDeclareVariable(optix::uint2, pv_imageSize, , ); rtDeclareVariable(uint32_t, pv_numAccumFrames, , ); rtDeclareVariable(ProgSigSampleLensPosition, pv_progSampleLensPosition, , ); rtDeclareVariable(ProgSigSampleIDF, pv_progSampleIDF, , ); rtBuffer<KernelRNG, 2> pv_rngBuffer; rtBuffer<SpectrumStorage, 2> pv_outputBuffer; // for debug rendering RT_FUNCTION TripletSpectrum debugRenderingAttributeToSpectrum(const SurfacePoint &surfPt, DebugRenderingAttribute attribute) { TripletSpectrum value; switch (attribute) { case DebugRenderingAttribute::GeometricNormal: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, std::fmax(0.0f, 0.5f + 0.5f * surfPt.geometricNormal.x), std::fmax(0.0f, 0.5f + 0.5f * surfPt.geometricNormal.y), std::fmax(0.0f, 0.5f + 0.5f * surfPt.geometricNormal.z)); break; case DebugRenderingAttribute::ShadingTangent: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.x.x), std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.x.y), std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.x.z)); break; case DebugRenderingAttribute::ShadingBitangent: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.y.x), std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.y.y), std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.y.z)); break; case DebugRenderingAttribute::ShadingNormal: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.z.x), 
std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.z.y), std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.z.z)); break; case DebugRenderingAttribute::TextureCoordinates: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, surfPt.texCoord.u - ::floor(surfPt.texCoord.u), surfPt.texCoord.v - ::floor(surfPt.texCoord.v), 0.0f); break; case DebugRenderingAttribute::GeometricVsShadingNormal: { float sim = dot(surfPt.geometricNormal, surfPt.shadingFrame.z); bool opposite = sim < 0.0f; sim = ::fabs(sim); const float coeff = 5.0f; float sValue = 0.5f + coeff * (sim - 1); sValue = clamp(sValue, 0.0f, 1.0f); value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, sValue, opposite ? 0 : sValue, opposite ? 0 : sValue); break; } case DebugRenderingAttribute::ShadingFrameLengths: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, clamp(0.5f + 10 * (surfPt.shadingFrame.x.length() - 1), 0.0f, 1.0f), clamp(0.5f + 10 * (surfPt.shadingFrame.y.length() - 1), 0.0f, 1.0f), clamp(0.5f + 10 * (surfPt.shadingFrame.z.length() - 1), 0.0f, 1.0f)); break; case DebugRenderingAttribute::ShadingFrameOrthogonality: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, clamp(0.5f + 100 * dot(surfPt.shadingFrame.x, surfPt.shadingFrame.y), 0.0f, 1.0f), clamp(0.5f + 100 * dot(surfPt.shadingFrame.y, surfPt.shadingFrame.z), 0.0f, 1.0f), clamp(0.5f + 100 * dot(surfPt.shadingFrame.z, surfPt.shadingFrame.x), 0.0f, 1.0f)); break; default: break; } return value; } // Common Any Hit Program for All Primitive Types and Materials RT_PROGRAM void debugRenderingAnyHitWithAlpha() { HitPointParameter hitPointParam = a_hitPointParam; SurfacePoint surfPt; float hypAreaPDF; pv_progDecodeHitPoint(hitPointParam, &surfPt, &hypAreaPDF); float alpha = calcNode(pv_nodeAlpha, 1.0f, surfPt, sm_debugPayload.wls); // Stochastic Alpha Test if (sm_debugPayload.rng.getFloat0cTo1o() >= alpha) rtIgnoreIntersection(); } // 
Common Closest Hit Program for All Primitive Types and Materials RT_PROGRAM void debugRenderingClosestHit() { WavelengthSamples &wls = sm_payload.wls; SurfacePoint surfPt; float hypAreaPDF; calcSurfacePoint(&surfPt, &hypAreaPDF); //if (!surfPt.shadingFrame.x.allFinite() || !surfPt.shadingFrame.y.allFinite() || !surfPt.shadingFrame.z.allFinite()) // vlrprintf("(%g, %g, %g), (%g, %g, %g), (%g, %g, %g)\n", // surfPt.shadingFrame.x.x, surfPt.shadingFrame.x.y, surfPt.shadingFrame.x.z, // surfPt.shadingFrame.y.x, surfPt.shadingFrame.y.y, surfPt.shadingFrame.y.z, // surfPt.shadingFrame.z.x, surfPt.shadingFrame.z.y, surfPt.shadingFrame.z.z); if (pv_debugRenderingAttribute == DebugRenderingAttribute::BaseColor) { const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_materialIndex]; BSDF bsdf(matDesc, surfPt, wls); const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[matDesc.bsdfProcedureSetIndex]; auto progGetBaseColor = (ProgSigBSDFGetBaseColor)procSet.progGetBaseColor; sm_debugPayload.value = progGetBaseColor((const uint32_t*)&bsdf); } else { sm_debugPayload.value = debugRenderingAttributeToSpectrum(surfPt, pv_debugRenderingAttribute).evaluate(wls); } } // JP: Intersection/Bounding Box ProgramClosest Hit Program // OptiXBVHLBVHAABB // Miss Program RT_PROGRAM void debugRenderingMiss() { WavelengthSamples &wls = sm_payload.wls; Vector3D direction = asVector3D(sm_ray.direction); float phi, theta; direction.toPolarYUp(&theta, &phi); float sinPhi, cosPhi; VLR::sincos(phi, &sinPhi, &cosPhi); Vector3D texCoord0Dir = normalize(Vector3D(-cosPhi, 0.0f, -sinPhi)); ReferenceFrame shadingFrame; shadingFrame.x = texCoord0Dir; shadingFrame.z = -direction; shadingFrame.y = cross(shadingFrame.z, shadingFrame.x); SurfacePoint surfPt; surfPt.position = Point3D(direction.x, direction.y, direction.z); surfPt.shadingFrame = shadingFrame; surfPt.isPoint = false; surfPt.atInfinity = true; surfPt.geometricNormal = -direction; surfPt.u = phi; surfPt.v = theta; phi += 
pv_envLightDescriptor.body.asInfSphere.rotationPhi; phi = phi - ::floor(phi / (2 * M_PIf)) * 2 * M_PIf; surfPt.texCoord = TexCoord2D(phi / (2 * M_PIf), theta / M_PIf); if (pv_debugRenderingAttribute == DebugRenderingAttribute::BaseColor) { sm_debugPayload.value = SampledSpectrum::Zero(); } else { sm_debugPayload.value = debugRenderingAttributeToSpectrum(surfPt, pv_debugRenderingAttribute).evaluate(wls); } } // Common Ray Generation Program for All Camera Types RT_PROGRAM void debugRenderingRayGeneration() { KernelRNG rng = pv_rngBuffer[sm_launchIndex]; optix::float2 p = make_float2(sm_launchIndex.x + rng.getFloat0cTo1o(), sm_launchIndex.y + rng.getFloat0cTo1o()); float selectWLPDF; WavelengthSamples wls = WavelengthSamples::createWithEqualOffsets(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), &selectWLPDF); LensPosSample We0Sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); LensPosQueryResult We0Result; SampledSpectrum We0 = pv_progSampleLensPosition(wls, We0Sample, &We0Result); IDFSample We1Sample(p.x / pv_imageSize.x, p.y / pv_imageSize.y); IDFQueryResult We1Result; SampledSpectrum We1 = pv_progSampleIDF(We0Result.surfPt, wls, We1Sample, &We1Result); Vector3D rayDir = We0Result.surfPt.fromLocal(We1Result.dirLocal); SampledSpectrum alpha = (We0 * We1) * (We0Result.surfPt.calcCosTerm(rayDir) / (We0Result.areaPDF * We1Result.dirPDF * selectWLPDF)); optix::Ray ray = optix::make_Ray(asOptiXType(We0Result.surfPt.position), asOptiXType(rayDir), RayType::DebugPrimary, 0.0f, FLT_MAX); DebugRenderingPayload payload; payload.rng = rng; payload.wls = wls; rtTrace(pv_topGroup, ray, payload); pv_rngBuffer[sm_launchIndex] = payload.rng; if (!payload.value.allFinite()) { vlrprintf("Pass %u, (%u, %u): Not a finite value.\n", pv_numAccumFrames, sm_launchIndex.x, sm_launchIndex.y); return; } if (pv_numAccumFrames == 1) pv_outputBuffer[sm_launchIndex].reset(); pv_outputBuffer[sm_launchIndex].add(wls, payload.value / selectWLPDF); } // Exception Program RT_PROGRAM void 
debugRenderingException() { //uint32_t code = rtGetExceptionCode(); rtPrintExceptionDetails(); } }
0cfe7e4a9a7e9019c8404385d2d6d93d3d1d7892.cu
#include "light_transport_common.cuh" namespace VLR { struct DebugRenderingPayload { KernelRNG rng; WavelengthSamples wls; SampledSpectrum value; }; rtDeclareVariable(DebugRenderingPayload, sm_debugPayload, rtPayload, ); rtDeclareVariable(DebugRenderingAttribute, pv_debugRenderingAttribute, , ); // Context-scope Variables rtDeclareVariable(optix::uint2, pv_imageSize, , ); rtDeclareVariable(uint32_t, pv_numAccumFrames, , ); rtDeclareVariable(ProgSigSampleLensPosition, pv_progSampleLensPosition, , ); rtDeclareVariable(ProgSigSampleIDF, pv_progSampleIDF, , ); rtBuffer<KernelRNG, 2> pv_rngBuffer; rtBuffer<SpectrumStorage, 2> pv_outputBuffer; // for debug rendering RT_FUNCTION TripletSpectrum debugRenderingAttributeToSpectrum(const SurfacePoint &surfPt, DebugRenderingAttribute attribute) { TripletSpectrum value; switch (attribute) { case DebugRenderingAttribute::GeometricNormal: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, std::fmax(0.0f, 0.5f + 0.5f * surfPt.geometricNormal.x), std::fmax(0.0f, 0.5f + 0.5f * surfPt.geometricNormal.y), std::fmax(0.0f, 0.5f + 0.5f * surfPt.geometricNormal.z)); break; case DebugRenderingAttribute::ShadingTangent: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.x.x), std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.x.y), std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.x.z)); break; case DebugRenderingAttribute::ShadingBitangent: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.y.x), std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.y.y), std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.y.z)); break; case DebugRenderingAttribute::ShadingNormal: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.z.x), std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.z.y), 
std::fmax(0.0f, 0.5f + 0.5f * surfPt.shadingFrame.z.z)); break; case DebugRenderingAttribute::TextureCoordinates: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, surfPt.texCoord.u - std::floor(surfPt.texCoord.u), surfPt.texCoord.v - std::floor(surfPt.texCoord.v), 0.0f); break; case DebugRenderingAttribute::GeometricVsShadingNormal: { float sim = dot(surfPt.geometricNormal, surfPt.shadingFrame.z); bool opposite = sim < 0.0f; sim = std::fabs(sim); const float coeff = 5.0f; float sValue = 0.5f + coeff * (sim - 1); sValue = clamp(sValue, 0.0f, 1.0f); value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, sValue, opposite ? 0 : sValue, opposite ? 0 : sValue); break; } case DebugRenderingAttribute::ShadingFrameLengths: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, clamp(0.5f + 10 * (surfPt.shadingFrame.x.length() - 1), 0.0f, 1.0f), clamp(0.5f + 10 * (surfPt.shadingFrame.y.length() - 1), 0.0f, 1.0f), clamp(0.5f + 10 * (surfPt.shadingFrame.z.length() - 1), 0.0f, 1.0f)); break; case DebugRenderingAttribute::ShadingFrameOrthogonality: value = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65, clamp(0.5f + 100 * dot(surfPt.shadingFrame.x, surfPt.shadingFrame.y), 0.0f, 1.0f), clamp(0.5f + 100 * dot(surfPt.shadingFrame.y, surfPt.shadingFrame.z), 0.0f, 1.0f), clamp(0.5f + 100 * dot(surfPt.shadingFrame.z, surfPt.shadingFrame.x), 0.0f, 1.0f)); break; default: break; } return value; } // Common Any Hit Program for All Primitive Types and Materials RT_PROGRAM void debugRenderingAnyHitWithAlpha() { HitPointParameter hitPointParam = a_hitPointParam; SurfacePoint surfPt; float hypAreaPDF; pv_progDecodeHitPoint(hitPointParam, &surfPt, &hypAreaPDF); float alpha = calcNode(pv_nodeAlpha, 1.0f, surfPt, sm_debugPayload.wls); // Stochastic Alpha Test if (sm_debugPayload.rng.getFloat0cTo1o() >= alpha) rtIgnoreIntersection(); } // Common Closest Hit Program for All Primitive Types 
and Materials RT_PROGRAM void debugRenderingClosestHit() { WavelengthSamples &wls = sm_payload.wls; SurfacePoint surfPt; float hypAreaPDF; calcSurfacePoint(&surfPt, &hypAreaPDF); //if (!surfPt.shadingFrame.x.allFinite() || !surfPt.shadingFrame.y.allFinite() || !surfPt.shadingFrame.z.allFinite()) // vlrprintf("(%g, %g, %g), (%g, %g, %g), (%g, %g, %g)\n", // surfPt.shadingFrame.x.x, surfPt.shadingFrame.x.y, surfPt.shadingFrame.x.z, // surfPt.shadingFrame.y.x, surfPt.shadingFrame.y.y, surfPt.shadingFrame.y.z, // surfPt.shadingFrame.z.x, surfPt.shadingFrame.z.y, surfPt.shadingFrame.z.z); if (pv_debugRenderingAttribute == DebugRenderingAttribute::BaseColor) { const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_materialIndex]; BSDF bsdf(matDesc, surfPt, wls); const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[matDesc.bsdfProcedureSetIndex]; auto progGetBaseColor = (ProgSigBSDFGetBaseColor)procSet.progGetBaseColor; sm_debugPayload.value = progGetBaseColor((const uint32_t*)&bsdf); } else { sm_debugPayload.value = debugRenderingAttributeToSpectrum(surfPt, pv_debugRenderingAttribute).evaluate(wls); } } // JP: 本当は無限大の球のIntersection/Bounding Box Programを使用して環境光に関する処理もClosest Hit Programで統一的に行いたい。 // が、OptiXのBVHビルダーがLBVHベースなので無限大のAABBを生成するのは危険。 // 仕方なくMiss Programで環境光を処理する。 RT_PROGRAM void debugRenderingMiss() { WavelengthSamples &wls = sm_payload.wls; Vector3D direction = asVector3D(sm_ray.direction); float phi, theta; direction.toPolarYUp(&theta, &phi); float sinPhi, cosPhi; VLR::sincos(phi, &sinPhi, &cosPhi); Vector3D texCoord0Dir = normalize(Vector3D(-cosPhi, 0.0f, -sinPhi)); ReferenceFrame shadingFrame; shadingFrame.x = texCoord0Dir; shadingFrame.z = -direction; shadingFrame.y = cross(shadingFrame.z, shadingFrame.x); SurfacePoint surfPt; surfPt.position = Point3D(direction.x, direction.y, direction.z); surfPt.shadingFrame = shadingFrame; surfPt.isPoint = false; surfPt.atInfinity = true; surfPt.geometricNormal = -direction; surfPt.u = phi; 
surfPt.v = theta; phi += pv_envLightDescriptor.body.asInfSphere.rotationPhi; phi = phi - std::floor(phi / (2 * M_PIf)) * 2 * M_PIf; surfPt.texCoord = TexCoord2D(phi / (2 * M_PIf), theta / M_PIf); if (pv_debugRenderingAttribute == DebugRenderingAttribute::BaseColor) { sm_debugPayload.value = SampledSpectrum::Zero(); } else { sm_debugPayload.value = debugRenderingAttributeToSpectrum(surfPt, pv_debugRenderingAttribute).evaluate(wls); } } // Common Ray Generation Program for All Camera Types RT_PROGRAM void debugRenderingRayGeneration() { KernelRNG rng = pv_rngBuffer[sm_launchIndex]; optix::float2 p = make_float2(sm_launchIndex.x + rng.getFloat0cTo1o(), sm_launchIndex.y + rng.getFloat0cTo1o()); float selectWLPDF; WavelengthSamples wls = WavelengthSamples::createWithEqualOffsets(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), &selectWLPDF); LensPosSample We0Sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); LensPosQueryResult We0Result; SampledSpectrum We0 = pv_progSampleLensPosition(wls, We0Sample, &We0Result); IDFSample We1Sample(p.x / pv_imageSize.x, p.y / pv_imageSize.y); IDFQueryResult We1Result; SampledSpectrum We1 = pv_progSampleIDF(We0Result.surfPt, wls, We1Sample, &We1Result); Vector3D rayDir = We0Result.surfPt.fromLocal(We1Result.dirLocal); SampledSpectrum alpha = (We0 * We1) * (We0Result.surfPt.calcCosTerm(rayDir) / (We0Result.areaPDF * We1Result.dirPDF * selectWLPDF)); optix::Ray ray = optix::make_Ray(asOptiXType(We0Result.surfPt.position), asOptiXType(rayDir), RayType::DebugPrimary, 0.0f, FLT_MAX); DebugRenderingPayload payload; payload.rng = rng; payload.wls = wls; rtTrace(pv_topGroup, ray, payload); pv_rngBuffer[sm_launchIndex] = payload.rng; if (!payload.value.allFinite()) { vlrprintf("Pass %u, (%u, %u): Not a finite value.\n", pv_numAccumFrames, sm_launchIndex.x, sm_launchIndex.y); return; } if (pv_numAccumFrames == 1) pv_outputBuffer[sm_launchIndex].reset(); pv_outputBuffer[sm_launchIndex].add(wls, payload.value / selectWLPDF); } // Exception Program 
RT_PROGRAM void debugRenderingException() { //uint32_t code = rtGetExceptionCode(); rtPrintExceptionDetails(); } }
a5e23a2326111b2e7698fa820a1c7ce19abe64d2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <unistd.h> #include <sys/types.h> #include <sys/ipc.h> #include <sys/shm.h> #include <errno.h> #include <signal.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "../cudaErr.h" #include "const.h" #include "Sender.hpp" // global variables to be initialized int shmid; TYPE *ptr; hipStream_t RT_stream_g; hipStream_t callBack_stream_g; hipEvent_t event_g; void init_shm (TYPE *ptr) { for (int i = 0; i < SIZE; i++) ptr->ready[i] = WAIT_VALUE; gpuErrchk (hipStreamCreate (&RT_stream_g)); gpuErrchk (hipStreamCreate (&callBack_stream_g)); memcpy (&(ptr->RT_stream), &RT_stream_g, sizeof(hipStream_t)); memcpy (&(ptr->callBackStream), &callBack_stream_g, sizeof(hipStream_t)); // gpuErrchk (hipStreamCreate (&(ptr->RT_stream))); // gpuErrchk (hipStreamCreate (&(ptr->callBackStream))); // ptr->RT_stream = RT_stream_g; // ptr->callBackStream = &callBack_stream_g; // ptr->memHandle = (hipIpcMemHandle_t *)malloc (sizeof (hipIpcMemHandle_t)); // ptr->eventHandle = (hipIpcEventHandle_t *)malloc (sizeof (hipIpcEventHandle_t)); } void init () { // FILE to key key_t key = ftok (FILENAME, FILEID); if (key == -1) { printf ("ftok failed, errno = %s\n", strerror(errno)); exit (-1); } // getting SHM id printf (" size of shm is %zu\n", sizeof(TYPE)); shmid = shmget (key, sizeof (TYPE), 0666|IPC_CREAT); if (shmid == -1) { printf ("shmget failed, errno = %s\n", strerror(errno)); exit (-1); } // attach the SHM to this process ptr = (TYPE *) shmat (shmid, (void *)0, 0); init_shm (ptr); } __global__ void init_kernel (GPU_DATA_TYPE *d_data) { int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; while (threadId < GPU_SIZE) { d_data[threadId] = 1; threadId += blockDim.x * gridDim.x; } } void sigInt_handler (int sig) { if (sig == SIGINT) { printf ("sender received SIGINT, calling hipProfilerStop before exiting\n"); gpuErrchk (hipProfilerStop ()); exit (0); } } int main() { init (); printf 
("ptr is %p\n", (void *)ptr); gpuErrchk (hipProfilerStart ()); GPU_DATA_TYPE *d_a; gpuErrchk (hipMalloc (&d_a, sizeof(GPU_DATA_TYPE) * GPU_SIZE)); hipLaunchKernelGGL(( init_kernel), dim3(128),dim3(1024), 0, 0, d_a); Sender *sender = new Sender(shmid, ptr, d_a); hipIpcMemHandle_t handle; gpuErrchk (hipIpcGetMemHandle (&handle, d_a)); memcpy (&(ptr->memHandle), &handle, sizeof (hipIpcMemHandle_t)); sender->set_GPUIPC_handle (&handle); // set the signal handling function if (signal (SIGINT, sigInt_handler) == SIG_ERR) { printf ("cannot handle SIGINT\n"); exit(-1); } // main loop while (true) // for (int i = 0;i < 1000; i++) { sender->update (); sender->wait (); sender->process (); sender->notify (); // usleep (20); } }
a5e23a2326111b2e7698fa820a1c7ce19abe64d2.cu
#include <stdio.h> #include <unistd.h> #include <sys/types.h> #include <sys/ipc.h> #include <sys/shm.h> #include <errno.h> #include <signal.h> #include <cuda_profiler_api.h> #include <cuda.h> #include "../cudaErr.h" #include "const.h" #include "Sender.hpp" // global variables to be initialized int shmid; TYPE *ptr; cudaStream_t RT_stream_g; cudaStream_t callBack_stream_g; cudaEvent_t event_g; void init_shm (TYPE *ptr) { for (int i = 0; i < SIZE; i++) ptr->ready[i] = WAIT_VALUE; gpuErrchk (cudaStreamCreate (&RT_stream_g)); gpuErrchk (cudaStreamCreate (&callBack_stream_g)); memcpy (&(ptr->RT_stream), &RT_stream_g, sizeof(cudaStream_t)); memcpy (&(ptr->callBackStream), &callBack_stream_g, sizeof(cudaStream_t)); // gpuErrchk (cudaStreamCreate (&(ptr->RT_stream))); // gpuErrchk (cudaStreamCreate (&(ptr->callBackStream))); // ptr->RT_stream = RT_stream_g; // ptr->callBackStream = &callBack_stream_g; // ptr->memHandle = (cudaIpcMemHandle_t *)malloc (sizeof (cudaIpcMemHandle_t)); // ptr->eventHandle = (cudaIpcEventHandle_t *)malloc (sizeof (cudaIpcEventHandle_t)); } void init () { // FILE to key key_t key = ftok (FILENAME, FILEID); if (key == -1) { printf ("ftok failed, errno = %s\n", strerror(errno)); exit (-1); } // getting SHM id printf (" size of shm is %zu\n", sizeof(TYPE)); shmid = shmget (key, sizeof (TYPE), 0666|IPC_CREAT); if (shmid == -1) { printf ("shmget failed, errno = %s\n", strerror(errno)); exit (-1); } // attach the SHM to this process ptr = (TYPE *) shmat (shmid, (void *)0, 0); init_shm (ptr); } __global__ void init_kernel (GPU_DATA_TYPE *d_data) { int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; while (threadId < GPU_SIZE) { d_data[threadId] = 1; threadId += blockDim.x * gridDim.x; } } void sigInt_handler (int sig) { if (sig == SIGINT) { printf ("sender received SIGINT, calling cudaProfilerStop before exiting\n"); gpuErrchk (cudaProfilerStop ()); exit (0); } } int main() { init (); printf ("ptr is %p\n", (void *)ptr); gpuErrchk (cudaProfilerStart 
()); GPU_DATA_TYPE *d_a; gpuErrchk (cudaMalloc (&d_a, sizeof(GPU_DATA_TYPE) * GPU_SIZE)); init_kernel<<<128,1024>>> (d_a); Sender *sender = new Sender(shmid, ptr, d_a); cudaIpcMemHandle_t handle; gpuErrchk (cudaIpcGetMemHandle (&handle, d_a)); memcpy (&(ptr->memHandle), &handle, sizeof (cudaIpcMemHandle_t)); sender->set_GPUIPC_handle (&handle); // set the signal handling function if (signal (SIGINT, sigInt_handler) == SIG_ERR) { printf ("cannot handle SIGINT\n"); exit(-1); } // main loop while (true) // for (int i = 0;i < 1000; i++) { sender->update (); sender->wait (); sender->process (); sender->notify (); // usleep (20); } }
b5d8bcb6042dc649d75c66c8a73f16390ea87a81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define NONE 0 #define UP 1 #define LEFT 2 #define DIAGONAL 3 #include "libxomp.h" #include "xomp_cuda_lib_inlined.cu" #ifdef __cplusplus extern "C" { #endif __global__ void OUT__1__4550__(long long nEle,long long m,long long n,int gapScore,int matchScore,int missmatchScore,long long si,long long sj,char *_dev_a,char *_dev_b,int *_dev_H,int *_dev_P,long long *_dev_maxPos_ptr) { long long _p_j; int _dev_lower; int _dev_upper; int _dev_loop_chunk_size; int _dev_loop_sched_index; int _dev_loop_stride; int _dev_thread_num = getCUDABlockThreadCount(1); int _dev_thread_id = getLoopIndexFromCUDAVariables(1); XOMP_static_sched_init((long long )0,nEle - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride); while(XOMP_static_sched_next(&_dev_loop_sched_index,nEle - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper)) for (_p_j = _dev_lower; _p_j <= _dev_upper; _p_j += 1) // going upwards : anti-diagnol direction { // going up vertically long long ai = si - _p_j; // going right in horizontal long long aj = sj + _p_j; ///------------inlined ------------------------------------------ // similarityScore(ai, aj, H, P, &maxPos); // a critical section is used inside { int up; int left; int diag; //Stores index of element long long index = m * ai + aj; //Get element above up = _dev_H[index - m - 0] + gapScore; //Get element on the left left = _dev_H[index - ((long long )1) - 0] + gapScore; //Get element on the diagonal int t_mms; if (((int )_dev_a[aj - ((long long )1) - 0]) == ((int )_dev_b[ai - ((long long )1) - 0])) t_mms = matchScore; else t_mms = missmatchScore; // matchMissmatchScore(i, j); diag = _dev_H[index - m - ((long long )1) - 0] + t_mms; // degug here // return; //Calculates the maximum int max = 0; int pred = 0; //same letter if (diag > max) { max = diag; pred = 3; } //remove letter if (up 
> max) { max = up; pred = 1; } //insert letter if (left > max) { max = left; pred = 2; } //Inserts the value in the similarity and predecessor matrixes _dev_H[index - 0] = max; _dev_P[index - 0] = pred; //Updates maximum score to be used as seed on backtrack /***** we use cuda atomicCAS to do critical ****** if (max > _dev_H[_dev_maxPos_ptr[0] - 0]) { //#pragma omp critical _dev_maxPos_ptr[0 - 0] = index; } ******/ { // \note \pp // locks seem to be a NOGO in CUDA warps, // thus the update to set the maximum is made nonblocking. unsigned long long int current = _dev_maxPos_ptr[0]; unsigned long long int assumed = current+1; while (assumed != current && max > _dev_H[current]) { assumed = current; // \note consider atomicCAS_system for multi GPU systems current = atomicCAS((unsigned long long int*)_dev_maxPos_ptr, (unsigned long long int)assumed, (unsigned long long int)index); } } } // --------------------------------------------------------------- } } void calculate(char *a,char *b,long long nEle,long long m,long long n,int gapScore,int matchScore,int missmatchScore,long long si,long long sj,int *H,int *P,long long *maxPos_ptr,long long j,int asz) { { xomp_deviceDataEnvironmentEnter(0); char *_dev_a; int _dev_a_size[1] = {m}; int _dev_a_offset[1] = {0}; int _dev_a_Dim[1] = {m}; _dev_a = ((char *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)a,1,sizeof(char ),_dev_a_size,_dev_a_offset,_dev_a_Dim,1,0))); char *_dev_b; int _dev_b_size[1] = {n}; int _dev_b_offset[1] = {0}; int _dev_b_Dim[1] = {n}; _dev_b = ((char *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)b,1,sizeof(char ),_dev_b_size,_dev_b_offset,_dev_b_Dim,1,0))); int *_dev_H; int _dev_H_size[1] = {asz}; int _dev_H_offset[1] = {0}; int _dev_H_Dim[1] = {asz}; _dev_H = ((int *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)H,1,sizeof(int ),_dev_H_size,_dev_H_offset,_dev_H_Dim,1,1))); int *_dev_P; int _dev_P_size[1] = {asz}; int _dev_P_offset[1] = {0}; int _dev_P_Dim[1] = {asz}; _dev_P = ((int 
*)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)P,1,sizeof(int ),_dev_P_size,_dev_P_offset,_dev_P_Dim,1,1))); long long *_dev_maxPos_ptr; int _dev_maxPos_ptr_size[1] = {1}; int _dev_maxPos_ptr_offset[1] = {0}; int _dev_maxPos_ptr_Dim[1] = {1}; _dev_maxPos_ptr = ((long long *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)maxPos_ptr,1,sizeof(long long ),_dev_maxPos_ptr_size,_dev_maxPos_ptr_offset,_dev_maxPos_ptr_Dim,1,1))); /* Launch CUDA kernel ... */ int _threads_per_block_ = xomp_get_maxThreadsPerBlock(0); int _num_blocks_ = xomp_get_max1DBlock(0,nEle - 1 - ((long long )0) + 1); hipLaunchKernelGGL(( OUT__1__4550__), dim3(_num_blocks_),dim3(_threads_per_block_), 0, 0, nEle,m,n,gapScore,matchScore,missmatchScore,si,sj,_dev_a,_dev_b,_dev_H,_dev_P,_dev_maxPos_ptr); xomp_deviceDataEnvironmentExit(0); } } // } // for end nDiag // } // end omp parallel //} #ifdef __cplusplus } #endif
b5d8bcb6042dc649d75c66c8a73f16390ea87a81.cu
#define NONE 0 #define UP 1 #define LEFT 2 #define DIAGONAL 3 #include "libxomp.h" #include "xomp_cuda_lib_inlined.cu" #ifdef __cplusplus extern "C" { #endif __global__ void OUT__1__4550__(long long nEle,long long m,long long n,int gapScore,int matchScore,int missmatchScore,long long si,long long sj,char *_dev_a,char *_dev_b,int *_dev_H,int *_dev_P,long long *_dev_maxPos_ptr) { long long _p_j; int _dev_lower; int _dev_upper; int _dev_loop_chunk_size; int _dev_loop_sched_index; int _dev_loop_stride; int _dev_thread_num = getCUDABlockThreadCount(1); int _dev_thread_id = getLoopIndexFromCUDAVariables(1); XOMP_static_sched_init((long long )0,nEle - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride); while(XOMP_static_sched_next(&_dev_loop_sched_index,nEle - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper)) for (_p_j = _dev_lower; _p_j <= _dev_upper; _p_j += 1) // going upwards : anti-diagnol direction { // going up vertically long long ai = si - _p_j; // going right in horizontal long long aj = sj + _p_j; ///------------inlined ------------------------------------------ // similarityScore(ai, aj, H, P, &maxPos); // a critical section is used inside { int up; int left; int diag; //Stores index of element long long index = m * ai + aj; //Get element above up = _dev_H[index - m - 0] + gapScore; //Get element on the left left = _dev_H[index - ((long long )1) - 0] + gapScore; //Get element on the diagonal int t_mms; if (((int )_dev_a[aj - ((long long )1) - 0]) == ((int )_dev_b[ai - ((long long )1) - 0])) t_mms = matchScore; else t_mms = missmatchScore; // matchMissmatchScore(i, j); diag = _dev_H[index - m - ((long long )1) - 0] + t_mms; // degug here // return; //Calculates the maximum int max = 0; int pred = 0; //same letter ↖ if (diag > max) { max = diag; pred = 3; } //remove letter ↑ if (up > max) { max = up; pred = 1; } //insert letter ← if (left > max) { max = left; pred 
= 2; } //Inserts the value in the similarity and predecessor matrixes _dev_H[index - 0] = max; _dev_P[index - 0] = pred; //Updates maximum score to be used as seed on backtrack /***** we use cuda atomicCAS to do critical ****** if (max > _dev_H[_dev_maxPos_ptr[0] - 0]) { //#pragma omp critical _dev_maxPos_ptr[0 - 0] = index; } ******/ { // \note \pp // locks seem to be a NOGO in CUDA warps, // thus the update to set the maximum is made nonblocking. unsigned long long int current = _dev_maxPos_ptr[0]; unsigned long long int assumed = current+1; while (assumed != current && max > _dev_H[current]) { assumed = current; // \note consider atomicCAS_system for multi GPU systems current = atomicCAS((unsigned long long int*)_dev_maxPos_ptr, (unsigned long long int)assumed, (unsigned long long int)index); } } } // --------------------------------------------------------------- } } void calculate(char *a,char *b,long long nEle,long long m,long long n,int gapScore,int matchScore,int missmatchScore,long long si,long long sj,int *H,int *P,long long *maxPos_ptr,long long j,int asz) { { xomp_deviceDataEnvironmentEnter(0); char *_dev_a; int _dev_a_size[1] = {m}; int _dev_a_offset[1] = {0}; int _dev_a_Dim[1] = {m}; _dev_a = ((char *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)a,1,sizeof(char ),_dev_a_size,_dev_a_offset,_dev_a_Dim,1,0))); char *_dev_b; int _dev_b_size[1] = {n}; int _dev_b_offset[1] = {0}; int _dev_b_Dim[1] = {n}; _dev_b = ((char *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)b,1,sizeof(char ),_dev_b_size,_dev_b_offset,_dev_b_Dim,1,0))); int *_dev_H; int _dev_H_size[1] = {asz}; int _dev_H_offset[1] = {0}; int _dev_H_Dim[1] = {asz}; _dev_H = ((int *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)H,1,sizeof(int ),_dev_H_size,_dev_H_offset,_dev_H_Dim,1,1))); int *_dev_P; int _dev_P_size[1] = {asz}; int _dev_P_offset[1] = {0}; int _dev_P_Dim[1] = {asz}; _dev_P = ((int *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)P,1,sizeof(int 
),_dev_P_size,_dev_P_offset,_dev_P_Dim,1,1))); long long *_dev_maxPos_ptr; int _dev_maxPos_ptr_size[1] = {1}; int _dev_maxPos_ptr_offset[1] = {0}; int _dev_maxPos_ptr_Dim[1] = {1}; _dev_maxPos_ptr = ((long long *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)maxPos_ptr,1,sizeof(long long ),_dev_maxPos_ptr_size,_dev_maxPos_ptr_offset,_dev_maxPos_ptr_Dim,1,1))); /* Launch CUDA kernel ... */ int _threads_per_block_ = xomp_get_maxThreadsPerBlock(0); int _num_blocks_ = xomp_get_max1DBlock(0,nEle - 1 - ((long long )0) + 1); OUT__1__4550__<<<_num_blocks_,_threads_per_block_>>>(nEle,m,n,gapScore,matchScore,missmatchScore,si,sj,_dev_a,_dev_b,_dev_H,_dev_P,_dev_maxPos_ptr); xomp_deviceDataEnvironmentExit(0); } } // } // for end nDiag // } // end omp parallel //} #ifdef __cplusplus } #endif
2992d71e8c5094b3cee79892587442334cd92c56.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Vector_Addition.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *dev_a = NULL; hipMalloc(&dev_a, XSIZE*YSIZE); int *dev_b = NULL; hipMalloc(&dev_b, XSIZE*YSIZE); int *dev_c = NULL; hipMalloc(&dev_c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Vector_Addition), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_a,dev_b,dev_c); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Vector_Addition), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_a,dev_b,dev_c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Vector_Addition), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_a,dev_b,dev_c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
2992d71e8c5094b3cee79892587442334cd92c56.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Vector_Addition.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *dev_a = NULL; cudaMalloc(&dev_a, XSIZE*YSIZE); int *dev_b = NULL; cudaMalloc(&dev_b, XSIZE*YSIZE); int *dev_c = NULL; cudaMalloc(&dev_c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Vector_Addition<<<gridBlock,threadBlock>>>(dev_a,dev_b,dev_c); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Vector_Addition<<<gridBlock,threadBlock>>>(dev_a,dev_b,dev_c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Vector_Addition<<<gridBlock,threadBlock>>>(dev_a,dev_b,dev_c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
3ab461b1207e1d64030eb7b95190d92b24d3d759.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zlobpcg_maxpy.cu normal z -> d, Tue Feb 9 16:05:42 2016 */ #include "magmasparse_internal.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 __global__ void magma_dlobpcg_maxpy_kernel( magma_int_t num_rows, magma_int_t num_vecs, double * X, double * Y) { int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index if ( row < num_rows ) { for( int i=0; i < num_vecs; i++ ) { Y[ row + i*num_rows ] += X[ row + i*num_rows ]; } } } /** Purpose ------- This routine computes a axpy for a mxn matrix: Y = X + Y It replaces: magma_daxpy(m*n, c_one, Y, 1, X, 1); / x1[0] x2[0] x3[0] \ | x1[1] x2[1] x3[1] | X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] . | x1[3] x2[3] x3[3] | \ x1[4] x2[4] x3[4] / Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] num_vecs magma_int_t number of vectors @param[in] X magmaDouble_ptr input vector X @param[in,out] Y magmaDouble_ptr input/output vector Y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dlobpcg_maxpy( magma_int_t num_rows, magma_int_t num_vecs, magmaDouble_ptr X, magmaDouble_ptr Y, magma_queue_t queue ) { // every thread handles one row magma_int_t block_size = BLOCK_SIZE; magma_int_t threads = BLOCK_SIZE; dim3 block( block_size ); dim3 grid( magma_ceildiv( num_rows, block_size ) ); hipLaunchKernelGGL(( magma_dlobpcg_maxpy_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , num_rows, num_vecs, X, Y ); return MAGMA_SUCCESS; }
3ab461b1207e1d64030eb7b95190d92b24d3d759.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zlobpcg_maxpy.cu normal z -> d, Tue Feb 9 16:05:42 2016 */ #include "magmasparse_internal.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 __global__ void magma_dlobpcg_maxpy_kernel( magma_int_t num_rows, magma_int_t num_vecs, double * X, double * Y) { int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index if ( row < num_rows ) { for( int i=0; i < num_vecs; i++ ) { Y[ row + i*num_rows ] += X[ row + i*num_rows ]; } } } /** Purpose ------- This routine computes a axpy for a mxn matrix: Y = X + Y It replaces: magma_daxpy(m*n, c_one, Y, 1, X, 1); / x1[0] x2[0] x3[0] \ | x1[1] x2[1] x3[1] | X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] . | x1[3] x2[3] x3[3] | \ x1[4] x2[4] x3[4] / Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] num_vecs magma_int_t number of vectors @param[in] X magmaDouble_ptr input vector X @param[in,out] Y magmaDouble_ptr input/output vector Y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dlobpcg_maxpy( magma_int_t num_rows, magma_int_t num_vecs, magmaDouble_ptr X, magmaDouble_ptr Y, magma_queue_t queue ) { // every thread handles one row magma_int_t block_size = BLOCK_SIZE; magma_int_t threads = BLOCK_SIZE; dim3 block( block_size ); dim3 grid( magma_ceildiv( num_rows, block_size ) ); magma_dlobpcg_maxpy_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( num_rows, num_vecs, X, Y ); return MAGMA_SUCCESS; }
a872c06f815f6a56b4fe3c79315dbb4b8325c7ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void stream(float* dA, float* dB, float* dC, float alpha, int N) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < N) { dA[id] = dB[id] + alpha*dC[id]; } } extern "C" { void LaunchStream(float* dA, float *dB, float* dC, float alpha, int N) { hipLaunchKernelGGL(( stream), dim3(ceil(((float)N)/1024)), dim3(1024), 0, 0, dA, dB, dC, alpha, N); } }
a872c06f815f6a56b4fe3c79315dbb4b8325c7ea.cu
__global__ void stream(float* dA, float* dB, float* dC, float alpha, int N) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < N) { dA[id] = dB[id] + alpha*dC[id]; } } extern "C" { void LaunchStream(float* dA, float *dB, float* dC, float alpha, int N) { stream<<<ceil(((float)N)/1024), 1024>>>(dA, dB, dC, alpha, N); } }
9f84ec45ea66729b0c4a7e9514fafcb3d18bf86e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 Stanford, NVIDIA, Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::dense(const Tensor& input, int outDim, ActiMode activation, bool use_bias, const Op* shared_op, Initializer* kernel_initializer, Initializer* bias_initializer, const char *name) { if (kernel_initializer == NULL) { int seed = std::rand(); kernel_initializer = new GlorotUniform(seed); } if (bias_initializer == NULL) { bias_initializer = new ZeroInitializer(); } Linear *li = new Linear(*this, input, outDim, activation, use_bias, shared_op, kernel_initializer, bias_initializer, name); layers.push_back(li); return li->outputs[0]; } Linear::Linear(FFModel& model, const Tensor& _input, int out_dim, ActiMode _activation, bool _use_bias, const Op* shared_op, Initializer* _kernel_initializer, Initializer* _bias_initializer, const char* name) : Op(model, OP_LINEAR, shared_op, name, _input), in_channels(_input.adim[0]), out_channels(out_dim), activation(_activation), use_bias(_use_bias), kernel_initializer(_kernel_initializer), bias_initializer(_bias_initializer) { numInputs = 1; numOutputs = 1; outputs[0].numDim = _input.numDim; for (int i = 1; i < outputs[0].numDim; i++) outputs[0].adim[i] = _input.adim[i]; outputs[0].adim[0] = out_dim; weights[0].numDim = 2; weights[0].adim[0] = in_channels; weights[0].adim[1] = out_channels; numWeights = 1; if 
(use_bias) { weights[1].numDim = 1; weights[1].adim[0] = out_channels; numWeights = 2; } } void Linear::create_weights(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ create_weights_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim assert(false); } } } template<int NDIM> void Linear::create_weights_with_dim(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname)); #ifdef FF_USE_NCCL ParameterSyncType comm_type = ParameterSyncType::NCCL; #else ParameterSyncType comm_type = ParameterSyncType::PS; #endif // Create kernel tensor { const int dims[2] = {out_channels, in_channels}; weights[0] = model.create_linear_weight<2, NDIM>(this, dims, DT_FLOAT, kernel_initializer, true/*create_grad*/, comm_type); } // Create bias tensor if (use_bias) { const int dims[1] = {out_channels}; weights[1] = model.create_linear_weight<1, NDIM>(this, dims, DT_FLOAT, bias_initializer, true/*create_grad*/, comm_type); assert(numWeights == 2); } else { assert(numWeights == 1); } } void Linear::create_output_and_partition(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ create_output_and_partition_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim for ElementWiseBinary operator assert(false); } } } template<int NDIM> void Linear::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_n = part_rect.hi[NDIM-1] - part_rect.lo[NDIM-1] + 1; int 
in_dim = inputs[0].adim[0]; assert(in_dim == in_channels); int batch_size = inputs[0].adim[NDIM-1]; { int dims[NDIM]; for (int i = 0; i < NDIM; i++) dims[i] = outputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Compute partition bound for input Rect<NDIM> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); // Create replica tensor if (num_par_c > 1) { { Rect<NDIM> extent; for (int i = 1; i < NDIM; i++) { extent.lo[i] = 0; assert(outputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0); extent.hi[i] = outputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1; } extent.lo[0] = 0; extent.hi[0] = in_dim-1; Transform<NDIM, NDIM> transform; for (int i = 0; i < NDIM; i++) for (int j = 0; j < NDIM; j++) transform[i][j] = 0; for (int i = 1; i < NDIM; i++) transform[i][i] = extent.hi[i] + 1; IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); assert(runtime->is_index_partition_complete(ctx, ip)); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); } if (model.config.computationMode == COMP_MODE_TRAINING) { if (NDIM==1) { const int dims[2] = {num_par_c, in_dim}; replica = model.create_linear_replica<2>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else if (NDIM==2) { const int dims[3] = {num_par_c, batch_size, in_dim}; replica = model.create_linear_replica<3>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else if (NDIM==3) { const int dims[4] = {num_par_c, batch_size, inputs[0].adim[1], in_dim}; replica = model.create_linear_replica<4>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else { assert(false && "Unsupported dimension for parallelizing Linear operators" " using the parameter dim."); } // Backward use the same ip as inputs[0] input_grad_lps[0] = inputs[0].part_grad; { IndexSpaceT<NDIM> input_task_is = 
IndexSpaceT<NDIM>(model.get_or_create_task_is(input_rect)); Rect<NDIM+1> extent; for (int i = 0; i < NDIM; i++) { extent.lo[i] = 0; assert(inputs[0].adim[i] % (input_rect.hi[i] - input_rect.lo[i] + 1) == 0); extent.hi[i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1) - 1; } extent.lo[NDIM] = 0; extent.hi[NDIM] = num_par_c - 1; Transform<NDIM+1, NDIM> transform; for (int i = 0; i < NDIM+1; i++) for (int j = 0; j < NDIM; j++) transform[i][j] = 0; for (int i = 0; i < NDIM; i++) transform[i][i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1); IndexPartition ip = runtime->create_partition_by_restriction( ctx, replica.region_grad.get_index_space(), input_task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); // Note we use replica.part to save how to partition the replica // to compute input_grad_lps replica.part = runtime->get_logical_partition( ctx, replica.region_grad, ip); } } // if COMP_MODE_TRAINING } else { // when num_par_c == 1 if (input_rect == part_rect) { input_lps[0] = inputs[0].part; if (model.config.computationMode == COMP_MODE_TRAINING) { input_grad_lps[0] = inputs[0].part_grad; } } else { Rect<NDIM> extent; for (int i = 0; i < NDIM; i++) { extent.lo[i] = 0; assert(inputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0); extent.hi[i] = inputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1; } Transform<NDIM, NDIM> transform; for (int i = 0; i < NDIM; i++) for (int j = 0; j < NDIM; j++) { transform[i][j] = 0; if (i==j) transform[i][j] = extent.hi[i] + 1; } IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); if (model.config.computationMode == COMP_MODE_TRAINING) 
{ input_grad_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region_grad, ip); } } } } /* regions[0](O): output regions[1](I): kernel regions[2](I): bias */ OpMeta* Linear::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (out_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return init_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } return NULL; } bool Linear::use_cudnn_activation(ActiMode mode) { switch (mode) { case AC_MODE_RELU: case AC_MODE_SIGMOID: case AC_MODE_TANH: return true; } return false; } template<int NDIM> OpMeta* Linear::init_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == task->regions.size()); assert(regions.size() == 2 || regions.size() == 3); const Linear* linear = (Linear*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); //TensorAccessorR<float, 2> acc_input( // regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, NDIM> acc_output( regions[0], task->regions[0], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[1], task->regions[1], FID_DATA, ctx, runtime); // TensorAccessorR<float, 1> acc_bias( // regions[3], task->regions[3], FID_DATA, ctx, runtime); //int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int in_dim = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; printf("init linear (input): in_dim(%d) out_dim(%d) batch_size(%d)\n", in_dim, out_dim, batch_size); LinearMeta* m = new LinearMeta(handle, batch_size); m->activation = linear->activation; m->use_bias = linear->use_bias; m->profiling = 
linear->profiling; std::strcpy(m->op_name, linear->name); if (use_cudnn_activation(m->activation)) { cudnnActivationMode_t mode; switch (linear->activation) { case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; default: // Unsupported activation mode assert(false); } checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode, CUDNN_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, out_dim, 1, 1)); } return m; } void Linear::init(const FFModel& ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ return init_with_dim<DIM>(ff); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::init_with_dim(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); ParallelConfig pc; std::string pcname = name; ff.config.find_parallel_config(NDIM, pcname, pc); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { FFHandler handle = ff.handlers[pc.device_ids[idx++]]; #ifdef FF_USE_NCCL handle.ncclComm = pc.nccl_comms[idx-1]; #endif argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(LINEAR_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); //launcher.add_region_requirement( // RegionRequirement(input_lps[0], 0/*projection id*/, // READ_ONLY, EXCLUSIVE, inputs[0].region)); //launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, 
weights[0].region)); launcher.add_field(1, FID_DATA); // launcher.add_region_requirement( // RegionRequirement(weights[1].part, 0/*projection id*/, // READ_ONLY, EXCLUSIVE, weights[1].region)); // launcher.add_field(3, FID_DATA); if (ff.config.computationMode == COMP_MODE_TRAINING) { // Add inputs[0].region_grad to avoid Legion warning launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(2, FID_DATA); } FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /*static*/ void Linear::forward_kernel(const LinearMeta* m, const float* input_ptr, float* output_ptr, const float* kernel_ptr, const float* bias_ptr, int in_dim, int out_dim, int batch_size) { float alpha = 1.0f, beta = 0.0f; checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_T, HIPBLAS_OP_N, out_dim, batch_size, in_dim, &alpha, kernel_ptr, in_dim, input_ptr, in_dim, &beta, output_ptr, out_dim)); // use_bias = True if (bias_ptr != NULL) { checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_T, HIPBLAS_OP_N, out_dim, batch_size, 1, &alpha, bias_ptr, 1, m->one_ptr, 1, &alpha, output_ptr, out_dim)); } if (use_cudnn_activation(m->activation)) { checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc, &alpha, m->outputTensor, output_ptr, &beta, m->outputTensor, output_ptr)); } else if (m->activation == AC_MODE_GELU) { size_t elements = (size_t)out_dim * (size_t) batch_size; constexpr float B = 0.7978845608028654f; // sqrt(2.0/M_PI) constexpr float C = 0.035677408136300125f; // 0.044715 * sqrt(2.0/M_PI) hipLaunchKernelGGL(( gelu_forward_kernel), dim3(GET_BLOCKS(elements)), dim3(CUDA_NUM_THREADS), 0, 0, elements, B, C, output_ptr); } else if (m->activation == AC_MODE_NONE) { // Do nothing } else { assert(false && "Unsupported activation for Linear"); } } __host__ void 
Linear::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return forward_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I); input regions[1](O): output regions[2](I): kernel regions[3](I): bias */ template<int NDIM> void Linear::forward_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); assert(regions.size() == (3 + int(m->use_bias))); assert(task->regions.size() == (3 + int(m->use_bias))); TensorAccessorR<float, NDIM> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, NDIM> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_input.rect.volume() == in_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); const float* acc_bias_ptr = NULL; if (m->use_bias) { TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); assert(acc_bias.rect.volume() == out_dim); acc_bias_ptr = acc_bias.ptr; } hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDA(hipblasSetStream(m->handle.blas, 
stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif Linear::forward_kernel(m, acc_input.ptr, acc_output.ptr, acc_kernel.ptr, acc_bias_ptr, in_dim, out_dim, batch_size); if (m->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("%s [Linear] forward time = %.2lfms\n", m->op_name, elapsed); //print_tensor<NDIM, float>(acc_input.ptr, acc_input.rect, "[Linear:forward:input]"); //print_tensor<2, float>(acc_kernel.ptr, acc_kernel.rect, "[Linear:forward:kernel]"); //print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Linear:forward:bias]"); //print_tensor<NDIM, float>(acc_output.ptr, acc_output.rect, "[Linear:forward:output]"); } } void Linear::forward(const FFModel& ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ return forward_with_dim<DIM>(ff); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::forward_with_dim(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(LINEAR_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection 
id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); if (use_bias) { launcher.add_region_requirement( RegionRequirement(weights[1].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); } runtime->execute_index_space(ctx, launcher); } __global__ void sigmoid_backward(float *grad_ptr, const float *output, int n) { CUDA_KERNEL_LOOP(i, n) { grad_ptr[i] = grad_ptr[i] * output[i] * (1 - output[i]); } } /*static*/ void Linear::backward_kernel(const LinearMeta* m, const float* input_ptr, float* input_grad_ptr, const float* output_ptr, float* output_grad_ptr, const float* kernel_ptr, float* kernel_grad_ptr, float* bias_grad_ptr, int in_dim, int out_dim, int batch_size) { float alpha = 1.0f; int output_size = out_dim * batch_size; if (m->activation == AC_MODE_RELU) { hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(output_size)), dim3(CUDA_NUM_THREADS), 0, 0, output_grad_ptr, output_ptr, output_size); } else if (m->activation == AC_MODE_SIGMOID) { hipLaunchKernelGGL(( sigmoid_backward), dim3(GET_BLOCKS(output_size)), dim3(CUDA_NUM_THREADS), 0, 0, output_grad_ptr, output_ptr, output_size); } else { // TODO: only support relu and sigmoid for now assert(m->activation == AC_MODE_NONE); } // Compute weight gradiant // NOTE: we use alpha=1 for kernel_grad to accumulate gradients checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_N, HIPBLAS_OP_T, in_dim, out_dim, batch_size, &alpha, input_ptr, in_dim, output_grad_ptr, out_dim, &alpha, kernel_grad_ptr, in_dim)); // Compute bias gradiant // NOTE: we use alpha=1 for bias_grad to accumulate gradients // use_bias = True if (bias_grad_ptr != NULL) { checkCUDA(hipblasSgemv(m->handle.blas, HIPBLAS_OP_N, out_dim, batch_size, &alpha, output_grad_ptr, out_dim, m->one_ptr, 1, &alpha, bias_grad_ptr, 1)); } // Compute data gradiant // NOTE: we use alpha=1 for input_grad to accumulate gradients checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_N, HIPBLAS_OP_N, in_dim, 
batch_size, out_dim, &alpha, kernel_ptr, in_dim, output_grad_ptr, out_dim, &alpha, input_grad_ptr, in_dim)); } void Linear::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return backward_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I): input regions[1](I/O): replica_grad or input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): filter regions[5](I/O): filter_grad regions[6](I/O): bias_grad */ template<int NDIM> __host__ void Linear::backward_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); assert(regions.size() == (6 + int(m->use_bias))); assert(task->regions.size() == (6 + int(m->use_bias))); float* input_grad = NULL; TensorAccessorR<float, NDIM> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorR<float, NDIM> acc_output( regions[2], task->regions[2], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; Domain domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); if (domain.get_dim() == NDIM+1) { assert(domain.get_volume() == in_dim * batch_size); input_grad = helperGetTensorPointerWO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); } else { TensorAccessorW<float, NDIM> acc_replica_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime, true/*readOutput*/); assert(acc_replica_grad.rect.volume() == in_dim * batch_size); input_grad = 
acc_replica_grad.ptr; } TensorAccessorW<float, NDIM> acc_output_grad( regions[3], task->regions[3], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[4], task->regions[4], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> acc_kernel_grad( regions[5], task->regions[5], FID_DATA, ctx, runtime, true/*readOutput*/); // make sure the sizes match assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_output_grad.rect.volume() == out_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); assert(acc_kernel_grad.rect.volume() == in_dim * out_dim); float* acc_bias_grad_ptr = NULL; if (m->use_bias) { TensorAccessorW<float, 1> acc_bias_grad( regions[6], task->regions[6], FID_DATA, ctx, runtime, true/*readOutput*/); assert(acc_bias_grad.rect.volume() == out_dim); acc_bias_grad_ptr = static_cast<float*>(acc_bias_grad.ptr); } hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDA(hipblasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif Linear::backward_kernel(m, acc_input.ptr, input_grad, acc_output.ptr, acc_output_grad.ptr, acc_kernel.ptr, acc_kernel_grad.ptr, acc_bias_grad_ptr, in_dim, out_dim, batch_size); if (m->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("Linear backward time = %.2lfms\n", elapsed); //print_tensor<NDIM, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Linear:backward:output_grad]"); //print_tensor<2, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Linear:backward:kernel_grad]"); //print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Linear:backward:bias_grad]"); //print_tensor<2, 
float>(input_grad, acc_input.rect, "[Linear:backward:input_grad]"); } } void Linear::backward2_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return backward2_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I/O): input_grad regions[1](I): replicas */ template<int NDIM> __host__ void Linear::backward2_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //const LinearMeta* m = *((LinearMeta**) task->local_args); TensorAccessorW<float, NDIM> acc_input_grad( regions[0], task->regions[0], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 3> acc_replica( regions[1], task->regions[1], FID_DATA, ctx, runtime); assert(acc_input_grad.rect.hi[0] == acc_replica.rect.hi[0]); assert(acc_input_grad.rect.lo[0] == acc_replica.rect.lo[0]); assert(acc_input_grad.rect.hi[1] == acc_replica.rect.hi[1]); assert(acc_input_grad.rect.lo[1] == acc_replica.rect.lo[1]); //#ifndef DISABLE_LEGION_CUDA_HIJACK // hipStream_t stream; // checkCUDA(hipStreamCreate(&stream)); // checkCUDA(hipblasSetStream(m->handle.blas, stream)); // checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); //#endif int num_replica = acc_replica.rect.hi[NDIM] - acc_replica.rect.lo[NDIM] + 1; const float *replica_ptr = acc_replica.ptr; for (int i = 0; i < num_replica; i++) { size_t num_elements = acc_input_grad.rect.volume(); hipLaunchKernelGGL(( apply_add_with_scale), dim3(GET_BLOCKS(num_elements)), dim3(CUDA_NUM_THREADS), 0, 0, acc_input_grad.ptr, replica_ptr, num_elements, 1.0f); replica_ptr += acc_input_grad.rect.volume(); } } void Linear::backward(const FFModel& ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ 
return backward_with_dim<DIM>(ff); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::backward_with_dim(const FFModel& ff) { Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; { ArgumentMap argmap; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(LINEAR_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](I/O): replica_grad if (replica.region_grad != LogicalRegion::NO_REGION) { launcher.add_region_requirement( RegionRequirement(replica.part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(1, FID_DATA); } else { launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); } // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(4, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, 
READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(5, FID_DATA); if (use_bias) { // regions[6](I/O): bias_grad launcher.add_region_requirement( RegionRequirement(weights[1].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(6, FID_DATA); } runtime->execute_index_space(ctx, launcher); } if (replica.region_grad != LogicalRegion::NO_REGION) { // We aggregate parameters from replica tensor to input tensor // Note we use input's task_is to reduce extra data transfers ArgumentMap argmap; Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part_grad.get_index_partition()); IndexSpaceT<2> input_task_is = IndexSpaceT<2>(ff.get_task_is(input_rect)); IndexLauncher launcher(LINEAR_BWD2_TASK_ID, input_task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(inputs[0].owner_op->name))); launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(0, FID_DATA); // Note that replica.part save's a partition of replica.region_grad launcher.add_region_requirement( RegionRequirement(replica.part, 0/*partition id*/, READ_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } } /* __host__ Parameter* Linear::get_parameter(int index) { if (index == 0) { return &weights[0]; } else if (index == 1){ return &weights[1]; } else { assert(0); return NULL; } } */ __host__ void Linear::print_layer(const FFModel& ff) { printf("linear layer\n"); Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region); kernel_req.add_field(FID_DATA); InlineLauncher kernel_launcher(kernel_req); PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher); 
kernel_region.wait_until_valid(); RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region); bias_req.add_field(FID_DATA); InlineLauncher bias_launcher(bias_req); PhysicalRegion bias_region = runtime->map_region(ctx, bias_launcher); bias_region.wait_until_valid(); TensorAccessorW<float, 2> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true); TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true); const float *kernel_ptr = acc_kernel.ptr; const float *bias_ptr = acc_bias.ptr; size_t kernel_size = acc_kernel.rect.volume(); int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1; size_t bias_size = acc_bias.rect.volume(); printf("kernel, %p, %zu, [%d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2); printf("bias, %p, %zu\n", bias_ptr, bias_size); for (int i = 0; i < bias_size; i++) { printf("%f ", bias_ptr[i]); } printf("\n"); for (int i = 0; i < kernel_size; i++) { printf("%f ", kernel_ptr[i]); } printf("\n"); runtime->unmap_region(ctx, kernel_region); runtime->unmap_region(ctx, bias_region); } LinearMeta::LinearMeta(FFHandler handler, int batch_size) : OpMeta(handler) { // Allocate an all-one's vector float* dram_one_ptr = (float *) malloc(sizeof(float) * batch_size); for (int i = 0; i < batch_size; i++) dram_one_ptr[i] = 1.0f; float* fb_one_ptr; checkCUDA(hipMalloc(&fb_one_ptr, sizeof(float) * batch_size)); checkCUDA(hipMemcpy(fb_one_ptr, dram_one_ptr, sizeof(float) * batch_size, hipMemcpyHostToDevice)); one_ptr = (const float*) fb_one_ptr; // Allocate descriptors checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); } bool Linear::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { Tensor sub_output, sub_input; if (!outputs[0].get_output_sub_tensor(pc, sub_output, OP_LINEAR)) return false; if 
(!inputs[0].get_input_sub_tensor(pc, sub_input, OP_LINEAR)) return false; int input_c = sub_input.adim[0]; int input_n = sub_input.get_volume() / input_c; int output_c = sub_output.adim[0]; int output_n = sub_output.get_volume() / output_c; LinearMeta* m = sim->linear_meta; m->activation = activation; if (use_cudnn_activation(m->activation)) { cudnnActivationMode_t mode; switch (activation) { case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; default: // Unsupported activation mode assert(false); } checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode, CUDNN_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, 1, 1)); } // allocate tensors in simulator sim->free_all(); float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert(input_ptr != NULL); float *output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert(output_ptr != NULL); float* kernel_ptr = (float*)sim->allocate((size_t)output_c * input_c, DT_FLOAT); assert(kernel_ptr != NULL); float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_ptr != NULL); std::function<void()> forward, backward; forward = [&] { forward_kernel(m, input_ptr, output_ptr, kernel_ptr, bias_ptr, input_c, output_c, input_n); }; if (sim->computationMode == COMP_MODE_TRAINING) { float* input_grad_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); float *output_grad_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); float* kernel_grad_ptr = (float*)sim->allocate((size_t)output_c * input_c, DT_FLOAT); float* bias_grad_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_grad_ptr != NULL); backward = [&] { backward_kernel(m, input_ptr, input_grad_ptr, output_ptr, output_grad_ptr, kernel_ptr, kernel_grad_ptr, bias_grad_ptr, input_c, output_c, input_n); }; } inner_measure_operator_cost(sim, forward, 
backward, cost_metrics); if (sim->computationMode == COMP_MODE_TRAINING) { printf("[Measure Linear] name(%s) in(%d %d) out(%d %d) forward_time(%.4lf) backward_time(%.4lf)\n", name, input_n, input_c, output_n, output_c, cost_metrics.forward_time, cost_metrics.backward_time); } else { printf("[Measure Linear] name(%s) in(%d %d) out(%d %d) forward_time(%.4lf)\n", name, input_n, input_c, output_n, output_c, cost_metrics.forward_time); } return true; } ParallelConfig Linear::get_random_parallel_config(const FFModel& ff) const { if (!ff.config.enable_parameter_parallel) return Op::get_random_parallel_config(ff); std::vector<int> batch_candidates; std::vector<int> channel_candidates; int batch = outputs[0].adim[outputs[0].numDim-1]; int channel = outputs[0].adim[0]; int total_devices = ff.config.workersPerNode * ff.config.numNodes; for (int i = 1; i <= ff.config.workersPerNode; i++) if (channel % i == 0) for (int j = 1; i * j <= total_devices; j++) if (batch % j == 0) { batch_candidates.push_back(j); channel_candidates.push_back(i); } assert(batch_candidates.size() > 0); int idx = std::rand() % batch_candidates.size(); int num_par_c = channel_candidates[idx]; int num_par_b = batch_candidates[idx]; ParallelConfig pc; pc.device_type = ParallelConfig::GPU; pc.nDims = outputs[0].numDim; pc.dim[0] = num_par_c; pc.dim[pc.nDims-1] = num_par_b; for (int i = 1; i < pc.nDims - 1; i++) pc.dim[i] = 1; int start_idx = std::rand() % (total_devices - num_par_c * num_par_b + 1); start_idx = start_idx - start_idx % num_par_c; for (int i = 0; i < num_par_c * num_par_b; i++) pc.device_ids[i] = start_idx + i; return pc; } bool Linear::is_valid_parallel_config(const FFModel& ff, const ParallelConfig& pc) const { if (!ff.config.enable_parameter_parallel) return Op::is_valid_parallel_config(ff, pc); // Support data and parameter parallel if (pc.nDims != outputs[0].numDim) return false; for (int i = 1; i < pc.nDims-1; i++) if (pc.dim[i] != 1) return false; return true; }
9f84ec45ea66729b0c4a7e9514fafcb3d18bf86e.cu
/* Copyright 2020 Stanford, NVIDIA, Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::dense(const Tensor& input, int outDim, ActiMode activation, bool use_bias, const Op* shared_op, Initializer* kernel_initializer, Initializer* bias_initializer, const char *name) { if (kernel_initializer == NULL) { int seed = std::rand(); kernel_initializer = new GlorotUniform(seed); } if (bias_initializer == NULL) { bias_initializer = new ZeroInitializer(); } Linear *li = new Linear(*this, input, outDim, activation, use_bias, shared_op, kernel_initializer, bias_initializer, name); layers.push_back(li); return li->outputs[0]; } Linear::Linear(FFModel& model, const Tensor& _input, int out_dim, ActiMode _activation, bool _use_bias, const Op* shared_op, Initializer* _kernel_initializer, Initializer* _bias_initializer, const char* name) : Op(model, OP_LINEAR, shared_op, name, _input), in_channels(_input.adim[0]), out_channels(out_dim), activation(_activation), use_bias(_use_bias), kernel_initializer(_kernel_initializer), bias_initializer(_bias_initializer) { numInputs = 1; numOutputs = 1; outputs[0].numDim = _input.numDim; for (int i = 1; i < outputs[0].numDim; i++) outputs[0].adim[i] = _input.adim[i]; outputs[0].adim[0] = out_dim; weights[0].numDim = 2; weights[0].adim[0] = in_channels; weights[0].adim[1] = out_channels; numWeights = 1; if (use_bias) { weights[1].numDim = 1; weights[1].adim[0] = out_channels; numWeights = 2; } } 
void Linear::create_weights(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ create_weights_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim assert(false); } } } template<int NDIM> void Linear::create_weights_with_dim(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname)); #ifdef FF_USE_NCCL ParameterSyncType comm_type = ParameterSyncType::NCCL; #else ParameterSyncType comm_type = ParameterSyncType::PS; #endif // Create kernel tensor { const int dims[2] = {out_channels, in_channels}; weights[0] = model.create_linear_weight<2, NDIM>(this, dims, DT_FLOAT, kernel_initializer, true/*create_grad*/, comm_type); } // Create bias tensor if (use_bias) { const int dims[1] = {out_channels}; weights[1] = model.create_linear_weight<1, NDIM>(this, dims, DT_FLOAT, bias_initializer, true/*create_grad*/, comm_type); assert(numWeights == 2); } else { assert(numWeights == 1); } } void Linear::create_output_and_partition(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ create_output_and_partition_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim for ElementWiseBinary operator assert(false); } } } template<int NDIM> void Linear::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_n = part_rect.hi[NDIM-1] - part_rect.lo[NDIM-1] + 1; int in_dim = inputs[0].adim[0]; assert(in_dim == in_channels); int batch_size = 
inputs[0].adim[NDIM-1]; { int dims[NDIM]; for (int i = 0; i < NDIM; i++) dims[i] = outputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Compute partition bound for input Rect<NDIM> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); // Create replica tensor if (num_par_c > 1) { { Rect<NDIM> extent; for (int i = 1; i < NDIM; i++) { extent.lo[i] = 0; assert(outputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0); extent.hi[i] = outputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1; } extent.lo[0] = 0; extent.hi[0] = in_dim-1; Transform<NDIM, NDIM> transform; for (int i = 0; i < NDIM; i++) for (int j = 0; j < NDIM; j++) transform[i][j] = 0; for (int i = 1; i < NDIM; i++) transform[i][i] = extent.hi[i] + 1; IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); assert(runtime->is_index_partition_complete(ctx, ip)); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); } if (model.config.computationMode == COMP_MODE_TRAINING) { if (NDIM==1) { const int dims[2] = {num_par_c, in_dim}; replica = model.create_linear_replica<2>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else if (NDIM==2) { const int dims[3] = {num_par_c, batch_size, in_dim}; replica = model.create_linear_replica<3>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else if (NDIM==3) { const int dims[4] = {num_par_c, batch_size, inputs[0].adim[1], in_dim}; replica = model.create_linear_replica<4>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else { assert(false && "Unsupported dimension for parallelizing Linear operators" " using the parameter dim."); } // Backward use the same ip as inputs[0] input_grad_lps[0] = inputs[0].part_grad; { IndexSpaceT<NDIM> input_task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(input_rect)); Rect<NDIM+1> extent; 
for (int i = 0; i < NDIM; i++) { extent.lo[i] = 0; assert(inputs[0].adim[i] % (input_rect.hi[i] - input_rect.lo[i] + 1) == 0); extent.hi[i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1) - 1; } extent.lo[NDIM] = 0; extent.hi[NDIM] = num_par_c - 1; Transform<NDIM+1, NDIM> transform; for (int i = 0; i < NDIM+1; i++) for (int j = 0; j < NDIM; j++) transform[i][j] = 0; for (int i = 0; i < NDIM; i++) transform[i][i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1); IndexPartition ip = runtime->create_partition_by_restriction( ctx, replica.region_grad.get_index_space(), input_task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); // Note we use replica.part to save how to partition the replica // to compute input_grad_lps replica.part = runtime->get_logical_partition( ctx, replica.region_grad, ip); } } // if COMP_MODE_TRAINING } else { // when num_par_c == 1 if (input_rect == part_rect) { input_lps[0] = inputs[0].part; if (model.config.computationMode == COMP_MODE_TRAINING) { input_grad_lps[0] = inputs[0].part_grad; } } else { Rect<NDIM> extent; for (int i = 0; i < NDIM; i++) { extent.lo[i] = 0; assert(inputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0); extent.hi[i] = inputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1; } Transform<NDIM, NDIM> transform; for (int i = 0; i < NDIM; i++) for (int j = 0; j < NDIM; j++) { transform[i][j] = 0; if (i==j) transform[i][j] = extent.hi[i] + 1; } IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); if (model.config.computationMode == COMP_MODE_TRAINING) { input_grad_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region_grad, 
ip); } } } } /* regions[0](O): output regions[1](I): kernel regions[2](I): bias */ OpMeta* Linear::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (out_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return init_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } return NULL; } bool Linear::use_cudnn_activation(ActiMode mode) { switch (mode) { case AC_MODE_RELU: case AC_MODE_SIGMOID: case AC_MODE_TANH: return true; } return false; } template<int NDIM> OpMeta* Linear::init_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == task->regions.size()); assert(regions.size() == 2 || regions.size() == 3); const Linear* linear = (Linear*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); //TensorAccessorR<float, 2> acc_input( // regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, NDIM> acc_output( regions[0], task->regions[0], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[1], task->regions[1], FID_DATA, ctx, runtime); // TensorAccessorR<float, 1> acc_bias( // regions[3], task->regions[3], FID_DATA, ctx, runtime); //int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int in_dim = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; printf("init linear (input): in_dim(%d) out_dim(%d) batch_size(%d)\n", in_dim, out_dim, batch_size); LinearMeta* m = new LinearMeta(handle, batch_size); m->activation = linear->activation; m->use_bias = linear->use_bias; m->profiling = linear->profiling; std::strcpy(m->op_name, linear->name); if 
(use_cudnn_activation(m->activation)) { cudnnActivationMode_t mode; switch (linear->activation) { case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; default: // Unsupported activation mode assert(false); } checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode, CUDNN_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, out_dim, 1, 1)); } return m; } void Linear::init(const FFModel& ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ return init_with_dim<DIM>(ff); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::init_with_dim(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); ParallelConfig pc; std::string pcname = name; ff.config.find_parallel_config(NDIM, pcname, pc); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { FFHandler handle = ff.handlers[pc.device_ids[idx++]]; #ifdef FF_USE_NCCL handle.ncclComm = pc.nccl_comms[idx-1]; #endif argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(LINEAR_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); //launcher.add_region_requirement( // RegionRequirement(input_lps[0], 0/*projection id*/, // READ_ONLY, EXCLUSIVE, inputs[0].region)); //launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(1, FID_DATA); // 
launcher.add_region_requirement( // RegionRequirement(weights[1].part, 0/*projection id*/, // READ_ONLY, EXCLUSIVE, weights[1].region)); // launcher.add_field(3, FID_DATA); if (ff.config.computationMode == COMP_MODE_TRAINING) { // Add inputs[0].region_grad to avoid Legion warning launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(2, FID_DATA); } FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /*static*/ void Linear::forward_kernel(const LinearMeta* m, const float* input_ptr, float* output_ptr, const float* kernel_ptr, const float* bias_ptr, int in_dim, int out_dim, int batch_size) { float alpha = 1.0f, beta = 0.0f; checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N, out_dim, batch_size, in_dim, &alpha, kernel_ptr, in_dim, input_ptr, in_dim, &beta, output_ptr, out_dim)); // use_bias = True if (bias_ptr != NULL) { checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N, out_dim, batch_size, 1, &alpha, bias_ptr, 1, m->one_ptr, 1, &alpha, output_ptr, out_dim)); } if (use_cudnn_activation(m->activation)) { checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc, &alpha, m->outputTensor, output_ptr, &beta, m->outputTensor, output_ptr)); } else if (m->activation == AC_MODE_GELU) { size_t elements = (size_t)out_dim * (size_t) batch_size; constexpr float B = 0.7978845608028654f; // sqrt(2.0/M_PI) constexpr float C = 0.035677408136300125f; // 0.044715 * sqrt(2.0/M_PI) gelu_forward_kernel<<<GET_BLOCKS(elements), CUDA_NUM_THREADS>>>( elements, B, C, output_ptr); } else if (m->activation == AC_MODE_NONE) { // Do nothing } else { assert(false && "Unsupported activation for Linear"); } } __host__ void Linear::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime 
*runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return forward_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I); input regions[1](O): output regions[2](I): kernel regions[3](I): bias */ template<int NDIM> void Linear::forward_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); assert(regions.size() == (3 + int(m->use_bias))); assert(task->regions.size() == (3 + int(m->use_bias))); TensorAccessorR<float, NDIM> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, NDIM> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_input.rect.volume() == in_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); const float* acc_bias_ptr = NULL; if (m->use_bias) { TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); assert(acc_bias.rect.volume() == out_dim); acc_bias_ptr = acc_bias.ptr; } cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif Linear::forward_kernel(m, 
acc_input.ptr, acc_output.ptr, acc_kernel.ptr, acc_bias_ptr, in_dim, out_dim, batch_size); if (m->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("%s [Linear] forward time = %.2lfms\n", m->op_name, elapsed); //print_tensor<NDIM, float>(acc_input.ptr, acc_input.rect, "[Linear:forward:input]"); //print_tensor<2, float>(acc_kernel.ptr, acc_kernel.rect, "[Linear:forward:kernel]"); //print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Linear:forward:bias]"); //print_tensor<NDIM, float>(acc_output.ptr, acc_output.rect, "[Linear:forward:output]"); } } void Linear::forward(const FFModel& ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ return forward_with_dim<DIM>(ff); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::forward_with_dim(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(LINEAR_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); if 
(use_bias) { launcher.add_region_requirement( RegionRequirement(weights[1].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); } runtime->execute_index_space(ctx, launcher); } __global__ void sigmoid_backward(float *grad_ptr, const float *output, int n) { CUDA_KERNEL_LOOP(i, n) { grad_ptr[i] = grad_ptr[i] * output[i] * (1 - output[i]); } } /*static*/ void Linear::backward_kernel(const LinearMeta* m, const float* input_ptr, float* input_grad_ptr, const float* output_ptr, float* output_grad_ptr, const float* kernel_ptr, float* kernel_grad_ptr, float* bias_grad_ptr, int in_dim, int out_dim, int batch_size) { float alpha = 1.0f; int output_size = out_dim * batch_size; if (m->activation == AC_MODE_RELU) { reluBackward<<<GET_BLOCKS(output_size), CUDA_NUM_THREADS>>>( output_grad_ptr, output_ptr, output_size); } else if (m->activation == AC_MODE_SIGMOID) { sigmoid_backward<<<GET_BLOCKS(output_size), CUDA_NUM_THREADS>>>( output_grad_ptr, output_ptr, output_size); } else { // TODO: only support relu and sigmoid for now assert(m->activation == AC_MODE_NONE); } // Compute weight gradiant // NOTE: we use alpha=1 for kernel_grad to accumulate gradients checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_T, in_dim, out_dim, batch_size, &alpha, input_ptr, in_dim, output_grad_ptr, out_dim, &alpha, kernel_grad_ptr, in_dim)); // Compute bias gradiant // NOTE: we use alpha=1 for bias_grad to accumulate gradients // use_bias = True if (bias_grad_ptr != NULL) { checkCUDA(cublasSgemv(m->handle.blas, CUBLAS_OP_N, out_dim, batch_size, &alpha, output_grad_ptr, out_dim, m->one_ptr, 1, &alpha, bias_grad_ptr, 1)); } // Compute data gradiant // NOTE: we use alpha=1 for input_grad to accumulate gradients checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_N, in_dim, batch_size, out_dim, &alpha, kernel_ptr, in_dim, output_grad_ptr, out_dim, &alpha, input_grad_ptr, in_dim)); } void Linear::backward_task(const Task *task, const 
std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return backward_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I): input regions[1](I/O): replica_grad or input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): filter regions[5](I/O): filter_grad regions[6](I/O): bias_grad */ template<int NDIM> __host__ void Linear::backward_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); assert(regions.size() == (6 + int(m->use_bias))); assert(task->regions.size() == (6 + int(m->use_bias))); float* input_grad = NULL; TensorAccessorR<float, NDIM> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorR<float, NDIM> acc_output( regions[2], task->regions[2], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; Domain domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); if (domain.get_dim() == NDIM+1) { assert(domain.get_volume() == in_dim * batch_size); input_grad = helperGetTensorPointerWO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); } else { TensorAccessorW<float, NDIM> acc_replica_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime, true/*readOutput*/); assert(acc_replica_grad.rect.volume() == in_dim * batch_size); input_grad = acc_replica_grad.ptr; } TensorAccessorW<float, NDIM> acc_output_grad( regions[3], task->regions[3], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 2> 
acc_kernel( regions[4], task->regions[4], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> acc_kernel_grad( regions[5], task->regions[5], FID_DATA, ctx, runtime, true/*readOutput*/); // make sure the sizes match assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_output_grad.rect.volume() == out_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); assert(acc_kernel_grad.rect.volume() == in_dim * out_dim); float* acc_bias_grad_ptr = NULL; if (m->use_bias) { TensorAccessorW<float, 1> acc_bias_grad( regions[6], task->regions[6], FID_DATA, ctx, runtime, true/*readOutput*/); assert(acc_bias_grad.rect.volume() == out_dim); acc_bias_grad_ptr = static_cast<float*>(acc_bias_grad.ptr); } cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif Linear::backward_kernel(m, acc_input.ptr, input_grad, acc_output.ptr, acc_output_grad.ptr, acc_kernel.ptr, acc_kernel_grad.ptr, acc_bias_grad_ptr, in_dim, out_dim, batch_size); if (m->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("Linear backward time = %.2lfms\n", elapsed); //print_tensor<NDIM, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Linear:backward:output_grad]"); //print_tensor<2, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Linear:backward:kernel_grad]"); //print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Linear:backward:bias_grad]"); //print_tensor<2, float>(input_grad, acc_input.rect, "[Linear:backward:input_grad]"); } } void Linear::backward2_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, 
Runtime *runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return backward2_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I/O): input_grad regions[1](I): replicas */ template<int NDIM> __host__ void Linear::backward2_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //const LinearMeta* m = *((LinearMeta**) task->local_args); TensorAccessorW<float, NDIM> acc_input_grad( regions[0], task->regions[0], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 3> acc_replica( regions[1], task->regions[1], FID_DATA, ctx, runtime); assert(acc_input_grad.rect.hi[0] == acc_replica.rect.hi[0]); assert(acc_input_grad.rect.lo[0] == acc_replica.rect.lo[0]); assert(acc_input_grad.rect.hi[1] == acc_replica.rect.hi[1]); assert(acc_input_grad.rect.lo[1] == acc_replica.rect.lo[1]); //#ifndef DISABLE_LEGION_CUDA_HIJACK // cudaStream_t stream; // checkCUDA(cudaStreamCreate(&stream)); // checkCUDA(cublasSetStream(m->handle.blas, stream)); // checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); //#endif int num_replica = acc_replica.rect.hi[NDIM] - acc_replica.rect.lo[NDIM] + 1; const float *replica_ptr = acc_replica.ptr; for (int i = 0; i < num_replica; i++) { size_t num_elements = acc_input_grad.rect.volume(); apply_add_with_scale<<<GET_BLOCKS(num_elements), CUDA_NUM_THREADS>>>( acc_input_grad.ptr, replica_ptr, num_elements, 1.0f); replica_ptr += acc_input_grad.rect.volume(); } } void Linear::backward(const FFModel& ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ return backward_with_dim<DIM>(ff); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::backward_with_dim(const FFModel& ff) { Context ctx = ff.config.lg_ctx; 
Runtime* runtime = ff.config.lg_hlr; { ArgumentMap argmap; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(LINEAR_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](I/O): replica_grad if (replica.region_grad != LogicalRegion::NO_REGION) { launcher.add_region_requirement( RegionRequirement(replica.part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(1, FID_DATA); } else { launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); } // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(4, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(5, FID_DATA); if (use_bias) { // regions[6](I/O): bias_grad launcher.add_region_requirement( RegionRequirement(weights[1].part_grad, 
0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(6, FID_DATA); } runtime->execute_index_space(ctx, launcher); } if (replica.region_grad != LogicalRegion::NO_REGION) { // We aggregate parameters from replica tensor to input tensor // Note we use input's task_is to reduce extra data transfers ArgumentMap argmap; Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part_grad.get_index_partition()); IndexSpaceT<2> input_task_is = IndexSpaceT<2>(ff.get_task_is(input_rect)); IndexLauncher launcher(LINEAR_BWD2_TASK_ID, input_task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(inputs[0].owner_op->name))); launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(0, FID_DATA); // Note that replica.part save's a partition of replica.region_grad launcher.add_region_requirement( RegionRequirement(replica.part, 0/*partition id*/, READ_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } } /* __host__ Parameter* Linear::get_parameter(int index) { if (index == 0) { return &weights[0]; } else if (index == 1){ return &weights[1]; } else { assert(0); return NULL; } } */ __host__ void Linear::print_layer(const FFModel& ff) { printf("linear layer\n"); Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region); kernel_req.add_field(FID_DATA); InlineLauncher kernel_launcher(kernel_req); PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher); kernel_region.wait_until_valid(); RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region); bias_req.add_field(FID_DATA); InlineLauncher bias_launcher(bias_req); PhysicalRegion bias_region = 
runtime->map_region(ctx, bias_launcher); bias_region.wait_until_valid(); TensorAccessorW<float, 2> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true); TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true); const float *kernel_ptr = acc_kernel.ptr; const float *bias_ptr = acc_bias.ptr; size_t kernel_size = acc_kernel.rect.volume(); int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1; size_t bias_size = acc_bias.rect.volume(); printf("kernel, %p, %zu, [%d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2); printf("bias, %p, %zu\n", bias_ptr, bias_size); for (int i = 0; i < bias_size; i++) { printf("%f ", bias_ptr[i]); } printf("\n"); for (int i = 0; i < kernel_size; i++) { printf("%f ", kernel_ptr[i]); } printf("\n"); runtime->unmap_region(ctx, kernel_region); runtime->unmap_region(ctx, bias_region); } LinearMeta::LinearMeta(FFHandler handler, int batch_size) : OpMeta(handler) { // Allocate an all-one's vector float* dram_one_ptr = (float *) malloc(sizeof(float) * batch_size); for (int i = 0; i < batch_size; i++) dram_one_ptr[i] = 1.0f; float* fb_one_ptr; checkCUDA(cudaMalloc(&fb_one_ptr, sizeof(float) * batch_size)); checkCUDA(cudaMemcpy(fb_one_ptr, dram_one_ptr, sizeof(float) * batch_size, cudaMemcpyHostToDevice)); one_ptr = (const float*) fb_one_ptr; // Allocate descriptors checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); } bool Linear::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { Tensor sub_output, sub_input; if (!outputs[0].get_output_sub_tensor(pc, sub_output, OP_LINEAR)) return false; if (!inputs[0].get_input_sub_tensor(pc, sub_input, OP_LINEAR)) return false; int input_c = sub_input.adim[0]; int input_n = sub_input.get_volume() / input_c; int output_c = sub_output.adim[0]; int output_n = 
sub_output.get_volume() / output_c; LinearMeta* m = sim->linear_meta; m->activation = activation; if (use_cudnn_activation(m->activation)) { cudnnActivationMode_t mode; switch (activation) { case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; default: // Unsupported activation mode assert(false); } checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode, CUDNN_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, 1, 1)); } // allocate tensors in simulator sim->free_all(); float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert(input_ptr != NULL); float *output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert(output_ptr != NULL); float* kernel_ptr = (float*)sim->allocate((size_t)output_c * input_c, DT_FLOAT); assert(kernel_ptr != NULL); float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_ptr != NULL); std::function<void()> forward, backward; forward = [&] { forward_kernel(m, input_ptr, output_ptr, kernel_ptr, bias_ptr, input_c, output_c, input_n); }; if (sim->computationMode == COMP_MODE_TRAINING) { float* input_grad_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); float *output_grad_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); float* kernel_grad_ptr = (float*)sim->allocate((size_t)output_c * input_c, DT_FLOAT); float* bias_grad_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_grad_ptr != NULL); backward = [&] { backward_kernel(m, input_ptr, input_grad_ptr, output_ptr, output_grad_ptr, kernel_ptr, kernel_grad_ptr, bias_grad_ptr, input_c, output_c, input_n); }; } inner_measure_operator_cost(sim, forward, backward, cost_metrics); if (sim->computationMode == COMP_MODE_TRAINING) { printf("[Measure Linear] name(%s) in(%d %d) out(%d %d) forward_time(%.4lf) backward_time(%.4lf)\n", name, input_n, input_c, output_n, 
output_c, cost_metrics.forward_time, cost_metrics.backward_time); } else { printf("[Measure Linear] name(%s) in(%d %d) out(%d %d) forward_time(%.4lf)\n", name, input_n, input_c, output_n, output_c, cost_metrics.forward_time); } return true; } ParallelConfig Linear::get_random_parallel_config(const FFModel& ff) const { if (!ff.config.enable_parameter_parallel) return Op::get_random_parallel_config(ff); std::vector<int> batch_candidates; std::vector<int> channel_candidates; int batch = outputs[0].adim[outputs[0].numDim-1]; int channel = outputs[0].adim[0]; int total_devices = ff.config.workersPerNode * ff.config.numNodes; for (int i = 1; i <= ff.config.workersPerNode; i++) if (channel % i == 0) for (int j = 1; i * j <= total_devices; j++) if (batch % j == 0) { batch_candidates.push_back(j); channel_candidates.push_back(i); } assert(batch_candidates.size() > 0); int idx = std::rand() % batch_candidates.size(); int num_par_c = channel_candidates[idx]; int num_par_b = batch_candidates[idx]; ParallelConfig pc; pc.device_type = ParallelConfig::GPU; pc.nDims = outputs[0].numDim; pc.dim[0] = num_par_c; pc.dim[pc.nDims-1] = num_par_b; for (int i = 1; i < pc.nDims - 1; i++) pc.dim[i] = 1; int start_idx = std::rand() % (total_devices - num_par_c * num_par_b + 1); start_idx = start_idx - start_idx % num_par_c; for (int i = 0; i < num_par_c * num_par_b; i++) pc.device_ids[i] = start_idx + i; return pc; } bool Linear::is_valid_parallel_config(const FFModel& ff, const ParallelConfig& pc) const { if (!ff.config.enable_parameter_parallel) return Op::is_valid_parallel_config(ff, pc); // Support data and parameter parallel if (pc.nDims != outputs[0].numDim) return false; for (int i = 1; i < pc.nDims-1; i++) if (pc.dim[i] != 1) return false; return true; }
d6e49a46a93bd3dc3305817e97d1a21c609088ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdlib.h> #include "../Helpers.h" #include "device_launch_parameters.h" //__constant__ int warpSize; //__constant__ int perlin_A[256]; #define FULL_MASK 0xffffffff __constant__ int N_size; int _N_size; __global__ void CUDAgameLoop(bool* data, int currentGen) { //Indexing similiar to 2d flattening //Treating blocks as 2nd dim (y), and thread as 1st dim (x) const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y < N && x < N) { bool isAlive = data[((currentGen - 1) * N + y) * N + x]; unsigned int totalNeighbours = 0; for (int i = (y - 1 > 0) ? y - 1 : 0; i < ((y + 2 < N) ? y + 2 : N); i++) for (int j = (x - 1 > 0) ? x - 1 : 0; j < ((x + 2 < N) ? x + 2 : N); j++) { totalNeighbours += data[((currentGen - 1) * N + i) * N + j]; } //Attempt manual looping in cardinal directions to test speed totalNeighbours -= isAlive; bool currentStatus = (isAlive && !(totalNeighbours < 2 || totalNeighbours > 3)) || (totalNeighbours == 3); data[(currentGen * N + y) * N + x] = currentStatus; //data[((currentGen * N + y) * N + x) / warpSize] = currentStatus; //For bit packing indexing } } __global__ void CUDA_GL_gameLoop(bool* prevGen, bool* nextGen, uchar4* gl_buffer) { //It is possible to place the const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y < N && x < N) { bool isAlive = prevGen[y * N + x]; unsigned int totalNeighbours = 0; for (int i = (y - 1 > 0) ? y - 1 : 0; i < ((y + 2 < N) ? y + 2 : N); i++) for (int j = (x - 1 > 0) ? x - 1 : 0; j < ((x + 2 < N) ? 
x + 2 : N); j++) { totalNeighbours += prevGen[i * N + j]; } //Attempt manual looping in cardinal directions to test speed totalNeighbours -= isAlive; bool currentStatus = (isAlive && !(totalNeighbours < 2 || totalNeighbours > 3)) || (totalNeighbours == 3); nextGen[y * N + x] = currentStatus; //printf("x, y: %d, %d | status: %d\n", x, y, currentStatus); gl_buffer[y * N + x].w = 255; gl_buffer[y * N + x].x = 255 * currentStatus; gl_buffer[y * N + x].y = 255 * currentStatus; gl_buffer[y * N + x].z = 255 * currentStatus; //gl_buffer[y * N + x].x = (x % 12 ==0) * 255; //gl_buffer[y * N + x].y = (x % 12 == 0) * 255; //gl_buffer[y * N + x].z = (x % 12 == 0) * 255; } } __global__ void BPKernel(uint32_t* data, int currentGen) { //Only works if BLOCKSIZE is 32 __shared__ bool packing[32]; const uint16_t tid = threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + tid; uint32_t cell = data[(((currentGen - 1) * N + y) * N + x) / BP]; bool isAlive = (cell & (1 << (x % 32))) != 0; int totalNeighbours = 0; for (int i = (y - 1 > 0) ? y - 1 : 0; i < ((y + 2 < N) ? y + 2 : N); i++) for (int j = (x - 1 > 0) ? x - 1 : 0; j < ((x + 2 < N) ? 
x + 2 : N); j++) { uint32_t cell = data[(((currentGen - 1) * N + i) * N + j) / BP]; totalNeighbours += (cell & (1 << (j % 32))) != 0; } totalNeighbours -= isAlive; bool currentStatus = (isAlive && !(totalNeighbours < 2 || totalNeighbours > 3)) || (totalNeighbours == 3); packing[tid] = currentStatus; //printf("x, y: %d, %d | status: %d\n", x, y, currentStatus); //HERE all threads in block sync to submit the mask to the final Buffer data[((currentGen * N + y) * N + x) / BP] = __ballot_sync(FULL_MASK, packing[tid]); //https://stackoverflow.com/questions/39488441/how-to-pack-bits-efficiently-in-cuda/39488714#39488714 printf("x, y: % d, % d | status : %d, wasAlive: %d | total: %d\n", x, y, currentStatus, isAlive, data[((currentGen * N + y) * N + x) / BP]); } void printDevProp(hipDeviceProp_t devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %u\n", devProp.totalGlobalMem); printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %u\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %u\n", devProp.totalConstMem); printf("Texture alignment: %u\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? 
"Yes" : "No")); return; } //void printDeviceMemory(void* deviceptr) //{ // size_t size; // hipMemGetAddressRange(NULL, size, deviceptr); //} void runKernel(int gen, bool* device_A) { dim3 blockSize(BLOCKSIZE, BLOCKSIZE); dim3 gridSize(N / BLOCKSIZE, N / BLOCKSIZE); //#pragma warning disable E0029 CUDAgameLoop << <gridSize, blockSize >> > (device_A, gen); } void runBP(int gen, uint32_t* device_A) { dim3 blockSize(BLOCKSIZE, BLOCKSIZE); dim3 gridSize(N / BLOCKSIZE, N / BLOCKSIZE); //#pragma warning disable E0029 BPKernel << <gridSize, blockSize >> > (device_A, gen); } void render(cudaGraphicsResource* cuda_pbo_resource, bool* prevGen, bool* currGen) { uchar4* d_out = 0; hipGraphicsMapResources(1, &cuda_pbo_resource, 0); hipGraphicsResourceGetMappedPointer((void**)&d_out, NULL, cuda_pbo_resource); dim3 blockSize(BLOCKSIZE, BLOCKSIZE); dim3 gridSize(N / BLOCKSIZE, N / BLOCKSIZE); CUDA_GL_gameLoop << <gridSize, blockSize >> > (prevGen, currGen, d_out); hipDeviceSynchronize(); hipGraphicsUnmapResources(1, &cuda_pbo_resource, 0); }
d6e49a46a93bd3dc3305817e97d1a21c609088ec.cu
#include "cuda_runtime.h" #include <iostream> #include <stdlib.h> #include "../Helpers.h" #include "device_launch_parameters.h" //__constant__ int warpSize; //__constant__ int perlin_A[256]; #define FULL_MASK 0xffffffff __constant__ int N_size; int _N_size; __global__ void CUDAgameLoop(bool* data, int currentGen) { //Indexing similiar to 2d flattening //Treating blocks as 2nd dim (y), and thread as 1st dim (x) const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y < N && x < N) { bool isAlive = data[((currentGen - 1) * N + y) * N + x]; unsigned int totalNeighbours = 0; for (int i = (y - 1 > 0) ? y - 1 : 0; i < ((y + 2 < N) ? y + 2 : N); i++) for (int j = (x - 1 > 0) ? x - 1 : 0; j < ((x + 2 < N) ? x + 2 : N); j++) { totalNeighbours += data[((currentGen - 1) * N + i) * N + j]; } //Attempt manual looping in cardinal directions to test speed totalNeighbours -= isAlive; bool currentStatus = (isAlive && !(totalNeighbours < 2 || totalNeighbours > 3)) || (totalNeighbours == 3); data[(currentGen * N + y) * N + x] = currentStatus; //data[((currentGen * N + y) * N + x) / warpSize] = currentStatus; //For bit packing indexing } } __global__ void CUDA_GL_gameLoop(bool* prevGen, bool* nextGen, uchar4* gl_buffer) { //It is possible to place the const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y < N && x < N) { bool isAlive = prevGen[y * N + x]; unsigned int totalNeighbours = 0; for (int i = (y - 1 > 0) ? y - 1 : 0; i < ((y + 2 < N) ? y + 2 : N); i++) for (int j = (x - 1 > 0) ? x - 1 : 0; j < ((x + 2 < N) ? 
x + 2 : N); j++) { totalNeighbours += prevGen[i * N + j]; } //Attempt manual looping in cardinal directions to test speed totalNeighbours -= isAlive; bool currentStatus = (isAlive && !(totalNeighbours < 2 || totalNeighbours > 3)) || (totalNeighbours == 3); nextGen[y * N + x] = currentStatus; //printf("x, y: %d, %d | status: %d\n", x, y, currentStatus); gl_buffer[y * N + x].w = 255; gl_buffer[y * N + x].x = 255 * currentStatus; gl_buffer[y * N + x].y = 255 * currentStatus; gl_buffer[y * N + x].z = 255 * currentStatus; //gl_buffer[y * N + x].x = (x % 12 ==0) * 255; //gl_buffer[y * N + x].y = (x % 12 == 0) * 255; //gl_buffer[y * N + x].z = (x % 12 == 0) * 255; } } __global__ void BPKernel(uint32_t* data, int currentGen) { //Only works if BLOCKSIZE is 32 __shared__ bool packing[32]; const uint16_t tid = threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + tid; uint32_t cell = data[(((currentGen - 1) * N + y) * N + x) / BP]; bool isAlive = (cell & (1 << (x % 32))) != 0; int totalNeighbours = 0; for (int i = (y - 1 > 0) ? y - 1 : 0; i < ((y + 2 < N) ? y + 2 : N); i++) for (int j = (x - 1 > 0) ? x - 1 : 0; j < ((x + 2 < N) ? 
x + 2 : N); j++) { uint32_t cell = data[(((currentGen - 1) * N + i) * N + j) / BP]; totalNeighbours += (cell & (1 << (j % 32))) != 0; } totalNeighbours -= isAlive; bool currentStatus = (isAlive && !(totalNeighbours < 2 || totalNeighbours > 3)) || (totalNeighbours == 3); packing[tid] = currentStatus; //printf("x, y: %d, %d | status: %d\n", x, y, currentStatus); //HERE all threads in block sync to submit the mask to the final Buffer data[((currentGen * N + y) * N + x) / BP] = __ballot_sync(FULL_MASK, packing[tid]); //https://stackoverflow.com/questions/39488441/how-to-pack-bits-efficiently-in-cuda/39488714#39488714 printf("x, y: % d, % d | status : %d, wasAlive: %d | total: %d\n", x, y, currentStatus, isAlive, data[((currentGen * N + y) * N + x) / BP]); } void printDevProp(cudaDeviceProp devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %u\n", devProp.totalGlobalMem); printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %u\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %u\n", devProp.totalConstMem); printf("Texture alignment: %u\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? 
"Yes" : "No")); return; } //void printDeviceMemory(void* deviceptr) //{ // size_t size; // cuMemGetAddressRange(NULL, size, deviceptr); //} void runKernel(int gen, bool* device_A) { dim3 blockSize(BLOCKSIZE, BLOCKSIZE); dim3 gridSize(N / BLOCKSIZE, N / BLOCKSIZE); //#pragma warning disable E0029 CUDAgameLoop << <gridSize, blockSize >> > (device_A, gen); } void runBP(int gen, uint32_t* device_A) { dim3 blockSize(BLOCKSIZE, BLOCKSIZE); dim3 gridSize(N / BLOCKSIZE, N / BLOCKSIZE); //#pragma warning disable E0029 BPKernel << <gridSize, blockSize >> > (device_A, gen); } void render(cudaGraphicsResource* cuda_pbo_resource, bool* prevGen, bool* currGen) { uchar4* d_out = 0; cudaGraphicsMapResources(1, &cuda_pbo_resource, 0); cudaGraphicsResourceGetMappedPointer((void**)&d_out, NULL, cuda_pbo_resource); dim3 blockSize(BLOCKSIZE, BLOCKSIZE); dim3 gridSize(N / BLOCKSIZE, N / BLOCKSIZE); CUDA_GL_gameLoop << <gridSize, blockSize >> > (prevGen, currGen, d_out); cudaDeviceSynchronize(); cudaGraphicsUnmapResources(1, &cuda_pbo_resource, 0); }
7d4cd1940755ee0b71bcf43b7469b8aa92697056.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date @author Ahmad Abdelfattah */ #include <hip/hip_runtime.h> // for TORCH_HIP_VERSION #include "magma_internal.h" #define BLK_X 32 #define BLK_Y 4 /******************************************************************************/ __device__ void hlag2s_device( int m, int n, magmaHalf_const_ptr A, int lda, float *SA, int ldsa ) { #if TORCH_HIP_VERSION >= 7500 const int gtx = blockIdx.x * BLK_X + threadIdx.x; const int gty = blockIdx.y * BLK_Y + threadIdx.y; for(int j = 0; j < n; j+= gridDim.y) { const int gty_ = gty + j; for(int i = 0; i < m; i+= gridDim.x) { const int gtx_ = gtx + i; if(gtx_ < m && gty_ < n) { SA[gty_ * ldsa + gtx_] = __half2float( A[gty_ * lda + gtx_] ); } } } #endif } /******************************************************************************/ __global__ void hlag2s_kernel( int m, int n, magmaHalf_const_ptr dA, int lda, float *dSA, int ldsa ) { hlag2s_device(m, n, dA, lda, dSA, ldsa); } /******************************************************************************/ __global__ void hlag2s_kernel_batched( int m, int n, magmaHalf const * const * dAarray, int lda, float** dSAarray, int ldsa ) { const int batchid = blockIdx.z; hlag2s_device(m, n, dAarray[batchid], lda, dSAarray[batchid], ldsa); } /******************************************************************************/ extern "C" void magmablas_hlag2s( magma_int_t m, magma_int_t n, magmaHalf_const_ptr dA, magma_int_t lda, float *dSA, magma_int_t ldsa, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( lda < max(1,m) ) info = -4; else if ( ldsa < max(1,m) ) info = -6; if (info != 0) { magma_xerbla( __func__, -(info) ); return; } /* quick return */ if ( m == 0 || n == 0 ) { return; } const int max_gridy = 65000; // the kernel can work with any gridx/gridy 
dimension dim3 threads( BLK_X, BLK_Y ); dim3 grid(magma_ceildiv( m, BLK_X ), min(max_gridy, magma_ceildiv(n, BLK_Y)), 1); hipLaunchKernelGGL(( hlag2s_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, lda, dSA, ldsa ); } /******************************************************************************/ extern "C" void magmablas_hlag2s_batched( magma_int_t m, magma_int_t n, magmaHalf const * const * dAarray, magma_int_t lda, float **dSAarray, magma_int_t ldsa, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( lda < max(1,m) ) info = -4; else if ( ldsa < max(1,m) ) info = -6; if (info != 0) { magma_xerbla( __func__, -(info) ); return; } /* quick return */ if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, BLK_Y ); const int maxBatch = queue->get_maxBatch(); for(int i = 0; i < batchCount; i+=maxBatch){ magma_int_t batch = min(maxBatch, batchCount-i); dim3 grid(magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batch); hipLaunchKernelGGL(( hlag2s_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dAarray + i, lda, dSAarray + i, ldsa); } }
7d4cd1940755ee0b71bcf43b7469b8aa92697056.cu
/* -- MAGMA (version 2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date @author Ahmad Abdelfattah */ #include <cuda.h> // for CUDA_VERSION #include "magma_internal.h" #define BLK_X 32 #define BLK_Y 4 /******************************************************************************/ __device__ void hlag2s_device( int m, int n, magmaHalf_const_ptr A, int lda, float *SA, int ldsa ) { #if CUDA_VERSION >= 7500 const int gtx = blockIdx.x * BLK_X + threadIdx.x; const int gty = blockIdx.y * BLK_Y + threadIdx.y; for(int j = 0; j < n; j+= gridDim.y) { const int gty_ = gty + j; for(int i = 0; i < m; i+= gridDim.x) { const int gtx_ = gtx + i; if(gtx_ < m && gty_ < n) { SA[gty_ * ldsa + gtx_] = __half2float( A[gty_ * lda + gtx_] ); } } } #endif } /******************************************************************************/ __global__ void hlag2s_kernel( int m, int n, magmaHalf_const_ptr dA, int lda, float *dSA, int ldsa ) { hlag2s_device(m, n, dA, lda, dSA, ldsa); } /******************************************************************************/ __global__ void hlag2s_kernel_batched( int m, int n, magmaHalf const * const * dAarray, int lda, float** dSAarray, int ldsa ) { const int batchid = blockIdx.z; hlag2s_device(m, n, dAarray[batchid], lda, dSAarray[batchid], ldsa); } /******************************************************************************/ extern "C" void magmablas_hlag2s( magma_int_t m, magma_int_t n, magmaHalf_const_ptr dA, magma_int_t lda, float *dSA, magma_int_t ldsa, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( lda < max(1,m) ) info = -4; else if ( ldsa < max(1,m) ) info = -6; if (info != 0) { magma_xerbla( __func__, -(info) ); return; } /* quick return */ if ( m == 0 || n == 0 ) { return; } const int max_gridy = 65000; // the kernel can work with any gridx/gridy dimension dim3 threads( BLK_X, BLK_Y ); dim3 grid(magma_ceildiv( m, BLK_X ), 
min(max_gridy, magma_ceildiv(n, BLK_Y)), 1); hlag2s_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, lda, dSA, ldsa ); } /******************************************************************************/ extern "C" void magmablas_hlag2s_batched( magma_int_t m, magma_int_t n, magmaHalf const * const * dAarray, magma_int_t lda, float **dSAarray, magma_int_t ldsa, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( lda < max(1,m) ) info = -4; else if ( ldsa < max(1,m) ) info = -6; if (info != 0) { magma_xerbla( __func__, -(info) ); return; } /* quick return */ if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, BLK_Y ); const int maxBatch = queue->get_maxBatch(); for(int i = 0; i < batchCount; i+=maxBatch){ magma_int_t batch = min(maxBatch, batchCount-i); dim3 grid(magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batch); hlag2s_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dAarray + i, lda, dSAarray + i, ldsa); } }
8ff2e1510abb44a81ee196c24c2ba76e020c6e8c.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/types.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> namespace { template <typename scalar_t> __forceinline__ __device__ scalar_t sigmoidf(scalar_t x) { return (scalar_t)1.f / ((scalar_t)1.f + expf(-x)); } template <typename scalar_t> __forceinline__ __device__ scalar_t calc_activation(int type, scalar_t x) { return type ? (scalar_t)tanhf(x) : x; } template <typename scalar_t> __forceinline__ __device__ scalar_t calc_grad_activation(int type, scalar_t x) { return type ? ((scalar_t)1.f - x * x) : (scalar_t)1.f; } template <typename scalar_t> __global__ void sru_cuda_forward_kernel( scalar_t* __restrict__ h, scalar_t* __restrict__ c, const scalar_t* __restrict__ u, const scalar_t* __restrict__ x, const scalar_t* __restrict__ weight_c, const scalar_t* __restrict__ bias, const scalar_t* __restrict__ init, const scalar_t* __restrict__ mask_c, const unsigned char* __restrict__ mask_pad, const int len, const int batch, const int d, const int k, const int activation_type, const int skip_type, const int is_custom) { assert ((skip_type >= 0) && (skip_type <= 2)); assert ((skip_type != 1) || (k == 3)); assert ((skip_type != 2) || (k == 4)); const int ncols = batch*d; const int col = blockIdx.x * blockDim.x + threadIdx.x; if (col >= ncols) return; const int ncols_u = ncols*k; const int ncols_x = (k == 3) ? ncols : ncols_u; const auto* vp1 = is_custom ? (weight_c + col*2) : (weight_c + (col%d)); const auto* vp2 = is_custom ? (weight_c + col*2 + 1) : (weight_c + (col%d) + d); const auto bias1 = *(bias + (col%d)); const auto bias2 = *(bias + (col%d) + d); const auto mask = (mask_c == NULL) ? (scalar_t)1.f : (*(mask_c + col)); auto cur = *(init + col); const auto* up = u + (col*k); const auto* xp = (skip_type == 0) ? NULL : ((skip_type == 1) ? (x + col) : (up + 3)); const unsigned char* pad_p = (mask_pad == NULL) ? 
NULL : (mask_pad + (col/d)); auto* cp = c + col; auto* hp = h + col; for (int row = 0; row < len; ++row) { if ((pad_p == NULL) || !(*pad_p)) { const auto u0 = *up; const auto u1 = *(up + 1); const auto u2 = *(up + 2); const auto wc1 = *vp1; const auto wc2 = *vp2; const auto x_val = (skip_type) ? (*xp) : (scalar_t)0.f; const auto g1 = sigmoidf(u1 + wc1*cur + bias1); const auto g2 = sigmoidf(u2 + wc2*cur + bias2); cur = (cur-u0)*g1 + u0; const auto val = calc_activation(activation_type, cur); *hp = skip_type ? ((val - x_val) * mask * g2 + x_val) : (val * mask * g2); } //else { // *hp = 0; // output 0 for a pad token //} *cp = cur; // useful for backward up += ncols_u; cp += ncols; hp += ncols; xp = skip_type ? (xp + ncols_x) : NULL; pad_p = mask_pad ? (pad_p + batch) : NULL; vp1 = is_custom ? (vp1 + ncols*2) : vp1; vp2 = is_custom ? (vp2 + ncols*2) : vp2; } } template <typename scalar_t> __global__ void sru_cuda_backward_kernel( scalar_t* __restrict__ grad_u, scalar_t* __restrict__ grad_x, scalar_t* __restrict__ grad_wc, scalar_t* __restrict__ grad_bias, scalar_t* __restrict__ grad_init, const scalar_t* __restrict__ u, const scalar_t* __restrict__ x, const scalar_t* __restrict__ weight_c, const scalar_t* __restrict__ bias, const scalar_t* __restrict__ init, const scalar_t* __restrict__ mask_c, const unsigned char * __restrict__ mask_pad, const scalar_t* __restrict__ c, const scalar_t* __restrict__ grad_h, const scalar_t* __restrict__ grad_last, const int len, const int batch, const int d, const int k, const int activation_type, const int skip_type, const int is_custom) { assert ((skip_type >= 0) && (skip_type <= 2)); assert ((skip_type != 1) || (k == 3)); assert ((skip_type != 2) || (k == 4)); const int ncols = batch*d; const int col = blockIdx.x * blockDim.x + threadIdx.x; if (col >= ncols) return; const int ncols_u = ncols*k; const int ncols_x = (k == 3) ? ncols : ncols_u; const auto* vp1 = is_custom ? 
(weight_c + col*2 + (len-1)*ncols*2) : (weight_c + (col%d)); const auto* vp2 = is_custom ? (weight_c + col*2 + 1 + (len-1)*ncols*2) : (weight_c + (col%d) + d); auto* gvp1 = is_custom ? (grad_wc + col*2 + (len-1)*ncols*2) : (grad_wc + col); auto* gvp2 = is_custom ? (grad_wc + col*2 + 1 + (len-1)*ncols*2) : (grad_wc + col + ncols); const auto bias1 = *(bias + (col%d)); const auto bias2 = *(bias + (col%d) + d); const auto mask = (mask_c == NULL) ? (scalar_t)1.f : (*(mask_c + col)); scalar_t gwc1 = 0; scalar_t gwc2 = 0; scalar_t gbias1 = 0; scalar_t gbias2 = 0; auto cur = *(grad_last + col); const auto* up = u + (col*k) + (len-1)*ncols_u; const auto* xp = (skip_type == 0) ? NULL : ( (skip_type == 1) ? (x + col + (len-1)*ncols) : (up + 3) ); const auto* cp = c + col + (len-1)*ncols; const auto* ghp = grad_h + col + (len-1)*ncols; const unsigned char* pad_p = (mask_pad == NULL) ? NULL : (mask_pad + (col/d) + (len-1)*batch); auto* gup = grad_u + (col*k) + (len-1)*ncols_u; auto* gxp = (skip_type == 0) ? NULL : ( (skip_type == 1) ? (grad_x + col + (len-1)*ncols) : (gup + 3) ); for (int row = len-1; row >= 0; --row) { if ((pad_p == NULL) || !(*pad_p)) { const auto prev_c_val = (row>0) ? (*(cp-ncols)) : (*(init+col)); const auto cp_val = *cp; const auto u0 = *up; const auto u1 = *(up + 1); const auto u2 = *(up + 2); const auto wc1 = *vp1; const auto wc2 = *vp2; const auto x_val = (skip_type) ? 
(*xp) : (scalar_t)0.f; const auto gh_val = *ghp; const auto g1 = sigmoidf(u1 + wc1*prev_c_val + bias1); const auto g2 = sigmoidf(u2 + wc2*prev_c_val + bias2); const auto c_val = calc_activation(activation_type, cp_val); // h = c*g2 + x*(1-g2) = (c-x)*g2 + x // c = c'*g1 + u0*(1-g1) = (c'-u0)*g1 + g0 // gradient with respect to values in the second gate g2 const auto gg2 = gh_val*(c_val-x_val)*mask*(g2*(1.f-g2)); gbias2 += gg2; gwc2 += gg2*prev_c_val; *gvp2 = gg2*prev_c_val; // gradient with respect to c[t] const auto tmp = g2*calc_grad_activation(activation_type, c_val); const auto gc = gh_val*mask*tmp + cur; // gradient with respect to values in the first gate g1 const auto gg1 = gc*(prev_c_val-u0)*(g1*(1.f-g1)); gbias1 += gg1; gwc1 += gg1*prev_c_val; *gvp1 = gg1*prev_c_val; // gradient with respect to c[t-1] cur = gc*g1 + gg1*wc1 + gg2*wc2; // gradient with respect to U *gup = gc*(1.f-g1); *(gup + 1) = gg1; *(gup + 2) = gg2; // gradient with respect to x[t] if (skip_type) *gxp = gh_val*(1.f-g2*mask); } up -= ncols_u; cp -= ncols; gup -= ncols_u; ghp -= ncols; xp = skip_type ? (xp - ncols_x) : NULL; gxp = skip_type ? (gxp - ncols_x) : NULL; pad_p = mask_pad ? (pad_p - batch) : NULL; vp1 = is_custom ? (vp1 - ncols*2) : vp1; vp2 = is_custom ? (vp2 - ncols*2) : vp2; gvp1 = is_custom ? (gvp1 - ncols*2) : gvp1; gvp2 = is_custom ? 
(gvp2 - ncols*2) : gvp2; } //const int bias_idx = col % d; //atomicAdd(grad_wc + bias_idx, gwc1); //atomicAdd(grad_wc + bias_idx + d, gwc2); //atomicAdd(grad_bias + bias_idx, gbias1); //atomicAdd(grad_bias + bias_idx + d, gbias2); if (!is_custom) { *(grad_wc + col) = gwc1; *(grad_wc + col + ncols) = gwc2; } *(grad_bias + col) = gbias1; *(grad_bias + col + ncols) = gbias2; *(grad_init + col) = cur; } template <typename scalar_t> __global__ void sru_cuda_bi_forward_kernel( scalar_t* __restrict__ h, scalar_t* __restrict__ c, const scalar_t* __restrict__ u, const scalar_t* __restrict__ x, const scalar_t* __restrict__ weight_c, const scalar_t* __restrict__ bias, const scalar_t* __restrict__ init, const scalar_t* __restrict__ mask_c, const unsigned char * __restrict__ mask_pad, const int len, const int batch, const int d, const int k, const int activation_type, const int skip_type, const int is_custom) { assert ((skip_type >= 0) && (skip_type <= 2)); assert ((skip_type != 1) || (k == 3)); assert ((skip_type != 2) || (k == 4)); const int ncols = batch*d*2; const int col = blockIdx.x * blockDim.x + threadIdx.x; if (col >= ncols) return; const int ncols_u = ncols*k; const int ncols_x = (k == 3) ? ncols : ncols_u; const scalar_t mask = (mask_c == NULL) ? (scalar_t)1.f : (*(mask_c + col)); auto cur = *(init + col); const int d2 = d*2; const auto* vp1 = is_custom ? (weight_c + col*2) : (weight_c + (col%d2)); const auto* vp2 = is_custom ? (weight_c + col*2 + 1) : (weight_c + (col%d2) + d2); const auto bias1 = *(bias + (col%d2)); const auto bias2 = *(bias + (col%d2) + d2); const auto *up = u + (col*k); const auto *xp = (skip_type == 0) ? NULL : ((skip_type == 1) ? (x + col) : (up + 3)); const unsigned char *pad_p = (mask_pad == NULL) ? 
NULL : (mask_pad + (col/d2)); auto *cp = c + col; auto *hp = h + col; const bool flip = (col%d2) >= d; if (flip) { up += (len-1)*ncols_u; cp += (len-1)*ncols; hp += (len-1)*ncols; if (skip_type) xp += (len-1)*ncols_x; if (pad_p) pad_p += (len-1)*batch; if (is_custom) { vp1 += (len-1)*ncols*2; vp2 += (len-1)*ncols*2; } } const int ncols_u_ = flip ? -ncols_u : ncols_u; const int ncols_x_ = flip ? -ncols_x : ncols_x; const int ncols_ = flip ? -ncols : ncols; const int batch_ = flip ? -batch : batch; for (int cnt = 0; cnt < len; ++cnt) { if ((pad_p == NULL) || !(*pad_p)) { const auto u0 = *up; const auto u1 = *(up + 1); const auto u2 = *(up + 2); const auto wc1 = *vp1; const auto wc2 = *vp2; const auto x_val = (skip_type) ? (*xp) : (scalar_t)0.f; const auto g1 = sigmoidf(u1 + wc1*cur + bias1); const auto g2 = sigmoidf(u2 + wc2*cur + bias2); cur = (cur-u0)*g1 + u0; const auto val = calc_activation(activation_type, cur); *hp = skip_type ? ((val - x_val) * mask * g2 + x_val) : (val * mask * g2); } //else { // *hp = 0; // ouptut 0 for a pad token //} *cp = cur; // useful for backward up += ncols_u_; cp += ncols_; hp += ncols_; xp = skip_type ? (xp + ncols_x_) : NULL; pad_p = mask_pad ? (pad_p + batch_) : NULL; vp1 = is_custom ? (vp1 + ncols_*2) : vp1; vp2 = is_custom ? 
(vp2 + ncols_*2) : vp2; } } template <typename scalar_t> __global__ void sru_cuda_bi_backward_kernel( scalar_t* __restrict__ grad_u, scalar_t* __restrict__ grad_x, scalar_t* __restrict__ grad_wc, scalar_t* __restrict__ grad_bias, scalar_t* __restrict__ grad_init, const scalar_t* __restrict__ u, const scalar_t* __restrict__ x, const scalar_t* __restrict__ weight_c, const scalar_t* __restrict__ bias, const scalar_t* __restrict__ init, const scalar_t* __restrict__ mask_c, const unsigned char * __restrict__ mask_pad, const scalar_t* __restrict__ c, const scalar_t* __restrict__ grad_h, const scalar_t* __restrict__ grad_last, const int len, const int batch, const int d, const int k, const int activation_type, const int skip_type, const int is_custom) { assert ((skip_type >= 0) && (skip_type <= 2)); assert ((skip_type != 1) || (k == 3)); assert ((skip_type != 2) || (k == 4)); int ncols = batch*d*2; int col = blockIdx.x * blockDim.x + threadIdx.x; if (col >= ncols) return; int ncols_u = ncols*k; int ncols_x = (k == 3) ? ncols : ncols_u; const scalar_t mask = (mask_c == NULL) ? (scalar_t)1.f : (*(mask_c + col)); scalar_t gwc1 = 0; scalar_t gwc2 = 0; scalar_t gbias1 = 0; scalar_t gbias2 = 0; auto cur = *(grad_last + col); const int d2 = d*2; const auto* vp1 = is_custom ? (weight_c + col*2) : (weight_c + (col%d2)); const auto* vp2 = is_custom ? (weight_c + col*2 + 1) : (weight_c + (col%d2) + d2); auto* gvp1 = is_custom ? (grad_wc + col*2) : (grad_wc + col); auto* gvp2 = is_custom ? (grad_wc + col*2 + 1) : (grad_wc + col + ncols); const auto bias1 = *(bias + (col%d2)); const auto bias2 = *(bias + (col%d2) + d2); const auto *up = u + (col*k); const auto *xp = (skip_type == 0) ? NULL : ( (skip_type == 1) ? (x + col) : (up + 3) ); const auto *cp = c + col; const auto *ghp = grad_h + col; const unsigned char *pad_p = (mask_pad == NULL) ? NULL : (mask_pad + (col/d2)); auto *gup = grad_u + (col*k); auto *gxp = (skip_type == 0) ? NULL : ( (skip_type == 1) ? 
(grad_x + col) : (gup + 3) ); const bool flip = ((col%d2) >= d); if (!flip) { up += (len-1)*ncols_u; cp += (len-1)*ncols; ghp += (len-1)*ncols; gup += (len-1)*ncols_u; if (skip_type) { xp += (len-1)*ncols_x; gxp += (len-1)*ncols_x; } if (pad_p) pad_p += (len-1)*batch; if (is_custom) { vp1 += (len-1)*ncols*2; vp2 += (len-1)*ncols*2; gvp1 += (len-1)*ncols*2; gvp2 += (len-1)*ncols*2; } } const int ncols_u_ = flip ? -ncols_u : ncols_u; const int ncols_x_ = flip ? -ncols_x : ncols_x; const int ncols_ = flip ? -ncols : ncols; const int batch_ = flip ? -batch : batch; for (int cnt = 0; cnt < len; ++cnt) { if ((pad_p == NULL) || !(*pad_p)) { const auto prev_c_val = (cnt<len-1) ? (*(cp-ncols_)) : (*(init+col)); const auto cp_val = *cp; const auto u0 = *up; const auto u1 = *(up + 1); const auto u2 = *(up + 2); const auto wc1 = *vp1; const auto wc2 = *vp2; const auto x_val = (skip_type) ? (*xp) : (scalar_t)0.f; const auto gh_val = *ghp; const auto g1 = sigmoidf(u1 + wc1*prev_c_val + bias1); const auto g2 = sigmoidf(u2 + wc2*prev_c_val + bias2); const auto c_val = calc_activation(activation_type, cp_val); // h = c*g2 + x*(1-g2) = (c-x)*g2 + x // c = c'*g1 + u0*(1-g1) = (c'-u0)*g1 + u0 // gradient with respect to values in the second gate g2 const auto gg2 = gh_val*(c_val-x_val)*mask*(g2*(1.f-g2)); gbias2 += gg2; gwc2 += gg2*prev_c_val; *gvp2 = gg2*prev_c_val; // gradient with respect to c[t] const auto tmp = g2*calc_grad_activation(activation_type, c_val); const auto gc = gh_val*mask*tmp + cur; // gradient with respect to values in the first gate g1 const auto gg1 = gc*(prev_c_val-u0)*(g1*(1.f-g1)); gbias1 += gg1; gwc1 += gg1*prev_c_val; *gvp1 = gg1*prev_c_val; // gradient with respect to c[t-1] cur = gc*g1 + gg1*wc1 + gg2*wc2; // gradient with respect to U *gup = gc*(1.f-g1); *(gup + 1) = gg1; *(gup + 2) = gg2; // gradient with respect to x[t] if (skip_type) *gxp = gh_val*(1.f-g2*mask); } up -= ncols_u_; cp -= ncols_; gup -= ncols_u_; ghp -= ncols_; xp = skip_type ? 
(xp - ncols_x_) : NULL; gxp = skip_type ? (gxp - ncols_x_) : NULL; pad_p = mask_pad ? (pad_p - batch_) : NULL; vp1 = is_custom ? (vp1 - ncols_*2) : vp1; vp2 = is_custom ? (vp2 - ncols_*2) : vp2; gvp1 = is_custom ? (gvp1 - ncols_*2) : gvp1; gvp2 = is_custom ? (gvp2 - ncols_*2) : gvp2; } //const int bias_idx = col % d2; //atomicAdd(grad_wc + bias_idx, gwc1); //atomicAdd(grad_wc + bias_idx + d2, gwc2); //atomicAdd(grad_bias + bias_idx, gbias1); //atomicAdd(grad_bias + bias_idx + d2, gbias2); if (!is_custom) { *(grad_wc + col) = gwc1; *(grad_wc + col + ncols) = gwc2; } *(grad_bias + col) = gbias1; *(grad_bias + col + ncols) = gbias2; *(grad_init +col) = cur; } } // end of namespace // unidirectional forward() void sru_cuda_forward( torch::Tensor & h, torch::Tensor & c, const torch::Tensor & U, const torch::Tensor & x, const torch::Tensor & weight_c, const torch::Tensor & bias, const torch::Tensor & c_init, const torch::Tensor & mask_c, const torch::Tensor & mask_pad, const int64_t length, const int64_t batch_size, const int64_t hidden_size, const int64_t k, const int64_t activation_type, const int64_t skip_type, const int64_t is_custom) { const int threads = 512; const int total = batch_size * hidden_size; const dim3 blocks( (total - 1) / threads + 1 ); AT_DISPATCH_FLOATING_TYPES_AND_HALF(U.type(), "sru_forward_cuda", ([&] { hipLaunchKernelGGL(( sru_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, h.data<scalar_t>(), c.data<scalar_t>(), U.data<scalar_t>(), x.numel() ? x.data<scalar_t>() : NULL, weight_c.data<scalar_t>(), bias.data<scalar_t>(), c_init.data<scalar_t>(), mask_c.numel() ? mask_c.data<scalar_t>() : NULL, mask_pad.numel() ? 
mask_pad.data<unsigned char>() : NULL, length, batch_size, hidden_size, k, activation_type, skip_type, is_custom); })); } // bidirectional forward() void sru_cuda_bi_forward( torch::Tensor & h, torch::Tensor & c, const torch::Tensor & U, const torch::Tensor & x, const torch::Tensor & weight_c, const torch::Tensor & bias, const torch::Tensor & c_init, const torch::Tensor & mask_c, const torch::Tensor & mask_pad, const int64_t length, const int64_t batch_size, const int64_t hidden_size, const int64_t k, const int64_t activation_type, const int64_t skip_type, const int64_t is_custom) { const int threads = 512; const int total = batch_size * hidden_size * 2; const dim3 blocks( (total - 1) / threads + 1 ); AT_DISPATCH_FLOATING_TYPES_AND_HALF(U.type(), "sru_bi_forward_cuda", ([&] { hipLaunchKernelGGL(( sru_cuda_bi_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, h.data<scalar_t>(), c.data<scalar_t>(), U.data<scalar_t>(), x.numel() ? x.data<scalar_t>() : NULL, weight_c.data<scalar_t>(), bias.data<scalar_t>(), c_init.data<scalar_t>(), mask_c.numel() ? mask_c.data<scalar_t>() : NULL, mask_pad.numel() ? 
mask_pad.data<unsigned char>() : NULL, length, batch_size, hidden_size, k, activation_type, skip_type, is_custom); })); } // unidirectional backward() void sru_cuda_backward( torch::Tensor & grad_u, torch::Tensor & grad_x, torch::Tensor & grad_wc, torch::Tensor & grad_bias, torch::Tensor & grad_init, const torch::Tensor & U, const torch::Tensor & x, const torch::Tensor & weight_c, const torch::Tensor & bias, const torch::Tensor & c_init, const torch::Tensor & mask_c, const torch::Tensor & mask_pad, const torch::Tensor & c, const torch::Tensor & grad_h, const torch::Tensor & grad_last, const int64_t length, const int64_t batch_size, const int64_t hidden_size, const int64_t k, const int64_t activation_type, const int64_t skip_type, const int64_t is_custom) { const int threads = 512; const int total = batch_size * hidden_size; const dim3 blocks( (total - 1) / threads + 1 ); AT_DISPATCH_FLOATING_TYPES_AND_HALF(U.type(), "sru_backward_cuda", ([&] { hipLaunchKernelGGL(( sru_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, grad_u.data<scalar_t>(), grad_x.numel() ? grad_x.data<scalar_t>() : NULL, grad_wc.data<scalar_t>(), grad_bias.data<scalar_t>(), grad_init.data<scalar_t>(), U.data<scalar_t>(), x.numel() ? x.data<scalar_t>() : NULL, weight_c.data<scalar_t>(), bias.data<scalar_t>(), c_init.data<scalar_t>(), mask_c.numel() ? mask_c.data<scalar_t>() : NULL, mask_pad.numel() ? 
mask_pad.data<unsigned char>() : NULL, c.data<scalar_t>(), grad_h.data<scalar_t>(), grad_last.data<scalar_t>(), length, batch_size, hidden_size, k, activation_type, skip_type, is_custom); })); } // bidirectional backward() void sru_cuda_bi_backward( torch::Tensor & grad_u, torch::Tensor & grad_x, torch::Tensor & grad_wc, torch::Tensor & grad_bias, torch::Tensor & grad_init, const torch::Tensor & U, const torch::Tensor & x, const torch::Tensor & weight_c, const torch::Tensor & bias, const torch::Tensor & c_init, const torch::Tensor & mask_c, const torch::Tensor & mask_pad, const torch::Tensor & c, const torch::Tensor & grad_h, const torch::Tensor & grad_last, const int64_t length, const int64_t batch_size, const int64_t hidden_size, const int64_t k, const int64_t activation_type, const int64_t skip_type, const int64_t is_custom) { const int threads = 512; const int total = batch_size * hidden_size * 2; const dim3 blocks( (total - 1) / threads + 1 ); AT_DISPATCH_FLOATING_TYPES_AND_HALF(U.type(), "sru_bi_backward_cuda", ([&] { hipLaunchKernelGGL(( sru_cuda_bi_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, grad_u.data<scalar_t>(), grad_x.numel() ? grad_x.data<scalar_t>() : NULL, grad_wc.data<scalar_t>(), grad_bias.data<scalar_t>(), grad_init.data<scalar_t>(), U.data<scalar_t>(), x.numel() ? x.data<scalar_t>() : NULL, weight_c.data<scalar_t>(), bias.data<scalar_t>(), c_init.data<scalar_t>(), mask_c.numel() ? mask_c.data<scalar_t>() : NULL, mask_pad.numel() ? mask_pad.data<unsigned char>() : NULL, c.data<scalar_t>(), grad_h.data<scalar_t>(), grad_last.data<scalar_t>(), length, batch_size, hidden_size, k, activation_type, skip_type, is_custom); })); }
8ff2e1510abb44a81ee196c24c2ba76e020c6e8c.cu
#include <torch/types.h> #include <cuda.h> #include <cuda_runtime.h> namespace { template <typename scalar_t> __forceinline__ __device__ scalar_t sigmoidf(scalar_t x) { return (scalar_t)1.f / ((scalar_t)1.f + expf(-x)); } template <typename scalar_t> __forceinline__ __device__ scalar_t calc_activation(int type, scalar_t x) { return type ? (scalar_t)tanhf(x) : x; } template <typename scalar_t> __forceinline__ __device__ scalar_t calc_grad_activation(int type, scalar_t x) { return type ? ((scalar_t)1.f - x * x) : (scalar_t)1.f; } template <typename scalar_t> __global__ void sru_cuda_forward_kernel( scalar_t* __restrict__ h, scalar_t* __restrict__ c, const scalar_t* __restrict__ u, const scalar_t* __restrict__ x, const scalar_t* __restrict__ weight_c, const scalar_t* __restrict__ bias, const scalar_t* __restrict__ init, const scalar_t* __restrict__ mask_c, const unsigned char* __restrict__ mask_pad, const int len, const int batch, const int d, const int k, const int activation_type, const int skip_type, const int is_custom) { assert ((skip_type >= 0) && (skip_type <= 2)); assert ((skip_type != 1) || (k == 3)); assert ((skip_type != 2) || (k == 4)); const int ncols = batch*d; const int col = blockIdx.x * blockDim.x + threadIdx.x; if (col >= ncols) return; const int ncols_u = ncols*k; const int ncols_x = (k == 3) ? ncols : ncols_u; const auto* vp1 = is_custom ? (weight_c + col*2) : (weight_c + (col%d)); const auto* vp2 = is_custom ? (weight_c + col*2 + 1) : (weight_c + (col%d) + d); const auto bias1 = *(bias + (col%d)); const auto bias2 = *(bias + (col%d) + d); const auto mask = (mask_c == NULL) ? (scalar_t)1.f : (*(mask_c + col)); auto cur = *(init + col); const auto* up = u + (col*k); const auto* xp = (skip_type == 0) ? NULL : ((skip_type == 1) ? (x + col) : (up + 3)); const unsigned char* pad_p = (mask_pad == NULL) ? 
NULL : (mask_pad + (col/d)); auto* cp = c + col; auto* hp = h + col; for (int row = 0; row < len; ++row) { if ((pad_p == NULL) || !(*pad_p)) { const auto u0 = *up; const auto u1 = *(up + 1); const auto u2 = *(up + 2); const auto wc1 = *vp1; const auto wc2 = *vp2; const auto x_val = (skip_type) ? (*xp) : (scalar_t)0.f; const auto g1 = sigmoidf(u1 + wc1*cur + bias1); const auto g2 = sigmoidf(u2 + wc2*cur + bias2); cur = (cur-u0)*g1 + u0; const auto val = calc_activation(activation_type, cur); *hp = skip_type ? ((val - x_val) * mask * g2 + x_val) : (val * mask * g2); } //else { // *hp = 0; // output 0 for a pad token //} *cp = cur; // useful for backward up += ncols_u; cp += ncols; hp += ncols; xp = skip_type ? (xp + ncols_x) : NULL; pad_p = mask_pad ? (pad_p + batch) : NULL; vp1 = is_custom ? (vp1 + ncols*2) : vp1; vp2 = is_custom ? (vp2 + ncols*2) : vp2; } } template <typename scalar_t> __global__ void sru_cuda_backward_kernel( scalar_t* __restrict__ grad_u, scalar_t* __restrict__ grad_x, scalar_t* __restrict__ grad_wc, scalar_t* __restrict__ grad_bias, scalar_t* __restrict__ grad_init, const scalar_t* __restrict__ u, const scalar_t* __restrict__ x, const scalar_t* __restrict__ weight_c, const scalar_t* __restrict__ bias, const scalar_t* __restrict__ init, const scalar_t* __restrict__ mask_c, const unsigned char * __restrict__ mask_pad, const scalar_t* __restrict__ c, const scalar_t* __restrict__ grad_h, const scalar_t* __restrict__ grad_last, const int len, const int batch, const int d, const int k, const int activation_type, const int skip_type, const int is_custom) { assert ((skip_type >= 0) && (skip_type <= 2)); assert ((skip_type != 1) || (k == 3)); assert ((skip_type != 2) || (k == 4)); const int ncols = batch*d; const int col = blockIdx.x * blockDim.x + threadIdx.x; if (col >= ncols) return; const int ncols_u = ncols*k; const int ncols_x = (k == 3) ? ncols : ncols_u; const auto* vp1 = is_custom ? 
(weight_c + col*2 + (len-1)*ncols*2) : (weight_c + (col%d)); const auto* vp2 = is_custom ? (weight_c + col*2 + 1 + (len-1)*ncols*2) : (weight_c + (col%d) + d); auto* gvp1 = is_custom ? (grad_wc + col*2 + (len-1)*ncols*2) : (grad_wc + col); auto* gvp2 = is_custom ? (grad_wc + col*2 + 1 + (len-1)*ncols*2) : (grad_wc + col + ncols); const auto bias1 = *(bias + (col%d)); const auto bias2 = *(bias + (col%d) + d); const auto mask = (mask_c == NULL) ? (scalar_t)1.f : (*(mask_c + col)); scalar_t gwc1 = 0; scalar_t gwc2 = 0; scalar_t gbias1 = 0; scalar_t gbias2 = 0; auto cur = *(grad_last + col); const auto* up = u + (col*k) + (len-1)*ncols_u; const auto* xp = (skip_type == 0) ? NULL : ( (skip_type == 1) ? (x + col + (len-1)*ncols) : (up + 3) ); const auto* cp = c + col + (len-1)*ncols; const auto* ghp = grad_h + col + (len-1)*ncols; const unsigned char* pad_p = (mask_pad == NULL) ? NULL : (mask_pad + (col/d) + (len-1)*batch); auto* gup = grad_u + (col*k) + (len-1)*ncols_u; auto* gxp = (skip_type == 0) ? NULL : ( (skip_type == 1) ? (grad_x + col + (len-1)*ncols) : (gup + 3) ); for (int row = len-1; row >= 0; --row) { if ((pad_p == NULL) || !(*pad_p)) { const auto prev_c_val = (row>0) ? (*(cp-ncols)) : (*(init+col)); const auto cp_val = *cp; const auto u0 = *up; const auto u1 = *(up + 1); const auto u2 = *(up + 2); const auto wc1 = *vp1; const auto wc2 = *vp2; const auto x_val = (skip_type) ? 
(*xp) : (scalar_t)0.f; const auto gh_val = *ghp; const auto g1 = sigmoidf(u1 + wc1*prev_c_val + bias1); const auto g2 = sigmoidf(u2 + wc2*prev_c_val + bias2); const auto c_val = calc_activation(activation_type, cp_val); // h = c*g2 + x*(1-g2) = (c-x)*g2 + x // c = c'*g1 + u0*(1-g1) = (c'-u0)*g1 + g0 // gradient with respect to values in the second gate g2 const auto gg2 = gh_val*(c_val-x_val)*mask*(g2*(1.f-g2)); gbias2 += gg2; gwc2 += gg2*prev_c_val; *gvp2 = gg2*prev_c_val; // gradient with respect to c[t] const auto tmp = g2*calc_grad_activation(activation_type, c_val); const auto gc = gh_val*mask*tmp + cur; // gradient with respect to values in the first gate g1 const auto gg1 = gc*(prev_c_val-u0)*(g1*(1.f-g1)); gbias1 += gg1; gwc1 += gg1*prev_c_val; *gvp1 = gg1*prev_c_val; // gradient with respect to c[t-1] cur = gc*g1 + gg1*wc1 + gg2*wc2; // gradient with respect to U *gup = gc*(1.f-g1); *(gup + 1) = gg1; *(gup + 2) = gg2; // gradient with respect to x[t] if (skip_type) *gxp = gh_val*(1.f-g2*mask); } up -= ncols_u; cp -= ncols; gup -= ncols_u; ghp -= ncols; xp = skip_type ? (xp - ncols_x) : NULL; gxp = skip_type ? (gxp - ncols_x) : NULL; pad_p = mask_pad ? (pad_p - batch) : NULL; vp1 = is_custom ? (vp1 - ncols*2) : vp1; vp2 = is_custom ? (vp2 - ncols*2) : vp2; gvp1 = is_custom ? (gvp1 - ncols*2) : gvp1; gvp2 = is_custom ? 
(gvp2 - ncols*2) : gvp2; } //const int bias_idx = col % d; //atomicAdd(grad_wc + bias_idx, gwc1); //atomicAdd(grad_wc + bias_idx + d, gwc2); //atomicAdd(grad_bias + bias_idx, gbias1); //atomicAdd(grad_bias + bias_idx + d, gbias2); if (!is_custom) { *(grad_wc + col) = gwc1; *(grad_wc + col + ncols) = gwc2; } *(grad_bias + col) = gbias1; *(grad_bias + col + ncols) = gbias2; *(grad_init + col) = cur; } template <typename scalar_t> __global__ void sru_cuda_bi_forward_kernel( scalar_t* __restrict__ h, scalar_t* __restrict__ c, const scalar_t* __restrict__ u, const scalar_t* __restrict__ x, const scalar_t* __restrict__ weight_c, const scalar_t* __restrict__ bias, const scalar_t* __restrict__ init, const scalar_t* __restrict__ mask_c, const unsigned char * __restrict__ mask_pad, const int len, const int batch, const int d, const int k, const int activation_type, const int skip_type, const int is_custom) { assert ((skip_type >= 0) && (skip_type <= 2)); assert ((skip_type != 1) || (k == 3)); assert ((skip_type != 2) || (k == 4)); const int ncols = batch*d*2; const int col = blockIdx.x * blockDim.x + threadIdx.x; if (col >= ncols) return; const int ncols_u = ncols*k; const int ncols_x = (k == 3) ? ncols : ncols_u; const scalar_t mask = (mask_c == NULL) ? (scalar_t)1.f : (*(mask_c + col)); auto cur = *(init + col); const int d2 = d*2; const auto* vp1 = is_custom ? (weight_c + col*2) : (weight_c + (col%d2)); const auto* vp2 = is_custom ? (weight_c + col*2 + 1) : (weight_c + (col%d2) + d2); const auto bias1 = *(bias + (col%d2)); const auto bias2 = *(bias + (col%d2) + d2); const auto *up = u + (col*k); const auto *xp = (skip_type == 0) ? NULL : ((skip_type == 1) ? (x + col) : (up + 3)); const unsigned char *pad_p = (mask_pad == NULL) ? 
NULL : (mask_pad + (col/d2)); auto *cp = c + col; auto *hp = h + col; const bool flip = (col%d2) >= d; if (flip) { up += (len-1)*ncols_u; cp += (len-1)*ncols; hp += (len-1)*ncols; if (skip_type) xp += (len-1)*ncols_x; if (pad_p) pad_p += (len-1)*batch; if (is_custom) { vp1 += (len-1)*ncols*2; vp2 += (len-1)*ncols*2; } } const int ncols_u_ = flip ? -ncols_u : ncols_u; const int ncols_x_ = flip ? -ncols_x : ncols_x; const int ncols_ = flip ? -ncols : ncols; const int batch_ = flip ? -batch : batch; for (int cnt = 0; cnt < len; ++cnt) { if ((pad_p == NULL) || !(*pad_p)) { const auto u0 = *up; const auto u1 = *(up + 1); const auto u2 = *(up + 2); const auto wc1 = *vp1; const auto wc2 = *vp2; const auto x_val = (skip_type) ? (*xp) : (scalar_t)0.f; const auto g1 = sigmoidf(u1 + wc1*cur + bias1); const auto g2 = sigmoidf(u2 + wc2*cur + bias2); cur = (cur-u0)*g1 + u0; const auto val = calc_activation(activation_type, cur); *hp = skip_type ? ((val - x_val) * mask * g2 + x_val) : (val * mask * g2); } //else { // *hp = 0; // ouptut 0 for a pad token //} *cp = cur; // useful for backward up += ncols_u_; cp += ncols_; hp += ncols_; xp = skip_type ? (xp + ncols_x_) : NULL; pad_p = mask_pad ? (pad_p + batch_) : NULL; vp1 = is_custom ? (vp1 + ncols_*2) : vp1; vp2 = is_custom ? 
(vp2 + ncols_*2) : vp2; } } template <typename scalar_t> __global__ void sru_cuda_bi_backward_kernel( scalar_t* __restrict__ grad_u, scalar_t* __restrict__ grad_x, scalar_t* __restrict__ grad_wc, scalar_t* __restrict__ grad_bias, scalar_t* __restrict__ grad_init, const scalar_t* __restrict__ u, const scalar_t* __restrict__ x, const scalar_t* __restrict__ weight_c, const scalar_t* __restrict__ bias, const scalar_t* __restrict__ init, const scalar_t* __restrict__ mask_c, const unsigned char * __restrict__ mask_pad, const scalar_t* __restrict__ c, const scalar_t* __restrict__ grad_h, const scalar_t* __restrict__ grad_last, const int len, const int batch, const int d, const int k, const int activation_type, const int skip_type, const int is_custom) { assert ((skip_type >= 0) && (skip_type <= 2)); assert ((skip_type != 1) || (k == 3)); assert ((skip_type != 2) || (k == 4)); int ncols = batch*d*2; int col = blockIdx.x * blockDim.x + threadIdx.x; if (col >= ncols) return; int ncols_u = ncols*k; int ncols_x = (k == 3) ? ncols : ncols_u; const scalar_t mask = (mask_c == NULL) ? (scalar_t)1.f : (*(mask_c + col)); scalar_t gwc1 = 0; scalar_t gwc2 = 0; scalar_t gbias1 = 0; scalar_t gbias2 = 0; auto cur = *(grad_last + col); const int d2 = d*2; const auto* vp1 = is_custom ? (weight_c + col*2) : (weight_c + (col%d2)); const auto* vp2 = is_custom ? (weight_c + col*2 + 1) : (weight_c + (col%d2) + d2); auto* gvp1 = is_custom ? (grad_wc + col*2) : (grad_wc + col); auto* gvp2 = is_custom ? (grad_wc + col*2 + 1) : (grad_wc + col + ncols); const auto bias1 = *(bias + (col%d2)); const auto bias2 = *(bias + (col%d2) + d2); const auto *up = u + (col*k); const auto *xp = (skip_type == 0) ? NULL : ( (skip_type == 1) ? (x + col) : (up + 3) ); const auto *cp = c + col; const auto *ghp = grad_h + col; const unsigned char *pad_p = (mask_pad == NULL) ? NULL : (mask_pad + (col/d2)); auto *gup = grad_u + (col*k); auto *gxp = (skip_type == 0) ? NULL : ( (skip_type == 1) ? 
(grad_x + col) : (gup + 3) ); const bool flip = ((col%d2) >= d); if (!flip) { up += (len-1)*ncols_u; cp += (len-1)*ncols; ghp += (len-1)*ncols; gup += (len-1)*ncols_u; if (skip_type) { xp += (len-1)*ncols_x; gxp += (len-1)*ncols_x; } if (pad_p) pad_p += (len-1)*batch; if (is_custom) { vp1 += (len-1)*ncols*2; vp2 += (len-1)*ncols*2; gvp1 += (len-1)*ncols*2; gvp2 += (len-1)*ncols*2; } } const int ncols_u_ = flip ? -ncols_u : ncols_u; const int ncols_x_ = flip ? -ncols_x : ncols_x; const int ncols_ = flip ? -ncols : ncols; const int batch_ = flip ? -batch : batch; for (int cnt = 0; cnt < len; ++cnt) { if ((pad_p == NULL) || !(*pad_p)) { const auto prev_c_val = (cnt<len-1) ? (*(cp-ncols_)) : (*(init+col)); const auto cp_val = *cp; const auto u0 = *up; const auto u1 = *(up + 1); const auto u2 = *(up + 2); const auto wc1 = *vp1; const auto wc2 = *vp2; const auto x_val = (skip_type) ? (*xp) : (scalar_t)0.f; const auto gh_val = *ghp; const auto g1 = sigmoidf(u1 + wc1*prev_c_val + bias1); const auto g2 = sigmoidf(u2 + wc2*prev_c_val + bias2); const auto c_val = calc_activation(activation_type, cp_val); // h = c*g2 + x*(1-g2) = (c-x)*g2 + x // c = c'*g1 + u0*(1-g1) = (c'-u0)*g1 + u0 // gradient with respect to values in the second gate g2 const auto gg2 = gh_val*(c_val-x_val)*mask*(g2*(1.f-g2)); gbias2 += gg2; gwc2 += gg2*prev_c_val; *gvp2 = gg2*prev_c_val; // gradient with respect to c[t] const auto tmp = g2*calc_grad_activation(activation_type, c_val); const auto gc = gh_val*mask*tmp + cur; // gradient with respect to values in the first gate g1 const auto gg1 = gc*(prev_c_val-u0)*(g1*(1.f-g1)); gbias1 += gg1; gwc1 += gg1*prev_c_val; *gvp1 = gg1*prev_c_val; // gradient with respect to c[t-1] cur = gc*g1 + gg1*wc1 + gg2*wc2; // gradient with respect to U *gup = gc*(1.f-g1); *(gup + 1) = gg1; *(gup + 2) = gg2; // gradient with respect to x[t] if (skip_type) *gxp = gh_val*(1.f-g2*mask); } up -= ncols_u_; cp -= ncols_; gup -= ncols_u_; ghp -= ncols_; xp = skip_type ? 
(xp - ncols_x_) : NULL; gxp = skip_type ? (gxp - ncols_x_) : NULL; pad_p = mask_pad ? (pad_p - batch_) : NULL; vp1 = is_custom ? (vp1 - ncols_*2) : vp1; vp2 = is_custom ? (vp2 - ncols_*2) : vp2; gvp1 = is_custom ? (gvp1 - ncols_*2) : gvp1; gvp2 = is_custom ? (gvp2 - ncols_*2) : gvp2; } //const int bias_idx = col % d2; //atomicAdd(grad_wc + bias_idx, gwc1); //atomicAdd(grad_wc + bias_idx + d2, gwc2); //atomicAdd(grad_bias + bias_idx, gbias1); //atomicAdd(grad_bias + bias_idx + d2, gbias2); if (!is_custom) { *(grad_wc + col) = gwc1; *(grad_wc + col + ncols) = gwc2; } *(grad_bias + col) = gbias1; *(grad_bias + col + ncols) = gbias2; *(grad_init +col) = cur; } } // end of namespace // unidirectional forward() void sru_cuda_forward( torch::Tensor & h, torch::Tensor & c, const torch::Tensor & U, const torch::Tensor & x, const torch::Tensor & weight_c, const torch::Tensor & bias, const torch::Tensor & c_init, const torch::Tensor & mask_c, const torch::Tensor & mask_pad, const int64_t length, const int64_t batch_size, const int64_t hidden_size, const int64_t k, const int64_t activation_type, const int64_t skip_type, const int64_t is_custom) { const int threads = 512; const int total = batch_size * hidden_size; const dim3 blocks( (total - 1) / threads + 1 ); AT_DISPATCH_FLOATING_TYPES_AND_HALF(U.type(), "sru_forward_cuda", ([&] { sru_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( h.data<scalar_t>(), c.data<scalar_t>(), U.data<scalar_t>(), x.numel() ? x.data<scalar_t>() : NULL, weight_c.data<scalar_t>(), bias.data<scalar_t>(), c_init.data<scalar_t>(), mask_c.numel() ? mask_c.data<scalar_t>() : NULL, mask_pad.numel() ? 
mask_pad.data<unsigned char>() : NULL, length, batch_size, hidden_size, k, activation_type, skip_type, is_custom); })); } // bidirectional forward() void sru_cuda_bi_forward( torch::Tensor & h, torch::Tensor & c, const torch::Tensor & U, const torch::Tensor & x, const torch::Tensor & weight_c, const torch::Tensor & bias, const torch::Tensor & c_init, const torch::Tensor & mask_c, const torch::Tensor & mask_pad, const int64_t length, const int64_t batch_size, const int64_t hidden_size, const int64_t k, const int64_t activation_type, const int64_t skip_type, const int64_t is_custom) { const int threads = 512; const int total = batch_size * hidden_size * 2; const dim3 blocks( (total - 1) / threads + 1 ); AT_DISPATCH_FLOATING_TYPES_AND_HALF(U.type(), "sru_bi_forward_cuda", ([&] { sru_cuda_bi_forward_kernel<scalar_t><<<blocks, threads>>>( h.data<scalar_t>(), c.data<scalar_t>(), U.data<scalar_t>(), x.numel() ? x.data<scalar_t>() : NULL, weight_c.data<scalar_t>(), bias.data<scalar_t>(), c_init.data<scalar_t>(), mask_c.numel() ? mask_c.data<scalar_t>() : NULL, mask_pad.numel() ? 
mask_pad.data<unsigned char>() : NULL, length, batch_size, hidden_size, k, activation_type, skip_type, is_custom); })); } // unidirectional backward() void sru_cuda_backward( torch::Tensor & grad_u, torch::Tensor & grad_x, torch::Tensor & grad_wc, torch::Tensor & grad_bias, torch::Tensor & grad_init, const torch::Tensor & U, const torch::Tensor & x, const torch::Tensor & weight_c, const torch::Tensor & bias, const torch::Tensor & c_init, const torch::Tensor & mask_c, const torch::Tensor & mask_pad, const torch::Tensor & c, const torch::Tensor & grad_h, const torch::Tensor & grad_last, const int64_t length, const int64_t batch_size, const int64_t hidden_size, const int64_t k, const int64_t activation_type, const int64_t skip_type, const int64_t is_custom) { const int threads = 512; const int total = batch_size * hidden_size; const dim3 blocks( (total - 1) / threads + 1 ); AT_DISPATCH_FLOATING_TYPES_AND_HALF(U.type(), "sru_backward_cuda", ([&] { sru_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( grad_u.data<scalar_t>(), grad_x.numel() ? grad_x.data<scalar_t>() : NULL, grad_wc.data<scalar_t>(), grad_bias.data<scalar_t>(), grad_init.data<scalar_t>(), U.data<scalar_t>(), x.numel() ? x.data<scalar_t>() : NULL, weight_c.data<scalar_t>(), bias.data<scalar_t>(), c_init.data<scalar_t>(), mask_c.numel() ? mask_c.data<scalar_t>() : NULL, mask_pad.numel() ? 
mask_pad.data<unsigned char>() : NULL, c.data<scalar_t>(), grad_h.data<scalar_t>(), grad_last.data<scalar_t>(), length, batch_size, hidden_size, k, activation_type, skip_type, is_custom); })); } // bidirectional backward() void sru_cuda_bi_backward( torch::Tensor & grad_u, torch::Tensor & grad_x, torch::Tensor & grad_wc, torch::Tensor & grad_bias, torch::Tensor & grad_init, const torch::Tensor & U, const torch::Tensor & x, const torch::Tensor & weight_c, const torch::Tensor & bias, const torch::Tensor & c_init, const torch::Tensor & mask_c, const torch::Tensor & mask_pad, const torch::Tensor & c, const torch::Tensor & grad_h, const torch::Tensor & grad_last, const int64_t length, const int64_t batch_size, const int64_t hidden_size, const int64_t k, const int64_t activation_type, const int64_t skip_type, const int64_t is_custom) { const int threads = 512; const int total = batch_size * hidden_size * 2; const dim3 blocks( (total - 1) / threads + 1 ); AT_DISPATCH_FLOATING_TYPES_AND_HALF(U.type(), "sru_bi_backward_cuda", ([&] { sru_cuda_bi_backward_kernel<scalar_t><<<blocks, threads>>>( grad_u.data<scalar_t>(), grad_x.numel() ? grad_x.data<scalar_t>() : NULL, grad_wc.data<scalar_t>(), grad_bias.data<scalar_t>(), grad_init.data<scalar_t>(), U.data<scalar_t>(), x.numel() ? x.data<scalar_t>() : NULL, weight_c.data<scalar_t>(), bias.data<scalar_t>(), c_init.data<scalar_t>(), mask_c.numel() ? mask_c.data<scalar_t>() : NULL, mask_pad.numel() ? mask_pad.data<unsigned char>() : NULL, c.data<scalar_t>(), grad_h.data<scalar_t>(), grad_last.data<scalar_t>(), length, batch_size, hidden_size, k, activation_type, skip_type, is_custom); })); }
120eb2675c9b7169dfa0f9d796f8dd3db25c2e01.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "basic/GPUBasic.h" #include "model/GPUDevice.h" #include "math/GPUMath.h" #include "math/AdagradUpdate.h" namespace Deep8 { namespace Math { template <typename T> __global__ void AdagradUpdateKernel(T* value, T* gradient, T* accumulate, T epsilon, T learningRate, T weightDecay, int N) { int start = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = start; i < N; i += stride) { gradient[i] += value[i] * weightDecay; accumulate[i] += gradient[i] * gradient[i]; value[i] -= learningRate * gradient[i] / cudaSqrt(accumulate[i] + epsilon); } } void AdagradUpdateGPU(Tensor& value, Tensor& gradient, Tensor& accumulate, float epsilon, float learningRate, float weightDecay) { int N = (int)value.shape.size(); int blockSize = DEEP8_GPU_BLOCK_SIZE; int grideSize = (N + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE; switch (value.elementType.id) { case DType::Float32: AdagradUpdateKernel<float> << <grideSize, blockSize >> > (value.data<float>(), gradient.data<float>(), accumulate.data<float>(), epsilon, learningRate, weightDecay, N); break; case DType::Float64: AdagradUpdateKernel<double> << <grideSize, blockSize >> > (value.data<double>(), gradient.data<double>(), accumulate.data<double>(), double(epsilon), double(learningRate), double(weightDecay), N); break; #ifdef HAVE_HALF case DType::Float16: AdagradUpdateKernel<half> << <grideSize, blockSize >> > (value.data<half>(), gradient.data<half>(), accumulate.data<half>(), __float2half(epsilon), __float2half(learningRate), __float2half(weightDecay), N); break; #endif default: DEEP8_RUNTIME_ERROR("type " << value.elementType.name << " is not support"); break; } } } }
120eb2675c9b7169dfa0f9d796f8dd3db25c2e01.cu
#include "basic/GPUBasic.h" #include "model/GPUDevice.h" #include "math/GPUMath.h" #include "math/AdagradUpdate.h" namespace Deep8 { namespace Math { template <typename T> __global__ void AdagradUpdateKernel(T* value, T* gradient, T* accumulate, T epsilon, T learningRate, T weightDecay, int N) { int start = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = start; i < N; i += stride) { gradient[i] += value[i] * weightDecay; accumulate[i] += gradient[i] * gradient[i]; value[i] -= learningRate * gradient[i] / cudaSqrt(accumulate[i] + epsilon); } } void AdagradUpdateGPU(Tensor& value, Tensor& gradient, Tensor& accumulate, float epsilon, float learningRate, float weightDecay) { int N = (int)value.shape.size(); int blockSize = DEEP8_GPU_BLOCK_SIZE; int grideSize = (N + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE; switch (value.elementType.id) { case DType::Float32: AdagradUpdateKernel<float> << <grideSize, blockSize >> > (value.data<float>(), gradient.data<float>(), accumulate.data<float>(), epsilon, learningRate, weightDecay, N); break; case DType::Float64: AdagradUpdateKernel<double> << <grideSize, blockSize >> > (value.data<double>(), gradient.data<double>(), accumulate.data<double>(), double(epsilon), double(learningRate), double(weightDecay), N); break; #ifdef HAVE_HALF case DType::Float16: AdagradUpdateKernel<half> << <grideSize, blockSize >> > (value.data<half>(), gradient.data<half>(), accumulate.data<half>(), __float2half(epsilon), __float2half(learningRate), __float2half(weightDecay), N); break; #endif default: DEEP8_RUNTIME_ERROR("type " << value.elementType.name << " is not support"); break; } } } }
e3b982662d7328f849dbad00fd114fb4211d03ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset unsigned char x, y, z; int idx = blockIdx.x * blockDim.x + threadIdx.x; greyImage[idx] = 0.299f * rgbaImage[idx].x + 0.587f * rgbaImage[idx].y + 0.114f * rgbaImage[idx].z; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(1024, 1, 1); //TODO const dim3 gridSize( numRows*numCols/1024 + 1, 1, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
e3b982662d7328f849dbad00fd114fb4211d03ca.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset unsigned char x, y, z; int idx = blockIdx.x * blockDim.x + threadIdx.x; greyImage[idx] = 0.299f * rgbaImage[idx].x + 0.587f * rgbaImage[idx].y + 0.114f * rgbaImage[idx].z; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(1024, 1, 1); //TODO const dim3 gridSize( numRows*numCols/1024 + 1, 1, 1); //TODO rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
403b07c460e52088478103b3fa41bf1fe63c1bcc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma (iuriish@yahoo.com), created on 26.04.2019 // #include<ops/declarable/helpers/polyGamma.h> #include<ops/declarable/helpers/zeta.h> #include <NDArrayFactory.h> namespace nd4j { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void polyGammaCuda(const void *vn, const Nd4jLong *nShapeInfo, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { const auto n = reinterpret_cast<const T*>(vn); const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len; if (threadIdx.x == 0) len = shape::length(nShapeInfo); __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto totalThreads = gridDim.x * blockDim.x; for (int i = tid; i < len; i += totalThreads) { const auto nOffset = shape::getIndexOffset(i, nShapeInfo, len); const auto xOffset = shape::getIndexOffset(i, xShapeInfo, len); const auto zOffset = shape::getIndexOffset(i, zShapeInfo, len); const T nVal = n[nOffset]; int sign = (static_cast<int>(nVal) + 1) % 2 ? 
-1 : 1; T factorial = 1; if(nVal != 0 && nVal != 1) for(int i = 2; i <= nVal; ++i) factorial *= i; z[zOffset] = sign * factorial * zetaScalar<T>(nVal + 1, x[xOffset]); } } /////////////////////////////////////////////////////////////////// template<typename T> static void polyGammaCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void *vn, const Nd4jLong *nShapeInfo, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { hipLaunchKernelGGL(( polyGammaCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vn, nShapeInfo, vx, xShapeInfo, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////// void polyGamma(nd4j::LaunchContext * context, const NDArray& n, const NDArray& x, NDArray& z) { if(!n.isActualOnDeviceSide()) n.syncToDevice(); if(!x.isActualOnDeviceSide()) x.syncToDevice(); int threadsPerBlock = MAX_NUM_THREADS; int blocksPerGrid = (z.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; BUILD_SINGLE_SELECTOR(n.dataType(), polyGammaCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), n.getSpecialBuffer(), n.getSpecialShapeInfo(), x.getSpecialBuffer(), x.getSpecialShapeInfo(), z.getSpecialBuffer(), z.getSpecialShapeInfo()), FLOAT_TYPES); n.tickReadHost(); x.tickReadHost(); z.tickWriteDevice(); } BUILD_SINGLE_TEMPLATE(template void polyGammaCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void *vn, const Nd4jLong *nShapeInfo, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo), FLOAT_TYPES); } } }
403b07c460e52088478103b3fa41bf1fe63c1bcc.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma (iuriish@yahoo.com), created on 26.04.2019 // #include<ops/declarable/helpers/polyGamma.h> #include<ops/declarable/helpers/zeta.h> #include <NDArrayFactory.h> namespace nd4j { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void polyGammaCuda(const void *vn, const Nd4jLong *nShapeInfo, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { const auto n = reinterpret_cast<const T*>(vn); const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len; if (threadIdx.x == 0) len = shape::length(nShapeInfo); __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto totalThreads = gridDim.x * blockDim.x; for (int i = tid; i < len; i += totalThreads) { const auto nOffset = shape::getIndexOffset(i, nShapeInfo, len); const auto xOffset = shape::getIndexOffset(i, xShapeInfo, len); const auto zOffset = shape::getIndexOffset(i, zShapeInfo, len); const T nVal = n[nOffset]; int sign = (static_cast<int>(nVal) + 1) % 2 ? 
-1 : 1; T factorial = 1; if(nVal != 0 && nVal != 1) for(int i = 2; i <= nVal; ++i) factorial *= i; z[zOffset] = sign * factorial * zetaScalar<T>(nVal + 1, x[xOffset]); } } /////////////////////////////////////////////////////////////////// template<typename T> static void polyGammaCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void *vn, const Nd4jLong *nShapeInfo, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { polyGammaCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vn, nShapeInfo, vx, xShapeInfo, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////// void polyGamma(nd4j::LaunchContext * context, const NDArray& n, const NDArray& x, NDArray& z) { if(!n.isActualOnDeviceSide()) n.syncToDevice(); if(!x.isActualOnDeviceSide()) x.syncToDevice(); int threadsPerBlock = MAX_NUM_THREADS; int blocksPerGrid = (z.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; BUILD_SINGLE_SELECTOR(n.dataType(), polyGammaCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), n.getSpecialBuffer(), n.getSpecialShapeInfo(), x.getSpecialBuffer(), x.getSpecialShapeInfo(), z.getSpecialBuffer(), z.getSpecialShapeInfo()), FLOAT_TYPES); n.tickReadHost(); x.tickReadHost(); z.tickWriteDevice(); } BUILD_SINGLE_TEMPLATE(template void polyGammaCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void *vn, const Nd4jLong *nShapeInfo, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo), FLOAT_TYPES); } } }
a60d154d7226536af2f4077c2422e1128b054ea2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Optimizer.cuh" __global__ void gradient_descent_kernal(const val_t* deltas, val_t* weigts, size_t weightsSize, val_t alpha, val_t lambda) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < weightsSize; i += blockDim.x * gridDim.x) { weigts[i] = weigts[i] - alpha * (deltas[i] + lambda * weigts[i]); } } void gradient_descent::update(const val_t* deltas, val_t* weights, size_t weightsSize, hipStream_t stream) { size_t threads = DEFAULT_THREAD_SIZE; size_t blocks = ::ceil(weightsSize / (float)threads); gradient_descent_kernal << <blocks, threads, 0, stream >> > (deltas, weights, weightsSize, alpha, lambda); }
a60d154d7226536af2f4077c2422e1128b054ea2.cu
#include "Optimizer.cuh" __global__ void gradient_descent_kernal(const val_t* deltas, val_t* weigts, size_t weightsSize, val_t alpha, val_t lambda) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < weightsSize; i += blockDim.x * gridDim.x) { weigts[i] = weigts[i] - alpha * (deltas[i] + lambda * weigts[i]); } } void gradient_descent::update(const val_t* deltas, val_t* weights, size_t weightsSize, cudaStream_t stream) { size_t threads = DEFAULT_THREAD_SIZE; size_t blocks = std::ceil(weightsSize / (float)threads); gradient_descent_kernal << <blocks, threads, 0, stream >> > (deltas, weights, weightsSize, alpha, lambda); }
8e7cf1f519b40b68ef09c42377057c13204d0ca6.hip
// !!! This is a file automatically generated by hipify!!! #include "parameters.cuh" #include <hip/hip_runtime_api.h> __global__ void d_tumble(point *ra, point *pos_colloid, point len, int no_of_colloid, int *iv, int *seed, int *idum, int *iy){ for(int i = 1; i <= no_of_colloid; i++) { ra[i] = img(pos_colloid[i] - ra[i].random(iv, seed, idum, iy)*len, len); ra[i] = ra[i]/sqrt((ra[i]*ra[i]).sum()); } } void tumble() { hipLaunchKernelGGL(( d_tumble), dim3(1), dim3(1), 0, 0, ra, pos_colloid, len, no_of_colloid, iv, seed, idum, iy); } void d_nbrc(point *ra, point *vel_colloid, point *pos_fl, point *pos_colloid, point len, int *no_neigh, int **nbr, int **neigh_fl, int *cnt, int no_of_colloid, double v0, double sigma) { point vector; // int i = blockIdx.x*blockDim.x + threadIdx.x + 1; // int j = blockIdx.y*blockDim.y + threadIdx.y + 1; for(int i = 1; i <= no_of_colloid; i++) { // if(i <= no_of_colloid) { vel_colloid[i] += ra[i]*v0; cnt[i] = 0; for(int j = 1; j <= no_neigh[i]; j++) { vector = img(pos_fl[neigh_fl[i][j]] - pos_colloid[i], len); if((vector*vector).sum() <= power(sigma*0.5+0.5, 2) && (vector*vel_colloid[i]).sum() <= 0) nbr[++cnt[i]][i] = neigh_fl[i][j]; } } } void d_velc(point *ra, point *vel_fl, int **nbr, int *cnt, int no_of_colloid, double mass_colloid, double mass_fl, double v0) { point del; double temp; // int i = blockIdx.x*blockDim.x + threadIdx.x + 1; for(int i = 1; i <= no_of_colloid; i++) { // if(i <= no_of_colloid) { del = ra[i]*v0, temp = mass_colloid/(mass_fl*cnt[i]); for(int j = 1; j <= cnt[i]; j++) { //atomic Sub Double supported in tesla P100; // atomicAdd(&vel_fl[nbr[j][i]].x, - del.x*temp); // atomicAdd(&vel_fl[nbr[j][i]].y, - del.y*temp); // atomicAdd(&vel_fl[nbr[j][i]].z, - del.z*temp); vel_fl[nbr[j][i]] -= del*temp; } } } void run() { dim3 thr(32), blk((no_of_colloid + thr.x -1)/thr.x); hipDeviceSynchronize(); d_nbrc(ra, vel_colloid, pos_fl, pos_colloid, len, no_neigh, nbr, neigh_fl, cnt, no_of_colloid, v0, sigma); // 
hipDeviceSynchronize(); d_velc(ra, vel_fl, nbr, cnt, no_of_colloid, mass_colloid, mass_fl, v0); } void updown_velocity() { point up_vel = point(0, 0, 0), vector, vel; for (int i = 1; i <= no_of_colloid; i++){ cnt[i] = 0, up_cnt[i] = 0, vel = point(0, 0, 0); for (int j = 1; j <= no_neigh[i]; j++) { vector = img(pos_fl[neigh_fl[i][j]] - pos_colloid[i], len); if((vector*vector).sum() <= pow((sigma*0.5 + 0.5), 2) && (vector*vel_colloid[i]).sum() <= 0.0) nbr[++cnt[i]][i] = neigh_fl[i][j]; if((vector*vector).sum() <= pow((sigma*0.5 + 0.1), 2) && (vector*vel_colloid[i]).sum() <= 0.0) up_nbr[++up_cnt[i]][i] = neigh_fl[i][j]; } for (int j = 1; j <= cnt[i]; j++) vel += vel_fl[nbr[j][i]]; for(int j = 1; j <= up_cnt[i]; j++) up_vel += vel_fl[up_nbr[j][i]]; up_vel = (up_cnt[i] > 0)? up_vel/up_cnt[i] - vel_colloid[i]: up_vel; vel = (up_cnt[i] > 0)? vel/cnt[i] - vel_colloid[i]: vel; } }
8e7cf1f519b40b68ef09c42377057c13204d0ca6.cu
#include "parameters.cuh" #include <cuda_profiler_api.h> __global__ void d_tumble(point *ra, point *pos_colloid, point len, int no_of_colloid, int *iv, int *seed, int *idum, int *iy){ for(int i = 1; i <= no_of_colloid; i++) { ra[i] = img(pos_colloid[i] - ra[i].random(iv, seed, idum, iy)*len, len); ra[i] = ra[i]/sqrt((ra[i]*ra[i]).sum()); } } void tumble() { d_tumble<<<1, 1>>>(ra, pos_colloid, len, no_of_colloid, iv, seed, idum, iy); } void d_nbrc(point *ra, point *vel_colloid, point *pos_fl, point *pos_colloid, point len, int *no_neigh, int **nbr, int **neigh_fl, int *cnt, int no_of_colloid, double v0, double sigma) { point vector; // int i = blockIdx.x*blockDim.x + threadIdx.x + 1; // int j = blockIdx.y*blockDim.y + threadIdx.y + 1; for(int i = 1; i <= no_of_colloid; i++) { // if(i <= no_of_colloid) { vel_colloid[i] += ra[i]*v0; cnt[i] = 0; for(int j = 1; j <= no_neigh[i]; j++) { vector = img(pos_fl[neigh_fl[i][j]] - pos_colloid[i], len); if((vector*vector).sum() <= power(sigma*0.5+0.5, 2) && (vector*vel_colloid[i]).sum() <= 0) nbr[++cnt[i]][i] = neigh_fl[i][j]; } } } void d_velc(point *ra, point *vel_fl, int **nbr, int *cnt, int no_of_colloid, double mass_colloid, double mass_fl, double v0) { point del; double temp; // int i = blockIdx.x*blockDim.x + threadIdx.x + 1; for(int i = 1; i <= no_of_colloid; i++) { // if(i <= no_of_colloid) { del = ra[i]*v0, temp = mass_colloid/(mass_fl*cnt[i]); for(int j = 1; j <= cnt[i]; j++) { //atomic Sub Double supported in tesla P100; // atomicAdd(&vel_fl[nbr[j][i]].x, - del.x*temp); // atomicAdd(&vel_fl[nbr[j][i]].y, - del.y*temp); // atomicAdd(&vel_fl[nbr[j][i]].z, - del.z*temp); vel_fl[nbr[j][i]] -= del*temp; } } } void run() { dim3 thr(32), blk((no_of_colloid + thr.x -1)/thr.x); cudaDeviceSynchronize(); d_nbrc(ra, vel_colloid, pos_fl, pos_colloid, len, no_neigh, nbr, neigh_fl, cnt, no_of_colloid, v0, sigma); // cudaDeviceSynchronize(); d_velc(ra, vel_fl, nbr, cnt, no_of_colloid, mass_colloid, mass_fl, v0); } void 
updown_velocity() { point up_vel = point(0, 0, 0), vector, vel; for (int i = 1; i <= no_of_colloid; i++){ cnt[i] = 0, up_cnt[i] = 0, vel = point(0, 0, 0); for (int j = 1; j <= no_neigh[i]; j++) { vector = img(pos_fl[neigh_fl[i][j]] - pos_colloid[i], len); if((vector*vector).sum() <= pow((sigma*0.5 + 0.5), 2) && (vector*vel_colloid[i]).sum() <= 0.0) nbr[++cnt[i]][i] = neigh_fl[i][j]; if((vector*vector).sum() <= pow((sigma*0.5 + 0.1), 2) && (vector*vel_colloid[i]).sum() <= 0.0) up_nbr[++up_cnt[i]][i] = neigh_fl[i][j]; } for (int j = 1; j <= cnt[i]; j++) vel += vel_fl[nbr[j][i]]; for(int j = 1; j <= up_cnt[i]; j++) up_vel += vel_fl[up_nbr[j][i]]; up_vel = (up_cnt[i] > 0)? up_vel/up_cnt[i] - vel_colloid[i]: up_vel; vel = (up_cnt[i] > 0)? vel/cnt[i] - vel_colloid[i]: vel; } }
932a8d2861ef32e904d11614c981ca0fdd181749.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "reduce.h" __device__ double merge(double old,double opOutput,double *extraParams) { return opOutput * old; } __device__ double update(double old,double opOutput,double *extraParams) { return opOutput * old; } __device__ double op(double d1,double *extraParams) { return d1; } __device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *extraParams,double *result) { return reduction; } extern "C" __global__ void prod_strided_double(int n, int xOffset,double *dx,int incx,double *extraParams,double *result) { transform(n,xOffset,dx,incx,extraParams,result); }
932a8d2861ef32e904d11614c981ca0fdd181749.cu
#include "reduce.h" __device__ double merge(double old,double opOutput,double *extraParams) { return opOutput * old; } __device__ double update(double old,double opOutput,double *extraParams) { return opOutput * old; } __device__ double op(double d1,double *extraParams) { return d1; } __device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *extraParams,double *result) { return reduction; } extern "C" __global__ void prod_strided_double(int n, int xOffset,double *dx,int incx,double *extraParams,double *result) { transform(n,xOffset,dx,incx,extraParams,result); }
c191ca89c0b71f576084520e33f0cb035d06a32d.hip
// !!! This is a file automatically generated by hipify!!! #include <redistance.h> #include <redistance_kernels.h> #include <Vec.h> #include <math.h> #include <stdio.h> #include <mycutil.h> #include "cusp/print.h" void redistance::ReInitTsign(TetMesh* mesh, Vector_d& vertT_after_permute_d, int nparts, int largest_vert_part, int largest_ele_part, int largest_num_inside_mem, int full_num_ele, Vector_d& vert_after_permute_d, IdxVector_d& vert_offsets_d, IdxVector_d& ele_after_permute_d, IdxVector_d& ele_offsets_d, Vector_d& ele_local_coords_d, IdxVector_d& mem_location_offsets, IdxVector_d& mem_locations, IdxVector_d& part_label_d, IdxVector_d& block_xadj, IdxVector_d& block_adjncy) { int nn = mesh->vertices.size(); int nthreads = 256; int nblocks = min((int) ceil((LevelsetValueType) nn / nthreads), 65535); cudaSafeCall((kernel_reinit_Tsign << <nblocks, nthreads >> >(nn, CAST(vertT_after_permute_d), CAST(m_Tsign_d)))); } void redistance::FindSeedPoint(const IdxVector_d& old_narrowband, const int num_old_narrowband, TetMesh* mesh, Vector_d& vertT_after_permute_d, int nparts, int largest_vert_part, int largest_ele_part, int largest_num_inside_mem, int full_num_ele, Vector_d& vert_after_permute_d, IdxVector_d& vert_offsets_d, IdxVector_d& ele_after_permute_d, IdxVector_d& ele_offsets_d, Vector_d& ele_local_coords_d, IdxVector_d& mem_location_offsets, IdxVector_d& mem_locations, IdxVector_d& part_label_d, IdxVector_d& block_xadj, IdxVector_d& block_adjncy) { int ne = mesh->tets.size(); int nn = mesh->vertices.size(); int nnb = num_old_narrowband; // printf("nnb = %d\n", nnb); thrust::fill(m_DT_d.begin(), m_DT_d.end(), LARGENUM); // m_active_block_list_d.resize(nparts + 1); m_active_block_list_d[0] = 0; if (nnb == 0) { // m_Label_d.resize(nn, FarPoint); thrust::fill(m_Label_d.begin(), m_Label_d.end(), FarPoint); int nthreads = largest_ele_part; int nblocks = nparts; cudaSafeCall((kernel_seedlabel << <nblocks, nthreads >> >(nn, full_num_ele, CAST(vert_after_permute_d), 
CAST(vert_offsets_d), CAST(ele_after_permute_d), CAST(ele_offsets_d), CAST(m_Label_d), CAST(vertT_after_permute_d), CAST(m_DT_d), CAST(m_active_block_list_d)))); // Vector_h tmp = m_DT_d; // for(int i=0; i<tmp.size(); i++) // printf("m_DT_d[%d] = %f\n", i, tmp[i]); // // IdxVector_h idxtmp = m_active_block_list_d; // for(int i=0; i<idxtmp.size(); i++) // printf("m_active_block_list_d[%d] = %d\n", i, idxtmp[i]); } else { // m_Label_d.resize(nn, FarPoint); thrust::fill(m_Label_d.begin(), m_Label_d.end(), FarPoint); int nthreads = largest_ele_part; // Vector_h dtmp_h = m_vertT_after_permute_d; // for(int i =0; i<dtmp_h.size(); i++) // { // printf("m_vertT_after_permute_d[%d] = %f\n", i, dtmp_h[i]); // } int nblocks = nnb; cudaSafeCall((kernel_seedlabel_narrowband << <nblocks, nthreads >> >(nn, full_num_ele, CAST(old_narrowband), CAST(vert_after_permute_d), CAST(vert_offsets_d), CAST(ele_after_permute_d), CAST(ele_offsets_d), CAST(m_Label_d), CAST(vertT_after_permute_d), CAST(m_DT_d), CAST(m_active_block_list_d)))); } // Vector_h tmp_h = m_DT_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_DT_d[%d] = %f\n", i, tmp_h[i]); // } // // tmp_h = m_active_block_list_d; // printf("Num active list = %d\n", tmp_h[0]); // for(int i = 0; i < tmp_h[0]; i++) // { // printf("m_active_block_list_d[%d]=%d\n", i, tmp_h[i + 1]); // } } void redistance::GenerateData(IdxVector_d& new_narrowband, int& new_num_narrowband, LevelsetValueType bandwidth, int stepcount, TetMesh* mesh, Vector_d& vertT_after_permute_d, int nparts, int largest_vert_part, int largest_ele_part, int largest_num_inside_mem, int full_num_ele, Vector_d& vert_after_permute_d, IdxVector_d& vert_offsets_d, IdxVector_d& ele_after_permute_d, IdxVector_d& ele_offsets_d, Vector_d& ele_local_coords_d, IdxVector_d& mem_location_offsets, IdxVector_d& mem_locations, IdxVector_d& part_label_d, IdxVector_d& block_xadj, IdxVector_d& block_adjncy) { int nn = mesh->vertices.size(); int totalIterationNumber = 0; int nblocks, 
nthreads, shared_size; int NUM_ITER = 5; int nTotalIter = 0; int numActive = m_active_block_list_d[0]; // printf("numActive=%d\n", numActive); thrust::copy(m_DT_d.begin(), m_DT_d.end(), DT_d_out.begin()); thrust::fill(d_vert_con.begin(), d_vert_con.end(), 0); thrust::fill(d_block_con.begin(), d_block_con.end(), 0); // h_block_con = BoolVector_h(nparts); // block_xadj_h = IdxVector_h(block_xadj.begin(), block_xadj.end()); // block_adjncy_h = IdxVector_h(block_adjncy.begin(), block_adjncy.end()); thrust::copy(m_active_block_list_d.begin() + 1, m_active_block_list_d.begin() + 1 + numActive, h_ActiveList.begin()); h_BlockLabel.assign(nparts, FarPoint); // d_block_vertT_min = Vector_d(nparts); // h_block_vertT_min = Vector_h(nparts); while (numActive > 0) { printf("nTotalIter = %d, numActive=%d\n", nTotalIter, numActive); ///////////////////////////step 1: run solver ////////////////////////////////////////////////////////////////// nTotalIter++; totalIterationNumber += numActive; nblocks = numActive; nthreads = largest_ele_part; m_active_block_list_d = h_ActiveList; shared_size = sizeof (LevelsetValueType)* 4 * largest_ele_part + sizeof (short) *largest_vert_part*largest_num_inside_mem; // if(stepcount == 1 && nTotalIter == 1) // Vector_h tmp_h = m_DT_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_DT_d[%d] = %f\n", i, tmp_h[i]); // } // // IdxVector_h itmp_h = m_active_block_list_d; // for(int i = 0; i < numActive; i++) // { // printf("m_active_block_list_d[%d] = %d\n", i, itmp_h[i]); // } // // tmp_h = m_mem_location_offsets; // for (int i = 0; i < tmp_h.size(); i++) // { // printf("m_mem_location_offsets[%d] = %d\n", i, tmp_h[i]); // } // Vector_h oldDT_h = m_DT_d; cudaSafeCall((kernel_update_values << <nblocks, nthreads, shared_size >> >(CAST(m_active_block_list_d), CAST(m_Label_d), largest_ele_part, largest_vert_part, full_num_ele, CAST(ele_after_permute_d), CAST(ele_offsets_d), CAST(vert_offsets_d), CAST(m_DT_d), CAST(ele_local_coords_d), 
largest_num_inside_mem, CAST(mem_locations), CAST(mem_location_offsets), NUM_ITER, CAST(DT_d_out), CAST(d_vert_con)))); // tmp_h = DT_d_out; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_DT_d[%d] = %f\n", i, tmp_h[i]); // } nthreads = largest_vert_part; cudaSafeCall((CopyOutBack << <nblocks, nthreads >> >(CAST(m_active_block_list_d), CAST(vert_offsets_d), CAST(m_DT_d), CAST(DT_d_out)))); // IdxVector_h itmp_h = d_vert_con; // if(stepcount == 1 && nTotalIter ==1) // { // Vector_h dtmp_h = m_DT_d; // IdxVector_h tmp_h = m_Label_d; // for (int i = 0; i < dtmp_h.size(); i++) // { // printf("oldDT_h[%d] = %f, m_DT_d[%d] = %f, m_Label_d[%d] = %d\n", i, oldDT_h[i], i, dtmp_h[i], i, tmp_h[i]); // } // } // Vector_h tmp_h = m_DT_d; //////////////////////step 2: reduction//////////////////////////////////////////////// if (nthreads <= 32) { cudaSafeCall((run_reduction_bandwidth < 32 > << <nblocks, 32 >> > (CAST(d_vert_con), CAST(d_block_con), CAST(m_active_block_list_d), CAST(DT_d_out), CAST(d_block_vertT_min), CAST(vert_offsets_d)))); } else if (nthreads <= 64) { cudaSafeCall((run_reduction_bandwidth < 64 > << <nblocks, 64 >> > (CAST(d_vert_con), CAST(d_block_con), CAST(m_active_block_list_d), CAST(DT_d_out), CAST(d_block_vertT_min), CAST(vert_offsets_d)))); } else if (nthreads <= 128) { cudaSafeCall((run_reduction_bandwidth < 128 > << <nblocks, 128 >> > (CAST(d_vert_con), CAST(d_block_con), CAST(m_active_block_list_d), CAST(DT_d_out), CAST(d_block_vertT_min), CAST(vert_offsets_d)))); } else if (nthreads <= 256) { cudaSafeCall((run_reduction_bandwidth < 256 > << <nblocks, 256 >> > (CAST(d_vert_con), CAST(d_block_con), CAST(m_active_block_list_d), CAST(DT_d_out), CAST(d_block_vertT_min), CAST(vert_offsets_d)))); } else { printf("Error: nthreads greater than 256!!!\n"); } // Vector_h dtmp_h = d_block_vertT_min; // for(int i = 0; i < dtmp_h.size(); i++) // { // printf("d_block_con[%d]=%f\n", i, dtmp_h[i]); // } // 3. 
check neighbor tiles of converged tile thrust::copy(d_block_con.begin(), d_block_con.end(), h_block_con.begin()); // if(stepcount == 2 && nTotalIter == 3) // { // Vector_h tmp_h = d_block_vertT_min; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("d_block_vertT_min[%d] = %f\n", i, tmp_h[i]); // } // } h_block_vertT_min = d_block_vertT_min; int nOldActiveBlock = numActive; numActive = 0; h_ActiveListNew.clear(); for (int i = 0; i < nOldActiveBlock; i++) { int currBlkIdx = h_ActiveList[i]; h_BlockLabel[currBlkIdx] = FarPoint; if (!h_block_con[currBlkIdx]) // if not converged { h_BlockLabel[currBlkIdx] = ActivePoint; } } for (int i = 0; i < nOldActiveBlock; i++) { int currBlkIdx = h_ActiveList[i]; if (h_block_con[currBlkIdx] && h_block_vertT_min[currBlkIdx] < bandwidth) //converged { int start = block_xadj_h[currBlkIdx]; int end = block_xadj_h[currBlkIdx + 1]; for (int iter = 0; iter < end - start; iter++) { int currIdx = block_adjncy_h[iter + start]; if (h_BlockLabel[currIdx] == FarPoint) { h_BlockLabel[currIdx] = ActivePoint; h_ActiveListNew.push_back(currIdx); } } } } for (int i = 0; i < nOldActiveBlock; i++) { int currBlkIdx = h_ActiveList[i]; if (!h_block_con[currBlkIdx]) // if not converged { h_ActiveList[numActive++] = currBlkIdx; } } // for(int i=0; i<h_ActiveListNew.size(); i++) // { // printf("h_ActiveListNew[%d]=%d\n", i, h_ActiveListNew[i]); // } ////////////////////////////////////////////////////////////////// // 4. 
run solver only once for neighbor blocks of converged block // current active list contains active blocks and neighbor blocks of // any converged blocks if (h_ActiveListNew.size() > 0) { int numActiveNew = h_ActiveListNew.size(); m_active_block_list_d = h_ActiveListNew; nblocks = numActiveNew; nthreads = largest_ele_part; int sharedSize = sizeof (LevelsetValueType)* 4 * largest_ele_part + sizeof (short) *largest_vert_part*largest_num_inside_mem; cudaSafeCall((kernel_run_check_neghbor << <nblocks, nthreads, shared_size >> >(CAST(m_active_block_list_d), CAST(m_Label_d), largest_ele_part, largest_vert_part, full_num_ele, CAST(ele_after_permute_d), CAST(ele_offsets_d), CAST(vert_offsets_d), CAST(m_DT_d), CAST(ele_local_coords_d), largest_num_inside_mem, CAST(mem_locations), CAST(mem_location_offsets), 1, CAST(DT_d_out), CAST(d_vert_con)))); // nthreads = largest_vert_part; // cudaSafeCall((CopyOutBack << <nblocks, nthreads >> >(CAST(m_active_block_list_d), // CAST(vert_offsets_d), CAST(m_DT_d), CAST(DT_d_out)))); //////////////////////////////////////////////////////////////// // 5. reduction //////////////////////////////////////////////////////////////// nthreads = largest_vert_part; run_reduction << <nblocks, nthreads >> >(CAST(d_vert_con), CAST(d_block_con), CAST(m_active_block_list_d), CAST(vert_offsets_d)); ////////////////////////////////////////////////////////////////// // 6. 
update active list // read back active volume from the device and add // active block to active list on the host memory h_block_con = d_block_con; for (int i = 0; i < h_ActiveListNew.size(); i++) { int currBlkIdx = h_ActiveListNew[i]; if (!h_block_con[currBlkIdx]) // false : activate block (not converged) { h_ActiveList[numActive++] = currBlkIdx; } else h_BlockLabel[currBlkIdx] = FarPoint; } } } //compute new narrow band list // if(stepcount == 2) // { // Vector_h tmp_h = m_DT_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_DT_d[%d] = %f\n", i, tmp_h[i]); // } // } nblocks = nparts; nthreads = largest_vert_part; tmp_new_narrowband[0] = 0; if (nthreads <= 32) { cudaSafeCall((kernel_compute_new_narrowband < 32 > << <nblocks, 32 >> > (CAST(tmp_new_narrowband), CAST(m_DT_d), CAST(vert_offsets_d), bandwidth))); } else if (nthreads <= 64) { cudaSafeCall((kernel_compute_new_narrowband < 64 > << <nblocks, 64 >> >(CAST(tmp_new_narrowband), CAST(m_DT_d), CAST(vert_offsets_d), bandwidth))); } else if (nthreads <= 128) { cudaSafeCall((kernel_compute_new_narrowband < 128 > << <nblocks, 128 >> >(CAST(tmp_new_narrowband), CAST(m_DT_d), CAST(vert_offsets_d), bandwidth))); } else if (nthreads <= 256) { cudaSafeCall((kernel_compute_new_narrowband < 256 > << <nblocks, 256 >> >(CAST(tmp_new_narrowband), CAST(m_DT_d), CAST(vert_offsets_d), bandwidth))); } else { printf("Error: nthreads greater then 256!!!\n"); } int numb = tmp_new_narrowband[0]; new_num_narrowband = numb; printf("Size of new narrowband is %d\n", numb); // new_narrowband.resize(numb); // IdxVector_h tmp_h = new_narrowband; // for(int i=0; i<tmp_h.size(); i++) // { // printf("new_narrowband[%d]=%d\n", i, tmp_h[i]); // } nblocks = numb; // Vector_h tmp_h = m_DT_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_DT_d[%d] = %f\n", i, tmp_h[i]); // } thrust::copy(m_DT_d.begin(), m_DT_d.end(), vertT_after_permute_d.begin()); thrust::copy(tmp_new_narrowband.begin() + 1, tmp_new_narrowband.begin() + 
numb + 1, new_narrowband.begin()); // if(stepcount == 1) // { // IdxVector_h tmp_h = new_narrowband; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("new_narrowband[%d] = %d\n", i, tmp_h[i]); // } // tmp_h = m_vert_offsets_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_vert_offsets_d[%d] = %d\n", i, tmp_h[i]); // } // // CharVector_h ctmp_h = m_Tsign_d; // for(int i = 0; i < ctmp_h.size(); i++) // { // printf("m_Tsign_d[%d] = %d\n", i, ctmp_h[i]); // } // Vector_h tmpT = m_vertT_after_permute_d; // for(int i=0; i<tmpT.size(); i++) // { // tmpT[i] *= 2*ctmp_h[i]-1; // } // m_vertT_after_permute_d = tmpT; // } // printf("nblocks=%d, nthreads=%d\n", nblocks, nthreads); // if(nblocks > 0) // { // cudaSafeCall((kernel_recover_Tsign << <nblocks, nthreads >> >(CAST(new_narrowband), CAST(vert_offsets_d), CAST(vertT_after_permute_d), CAST(m_Tsign_d)))); // } // else // printf("nblocks for kernel_recover_Tsign is 0!!\n"); nthreads = 256; nblocks = min((int) ceil((LevelsetValueType) nn / nthreads), 65535); cudaSafeCall((kernel_recover_Tsign_whole << <nblocks, nthreads >> >(nn, CAST(vertT_after_permute_d), CAST(m_Tsign_d)))); // tmp_h = vertT_after_permute_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("vertT_after_permute_d[%d] = %f\n", i, tmp_h[i]); // } // hipDeviceSynchronize(); // int tmp = 0; // if(stepcount == 1) // { // Vector_h tmpT = m_vertT_after_permute_d; // for(int i = 0; i < tmpT.size(); i++) // { // printf("m_vertT_after_permute_d[%d] = %f\n", i, tmpT[i]); // } // } }
c191ca89c0b71f576084520e33f0cb035d06a32d.cu
#include <redistance.h> #include <redistance_kernels.h> #include <Vec.h> #include <math.h> #include <stdio.h> #include <mycutil.h> #include "cusp/print.h" void redistance::ReInitTsign(TetMesh* mesh, Vector_d& vertT_after_permute_d, int nparts, int largest_vert_part, int largest_ele_part, int largest_num_inside_mem, int full_num_ele, Vector_d& vert_after_permute_d, IdxVector_d& vert_offsets_d, IdxVector_d& ele_after_permute_d, IdxVector_d& ele_offsets_d, Vector_d& ele_local_coords_d, IdxVector_d& mem_location_offsets, IdxVector_d& mem_locations, IdxVector_d& part_label_d, IdxVector_d& block_xadj, IdxVector_d& block_adjncy) { int nn = mesh->vertices.size(); int nthreads = 256; int nblocks = min((int) ceil((LevelsetValueType) nn / nthreads), 65535); cudaSafeCall((kernel_reinit_Tsign << <nblocks, nthreads >> >(nn, CAST(vertT_after_permute_d), CAST(m_Tsign_d)))); } void redistance::FindSeedPoint(const IdxVector_d& old_narrowband, const int num_old_narrowband, TetMesh* mesh, Vector_d& vertT_after_permute_d, int nparts, int largest_vert_part, int largest_ele_part, int largest_num_inside_mem, int full_num_ele, Vector_d& vert_after_permute_d, IdxVector_d& vert_offsets_d, IdxVector_d& ele_after_permute_d, IdxVector_d& ele_offsets_d, Vector_d& ele_local_coords_d, IdxVector_d& mem_location_offsets, IdxVector_d& mem_locations, IdxVector_d& part_label_d, IdxVector_d& block_xadj, IdxVector_d& block_adjncy) { int ne = mesh->tets.size(); int nn = mesh->vertices.size(); int nnb = num_old_narrowband; // printf("nnb = %d\n", nnb); thrust::fill(m_DT_d.begin(), m_DT_d.end(), LARGENUM); // m_active_block_list_d.resize(nparts + 1); m_active_block_list_d[0] = 0; if (nnb == 0) { // m_Label_d.resize(nn, FarPoint); thrust::fill(m_Label_d.begin(), m_Label_d.end(), FarPoint); int nthreads = largest_ele_part; int nblocks = nparts; cudaSafeCall((kernel_seedlabel << <nblocks, nthreads >> >(nn, full_num_ele, CAST(vert_after_permute_d), CAST(vert_offsets_d), CAST(ele_after_permute_d), 
CAST(ele_offsets_d), CAST(m_Label_d), CAST(vertT_after_permute_d), CAST(m_DT_d), CAST(m_active_block_list_d)))); // Vector_h tmp = m_DT_d; // for(int i=0; i<tmp.size(); i++) // printf("m_DT_d[%d] = %f\n", i, tmp[i]); // // IdxVector_h idxtmp = m_active_block_list_d; // for(int i=0; i<idxtmp.size(); i++) // printf("m_active_block_list_d[%d] = %d\n", i, idxtmp[i]); } else { // m_Label_d.resize(nn, FarPoint); thrust::fill(m_Label_d.begin(), m_Label_d.end(), FarPoint); int nthreads = largest_ele_part; // Vector_h dtmp_h = m_vertT_after_permute_d; // for(int i =0; i<dtmp_h.size(); i++) // { // printf("m_vertT_after_permute_d[%d] = %f\n", i, dtmp_h[i]); // } int nblocks = nnb; cudaSafeCall((kernel_seedlabel_narrowband << <nblocks, nthreads >> >(nn, full_num_ele, CAST(old_narrowband), CAST(vert_after_permute_d), CAST(vert_offsets_d), CAST(ele_after_permute_d), CAST(ele_offsets_d), CAST(m_Label_d), CAST(vertT_after_permute_d), CAST(m_DT_d), CAST(m_active_block_list_d)))); } // Vector_h tmp_h = m_DT_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_DT_d[%d] = %f\n", i, tmp_h[i]); // } // // tmp_h = m_active_block_list_d; // printf("Num active list = %d\n", tmp_h[0]); // for(int i = 0; i < tmp_h[0]; i++) // { // printf("m_active_block_list_d[%d]=%d\n", i, tmp_h[i + 1]); // } } void redistance::GenerateData(IdxVector_d& new_narrowband, int& new_num_narrowband, LevelsetValueType bandwidth, int stepcount, TetMesh* mesh, Vector_d& vertT_after_permute_d, int nparts, int largest_vert_part, int largest_ele_part, int largest_num_inside_mem, int full_num_ele, Vector_d& vert_after_permute_d, IdxVector_d& vert_offsets_d, IdxVector_d& ele_after_permute_d, IdxVector_d& ele_offsets_d, Vector_d& ele_local_coords_d, IdxVector_d& mem_location_offsets, IdxVector_d& mem_locations, IdxVector_d& part_label_d, IdxVector_d& block_xadj, IdxVector_d& block_adjncy) { int nn = mesh->vertices.size(); int totalIterationNumber = 0; int nblocks, nthreads, shared_size; int NUM_ITER = 5; int 
nTotalIter = 0; int numActive = m_active_block_list_d[0]; // printf("numActive=%d\n", numActive); thrust::copy(m_DT_d.begin(), m_DT_d.end(), DT_d_out.begin()); thrust::fill(d_vert_con.begin(), d_vert_con.end(), 0); thrust::fill(d_block_con.begin(), d_block_con.end(), 0); // h_block_con = BoolVector_h(nparts); // block_xadj_h = IdxVector_h(block_xadj.begin(), block_xadj.end()); // block_adjncy_h = IdxVector_h(block_adjncy.begin(), block_adjncy.end()); thrust::copy(m_active_block_list_d.begin() + 1, m_active_block_list_d.begin() + 1 + numActive, h_ActiveList.begin()); h_BlockLabel.assign(nparts, FarPoint); // d_block_vertT_min = Vector_d(nparts); // h_block_vertT_min = Vector_h(nparts); while (numActive > 0) { printf("nTotalIter = %d, numActive=%d\n", nTotalIter, numActive); ///////////////////////////step 1: run solver ////////////////////////////////////////////////////////////////// nTotalIter++; totalIterationNumber += numActive; nblocks = numActive; nthreads = largest_ele_part; m_active_block_list_d = h_ActiveList; shared_size = sizeof (LevelsetValueType)* 4 * largest_ele_part + sizeof (short) *largest_vert_part*largest_num_inside_mem; // if(stepcount == 1 && nTotalIter == 1) // Vector_h tmp_h = m_DT_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_DT_d[%d] = %f\n", i, tmp_h[i]); // } // // IdxVector_h itmp_h = m_active_block_list_d; // for(int i = 0; i < numActive; i++) // { // printf("m_active_block_list_d[%d] = %d\n", i, itmp_h[i]); // } // // tmp_h = m_mem_location_offsets; // for (int i = 0; i < tmp_h.size(); i++) // { // printf("m_mem_location_offsets[%d] = %d\n", i, tmp_h[i]); // } // Vector_h oldDT_h = m_DT_d; cudaSafeCall((kernel_update_values << <nblocks, nthreads, shared_size >> >(CAST(m_active_block_list_d), CAST(m_Label_d), largest_ele_part, largest_vert_part, full_num_ele, CAST(ele_after_permute_d), CAST(ele_offsets_d), CAST(vert_offsets_d), CAST(m_DT_d), CAST(ele_local_coords_d), largest_num_inside_mem, CAST(mem_locations), 
CAST(mem_location_offsets), NUM_ITER, CAST(DT_d_out), CAST(d_vert_con)))); // tmp_h = DT_d_out; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_DT_d[%d] = %f\n", i, tmp_h[i]); // } nthreads = largest_vert_part; cudaSafeCall((CopyOutBack << <nblocks, nthreads >> >(CAST(m_active_block_list_d), CAST(vert_offsets_d), CAST(m_DT_d), CAST(DT_d_out)))); // IdxVector_h itmp_h = d_vert_con; // if(stepcount == 1 && nTotalIter ==1) // { // Vector_h dtmp_h = m_DT_d; // IdxVector_h tmp_h = m_Label_d; // for (int i = 0; i < dtmp_h.size(); i++) // { // printf("oldDT_h[%d] = %f, m_DT_d[%d] = %f, m_Label_d[%d] = %d\n", i, oldDT_h[i], i, dtmp_h[i], i, tmp_h[i]); // } // } // Vector_h tmp_h = m_DT_d; //////////////////////step 2: reduction//////////////////////////////////////////////// if (nthreads <= 32) { cudaSafeCall((run_reduction_bandwidth < 32 > << <nblocks, 32 >> > (CAST(d_vert_con), CAST(d_block_con), CAST(m_active_block_list_d), CAST(DT_d_out), CAST(d_block_vertT_min), CAST(vert_offsets_d)))); } else if (nthreads <= 64) { cudaSafeCall((run_reduction_bandwidth < 64 > << <nblocks, 64 >> > (CAST(d_vert_con), CAST(d_block_con), CAST(m_active_block_list_d), CAST(DT_d_out), CAST(d_block_vertT_min), CAST(vert_offsets_d)))); } else if (nthreads <= 128) { cudaSafeCall((run_reduction_bandwidth < 128 > << <nblocks, 128 >> > (CAST(d_vert_con), CAST(d_block_con), CAST(m_active_block_list_d), CAST(DT_d_out), CAST(d_block_vertT_min), CAST(vert_offsets_d)))); } else if (nthreads <= 256) { cudaSafeCall((run_reduction_bandwidth < 256 > << <nblocks, 256 >> > (CAST(d_vert_con), CAST(d_block_con), CAST(m_active_block_list_d), CAST(DT_d_out), CAST(d_block_vertT_min), CAST(vert_offsets_d)))); } else { printf("Error: nthreads greater than 256!!!\n"); } // Vector_h dtmp_h = d_block_vertT_min; // for(int i = 0; i < dtmp_h.size(); i++) // { // printf("d_block_con[%d]=%f\n", i, dtmp_h[i]); // } // 3. 
check neighbor tiles of converged tile thrust::copy(d_block_con.begin(), d_block_con.end(), h_block_con.begin()); // if(stepcount == 2 && nTotalIter == 3) // { // Vector_h tmp_h = d_block_vertT_min; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("d_block_vertT_min[%d] = %f\n", i, tmp_h[i]); // } // } h_block_vertT_min = d_block_vertT_min; int nOldActiveBlock = numActive; numActive = 0; h_ActiveListNew.clear(); for (int i = 0; i < nOldActiveBlock; i++) { int currBlkIdx = h_ActiveList[i]; h_BlockLabel[currBlkIdx] = FarPoint; if (!h_block_con[currBlkIdx]) // if not converged { h_BlockLabel[currBlkIdx] = ActivePoint; } } for (int i = 0; i < nOldActiveBlock; i++) { int currBlkIdx = h_ActiveList[i]; if (h_block_con[currBlkIdx] && h_block_vertT_min[currBlkIdx] < bandwidth) //converged { int start = block_xadj_h[currBlkIdx]; int end = block_xadj_h[currBlkIdx + 1]; for (int iter = 0; iter < end - start; iter++) { int currIdx = block_adjncy_h[iter + start]; if (h_BlockLabel[currIdx] == FarPoint) { h_BlockLabel[currIdx] = ActivePoint; h_ActiveListNew.push_back(currIdx); } } } } for (int i = 0; i < nOldActiveBlock; i++) { int currBlkIdx = h_ActiveList[i]; if (!h_block_con[currBlkIdx]) // if not converged { h_ActiveList[numActive++] = currBlkIdx; } } // for(int i=0; i<h_ActiveListNew.size(); i++) // { // printf("h_ActiveListNew[%d]=%d\n", i, h_ActiveListNew[i]); // } ////////////////////////////////////////////////////////////////// // 4. 
run solver only once for neighbor blocks of converged block // current active list contains active blocks and neighbor blocks of // any converged blocks if (h_ActiveListNew.size() > 0) { int numActiveNew = h_ActiveListNew.size(); m_active_block_list_d = h_ActiveListNew; nblocks = numActiveNew; nthreads = largest_ele_part; int sharedSize = sizeof (LevelsetValueType)* 4 * largest_ele_part + sizeof (short) *largest_vert_part*largest_num_inside_mem; cudaSafeCall((kernel_run_check_neghbor << <nblocks, nthreads, shared_size >> >(CAST(m_active_block_list_d), CAST(m_Label_d), largest_ele_part, largest_vert_part, full_num_ele, CAST(ele_after_permute_d), CAST(ele_offsets_d), CAST(vert_offsets_d), CAST(m_DT_d), CAST(ele_local_coords_d), largest_num_inside_mem, CAST(mem_locations), CAST(mem_location_offsets), 1, CAST(DT_d_out), CAST(d_vert_con)))); // nthreads = largest_vert_part; // cudaSafeCall((CopyOutBack << <nblocks, nthreads >> >(CAST(m_active_block_list_d), // CAST(vert_offsets_d), CAST(m_DT_d), CAST(DT_d_out)))); //////////////////////////////////////////////////////////////// // 5. reduction //////////////////////////////////////////////////////////////// nthreads = largest_vert_part; run_reduction << <nblocks, nthreads >> >(CAST(d_vert_con), CAST(d_block_con), CAST(m_active_block_list_d), CAST(vert_offsets_d)); ////////////////////////////////////////////////////////////////// // 6. 
update active list // read back active volume from the device and add // active block to active list on the host memory h_block_con = d_block_con; for (int i = 0; i < h_ActiveListNew.size(); i++) { int currBlkIdx = h_ActiveListNew[i]; if (!h_block_con[currBlkIdx]) // false : activate block (not converged) { h_ActiveList[numActive++] = currBlkIdx; } else h_BlockLabel[currBlkIdx] = FarPoint; } } } //compute new narrow band list // if(stepcount == 2) // { // Vector_h tmp_h = m_DT_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_DT_d[%d] = %f\n", i, tmp_h[i]); // } // } nblocks = nparts; nthreads = largest_vert_part; tmp_new_narrowband[0] = 0; if (nthreads <= 32) { cudaSafeCall((kernel_compute_new_narrowband < 32 > << <nblocks, 32 >> > (CAST(tmp_new_narrowband), CAST(m_DT_d), CAST(vert_offsets_d), bandwidth))); } else if (nthreads <= 64) { cudaSafeCall((kernel_compute_new_narrowband < 64 > << <nblocks, 64 >> >(CAST(tmp_new_narrowband), CAST(m_DT_d), CAST(vert_offsets_d), bandwidth))); } else if (nthreads <= 128) { cudaSafeCall((kernel_compute_new_narrowband < 128 > << <nblocks, 128 >> >(CAST(tmp_new_narrowband), CAST(m_DT_d), CAST(vert_offsets_d), bandwidth))); } else if (nthreads <= 256) { cudaSafeCall((kernel_compute_new_narrowband < 256 > << <nblocks, 256 >> >(CAST(tmp_new_narrowband), CAST(m_DT_d), CAST(vert_offsets_d), bandwidth))); } else { printf("Error: nthreads greater then 256!!!\n"); } int numb = tmp_new_narrowband[0]; new_num_narrowband = numb; printf("Size of new narrowband is %d\n", numb); // new_narrowband.resize(numb); // IdxVector_h tmp_h = new_narrowband; // for(int i=0; i<tmp_h.size(); i++) // { // printf("new_narrowband[%d]=%d\n", i, tmp_h[i]); // } nblocks = numb; // Vector_h tmp_h = m_DT_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_DT_d[%d] = %f\n", i, tmp_h[i]); // } thrust::copy(m_DT_d.begin(), m_DT_d.end(), vertT_after_permute_d.begin()); thrust::copy(tmp_new_narrowband.begin() + 1, tmp_new_narrowband.begin() + 
numb + 1, new_narrowband.begin()); // if(stepcount == 1) // { // IdxVector_h tmp_h = new_narrowband; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("new_narrowband[%d] = %d\n", i, tmp_h[i]); // } // tmp_h = m_vert_offsets_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("m_vert_offsets_d[%d] = %d\n", i, tmp_h[i]); // } // // CharVector_h ctmp_h = m_Tsign_d; // for(int i = 0; i < ctmp_h.size(); i++) // { // printf("m_Tsign_d[%d] = %d\n", i, ctmp_h[i]); // } // Vector_h tmpT = m_vertT_after_permute_d; // for(int i=0; i<tmpT.size(); i++) // { // tmpT[i] *= 2*ctmp_h[i]-1; // } // m_vertT_after_permute_d = tmpT; // } // printf("nblocks=%d, nthreads=%d\n", nblocks, nthreads); // if(nblocks > 0) // { // cudaSafeCall((kernel_recover_Tsign << <nblocks, nthreads >> >(CAST(new_narrowband), CAST(vert_offsets_d), CAST(vertT_after_permute_d), CAST(m_Tsign_d)))); // } // else // printf("nblocks for kernel_recover_Tsign is 0!!\n"); nthreads = 256; nblocks = min((int) ceil((LevelsetValueType) nn / nthreads), 65535); cudaSafeCall((kernel_recover_Tsign_whole << <nblocks, nthreads >> >(nn, CAST(vertT_after_permute_d), CAST(m_Tsign_d)))); // tmp_h = vertT_after_permute_d; // for(int i = 0; i < tmp_h.size(); i++) // { // printf("vertT_after_permute_d[%d] = %f\n", i, tmp_h[i]); // } // cudaThreadSynchronize(); // int tmp = 0; // if(stepcount == 1) // { // Vector_h tmpT = m_vertT_after_permute_d; // for(int i = 0; i < tmpT.size(); i++) // { // printf("m_vertT_after_permute_d[%d] = %f\n", i, tmpT[i]); // } // } }
cbcd49580ac40604f66008d845da9b0f3c694e90.hip
// !!! This is a file automatically generated by hipify!!! #include "spmv.h" // CuSparse_SpMVfloat void spmvCuSparse(hipsparseHandle_t &handle, int m, int n, int nnz, const float *alpha, hipsparseMatDescr_t &descr, const float *value, const int *rowPtr, const int *col, const float *x, const float *beta, float *y) { checkCuSparseError(hipsparseScsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, alpha, descr, value, rowPtr, col, x, beta, y)); } // CuSparse_SpMVdouble void spmvCuSparse(hipsparseHandle_t &handle, int m, int n, int nnz, const double *alpha, hipsparseMatDescr_t &descr, const double *value, const int *rowPtr, const int *col, const double *x, const double *beta, double *y) { checkCuSparseError(hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, alpha, descr, value, rowPtr, col, x, beta, y)); }
cbcd49580ac40604f66008d845da9b0f3c694e90.cu
#include "spmv.h" // CuSparse_SpMV封装(float型重载) void spmvCuSparse(cusparseHandle_t &handle, int m, int n, int nnz, const float *alpha, cusparseMatDescr_t &descr, const float *value, const int *rowPtr, const int *col, const float *x, const float *beta, float *y) { checkCuSparseError(cusparseScsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, alpha, descr, value, rowPtr, col, x, beta, y)); } // CuSparse_SpMV封装(double型重载) void spmvCuSparse(cusparseHandle_t &handle, int m, int n, int nnz, const double *alpha, cusparseMatDescr_t &descr, const double *value, const int *rowPtr, const int *col, const double *x, const double *beta, double *y) { checkCuSparseError(cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, alpha, descr, value, rowPtr, col, x, beta, y)); }
ec4daabc2ef55015a3fb771d3ca8ccbf697cc119.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <sstream> #include <stdlib.h> #include <string.h> #include <iostream> #include <fstream> #include <assert.h> #include "tuningParameters.h" #include "qtclib.h" #include "support.h" #include "libdata.h" #include "cudacommon.h" #define _USE_MATH_DEFINES #include <float.h> #include <hip/hip_runtime.h> #include "../benchmark_common.h" texture<float, 2, hipReadModeElementType> texDistance; using namespace std; #include "kernels_common.h" #include "kernels_full_storage.h" #include "kernels_compact_storage.h" // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing. The user is allowed to specify // the size of the input data in megabytes if they are not using a // predefined size (i.e. the -s option). // // Arguments: // op: the options parser / parameter database // // Programmer: Anthony Danalis // Creation: February 04, 2011 // Returns: nothing // // **************************************************************************** /*void addBenchmarkSpecOptions(OptionParser &op){ op.addOption("PointCount", OPT_INT, "4096", "point count"); op.addOption("DataFile", OPT_STRING, "///", "BLAST data input file name"); op.addOption("Threshold", OPT_FLOAT, "1", "cluster diameter threshold"); op.addOption("SaveOutput", OPT_BOOL, "", "BLAST data input file name"); op.addOption("Verbose", OPT_BOOL, "", "Print cluster cardinalities"); op.addOption("TextureMem", OPT_BOOL, "0", "Use Texture memory for distance matrix"); op.addOption("CompactStorage", OPT_BOOL, "0", "Use compact storage distance matrix regardless of problem size"); }*/ // **************************************************************************** // Function: RunBenchmark // // Purpose: // Calls single precision and, if viable, double precision QT-Clustering // benchmark. 
// // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name,hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag); int main_QTC(hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag){ // Test to see if this device supports double precision hipGetDevice(&qtcDevice); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, qtcDevice); runTest("QTC", stream_app, mutexapp, flag); return 0; } // **************************************************************************** // Function: calculate_participants // // Purpose: // This function decides how many GPUs (up to the maximum requested by the user) // and threadblocks per GPU will be used. It also returns the total number of // thread-blocks across all GPUs and the number of thread-blocks that are in nodes // before the current one. // In the future, the behavior of this function should be decided based on // auto-tuning instead of arbitrary decisions. 
// // Arguments: // The number of nodes requested by the user and the four // variables that the function computes (passed by reference) // // // Returns: nothing // // Programmer: Anthony Danalis // Creation: May 25, 2011 // // **************************************************************************** void calculate_participants(int point_count, int node_count, int cwrank, int *thread_block_count, int *total_thread_block_count, int *active_node_count){ int ac_nd_cnt, thr_blc_cnt, total_thr_blc_cnt; ac_nd_cnt = node_count; if( point_count <= (node_count-1) * SM_COUNT * GPU_MIN_SATURATION_FACTOR ){ int K = SM_COUNT * GPU_MIN_SATURATION_FACTOR; ac_nd_cnt = (point_count+K-1) / K; } if( point_count >= ac_nd_cnt * SM_COUNT * OVR_SBSCR_FACTOR ){ thr_blc_cnt = SM_COUNT * OVR_SBSCR_FACTOR; total_thr_blc_cnt = thr_blc_cnt * ac_nd_cnt; }else{ thr_blc_cnt = point_count/ac_nd_cnt; if( cwrank < point_count%ac_nd_cnt ){ thr_blc_cnt++; } total_thr_blc_cnt = point_count; } *active_node_count = ac_nd_cnt; *thread_block_count = thr_blc_cnt; *total_thread_block_count = total_thr_blc_cnt; return; } unsigned long int estimate_memory_for_full_storage(unsigned long int pnt_cnt, float d){ unsigned long total, thread_block_count, max_degree; float density; thread_block_count = (unsigned long int)SM_COUNT * OVR_SBSCR_FACTOR; // The density calculations assume that we are dealing with generated Euclidean points // (as opposed to externally provided scientific data) that are constraint in a 20x20 2D square. density = 3.14159*(d*d)/(20.0*20.0); if(density > 1.0 ) density = 1.0; max_degree = (unsigned long int)((float)pnt_cnt*density); // average number of points in a cirlce with radius d. max_degree *= 10; // The distribution of points is not uniform, so throw in a factor of 10 for max/average. if( max_degree > pnt_cnt ) max_degree = pnt_cnt; // Due to the point generation algorithm, a cluster can have up to N/30 elements in an arbitratiry small radius. 
if( max_degree < pnt_cnt/30 ) max_degree = pnt_cnt/30; total = 0; total += pnt_cnt*pnt_cnt*sizeof(float); // Sparse distance matrix total += pnt_cnt*max_degree*sizeof(int); // Indirection matrix total += pnt_cnt*thread_block_count*sizeof(char); // Current candidate cluster mask total += pnt_cnt*sizeof(int); // Ungrouped elements indirection vector total += pnt_cnt*sizeof(int); // Degrees vector total += pnt_cnt*sizeof(int); // Result return total; } void findMemCharacteristics(unsigned long int *gmem, unsigned long int *text){ int device; hipDeviceProp_t deviceProp; hipGetDevice(&device); CHECK_CUDA_ERROR(); hipGetDeviceProperties(&deviceProp, device); CHECK_CUDA_ERROR(); *gmem = (unsigned long int)(0.75*(float)deviceProp.totalGlobalMem); *text = (unsigned long int)deviceProp.maxTexture2D[1]; return; } // **************************************************************************** // Function: runTest // // Purpose: // This benchmark measures the performance of applying QT-clustering on // single precision data. // // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name, hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag) { unsigned long int point_count, max_avail_memory, max_texture_dimension, needed_mem; int def_size = -1, matrix_type = 0x0; float threshold; bool use_texture = true, use_compact_storage = false; def_size = 1; point_count = 4096; threshold = 1; use_texture = 0; use_compact_storage = 0; if( use_compact_storage ){ use_texture = false; } switch( def_size ){ case 1: // size == 1 should match default values of PointCount, // Threshold, TextureMem, and CompactStorage parameters. 
// (i.e., -s 1 is the default) point_count = 4*1024; threshold = 1; use_texture = false; use_compact_storage = false; break; case 2: point_count = 8*1024; threshold = 1; use_texture = true; use_compact_storage = false; break; case 3: point_count = 16*1024; threshold = 1; use_texture = true; use_compact_storage = false; break; case 4: point_count = 16*1024; threshold = 4; use_texture = true; use_compact_storage = false; break; case 5: point_count = 26*1024; threshold = 1; use_texture = false; use_compact_storage = true; break; default: fprintf( stderr, "unsupported size %d given; terminating\n", def_size ); return; } //if( 0 == comm_get_rank() ){ // Make a reasonable estimate of the actual memory I can allocate // as well as the max texture size. findMemCharacteristics(&max_avail_memory, &max_texture_dimension); needed_mem = estimate_memory_for_full_storage(point_count, threshold); // see if we can fit the distance matrix in texture memory if( (point_count >= max_texture_dimension) || !use_texture ){ printf("Using global memory for distance matrix\n"); matrix_type |= GLOBAL_MEMORY; }else{ printf("Using texture memory for distance matrix\n"); matrix_type |= TEXTUR_MEMORY; } // find out what type of distance matrix we will be using. 
if( (max_avail_memory > needed_mem) && !use_compact_storage ){ printf("Using full storage distance matrix algorithm\n"); matrix_type |= FULL_STORAGE_MATRIX; }else{ printf("Using compact storage distance matrix algorithm\n"); matrix_type |= COMPACT_STORAGE_MATRIX; } //} //comm_broadcast ( &matrix_type, 1, COMM_TYPE_INT, 0); QTC(name, matrix_type, stream_app, mutexapp, flag); } //////////////////////////////////////////////////////////////////////////////// // void QTC(const string& name, int matrix_type,hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag){ ofstream debug_out, seeds_out; void *Ai_mask, *cardnl, *ungrpd_pnts_indr, *clustered_pnts_mask, *result, *dist_to_clust; void *indr_mtrx, *degrees; int *indr_mtrx_host, *ungrpd_pnts_indr_host, *cardinalities, *output; bool save_clusters, be_verbose, can_use_texture, synthetic_data; hipArray *distance_matrix_txt; void *distance_matrix_gmem, *distance_matrix; float *dist_source, *pnts; float threshold; int i, max_degree, thread_block_count, total_thread_block_count, active_node_count; int cwrank=0, node_count=1, tpb, max_card, iter=0; double t_krn, t_comm, t_trim, t_updt, t_redc, t_sync; unsigned long int dst_matrix_elems, point_count, max_point_count; string fname; point_count = 4096; threshold = 1; save_clusters = ""; be_verbose = ""; synthetic_data = true; /*fname = "///"; if( fname.compare("///") == 0 ){ synthetic_data = true; }else{ synthetic_data = false; save_clusters = false; }*/ can_use_texture = !!(matrix_type & TEXTUR_MEMORY); // TODO - only deal with this size-switch once int def_size = 1; switch( def_size ) { case 1: // size == 1 should match default values of PointCount, // Threshold, TextureMem, and CompactStorage parameters. 
// (i.e., -s 1 is the default) point_count = 4*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 2: point_count = 8*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 3: point_count = 16*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 4: point_count = 16*1024; threshold = 4; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 5: point_count = 26*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; default: fprintf( stderr, "unsupported size %d given; terminating\n", def_size ); return; } cwrank = 0; node_count =1; if( cwrank == 0 ){ //if( synthetic_data ) pnts = generate_synthetic_data(&dist_source, &indr_mtrx_host, &max_degree, threshold, point_count, matrix_type); // else //(void)read_BLAST_data(&dist_source, &indr_mtrx_host, &max_degree, threshold, fname.c_str(), point_count, matrix_type); } //comm_broadcast ( &point_count, 1, COMM_TYPE_INT, 0); //comm_broadcast ( &max_degree, 1, COMM_TYPE_INT, 0); if( matrix_type & FULL_STORAGE_MATRIX ){ dst_matrix_elems = point_count*point_count; }else{ dst_matrix_elems = point_count*max_degree; } if( cwrank != 0 ){ // For all nodes except zero, in a distributed run. allocHostBuffer((void **)&dist_source, dst_matrix_elems*sizeof(float)); allocHostBuffer((void **)&indr_mtrx_host, point_count*max_degree*sizeof(int)); } // If we need to print the actual clusters later on, we'll need to have all points in all nodes. 
if( save_clusters ){ if( cwrank != 0 ){ pnts = (float *)malloc( 2*point_count*sizeof(float) ); } //comm_broadcast ( pnts, 2*point_count, COMM_TYPE_FLOAT, 0); } //comm_broadcast ( dist_source, dst_matrix_elems, COMM_TYPE_FLOAT, 0); //comm_broadcast ( indr_mtrx_host, point_count*max_degree, COMM_TYPE_INT, 0); assert( max_degree > 0 ); init(); calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); allocHostBuffer((void**)&ungrpd_pnts_indr_host, point_count*sizeof(int)); for(int i=0; i<point_count; i++){ ungrpd_pnts_indr_host[i] = i; } allocHostBuffer((void**)&cardinalities, 2*sizeof(int)); allocHostBuffer((void**)&output, max_degree*sizeof(int)); if( can_use_texture ){ texDistance.addressMode[0] = hipAddressModeClamp; texDistance.addressMode[1] = hipAddressModeClamp; texDistance.filterMode = hipFilterModePoint; texDistance.normalized = false; // do not normalize coordinates // This is the actual distance matrix (dst_matrix_elems should be "point_count^2, or point_count*max_degree) printf("Allocating: %luMB (%lux%lux%lu) bytes in texture memory\n", dst_matrix_elems*sizeof(float)/(1024*1024), dst_matrix_elems/point_count, point_count, (long unsigned int)sizeof(float)); hipMallocArray(&distance_matrix_txt, &texDistance.channelDesc, dst_matrix_elems/point_count, point_count); }else{ allocDeviceBuffer(&distance_matrix_gmem, dst_matrix_elems*sizeof(float)); } CHECK_CUDA_ERROR(); // This is the N*Delta indirection matrix allocDeviceBuffer(&indr_mtrx, point_count*max_degree*sizeof(int)); allocDeviceBuffer(&degrees, point_count*sizeof(int)); allocDeviceBuffer(&ungrpd_pnts_indr, point_count*sizeof(int)); allocDeviceBuffer(&Ai_mask, thread_block_count*point_count*sizeof(char)); allocDeviceBuffer(&dist_to_clust, thread_block_count*max_degree*sizeof(float)); allocDeviceBuffer(&clustered_pnts_mask, point_count*sizeof(char)); allocDeviceBuffer(&cardnl, thread_block_count*2*sizeof(int)); allocDeviceBuffer(&result, 
point_count*sizeof(int)); // Copy to device, and record transfer time //int pcie_TH = Timer::Start(); if( can_use_texture ){ hipMemcpyToArray(distance_matrix_txt, 0, 0, dist_source, dst_matrix_elems*sizeof(float), hipMemcpyHostToDevice); CHECK_CUDA_ERROR(); hipBindTextureToArray(texDistance, distance_matrix_txt); }else{ copyToDevice(distance_matrix_gmem, dist_source, dst_matrix_elems*sizeof(float), stream_app); } copyToDevice(indr_mtrx, indr_mtrx_host, point_count*max_degree*sizeof(int), stream_app); copyToDevice(ungrpd_pnts_indr, ungrpd_pnts_indr_host, point_count*sizeof(int), stream_app); hipMemset(clustered_pnts_mask, 0, point_count*sizeof(char)); hipMemset(dist_to_clust, 0, max_degree*thread_block_count*sizeof(float)); //double transfer_time = Timer::Stop(pcie_TH, "PCIe Transfer Time"); tpb = ( point_count > THREADSPERBLOCK )? THREADSPERBLOCK : point_count; hipLaunchKernelGGL(( compute_degrees), dim3(grid2D(thread_block_count)), dim3(tpb),0,stream_app, (int *)indr_mtrx, (int *)degrees, point_count, max_degree); pthread_mutex_unlock(mutexapp); if(flag) cutilSafeCall( hipStreamSynchronize(stream_app) ); else hipDeviceSynchronize(); CHECK_CUDA_ERROR(); const char *sizeStr; stringstream ss; ss << "PointCount=" << (long)point_count; sizeStr = strdup(ss.str().c_str()); if( 0 == cwrank ){ if( save_clusters ){ debug_out.open("p"); for(i=0; i<point_count; i++){ debug_out << pnts[2*i] << " " << pnts[2*i+1] << endl; } debug_out.close(); seeds_out.open("p_seeds"); } cout << "\nInitial ThreadBlockCount: " << thread_block_count; cout << " PointCount: " << point_count; cout << " Max degree: " << max_degree << "\n" << endl; cout.flush(); } max_point_count = point_count; tpb = THREADSPERBLOCK; if( can_use_texture ){ distance_matrix = distance_matrix_txt; }else{ distance_matrix = distance_matrix_gmem; } ////////////////////////////////////////////////////////////////////////////////////////////// 
////////////////////////////////////////////////////////////////////////////////////////////// // // Kernel execution //int TH = Timer::Start(); do{ stringstream ss; int winner_node=-1; int winner_index=-1; bool this_node_participates = true; ++iter; calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); // If there are only a few elements left to cluster, reduce the number of participating nodes (GPUs). if( cwrank >= active_node_count ){ this_node_participates = false; } //comm_update_communicator(cwrank, active_node_count); if( !this_node_participates ) break; cwrank = 0; dim3 grid = grid2D(thread_block_count); //int Tkernel = Timer::Start(); pthread_mutex_lock(mutexapp); //////////////////////////////////////////////////////////////////////////////////////////////// ///////// ----------------- Main kernel ----------------- ///////// hipLaunchKernelGGL(( QTC_device), dim3(grid), dim3(tpb),0, stream_app, (float*)distance_matrix, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (int *)ungrpd_pnts_indr, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold, cwrank, active_node_count, total_thread_block_count, matrix_type, can_use_texture); ///////// ----------------- Main kernel ----------------- ///////// //////////////////////////////////////////////////////////////////////////////////////////////// pthread_mutex_unlock(mutexapp); if(flag) cutilSafeCall( hipStreamSynchronize(stream_app) ); else hipDeviceSynchronize(); CHECK_CUDA_ERROR(); //t_krn += Timer::Stop(Tkernel, "Kernel Only"); //int Tredc = Timer::Start(); if( thread_block_count > 1 ){ pthread_mutex_lock(mutexapp); // We are reducing 128 numbers or less, so one thread should be sufficient. 
hipLaunchKernelGGL(( reduce_card_device), dim3(grid2D(1)), dim3(1),0, stream_app, (int *)cardnl, thread_block_count); pthread_mutex_unlock(mutexapp); if(flag) cutilSafeCall( hipStreamSynchronize(stream_app) ); else hipDeviceSynchronize(); CHECK_CUDA_ERROR(); } copyFromDevice( cardinalities, cardnl, 2*sizeof(int), stream_app ); max_card = cardinalities[0]; winner_index = cardinalities[1]; //t_redc += Timer::Stop(Tredc, "Reduce Only"); //int Tsync = Timer::Start(); //comm_barrier(); //t_sync += Timer::Stop(Tsync, "Sync Only"); //int Tcomm = Timer::Start(); //comm_find_winner(&max_card, &winner_node, &winner_index, cwrank, max_point_count+1); //t_comm += Timer::Stop(Tcomm, "Comm Only"); if( be_verbose && cwrank == winner_node){ // for non-parallel cases, both "cwrank" and "winner_node" should be zero. cout << "[" << cwrank << "] Cluster Cardinality: " << max_card << " (Node: " << cwrank << ", index: " << winner_index << ")" << endl; } //int Ttrim = Timer::Start(); pthread_mutex_lock(mutexapp); hipLaunchKernelGGL(( trim_ungrouped_pnts_indr_array), dim3(grid2D(1)), dim3(tpb),0, stream_app, winner_index, (int*)ungrpd_pnts_indr, (float*)distance_matrix, (int *)result, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold, matrix_type, can_use_texture ); pthread_mutex_unlock(mutexapp); if(flag) cutilSafeCall( hipStreamSynchronize(stream_app) ); else hipDeviceSynchronize(); CHECK_CUDA_ERROR(); //t_trim += Timer::Stop(Ttrim, "Trim Only"); if( cwrank == winner_node){ // for non-parallel cases, these should both be zero. if( save_clusters ){ ss << "p." 
<< iter; debug_out.open(ss.str().c_str()); } copyFromDevice(output, (void *)result, max_card*sizeof(int) , stream_app); if( save_clusters ){ for(int i=0; i<max_card; i++){ debug_out << pnts[2*output[i]] << " " << pnts[2*output[i]+1] << endl; } seeds_out << pnts[2*winner_index] << " " << pnts[2*winner_index+1] << endl; debug_out.close(); } } //int Tupdt = Timer::Start(); pthread_mutex_lock(mutexapp); hipLaunchKernelGGL(( update_clustered_pnts_mask), dim3(grid2D(1)), dim3(tpb),0, stream_app, (char *)clustered_pnts_mask, (char *)Ai_mask, max_point_count); pthread_mutex_unlock(mutexapp); if(flag) cutilSafeCall( hipStreamSynchronize(stream_app) ); else hipDeviceSynchronize(); CHECK_CUDA_ERROR(); //t_updt += Timer::Stop(Tupdt, "Update Only"); point_count -= max_card; }while( max_card > 1 && point_count ); //double t = Timer::Stop(TH, "QT_Clustering"); if( save_clusters ){ seeds_out.close(); } // //////////////////////////////////////////////////////////////////////////////// if( cwrank == 0){ cout << "Cluster count: " << iter << endl; cout.flush(); } //resultDB.AddResult(name+"_Synchron.", sizeStr, "s", t_sync); //resultDB.AddResult(name+"_Communic.", sizeStr, "s", t_comm); //resultDB.AddResult(name+"_Kernel", sizeStr, "s", t_krn); //resultDB.AddResult(name+"_Trimming", sizeStr, "s", t_trim); //resultDB.AddResult(name+"_Update", sizeStr, "s", t_updt); //resultDB.AddResult(name+"_Reduction", sizeStr, "s", t_redc); //resultDB.AddResult(name+"_Algorithm", sizeStr, "s", t); //resultDB.AddResult(name+"+PCI_Trans.", sizeStr, "s", t+transfer_time); pthread_mutex_lock(mutexapp); freeHostBuffer(dist_source); freeHostBuffer(indr_mtrx_host); if( can_use_texture ){ hipFreeArray(distance_matrix_txt); hipUnbindTexture(texDistance); }else{ freeDeviceBuffer(distance_matrix_gmem); } CHECK_CUDA_ERROR(); freeDeviceBuffer(indr_mtrx); freeDeviceBuffer(Ai_mask); freeDeviceBuffer(cardnl); freeDeviceBuffer(result); freeHostBuffer(output); printf("Hello\n"); pthread_mutex_unlock(mutexapp); 
return; } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// int qtcDevice = -1; void init() { qtcDevice = 0; hipSetDevice(qtcDevice); hipGetDevice(&qtcDevice); } void allocHostBuffer(void** bufferp, unsigned long bytes) { hipHostMalloc(bufferp, bytes); CHECK_CUDA_ERROR(); } void allocDeviceBuffer(void** bufferp, unsigned long bytes) { hipMalloc(bufferp, bytes); CHECK_CUDA_ERROR(); } void freeHostBuffer(void* buffer) { hipHostFree(buffer); CHECK_CUDA_ERROR(); } void freeDeviceBuffer(void* buffer) { hipFree(buffer); } void copyToDevice(void* to_device, void* from_host, unsigned long bytes, hipStream_t stream_app) { hipMemcpyAsync(to_device, from_host, bytes, hipMemcpyHostToDevice, stream_app); CHECK_CUDA_ERROR(); } void copyFromDevice(void* to_host, void* from_device, unsigned long bytes,hipStream_t stream_app) { hipMemcpyAsync(to_host, from_device, bytes, hipMemcpyDeviceToHost, stream_app); CHECK_CUDA_ERROR(); }
ec4daabc2ef55015a3fb771d3ca8ccbf697cc119.cu
#include <math.h> #include <sstream> #include <stdlib.h> #include <string.h> #include <iostream> #include <fstream> #include <assert.h> #include "tuningParameters.h" #include "qtclib.h" #include "support.h" #include "libdata.h" #include "cudacommon.h" #define _USE_MATH_DEFINES #include <float.h> #include <cuda_runtime.h> #include "../benchmark_common.h" texture<float, 2, cudaReadModeElementType> texDistance; using namespace std; #include "kernels_common.h" #include "kernels_full_storage.h" #include "kernels_compact_storage.h" // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing. The user is allowed to specify // the size of the input data in megabytes if they are not using a // predefined size (i.e. the -s option). // // Arguments: // op: the options parser / parameter database // // Programmer: Anthony Danalis // Creation: February 04, 2011 // Returns: nothing // // **************************************************************************** /*void addBenchmarkSpecOptions(OptionParser &op){ op.addOption("PointCount", OPT_INT, "4096", "point count"); op.addOption("DataFile", OPT_STRING, "///", "BLAST data input file name"); op.addOption("Threshold", OPT_FLOAT, "1", "cluster diameter threshold"); op.addOption("SaveOutput", OPT_BOOL, "", "BLAST data input file name"); op.addOption("Verbose", OPT_BOOL, "", "Print cluster cardinalities"); op.addOption("TextureMem", OPT_BOOL, "0", "Use Texture memory for distance matrix"); op.addOption("CompactStorage", OPT_BOOL, "0", "Use compact storage distance matrix regardless of problem size"); }*/ // **************************************************************************** // Function: RunBenchmark // // Purpose: // Calls single precision and, if viable, double precision QT-Clustering // benchmark. 
// // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name,cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag); int main_QTC(cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag){ // Test to see if this device supports double precision cudaGetDevice(&qtcDevice); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, qtcDevice); runTest("QTC", stream_app, mutexapp, flag); return 0; } // **************************************************************************** // Function: calculate_participants // // Purpose: // This function decides how many GPUs (up to the maximum requested by the user) // and threadblocks per GPU will be used. It also returns the total number of // thread-blocks across all GPUs and the number of thread-blocks that are in nodes // before the current one. // In the future, the behavior of this function should be decided based on // auto-tuning instead of arbitrary decisions. 
// // Arguments: // The number of nodes requested by the user and the four // variables that the function computes (passed by reference) // // // Returns: nothing // // Programmer: Anthony Danalis // Creation: May 25, 2011 // // **************************************************************************** void calculate_participants(int point_count, int node_count, int cwrank, int *thread_block_count, int *total_thread_block_count, int *active_node_count){ int ac_nd_cnt, thr_blc_cnt, total_thr_blc_cnt; ac_nd_cnt = node_count; if( point_count <= (node_count-1) * SM_COUNT * GPU_MIN_SATURATION_FACTOR ){ int K = SM_COUNT * GPU_MIN_SATURATION_FACTOR; ac_nd_cnt = (point_count+K-1) / K; } if( point_count >= ac_nd_cnt * SM_COUNT * OVR_SBSCR_FACTOR ){ thr_blc_cnt = SM_COUNT * OVR_SBSCR_FACTOR; total_thr_blc_cnt = thr_blc_cnt * ac_nd_cnt; }else{ thr_blc_cnt = point_count/ac_nd_cnt; if( cwrank < point_count%ac_nd_cnt ){ thr_blc_cnt++; } total_thr_blc_cnt = point_count; } *active_node_count = ac_nd_cnt; *thread_block_count = thr_blc_cnt; *total_thread_block_count = total_thr_blc_cnt; return; } unsigned long int estimate_memory_for_full_storage(unsigned long int pnt_cnt, float d){ unsigned long total, thread_block_count, max_degree; float density; thread_block_count = (unsigned long int)SM_COUNT * OVR_SBSCR_FACTOR; // The density calculations assume that we are dealing with generated Euclidean points // (as opposed to externally provided scientific data) that are constraint in a 20x20 2D square. density = 3.14159*(d*d)/(20.0*20.0); if(density > 1.0 ) density = 1.0; max_degree = (unsigned long int)((float)pnt_cnt*density); // average number of points in a cirlce with radius d. max_degree *= 10; // The distribution of points is not uniform, so throw in a factor of 10 for max/average. if( max_degree > pnt_cnt ) max_degree = pnt_cnt; // Due to the point generation algorithm, a cluster can have up to N/30 elements in an arbitratiry small radius. 
if( max_degree < pnt_cnt/30 ) max_degree = pnt_cnt/30; total = 0; total += pnt_cnt*pnt_cnt*sizeof(float); // Sparse distance matrix total += pnt_cnt*max_degree*sizeof(int); // Indirection matrix total += pnt_cnt*thread_block_count*sizeof(char); // Current candidate cluster mask total += pnt_cnt*sizeof(int); // Ungrouped elements indirection vector total += pnt_cnt*sizeof(int); // Degrees vector total += pnt_cnt*sizeof(int); // Result return total; } void findMemCharacteristics(unsigned long int *gmem, unsigned long int *text){ int device; cudaDeviceProp deviceProp; cudaGetDevice(&device); CHECK_CUDA_ERROR(); cudaGetDeviceProperties(&deviceProp, device); CHECK_CUDA_ERROR(); *gmem = (unsigned long int)(0.75*(float)deviceProp.totalGlobalMem); *text = (unsigned long int)deviceProp.maxTexture2D[1]; return; } // **************************************************************************** // Function: runTest // // Purpose: // This benchmark measures the performance of applying QT-clustering on // single precision data. // // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name, cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag) { unsigned long int point_count, max_avail_memory, max_texture_dimension, needed_mem; int def_size = -1, matrix_type = 0x0; float threshold; bool use_texture = true, use_compact_storage = false; def_size = 1; point_count = 4096; threshold = 1; use_texture = 0; use_compact_storage = 0; if( use_compact_storage ){ use_texture = false; } switch( def_size ){ case 1: // size == 1 should match default values of PointCount, // Threshold, TextureMem, and CompactStorage parameters. 
// (i.e., -s 1 is the default) point_count = 4*1024; threshold = 1; use_texture = false; use_compact_storage = false; break; case 2: point_count = 8*1024; threshold = 1; use_texture = true; use_compact_storage = false; break; case 3: point_count = 16*1024; threshold = 1; use_texture = true; use_compact_storage = false; break; case 4: point_count = 16*1024; threshold = 4; use_texture = true; use_compact_storage = false; break; case 5: point_count = 26*1024; threshold = 1; use_texture = false; use_compact_storage = true; break; default: fprintf( stderr, "unsupported size %d given; terminating\n", def_size ); return; } //if( 0 == comm_get_rank() ){ // Make a reasonable estimate of the actual memory I can allocate // as well as the max texture size. findMemCharacteristics(&max_avail_memory, &max_texture_dimension); needed_mem = estimate_memory_for_full_storage(point_count, threshold); // see if we can fit the distance matrix in texture memory if( (point_count >= max_texture_dimension) || !use_texture ){ printf("Using global memory for distance matrix\n"); matrix_type |= GLOBAL_MEMORY; }else{ printf("Using texture memory for distance matrix\n"); matrix_type |= TEXTUR_MEMORY; } // find out what type of distance matrix we will be using. 
if( (max_avail_memory > needed_mem) && !use_compact_storage ){ printf("Using full storage distance matrix algorithm\n"); matrix_type |= FULL_STORAGE_MATRIX; }else{ printf("Using compact storage distance matrix algorithm\n"); matrix_type |= COMPACT_STORAGE_MATRIX; } //} //comm_broadcast ( &matrix_type, 1, COMM_TYPE_INT, 0); QTC(name, matrix_type, stream_app, mutexapp, flag); } //////////////////////////////////////////////////////////////////////////////// // void QTC(const string& name, int matrix_type,cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag){ ofstream debug_out, seeds_out; void *Ai_mask, *cardnl, *ungrpd_pnts_indr, *clustered_pnts_mask, *result, *dist_to_clust; void *indr_mtrx, *degrees; int *indr_mtrx_host, *ungrpd_pnts_indr_host, *cardinalities, *output; bool save_clusters, be_verbose, can_use_texture, synthetic_data; cudaArray *distance_matrix_txt; void *distance_matrix_gmem, *distance_matrix; float *dist_source, *pnts; float threshold; int i, max_degree, thread_block_count, total_thread_block_count, active_node_count; int cwrank=0, node_count=1, tpb, max_card, iter=0; double t_krn, t_comm, t_trim, t_updt, t_redc, t_sync; unsigned long int dst_matrix_elems, point_count, max_point_count; string fname; point_count = 4096; threshold = 1; save_clusters = ""; be_verbose = ""; synthetic_data = true; /*fname = "///"; if( fname.compare("///") == 0 ){ synthetic_data = true; }else{ synthetic_data = false; save_clusters = false; }*/ can_use_texture = !!(matrix_type & TEXTUR_MEMORY); // TODO - only deal with this size-switch once int def_size = 1; switch( def_size ) { case 1: // size == 1 should match default values of PointCount, // Threshold, TextureMem, and CompactStorage parameters. 
// (i.e., -s 1 is the default) point_count = 4*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 2: point_count = 8*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 3: point_count = 16*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 4: point_count = 16*1024; threshold = 4; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 5: point_count = 26*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; default: fprintf( stderr, "unsupported size %d given; terminating\n", def_size ); return; } cwrank = 0; node_count =1; if( cwrank == 0 ){ //if( synthetic_data ) pnts = generate_synthetic_data(&dist_source, &indr_mtrx_host, &max_degree, threshold, point_count, matrix_type); // else //(void)read_BLAST_data(&dist_source, &indr_mtrx_host, &max_degree, threshold, fname.c_str(), point_count, matrix_type); } //comm_broadcast ( &point_count, 1, COMM_TYPE_INT, 0); //comm_broadcast ( &max_degree, 1, COMM_TYPE_INT, 0); if( matrix_type & FULL_STORAGE_MATRIX ){ dst_matrix_elems = point_count*point_count; }else{ dst_matrix_elems = point_count*max_degree; } if( cwrank != 0 ){ // For all nodes except zero, in a distributed run. allocHostBuffer((void **)&dist_source, dst_matrix_elems*sizeof(float)); allocHostBuffer((void **)&indr_mtrx_host, point_count*max_degree*sizeof(int)); } // If we need to print the actual clusters later on, we'll need to have all points in all nodes. 
if( save_clusters ){ if( cwrank != 0 ){ pnts = (float *)malloc( 2*point_count*sizeof(float) ); } //comm_broadcast ( pnts, 2*point_count, COMM_TYPE_FLOAT, 0); } //comm_broadcast ( dist_source, dst_matrix_elems, COMM_TYPE_FLOAT, 0); //comm_broadcast ( indr_mtrx_host, point_count*max_degree, COMM_TYPE_INT, 0); assert( max_degree > 0 ); init(); calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); allocHostBuffer((void**)&ungrpd_pnts_indr_host, point_count*sizeof(int)); for(int i=0; i<point_count; i++){ ungrpd_pnts_indr_host[i] = i; } allocHostBuffer((void**)&cardinalities, 2*sizeof(int)); allocHostBuffer((void**)&output, max_degree*sizeof(int)); if( can_use_texture ){ texDistance.addressMode[0] = cudaAddressModeClamp; texDistance.addressMode[1] = cudaAddressModeClamp; texDistance.filterMode = cudaFilterModePoint; texDistance.normalized = false; // do not normalize coordinates // This is the actual distance matrix (dst_matrix_elems should be "point_count^2, or point_count*max_degree) printf("Allocating: %luMB (%lux%lux%lu) bytes in texture memory\n", dst_matrix_elems*sizeof(float)/(1024*1024), dst_matrix_elems/point_count, point_count, (long unsigned int)sizeof(float)); cudaMallocArray(&distance_matrix_txt, &texDistance.channelDesc, dst_matrix_elems/point_count, point_count); }else{ allocDeviceBuffer(&distance_matrix_gmem, dst_matrix_elems*sizeof(float)); } CHECK_CUDA_ERROR(); // This is the N*Delta indirection matrix allocDeviceBuffer(&indr_mtrx, point_count*max_degree*sizeof(int)); allocDeviceBuffer(&degrees, point_count*sizeof(int)); allocDeviceBuffer(&ungrpd_pnts_indr, point_count*sizeof(int)); allocDeviceBuffer(&Ai_mask, thread_block_count*point_count*sizeof(char)); allocDeviceBuffer(&dist_to_clust, thread_block_count*max_degree*sizeof(float)); allocDeviceBuffer(&clustered_pnts_mask, point_count*sizeof(char)); allocDeviceBuffer(&cardnl, thread_block_count*2*sizeof(int)); 
allocDeviceBuffer(&result, point_count*sizeof(int)); // Copy to device, and record transfer time //int pcie_TH = Timer::Start(); if( can_use_texture ){ cudaMemcpyToArray(distance_matrix_txt, 0, 0, dist_source, dst_matrix_elems*sizeof(float), cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(); cudaBindTextureToArray(texDistance, distance_matrix_txt); }else{ copyToDevice(distance_matrix_gmem, dist_source, dst_matrix_elems*sizeof(float), stream_app); } copyToDevice(indr_mtrx, indr_mtrx_host, point_count*max_degree*sizeof(int), stream_app); copyToDevice(ungrpd_pnts_indr, ungrpd_pnts_indr_host, point_count*sizeof(int), stream_app); cudaMemset(clustered_pnts_mask, 0, point_count*sizeof(char)); cudaMemset(dist_to_clust, 0, max_degree*thread_block_count*sizeof(float)); //double transfer_time = Timer::Stop(pcie_TH, "PCIe Transfer Time"); tpb = ( point_count > THREADSPERBLOCK )? THREADSPERBLOCK : point_count; compute_degrees<<<grid2D(thread_block_count), tpb,0,stream_app>>>((int *)indr_mtrx, (int *)degrees, point_count, max_degree); pthread_mutex_unlock(mutexapp); if(flag) cutilSafeCall( cudaStreamSynchronize(stream_app) ); else cudaThreadSynchronize(); CHECK_CUDA_ERROR(); const char *sizeStr; stringstream ss; ss << "PointCount=" << (long)point_count; sizeStr = strdup(ss.str().c_str()); if( 0 == cwrank ){ if( save_clusters ){ debug_out.open("p"); for(i=0; i<point_count; i++){ debug_out << pnts[2*i] << " " << pnts[2*i+1] << endl; } debug_out.close(); seeds_out.open("p_seeds"); } cout << "\nInitial ThreadBlockCount: " << thread_block_count; cout << " PointCount: " << point_count; cout << " Max degree: " << max_degree << "\n" << endl; cout.flush(); } max_point_count = point_count; tpb = THREADSPERBLOCK; if( can_use_texture ){ distance_matrix = distance_matrix_txt; }else{ distance_matrix = distance_matrix_gmem; } ////////////////////////////////////////////////////////////////////////////////////////////// 
////////////////////////////////////////////////////////////////////////////////////////////// // // Kernel execution //int TH = Timer::Start(); do{ stringstream ss; int winner_node=-1; int winner_index=-1; bool this_node_participates = true; ++iter; calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); // If there are only a few elements left to cluster, reduce the number of participating nodes (GPUs). if( cwrank >= active_node_count ){ this_node_participates = false; } //comm_update_communicator(cwrank, active_node_count); if( !this_node_participates ) break; cwrank = 0; dim3 grid = grid2D(thread_block_count); //int Tkernel = Timer::Start(); pthread_mutex_lock(mutexapp); //////////////////////////////////////////////////////////////////////////////////////////////// ///////// ----------------- Main kernel ----------------- ///////// QTC_device<<<grid, tpb,0, stream_app>>>((float*)distance_matrix, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (int *)ungrpd_pnts_indr, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold, cwrank, active_node_count, total_thread_block_count, matrix_type, can_use_texture); ///////// ----------------- Main kernel ----------------- ///////// //////////////////////////////////////////////////////////////////////////////////////////////// pthread_mutex_unlock(mutexapp); if(flag) cutilSafeCall( cudaStreamSynchronize(stream_app) ); else cudaThreadSynchronize(); CHECK_CUDA_ERROR(); //t_krn += Timer::Stop(Tkernel, "Kernel Only"); //int Tredc = Timer::Start(); if( thread_block_count > 1 ){ pthread_mutex_lock(mutexapp); // We are reducing 128 numbers or less, so one thread should be sufficient. 
reduce_card_device<<<grid2D(1), 1,0, stream_app>>>((int *)cardnl, thread_block_count); pthread_mutex_unlock(mutexapp); if(flag) cutilSafeCall( cudaStreamSynchronize(stream_app) ); else cudaThreadSynchronize(); CHECK_CUDA_ERROR(); } copyFromDevice( cardinalities, cardnl, 2*sizeof(int), stream_app ); max_card = cardinalities[0]; winner_index = cardinalities[1]; //t_redc += Timer::Stop(Tredc, "Reduce Only"); //int Tsync = Timer::Start(); //comm_barrier(); //t_sync += Timer::Stop(Tsync, "Sync Only"); //int Tcomm = Timer::Start(); //comm_find_winner(&max_card, &winner_node, &winner_index, cwrank, max_point_count+1); //t_comm += Timer::Stop(Tcomm, "Comm Only"); if( be_verbose && cwrank == winner_node){ // for non-parallel cases, both "cwrank" and "winner_node" should be zero. cout << "[" << cwrank << "] Cluster Cardinality: " << max_card << " (Node: " << cwrank << ", index: " << winner_index << ")" << endl; } //int Ttrim = Timer::Start(); pthread_mutex_lock(mutexapp); trim_ungrouped_pnts_indr_array<<<grid2D(1), tpb,0, stream_app>>>(winner_index, (int*)ungrpd_pnts_indr, (float*)distance_matrix, (int *)result, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold, matrix_type, can_use_texture ); pthread_mutex_unlock(mutexapp); if(flag) cutilSafeCall( cudaStreamSynchronize(stream_app) ); else cudaThreadSynchronize(); CHECK_CUDA_ERROR(); //t_trim += Timer::Stop(Ttrim, "Trim Only"); if( cwrank == winner_node){ // for non-parallel cases, these should both be zero. if( save_clusters ){ ss << "p." 
<< iter; debug_out.open(ss.str().c_str()); } copyFromDevice(output, (void *)result, max_card*sizeof(int) , stream_app); if( save_clusters ){ for(int i=0; i<max_card; i++){ debug_out << pnts[2*output[i]] << " " << pnts[2*output[i]+1] << endl; } seeds_out << pnts[2*winner_index] << " " << pnts[2*winner_index+1] << endl; debug_out.close(); } } //int Tupdt = Timer::Start(); pthread_mutex_lock(mutexapp); update_clustered_pnts_mask<<<grid2D(1), tpb,0, stream_app>>>((char *)clustered_pnts_mask, (char *)Ai_mask, max_point_count); pthread_mutex_unlock(mutexapp); if(flag) cutilSafeCall( cudaStreamSynchronize(stream_app) ); else cudaThreadSynchronize(); CHECK_CUDA_ERROR(); //t_updt += Timer::Stop(Tupdt, "Update Only"); point_count -= max_card; }while( max_card > 1 && point_count ); //double t = Timer::Stop(TH, "QT_Clustering"); if( save_clusters ){ seeds_out.close(); } // //////////////////////////////////////////////////////////////////////////////// if( cwrank == 0){ cout << "Cluster count: " << iter << endl; cout.flush(); } //resultDB.AddResult(name+"_Synchron.", sizeStr, "s", t_sync); //resultDB.AddResult(name+"_Communic.", sizeStr, "s", t_comm); //resultDB.AddResult(name+"_Kernel", sizeStr, "s", t_krn); //resultDB.AddResult(name+"_Trimming", sizeStr, "s", t_trim); //resultDB.AddResult(name+"_Update", sizeStr, "s", t_updt); //resultDB.AddResult(name+"_Reduction", sizeStr, "s", t_redc); //resultDB.AddResult(name+"_Algorithm", sizeStr, "s", t); //resultDB.AddResult(name+"+PCI_Trans.", sizeStr, "s", t+transfer_time); pthread_mutex_lock(mutexapp); freeHostBuffer(dist_source); freeHostBuffer(indr_mtrx_host); if( can_use_texture ){ cudaFreeArray(distance_matrix_txt); cudaUnbindTexture(texDistance); }else{ freeDeviceBuffer(distance_matrix_gmem); } CHECK_CUDA_ERROR(); freeDeviceBuffer(indr_mtrx); freeDeviceBuffer(Ai_mask); freeDeviceBuffer(cardnl); freeDeviceBuffer(result); freeHostBuffer(output); printf("Hello\n"); pthread_mutex_unlock(mutexapp); return; } 
//////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// int qtcDevice = -1; void init() { qtcDevice = 0; cudaSetDevice(qtcDevice); cudaGetDevice(&qtcDevice); } void allocHostBuffer(void** bufferp, unsigned long bytes) { cudaMallocHost(bufferp, bytes); CHECK_CUDA_ERROR(); } void allocDeviceBuffer(void** bufferp, unsigned long bytes) { cudaMalloc(bufferp, bytes); CHECK_CUDA_ERROR(); } void freeHostBuffer(void* buffer) { cudaFreeHost(buffer); CHECK_CUDA_ERROR(); } void freeDeviceBuffer(void* buffer) { cudaFree(buffer); } void copyToDevice(void* to_device, void* from_host, unsigned long bytes, cudaStream_t stream_app) { cudaMemcpyAsync(to_device, from_host, bytes, cudaMemcpyHostToDevice, stream_app); CHECK_CUDA_ERROR(); } void copyFromDevice(void* to_host, void* from_device, unsigned long bytes,cudaStream_t stream_app) { cudaMemcpyAsync(to_host, from_device, bytes, cudaMemcpyDeviceToHost, stream_app); CHECK_CUDA_ERROR(); }
19d5a465d5370066dfe1bcba52f2c3180fd28c1a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. 
for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ hipComplex tans(hipComplex m) { 
return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn 
= enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex 
Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, 
hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the 
hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + 
qoo*qoo/(q*q)); } return out; } __device__ hipComplex arreg(hipComplex q, hipComplex r, hipComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are derivatives with respect to z or q, we'll see */ hipComplex out(0.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); hipComplex morra(-1.0,0.0); hipComplex tla(1.0,0.0); hipComplex vnn(0.0,0.0); hipComplex fou(4.0,0.0); hipComplex tw(2.0,0.0); hipComplex run(1.0,0.0); int v; for(v=0;v<20;v++) { qoo = qoo * q; roo = roo * r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*sins(tw*z*run)/(run-roo); } return fou*out; } __device__ hipComplex urreg(hipComplex q, hipComplex r, hipComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are derivatives with respect to z or q, we'll see */ hipComplex out(0.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); hipComplex morra(-1.0,0.0); hipComplex tla(1.0,0.0); hipComplex vnn(0.0,0.0); hipComplex fou(4.0,0.0); hipComplex tw(2.0,0.0); hipComplex run(1.0,0.0); int v; for(v=0;v<10;v++) { qoo = qoo * q; roo = roo * r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*the3(tw*z*run,r)/(run-roo); } return fou*out; } // * small q-exponential __device__ hipComplex qexp(hipComplex z, hipComplex q) { hipComplex mone(-1.0,0.0); hipComplex une(1.0,0.0); return une/qpoch(z,q); } //* large q exponential is just qpoch(-z,q) __device__ hipComplex qExp(hipComplex z, hipComplex q) { hipComplex mone(-1.0,0.0); hipComplex une(1.0,0.0); return qpoch(mone*z,q); } __device__ hipComplex sinq(hipComplex z, hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qexp(z*aie,q) -qexp(z*aie,q))/doo; return out; } __device__ hipComplex cosq(hipComplex z, hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qexp(z*aie,q) +qexp(z*aie,q))/doo; return out; } __device__ hipComplex Sinq(hipComplex z, 
hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qExp(z*aie,q) -qExp(z*aie,q))/doo; return out; } __device__ hipComplex Cosq(hipComplex z, hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qExp(z*aie,q) +qExp(z*aie,q))/doo; return out; } __device__ hipComplex asins(hipComplex z) { float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float fla = z.i/abs(z.i); // *signum, but without a comparison, probably a saner way to do this? // hipComplex out(0.0,0.0); out.r = asinf(bet); out.i = fla * logf(alp + sqrtf(alp*alp-1)); return out; } __device__ int gcd(int a, int b) { int remainder = a % b; if (remainder == 0) { return b; } return gcd(b, remainder); } /* Real Analytic Eisenstein Series */ __device__ hipComplex reis(hipComplex s, hipComplex z) { // see en.wikipedia.org/wiki/Real_analytic_Eisenstein_series hipComplex out(0.0,0.0); hipComplex hav(0.5,0.0); hipComplex xu=out; hipComplex yu=out; yu.r = z.i; int m,n; hipComplex ema=out; hipComplex ena=out; hipComplex den=out; for(m=-20;m<20;m++) { for(n=-20;n<20;n++) { if((m!=0)&&(n!=0)) { if((gcd(m,n)==1)) { ena.r = n; ema.r = m; den.r = norg(ema*z+ena); out = out + powc(yu,s)/powc(den,s/hav); } } } } return out; } __device__ hipComplex thu3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * asins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex trev(hipComplex lav, hipComplex mel, hipComplex rel) { hipComplex out(0.0,0.0); hipComplex V(0.739085133215160641655312087674,0.0); int v; for(v=0;v<70;v++) { lav = lav - (cosc(lav)-powc(V,rel))/sins(lav); out = out + (cosc(lav)-powc(V,mel)); } return out; } __device__ unsigned 
char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale = 4.4; float fx = scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(0.0,0.0); hipComplex eccume(1.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); 
hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); hipComplex gren(2.0,0.0); hipComplex next=flurn; hipComplex current = cue; hipComplex xnext = flurn; hipComplex xcurrent = cue; hipComplex rue=cue; hipComplex tinny(.0001,0.0001); hipComplex raga(0.5,27.0); hipComplex ruga(0.5,54.0); hipComplex senna(0.5,0.0); hipComplex finch(0.001,.001); float ah, ex, feig; feig = 3.67; ex = 2.10981; float xa,xb,ya,yb,tta,ttb; /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; int uu; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // One way of describing this would be we want to perform Newton's method //on the Mandelbrot set /* preiterate */ //tex.stackexchange.com/questions/278843/making-a-phase-portrait-of-two-autonomous-system-of-differential-equations-with?fbclid=IwAR2Tz66CbUAq7LFVYck4uUGF5uQWnmzf5iZw3Bi8IOycvCC7czO6ZVgkz3s // this is not terribly hard to do with cuda // what we need: // x' = x - y -> dx / dt = x - y // y' = 1 - x^2 -> dy / dt = 1-x^2 // dy / dx = (dy / dt) / (dx/ dt) // so the trick is to convert dy/dx into a unit complex number to make this work, okay that's not that difficult for(v=0;v<30;v++) { cue = cue - trev(cue,aon,uon)/((trev(cue+finch,aon,uon)-trev(cue,aon,uon))/finch); } double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = 
(unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
19d5a465d5370066dfe1bcba52f2c3180fd28c1a.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. 
for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex 
moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; 
cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex 
thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex 
frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex arreg(cuComplex q, cuComplex r, cuComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are derivatives with respect to z or q, we'll see */ cuComplex out(0.0,0.0); cuComplex 
qoo(1.0,0.0); cuComplex roo(1.0,0.0); cuComplex morra(-1.0,0.0); cuComplex tla(1.0,0.0); cuComplex vnn(0.0,0.0); cuComplex fou(4.0,0.0); cuComplex tw(2.0,0.0); cuComplex run(1.0,0.0); int v; for(v=0;v<20;v++) { qoo = qoo * q; roo = roo * r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*sins(tw*z*run)/(run-roo); } return fou*out; } __device__ cuComplex urreg(cuComplex q, cuComplex r, cuComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are derivatives with respect to z or q, we'll see */ cuComplex out(0.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); cuComplex morra(-1.0,0.0); cuComplex tla(1.0,0.0); cuComplex vnn(0.0,0.0); cuComplex fou(4.0,0.0); cuComplex tw(2.0,0.0); cuComplex run(1.0,0.0); int v; for(v=0;v<10;v++) { qoo = qoo * q; roo = roo * r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*the3(tw*z*run,r)/(run-roo); } return fou*out; } // * small q-exponential __device__ cuComplex qexp(cuComplex z, cuComplex q) { cuComplex mone(-1.0,0.0); cuComplex une(1.0,0.0); return une/qpoch(z,q); } //* large q exponential is just qpoch(-z,q) __device__ cuComplex qExp(cuComplex z, cuComplex q) { cuComplex mone(-1.0,0.0); cuComplex une(1.0,0.0); return qpoch(mone*z,q); } __device__ cuComplex sinq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qexp(z*aie,q) -qexp(z*aie,q))/doo; return out; } __device__ cuComplex cosq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qexp(z*aie,q) +qexp(z*aie,q))/doo; return out; } __device__ cuComplex Sinq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qExp(z*aie,q) -qExp(z*aie,q))/doo; return out; } __device__ cuComplex Cosq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qExp(z*aie,q) +qExp(z*aie,q))/doo; return out; } 
__device__ cuComplex asins(cuComplex z) { float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float fla = z.i/abs(z.i); // *signum, but without a comparison, probably a saner way to do this? // cuComplex out(0.0,0.0); out.r = asinf(bet); out.i = fla * logf(alp + sqrtf(alp*alp-1)); return out; } __device__ int gcd(int a, int b) { int remainder = a % b; if (remainder == 0) { return b; } return gcd(b, remainder); } /* Real Analytic Eisenstein Series */ __device__ cuComplex reis(cuComplex s, cuComplex z) { // see en.wikipedia.org/wiki/Real_analytic_Eisenstein_series cuComplex out(0.0,0.0); cuComplex hav(0.5,0.0); cuComplex xu=out; cuComplex yu=out; yu.r = z.i; int m,n; cuComplex ema=out; cuComplex ena=out; cuComplex den=out; for(m=-20;m<20;m++) { for(n=-20;n<20;n++) { if((m!=0)&&(n!=0)) { if((gcd(m,n)==1)) { ena.r = n; ema.r = m; den.r = norg(ema*z+ena); out = out + powc(yu,s)/powc(den,s/hav); } } } } return out; } __device__ cuComplex thu3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * asins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex trev(cuComplex lav, cuComplex mel, cuComplex rel) { cuComplex out(0.0,0.0); cuComplex V(0.739085133215160641655312087674,0.0); int v; for(v=0;v<70;v++) { lav = lav - (cosc(lav)-powc(V,rel))/sins(lav); out = out + (cosc(lav)-powc(V,mel)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale = 4.4; float fx = scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(0.0,0.0); cuComplex eccume(1.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); 
cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); cuComplex gren(2.0,0.0); cuComplex next=flurn; cuComplex current = cue; cuComplex xnext = flurn; cuComplex xcurrent = cue; cuComplex rue=cue; cuComplex tinny(.0001,0.0001); cuComplex raga(0.5,27.0); cuComplex ruga(0.5,54.0); cuComplex senna(0.5,0.0); cuComplex finch(0.001,.001); float ah, ex, feig; feig = 3.67; ex = 2.10981; float xa,xb,ya,yb,tta,ttb; /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; int uu; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // One way of describing this would be we want to perform Newton's method //on the Mandelbrot set /* preiterate */ //tex.stackexchange.com/questions/278843/making-a-phase-portrait-of-two-autonomous-system-of-differential-equations-with?fbclid=IwAR2Tz66CbUAq7LFVYck4uUGF5uQWnmzf5iZw3Bi8IOycvCC7czO6ZVgkz3s // this is not terribly hard to do with cuda // what we need: // x' = x - y -> dx / dt = x - y // y' = 1 - x^2 -> dy / dt = 1-x^2 // dy / dx = (dy / dt) / (dx/ dt) // so the trick is to convert dy/dx into a unit complex number to make this work, okay that's not that difficult for(v=0;v<30;v++) { cue = cue - trev(cue,aon,uon)/((trev(cue+finch,aon,uon)-trev(cue,aon,uon))/finch); } double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, 
int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
0b6c3049f18ec476f39fab143d45a69dbb8baba8.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include "util.h" __global__ void histo_prescan_kernel ( unsigned int* input, int size, unsigned int* minmax); __global__ void histo_main_kernel ( uchar4 *sm_mappings, unsigned int num_elements, unsigned int sm_range_min, unsigned int sm_range_max, unsigned int histo_height, unsigned int histo_width, unsigned int *global_subhisto, unsigned int *global_histo, unsigned int *global_overflow); __global__ void histo_intermediates_kernel ( uint2 *input, unsigned int height, unsigned int width, unsigned int input_pitch, uchar4 *sm_mappings); __global__ void histo_final_kernel ( unsigned int sm_range_min, unsigned int sm_range_max, unsigned int histo_height, unsigned int histo_width, unsigned int *global_subhisto, unsigned int *global_histo, unsigned int *global_overflow, unsigned int *final_histo); /****************************************************************************** * Implementation: GPU * Details: * in the GPU implementation of histogram, we begin by computing the span of the * input values into the histogram. Then the histogramming computation is carried * out by a (BLOCK_X, BLOCK_Y) sized grid, where every group of Y (same X) * computes its own partial histogram for a part of the input, and every Y in the * group exclusively writes to a portion of the span computed in the beginning. * Finally, a reduction is performed to combine all the partial histograms into * the final result. 
******************************************************************************/ int main(int argc, char* argv[]) { if (argc < 2) return -1; if(!argv[1]){ fputs("Input file expected\n", stderr); return -1; } /////////////////////////////// // Running Control Added By Roy /////////////////////////////// hipDeviceProp_t prop; int dev_id = atoi(argv[1]); int num_iter_control = atoi(argv[2]); printf("Device ID is %d, Loop is %d \n",dev_id,num_iter_control); printf("Choosing CUDA Device....\n"); hipError_t set_result = hipSetDevice(dev_id); printf("Set Result is: %s\n",hipGetErrorString(set_result)); hipGetDevice(&dev_id); hipGetDeviceProperties(&prop, dev_id); printf("Name: %s\n", prop.name); /////////////////////////////// // End of Running Control /////////////////////////////// char *prescans = "PreScanKernel"; char *postpremems = "PostPreMems"; char *intermediates = "IntermediatesKernel"; char *mains = "MainKernel"; char *finals = "FinalKernel"; int numIterations = 1; // if (argc >= 2){ // numIterations = 1; // } else { // fputs("Expected at least one command line argument\n", stderr); // return -1; // } unsigned int img_width, img_height; unsigned int histo_width, histo_height; FILE* f = fopen(argv[1],"rb"); int result = 0; result += fread(&img_width, sizeof(unsigned int), 1, f); result += fread(&img_height, sizeof(unsigned int), 1, f); result += fread(&histo_width, sizeof(unsigned int), 1, f); result += fread(&histo_height, sizeof(unsigned int), 1, f); if (result != 4){ fputs("Error reading input and output dimensions from file\n", stderr); return -1; } unsigned int* img = (unsigned int*) malloc (img_width*img_height*sizeof(unsigned int)); unsigned char* histo = (unsigned char*) calloc (histo_width*histo_height, sizeof(unsigned char)); result = fread(img, sizeof(unsigned int), img_width*img_height, f); fclose(f); if (result != img_width*img_height){ fputs("Error reading input array from file\n", stderr); return -1; } int even_width = ((img_width+1)/2)*2; unsigned 
int* input; unsigned int* ranges; uchar4* sm_mappings; unsigned int* global_subhisto; unsigned short* global_histo; unsigned int* global_overflow; unsigned char* final_histo; hipMalloc((void**)&input , even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int)); hipMalloc((void**)&ranges , 2*sizeof(unsigned int)); hipMalloc((void**)&sm_mappings , img_width*img_height*sizeof(uchar4)); hipMalloc((void**)&global_subhisto , BLOCK_X*img_width*histo_height*sizeof(unsigned int)); hipMalloc((void**)&global_histo , img_width*histo_height*sizeof(unsigned short)); hipMalloc((void**)&global_overflow , img_width*histo_height*sizeof(unsigned int)); hipMalloc((void**)&final_histo , img_width*histo_height*sizeof(unsigned char)); hipMemset(final_histo , 0 , img_width*histo_height*sizeof(unsigned char)); for (int y=0; y < img_height; y++){ hipMemcpy(&(((unsigned int*)input)[y*even_width]),&img[y*img_width],img_width*sizeof(unsigned int), hipMemcpyHostToDevice); } for (int i=0; i<num_iter_control;i++){ for (int iter = 0; iter < numIterations; iter++) { unsigned int ranges_h[2] = {UINT32_MAX, 0}; hipMemcpy(ranges,ranges_h, 2*sizeof(unsigned int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( histo_prescan_kernel), dim3(dim3(PRESCAN_BLOCKS_X)),dim3(dim3(PRESCAN_THREADS)), 0, 0, (unsigned int*)input, img_height*img_width, ranges); hipMemcpy(ranges_h,ranges, 2*sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemset(global_subhisto,0,img_width*histo_height*sizeof(unsigned int)); hipLaunchKernelGGL(( histo_intermediates_kernel), dim3(dim3((img_height + UNROLL-1)/UNROLL)), dim3(dim3((img_width+1)/2)), 0, 0, (uint2*)(input), (unsigned int)img_height, (unsigned int)img_width, (img_width+1)/2, (uchar4*)(sm_mappings) ); hipLaunchKernelGGL(( histo_main_kernel), dim3(dim3(BLOCK_X, ranges_h[1]-ranges_h[0]+1)), dim3(dim3(THREADS)), 0, 0, (uchar4*)(sm_mappings), img_height*img_width, ranges_h[0], ranges_h[1], histo_height, histo_width, (unsigned int*)(global_subhisto), (unsigned 
int*)(global_histo), (unsigned int*)(global_overflow) ); hipLaunchKernelGGL(( histo_final_kernel), dim3(dim3(BLOCK_X*3)), dim3(dim3(512)), 0, 0, ranges_h[0], ranges_h[1], histo_height, histo_width, (unsigned int*)(global_subhisto), (unsigned int*)(global_histo), (unsigned int*)(global_overflow), (unsigned int*)(final_histo) ); } } hipMemcpy(histo,final_histo, histo_height*histo_width*sizeof(unsigned char), hipMemcpyDeviceToHost); hipFree(input); hipFree(ranges); hipFree(sm_mappings); hipFree(global_subhisto); hipFree(global_histo); hipFree(global_overflow); hipFree(final_histo); if (argv[2]) { dump_histo_img(histo, histo_height, histo_width, argv[2]); } free(img); free(histo); printf("\n"); return 0; }
0b6c3049f18ec476f39fab143d45a69dbb8baba8.cu
/*************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include "util.h" __global__ void histo_prescan_kernel ( unsigned int* input, int size, unsigned int* minmax); __global__ void histo_main_kernel ( uchar4 *sm_mappings, unsigned int num_elements, unsigned int sm_range_min, unsigned int sm_range_max, unsigned int histo_height, unsigned int histo_width, unsigned int *global_subhisto, unsigned int *global_histo, unsigned int *global_overflow); __global__ void histo_intermediates_kernel ( uint2 *input, unsigned int height, unsigned int width, unsigned int input_pitch, uchar4 *sm_mappings); __global__ void histo_final_kernel ( unsigned int sm_range_min, unsigned int sm_range_max, unsigned int histo_height, unsigned int histo_width, unsigned int *global_subhisto, unsigned int *global_histo, unsigned int *global_overflow, unsigned int *final_histo); /****************************************************************************** * Implementation: GPU * Details: * in the GPU implementation of histogram, we begin by computing the span of the * input values into the histogram. Then the histogramming computation is carried * out by a (BLOCK_X, BLOCK_Y) sized grid, where every group of Y (same X) * computes its own partial histogram for a part of the input, and every Y in the * group exclusively writes to a portion of the span computed in the beginning. * Finally, a reduction is performed to combine all the partial histograms into * the final result. 
******************************************************************************/ int main(int argc, char* argv[]) { if (argc < 2) return -1; if(!argv[1]){ fputs("Input file expected\n", stderr); return -1; } /////////////////////////////// // Running Control Added By Roy /////////////////////////////// cudaDeviceProp prop; int dev_id = atoi(argv[1]); int num_iter_control = atoi(argv[2]); printf("Device ID is %d, Loop is %d \n",dev_id,num_iter_control); printf("Choosing CUDA Device....\n"); cudaError_t set_result = cudaSetDevice(dev_id); printf("Set Result is: %s\n",cudaGetErrorString(set_result)); cudaGetDevice(&dev_id); cudaGetDeviceProperties(&prop, dev_id); printf("Name: %s\n", prop.name); /////////////////////////////// // End of Running Control /////////////////////////////// char *prescans = "PreScanKernel"; char *postpremems = "PostPreMems"; char *intermediates = "IntermediatesKernel"; char *mains = "MainKernel"; char *finals = "FinalKernel"; int numIterations = 1; // if (argc >= 2){ // numIterations = 1; // } else { // fputs("Expected at least one command line argument\n", stderr); // return -1; // } unsigned int img_width, img_height; unsigned int histo_width, histo_height; FILE* f = fopen(argv[1],"rb"); int result = 0; result += fread(&img_width, sizeof(unsigned int), 1, f); result += fread(&img_height, sizeof(unsigned int), 1, f); result += fread(&histo_width, sizeof(unsigned int), 1, f); result += fread(&histo_height, sizeof(unsigned int), 1, f); if (result != 4){ fputs("Error reading input and output dimensions from file\n", stderr); return -1; } unsigned int* img = (unsigned int*) malloc (img_width*img_height*sizeof(unsigned int)); unsigned char* histo = (unsigned char*) calloc (histo_width*histo_height, sizeof(unsigned char)); result = fread(img, sizeof(unsigned int), img_width*img_height, f); fclose(f); if (result != img_width*img_height){ fputs("Error reading input array from file\n", stderr); return -1; } int even_width = ((img_width+1)/2)*2; 
unsigned int* input; unsigned int* ranges; uchar4* sm_mappings; unsigned int* global_subhisto; unsigned short* global_histo; unsigned int* global_overflow; unsigned char* final_histo; cudaMalloc((void**)&input , even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int)); cudaMalloc((void**)&ranges , 2*sizeof(unsigned int)); cudaMalloc((void**)&sm_mappings , img_width*img_height*sizeof(uchar4)); cudaMalloc((void**)&global_subhisto , BLOCK_X*img_width*histo_height*sizeof(unsigned int)); cudaMalloc((void**)&global_histo , img_width*histo_height*sizeof(unsigned short)); cudaMalloc((void**)&global_overflow , img_width*histo_height*sizeof(unsigned int)); cudaMalloc((void**)&final_histo , img_width*histo_height*sizeof(unsigned char)); cudaMemset(final_histo , 0 , img_width*histo_height*sizeof(unsigned char)); for (int y=0; y < img_height; y++){ cudaMemcpy(&(((unsigned int*)input)[y*even_width]),&img[y*img_width],img_width*sizeof(unsigned int), cudaMemcpyHostToDevice); } for (int i=0; i<num_iter_control;i++){ for (int iter = 0; iter < numIterations; iter++) { unsigned int ranges_h[2] = {UINT32_MAX, 0}; cudaMemcpy(ranges,ranges_h, 2*sizeof(unsigned int), cudaMemcpyHostToDevice); histo_prescan_kernel<<<dim3(PRESCAN_BLOCKS_X),dim3(PRESCAN_THREADS)>>>((unsigned int*)input, img_height*img_width, ranges); cudaMemcpy(ranges_h,ranges, 2*sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemset(global_subhisto,0,img_width*histo_height*sizeof(unsigned int)); histo_intermediates_kernel<<<dim3((img_height + UNROLL-1)/UNROLL), dim3((img_width+1)/2)>>>( (uint2*)(input), (unsigned int)img_height, (unsigned int)img_width, (img_width+1)/2, (uchar4*)(sm_mappings) ); histo_main_kernel<<<dim3(BLOCK_X, ranges_h[1]-ranges_h[0]+1), dim3(THREADS)>>>( (uchar4*)(sm_mappings), img_height*img_width, ranges_h[0], ranges_h[1], histo_height, histo_width, (unsigned int*)(global_subhisto), (unsigned int*)(global_histo), (unsigned int*)(global_overflow) ); 
histo_final_kernel<<<dim3(BLOCK_X*3), dim3(512)>>>( ranges_h[0], ranges_h[1], histo_height, histo_width, (unsigned int*)(global_subhisto), (unsigned int*)(global_histo), (unsigned int*)(global_overflow), (unsigned int*)(final_histo) ); } } cudaMemcpy(histo,final_histo, histo_height*histo_width*sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaFree(input); cudaFree(ranges); cudaFree(sm_mappings); cudaFree(global_subhisto); cudaFree(global_histo); cudaFree(global_overflow); cudaFree(final_histo); if (argv[2]) { dump_histo_img(histo, histo_height, histo_width, argv[2]); } free(img); free(histo); printf("\n"); return 0; }
882229425ae97321ff003682eabe2cc330f4a153.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * pre_process.cu * * Created on: 2018-3-28 * Author: qiushuang * * This file preprocesses De Bruijn graph: removing one-directed edge, indexing vertices with their location index, * using new index to replace the neighbors of vertices, splitting and gathering vertices to junctions and linear vertices */ //#include <hipcub/hipcub.hpp> #include <pthread.h> #include "../include/dbgraph.h" #include "../include/comm.h" #include "../include/distribute.h" #include "../include/share.h" #include "malloc.cuh" #include "preprocess_hip.cuh" #include "../include/scan.cu" //#define SYNC_ALL2ALL_ static uint * size_prime_index_ptr; static uint * size_prime_index_host; extern float elem_factor; voff_t max_ss = 0; extern int cutoff; float push_offset_time[NUM_OF_PROCS] = {0,0,0,0,0}; float push_time[NUM_OF_PROCS] = {0,0,0,0,0}; float pull_intra_time[NUM_OF_PROCS] = {0,0,0,0,0}; float pull_inter_time[NUM_OF_PROCS] = {0,0,0,0,0}; float memcpydh_time[NUM_OF_PROCS] = {0,0,0,0,0}; float memcpyhd_time[NUM_OF_PROCS] = {0,0,0,0,0}; float over_time[NUM_OF_PROCS] = {0,0,0,0,0}; extern float all2all_time_async; extern int lock_flag[NUM_OF_PROCS]; extern double mssg_factor; double junction_factor = 0; extern float inmemory_time; extern uint gmax_lsize; extern uint gmax_jsize; extern "C" { void init_hashtab_data_gpu (int did, master_t * mst, dbmeta_t * dbm, dbtable_t * tbs) { int * num_partitions = mst->num_partitions; int * partition_list = mst->partition_list; int num_of_partitions = num_partitions[did+1]-num_partitions[did];// number of partitions in this processor int total_num_partitions = mst->total_num_partitions; // total number of partitions in this compute node voff_t * index_offset = mst->index_offset[did]; int world_size = mst->world_size; int world_rank = mst->world_rank; int np_per_node = (total_num_partitions + world_size - 1)/world_size; int start_partition_id = np_per_node*world_rank; 
#ifdef SINGLE_NODE int num_of_devices = mst->num_of_devices; CUDA_CHECK_RETURN(hipSetDevice (world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(hipSetDevice (did + DEVICE_SHIFT)); #endif CUDA_CHECK_RETURN (hipMalloc (&size_prime_index_ptr, sizeof(uint) * num_of_partitions)); CUDA_CHECK_RETURN (hipMemcpyToSymbol (size_prime_index, &size_prime_index_ptr, sizeof(uint*))); size_prime_index_host = (uint *) malloc (sizeof(uint) * num_of_partitions); CHECK_PTR_RETURN (size_prime_index_host, "malloc size_prime_index_host error!\n"); int i; voff_t offset = 0; for (i=0; i<num_of_partitions; i++) { int poffset = num_partitions[did]; int pid = partition_list[poffset+i] - start_partition_id;//!!!be careful here, this pid is not the global partition id int pindex = mst->id2index[did][pid + start_partition_id]; if (pindex != i) { printf ("ERROR IN DISTRIBUTING PARTITIONS!!!!!!!!\n"); // exit(0); } voff_t size = tbs[pid].size; CUDA_CHECK_RETURN (hipMemcpy(dbm->comm.send, tbs[pid].buf, sizeof(entry_t) * size, hipMemcpyHostToDevice)); int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( init_hashtab) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, 0, size, offset); // init_hashtab_gpu <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, offset); index_offset[i] = offset; offset += size; uint num_of_elems = tbs[pid].num_elems; size_prime_index_host[i] = higher_prime_index (num_of_elems * elem_factor); free (tbs[pid].buf); } index_offset[i] = offset; CUDA_CHECK_RETURN (hipMemcpy(size_prime_index_ptr, size_prime_index_host, sizeof(uint) * num_of_partitions, hipMemcpyHostToDevice)); // printf ("index offset on GPU %d: \n", did); // print_offsets(index_offset[i], num_of_partitions); } void finalize_hashtab_data_gpu (void) { hipFree (size_prime_index_ptr); free (size_prime_index_host); } void d2h_mem (ull * dkmers, vid_t * dvids, ull * hkmers, vid_t * hvids, pair_t * pairs, uint size) { CUDA_CHECK_RETURN (hipMemcpy(hkmers, dkmers, sizeof(ull) * size, hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN (hipMemcpy(hvids, dvids, sizeof(vid_t) * size, hipMemcpyDeviceToHost)); uint i; for (i=0; i<size; i++) { pairs[i].kmer = hkmers[i]; pairs[i].vid = hvids[i]; } } void d2h_mem2 (kmer_t * dkmers, vid_t * dvids, kmer_t * hkmers, vid_t * hvids, kmer_vid_t * pairs, uint size) { CUDA_CHECK_RETURN (hipMemcpy(hkmers, dkmers, sizeof(kmer_t) * size, hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN (hipMemcpy(hvids, dvids, sizeof(vid_t) * size, hipMemcpyDeviceToHost)); uint i; for (i=0; i<size; i++) { pairs[i].kmer = hkmers[i]; pairs[i].vid = hvids[i]; } } void h2d_mem (ull * dkmers, vid_t * dvids, ull * hkmers, vid_t * hvids, pair_t * pairs, uint size) { uint i; for (i=0; i<size; i++) { hkmers[i] = pairs[i].kmer; hvids[i] = pairs[i].vid; } CUDA_CHECK_RETURN (hipMemcpy(dkmers, hkmers, sizeof(ull) * size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN (hipMemcpy(dvids, hvids, sizeof(vid_t) * size, hipMemcpyHostToDevice)); } void h2d_mem2 (kmer_t * dkmers, vid_t * dvids, kmer_t * hkmers, vid_t * hvids, kmer_vid_t * pairs, uint size) { 
uint i; for (i=0; i<size; i++) { hkmers[i] = pairs[i].kmer; hvids[i] = pairs[i].vid; } CUDA_CHECK_RETURN (hipMemcpy(dkmers, hkmers, sizeof(kmer_t) * size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN (hipMemcpy(dvids, hvids, sizeof(vid_t) * size, hipMemcpyHostToDevice)); } void init_binary_data_gpu (int did, master_t * mst, dbmeta_t * dbm, dbtable_t * tbs) { int * num_partitions = mst->num_partitions; int * partition_list = mst->partition_list; int num_of_partitions = num_partitions[did+1]-num_partitions[did];// number of partitions in this processor int total_num_partitions = mst->total_num_partitions; // total number of partitions in this compute node voff_t * index_offset = mst->index_offset[did]; int world_size = mst->world_size; int world_rank = mst->world_rank; int np_per_node = (total_num_partitions + world_size - 1)/world_size; int start_partition_id = np_per_node*world_rank; #ifdef SINGLE_NODE int num_of_devices = mst->num_of_devices; CUDA_CHECK_RETURN(hipSetDevice (world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(hipSetDevice (did + DEVICE_SHIFT)); #endif int i; uint offset = 0; kmer_vid_t * pairs = (kmer_vid_t *) malloc (sizeof(kmer_vid_t) * max_ss); kmer_t * hkmers = (kmer_t *) malloc (sizeof(kmer_t) * max_ss); vid_t * hvids = (vid_t *) malloc (sizeof(vid_t) * max_ss); #ifdef USE_CUB_ void * dtmp; size_t temp_size = 0; hipcub::DeviceRadixSort::SortPairs (dtmp, temp_size, dbm->before_sort, dbm->sorted_kmers, dbm->before_vids, dbm->sorted_vids, max_ss, 0, sizeof(ull) * 8); printf ("max subsize: %u, cub device temp size for sort:%lu\n", max_ss, temp_size); CUDA_CHECK_RETURN (hipMalloc(&dtmp, temp_size)); #endif for (i=0; i<num_of_partitions; i++) { int poffset = num_partitions[did]; int pid = partition_list[poffset+i] - start_partition_id;//!!!be careful here, this pid is not the global partition id int pindex = mst->id2index[did][pid + start_partition_id]; if (pindex != i) { printf ("ERROR IN DISTRIBUTING PARTITIONS!!!!!!!!\n"); // exit(0); } 
voff_t size = tbs[pid].size; CUDA_CHECK_RETURN (hipMemset (dbm->lvld, 0, sizeof(voff_t) * (size+1))); CUDA_CHECK_RETURN (hipMemcpy(dbm->comm.send, tbs[pid].buf, sizeof(entry_t) * size, hipMemcpyHostToDevice)); int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( init_kmers) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, 0, size, offset); inclusive_scan<voff_t> (dbm->lvld + 1, size, NULL); CUDA_CHECK_RETURN (hipMemcpy (&offset, &dbm->lvld[size], sizeof(voff_t), hipMemcpyDeviceToHost)); hipLaunchKernelGGL(( gather_kmers) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, 0, size, offset); #ifdef USE_CUB_ hipcub::DeviceRadixSort::SortPairs (dtmp, temp_size, dbm->before_sort, dbm->sorted_kmers+index_offset[i], dbm->before_vids, dbm->sorted_vids+index_offset[i], offset, 0, sizeof(ull) * 8); #endif if (offset > max_ss) { printf ("error!!!!!!\n"); // exit(0); } #ifndef USE_CUB_ d2h_mem2 (dbm->before_sort, dbm->before_vids, hkmers, hvids, pairs, offset); tbb_kmer_vid_sort (pairs, offset); // sort the kmers with the vertex ids h2d_mem2 (dbm->sorted_kmers + index_offset[i], dbm->sorted_vids + index_offset[i], hkmers, hvids, pairs, offset); #endif num_of_blocks = (offset + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( gather_edges) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, 0, offset, index_offset[i]); // gather edges with sorted vertices free (tbs[pid].buf); index_offset[i+1] = index_offset[i] + offset; } #ifdef USE_CUB_ hipFree(dtmp); #endif free (pairs); free (hkmers); free (hvids); } void init_binary_data_gpu_sorted (int did, master_t * mst, dbmeta_t * dbm, dbtable_t * tbs) { int * num_partitions = mst->num_partitions; int * partition_list = mst->partition_list; int num_of_partitions = num_partitions[did+1]-num_partitions[did];// number of partitions in this processor int total_num_partitions = mst->total_num_partitions; // total number of partitions in this compute node voff_t * index_offset = mst->index_offset[did]; int world_size = mst->world_size; int world_rank = mst->world_rank; int np_per_node = (total_num_partitions + world_size - 1)/world_size; int start_partition_id = np_per_node*world_rank; #ifdef SINGLE_NODE int num_of_devices = mst->num_of_devices; CUDA_CHECK_RETURN(hipSetDevice (world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(hipSetDevice (did + DEVICE_SHIFT)); #endif int i; for (i=0; i<num_of_partitions; i++) { int poffset = num_partitions[did]; int pid = partition_list[poffset+i] - start_partition_id;//!!!be careful here, this pid is not the global partition id int pindex = mst->id2index[did][pid + start_partition_id]; if (pindex != i) { printf ("ERROR IN DISTRIBUTING PARTITIONS!!!!!!!!\n"); } voff_t size = tbs[pid].size; CUDA_CHECK_RETURN (hipMemcpy(dbm->comm.send, tbs[pid].buf, sizeof(entry_t) * size, hipMemcpyHostToDevice)); int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( gather_vs) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, 0, size, index_offset[i]); // gather edges with sorted vertices free (tbs[pid].buf); index_offset[i+1] = index_offset[i] + size; } } void * neighbor_push_intra_pull_gpu (void * arg) { pre_arg * garg = (pre_arg *) arg; int did = garg->did; comm_t * cm = &garg->dbm->comm; master_t * mst = garg->mst; int k = garg->k; int p = garg->p; int total_num_partitions = mst->total_num_partitions; int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did]; voff_t * index_offset = mst->index_offset[did]; if (mst->world_rank == 0) printf ("WORLD RANK %d: Neigbhors push intra pull gpu %d:\n", mst->world_rank, did); #ifdef SINGLE_NODE int world_rank = mst->world_rank; int num_of_devices = mst->num_of_devices; CUDA_CHECK_RETURN(hipSetDevice (world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(hipSetDevice (did + DEVICE_SHIFT)); #endif CUDA_CHECK_RETURN (hipMemset(cm->send_offsets, 0, sizeof(voff_t) * (total_num_partitions + 1))); #ifdef MEASURE_TIME_ evaltime_t start, end; CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif int i; for (i=0; i<num_of_partitions; i++) { voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; // push_mssg_offset_assign_id <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, total_num_partitions, index_offset[i], k, p); // push_mssg_offset_assign_id_gpu <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, total_num_partitions, index_offset[i], k, p); hipLaunchKernelGGL(( push_mssg_offset_assign_id_binary) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, streams[did][0], size, total_num_partitions, index_offset[i], k, p, cutoff); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); push_offset_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH MSSG OFFSET FOR GPU *ASSIGNING IDS* INTRA PROCESSOR TIME: "); #endif inclusive_scan<voff_t> (cm->send_offsets, total_num_partitions + 1, NULL); CUDA_CHECK_RETURN (hipMemset(cm->tmp_send_offsets, 0, sizeof(voff_t) * (total_num_partitions + 1))); #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif for (i=0; i< num_of_partitions; i++) { voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( push_mssg_assign_id_binary) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, streams[did][0], size, total_num_partitions, index_offset[i], k, p, cutoff); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); push_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH MSSG FOR GPU *ASSIGNING IDS* INTRA PROCESSOR TIME: "); #endif CUDA_CHECK_RETURN (hipMemcpy(mst->roff[did], cm->send_offsets, sizeof(voff_t) * (total_num_partitions + 1), hipMemcpyDeviceToHost)); voff_t inter_start = mst->roff[did][num_of_partitions]; voff_t inter_end = mst->roff[did][total_num_partitions]; #ifndef SYNC_ALL2ALL_ if (atomic_set_value(&lock_flag[did], 1, 0) == false) printf ("!!!!!!!!!!! CAREFUL: ATOMIC SET VALUE ERROR IN GPU %d\n", did); #endif #ifdef MEASURE_MEMCPY_ gettimeofday(&start, NULL); CUDA_CHECK_RETURN (hipMemcpy(mst->receive[did], (assid_t *)cm->send + inter_start, sizeof(assid_t) * (inter_end-inter_start), hipMemcpyDeviceToHost)); gettimeofday(&end, NULL); memcpydh_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #else #ifndef SYNC_ALL2ALL_ CUDA_CHECK_RETURN (hipMemcpyAsync(mst->receive[did], (assid_t *)cm->send + inter_start, sizeof(assid_t) * (inter_end-inter_start), hipMemcpyDeviceToHost, streams[did][1])); #else CUDA_CHECK_RETURN (hipMemcpy(mst->receive[did], (assid_t *)cm->send + inter_start, sizeof(assid_t) * (inter_end-inter_start), hipMemcpyDeviceToHost)); #endif #endif if (INTER_BUF_FACTOR == 1) { #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif for (i=0; i<num_of_partitions; i++) { int poffset = mst->num_partitions[did]; int pid = mst->partition_list[poffset+i]; voff_t num_mssgs = mst->roff[did][i+1] - mst->roff[did][i]; voff_t size = 
index_offset[i+1] - index_offset[i]; int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( pull_mssg_assign_id_binary) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, streams[did][0], num_mssgs, pid, size, index_offset[i], total_num_partitions, 0, 1, did); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); pull_intra_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PULL MSSG FOR GPU %d LISTRANKING INTRA PROCESSOR TIME: ", did); #endif } return ((void *) 0); } void * neighbor_inter_pull_gpu (void * arg) { pre_arg * garg = (pre_arg *) arg; int did = garg->did; comm_t * cm = &garg->dbm->comm; master_t * mst = garg->mst; if (mst->world_rank == 0) printf ("WORLD RANK %d: neighbor inter pull gpu %d:\n", mst->world_rank, did); int total_num_partitions = mst->total_num_partitions; int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did]; voff_t * index_offset = mst->index_offset[did]; #ifdef SINGLE_NODE int world_rank = mst->world_rank; int num_of_devices = mst->num_of_devices; CUDA_CHECK_RETURN(hipSetDevice (world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(hipSetDevice (did + DEVICE_SHIFT)); #endif voff_t receive_start = mst->roff[did][num_of_partitions]; #ifdef MEASURE_MEMCPY_ evaltime_t start, end; gettimeofday(&start, NULL); #endif CUDA_CHECK_RETURN (hipMemcpy(cm->receive_offsets, mst->soff[did], sizeof(voff_t) * (num_of_partitions + 1), hipMemcpyHostToDevice)); voff_t inter_size = mst->soff[did][num_of_partitions]; if (inter_size == 0) return ((void *) 0); // tbb_assid_sort ((assid_t *)(mst->send[did]), inter_size); CUDA_CHECK_RETURN (hipMemcpy((assid_t*)cm->send + receive_start, mst->send[did], sizeof(assid_t) * inter_size, 
hipMemcpyHostToDevice)); #ifdef MEASURE_MEMCPY_ gettimeofday(&end, NULL); memcpyhd_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #endif #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif int i; for (i=0; i<num_of_partitions; i++) { int poffset = mst->num_partitions[did]; int pid = mst->partition_list[poffset+i]; voff_t size = index_offset[i+1] - index_offset[i]; voff_t num_mssgs = mst->soff[did][i+1] - mst->soff[did][i]; int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( pull_mssg_assign_id_binary) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, 0, num_mssgs, pid, size, index_offset[i], total_num_partitions, receive_start, 0, did); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); pull_inter_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PULL MSSG FOR GPU %d *ASSIGNING IDS* INTER PROCESSORS TIME: ", did); #endif return ((void *) 0); } void * identify_vertices_gpu (void * arg) { pre_arg * garg = (pre_arg *) arg; master_t * mst = garg->mst; dbmeta_t * dbm = garg->dbm; int did = garg->did; if (mst->world_rank == 0) printf ("WORLD RANK %d: CPU identifying vertices DID=%d:\n", mst->world_rank, did); #ifdef SINGLE_NODE int world_rank = mst->world_rank; int num_of_devices = mst->num_of_devices; CUDA_CHECK_RETURN(hipSetDevice (world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(hipSetDevice (did + DEVICE_SHIFT)); #endif int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did]; voff_t * index_offset = mst->index_offset[did]; hipMemset (dbm->lvld, 0, sizeof(voff_t) * (max_ss+1)); int i; #ifdef MEASURE_TIME_ evaltime_t start, end; 
gettimeofday (&start, NULL); #endif for (i=0; i<num_of_partitions; i++) { int poffset = mst->num_partitions[did]; int pid = mst->partition_list[poffset+i]; voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks; CUDA_CHECK_RETURN (hipMemset(dbm->jvld, 0, sizeof(uint) * size)); CUDA_CHECK_RETURN (hipMemset(dbm->lvld, 0, sizeof(uint) * size)); // label_vertex_with_flags <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, index_offset[i]); hipLaunchKernelGGL(( label_vertex_with_flags_binary) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, 0, size, index_offset[i], cutoff); // inclusive_scan<uint> (dbm->jvld + index_offset[i], size, NULL); // inclusive_scan<uint> (dbm->lvld + index_offset[i], size, NULL); inclusive_scan<uint> (dbm->jvld, size, NULL); inclusive_scan<uint> (dbm->lvld, size, NULL); voff_t jsize, lsize; // CUDA_CHECK_RETURN (hipMemcpy(&jsize, &(dbm->jvld + index_offset[i])[size-1], sizeof(voff_t), hipMemcpyDeviceToHost)); // CUDA_CHECK_RETURN (hipMemcpy(&lsize, &(dbm->lvld + index_offset[i])[size-1], sizeof(voff_t), hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN (hipMemcpy(&jsize, &(dbm->jvld)[size-1], sizeof(voff_t), hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN (hipMemcpy(&lsize, &(dbm->lvld)[size-1], sizeof(voff_t), hipMemcpyDeviceToHost)); mst->jid_offset[pid] = jsize; mst->id_offsets[pid+1] = jsize + lsize; } #ifdef MEASURE_TIME_ gettimeofday (&end, NULL); print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& IDENTIFYING IDS OF VERTICES TIME: "); #endif return ((void *)0); } void * assign_vertex_ids_gpu (void * arg) { pre_arg * garg = (pre_arg *) arg; master_t * mst = garg->mst; int did = garg->did; dbmeta_t * dbm = garg->dbm; if (mst->world_rank == 0) printf ("WORLD RANK %d: CPU assigning vertex ids DID = %d:\n", mst->world_rank, did); int num_of_partitions = mst->num_partitions[did + 1] - 
mst->num_partitions[did]; voff_t * index_offset = mst->index_offset[did]; #ifdef SINGLE_NODE int world_rank = mst->world_rank; int num_of_devices = mst->num_of_devices; CUDA_CHECK_RETURN(hipSetDevice (world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(hipSetDevice (did + DEVICE_SHIFT)); #endif int i; #ifdef MEASURE_TIME_ evaltime_t start, end; gettimeofday (&start, NULL); #endif for (i=0; i<num_of_partitions; i++) { int poffset = mst->num_partitions[did]; int pid = mst->partition_list[poffset+i]; voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks; CUDA_CHECK_RETURN (hipMemset(dbm->jvld, 0, sizeof(uint) * size)); CUDA_CHECK_RETURN (hipMemset(dbm->lvld, 0, sizeof(uint) * size)); hipLaunchKernelGGL(( label_vertex_with_flags_binary) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, 0, size, index_offset[i], cutoff); inclusive_scan<uint> (dbm->jvld, size, NULL); inclusive_scan<uint> (dbm->lvld, size, NULL); hipLaunchKernelGGL(( assid_vertex_with_flags) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, 0, size, pid, index_offset[i]); } #ifdef MEASURE_TIME_ gettimeofday (&end, NULL); print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& ASSIGNING IDS OF VERTICES TIME: "); #endif return ((void *)0); } void * gather_vertices_gpu (void * arg) { pre_arg * garg = (pre_arg *) arg; master_t * mst = garg->mst; dbmeta_t * dbm = garg->dbm; int k = garg->k; int p = garg->p; d_jvs_t * js = garg->js; d_lvs_t * ls = garg->ls; ull * js_spids = garg->dbm->djs.spids; ull * js_spidsr = garg->dbm->djs.spidsr; uint * ls_spids = garg->dbm->dls.spids; subgraph_t * subgraph = garg->subgraph; int did = garg->did; printf ("identifying vertices gpu %d:\n", did); int total_num_partitions = mst->total_num_partitions; int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did]; voff_t * index_offset = 
mst->index_offset[did]; #ifdef SINGLE_NODE int world_rank = mst->world_rank; int num_of_devices = mst->num_of_devices; CUDA_CHECK_RETURN(hipSetDevice (world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(hipSetDevice (did + DEVICE_SHIFT)); #endif int i; #ifdef MEASURE_TIME_ evaltime_t start, end; gettimeofday (&start, NULL); #endif for (i=0; i<num_of_partitions; i++) { int poffset = mst->num_partitions[did]; int pid = mst->partition_list[poffset+i]; voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks; // gather_vertex_binary <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, pid, index_offset[i], cutoff); CUDA_CHECK_RETURN (hipMemset (js_spids, 0, sizeof(ull) * gmax_jsize)); CUDA_CHECK_RETURN (hipMemset (js_spidsr, 0, sizeof(ull) * gmax_jsize)); CUDA_CHECK_RETURN (hipMemset (ls_spids, 0, sizeof(uint) * gmax_lsize)); hipLaunchKernelGGL(( gather_vertex_partitioned) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, 0, size, pid, index_offset[i], cutoff, k, p, total_num_partitions); uint jsize = mst->jid_offset[pid]; uint lsize = mst->id_offsets[pid+1] - mst->id_offsets[pid] - jsize; // write_junctions_gpu (dbm, mst, jsize, lsize, pid, total_num_partitions, did); // write_linear_vertices_gpu (dbm, mst, jsize, lsize, pid, total_num_partitions, did); output_vertices_gpu (dbm, mst, jsize, lsize, pid, total_num_partitions, did, js, ls, subgraph); write_kmers_edges_gpu (dbm, mst, jsize, lsize, pid, total_num_partitions, did); } #ifdef MEASURE_TIME_ gettimeofday (&end, NULL); print_exec_time (start, end, "DEVICE %d: &&&&&&&&&&&&&&&&&&&& GATHERING VERTICES TIME: ", did); #endif // printf ("DEVICE %d: NUMBER OF VERTICES PROCESSED: %u\n", did, index_offset[num_of_partitions]); // write_ids_gpu (dbm, mst, num_of_partitions, did); return ((void *)0); } void * shakehands_push_respond_intra_push_gpu (void * 
arg) { evaltime_t overs, overe; pre_arg * garg = (pre_arg *) arg; int did = garg->did; comm_t * cm = &garg->dbm->comm; master_t * mst = garg->mst; int k = garg->k; int p = garg->p; if (mst->world_rank == 0) printf ("WORLD RANK %d: Shakehands push respond intra push gpu %d:\n", mst->world_rank, did); #ifdef SINGLE_NODE int num_of_devices = mst->num_of_devices; int world_rank = mst->world_rank; CUDA_CHECK_RETURN(hipSetDevice (world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(hipSetDevice (did + DEVICE_SHIFT)); #endif gettimeofday (&overs, NULL); int total_num_partitions = mst->total_num_partitions; int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did]; int poffset = mst->num_partitions[did]; voff_t * index_offset = mst->index_offset[did]; CUDA_CHECK_RETURN (hipMemset(cm->send_offsets, 0, sizeof(voff_t) * (total_num_partitions + 1))); #ifdef MEASURE_TIME_ evaltime_t start, end; CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif int i; for (i=0; i<num_of_partitions; i++) { voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( push_mssg_offset_shakehands) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), sizeof(vid_t)*(total_num_partitions+1), streams[did][0], size, total_num_partitions, index_offset[i], k, p, cutoff); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); push_offset_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH MSSG OFFSET FOR GPU *SHAKEHANDS* INTRA PROCESSOR TIME: "); #endif inclusive_scan<voff_t> (cm->send_offsets, total_num_partitions+1, NULL); CUDA_CHECK_RETURN (hipMemset(cm->tmp_send_offsets, 0, sizeof(voff_t) * (total_num_partitions + 1))); #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif for (i=0; i< num_of_partitions; i++) { int pid = mst->partition_list[poffset+i]; voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( push_mssg_shakehands) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), sizeof(vid_t)*(total_num_partitions+1), streams[did][0], size, total_num_partitions, index_offset[i], k, p, pid, cutoff); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); push_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH MSSG FOR GPU *SHAKEHANDS* INTRA PROCESSOR TIME: "); #endif #ifdef MEASURE_MEMCPY_ gettimeofday(&start, NULL); #endif CUDA_CHECK_RETURN (hipMemcpy(mst->roff[did], cm->send_offsets, sizeof(voff_t) * (total_num_partitions + 1), hipMemcpyDeviceToHost)); #ifdef MEASURE_MEMCPY_ gettimeofday(&end, NULL); memcpydh_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #endif voff_t inter_start = mst->roff[did][num_of_partitions]; voff_t inter_end = mst->roff[did][total_num_partitions]; #ifndef SYNC_ALL2ALL_ if (atomic_set_value(&lock_flag[did], 1, 0) == false) printf ("!!!!!!!!!!! 
CAREFUL: ATOMIC SET VALUE ERROR IN GPU %d\n", did); #endif #ifdef MEASURE_MEMCPY_ gettimeofday(&start, NULL); CUDA_CHECK_RETURN (hipMemcpy(mst->receive[did], (shakehands_t *)cm->send + inter_start, sizeof(shakehands_t) * (inter_end-inter_start), hipMemcpyDeviceToHost)); gettimeofday(&end, NULL); memcpydh_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #else #ifndef SYNC_ALL2ALL_ CUDA_CHECK_RETURN (hipMemcpyAsync(mst->receive[did], (shakehands_t *)cm->send + inter_start, sizeof(shakehands_t) * (inter_end-inter_start), hipMemcpyDeviceToHost, streams[did][1])); #else CUDA_CHECK_RETURN (hipMemcpy(mst->receive[did], (shakehands_t *)cm->send + inter_start, sizeof(shakehands_t) * (inter_end-inter_start), hipMemcpyDeviceToHost)); #endif #endif CUDA_CHECK_RETURN (hipMemset (cm->extra_send_offsets, 0, sizeof(voff_t) * (total_num_partitions+1))); #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif for (i=0; i<num_of_partitions; i++) { int pid = mst->partition_list[poffset + i]; voff_t num_mssgs = mst->roff[did][i+1] - mst->roff[did][i]; int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; voff_t size = index_offset[i+1] - index_offset[i]; hipLaunchKernelGGL(( push_mssg_offset_respond) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), sizeof(vid_t)*(total_num_partitions+1), streams[did][0], num_mssgs, pid, size, index_offset[i], total_num_partitions, 0, 1, k, p, cutoff); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); pull_intra_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH *RESPOND* OFFSET GPU INTRA PROCESSOR TIME: "); #endif gettimeofday (&overe, NULL); over_time[did] += (float)((overe.tv_sec * 1000000 + overe.tv_usec) - (overs.tv_sec * 1000000 + overs.tv_usec)) / 1000; return ((void *) 0); } void * shakehands_pull_respond_inter_push_intra_pull_gpu (void * arg) { evaltime_t overs, overe; pre_arg * carg = (pre_arg *) arg; int did = carg->did; int k = carg->k; int p = carg->p; comm_t * cm = &carg->dbm->comm; master_t * mst = carg->mst; int num_of_devices = mst->num_of_devices; int world_rank = mst->world_rank; if (world_rank == 0) printf ("WORLD RANK %d: shakehands pull respond inter push intra pull gpu %d\n", world_rank, did); #ifdef SINGLE_NODE CUDA_CHECK_RETURN(hipSetDevice (world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(hipSetDevice (did + DEVICE_SHIFT)); #endif gettimeofday (&overs, NULL); int total_num_partitions = mst->total_num_partitions; int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did]; int poffset = mst->num_partitions[did]; voff_t * index_offset = mst->index_offset[did]; voff_t receive_start = mst->roff[did][num_of_partitions]; voff_t inter_size = mst->soff[did][num_of_partitions]; #ifdef MEASURE_MEMCPY_ evaltime_t start, end; gettimeofday(&start, NULL); #endif CUDA_CHECK_RETURN (hipMemcpy(cm->receive_offsets, mst->soff[did], sizeof(voff_t) * (num_of_partitions + 1), hipMemcpyHostToDevice)); #ifdef 
MEASURE_MEMCPY_ gettimeofday(&end, NULL); memcpyhd_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; gettimeofday(&start, NULL); #endif CUDA_CHECK_RETURN (hipMemcpy((shakehands_t*)(cm->send) + receive_start, mst->send[did], sizeof(shakehands_t) * inter_size, hipMemcpyHostToDevice)); #ifdef MEASURE_MEMCPY_ gettimeofday(&end, NULL); memcpyhd_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #endif #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif int i; for (i=0; i<num_of_partitions; i++) { int pid = mst->partition_list[poffset + i]; voff_t num_mssgs = mst->soff[did][i+1] - mst->soff[did][i]; voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( push_mssg_offset_respond) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), sizeof(vid_t)*(total_num_partitions+1), streams[did][0], num_mssgs, pid, size, index_offset[i], total_num_partitions, receive_start, 0, k, p, cutoff); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); pull_inter_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH *RESPOND* OFFSET GPU INTER PROCESSORS TIME: "); #endif inclusive_scan<voff_t> (cm->extra_send_offsets, total_num_partitions+1, NULL); // *************** malloc (send and) receive buffer for pull and push mode voff_t rcv_size; CUDA_CHECK_RETURN (hipMemcpy (&rcv_size, cm->extra_send_offsets + num_of_partitions, sizeof(voff_t), hipMemcpyDeviceToHost)); if (rcv_size == 0) { printf ("CCCCCCCCCcccareful:::::::::: receive size from intra junction update push is 0!!!!!!!!\n"); rcv_size = 
1000; } cm->temp_size = malloc_pull_push_receive_device (&cm->receive, sizeof(shakehands_t), did, rcv_size, 2*(total_num_partitions+num_of_partitions-1)/num_of_partitions, world_rank, num_of_devices); set_receive_buffer_gpu (&cm->receive, did, world_rank, num_of_devices); CUDA_CHECK_RETURN (hipMemset(cm->tmp_send_offsets, 0, sizeof(voff_t) * (total_num_partitions + 1))); #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif for (i=0; i<num_of_partitions; i++) { int pid = mst->partition_list[poffset+i]; voff_t num_mssgs = mst->roff[did][i+1] - mst->roff[did][i]; voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( push_mssg_respond) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), sizeof(vid_t)*(total_num_partitions+1), streams[did][0], num_mssgs, pid, size, index_offset[i], total_num_partitions, 0, 1, k, p, cutoff); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); push_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH *RESPOND* GPU INTRA PROCESSOR TIME: "); #endif #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif for (i=0; i<num_of_partitions; i++) { int pid = mst->partition_list[poffset+i]; voff_t num_mssgs = mst->soff[did][i+1] - mst->soff[did][i]; voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( push_mssg_respond) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), sizeof(vid_t)*(total_num_partitions+1), streams[did][0], num_mssgs, pid, size, index_offset[i], total_num_partitions, receive_start, 0, k, p, cutoff); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); push_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH *RESPOND* GPU INTER PROCESSORS TIME: "); #endif #ifdef MEASURE_MEMCPY_ gettimeofday(&start, NULL); #endif CUDA_CHECK_RETURN (hipMemcpy(mst->roff[did], cm->extra_send_offsets, sizeof(voff_t)*(total_num_partitions + 1), hipMemcpyDeviceToHost)); #ifdef MEASURE_MEMCPY_ gettimeofday(&end, NULL); memcpydh_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #endif voff_t inter_start = mst->roff[did][num_of_partitions]; voff_t inter_end = mst->roff[did][total_num_partitions]; printf ("WORLD RANK %d: @@@@@@@@@@@@@@@@@@@@@ total number of shakehands pushed in device %d: %lu\n", mst->world_rank, did, inter_end); printf ("WORLD RANK %d: ############### number of intra mssgs pulled for inter shakehands of device %d: %lu\n", mst->world_rank, did, inter_start); #ifndef SYNC_ALL2ALL_ if (atomic_set_value(&lock_flag[did], 1, 0) == false) printf ("!!!!!!!!!!! 
CAREFUL: ATOMIC SET VALUE ERROR IN GPU %d\n", did); #endif #ifdef MEASURE_TIME_ gettimeofday(&start, NULL); CUDA_CHECK_RETURN (hipMemcpy(mst->receive[did], (shakehands_t *)cm->receive+inter_start, (inter_end-inter_start)*sizeof(shakehands_t), hipMemcpyDeviceToHost)); gettimeofday(&end, NULL); memcpydh_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #else #ifndef SYNC_ALL2ALL_ CUDA_CHECK_RETURN (hipMemcpyAsync(mst->receive[did], (shakehands_t *)cm->receive+inter_start, (inter_end-inter_start)*sizeof(shakehands_t), hipMemcpyDeviceToHost, streams[did][1])); #else CUDA_CHECK_RETURN (hipMemcpy(mst->receive[did], (shakehands_t *)cm->receive+inter_start, (inter_end-inter_start)*sizeof(shakehands_t), hipMemcpyDeviceToHost)); #endif #endif #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif for (i=0; i<num_of_partitions; i++) { int pid = mst->partition_list[poffset + i]; voff_t num_mssgs = mst->roff[did][i+1] - mst->roff[did][i]; voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( pull_mssg_respond) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, streams[did][0], num_mssgs, pid, size, index_offset[i], cm->receive, 1); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); pull_intra_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PULL *RESPOND* GPU %d INTRA PROCESSOR TIME: ", did); #endif gettimeofday (&overe, NULL); over_time[did] += (float)((overe.tv_sec * 1000000 + overe.tv_usec) - (overs.tv_sec * 1000000 + overs.tv_usec)) / 1000; return ((void *) 0); } void * respond_inter_pull_gpu (void * arg) { evaltime_t overs, overe; pre_arg * carg = (pre_arg *) arg; comm_t * cm = &carg->dbm->comm; master_t * mst = carg->mst; int did = carg->did; int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did]; int poffset = mst->num_partitions[did]; voff_t * index_offset = mst->index_offset[did]; int num_of_devices = mst->num_of_devices; int world_rank = mst->world_rank; if (world_rank == 0) printf ("WORLD RANK %d: respond inter pull gpu %d:\n", world_rank, did); #ifdef SINGLE_NODE CUDA_CHECK_RETURN(hipSetDevice (world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(hipSetDevice (did + DEVICE_SHIFT)); #endif gettimeofday (&overs, NULL); voff_t receive_start = mst->roff[did][num_of_partitions]; voff_t inter_size = mst->soff[did][num_of_partitions]; printf ("WORLD RANK %d: ############### number of inter mssgs pulled for inter shakehands of device %d: %lu\n", mst->world_rank, did, inter_size); if (cm->temp_size <= (inter_size+receive_start)*sizeof(shakehands_t)) { printf("WORLD RANK %d: Error:::::::: malloced receive buffer size smaller than actual receive buffer size!\n", mst->world_rank); exit(0); } #ifdef MEASURE_MEMCPY_ evaltime_t start, end; gettimeofday(&start, NULL); #endif CUDA_CHECK_RETURN 
(hipMemcpy(cm->receive_offsets, mst->soff[did], sizeof(voff_t) * (num_of_partitions+1), hipMemcpyHostToDevice)); #ifdef MEASURE_MEMCPY_ gettimeofday(&end, NULL); memcpyhd_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; gettimeofday(&start, NULL); #endif CUDA_CHECK_RETURN (hipMemcpy((shakehands_t *)(cm->receive) + receive_start, mst->send[did], sizeof(shakehands_t) * inter_size, hipMemcpyHostToDevice)); #ifdef MEASURE_MEMCPY_ gettimeofday(&end, NULL); memcpyhd_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #endif #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&start, NULL); #endif int i; for (i=0; i<num_of_partitions; i++) { if (inter_size == 0) break; int pid = mst->partition_list[poffset + i]; voff_t num_mssgs = mst->soff[did][i+1] - mst->soff[did][i]; voff_t size = index_offset[i+1] - index_offset[i]; int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; hipLaunchKernelGGL(( pull_mssg_respond) , dim3(block_size), dim3(THREADS_PER_BLOCK_NODES), 0, 0, num_mssgs, pid, size, index_offset[i], (char*)cm->receive + sizeof(shakehands_t) * receive_start, 0); } #ifdef MEASURE_TIME_ CUDA_CHECK_RETURN (hipDeviceSynchronize()); gettimeofday (&end, NULL); pull_inter_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PULL *RESPOND* GPU INTER PROCESSORS TIME: "); #endif gettimeofday (&overe, NULL); over_time[did] += (float)((overe.tv_sec * 1000000 + overe.tv_usec) - (overs.tv_sec * 1000000 + overs.tv_usec)) / 1000; // *************** free (send and) receive buffer for pull and push mode free_pull_push_receive_device (did, cm, world_rank, num_of_devices); return ((void *) 0); } void pre_process_dbgraph (int num_of_partitions, int k, int p, dbtable_t * tbs, master_t * mst, subgraph_t * subgraph, d_jvs_t * js, d_lvs_t * ls, int world_size, int world_rank) { float all2all_time = 0; evaltime_t start, end; evaltime_t overs, overe; evaltime_t tmps, tmpe; evaltime_t inms, inme; // gettimeofday (&start, NULL); mst->total_num_partitions = num_of_partitions; //total_num_of_partitions=input num_of_partitions mst->world_size = world_size; mst->world_rank = world_rank; int np_per_node; int np_node; get_np_node (&np_per_node, &np_node, num_of_partitions, world_size, world_rank); if (mst->world_rank == 0) printf ("WORLD RANK %d IIIIIIIIIII initialize distributing partitions: \n", mst->world_rank); gettimeofday(&start, NULL); init_distribute_partitions (num_of_partitions, mst, world_size); get_subgraph_sizes (subgraph, np_node); if (mssg_factor == 0) mssg_factor = MSSG_FACTOR; double unit_vsize = sizeof(assid_t)*(mssg_factor+0.1)+sizeof(kmer_t)+sizeof(vid_t)*EDGE_DIC_SIZE+sizeof(ull)+sizeof(voff_t)*2+sizeof(vid_t); distribute_partitions (num_of_partitions, mst, subgraph, uneven, world_size, world_rank, 
subgraph->total_graph_size, unit_vsize); gettimeofday(&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d #################### distributing partitions time: ", mst->world_rank); int num_of_cpus = mst->num_of_cpus; int num_of_devices = mst->num_of_devices; dbmeta_t * dbm = (dbmeta_t *) malloc (sizeof(dbmeta_t) * (num_of_devices + num_of_cpus)); pthread_t cpu_threads[NUM_OF_CPUS]; pthread_t gpu_threads[NUM_OF_DEVICES]; pre_arg arg[NUM_OF_DEVICES + NUM_OF_CPUS]; #ifndef SYNC_ALL2ALL_ pthread_t comm_thread; comm_arg cm_arg; cm_arg.mst=mst; #endif uint intra_mssgs[NUM_OF_PROCS*MAX_NUM_ITERATION]; uint inter_mssgs[NUM_OF_PROCS*MAX_NUM_ITERATION]; init_mssg_count (intra_mssgs, inter_mssgs); int i; for (i = 0; i < num_of_devices + num_of_cpus; i++) { arg[i].did = i; arg[i].dbm = &dbm[i]; arg[i].mst = mst; arg[i].k = k; arg[i].p = p; arg[i].js = js; arg[i].ls = ls; arg[i].subgraph = subgraph; #ifndef SYNC_ALL2ALL_ cm_arg.cm[i] = &dbm[i].comm; #endif } create_streams(num_of_devices, 2); init_lock_flag(); //***************** PRE-PROCESSING BEGINS: ***************** gettimeofday(&overs, NULL); uint max_subgraph_size; uint max_lsize; uint max_jsize; int intra_num_of_partitions = mst->num_partitions[num_of_devices + num_of_cpus]; max_ss = get_max (subgraph->subgraphs, NULL, NULL, &max_subgraph_size, &max_jsize, &max_lsize, intra_num_of_partitions, num_of_partitions); gettimeofday (&start, NULL); mst->mssg_size = sizeof(assid_t); // IMPORTANT:::::::::: initiate MAXIMUM message size for message buffer init_device_filter1 (dbm, mst, max_subgraph_size); set_globals_filter1_gpu (dbm, mst); init_host_filter2 (dbm, mst, max_subgraph_size); gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: IIIIIIIIIIIIIIInit filter memory time device1 and host: \n", mst->world_rank); init_device_preprocessing (dbm, mst); set_globals_preprocessing_gpu (dbm, mst); init_host_preprocessing (dbm, mst); for (i=num_of_devices; 
i<num_of_devices+num_of_cpus; i++) { init_preprocessing_data_cpu (&dbm[i], num_of_partitions); } // **************** malloc writing offset buffer for pull and push mode for (i=0; i<num_of_devices; i++) { malloc_pull_push_offset_gpu (&dbm[i].comm.extra_send_offsets, mst, i); set_extra_send_offsets_gpu (&dbm[i].comm.extra_send_offsets, mst, i); } for (i=num_of_devices; i<num_of_devices+num_of_cpus; i++) { malloc_pull_push_offset_cpu(&dbm[i].comm.extra_send_offsets, mst); } gettimeofday (&start, NULL); for (i=0; i<num_of_devices; i++) { // init_hashtab_data_gpu (i, mst, &dbm[i], tbs); #ifdef USE_DISK_IO init_binary_data_gpu (i, mst, &dbm[i], tbs); #else init_binary_data_gpu_sorted (i, mst, &dbm[i], tbs); #endif } for (i=num_of_devices; i<num_of_devices+num_of_cpus; i++) { // init_hashtab_data_cpu (i, mst, &dbm[i], tbs); #ifdef USE_DISK_IO init_binary_data_cpu (i, mst, &dbm[i], tbs); #else init_binary_data_cpu_sorted (i, mst, &dbm[i], tbs); #endif } gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: ++++++++++++++++Init hash table data input time: ++++++++++++++++\n", mst->world_rank); free_dbgraph_hashtab (num_of_partitions, tbs); //************** ADD AN EXTRA STEP:::::::::::: modify edges here: *************** gettimeofday (&inms, NULL); // in-memory processing begins mst->mssg_size = sizeof(shakehands_t); // IMPORTANT:::::::::: RESET message size for message buffer for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, shakehands_push_respond_intra_push_gpu, &arg[i]) != 0) { printf ("create thread for shakehands push respond intra push on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, shakehands_push_respond_intra_push_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for shakehands push respond intra push on cpu %d error!\n", i); } } #ifndef SYNC_ALL2ALL_ if (pthread_create (&comm_thread, NULL, master_all2all_async, 
&cm_arg) != 0) { printf ("Create thread for communication error!\n"); } #endif for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("join thread on shakehands push respond intra push on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on shakehands push respond intra push on cpu %d failure!\n", i); } } #ifndef SYNC_ALL2ALL_ if (pthread_join (comm_thread, NULL) != 0) { printf ("Join communication thread failure!\n"); } #else gettimeofday (&start, NULL); master_all2all(mst); gettimeofday (&end, NULL); all2all_time += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #endif if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: TICKTOCK TICKTOCK TICKTOCK:: master all to all time after listrank push: ", mst->world_rank); // while(debug) {} for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, shakehands_pull_respond_inter_push_intra_pull_gpu, &arg[i]) != 0) { printf ("create thread for shakehands pull respond inter upsh intra pull on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, shakehands_pull_respond_inter_push_intra_pull_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for shakehands pull respond inter push intra pull on cpu %d error!\n", i); } } #ifndef SYNC_ALL2ALL_ if (pthread_create (&comm_thread, NULL, master_all2all_async, &cm_arg) != 0) { printf ("Create thread for communication error!\n"); } #endif for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("join thread on shakehands pull respond inter push intra pull on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on shakehands pull respond inter push intra pull on cpu %d failure!\n", i); } } #ifndef 
SYNC_ALL2ALL_ if (pthread_join (comm_thread, NULL) != 0) { printf ("Join communication thread failure!\n"); } #else gettimeofday (&start, NULL); master_all2all(mst); gettimeofday (&end, NULL); all2all_time += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #endif if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: TICKTOCK TICKTOCK TICKTOCK:: master all to all time after listrank pull modifygraph push: ", mst->world_rank); for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, respond_inter_pull_gpu, &arg[i]) != 0) { printf ("create thread on respond inter pull on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, respond_inter_pull_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for respond inter pull on cpu %d error!\n", i); } } for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on respond inter pull on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on respond inter pull on cpu %d failure!\n", i); } } gettimeofday (&inme, NULL); // in-memory processing end inmemory_time += (float)((inme.tv_sec * 1000000 + inme.tv_usec) - (inms.tv_sec * 1000000 + inms.tv_usec)) / 1000; // ************ free writing offset buffer for pull and push mode for (i=0; i<num_of_devices; i++) free_pull_push_offset_gpu(dbm[i].comm.extra_send_offsets); for (i=num_of_devices; i<num_of_devices+num_of_cpus; i++) free_pull_push_offset_cpu(dbm[i].comm.extra_send_offsets); //************** Reset memory here ************ gettimeofday (&start, NULL); init_device_filter2 (dbm, mst, max_subgraph_size); set_globals_filter2_gpu (dbm, mst); gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: IIIIIIIIIIIIIIInit filter memory time device2: \n", mst->world_rank); 
//*********** FIRST: ASSIGN EACH VERTEX A GLOBAL ID ************** gettimeofday (&inms, NULL); // in-memory processing begins gettimeofday (&start, NULL); if (mst->world_rank == 0) printf("\n++++++++++++++++ Identifying vertices: WORLD RANK %d ++++++++++++++++++\n", world_rank); for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, identify_vertices_gpu, &arg[i]) != 0) { printf ("create thread for hashtab filtering on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, identify_vertices_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for hashtab filtering on cpu %d error!\n", i); } } for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on hashtab filtering on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on hashtab filtering on cpu %d failure!\n", i); } } // ******** allgather id offsets here: ************ gettimeofday (&tmps, NULL); printf ("goffset size: %d\n", sizeof(goffset_t)); mpi_allgatherv_inplace (mst->id_offsets+1 + world_rank*np_per_node, mst->id_offsets + 1, num_of_partitions, world_size, world_rank, sizeof(goffset_t)); mpi_allgatherv_inplace (mst->jid_offset + world_rank*np_per_node, mst->jid_offset, num_of_partitions, world_size, world_rank, sizeof(goffset_t)); gettimeofday (&tmpe, NULL); all2all_time += (float)((tmpe.tv_sec * 1000000 + tmpe.tv_usec) - (tmps.tv_sec * 1000000 + tmps.tv_usec)) / 1000; inclusive_prefix_sum_long (mst->id_offsets, num_of_partitions + 1); ull total_num_junctions = 0; for (i=0; i<num_of_partitions; i++) total_num_junctions += mst->jid_offset[i]; junction_factor = (double)total_num_junctions/(mst->id_offsets[num_of_partitions]-total_num_junctions) + 0.01; printf ("WORLD RANK %d: TOTAL NUMBER OF JUNCITIONS: %u\nJUNCTION FACTOR SET TO BE::::::::: %f\n", mst->world_rank, total_num_junctions, 
junction_factor); set_id_offsets_cpu (dbm, mst); set_id_offsets_gpu (dbm, mst); for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, assign_vertex_ids_gpu, &arg[i]) != 0) { printf ("create thread for assigning vertex ids on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, assign_vertex_ids_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for assigning vertex ids on cpu %d error!\n", i); } } for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on assigning vertex ids on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on assigning vertex ids on cpu %d failure!\n", i); } } gettimeofday (&end, NULL); gettimeofday (&inme, NULL); // inmemory processing end inmemory_time += (float)((inme.tv_sec * 1000000 + inme.tv_usec) - (inms.tv_sec * 1000000 + inms.tv_usec)) / 1000; if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: ++++++++++++++++ Identifying vertex time: ++++++++++++++++++\n", mst->world_rank); gettimeofday (&start, NULL); finalize_device_filter2 (dbm, mst); set_globals_filter2_gpu (dbm, mst); finalize_host_filter2 (dbm, mst); gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: FFFFFFFFFFFfinalize device and host filter time:\n", mst->world_rank); //*********** SECOND: NEIGHBORING WITH VERTEX IDS **************** gettimeofday(&start, NULL); mst->mssg_size = sizeof(assid_t); // IMPORTANT:::::::::: RESET message size for message buffer gettimeofday(&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: IIIIIIIIIIIIIIInitializing pre-processing time: \n", mst->world_rank); // **************** STEP 1 gettimeofday (&inms, NULL); // in-memory processing begins gettimeofday (&start, NULL); if (mst->world_rank == 0) printf("\n++++++++++++++++ 
Identifying neighbors: WORLD RANK %d ++++++++++++++++++\n", world_rank); for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, neighbor_push_intra_pull_gpu, &arg[i]) != 0) { printf ("create thread for NEIGHBORING push on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, neighbor_push_intra_pull_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for NEIGHBORING push on cpu %d error!\n", i); } } #ifndef SYNC_ALL2ALL_ if (pthread_create (&comm_thread, NULL, master_all2all_async, &cm_arg) != 0) { printf ("Create thread for communication error!\n"); } #endif for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on NEIGHBORING push on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on NEIGHBORING push on cpu %d failure!\n", i); } } #ifndef SYNC_ALL2ALL_ if (pthread_join (comm_thread, NULL) != 0) { printf ("Join communication thread failure!\n"); } #else gettimeofday (&tmps, NULL); master_all2all(mst); gettimeofday (&tmpe, NULL); all2all_time += (float)((tmpe.tv_sec * 1000000 + tmpe.tv_usec) - (tmps.tv_sec * 1000000 + tmps.tv_usec)) / 1000; #endif // get_mssg_count(mst, intra_mssgs, inter_mssgs, 0); for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, neighbor_inter_pull_gpu, &arg[i]) != 0) { printf ("create thread for NEIGHBORING pull on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, neighbor_inter_pull_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for NEIGHBORING pull on cpu %d error!\n", i); } } for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on NEIGHBORING pull on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { 
printf ("Join thread on NEIGHBORING pull on cpu %d failure!\n", i); } } gettimeofday (&end, NULL); gettimeofday (&inme, NULL); // in-memory processing end inmemory_time += (float)((inme.tv_sec * 1000000 + inme.tv_usec) - (inms.tv_sec * 1000000 + inms.tv_usec)) / 1000; if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: ++++++++++++++++Neighboring time: ++++++++++++++++++\n", mst->world_rank); gettimeofday(&start, NULL); finalize_device_preprocessing (dbm, mst); finalize_host_preprocessing (dbm, mst); finalize_preprocessing_data_cpu (); finalize_receive_all2all(world_size); gettimeofday(&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: FFFFFFFFFFFFFFFFFFFinalizing pre-processing time: \n", mst->world_rank); //*********** THIRD: GARTHER VERTICES INTO VERTEX ARRAYS ************* gettimeofday (&start, NULL); get_max (subgraph->subgraphs, mst->jid_offset, mst->id_offsets, &max_subgraph_size, &max_jsize, &max_lsize, intra_num_of_partitions, num_of_partitions); if (mst->world_rank == 0) printf ("WORLD RANK %d: max subgraph size: %u, max junction size: %u, max linear vertex size: %u\n", world_rank, max_subgraph_size, max_jsize, max_lsize); gmax_jsize = max_jsize; gmax_lsize = max_lsize; init_write_buffer (num_of_devices); init_device_gather (dbm, mst, max_subgraph_size, max_jsize, max_lsize); set_globals_gather_gpu (dbm, mst); init_host_gather (dbm, mst, max_subgraph_size, max_jsize, max_lsize); for (i=num_of_devices; i<num_of_devices+num_of_cpus; i++) { reset_globals_gather_cpu (&dbm[i]); } gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: IIIIIIIIIIIIIIInitializing gathering time: \n", world_rank); gettimeofday (&inms, NULL); // in-memory processing begins gettimeofday (&start, NULL); if (mst->world_rank == 0) printf("\n++++++++++++++++ Gather vertices: WORLD RANK %d ++++++++++++++++++\n", world_rank); for (i = 0; i < num_of_devices; i++) { if (pthread_create 
(&gpu_threads[i], NULL, gather_vertices_gpu, &arg[i]) != 0) { printf ("create thread for hashtab filtering on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, gather_vertices_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for hashtab filtering on cpu %d error!\n", i); } } for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on hashtab filtering on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on hashtab filtering on cpu %d failure!\n", i); } } gettimeofday (&end, NULL); gettimeofday (&inme, NULL); // in-memory processing end inmemory_time += (float)((inme.tv_sec * 1000000 + inme.tv_usec) - (inms.tv_sec * 1000000 + inms.tv_usec)) / 1000; if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: ++++++++++++++++Gathering vertex array time: ++++++++++++++++++\n", mst->world_rank); gettimeofday (&start, NULL); finalize_device_gather2 (dbm, mst); finalize_host_gather2 (dbm, mst); finalize_distribute_partitions (mst); // print_mssg_count(mst->num_of_cpus+mst->num_of_devices, intra_mssgs, inter_mssgs, 0); gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: FFFFFFFFFFFFFFFFFFFinalizing gathering time: \n", mst->world_rank); gettimeofday(&overe, NULL); print_exec_time(overs, overe, "WORLD RANK %d: ***********************Overall PRE-PROCESSING time: \n", mst->world_rank); finalize_write_buffer (num_of_devices); // print graph statistics: uint total_num_nodes = mst->id_offsets[num_of_partitions]; if (mst->world_rank==0) { printf ("WORLD RANK %d: &&&&&&&&&&&&&&& Total number of valid nodes in de bruijn graph: %lu\n", mst->world_rank, total_num_nodes); printf("WORLD RANK %d:TTTTTTTTTTTTTTTTTTTTIMING: within that:\n", mst->world_rank); printf ("WORLD RANK %d:~~~~~~~~~~~~~~~~ ALLTOALL TIME MEASURED: %f\n", 
mst->world_rank, all2all_time); } free (dbm); destroy_streams(num_of_devices, 2); } }
882229425ae97321ff003682eabe2cc330f4a153.cu
/* * pre_process.cu * * Created on: 2018-3-28 * Author: qiushuang * * This file preprocesses De Bruijn graph: removing one-directed edge, indexing vertices with their location index, * using new index to replace the neighbors of vertices, splitting and gathering vertices to junctions and linear vertices */ //#include <cub/cub.cuh> #include <pthread.h> #include "../include/dbgraph.h" #include "../include/comm.h" #include "../include/distribute.h" #include "../include/share.h" #include "malloc.cuh" #include "preprocess.cuh" #include "../include/scan.cu" //#define SYNC_ALL2ALL_ static uint * size_prime_index_ptr; static uint * size_prime_index_host; extern float elem_factor; voff_t max_ss = 0; extern int cutoff; float push_offset_time[NUM_OF_PROCS] = {0,0,0,0,0}; float push_time[NUM_OF_PROCS] = {0,0,0,0,0}; float pull_intra_time[NUM_OF_PROCS] = {0,0,0,0,0}; float pull_inter_time[NUM_OF_PROCS] = {0,0,0,0,0}; float memcpydh_time[NUM_OF_PROCS] = {0,0,0,0,0}; float memcpyhd_time[NUM_OF_PROCS] = {0,0,0,0,0}; float over_time[NUM_OF_PROCS] = {0,0,0,0,0}; extern float all2all_time_async; extern int lock_flag[NUM_OF_PROCS]; extern double mssg_factor; double junction_factor = 0; extern float inmemory_time; extern uint gmax_lsize; extern uint gmax_jsize; extern "C" { void init_hashtab_data_gpu (int did, master_t * mst, dbmeta_t * dbm, dbtable_t * tbs) { int * num_partitions = mst->num_partitions; int * partition_list = mst->partition_list; int num_of_partitions = num_partitions[did+1]-num_partitions[did];// number of partitions in this processor int total_num_partitions = mst->total_num_partitions; // total number of partitions in this compute node voff_t * index_offset = mst->index_offset[did]; int world_size = mst->world_size; int world_rank = mst->world_rank; int np_per_node = (total_num_partitions + world_size - 1)/world_size; int start_partition_id = np_per_node*world_rank; #ifdef SINGLE_NODE int num_of_devices = mst->num_of_devices; CUDA_CHECK_RETURN(cudaSetDevice 
(world_rank * num_of_devices + did)); #else CUDA_CHECK_RETURN(cudaSetDevice (did + DEVICE_SHIFT)); #endif CUDA_CHECK_RETURN (cudaMalloc (&size_prime_index_ptr, sizeof(uint) * num_of_partitions)); CUDA_CHECK_RETURN (cudaMemcpyToSymbol (size_prime_index, &size_prime_index_ptr, sizeof(uint*))); size_prime_index_host = (uint *) malloc (sizeof(uint) * num_of_partitions); CHECK_PTR_RETURN (size_prime_index_host, "malloc size_prime_index_host error!\n"); int i; voff_t offset = 0; for (i=0; i<num_of_partitions; i++) { int poffset = num_partitions[did]; int pid = partition_list[poffset+i] - start_partition_id;//!!!be careful here, this pid is not the global partition id int pindex = mst->id2index[did][pid + start_partition_id]; if (pindex != i) { printf ("ERROR IN DISTRIBUTING PARTITIONS!!!!!!!!\n"); // exit(0); } voff_t size = tbs[pid].size; CUDA_CHECK_RETURN (cudaMemcpy(dbm->comm.send, tbs[pid].buf, sizeof(entry_t) * size, cudaMemcpyHostToDevice)); int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES; int block_size = num_of_blocks > MAX_NUM_BLOCKS ? 
MAX_NUM_BLOCKS : num_of_blocks; init_hashtab <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, offset); // init_hashtab_gpu <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, offset); index_offset[i] = offset; offset += size; uint num_of_elems = tbs[pid].num_elems; size_prime_index_host[i] = higher_prime_index (num_of_elems * elem_factor); free (tbs[pid].buf); } index_offset[i] = offset; CUDA_CHECK_RETURN (cudaMemcpy(size_prime_index_ptr, size_prime_index_host, sizeof(uint) * num_of_partitions, cudaMemcpyHostToDevice)); // printf ("index offset on GPU %d: \n", did); // print_offsets(index_offset[i], num_of_partitions); } void finalize_hashtab_data_gpu (void) { cudaFree (size_prime_index_ptr); free (size_prime_index_host); } void d2h_mem (ull * dkmers, vid_t * dvids, ull * hkmers, vid_t * hvids, pair_t * pairs, uint size) { CUDA_CHECK_RETURN (cudaMemcpy(hkmers, dkmers, sizeof(ull) * size, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN (cudaMemcpy(hvids, dvids, sizeof(vid_t) * size, cudaMemcpyDeviceToHost)); uint i; for (i=0; i<size; i++) { pairs[i].kmer = hkmers[i]; pairs[i].vid = hvids[i]; } } void d2h_mem2 (kmer_t * dkmers, vid_t * dvids, kmer_t * hkmers, vid_t * hvids, kmer_vid_t * pairs, uint size) { CUDA_CHECK_RETURN (cudaMemcpy(hkmers, dkmers, sizeof(kmer_t) * size, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN (cudaMemcpy(hvids, dvids, sizeof(vid_t) * size, cudaMemcpyDeviceToHost)); uint i; for (i=0; i<size; i++) { pairs[i].kmer = hkmers[i]; pairs[i].vid = hvids[i]; } } void h2d_mem (ull * dkmers, vid_t * dvids, ull * hkmers, vid_t * hvids, pair_t * pairs, uint size) { uint i; for (i=0; i<size; i++) { hkmers[i] = pairs[i].kmer; hvids[i] = pairs[i].vid; } CUDA_CHECK_RETURN (cudaMemcpy(dkmers, hkmers, sizeof(ull) * size, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN (cudaMemcpy(dvids, hvids, sizeof(vid_t) * size, cudaMemcpyHostToDevice)); } void h2d_mem2 (kmer_t * dkmers, vid_t * dvids, kmer_t * hkmers, vid_t * hvids, kmer_vid_t * pairs, uint size) { uint i; for (i=0; 
i<size; i++) /* continuation of h2d_mem2's copy loop (head on previous line) */
    {
        hkmers[i] = pairs[i].kmer;
        hvids[i] = pairs[i].vid;
    }
    CUDA_CHECK_RETURN (cudaMemcpy(dkmers, hkmers, sizeof(kmer_t) * size, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN (cudaMemcpy(dvids, hvids, sizeof(vid_t) * size, cudaMemcpyHostToDevice));
}

// Upload each of this device's partitions, build sorted (kmer, vid) arrays on
// the GPU, and gather the edges. Fills mst->index_offset[did] with the
// cumulative per-partition offsets. Sorting is done either with CUB radix sort
// on-device (USE_CUB_) or by round-tripping through the host with a TBB sort.
void init_binary_data_gpu (int did, master_t * mst, dbmeta_t * dbm, dbtable_t * tbs)
{
    int * num_partitions = mst->num_partitions;
    int * partition_list = mst->partition_list;
    int num_of_partitions = num_partitions[did+1]-num_partitions[did];// number of partitions in this processor
    int total_num_partitions = mst->total_num_partitions; // total number of partitions in this compute node
    voff_t * index_offset = mst->index_offset[did];
    int world_size = mst->world_size;
    int world_rank = mst->world_rank;
    int np_per_node = (total_num_partitions + world_size - 1)/world_size;
    int start_partition_id = np_per_node*world_rank;
#ifdef SINGLE_NODE
    int num_of_devices = mst->num_of_devices;
    CUDA_CHECK_RETURN(cudaSetDevice (world_rank * num_of_devices + did));
#else
    CUDA_CHECK_RETURN(cudaSetDevice (did + DEVICE_SHIFT));
#endif
    int i;
    uint offset = 0;
    // Host staging buffers sized for the largest subgraph (max_ss, file-global).
    kmer_vid_t * pairs = (kmer_vid_t *) malloc (sizeof(kmer_vid_t) * max_ss);
    kmer_t * hkmers = (kmer_t *) malloc (sizeof(kmer_t) * max_ss);
    vid_t * hvids = (vid_t *) malloc (sizeof(vid_t) * max_ss);
#ifdef USE_CUB_
    // First SortPairs call only queries the required temp-storage size.
    void * dtmp;
    size_t temp_size = 0;
    cub::DeviceRadixSort::SortPairs (dtmp, temp_size, dbm->before_sort, dbm->sorted_kmers, dbm->before_vids, dbm->sorted_vids, max_ss, 0, sizeof(ull) * 8);
    printf ("max subsize: %u, cub device temp size for sort:%lu\n", max_ss, temp_size);
    CUDA_CHECK_RETURN (cudaMalloc(&dtmp, temp_size));
#endif
    for (i=0; i<num_of_partitions; i++)
    {
        int poffset = num_partitions[did];
        int pid = partition_list[poffset+i] - start_partition_id;//!!!be careful here, this pid is not the global partition id
        int pindex = mst->id2index[did][pid + start_partition_id];
        if (pindex != i)
        {
            printf ("ERROR IN DISTRIBUTING PARTITIONS!!!!!!!!\n");
            // exit(0);
        }
        voff_t size = tbs[pid].size;
        CUDA_CHECK_RETURN (cudaMemset (dbm->lvld, 0, sizeof(voff_t) * (size+1)));
        CUDA_CHECK_RETURN (cudaMemcpy(dbm->comm.send, tbs[pid].buf, sizeof(entry_t) * size, cudaMemcpyHostToDevice));
        int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        // Mark valid kmers into dbm->lvld, then compact: the scan of lvld
        // yields both the gather positions and (last element) the valid count.
        init_kmers <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, offset);
        inclusive_scan<voff_t> (dbm->lvld + 1, size, NULL);
        CUDA_CHECK_RETURN (cudaMemcpy (&offset, &dbm->lvld[size], sizeof(voff_t), cudaMemcpyDeviceToHost));
        gather_kmers <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, offset);
#ifdef USE_CUB_
        cub::DeviceRadixSort::SortPairs (dtmp, temp_size, dbm->before_sort, dbm->sorted_kmers+index_offset[i], dbm->before_vids, dbm->sorted_vids+index_offset[i], offset, 0, sizeof(ull) * 8);
#endif
        if (offset > max_ss)
        {
            // Staging buffers would overflow; NOTE(review): only reported, not handled.
            printf ("error!!!!!!\n");
            // exit(0);
        }
#ifndef USE_CUB_
        // Fallback path: sort on the host via TBB, round-tripping the pairs.
        d2h_mem2 (dbm->before_sort, dbm->before_vids, hkmers, hvids, pairs, offset);
        tbb_kmer_vid_sort (pairs, offset); // sort the kmers with the vertex ids
        h2d_mem2 (dbm->sorted_kmers + index_offset[i], dbm->sorted_vids + index_offset[i], hkmers, hvids, pairs, offset);
#endif
        num_of_blocks = (offset + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        gather_edges <<<block_size, THREADS_PER_BLOCK_NODES>>> (offset, index_offset[i]); // gather edges with sorted vertices
        free (tbs[pid].buf);
        index_offset[i+1] = index_offset[i] + offset; // cumulative offsets
    }
#ifdef USE_CUB_
    cudaFree(dtmp);
#endif
    free (pairs);
    free (hkmers);
    free (hvids);
}

// Variant of init_binary_data_gpu for input tables that are already sorted:
// just upload each partition and gather the vertices, no compaction/sort pass.
void init_binary_data_gpu_sorted (int did, master_t * mst, dbmeta_t * dbm, dbtable_t * tbs)
{
    int * num_partitions = mst->num_partitions;
    int * partition_list = mst->partition_list;
    int num_of_partitions = num_partitions[did+1]-num_partitions[did];// number of partitions in this processor
    int total_num_partitions = mst->total_num_partitions; // total number of partitions in this compute node
    voff_t * index_offset = mst->index_offset[did];
    int world_size = mst->world_size;
    int world_rank = mst->world_rank;
    int np_per_node = (total_num_partitions + world_size - 1)/world_size;
    int start_partition_id = np_per_node*world_rank;
#ifdef SINGLE_NODE
    int num_of_devices = mst->num_of_devices;
    CUDA_CHECK_RETURN(cudaSetDevice (world_rank * num_of_devices + did));
#else
    CUDA_CHECK_RETURN(cudaSetDevice (did + DEVICE_SHIFT));
#endif
    int i;
    for (i=0; i<num_of_partitions; i++)
    {
        int poffset = num_partitions[did];
        int pid = partition_list[poffset+i] - start_partition_id;//!!!be careful here, this pid is not the global partition id
        int pindex = mst->id2index[did][pid + start_partition_id];
        if (pindex != i)
        {
            printf ("ERROR IN DISTRIBUTING PARTITIONS!!!!!!!!\n");
        }
        voff_t size = tbs[pid].size;
        CUDA_CHECK_RETURN (cudaMemcpy(dbm->comm.send, tbs[pid].buf, sizeof(entry_t) * size, cudaMemcpyHostToDevice));
        int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ?
MAX_NUM_BLOCKS : num_of_blocks; /* continuation: tail of init_binary_data_gpu_sorted's loop */
        gather_vs <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, index_offset[i]); // gather edges with sorted vertices
        free (tbs[pid].buf);
        index_offset[i+1] = index_offset[i] + size; // cumulative offsets
    }
}

// Pthread entry: "assign ids" phase, step 1. For every local partition, count
// (offset pass) and then write (push pass) the id-assignment messages into the
// per-partition send buffer; finally start copying the inter-processor slice
// of that buffer back to the host, and — when INTER_BUF_FACTOR == 1 — pull the
// intra-processor messages immediately.
void * neighbor_push_intra_pull_gpu (void * arg)
{
    pre_arg * garg = (pre_arg *) arg;
    int did = garg->did;
    comm_t * cm = &garg->dbm->comm;
    master_t * mst = garg->mst;
    int k = garg->k;
    int p = garg->p;
    int total_num_partitions = mst->total_num_partitions;
    int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did];
    voff_t * index_offset = mst->index_offset[did];
    if (mst->world_rank == 0)
        printf ("WORLD RANK %d: Neigbhors push intra pull gpu %d:\n", mst->world_rank, did);
#ifdef SINGLE_NODE
    int world_rank = mst->world_rank;
    int num_of_devices = mst->num_of_devices;
    CUDA_CHECK_RETURN(cudaSetDevice (world_rank * num_of_devices + did));
#else
    CUDA_CHECK_RETURN(cudaSetDevice (did + DEVICE_SHIFT));
#endif
    CUDA_CHECK_RETURN (cudaMemset(cm->send_offsets, 0, sizeof(voff_t) * (total_num_partitions + 1)));
#ifdef MEASURE_TIME_
    evaltime_t start, end;
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&start, NULL);
#endif
    int i;
    // Pass 1: per-partition message counts accumulated into cm->send_offsets.
    for (i=0; i<num_of_partitions; i++)
    {
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        // push_mssg_offset_assign_id <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, total_num_partitions, index_offset[i], k, p);
        // push_mssg_offset_assign_id_gpu <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, total_num_partitions, index_offset[i], k, p);
        push_mssg_offset_assign_id_binary <<<block_size, THREADS_PER_BLOCK_NODES, 0, streams[did][0]>>> (size, total_num_partitions, index_offset[i], k, p, cutoff);
    }
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&end, NULL);
    push_offset_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH MSSG OFFSET FOR GPU *ASSIGNING IDS* INTRA PROCESSOR TIME: ");
#endif
    // Counts -> exclusive write positions for the push pass.
    inclusive_scan<voff_t> (cm->send_offsets, total_num_partitions + 1, NULL);
    CUDA_CHECK_RETURN (cudaMemset(cm->tmp_send_offsets, 0, sizeof(voff_t) * (total_num_partitions + 1)));
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&start, NULL);
#endif
    // Pass 2: actually write the messages.
    for (i=0; i< num_of_partitions; i++)
    {
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ?
MAX_NUM_BLOCKS : num_of_blocks; /* continuation: push pass of neighbor_push_intra_pull_gpu */
        push_mssg_assign_id_binary <<<block_size, THREADS_PER_BLOCK_NODES, 0, streams[did][0]>>> (size, total_num_partitions, index_offset[i], k, p, cutoff);
    }
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&end, NULL);
    push_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH MSSG FOR GPU *ASSIGNING IDS* INTRA PROCESSOR TIME: ");
#endif
    // Bring the per-partition offsets to the host to size the inter slice:
    // [0, num_of_partitions) is local, [num_of_partitions, total) goes to peers.
    CUDA_CHECK_RETURN (cudaMemcpy(mst->roff[did], cm->send_offsets, sizeof(voff_t) * (total_num_partitions + 1), cudaMemcpyDeviceToHost));
    voff_t inter_start = mst->roff[did][num_of_partitions];
    voff_t inter_end = mst->roff[did][total_num_partitions];
#ifndef SYNC_ALL2ALL_
    // Signal the all2all thread that this device's buffer is being staged.
    if (atomic_set_value(&lock_flag[did], 1, 0) == false)
        printf ("!!!!!!!!!!! CAREFUL: ATOMIC SET VALUE ERROR IN GPU %d\n", did);
#endif
#ifdef MEASURE_MEMCPY_
    gettimeofday(&start, NULL);
    CUDA_CHECK_RETURN (cudaMemcpy(mst->receive[did], (assid_t *)cm->send + inter_start, sizeof(assid_t) * (inter_end-inter_start), cudaMemcpyDeviceToHost));
    gettimeofday(&end, NULL);
    memcpydh_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
#else
#ifndef SYNC_ALL2ALL_
    // Async copy on stream 1 overlaps with the intra pull below on stream 0.
    CUDA_CHECK_RETURN (cudaMemcpyAsync(mst->receive[did], (assid_t *)cm->send + inter_start, sizeof(assid_t) * (inter_end-inter_start), cudaMemcpyDeviceToHost, streams[did][1]));
#else
    CUDA_CHECK_RETURN (cudaMemcpy(mst->receive[did], (assid_t *)cm->send + inter_start, sizeof(assid_t) * (inter_end-inter_start), cudaMemcpyDeviceToHost));
#endif
#endif
    if (INTER_BUF_FACTOR == 1)
    {
#ifdef MEASURE_TIME_
        CUDA_CHECK_RETURN (cudaDeviceSynchronize());
        gettimeofday (&start, NULL);
#endif
        // Pull the intra-processor messages straight from the send buffer.
        for (i=0; i<num_of_partitions; i++)
        {
            int poffset = mst->num_partitions[did];
            int pid = mst->partition_list[poffset+i];
            voff_t num_mssgs = mst->roff[did][i+1] - mst->roff[did][i];
            voff_t size = index_offset[i+1] - index_offset[i];
            int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
            int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
            pull_mssg_assign_id_binary <<<block_size, THREADS_PER_BLOCK_NODES, 0, streams[did][0]>>> (num_mssgs, pid, size, index_offset[i], total_num_partitions, 0, 1, did);
        }
#ifdef MEASURE_TIME_
        CUDA_CHECK_RETURN (cudaDeviceSynchronize());
        gettimeofday (&end, NULL);
        pull_intra_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
        print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PULL MSSG FOR GPU %d LISTRANKING INTRA PROCESSOR TIME: ", did);
#endif
    }
    return ((void *) 0);
}

// Pthread entry: "assign ids" phase, step 2. Upload the messages received from
// other processors (staged by the host all2all) and pull them into this
// device's partitions.
void * neighbor_inter_pull_gpu (void * arg)
{
    pre_arg * garg = (pre_arg *) arg;
    int did = garg->did;
    comm_t * cm = &garg->dbm->comm;
    master_t * mst = garg->mst;
    if (mst->world_rank == 0)
        printf ("WORLD RANK %d: neighbor inter pull gpu %d:\n", mst->world_rank, did);
    int total_num_partitions = mst->total_num_partitions;
    int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did];
    voff_t * index_offset = mst->index_offset[did];
#ifdef SINGLE_NODE
    int world_rank = mst->world_rank;
    int num_of_devices = mst->num_of_devices;
    CUDA_CHECK_RETURN(cudaSetDevice (world_rank * num_of_devices + did));
#else
    CUDA_CHECK_RETURN(cudaSetDevice (did + DEVICE_SHIFT));
#endif
    // Received messages are appended after the intra slice of cm->send.
    voff_t receive_start = mst->roff[did][num_of_partitions];
#ifdef MEASURE_MEMCPY_
    evaltime_t start, end;
    gettimeofday(&start, NULL);
#endif
    CUDA_CHECK_RETURN (cudaMemcpy(cm->receive_offsets, mst->soff[did], sizeof(voff_t) * (num_of_partitions + 1), cudaMemcpyHostToDevice));
    voff_t inter_size = mst->soff[did][num_of_partitions];
    if (inter_size == 0)
        return ((void *) 0); // nothing received from peers
    // tbb_assid_sort ((assid_t *)(mst->send[did]), inter_size);
    CUDA_CHECK_RETURN (cudaMemcpy((assid_t*)cm->send + receive_start, mst->send[did], sizeof(assid_t) * inter_size, cudaMemcpyHostToDevice));
#ifdef MEASURE_MEMCPY_
gettimeofday(&end, NULL); /* continuation: MEASURE_MEMCPY_ timing in neighbor_inter_pull_gpu */
    memcpyhd_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
#endif
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&start, NULL);
#endif
    int i;
    // Pull the uploaded inter-processor messages into each local partition.
    for (i=0; i<num_of_partitions; i++)
    {
        int poffset = mst->num_partitions[did];
        int pid = mst->partition_list[poffset+i];
        voff_t size = index_offset[i+1] - index_offset[i];
        voff_t num_mssgs = mst->soff[did][i+1] - mst->soff[did][i];
        int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        pull_mssg_assign_id_binary <<<block_size, THREADS_PER_BLOCK_NODES>>> (num_mssgs, pid, size, index_offset[i], total_num_partitions, receive_start, 0, did);
    }
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&end, NULL);
    pull_inter_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PULL MSSG FOR GPU %d *ASSIGNING IDS* INTER PROCESSORS TIME: ", did);
#endif
    return ((void *) 0);
}

// Pthread entry: classify vertices per partition. Flags junction vs linear
// vertices (jvld / lvld), scans the flags to count them, and records the
// counts in mst->jid_offset / mst->id_offsets.
void * identify_vertices_gpu (void * arg)
{
    pre_arg * garg = (pre_arg *) arg;
    master_t * mst = garg->mst;
    dbmeta_t * dbm = garg->dbm;
    int did = garg->did;
    if (mst->world_rank == 0)
        printf ("WORLD RANK %d: CPU identifying vertices DID=%d:\n", mst->world_rank, did);
#ifdef SINGLE_NODE
    int world_rank = mst->world_rank;
    int num_of_devices = mst->num_of_devices;
    CUDA_CHECK_RETURN(cudaSetDevice (world_rank * num_of_devices + did));
#else
    CUDA_CHECK_RETURN(cudaSetDevice (did + DEVICE_SHIFT));
#endif
    int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did];
    voff_t * index_offset = mst->index_offset[did];
    // NOTE(review): return value of this cudaMemset is not checked, unlike the
    // CUDA_CHECK_RETURN-wrapped calls below.
    cudaMemset (dbm->lvld, 0, sizeof(voff_t) * (max_ss+1));
    int i;
#ifdef MEASURE_TIME_
    evaltime_t start, end;
    gettimeofday (&start, NULL);
#endif
    for (i=0; i<num_of_partitions; i++)
    {
        int poffset = mst->num_partitions[did];
        int pid = mst->partition_list[poffset+i];
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        CUDA_CHECK_RETURN (cudaMemset(dbm->jvld, 0, sizeof(uint) * size));
        CUDA_CHECK_RETURN (cudaMemset(dbm->lvld, 0, sizeof(uint) * size));
        // label_vertex_with_flags <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, index_offset[i]);
        label_vertex_with_flags_binary <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, index_offset[i], cutoff);
        // inclusive_scan<uint> (dbm->jvld + index_offset[i], size, NULL);
        // inclusive_scan<uint> (dbm->lvld + index_offset[i], size, NULL);
        inclusive_scan<uint> (dbm->jvld, size, NULL);
        inclusive_scan<uint> (dbm->lvld, size, NULL);
        // Last scanned element = total flag count for this partition.
        voff_t jsize, lsize;
        // CUDA_CHECK_RETURN (cudaMemcpy(&jsize, &(dbm->jvld + index_offset[i])[size-1], sizeof(voff_t), cudaMemcpyDeviceToHost));
        // CUDA_CHECK_RETURN (cudaMemcpy(&lsize, &(dbm->lvld + index_offset[i])[size-1], sizeof(voff_t), cudaMemcpyDeviceToHost));
        CUDA_CHECK_RETURN (cudaMemcpy(&jsize, &(dbm->jvld)[size-1], sizeof(voff_t), cudaMemcpyDeviceToHost));
        CUDA_CHECK_RETURN (cudaMemcpy(&lsize, &(dbm->lvld)[size-1], sizeof(voff_t), cudaMemcpyDeviceToHost));
        mst->jid_offset[pid] = jsize;                 // junction count for pid
        mst->id_offsets[pid+1] = jsize + lsize;       // total vertex count for pid
    }
#ifdef MEASURE_TIME_
    gettimeofday (&end, NULL);
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& IDENTIFYING IDS OF VERTICES TIME: ");
#endif
    return ((void *)0);
}

// Pthread entry: re-run the labeling/scan per partition and assign the final
// vertex ids on the device (assid_vertex_with_flags).
void * assign_vertex_ids_gpu (void * arg)
{
    pre_arg * garg = (pre_arg *) arg;
    master_t * mst = garg->mst;
    int did = garg->did;
    dbmeta_t * dbm = garg->dbm;
    if (mst->world_rank == 0)
        printf ("WORLD RANK %d: CPU assigning vertex ids DID = %d:\n", mst->world_rank, did);
    int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did];
    voff_t * index_offset = mst->index_offset[did];
#ifdef SINGLE_NODE
    int world_rank =
mst->world_rank; /* continuation: SINGLE_NODE device selection in assign_vertex_ids_gpu */
    int num_of_devices = mst->num_of_devices;
    CUDA_CHECK_RETURN(cudaSetDevice (world_rank * num_of_devices + did));
#else
    CUDA_CHECK_RETURN(cudaSetDevice (did + DEVICE_SHIFT));
#endif
    int i;
#ifdef MEASURE_TIME_
    evaltime_t start, end;
    gettimeofday (&start, NULL);
#endif
    for (i=0; i<num_of_partitions; i++)
    {
        int poffset = mst->num_partitions[did];
        int pid = mst->partition_list[poffset+i];
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        CUDA_CHECK_RETURN (cudaMemset(dbm->jvld, 0, sizeof(uint) * size));
        CUDA_CHECK_RETURN (cudaMemset(dbm->lvld, 0, sizeof(uint) * size));
        // Same label+scan sequence as identify_vertices_gpu, then the ids are
        // written using pid's global offset.
        label_vertex_with_flags_binary <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, index_offset[i], cutoff);
        inclusive_scan<uint> (dbm->jvld, size, NULL);
        inclusive_scan<uint> (dbm->lvld, size, NULL);
        assid_vertex_with_flags <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, pid, index_offset[i]);
    }
#ifdef MEASURE_TIME_
    gettimeofday (&end, NULL);
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& ASSIGNING IDS OF VERTICES TIME: ");
#endif
    return ((void *)0);
}

// Pthread entry: gather the labeled vertices of every local partition into
// junction (js) and linear (ls) output structures and write them (plus kmers
// and edges) out via the output_* / write_* helpers.
void * gather_vertices_gpu (void * arg)
{
    pre_arg * garg = (pre_arg *) arg;
    master_t * mst = garg->mst;
    dbmeta_t * dbm = garg->dbm;
    int k = garg->k;
    int p = garg->p;
    d_jvs_t * js = garg->js;
    d_lvs_t * ls = garg->ls;
    ull * js_spids = garg->dbm->djs.spids;
    ull * js_spidsr = garg->dbm->djs.spidsr;
    uint * ls_spids = garg->dbm->dls.spids;
    subgraph_t * subgraph = garg->subgraph;
    int did = garg->did;
    printf ("identifying vertices gpu %d:\n", did);
    int total_num_partitions = mst->total_num_partitions;
    int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did];
    voff_t * index_offset = mst->index_offset[did];
#ifdef SINGLE_NODE
    int world_rank = mst->world_rank;
    int num_of_devices = mst->num_of_devices;
    CUDA_CHECK_RETURN(cudaSetDevice (world_rank * num_of_devices + did));
#else
    CUDA_CHECK_RETURN(cudaSetDevice (did + DEVICE_SHIFT));
#endif
    int i;
#ifdef MEASURE_TIME_
    evaltime_t start, end;
    gettimeofday (&start, NULL);
#endif
    for (i=0; i<num_of_partitions; i++)
    {
        int poffset = mst->num_partitions[did];
        int pid = mst->partition_list[poffset+i];
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        // gather_vertex_binary <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, pid, index_offset[i], cutoff);
        // Clear the shared-partition-id bitmaps (sized by file-global
        // gmax_jsize/gmax_lsize) before gathering.
        CUDA_CHECK_RETURN (cudaMemset (js_spids, 0, sizeof(ull) * gmax_jsize));
        CUDA_CHECK_RETURN (cudaMemset (js_spidsr, 0, sizeof(ull) * gmax_jsize));
        CUDA_CHECK_RETURN (cudaMemset (ls_spids, 0, sizeof(uint) * gmax_lsize));
        gather_vertex_partitioned <<<block_size, THREADS_PER_BLOCK_NODES>>> (size, pid, index_offset[i], cutoff, k, p, total_num_partitions);
        uint jsize = mst->jid_offset[pid];
        uint lsize = mst->id_offsets[pid+1] - mst->id_offsets[pid] - jsize;
        // write_junctions_gpu (dbm, mst, jsize, lsize, pid, total_num_partitions, did);
        // write_linear_vertices_gpu (dbm, mst, jsize, lsize, pid, total_num_partitions, did);
        output_vertices_gpu (dbm, mst, jsize, lsize, pid, total_num_partitions, did, js, ls, subgraph);
        write_kmers_edges_gpu (dbm, mst, jsize, lsize, pid, total_num_partitions, did);
    }
#ifdef MEASURE_TIME_
    gettimeofday (&end, NULL);
    print_exec_time (start, end, "DEVICE %d: &&&&&&&&&&&&&&&&&&&& GATHERING VERTICES TIME: ", did);
#endif
    // printf ("DEVICE %d: NUMBER OF VERTICES PROCESSED: %u\n", did, index_offset[num_of_partitions]);
    // write_ids_gpu (dbm, mst, num_of_partitions, did);
    return ((void *)0);
}

// Pthread entry: "shakehands" phase, step 1. Push shakehand messages for every
// local partition, stage the inter-processor slice for the host all2all, and
// start counting the respond messages triggered by the intra slice.
void * shakehands_push_respond_intra_push_gpu (void * arg)
{
    evaltime_t overs, overe;
    pre_arg * garg = (pre_arg *) arg;
    int did = garg->did;
    comm_t * cm = &garg->dbm->comm;
    master_t * mst = garg->mst;
    int k = garg->k;
    int p = garg->p;
    if (mst->world_rank == 0)
        printf
("WORLD RANK %d: Shakehands push respond intra push gpu %d:\n", mst->world_rank, did); /* continuation of the printf opened on the previous line */
#ifdef SINGLE_NODE
    int num_of_devices = mst->num_of_devices;
    int world_rank = mst->world_rank;
    CUDA_CHECK_RETURN(cudaSetDevice (world_rank * num_of_devices + did));
#else
    CUDA_CHECK_RETURN(cudaSetDevice (did + DEVICE_SHIFT));
#endif
    gettimeofday (&overs, NULL); // overall phase timer
    int total_num_partitions = mst->total_num_partitions;
    int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did];
    int poffset = mst->num_partitions[did];
    voff_t * index_offset = mst->index_offset[did];
    CUDA_CHECK_RETURN (cudaMemset(cm->send_offsets, 0, sizeof(voff_t) * (total_num_partitions + 1)));
#ifdef MEASURE_TIME_
    evaltime_t start, end;
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&start, NULL);
#endif
    int i;
    // Pass 1: count shakehand messages per destination partition. The dynamic
    // shared memory holds one counter per partition (vid_t each).
    for (i=0; i<num_of_partitions; i++)
    {
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        push_mssg_offset_shakehands <<<block_size, THREADS_PER_BLOCK_NODES, sizeof(vid_t)*(total_num_partitions+1), streams[did][0]>>> (size, total_num_partitions, index_offset[i], k, p, cutoff);
    }
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&end, NULL);
    push_offset_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH MSSG OFFSET FOR GPU *SHAKEHANDS* INTRA PROCESSOR TIME: ");
#endif
    inclusive_scan<voff_t> (cm->send_offsets, total_num_partitions+1, NULL);
    CUDA_CHECK_RETURN (cudaMemset(cm->tmp_send_offsets, 0, sizeof(voff_t) * (total_num_partitions + 1)));
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&start, NULL);
#endif
    // Pass 2: write the shakehand messages.
    for (i=0; i< num_of_partitions; i++)
    {
        int pid = mst->partition_list[poffset+i];
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (size + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        push_mssg_shakehands <<<block_size, THREADS_PER_BLOCK_NODES, sizeof(vid_t)*(total_num_partitions+1), streams[did][0]>>> (size, total_num_partitions, index_offset[i], k, p, pid, cutoff);
    }
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&end, NULL);
    push_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH MSSG FOR GPU *SHAKEHANDS* INTRA PROCESSOR TIME: ");
#endif
#ifdef MEASURE_MEMCPY_
    gettimeofday(&start, NULL);
#endif
    CUDA_CHECK_RETURN (cudaMemcpy(mst->roff[did], cm->send_offsets, sizeof(voff_t) * (total_num_partitions + 1), cudaMemcpyDeviceToHost));
#ifdef MEASURE_MEMCPY_
    gettimeofday(&end, NULL);
    memcpydh_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
#endif
    voff_t inter_start = mst->roff[did][num_of_partitions];
    voff_t inter_end = mst->roff[did][total_num_partitions];
#ifndef SYNC_ALL2ALL_
    if (atomic_set_value(&lock_flag[did], 1, 0) == false)
        printf ("!!!!!!!!!!! CAREFUL: ATOMIC SET VALUE ERROR IN GPU %d\n", did);
#endif
#ifdef MEASURE_MEMCPY_
    gettimeofday(&start, NULL);
    CUDA_CHECK_RETURN (cudaMemcpy(mst->receive[did], (shakehands_t *)cm->send + inter_start, sizeof(shakehands_t) * (inter_end-inter_start), cudaMemcpyDeviceToHost));
    gettimeofday(&end, NULL);
    memcpydh_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
#else
#ifndef SYNC_ALL2ALL_
    // Stage the inter slice for the host all2all on stream 1 while the respond
    // counting below proceeds on stream 0.
    CUDA_CHECK_RETURN (cudaMemcpyAsync(mst->receive[did], (shakehands_t *)cm->send + inter_start, sizeof(shakehands_t) * (inter_end-inter_start), cudaMemcpyDeviceToHost, streams[did][1]));
#else
    CUDA_CHECK_RETURN (cudaMemcpy(mst->receive[did], (shakehands_t *)cm->send + inter_start, sizeof(shakehands_t) * (inter_end-inter_start), cudaMemcpyDeviceToHost));
#endif
#endif
    CUDA_CHECK_RETURN (cudaMemset (cm->extra_send_offsets, 0, sizeof(voff_t) * (total_num_partitions+1)));
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&start, NULL);
#endif
    // Count respond messages generated by the intra-processor shakehands.
    for (i=0; i<num_of_partitions; i++)
    {
        int pid = mst->partition_list[poffset + i];
        voff_t num_mssgs = mst->roff[did][i+1] - mst->roff[did][i];
        int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ?
MAX_NUM_BLOCKS : num_of_blocks; /* continuation: respond-offset loop of shakehands_push_respond_intra_push_gpu */
        voff_t size = index_offset[i+1] - index_offset[i];
        push_mssg_offset_respond <<<block_size, THREADS_PER_BLOCK_NODES, sizeof(vid_t)*(total_num_partitions+1), streams[did][0]>>> (num_mssgs, pid, size, index_offset[i], total_num_partitions, 0, 1, k, p, cutoff);
    }
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&end, NULL);
    pull_intra_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH *RESPOND* OFFSET GPU INTRA PROCESSOR TIME: ");
#endif
    gettimeofday (&overe, NULL);
    over_time[did] += (float)((overe.tv_sec * 1000000 + overe.tv_usec) - (overs.tv_sec * 1000000 + overs.tv_usec)) / 1000;
    return ((void *) 0);
}

// Pthread entry: "shakehands" phase, step 2. Upload the shakehands received
// from peers, finish counting respond messages, allocate the respond receive
// buffer, push the responds (intra + inter sources), stage the inter respond
// slice for the host, and pull the intra responds.
void * shakehands_pull_respond_inter_push_intra_pull_gpu (void * arg)
{
    evaltime_t overs, overe;
    pre_arg * carg = (pre_arg *) arg;
    int did = carg->did;
    int k = carg->k;
    int p = carg->p;
    comm_t * cm = &carg->dbm->comm;
    master_t * mst = carg->mst;
    int num_of_devices = mst->num_of_devices;
    int world_rank = mst->world_rank;
    if (world_rank == 0)
        printf ("WORLD RANK %d: shakehands pull respond inter push intra pull gpu %d\n", world_rank, did);
#ifdef SINGLE_NODE
    CUDA_CHECK_RETURN(cudaSetDevice (world_rank * num_of_devices + did));
#else
    CUDA_CHECK_RETURN(cudaSetDevice (did + DEVICE_SHIFT));
#endif
    gettimeofday (&overs, NULL);
    int total_num_partitions = mst->total_num_partitions;
    int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did];
    int poffset = mst->num_partitions[did];
    voff_t * index_offset = mst->index_offset[did];
    voff_t receive_start = mst->roff[did][num_of_partitions]; // end of intra slice in cm->send
    voff_t inter_size = mst->soff[did][num_of_partitions];    // total received from peers
#ifdef MEASURE_MEMCPY_
    evaltime_t start, end;
    gettimeofday(&start, NULL);
#endif
    CUDA_CHECK_RETURN (cudaMemcpy(cm->receive_offsets, mst->soff[did], sizeof(voff_t) * (num_of_partitions + 1), cudaMemcpyHostToDevice));
#ifdef MEASURE_MEMCPY_
    gettimeofday(&end, NULL);
    memcpyhd_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    gettimeofday(&start, NULL);
#endif
    CUDA_CHECK_RETURN (cudaMemcpy((shakehands_t*)(cm->send) + receive_start, mst->send[did], sizeof(shakehands_t) * inter_size, cudaMemcpyHostToDevice));
#ifdef MEASURE_MEMCPY_
    gettimeofday(&end, NULL);
    memcpyhd_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
#endif
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&start, NULL);
#endif
    int i;
    // Count respond messages generated by the received (inter) shakehands.
    for (i=0; i<num_of_partitions; i++)
    {
        int pid = mst->partition_list[poffset + i];
        voff_t num_mssgs = mst->soff[did][i+1] - mst->soff[did][i];
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        push_mssg_offset_respond <<<block_size, THREADS_PER_BLOCK_NODES, sizeof(vid_t)*(total_num_partitions+1), streams[did][0]>>> (num_mssgs, pid, size, index_offset[i], total_num_partitions, receive_start, 0, k, p, cutoff);
    }
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&end, NULL);
    pull_inter_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH *RESPOND* OFFSET GPU INTER PROCESSORS TIME: ");
#endif
    inclusive_scan<voff_t> (cm->extra_send_offsets, total_num_partitions+1, NULL);
    // *************** malloc (send and) receive buffer for pull and push mode
    voff_t rcv_size;
    CUDA_CHECK_RETURN (cudaMemcpy (&rcv_size, cm->extra_send_offsets + num_of_partitions, sizeof(voff_t), cudaMemcpyDeviceToHost));
    if (rcv_size == 0)
    {
        printf ("CCCCCCCCCcccareful:::::::::: receive size from intra junction update push is 0!!!!!!!!\n");
        rcv_size = 1000; // fall back to a small non-zero allocation
    }
    cm->temp_size = malloc_pull_push_receive_device (&cm->receive, sizeof(shakehands_t), did, rcv_size, 2*(total_num_partitions+num_of_partitions-1)/num_of_partitions, world_rank, num_of_devices);
    set_receive_buffer_gpu (&cm->receive, did, world_rank, num_of_devices);
    CUDA_CHECK_RETURN (cudaMemset(cm->tmp_send_offsets, 0, sizeof(voff_t) * (total_num_partitions + 1)));
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&start, NULL);
#endif
    // Push responds triggered by the intra shakehands.
    for (i=0; i<num_of_partitions; i++)
    {
        int pid = mst->partition_list[poffset+i];
        voff_t num_mssgs = mst->roff[did][i+1] - mst->roff[did][i];
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        push_mssg_respond <<<block_size, THREADS_PER_BLOCK_NODES, sizeof(vid_t)*(total_num_partitions+1), streams[did][0]>>> (num_mssgs, pid, size, index_offset[i], total_num_partitions, 0, 1, k, p, cutoff);
    }
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&end, NULL);
    push_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH *RESPOND* GPU INTRA PROCESSOR TIME: ");
#endif
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&start, NULL);
#endif
    // Push responds triggered by the received (inter) shakehands.
    for (i=0; i<num_of_partitions; i++)
    {
        int pid = mst->partition_list[poffset+i];
        voff_t num_mssgs = mst->soff[did][i+1] - mst->soff[did][i];
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        push_mssg_respond <<<block_size, THREADS_PER_BLOCK_NODES, sizeof(vid_t)*(total_num_partitions+1), streams[did][0]>>> (num_mssgs, pid, size, index_offset[i], total_num_partitions, receive_start, 0, k, p, cutoff);
    }
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&end, NULL);
    push_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PUSH *RESPOND* GPU INTER PROCESSORS TIME: ");
#endif
#ifdef MEASURE_MEMCPY_
    gettimeofday(&start, NULL);
#endif
    CUDA_CHECK_RETURN (cudaMemcpy(mst->roff[did], cm->extra_send_offsets, sizeof(voff_t)*(total_num_partitions + 1), cudaMemcpyDeviceToHost));
#ifdef MEASURE_MEMCPY_
    gettimeofday(&end, NULL);
    memcpydh_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
#endif
    voff_t inter_start = mst->roff[did][num_of_partitions];
    voff_t inter_end = mst->roff[did][total_num_partitions];
    // NOTE(review): %lu assumes voff_t is unsigned long here — confirm voff_t's width.
    printf ("WORLD RANK %d: @@@@@@@@@@@@@@@@@@@@@ total number of shakehands pushed in device %d: %lu\n", mst->world_rank, did, inter_end);
    printf ("WORLD RANK %d: ############### number of intra mssgs pulled for inter shakehands of device %d: %lu\n", mst->world_rank, did, inter_start);
#ifndef SYNC_ALL2ALL_
    if (atomic_set_value(&lock_flag[did], 1, 0) == false)
        printf ("!!!!!!!!!!! CAREFUL: ATOMIC SET VALUE ERROR IN GPU %d\n", did);
#endif
#ifdef MEASURE_TIME_
    // NOTE(review): this timing branch is guarded by MEASURE_TIME_ while the
    // analogous blocks elsewhere use MEASURE_MEMCPY_ — confirm intentional.
    gettimeofday(&start, NULL);
    CUDA_CHECK_RETURN (cudaMemcpy(mst->receive[did], (shakehands_t *)cm->receive+inter_start, (inter_end-inter_start)*sizeof(shakehands_t), cudaMemcpyDeviceToHost));
    gettimeofday(&end, NULL);
    memcpydh_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
#else
#ifndef SYNC_ALL2ALL_
    CUDA_CHECK_RETURN (cudaMemcpyAsync(mst->receive[did], (shakehands_t *)cm->receive+inter_start, (inter_end-inter_start)*sizeof(shakehands_t), cudaMemcpyDeviceToHost, streams[did][1]));
#else
    CUDA_CHECK_RETURN (cudaMemcpy(mst->receive[did], (shakehands_t *)cm->receive+inter_start, (inter_end-inter_start)*sizeof(shakehands_t), cudaMemcpyDeviceToHost));
#endif
#endif
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&start, NULL);
#endif
    // Pull the intra responds from cm->receive into the local partitions.
    for (i=0; i<num_of_partitions; i++)
    {
        int pid = mst->partition_list[poffset + i];
        voff_t num_mssgs = mst->roff[did][i+1] - mst->roff[did][i];
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ?
MAX_NUM_BLOCKS : num_of_blocks; /* continuation: intra respond pull loop of shakehands_pull_respond_inter_push_intra_pull_gpu */
        pull_mssg_respond <<<block_size, THREADS_PER_BLOCK_NODES, 0, streams[did][0]>>> (num_mssgs, pid, size, index_offset[i], cm->receive, 1);
    }
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&end, NULL);
    pull_intra_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PULL *RESPOND* GPU %d INTRA PROCESSOR TIME: ", did);
#endif
    gettimeofday (&overe, NULL);
    over_time[did] += (float)((overe.tv_sec * 1000000 + overe.tv_usec) - (overs.tv_sec * 1000000 + overs.tv_usec)) / 1000;
    return ((void *) 0);
}

// Pthread entry: "shakehands" phase, step 3. Upload respond messages received
// from peers and pull them into the local partitions, then free the respond
// receive buffer allocated in step 2.
void * respond_inter_pull_gpu (void * arg)
{
    evaltime_t overs, overe;
    pre_arg * carg = (pre_arg *) arg;
    comm_t * cm = &carg->dbm->comm;
    master_t * mst = carg->mst;
    int did = carg->did;
    int num_of_partitions = mst->num_partitions[did + 1] - mst->num_partitions[did];
    int poffset = mst->num_partitions[did];
    voff_t * index_offset = mst->index_offset[did];
    int num_of_devices = mst->num_of_devices;
    int world_rank = mst->world_rank;
    if (world_rank == 0)
        printf ("WORLD RANK %d: respond inter pull gpu %d:\n", world_rank, did);
#ifdef SINGLE_NODE
    CUDA_CHECK_RETURN(cudaSetDevice (world_rank * num_of_devices + did));
#else
    CUDA_CHECK_RETURN(cudaSetDevice (did + DEVICE_SHIFT));
#endif
    gettimeofday (&overs, NULL);
    voff_t receive_start = mst->roff[did][num_of_partitions];
    voff_t inter_size = mst->soff[did][num_of_partitions];
    printf ("WORLD RANK %d: ############### number of inter mssgs pulled for inter shakehands of device %d: %lu\n", mst->world_rank, did, inter_size);
    // Guard against overflowing the buffer sized in the previous step.
    if (cm->temp_size <= (inter_size+receive_start)*sizeof(shakehands_t))
    {
        printf("WORLD RANK %d: Error:::::::: malloced receive buffer size smaller than actual receive buffer size!\n", mst->world_rank);
        exit(0);
    }
#ifdef MEASURE_MEMCPY_
    // NOTE(review): start/end are declared only under MEASURE_MEMCPY_ but are
    // also used below under MEASURE_TIME_ — confirm the expected define combos.
    evaltime_t start, end;
    gettimeofday(&start, NULL);
#endif
    CUDA_CHECK_RETURN (cudaMemcpy(cm->receive_offsets, mst->soff[did], sizeof(voff_t) * (num_of_partitions+1), cudaMemcpyHostToDevice));
#ifdef MEASURE_MEMCPY_
    gettimeofday(&end, NULL);
    memcpyhd_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    gettimeofday(&start, NULL);
#endif
    CUDA_CHECK_RETURN (cudaMemcpy((shakehands_t *)(cm->receive) + receive_start, mst->send[did], sizeof(shakehands_t) * inter_size, cudaMemcpyHostToDevice));
#ifdef MEASURE_MEMCPY_
    gettimeofday(&end, NULL);
    memcpyhd_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
#endif
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&start, NULL);
#endif
    int i;
    for (i=0; i<num_of_partitions; i++)
    {
        if (inter_size == 0)
            break; // nothing received from peers
        int pid = mst->partition_list[poffset + i];
        voff_t num_mssgs = mst->soff[did][i+1] - mst->soff[did][i];
        voff_t size = index_offset[i+1] - index_offset[i];
        int num_of_blocks = (num_mssgs + THREADS_PER_BLOCK_NODES - 1) / THREADS_PER_BLOCK_NODES;
        int block_size = num_of_blocks > MAX_NUM_BLOCKS ? MAX_NUM_BLOCKS : num_of_blocks;
        pull_mssg_respond <<<block_size, THREADS_PER_BLOCK_NODES>>> (num_mssgs, pid, size, index_offset[i], (char*)cm->receive + sizeof(shakehands_t) * receive_start, 0);
    }
#ifdef MEASURE_TIME_
    CUDA_CHECK_RETURN (cudaDeviceSynchronize());
    gettimeofday (&end, NULL);
    pull_inter_time[did] += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000;
    print_exec_time (start, end, "&&&&&&&&&&&&&&&&&&&& PULL *RESPOND* GPU INTER PROCESSORS TIME: ");
#endif
    gettimeofday (&overe, NULL);
    over_time[did] += (float)((overe.tv_sec * 1000000 + overe.tv_usec) - (overs.tv_sec * 1000000 + overs.tv_usec)) / 1000;
    // *************** free (send and) receive buffer for pull and push mode
    free_pull_push_receive_device (did, cm, world_rank, num_of_devices);
    return ((void *) 0);
}

// Driver for the whole pre-processing pipeline: distributes partitions across
// nodes/devices and (below this chunk) runs the id-assignment and shakehands
// phases. Definition continues past this chunk.
void pre_process_dbgraph (int num_of_partitions, int k, int p, dbtable_t * tbs, master_t * mst, subgraph_t * subgraph, d_jvs_t * js, d_lvs_t * ls, int world_size, int world_rank)
{
    float all2all_time = 0;
    evaltime_t start, end;
    evaltime_t overs, overe;
    evaltime_t tmps, tmpe;
    evaltime_t inms, inme;
    // gettimeofday (&start, NULL);
    mst->total_num_partitions = num_of_partitions; //total_num_of_partitions=input num_of_partitions
    mst->world_size = world_size;
    mst->world_rank = world_rank;
    int np_per_node;
    int np_node;
    get_np_node (&np_per_node, &np_node, num_of_partitions, world_size, world_rank);
    if (mst->world_rank == 0)
        printf ("WORLD RANK %d IIIIIIIIIII initialize distributing partitions: \n", mst->world_rank);
    gettimeofday(&start, NULL);
    init_distribute_partitions (num_of_partitions, mst, world_size);
    get_subgraph_sizes (subgraph, np_node);
    if (mssg_factor == 0)
        mssg_factor = MSSG_FACTOR;
    // Estimated per-vertex memory footprint used to balance partitions.
    double unit_vsize = sizeof(assid_t)*(mssg_factor+0.1)+sizeof(kmer_t)+sizeof(vid_t)*EDGE_DIC_SIZE+sizeof(ull)+sizeof(voff_t)*2+sizeof(vid_t);
    distribute_partitions (num_of_partitions, mst, subgraph, uneven, world_size, world_rank, subgraph->total_graph_size, unit_vsize);
gettimeofday(&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d #################### distributing partitions time: ", mst->world_rank); int num_of_cpus = mst->num_of_cpus; int num_of_devices = mst->num_of_devices; dbmeta_t * dbm = (dbmeta_t *) malloc (sizeof(dbmeta_t) * (num_of_devices + num_of_cpus)); pthread_t cpu_threads[NUM_OF_CPUS]; pthread_t gpu_threads[NUM_OF_DEVICES]; pre_arg arg[NUM_OF_DEVICES + NUM_OF_CPUS]; #ifndef SYNC_ALL2ALL_ pthread_t comm_thread; comm_arg cm_arg; cm_arg.mst=mst; #endif uint intra_mssgs[NUM_OF_PROCS*MAX_NUM_ITERATION]; uint inter_mssgs[NUM_OF_PROCS*MAX_NUM_ITERATION]; init_mssg_count (intra_mssgs, inter_mssgs); int i; for (i = 0; i < num_of_devices + num_of_cpus; i++) { arg[i].did = i; arg[i].dbm = &dbm[i]; arg[i].mst = mst; arg[i].k = k; arg[i].p = p; arg[i].js = js; arg[i].ls = ls; arg[i].subgraph = subgraph; #ifndef SYNC_ALL2ALL_ cm_arg.cm[i] = &dbm[i].comm; #endif } create_streams(num_of_devices, 2); init_lock_flag(); //***************** PRE-PROCESSING BEGINS: ***************** gettimeofday(&overs, NULL); uint max_subgraph_size; uint max_lsize; uint max_jsize; int intra_num_of_partitions = mst->num_partitions[num_of_devices + num_of_cpus]; max_ss = get_max (subgraph->subgraphs, NULL, NULL, &max_subgraph_size, &max_jsize, &max_lsize, intra_num_of_partitions, num_of_partitions); gettimeofday (&start, NULL); mst->mssg_size = sizeof(assid_t); // IMPORTANT:::::::::: initiate MAXIMUM message size for message buffer init_device_filter1 (dbm, mst, max_subgraph_size); set_globals_filter1_gpu (dbm, mst); init_host_filter2 (dbm, mst, max_subgraph_size); gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: IIIIIIIIIIIIIIInit filter memory time device1 and host: \n", mst->world_rank); init_device_preprocessing (dbm, mst); set_globals_preprocessing_gpu (dbm, mst); init_host_preprocessing (dbm, mst); for (i=num_of_devices; i<num_of_devices+num_of_cpus; i++) { 
init_preprocessing_data_cpu (&dbm[i], num_of_partitions); } // **************** malloc writing offset buffer for pull and push mode for (i=0; i<num_of_devices; i++) { malloc_pull_push_offset_gpu (&dbm[i].comm.extra_send_offsets, mst, i); set_extra_send_offsets_gpu (&dbm[i].comm.extra_send_offsets, mst, i); } for (i=num_of_devices; i<num_of_devices+num_of_cpus; i++) { malloc_pull_push_offset_cpu(&dbm[i].comm.extra_send_offsets, mst); } gettimeofday (&start, NULL); for (i=0; i<num_of_devices; i++) { // init_hashtab_data_gpu (i, mst, &dbm[i], tbs); #ifdef USE_DISK_IO init_binary_data_gpu (i, mst, &dbm[i], tbs); #else init_binary_data_gpu_sorted (i, mst, &dbm[i], tbs); #endif } for (i=num_of_devices; i<num_of_devices+num_of_cpus; i++) { // init_hashtab_data_cpu (i, mst, &dbm[i], tbs); #ifdef USE_DISK_IO init_binary_data_cpu (i, mst, &dbm[i], tbs); #else init_binary_data_cpu_sorted (i, mst, &dbm[i], tbs); #endif } gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: ++++++++++++++++Init hash table data input time: ++++++++++++++++\n", mst->world_rank); free_dbgraph_hashtab (num_of_partitions, tbs); //************** ADD AN EXTRA STEP:::::::::::: modify edges here: *************** gettimeofday (&inms, NULL); // in-memory processing begins mst->mssg_size = sizeof(shakehands_t); // IMPORTANT:::::::::: RESET message size for message buffer for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, shakehands_push_respond_intra_push_gpu, &arg[i]) != 0) { printf ("create thread for shakehands push respond intra push on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, shakehands_push_respond_intra_push_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for shakehands push respond intra push on cpu %d error!\n", i); } } #ifndef SYNC_ALL2ALL_ if (pthread_create (&comm_thread, NULL, master_all2all_async, &cm_arg) != 0) { printf ("Create thread for 
communication error!\n"); } #endif for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("join thread on shakehands push respond intra push on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on shakehands push respond intra push on cpu %d failure!\n", i); } } #ifndef SYNC_ALL2ALL_ if (pthread_join (comm_thread, NULL) != 0) { printf ("Join communication thread failure!\n"); } #else gettimeofday (&start, NULL); master_all2all(mst); gettimeofday (&end, NULL); all2all_time += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #endif if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: TICKTOCK TICKTOCK TICKTOCK:: master all to all time after listrank push: ", mst->world_rank); // while(debug) {} for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, shakehands_pull_respond_inter_push_intra_pull_gpu, &arg[i]) != 0) { printf ("create thread for shakehands pull respond inter upsh intra pull on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, shakehands_pull_respond_inter_push_intra_pull_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for shakehands pull respond inter push intra pull on cpu %d error!\n", i); } } #ifndef SYNC_ALL2ALL_ if (pthread_create (&comm_thread, NULL, master_all2all_async, &cm_arg) != 0) { printf ("Create thread for communication error!\n"); } #endif for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("join thread on shakehands pull respond inter push intra pull on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on shakehands pull respond inter push intra pull on cpu %d failure!\n", i); } } #ifndef SYNC_ALL2ALL_ if (pthread_join (comm_thread, 
NULL) != 0) { printf ("Join communication thread failure!\n"); } #else gettimeofday (&start, NULL); master_all2all(mst); gettimeofday (&end, NULL); all2all_time += (float)((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) / 1000; #endif if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: TICKTOCK TICKTOCK TICKTOCK:: master all to all time after listrank pull modifygraph push: ", mst->world_rank); for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, respond_inter_pull_gpu, &arg[i]) != 0) { printf ("create thread on respond inter pull on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, respond_inter_pull_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for respond inter pull on cpu %d error!\n", i); } } for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on respond inter pull on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on respond inter pull on cpu %d failure!\n", i); } } gettimeofday (&inme, NULL); // in-memory processing end inmemory_time += (float)((inme.tv_sec * 1000000 + inme.tv_usec) - (inms.tv_sec * 1000000 + inms.tv_usec)) / 1000; // ************ free writing offset buffer for pull and push mode for (i=0; i<num_of_devices; i++) free_pull_push_offset_gpu(dbm[i].comm.extra_send_offsets); for (i=num_of_devices; i<num_of_devices+num_of_cpus; i++) free_pull_push_offset_cpu(dbm[i].comm.extra_send_offsets); //************** Reset memory here ************ gettimeofday (&start, NULL); init_device_filter2 (dbm, mst, max_subgraph_size); set_globals_filter2_gpu (dbm, mst); gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: IIIIIIIIIIIIIIInit filter memory time device2: \n", mst->world_rank); //*********** FIRST: ASSIGN EACH VERTEX A 
GLOBAL ID ************** gettimeofday (&inms, NULL); // in-memory processing begins gettimeofday (&start, NULL); if (mst->world_rank == 0) printf("\n++++++++++++++++ Identifying vertices: WORLD RANK %d ++++++++++++++++++\n", world_rank); for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, identify_vertices_gpu, &arg[i]) != 0) { printf ("create thread for hashtab filtering on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, identify_vertices_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for hashtab filtering on cpu %d error!\n", i); } } for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on hashtab filtering on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on hashtab filtering on cpu %d failure!\n", i); } } // ******** allgather id offsets here: ************ gettimeofday (&tmps, NULL); printf ("goffset size: %d\n", sizeof(goffset_t)); mpi_allgatherv_inplace (mst->id_offsets+1 + world_rank*np_per_node, mst->id_offsets + 1, num_of_partitions, world_size, world_rank, sizeof(goffset_t)); mpi_allgatherv_inplace (mst->jid_offset + world_rank*np_per_node, mst->jid_offset, num_of_partitions, world_size, world_rank, sizeof(goffset_t)); gettimeofday (&tmpe, NULL); all2all_time += (float)((tmpe.tv_sec * 1000000 + tmpe.tv_usec) - (tmps.tv_sec * 1000000 + tmps.tv_usec)) / 1000; inclusive_prefix_sum_long (mst->id_offsets, num_of_partitions + 1); ull total_num_junctions = 0; for (i=0; i<num_of_partitions; i++) total_num_junctions += mst->jid_offset[i]; junction_factor = (double)total_num_junctions/(mst->id_offsets[num_of_partitions]-total_num_junctions) + 0.01; printf ("WORLD RANK %d: TOTAL NUMBER OF JUNCITIONS: %u\nJUNCTION FACTOR SET TO BE::::::::: %f\n", mst->world_rank, total_num_junctions, junction_factor); set_id_offsets_cpu (dbm, 
mst); set_id_offsets_gpu (dbm, mst); for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, assign_vertex_ids_gpu, &arg[i]) != 0) { printf ("create thread for assigning vertex ids on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, assign_vertex_ids_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for assigning vertex ids on cpu %d error!\n", i); } } for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on assigning vertex ids on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on assigning vertex ids on cpu %d failure!\n", i); } } gettimeofday (&end, NULL); gettimeofday (&inme, NULL); // inmemory processing end inmemory_time += (float)((inme.tv_sec * 1000000 + inme.tv_usec) - (inms.tv_sec * 1000000 + inms.tv_usec)) / 1000; if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: ++++++++++++++++ Identifying vertex time: ++++++++++++++++++\n", mst->world_rank); gettimeofday (&start, NULL); finalize_device_filter2 (dbm, mst); set_globals_filter2_gpu (dbm, mst); finalize_host_filter2 (dbm, mst); gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: FFFFFFFFFFFfinalize device and host filter time:\n", mst->world_rank); //*********** SECOND: NEIGHBORING WITH VERTEX IDS **************** gettimeofday(&start, NULL); mst->mssg_size = sizeof(assid_t); // IMPORTANT:::::::::: RESET message size for message buffer gettimeofday(&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: IIIIIIIIIIIIIIInitializing pre-processing time: \n", mst->world_rank); // **************** STEP 1 gettimeofday (&inms, NULL); // in-memory processing begins gettimeofday (&start, NULL); if (mst->world_rank == 0) printf("\n++++++++++++++++ Identifying neighbors: WORLD RANK %d 
++++++++++++++++++\n", world_rank); for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, neighbor_push_intra_pull_gpu, &arg[i]) != 0) { printf ("create thread for NEIGHBORING push on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, neighbor_push_intra_pull_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for NEIGHBORING push on cpu %d error!\n", i); } } #ifndef SYNC_ALL2ALL_ if (pthread_create (&comm_thread, NULL, master_all2all_async, &cm_arg) != 0) { printf ("Create thread for communication error!\n"); } #endif for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on NEIGHBORING push on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on NEIGHBORING push on cpu %d failure!\n", i); } } #ifndef SYNC_ALL2ALL_ if (pthread_join (comm_thread, NULL) != 0) { printf ("Join communication thread failure!\n"); } #else gettimeofday (&tmps, NULL); master_all2all(mst); gettimeofday (&tmpe, NULL); all2all_time += (float)((tmpe.tv_sec * 1000000 + tmpe.tv_usec) - (tmps.tv_sec * 1000000 + tmps.tv_usec)) / 1000; #endif // get_mssg_count(mst, intra_mssgs, inter_mssgs, 0); for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, neighbor_inter_pull_gpu, &arg[i]) != 0) { printf ("create thread for NEIGHBORING pull on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, neighbor_inter_pull_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for NEIGHBORING pull on cpu %d error!\n", i); } } for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on NEIGHBORING pull on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on NEIGHBORING pull 
on cpu %d failure!\n", i); } } gettimeofday (&end, NULL); gettimeofday (&inme, NULL); // in-memory processing end inmemory_time += (float)((inme.tv_sec * 1000000 + inme.tv_usec) - (inms.tv_sec * 1000000 + inms.tv_usec)) / 1000; if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: ++++++++++++++++Neighboring time: ++++++++++++++++++\n", mst->world_rank); gettimeofday(&start, NULL); finalize_device_preprocessing (dbm, mst); finalize_host_preprocessing (dbm, mst); finalize_preprocessing_data_cpu (); finalize_receive_all2all(world_size); gettimeofday(&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: FFFFFFFFFFFFFFFFFFFinalizing pre-processing time: \n", mst->world_rank); //*********** THIRD: GARTHER VERTICES INTO VERTEX ARRAYS ************* gettimeofday (&start, NULL); get_max (subgraph->subgraphs, mst->jid_offset, mst->id_offsets, &max_subgraph_size, &max_jsize, &max_lsize, intra_num_of_partitions, num_of_partitions); if (mst->world_rank == 0) printf ("WORLD RANK %d: max subgraph size: %u, max junction size: %u, max linear vertex size: %u\n", world_rank, max_subgraph_size, max_jsize, max_lsize); gmax_jsize = max_jsize; gmax_lsize = max_lsize; init_write_buffer (num_of_devices); init_device_gather (dbm, mst, max_subgraph_size, max_jsize, max_lsize); set_globals_gather_gpu (dbm, mst); init_host_gather (dbm, mst, max_subgraph_size, max_jsize, max_lsize); for (i=num_of_devices; i<num_of_devices+num_of_cpus; i++) { reset_globals_gather_cpu (&dbm[i]); } gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: IIIIIIIIIIIIIIInitializing gathering time: \n", world_rank); gettimeofday (&inms, NULL); // in-memory processing begins gettimeofday (&start, NULL); if (mst->world_rank == 0) printf("\n++++++++++++++++ Gather vertices: WORLD RANK %d ++++++++++++++++++\n", world_rank); for (i = 0; i < num_of_devices; i++) { if (pthread_create (&gpu_threads[i], NULL, gather_vertices_gpu, &arg[i]) 
!= 0) { printf ("create thread for hashtab filtering on gpu %d error!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_create (&cpu_threads[i], NULL, gather_vertices_cpu, &arg[num_of_devices + i]) != 0) { printf ("create thread for hashtab filtering on cpu %d error!\n", i); } } for (i = 0; i < num_of_devices; i++) { if (pthread_join (gpu_threads[i], NULL) != 0) { printf ("Join thread on hashtab filtering on gpu %d failure!\n", i); } } for (i = 0; i < num_of_cpus; i++) { if (pthread_join (cpu_threads[i], NULL) != 0) { printf ("Join thread on hashtab filtering on cpu %d failure!\n", i); } } gettimeofday (&end, NULL); gettimeofday (&inme, NULL); // in-memory processing end inmemory_time += (float)((inme.tv_sec * 1000000 + inme.tv_usec) - (inms.tv_sec * 1000000 + inms.tv_usec)) / 1000; if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: ++++++++++++++++Gathering vertex array time: ++++++++++++++++++\n", mst->world_rank); gettimeofday (&start, NULL); finalize_device_gather2 (dbm, mst); finalize_host_gather2 (dbm, mst); finalize_distribute_partitions (mst); // print_mssg_count(mst->num_of_cpus+mst->num_of_devices, intra_mssgs, inter_mssgs, 0); gettimeofday (&end, NULL); if (mst->world_rank == 0) print_exec_time (start, end, "WORLD RANK %d: FFFFFFFFFFFFFFFFFFFinalizing gathering time: \n", mst->world_rank); gettimeofday(&overe, NULL); print_exec_time(overs, overe, "WORLD RANK %d: ***********************Overall PRE-PROCESSING time: \n", mst->world_rank); finalize_write_buffer (num_of_devices); // print graph statistics: uint total_num_nodes = mst->id_offsets[num_of_partitions]; if (mst->world_rank==0) { printf ("WORLD RANK %d: &&&&&&&&&&&&&&& Total number of valid nodes in de bruijn graph: %lu\n", mst->world_rank, total_num_nodes); printf("WORLD RANK %d:TTTTTTTTTTTTTTTTTTTTIMING: within that:\n", mst->world_rank); printf ("WORLD RANK %d:~~~~~~~~~~~~~~~~ ALLTOALL TIME MEASURED: %f\n", mst->world_rank, all2all_time); } free (dbm); 
destroy_streams(num_of_devices, 2); } }
a1f8cf8a43fa99fadaccbef9c1c7426211ec201c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { int index = (blockIdx.x*blockDim.x) + threadIdx.x; if (index >= n) return; if (idata[index] != 0) bools[index] = 1; else bools[index] = 0; } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { int index = (blockIdx.x*blockDim.x) + threadIdx.x; if (index >= n) return; if (bools[index] == 1) odata[indices[index]] = idata[index]; } } }
a1f8cf8a43fa99fadaccbef9c1c7426211ec201c.cu
#include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { int index = (blockIdx.x*blockDim.x) + threadIdx.x; if (index >= n) return; if (idata[index] != 0) bools[index] = 1; else bools[index] = 0; } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { int index = (blockIdx.x*blockDim.x) + threadIdx.x; if (index >= n) return; if (bools[index] == 1) odata[indices[index]] = idata[index]; } } }
ac56df9fd3d2def86e39fa2032cf962b67b1a27d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <unistd.h>
#include <sys/time.h>

using namespace std;

// Shorthand for formatting and printing usage options to stderr
#define fpe(msg) fprintf(stderr, "\t%s\n", msg);

// Shorthand for handling CUDA errors.
#define HANDLE_ERROR(err) ( HandleError( err, __FILE__, __LINE__ ) )

/**
 * DEFINED VALUES HERE
 */
#define TILE_WIDTH   32
#define TILE_HEIGHT  4
#define TILE_DEPTH   1
#define PER_THREAD_X 1
#define PER_THREAD_Y 1
#define PER_THREAD_Z 1

/*****************
 * CUDA Utilites *
 *****************/

// Abort with a diagnostic if a HIP runtime call failed.
void HandleError(hipError_t err, const char *file, int line) {
    if (err != hipSuccess) {
        printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}

// Check the sticky HIP error state (e.g. after a kernel launch) and
// report it; optionally terminate the process.
void checkCUDAError(const char *msg, bool exitOnError) {
    hipError_t err = hipGetLastError();
    if (hipSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
        if (exitOnError) {
            exit(-1);
        }
    }
}

void cleanupCuda(void) {
    // Explicitly cleans up all runtime-related resources associated with the
    // calling host thread.
    // HANDLE_ERROR(hipDeviceReset());
}

/*********************
 * End CUDA Utilites *
 *********************/

struct Args {
    bool debug;
    bool sequential;
    bool blocked;
    bool overlapped;
    // Data attributes
    int size, dimensions, alloc_size;
    int xSize, ySize, zSize;
    int xBlockSize, yBlockSize, zBlockSize, tBlockSize;
    // Run attributes
    int grid_size, block_count, thread_count, iterations;
};

// Print usage information (optionally prefixed by an error message) and exit.
void usage(char *prog_name, string msg) {
    if (msg.size() > 0) {
        fputs(msg.c_str(), stderr);
    }
    fprintf(stderr, "%s\n", prog_name);
    fprintf(stderr, "Options are:\n");
    fpe("-n<size> Set data size (default: 1024)");
    fpe("-d<dims> Set number of data dimensions (1, 2, or 3) (default: 3)");
    fpe("-g<size> Set grid size");
    fpe("-b<num> Set block count");
    fpe("-t<num> Set thread count");
    fpe("-i<iter> Number of iterations to perform (default: 1000)");
    fpe("-x<size> X Dimension");
    fpe("-y<size> Y Dimension");
    fpe("-z<size> Z Dimension");
    fpe("-T<size> T Dimension");
    fpe("-S Execute sequential, CPU version");
    fpe("-B Execute blocked sequential, CPU version");
    fpe("-O Execute sequential overlapped tiling, CPU version");
    fpe("-D Print debug info");
    fpe("-h Print usage info (this message)");
    exit(EXIT_FAILURE);
}

// Parse command-line options into an Args struct.
// FIX: the defaults advertised by usage() were never applied (size and
// iterations stayed 0, so the default run performed no work), and the
// optstring was missing "X:", "Y:", "Z:" so those switch cases were
// unreachable.
Args parse_arguments(int argc, char *argv[]) {
    Args args = Args();
    // Defaults documented in usage(). Note: only 3-D data is implemented by
    // initialize_matrix(), so 3 is the only dimensionality that runs.
    args.size = 1024;
    args.dimensions = 3;
    args.iterations = 1000;

    int opt;
    while ((opt = getopt(argc, argv, "n:d:g:b:t:i:x:X:y:Y:z:Z:T:hSBOD")) != -1) {
        switch (opt) {
        case 'D':
            args.debug = true;
            break;
        case 'S':
            args.sequential = true;
            break;
        case 'B':
            args.blocked = true;
            break;
        case 'O':
            args.overlapped = true;
            break;
        case 'n':
            args.size = atoi(optarg);
            break;
        case 'd':
            args.dimensions = atoi(optarg);
            break;
        case 'g':
            args.grid_size = atoi(optarg);
            break;
        case 'b':
            args.block_count = atoi(optarg);
            break;
        case 't':
            args.thread_count = atoi(optarg);
            break;
        case 'i':
            args.iterations = atoi(optarg);
            break;
        case 'x':
            args.xBlockSize = atoi(optarg);
            break;
        case 'X':
            args.xSize = atoi(optarg);
            break;
        case 'y':
            args.yBlockSize = atoi(optarg);
            break;
        case 'Y':
            args.ySize = atoi(optarg);
            break;
        case 'z':
            args.zBlockSize = atoi(optarg);
            break;
        case 'Z':
            args.zSize = atoi(optarg);
            break;
        case 'T':
            args.tBlockSize = atoi(optarg);
            break;
        case 'h':
            usage(argv[0], "");
            break;
        default:
            usage(argv[0], "Unrecognized option\n");
        }
    }

    // check sizes
    if (args.size <= 0) {
        cout << "Data size must be larger than 0" << endl;
        exit(EXIT_FAILURE);
    }
    if (args.dimensions <= 0 || args.dimensions >= 4) {
        cerr << "Data must be 1, 2, or 3 dimensions" << endl;
        exit(EXIT_FAILURE);
    }

    // Calculations
    if (args.dimensions == 1) {
        args.alloc_size = args.size;
    } else if (args.dimensions == 2) {
        args.alloc_size = args.size * args.size;
    } else {
        args.alloc_size = args.size * args.size * args.size;
    }

    if (args.thread_count > 0) {
        args.block_count = args.alloc_size / args.thread_count;
    } else if (args.block_count > 0) {
        args.thread_count = args.alloc_size / args.block_count;
    } else {
        args.thread_count = 16;
        args.block_count = args.alloc_size / args.thread_count;
    }

    return args;
}

typedef struct {
    int dimensions;
    int height;
    int width;
    int depth;
    float *elements;
} Matrix;

// Allocate a width x height x depth volume and set all six boundary planes
// to 1.0 (interior is left zero-initialized only where malloc happens to
// return zeroed memory — the kernel never reads uninitialized interior cells
// on the first write pass of the boundary, but NOTE(review): interior values
// are technically indeterminate until overwritten; confirm intent).
// Only dimensions == 3 with all extents > 1 is supported.
Matrix initialize_matrix(int dimensions, int width, int height = 1, int depth = 1) {
    Matrix data;
    if (dimensions == 3 && width > 1 && height > 1 && depth > 1) {
        data.width = width;
        data.height = height;
        data.depth = depth;
        data.elements = (float *) malloc(width * height * depth * sizeof(float));

        for (int z = 0; z < depth; z++) {
            // X = 0 & N planes
            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x += width - 1) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
            // Y = 0 & N planes
            for (int y = 0; y < height; y += height - 1) {
                for (int x = 0; x < width; x++) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
        }
        // Z = 0 & N planes
        for (int z = 0; z < depth; z += depth - 1) {
            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x++) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
        }
    } else {
        fprintf(stderr, "Improper dimension or size.");
        exit(1);
    }
    return data;
}

/****************
 * CUDA KERNELS *
 ****************/

// 7-point (6-neighbor + center) Jacobi-style averaging stencil.
// Each block stages a (TILE_DEPTH x TILE_HEIGHT x TILE_WIDTH) tile plus a
// one-cell halo in shared memory, then averages. Expects
// blockDim == (TILE_WIDTH, TILE_HEIGHT, TILE_DEPTH-ish) as configured by
// callKernel(); boundary cells are copied through unchanged.
__global__ void cached_plane_shared_mem(Matrix data, Matrix result) {
    int threadCol = threadIdx.x;
    int threadRow = threadIdx.y;
    int threadDep = threadIdx.z;
    int blockCol = blockIdx.x;
    int blockRow = blockIdx.y;
    int blockDep = blockIdx.z;

    // Indexes so we don't have to recompute them.
    int globalIndex[PER_THREAD_Z][PER_THREAD_Y][PER_THREAD_X];
    int globalX[PER_THREAD_X];
    int globalY[PER_THREAD_Y];
    int globalZ[PER_THREAD_Z];
    int sharedX[PER_THREAD_X];
    int sharedY[PER_THREAD_Y];
    int sharedZ[PER_THREAD_Z];

    // Shared and local data arrays; +2 in each dimension for the halo.
    __shared__ float shared[TILE_DEPTH + 2][TILE_HEIGHT + 2][TILE_WIDTH + 2];
    float local[PER_THREAD_Z][PER_THREAD_Y][PER_THREAD_X];

    /*
     * Calculate indexes into the global and shared arrays
     */
    // X shared and global
    #pragma unroll
    for (int x = 0; x < PER_THREAD_X; x++) {
        sharedX[x] = threadCol + blockDim.x * x + 1;
        globalX[x] = blockCol * TILE_WIDTH + sharedX[x] - 1;
    }
    // Y shared and global
    #pragma unroll
    for (int y = 0; y < PER_THREAD_Y; y++) {
        sharedY[y] = threadRow + blockDim.y * y + 1;
        globalY[y] = blockRow * TILE_HEIGHT + sharedY[y] - 1;
    }
    // Z shared and global
    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        sharedZ[z] = threadDep + blockDim.z * z + 1;
        globalZ[z] = blockDep * TILE_DEPTH + sharedZ[z] - 1;
    }

    // Global absolute index
    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        int zTemp = globalZ[z] * data.width * data.height;
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            int yTemp = globalY[y] * data.width;
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                globalIndex[z][y][x] = globalX[x] + yTemp + zTemp;
            }
        }
    }

    /*
     * Copy into shared memory
     */
    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[sharedZ[z]][sharedY[y]][sharedX[x]] =
                        data.elements[globalIndex[z][y][x]];
            }
        }
    }

    // Copy below-block dependencies into shared memory
    if (threadRow == 0 && blockRow > 0) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[sharedZ[z]][0][sharedX[x]] =
                        data.elements[globalIndex[z][0][x] - data.width];
            }
        }
    }
    // Copy above-block dependencies into shared memory
    if (threadRow == blockDim.y - 1
            && (blockRow + 1) * TILE_HEIGHT < data.height - 1) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[sharedZ[z]][TILE_HEIGHT + 1][sharedX[x]] =
                        data.elements[globalIndex[z][PER_THREAD_Y - 1][x] + data.width];
            }
        }
    }
    // Copy left-of-block dependencies into shared memory
    if (threadCol == 0 && blockCol > 0) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int y = 0; y < PER_THREAD_Y; y++) {
                shared[sharedZ[z]][sharedY[y]][0] =
                        data.elements[globalIndex[z][y][0] - 1];
            }
        }
    }
    // Copy right-of-block dependencies into shared memory
    if (threadCol == blockDim.x - 1
            && (blockCol + 1) * TILE_WIDTH < data.width) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int y = 0; y < PER_THREAD_Y; y++) {
                shared[sharedZ[z]][sharedY[y]][TILE_WIDTH + 1] =
                        data.elements[globalIndex[z][y][PER_THREAD_X - 1] + 1];
            }
        }
    }
    // Copy in-front-of-block dependencies into shared memory
    if (threadDep == 0 && blockDep > 0) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[0][sharedY[y]][sharedX[x]] =
                        data.elements[globalIndex[0][y][x] - data.width * data.height];
            }
        }
    }
    // Copy behind-block dependencies into shared memory
    if (threadDep == blockDim.z - 1
            && (blockDep + 1) * TILE_DEPTH < data.depth) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[TILE_DEPTH + 1][sharedY[y]][sharedX[x]] =
                        data.elements[globalIndex[PER_THREAD_Z - 1][y][x] + data.width * data.height];
            }
        }
    }

    __syncthreads();

    /*
     * Calculate Values - we are only using the z dimension as the
     * x and y should be set to a value of 1 to calculate the data for the
     * current plane of threads. Z goes from 0 to the size of the block of
     * threads to make sure we have enough shared memory to do the current
     * set of calculations.
     */
    for (int z = 0; z < PER_THREAD_Z; z++) {
        int globZ = globalZ[z];
        int sharZ = sharedZ[z];
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            int globY = globalY[y];
            int sharY = sharedY[y];
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                int globX = globalX[x];
                int sharX = sharedX[x];
                if (globX > 0 && globX < data.width - 1
                        && globY > 0 && globY < data.height - 1
                        && globZ > 0 && globZ < data.depth - 1) {
                    // Interior cell: 6-neighbor + center average.
                    local[z][y][x] = (shared[sharZ][sharY][sharX]
                            + shared[sharZ][sharY][sharX - 1]
                            + shared[sharZ][sharY][sharX + 1]
                            + shared[sharZ][sharY - 1][sharX]
                            + shared[sharZ][sharY + 1][sharX]
                            + shared[sharZ - 1][sharY][sharX]
                            + shared[sharZ + 1][sharY][sharX]) / 7;
                } else if (globX == 0 || globX == data.width - 1
                        || globY == 0 || globY == data.height - 1
                        || globZ == 0 || globZ == data.depth - 1) {
                    // On the edge: pass boundary value through unchanged.
                    local[z][y][x] = shared[sharZ][sharY][sharX];
                } else {
                    // Beyond the edge, shouldn't ever hit this unless we
                    // messed something up
                }
            }
        }
    }

    __syncthreads();

    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                result.elements[globalIndex[z][y][x]] = local[z][y][x];
            }
        }
    }
}

/********************
 * END CUDA KERNELS *
 ********************/

// Allocate a device-side mirror of A; copy A's contents when copyToDevice.
Matrix initialize_device(Matrix A, bool copyToDevice) {
    Matrix deviceA;
    deviceA.width = A.width;
    deviceA.height = A.height;
    deviceA.depth = A.depth;
    deviceA.dimensions = A.dimensions;

    size_t sizeA = A.width * A.height * A.depth * sizeof(float);
    HANDLE_ERROR(hipMalloc((void **) &deviceA.elements, sizeA));
    if (copyToDevice) {
        HANDLE_ERROR(
                hipMemcpy(deviceA.elements, A.elements, sizeA, hipMemcpyHostToDevice));
    }
    return deviceA;
}

// Run args.iterations stencil passes, ping-ponging between two device
// buffers, and copy the final result back into B.
// FIX: both device buffers were leaked; they are now freed after the copy.
void callKernel(Args args, Matrix A, Matrix B) {
    Matrix deviceA, deviceB;
    deviceA = initialize_device(A, true);
    deviceB = initialize_device(B, false);

    // hipDeviceSetLimit(hipLimitPrintfFifoSize,size); - include this to control behavior of L1 cache
    dim3 blocks(max(args.size / TILE_WIDTH, 1),
                max(args.size / TILE_HEIGHT, 1),
                args.size / TILE_DEPTH);
    dim3 threads(TILE_WIDTH, TILE_HEIGHT, 1);

    for (int t = 0; t < args.iterations; t++) {
        hipLaunchKernelGGL(( cached_plane_shared_mem), dim3(blocks), dim3(threads), 0, 0, deviceA, deviceB);
        checkCUDAError("cached_plane_shared_mem", true);
        // After the swap, deviceA always holds the most recent output.
        swap(deviceA, deviceB);
    }

    HANDLE_ERROR(
            hipMemcpy(B.elements, deviceA.elements,
                      A.width * A.height * A.depth * sizeof(float),
                      hipMemcpyDeviceToHost));

    HANDLE_ERROR(hipFree(deviceA.elements));
    HANDLE_ERROR(hipFree(deviceB.elements));
}

// Pretty-print small volumes for debugging (refuses sizes > 20).
void print_data(float *data, int size, int dimensions) {
    if (size > 20) {
        cerr << "Data too big to print\n" << endl;
        return;
    }

    if (dimensions == 1) {
        for (int x = 0; x < size; x++) {
            printf("%.3f ", data[x]);
        }
    } else if (dimensions == 2) {
        for (int y = 0; y < size; y++) {
            for (int x = 0; x < size; x++) {
                printf("%.3f ", data[y * size + x]);
            }
            cout << endl;
        }
    } else if (dimensions == 3) {
        for (int z = 0; z < size; z++) {
            for (int y = 0; y < size; y++) {
                for (int x = 0; x < size; x++) {
                    printf("%.3f ", data[z * size * size + y * size + x]);
                }
                cout << endl;
            }
            cout << endl;
        }
    }
    cout << endl << endl;
}

// Main
int main(int argc, char *argv[]) {
    Args args = parse_arguments(argc, argv);

    float runtime;
    struct timeval start, end;

    Matrix A, B;
    A = initialize_matrix(args.dimensions, args.size, args.size, args.size);
    B = initialize_matrix(args.dimensions, args.size, args.size, args.size);

    // atexit(cleanupCuda);

    gettimeofday(&start, NULL);
    callKernel(args, A, B);
    gettimeofday(&end, NULL);

    runtime = ((end.tv_sec - start.tv_sec) * 1000.0)
            + ((end.tv_usec - start.tv_usec) / 1000.0);
    printf("Processing Time: %4.4f milliseconds\n", runtime);

    if (args.debug) {
        print_data(B.elements, args.size, args.dimensions);
    }

    free(A.elements);
    free(B.elements);
}
ac56df9fd3d2def86e39fa2032cf962b67b1a27d.cu
#include <stdio.h>
#include <iostream>
#include <unistd.h>
#include <sys/time.h>

using namespace std;

// Shorthand for formatting and printing usage options to stderr
#define fpe(msg) fprintf(stderr, "\t%s\n", msg);

// Shorthand for handling CUDA errors.
#define HANDLE_ERROR(err) ( HandleError( err, __FILE__, __LINE__ ) )

/**
 * DEFINED VALUES HERE
 */
#define TILE_WIDTH   32
#define TILE_HEIGHT  4
#define TILE_DEPTH   1
#define PER_THREAD_X 1
#define PER_THREAD_Y 1
#define PER_THREAD_Z 1

/*****************
 * CUDA Utilites *
 *****************/

// Abort with a diagnostic if a CUDA runtime call failed.
void HandleError(cudaError_t err, const char *file, int line) {
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}

// Check the sticky CUDA error state (e.g. after a kernel launch) and
// report it; optionally terminate the process.
void checkCUDAError(const char *msg, bool exitOnError) {
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
        if (exitOnError) {
            exit(-1);
        }
    }
}

void cleanupCuda(void) {
    // Explicitly cleans up all runtime-related resources associated with the
    // calling host thread. (cudaThreadExit() is deprecated; prefer
    // cudaDeviceReset() if this is ever enabled.)
    // HANDLE_ERROR(cudaDeviceReset());
}

/*********************
 * End CUDA Utilites *
 *********************/

struct Args {
    bool debug;
    bool sequential;
    bool blocked;
    bool overlapped;
    // Data attributes
    int size, dimensions, alloc_size;
    int xSize, ySize, zSize;
    int xBlockSize, yBlockSize, zBlockSize, tBlockSize;
    // Run attributes
    int grid_size, block_count, thread_count, iterations;
};

// Print usage information (optionally prefixed by an error message) and exit.
void usage(char *prog_name, string msg) {
    if (msg.size() > 0) {
        fputs(msg.c_str(), stderr);
    }
    fprintf(stderr, "%s\n", prog_name);
    fprintf(stderr, "Options are:\n");
    fpe("-n<size> Set data size (default: 1024)");
    fpe("-d<dims> Set number of data dimensions (1, 2, or 3) (default: 3)");
    fpe("-g<size> Set grid size");
    fpe("-b<num> Set block count");
    fpe("-t<num> Set thread count");
    fpe("-i<iter> Number of iterations to perform (default: 1000)");
    fpe("-x<size> X Dimension");
    fpe("-y<size> Y Dimension");
    fpe("-z<size> Z Dimension");
    fpe("-T<size> T Dimension");
    fpe("-S Execute sequential, CPU version");
    fpe("-B Execute blocked sequential, CPU version");
    fpe("-O Execute sequential overlapped tiling, CPU version");
    fpe("-D Print debug info");
    fpe("-h Print usage info (this message)");
    exit(EXIT_FAILURE);
}

// Parse command-line options into an Args struct.
// FIX: the defaults advertised by usage() were never applied (size and
// iterations stayed 0, so the default run performed no work), and the
// optstring was missing "X:", "Y:", "Z:" so those switch cases were
// unreachable.
Args parse_arguments(int argc, char *argv[]) {
    Args args = Args();
    // Defaults documented in usage(). Note: only 3-D data is implemented by
    // initialize_matrix(), so 3 is the only dimensionality that runs.
    args.size = 1024;
    args.dimensions = 3;
    args.iterations = 1000;

    int opt;
    while ((opt = getopt(argc, argv, "n:d:g:b:t:i:x:X:y:Y:z:Z:T:hSBOD")) != -1) {
        switch (opt) {
        case 'D':
            args.debug = true;
            break;
        case 'S':
            args.sequential = true;
            break;
        case 'B':
            args.blocked = true;
            break;
        case 'O':
            args.overlapped = true;
            break;
        case 'n':
            args.size = atoi(optarg);
            break;
        case 'd':
            args.dimensions = atoi(optarg);
            break;
        case 'g':
            args.grid_size = atoi(optarg);
            break;
        case 'b':
            args.block_count = atoi(optarg);
            break;
        case 't':
            args.thread_count = atoi(optarg);
            break;
        case 'i':
            args.iterations = atoi(optarg);
            break;
        case 'x':
            args.xBlockSize = atoi(optarg);
            break;
        case 'X':
            args.xSize = atoi(optarg);
            break;
        case 'y':
            args.yBlockSize = atoi(optarg);
            break;
        case 'Y':
            args.ySize = atoi(optarg);
            break;
        case 'z':
            args.zBlockSize = atoi(optarg);
            break;
        case 'Z':
            args.zSize = atoi(optarg);
            break;
        case 'T':
            args.tBlockSize = atoi(optarg);
            break;
        case 'h':
            usage(argv[0], "");
            break;
        default:
            usage(argv[0], "Unrecognized option\n");
        }
    }

    // check sizes
    if (args.size <= 0) {
        cout << "Data size must be larger than 0" << endl;
        exit(EXIT_FAILURE);
    }
    if (args.dimensions <= 0 || args.dimensions >= 4) {
        cerr << "Data must be 1, 2, or 3 dimensions" << endl;
        exit(EXIT_FAILURE);
    }

    // Calculations
    if (args.dimensions == 1) {
        args.alloc_size = args.size;
    } else if (args.dimensions == 2) {
        args.alloc_size = args.size * args.size;
    } else {
        args.alloc_size = args.size * args.size * args.size;
    }

    if (args.thread_count > 0) {
        args.block_count = args.alloc_size / args.thread_count;
    } else if (args.block_count > 0) {
        args.thread_count = args.alloc_size / args.block_count;
    } else {
        args.thread_count = 16;
        args.block_count = args.alloc_size / args.thread_count;
    }

    return args;
}

typedef struct {
    int dimensions;
    int height;
    int width;
    int depth;
    float *elements;
} Matrix;

// Allocate a width x height x depth volume and set all six boundary planes
// to 1.0. NOTE(review): interior cells are not explicitly initialized —
// confirm malloc-zeroing is not being relied upon. Only dimensions == 3
// with all extents > 1 is supported.
Matrix initialize_matrix(int dimensions, int width, int height = 1, int depth = 1) {
    Matrix data;
    if (dimensions == 3 && width > 1 && height > 1 && depth > 1) {
        data.width = width;
        data.height = height;
        data.depth = depth;
        data.elements = (float *) malloc(width * height * depth * sizeof(float));

        for (int z = 0; z < depth; z++) {
            // X = 0 & N planes
            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x += width - 1) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
            // Y = 0 & N planes
            for (int y = 0; y < height; y += height - 1) {
                for (int x = 0; x < width; x++) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
        }
        // Z = 0 & N planes
        for (int z = 0; z < depth; z += depth - 1) {
            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x++) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
        }
    } else {
        fprintf(stderr, "Improper dimension or size.");
        exit(1);
    }
    return data;
}

/****************
 * CUDA KERNELS *
 ****************/

// 7-point (6-neighbor + center) Jacobi-style averaging stencil.
// Each block stages a (TILE_DEPTH x TILE_HEIGHT x TILE_WIDTH) tile plus a
// one-cell halo in shared memory, then averages. Launch config is set by
// callKernel(); boundary cells are copied through unchanged.
__global__ void cached_plane_shared_mem(Matrix data, Matrix result) {
    int threadCol = threadIdx.x;
    int threadRow = threadIdx.y;
    int threadDep = threadIdx.z;
    int blockCol = blockIdx.x;
    int blockRow = blockIdx.y;
    int blockDep = blockIdx.z;

    // Indexes so we don't have to recompute them.
    int globalIndex[PER_THREAD_Z][PER_THREAD_Y][PER_THREAD_X];
    int globalX[PER_THREAD_X];
    int globalY[PER_THREAD_Y];
    int globalZ[PER_THREAD_Z];
    int sharedX[PER_THREAD_X];
    int sharedY[PER_THREAD_Y];
    int sharedZ[PER_THREAD_Z];

    // Shared and local data arrays; +2 in each dimension for the halo.
    __shared__ float shared[TILE_DEPTH + 2][TILE_HEIGHT + 2][TILE_WIDTH + 2];
    float local[PER_THREAD_Z][PER_THREAD_Y][PER_THREAD_X];

    /*
     * Calculate indexes into the global and shared arrays
     */
    // X shared and global
    #pragma unroll
    for (int x = 0; x < PER_THREAD_X; x++) {
        sharedX[x] = threadCol + blockDim.x * x + 1;
        globalX[x] = blockCol * TILE_WIDTH + sharedX[x] - 1;
    }
    // Y shared and global
    #pragma unroll
    for (int y = 0; y < PER_THREAD_Y; y++) {
        sharedY[y] = threadRow + blockDim.y * y + 1;
        globalY[y] = blockRow * TILE_HEIGHT + sharedY[y] - 1;
    }
    // Z shared and global
    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        sharedZ[z] = threadDep + blockDim.z * z + 1;
        globalZ[z] = blockDep * TILE_DEPTH + sharedZ[z] - 1;
    }

    // Global absolute index
    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        int zTemp = globalZ[z] * data.width * data.height;
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            int yTemp = globalY[y] * data.width;
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                globalIndex[z][y][x] = globalX[x] + yTemp + zTemp;
            }
        }
    }

    /*
     * Copy into shared memory
     */
    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[sharedZ[z]][sharedY[y]][sharedX[x]] =
                        data.elements[globalIndex[z][y][x]];
            }
        }
    }

    // Copy below-block dependencies into shared memory
    if (threadRow == 0 && blockRow > 0) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[sharedZ[z]][0][sharedX[x]] =
                        data.elements[globalIndex[z][0][x] - data.width];
            }
        }
    }
    // Copy above-block dependencies into shared memory
    if (threadRow == blockDim.y - 1
            && (blockRow + 1) * TILE_HEIGHT < data.height - 1) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[sharedZ[z]][TILE_HEIGHT + 1][sharedX[x]] =
                        data.elements[globalIndex[z][PER_THREAD_Y - 1][x] + data.width];
            }
        }
    }
    // Copy left-of-block dependencies into shared memory
    if (threadCol == 0 && blockCol > 0) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int y = 0; y < PER_THREAD_Y; y++) {
                shared[sharedZ[z]][sharedY[y]][0] =
                        data.elements[globalIndex[z][y][0] - 1];
            }
        }
    }
    // Copy right-of-block dependencies into shared memory
    if (threadCol == blockDim.x - 1
            && (blockCol + 1) * TILE_WIDTH < data.width) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int y = 0; y < PER_THREAD_Y; y++) {
                shared[sharedZ[z]][sharedY[y]][TILE_WIDTH + 1] =
                        data.elements[globalIndex[z][y][PER_THREAD_X - 1] + 1];
            }
        }
    }
    // Copy in-front-of-block dependencies into shared memory
    if (threadDep == 0 && blockDep > 0) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[0][sharedY[y]][sharedX[x]] =
                        data.elements[globalIndex[0][y][x] - data.width * data.height];
            }
        }
    }
    // Copy behind-block dependencies into shared memory
    if (threadDep == blockDim.z - 1
            && (blockDep + 1) * TILE_DEPTH < data.depth) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[TILE_DEPTH + 1][sharedY[y]][sharedX[x]] =
                        data.elements[globalIndex[PER_THREAD_Z - 1][y][x] + data.width * data.height];
            }
        }
    }

    __syncthreads();

    /*
     * Calculate Values - we are only using the z dimension as the
     * x and y should be set to a value of 1 to calculate the data for the
     * current plane of threads. Z goes from 0 to the size of the block of
     * threads to make sure we have enough shared memory to do the current
     * set of calculations.
     */
    for (int z = 0; z < PER_THREAD_Z; z++) {
        int globZ = globalZ[z];
        int sharZ = sharedZ[z];
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            int globY = globalY[y];
            int sharY = sharedY[y];
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                int globX = globalX[x];
                int sharX = sharedX[x];
                if (globX > 0 && globX < data.width - 1
                        && globY > 0 && globY < data.height - 1
                        && globZ > 0 && globZ < data.depth - 1) {
                    // Interior cell: 6-neighbor + center average.
                    local[z][y][x] = (shared[sharZ][sharY][sharX]
                            + shared[sharZ][sharY][sharX - 1]
                            + shared[sharZ][sharY][sharX + 1]
                            + shared[sharZ][sharY - 1][sharX]
                            + shared[sharZ][sharY + 1][sharX]
                            + shared[sharZ - 1][sharY][sharX]
                            + shared[sharZ + 1][sharY][sharX]) / 7;
                } else if (globX == 0 || globX == data.width - 1
                        || globY == 0 || globY == data.height - 1
                        || globZ == 0 || globZ == data.depth - 1) {
                    // On the edge: pass boundary value through unchanged.
                    local[z][y][x] = shared[sharZ][sharY][sharX];
                } else {
                    // Beyond the edge, shouldn't ever hit this unless we
                    // messed something up
                }
            }
        }
    }

    __syncthreads();

    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                result.elements[globalIndex[z][y][x]] = local[z][y][x];
            }
        }
    }
}

/********************
 * END CUDA KERNELS *
 ********************/

// Allocate a device-side mirror of A; copy A's contents when copyToDevice.
Matrix initialize_device(Matrix A, bool copyToDevice) {
    Matrix deviceA;
    deviceA.width = A.width;
    deviceA.height = A.height;
    deviceA.depth = A.depth;
    deviceA.dimensions = A.dimensions;

    size_t sizeA = A.width * A.height * A.depth * sizeof(float);
    HANDLE_ERROR(cudaMalloc((void **) &deviceA.elements, sizeA));
    if (copyToDevice) {
        HANDLE_ERROR(
                cudaMemcpy(deviceA.elements, A.elements, sizeA, cudaMemcpyHostToDevice));
    }
    return deviceA;
}

// Run args.iterations stencil passes, ping-ponging between two device
// buffers, and copy the final result back into B.
// FIX: both device buffers were leaked; they are now freed after the copy.
void callKernel(Args args, Matrix A, Matrix B) {
    Matrix deviceA, deviceB;
    deviceA = initialize_device(A, true);
    deviceB = initialize_device(B, false);

    // cudaDeviceSetLimit(cudaLimitPrintfFifoSize,size); - include this to control behavior of L1 cache
    dim3 blocks(max(args.size / TILE_WIDTH, 1),
                max(args.size / TILE_HEIGHT, 1),
                args.size / TILE_DEPTH);
    dim3 threads(TILE_WIDTH, TILE_HEIGHT, 1);

    for (int t = 0; t < args.iterations; t++) {
        cached_plane_shared_mem<<<blocks, threads>>>(deviceA, deviceB);
        checkCUDAError("cached_plane_shared_mem", true);
        // After the swap, deviceA always holds the most recent output.
        swap(deviceA, deviceB);
    }

    HANDLE_ERROR(
            cudaMemcpy(B.elements, deviceA.elements,
                       A.width * A.height * A.depth * sizeof(float),
                       cudaMemcpyDeviceToHost));

    HANDLE_ERROR(cudaFree(deviceA.elements));
    HANDLE_ERROR(cudaFree(deviceB.elements));
}

// Pretty-print small volumes for debugging (refuses sizes > 20).
void print_data(float *data, int size, int dimensions) {
    if (size > 20) {
        cerr << "Data too big to print\n" << endl;
        return;
    }

    if (dimensions == 1) {
        for (int x = 0; x < size; x++) {
            printf("%.3f ", data[x]);
        }
    } else if (dimensions == 2) {
        for (int y = 0; y < size; y++) {
            for (int x = 0; x < size; x++) {
                printf("%.3f ", data[y * size + x]);
            }
            cout << endl;
        }
    } else if (dimensions == 3) {
        for (int z = 0; z < size; z++) {
            for (int y = 0; y < size; y++) {
                for (int x = 0; x < size; x++) {
                    printf("%.3f ", data[z * size * size + y * size + x]);
                }
                cout << endl;
            }
            cout << endl;
        }
    }
    cout << endl << endl;
}

// Main
int main(int argc, char *argv[]) {
    Args args = parse_arguments(argc, argv);

    float runtime;
    struct timeval start, end;

    Matrix A, B;
    A = initialize_matrix(args.dimensions, args.size, args.size, args.size);
    B = initialize_matrix(args.dimensions, args.size, args.size, args.size);

    // atexit(cleanupCuda);

    gettimeofday(&start, NULL);
    callKernel(args, A, B);
    gettimeofday(&end, NULL);

    runtime = ((end.tv_sec - start.tv_sec) * 1000.0)
            + ((end.tv_usec - start.tv_usec) / 1000.0);
    printf("Processing Time: %4.4f milliseconds\n", runtime);

    if (args.debug) {
        print_data(B.elements, args.size, args.dimensions);
    }

    free(A.elements);
    free(B.elements);
}
aa39c58318424d800ff92291e8217249a8498270.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "io.h"
#include "utilities.h"
#include <math.h>

#define BLOCK_SIZE 1024

// Parallel SpMV with one Thread per Row.
// CSR inputs: values/col_idx (nnz entries), row_off (m+1 offsets).
// res[row] = dot(row of the sparse matrix, vect); res has m entries.
__global__ void parallel_spmv_1(float * values, int * col_idx, int * row_off,
        float * vect, float * res, int m, int n, int nnz) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < m) {
        int begin_index = row_off[row];
        int end_index = row_off[row + 1];
        float row_sum = 0.0;
        for (int i = begin_index; i < end_index; i++) {
            row_sum += (values[i] * vect[col_idx[i]]);
        }
        res[row] = row_sum;
    }
}
////////////////////////////

// Parallel SpMV with one Warp per Row.
// Each 32-lane warp strides over one CSR row, then reduces the partial
// sums with warp shuffles; lane 0 writes the result.
__global__ void parallel_spmv_2(float * values, int * col_idx, int * row_off,
        float * vect, float * res, int m, int n, int nnz) {
    int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
    int warp_id = thread_id / 32;
    int lane_id = thread_id % 32;
    int row = warp_id;
    if (row < m) {
        int begin_index = row_off[row];
        int end_index = row_off[row + 1];
        float thread_sum = 0.0;
        for (int i = begin_index + lane_id; i < end_index; i += 32)
            thread_sum += values[i] * vect[col_idx[i]];
        // Tree reduction across the full warp.
        thread_sum += __shfl_down(thread_sum, 16);
        thread_sum += __shfl_down(thread_sum, 8);
        thread_sum += __shfl_down(thread_sum, 4);
        thread_sum += __shfl_down(thread_sum, 2);
        thread_sum += __shfl_down(thread_sum, 1);
        if (lane_id == 0)
            res[row] = thread_sum;
    }
}
////////////////////////////

// Parallel SpMV with Average threads per row.
// threads_per_row must be a power of two in [1, 32] (enforced on the host)
// so the shuffle reduction below is exact.
__global__ void parallel_spmv_3(float * values, int * col_idx, int * row_off,
        float * vect, float * res, int m, int n, int nnz, int threads_per_row) {
    int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
    int vector_id = thread_id / threads_per_row;
    int lane_id = thread_id % threads_per_row;
    int row = vector_id;
    if (row < m) {
        int begin_index = row_off[row];
        int end_index = row_off[row + 1];
        float thread_sum = 0.0;
        for (int i = begin_index + lane_id; i < end_index; i += threads_per_row)
            thread_sum += values[i] * vect[col_idx[i]];
        int temp = threads_per_row / 2;
        while (temp >= 1) {
            thread_sum += __shfl_down(thread_sum, temp);
            temp /= 2;
        }
        if (lane_id == 0)
            res[row] = thread_sum;
    }
}
////////////////////////////

// Utility function to calculate thread_per_row for parallel_spmv_3:
// largest power of two <= n (for n >= 1).
int nearest_pow_2(float n) {
    int lg = (int) log2(n);
    return (int) pow(2, lg);
}
////////////////////////////

int main() {
    // Create Cuda Events
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    ////////////////////////////

    // Reading Dataset
    int m, n, nnz, nnz_max, nnz_avg, nnz_dev;
    conv(nnz, m, n, nnz_max, nnz_avg, nnz_dev);  // Defined in io.h
    cout << "\nrows = " << m;
    cout << "\ncolumns = " << n;
    cout << "\nnnz = " << nnz;
    cout << "\nnnz_max = " << nnz_max;
    cout << "\nnnz_avg = " << nnz_avg;
    cout << "\nnnz_dev = " << nnz_dev;
    cout << "\n\n";
    float *vect = vect_gen(n);  //generating dense vector
    ////////////////////////////

    // Serial SpMV
    float *host_res = new float[m];
    clock_t begin = clock();
    simple_spmv(host_res, vect, values, col_idx, row_off, nnz, m, n);
    clock_t end = clock();
    double cpu_time = double(end - begin) / CLOCKS_PER_SEC;
    cpu_time = cpu_time * 1000;
    ////////////////////////////

    // Device Memory allocation
    float *d_values, *d_res, *d_vect;
    int *d_row_off, *d_col_idx;
    hipMalloc((void**)&d_values, sizeof(float) * nnz);
    hipMalloc((void**)&d_col_idx, sizeof(int) * nnz);
    hipMalloc((void**)&d_row_off, sizeof(int) * (m + 1));
    hipMalloc((void**)&d_res, sizeof(float) * m);
    hipMalloc((void**)&d_vect, sizeof(float) * n);
    ////////////////////////////

    // Host to device copy
    hipMemcpy(d_values, values, sizeof(float) * nnz, hipMemcpyHostToDevice);
    hipMemcpy(d_col_idx, col_idx, sizeof(int) * nnz, hipMemcpyHostToDevice);
    hipMemcpy(d_row_off, row_off, sizeof(int) * (m + 1), hipMemcpyHostToDevice);
    hipMemcpy(d_vect, vect, sizeof(float) * n, hipMemcpyHostToDevice);
    ////////////////////////////

    // Parallel SpMV
    ////////////////////////////
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid_1((m - 1) / BLOCK_SIZE + 1, 1, 1);
    dim3 dimGrid_2((m - 1) / 32 + 1, 1, 1);
    // FIX: guard against nnz_avg == 0, which previously produced
    // threads_per_row == 0 (division by zero in the grid computation and
    // in-kernel index math).
    int threads_per_row = min(32, max(1, nearest_pow_2((float) nnz_avg)));
    dim3 dimGrid_3((m - 1) / (1024 / threads_per_row) + 1, 1, 1);

    // Calling one thread per row kernel
    hipEventRecord(start);
    hipLaunchKernelGGL(( parallel_spmv_1), dim3(dimGrid_1), dim3(dimBlock), 0, 0, d_values, d_col_idx, d_row_off, d_vect, d_res, m, n, nnz);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float gpu_time_1 = 0;
    hipEventElapsedTime(&gpu_time_1, start, stop);

    // calling one warp per row kernel
    hipEventRecord(start);
    hipLaunchKernelGGL(( parallel_spmv_2), dim3(dimGrid_2), dim3(dimBlock), 0, 0, d_values, d_col_idx, d_row_off, d_vect, d_res, m, n, nnz);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float gpu_time_2 = 0;
    hipEventElapsedTime(&gpu_time_2, start, stop);

    // calling avg threads per row kernel
    hipEventRecord(start);
    hipLaunchKernelGGL(( parallel_spmv_3), dim3(dimGrid_3), dim3(dimBlock), 0, 0, d_values, d_col_idx, d_row_off, d_vect, d_res, m, n, nnz, threads_per_row);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float gpu_time_3 = 0;
    hipEventElapsedTime(&gpu_time_3, start, stop);
    ////////////////////////////

    // Copy result to host
    // FIX: the result vector has m entries (one per row); the original copy
    // used sizeof(float)*n, which truncates or over-reads whenever m != n.
    float * result_from_device = new float[m];
    hipMemcpy(result_from_device, d_res, sizeof(float) * m, hipMemcpyDeviceToHost);
    ////////////////////////////

    // Check Result
    checker(result_from_device, host_res, m);
    ////////////////////////////

    // Free Device Memory
    hipFree(d_values);
    hipFree(d_col_idx);
    hipFree(d_row_off);
    hipFree(d_res);
    hipFree(d_vect);
    // FIX: also release events and host-side buffers (previously leaked).
    hipEventDestroy(start);
    hipEventDestroy(stop);
    delete[] host_res;
    delete[] result_from_device;
    ////////////////////////////

    // Print Statistics
    cout << "\n\nCPU Execution time = " << cpu_time << " ms";
    cout << "\n\nGPU Execution time - Thread per Row = " << gpu_time_1 << " ms";
    cout << "\n\nGPU Execution time - Warp per Row = " << gpu_time_2 << " ms";
    cout << "\n\n\nThreads per row in avrg per row = " << threads_per_row;
    cout << "\nGPU Execution time - Avrg per Row = " << gpu_time_3 << " ms";
    cout << "\n\n";
    ////////////////////////////
}
aa39c58318424d800ff92291e8217249a8498270.cu
#include "io.h" #include "utilities.h" #include <math.h> #define BLOCK_SIZE 1024 // Parallel SpMV with one Thread per Row __global__ void parallel_spmv_1(float * values, int * col_idx, int * row_off, float * vect, float * res, int m, int n, int nnz){ int row = blockIdx.x * blockDim.x + threadIdx.x; if(row<m){ int begin_index = row_off[row]; int end_index = row_off[row+1]; float row_sum = 0.0; for(int i = begin_index; i < end_index; i++){ row_sum += (values[i] * vect[col_idx[i]]); } res[row] = row_sum; } } //////////////////////////// // Parallel SpMV with one Warp per Row __global__ void parallel_spmv_2(float * values, int * col_idx, int * row_off, float * vect, float * res, int m, int n, int nnz){ int thread_id = blockDim.x * blockIdx.x + threadIdx.x; int warp_id = thread_id / 32; int lane_id = thread_id % 32; int row = warp_id; if(row < m){ int begin_index = row_off[row]; int end_index = row_off[row+1]; float thread_sum = 0.0; for(int i = begin_index + lane_id; i < end_index; i+=32) thread_sum += values[i] * vect[col_idx[i]]; thread_sum += __shfl_down(thread_sum,16); thread_sum += __shfl_down(thread_sum,8); thread_sum += __shfl_down(thread_sum,4); thread_sum += __shfl_down(thread_sum,2); thread_sum += __shfl_down(thread_sum,1); if(lane_id == 0) res[row] = thread_sum; } } //////////////////////////// // Parallel SpMV with Average threads per row __global__ void parallel_spmv_3(float * values, int * col_idx, int * row_off, float * vect, float * res, int m, int n, int nnz, int threads_per_row){ int thread_id = blockDim.x * blockIdx.x + threadIdx.x; int vector_id = thread_id / threads_per_row; int lane_id = thread_id % threads_per_row; int row = vector_id; if(row < m){ int begin_index = row_off[row]; int end_index = row_off[row+1]; float thread_sum = 0.0; for(int i = begin_index + lane_id; i < end_index; i+=threads_per_row) thread_sum += values[i] * vect[col_idx[i]]; int temp = threads_per_row/2; while(temp >= 1){ thread_sum += __shfl_down(thread_sum, temp); temp/=2; 
} if(lane_id == 0) res[row] = thread_sum; } } //////////////////////////// // Utility function to calculate thread_per_row for parallel_spmv_3 // int nearest_pow_2(float n){ int lg = (int)log2(n); return (int)pow(2,lg); } //////////////////////////// int main(){ // Create Cuda Events // cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //////////////////////////// // Reading Dataset // int m,n,nnz,nnz_max,nnz_avg,nnz_dev; conv(nnz,m,n,nnz_max,nnz_avg,nnz_dev); // Defined in io.h cout<<"\nrows = "<<m; cout<<"\ncolumns = "<<n; cout<<"\nnnz = "<<nnz; cout<<"\nnnz_max = "<<nnz_max; cout<<"\nnnz_avg = "<<nnz_avg; cout<<"\nnnz_dev = "<<nnz_dev; cout<<"\n\n"; float *vect = vect_gen(n); //generating dense vector //////////////////////////// // Serial SpMV // float *host_res = new float[m]; clock_t begin = clock(); simple_spmv(host_res, vect, values, col_idx, row_off, nnz, m, n); clock_t end = clock(); double cpu_time = double(end - begin) / CLOCKS_PER_SEC; cpu_time = cpu_time * 1000; //////////////////////////// // Device Memory allocation // float *d_values, *d_res, *d_vect; int *d_row_off, *d_col_idx; cudaMalloc((void**)&d_values, sizeof(float)*nnz); cudaMalloc((void**)&d_col_idx, sizeof(int)*nnz); cudaMalloc((void**)&d_row_off, sizeof(int) * (m+1)); cudaMalloc((void**)&d_res, sizeof(float) * m); cudaMalloc((void**)&d_vect, sizeof(float) * n); //////////////////////////// // Host to device copy // cudaMemcpy(d_values,values,sizeof(float) * nnz,cudaMemcpyHostToDevice); cudaMemcpy(d_col_idx,col_idx,sizeof(int) * nnz,cudaMemcpyHostToDevice); cudaMemcpy(d_row_off,row_off,sizeof(int) * (m+1),cudaMemcpyHostToDevice); cudaMemcpy(d_vect,vect,sizeof(float) * n,cudaMemcpyHostToDevice); //////////////////////////// // Parallel SpMV // //////////////////////////// dim3 dimBlock(BLOCK_SIZE,1,1); dim3 dimGrid_1((m-1)/BLOCK_SIZE + 1,1,1); dim3 dimGrid_2((m-1)/32 + 1,1,1); int threads_per_row = min(32, nearest_pow_2(nnz_avg)); dim3 
dimGrid_3((m-1)/(1024/threads_per_row)+1, 1, 1); // Calling one thread per row kernel cudaEventRecord(start); parallel_spmv_1<<<dimGrid_1,dimBlock>>> (d_values, d_col_idx, d_row_off, d_vect, d_res, m, n, nnz); cudaEventRecord(stop); cudaEventSynchronize(stop); float gpu_time_1 = 0; cudaEventElapsedTime(&gpu_time_1, start, stop); // calling one warp per row kernel cudaEventRecord(start); parallel_spmv_2<<<dimGrid_2,dimBlock>>> (d_values, d_col_idx, d_row_off, d_vect, d_res, m, n, nnz); cudaEventRecord(stop); cudaEventSynchronize(stop); float gpu_time_2 = 0; cudaEventElapsedTime(&gpu_time_2, start, stop); // calling avg threads per row kernel cudaEventRecord(start); parallel_spmv_3<<<dimGrid_3,dimBlock>>> (d_values, d_col_idx, d_row_off, d_vect, d_res, m, n, nnz, threads_per_row); cudaEventRecord(stop); cudaEventSynchronize(stop); float gpu_time_3 = 0; cudaEventElapsedTime(&gpu_time_3, start, stop); //////////////////////////// // Copy result to host // float * result_from_device = new float[m]; cudaMemcpy(result_from_device, d_res, sizeof(float)*n,cudaMemcpyDeviceToHost); //////////////////////////// // Check Result // checker(result_from_device, host_res, m); //////////////////////////// // Free Device Memory // cudaFree(d_values); cudaFree(d_col_idx); cudaFree(d_row_off); cudaFree(d_res); cudaFree(d_vect); //////////////////////////// // Print Statistics // cout<<"\n\nCPU Execution time = "<<cpu_time<<" ms"; cout<<"\n\nGPU Execution time - Thread per Row = "<<gpu_time_1<<" ms"; cout<<"\n\nGPU Execution time - Warp per Row = "<<gpu_time_2<<" ms"; cout<<"\n\n\nThreads per row in avrg per row = "<<threads_per_row; cout<<"\nGPU Execution time - Avrg per Row = "<<gpu_time_3<<" ms"; cout<<"\n\n"; //////////////////////////// }
05935ebe4c4b9c4bb769a7bc03892638a3cefb4e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) OpenMMLab. All rights reserved #include "bbox_overlaps_cuda_kernel.cuh" #include "pytorch_cuda_helper.hpp" // Disable fp16 on ROCm device #ifndef HIP_DIFF #if __CUDA_ARCH__ >= 530 template <> __global__ void bbox_overlaps_cuda_kernel<at::Half>( const at::Half* bbox1, const at::Half* bbox2, at::Half* ious, const int num_bbox1, const int num_bbox2, const int mode, const bool aligned, const int offset) { bbox_overlaps_cuda_kernel_half(reinterpret_cast<const __half*>(bbox1), reinterpret_cast<const __half*>(bbox2), reinterpret_cast<__half*>(ious), num_bbox1, num_bbox2, mode, aligned, offset); } #endif // __CUDA_ARCH__ >= 530 #endif // HIP_DIFF void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, const Tensor bboxes2, Tensor ious, const int mode, const bool aligned, const int offset) { int output_size = ious.numel(); int num_bbox1 = bboxes1.size(0); int num_bbox2 = bboxes2.size(0); at::hip::HIPGuardMasqueradingAsCUDA device_guard(bboxes1.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( bboxes1.scalar_type(), "bbox_overlaps_cuda_kernel", ([&] { hipLaunchKernelGGL(( bbox_overlaps_cuda_kernel<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream, bboxes1.data_ptr<scalar_t>(), bboxes2.data_ptr<scalar_t>(), ious.data_ptr<scalar_t>(), num_bbox1, num_bbox2, mode, aligned, offset); })); AT_CUDA_CHECK(hipGetLastError()); }
05935ebe4c4b9c4bb769a7bc03892638a3cefb4e.cu
// Copyright (c) OpenMMLab. All rights reserved #include "bbox_overlaps_cuda_kernel.cuh" #include "pytorch_cuda_helper.hpp" // Disable fp16 on ROCm device #ifndef HIP_DIFF #if __CUDA_ARCH__ >= 530 template <> __global__ void bbox_overlaps_cuda_kernel<at::Half>( const at::Half* bbox1, const at::Half* bbox2, at::Half* ious, const int num_bbox1, const int num_bbox2, const int mode, const bool aligned, const int offset) { bbox_overlaps_cuda_kernel_half(reinterpret_cast<const __half*>(bbox1), reinterpret_cast<const __half*>(bbox2), reinterpret_cast<__half*>(ious), num_bbox1, num_bbox2, mode, aligned, offset); } #endif // __CUDA_ARCH__ >= 530 #endif // HIP_DIFF void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, const Tensor bboxes2, Tensor ious, const int mode, const bool aligned, const int offset) { int output_size = ious.numel(); int num_bbox1 = bboxes1.size(0); int num_bbox2 = bboxes2.size(0); at::cuda::CUDAGuard device_guard(bboxes1.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( bboxes1.scalar_type(), "bbox_overlaps_cuda_kernel", ([&] { bbox_overlaps_cuda_kernel<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>( bboxes1.data_ptr<scalar_t>(), bboxes2.data_ptr<scalar_t>(), ious.data_ptr<scalar_t>(), num_bbox1, num_bbox2, mode, aligned, offset); })); AT_CUDA_CHECK(cudaGetLastError()); }
6df08e81049eb5723da5cfbe3b7efa4b9395d5c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Open source copyright declaration based on BSD open source template: * http://www.opensource.org/licenses/bsd-license.php * * This file is part of the OPS distribution. * * Copyright (c) 2013, Mike Giles and others. Please see the AUTHORS file in * the main source directory for a full list of copyright holders. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The name of Mike Giles may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Mike Giles ''AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Mike Giles BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** @file * @brief OPS mpi+cuda run-time support routines * @author Gihan Mudalige, Istvan Reguly * @details Implements the runtime support routines for the OPS mpi+cuda * backend */ #include <ops_cuda_rt_support.h> #ifdef __cplusplus extern "C" { #endif int halo_buffer_size = 0; char *halo_buffer_d = NULL; __global__ void ops_cuda_packer_1(const char *__restrict src, char *__restrict dest, int count, int len, int stride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { dest[idx] = src[stride * block + idx % len]; } } __global__ void ops_cuda_packer_1_soa(const char *__restrict src, char *__restrict dest, int count, int len, int stride, int dim, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { for (int d=0; d<dim; d++) { dest[idx*dim+d] = src[stride * block + idx % len + d * size]; } } } __global__ void ops_cuda_unpacker_1(const char *__restrict src, char *__restrict dest, int count, int len, int stride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { dest[stride * block + idx % len] = src[idx]; } } __global__ void ops_cuda_unpacker_1_soa(const char *__restrict src, char *__restrict dest, int count, int len, int stride, int dim, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { for (int d=0; d<dim; d++) { dest[stride * block + idx % len + d * size] = src[idx*dim + d]; } } } __global__ void ops_cuda_packer_4(const int *__restrict src, int *__restrict dest, int count, int len, int stride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { dest[idx] = src[stride * block + idx % len]; } } __global__ void ops_cuda_unpacker_4(const int *__restrict src, int *__restrict dest, int count, int len, int stride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { 
dest[stride * block + idx % len] = src[idx]; } } void ops_pack_cuda_internal(ops_dat dat, const int src_offset, char *__restrict dest, const int halo_blocklength, const int halo_stride, const int halo_count) { if (dat->dirty_hd == 1) { ops_upload_dat(dat); dat->dirty_hd = 0; } const char *__restrict src = dat->data_d + src_offset * (OPS_soa ? dat->type_size : dat->elem_size); if (halo_buffer_size < halo_count * halo_blocklength * dat->dim && !OPS_gpu_direct) { if (halo_buffer_d != NULL) cutilSafeCall(hipFree(halo_buffer_d)); cutilSafeCall(hipMalloc((void **)&halo_buffer_d, halo_count * halo_blocklength * dat->dim * 4)); halo_buffer_size = halo_count * halo_blocklength * dat->dim * 4; } char *device_buf = NULL; if (OPS_gpu_direct) device_buf = dest; else device_buf = halo_buffer_d; if (OPS_soa) { int num_threads = 128; int num_blocks = ((halo_blocklength * halo_count) - 1) / num_threads + 1; hipLaunchKernelGGL(( ops_cuda_packer_1_soa), dim3(num_blocks), dim3(num_threads), 0, 0, src, device_buf, halo_count, halo_blocklength, halo_stride, dat->dim, dat->size[0]*dat->size[1]*dat->size[2]*dat->type_size); cutilSafeCall(hipGetLastError()); } else if (halo_blocklength % 4 == 0) { int num_threads = 128; int num_blocks = (((dat->dim * halo_blocklength / 4) * halo_count) - 1) / num_threads + 1; hipLaunchKernelGGL(( ops_cuda_packer_4), dim3(num_blocks), dim3(num_threads), 0, 0, (const int *)src, (int *)device_buf, halo_count, halo_blocklength*dat->dim / 4, halo_stride*dat->dim / 4); cutilSafeCall(hipGetLastError()); } else { int num_threads = 128; int num_blocks = ((dat->dim * halo_blocklength * halo_count) - 1) / num_threads + 1; hipLaunchKernelGGL(( ops_cuda_packer_1), dim3(num_blocks), dim3(num_threads), 0, 0, src, device_buf, halo_count, halo_blocklength*dat->dim, halo_stride*dat->dim); cutilSafeCall(hipGetLastError()); } if (!OPS_gpu_direct) cutilSafeCall(hipMemcpy(dest, halo_buffer_d, halo_count * halo_blocklength * dat->dim, hipMemcpyDeviceToHost)); else 
cutilSafeCall(hipDeviceSynchronize()); } void ops_unpack_cuda_internal(ops_dat dat, const int dest_offset, const char *__restrict src, const int halo_blocklength, const int halo_stride, const int halo_count) { if (dat->dirty_hd == 1) { ops_upload_dat(dat); dat->dirty_hd = 0; } char *__restrict dest = dat->data_d + dest_offset * (OPS_soa ? dat->type_size : dat->elem_size); if (halo_buffer_size < halo_count * halo_blocklength * dat->dim && !OPS_gpu_direct) { if (halo_buffer_d != NULL) cutilSafeCall(hipFree(halo_buffer_d)); cutilSafeCall(hipMalloc((void **)&halo_buffer_d, halo_count * halo_blocklength * dat->dim * 4)); halo_buffer_size = halo_count * halo_blocklength * dat->dim * 4; } const char *device_buf = NULL; if (OPS_gpu_direct) device_buf = src; else device_buf = halo_buffer_d; if (!OPS_gpu_direct) cutilSafeCall(hipMemcpy(halo_buffer_d, src, halo_count * halo_blocklength * dat->dim, hipMemcpyHostToDevice)); if (OPS_soa) { int num_threads = 128; int num_blocks = ((halo_blocklength * halo_count) - 1) / num_threads + 1; hipLaunchKernelGGL(( ops_cuda_unpacker_1_soa), dim3(num_blocks), dim3(num_threads), 0, 0, device_buf, dest, halo_count, halo_blocklength, halo_stride, dat->dim, dat->size[0]*dat->size[1]*dat->size[2]*dat->type_size); cutilSafeCall(hipGetLastError()); } else if (halo_blocklength % 4 == 0) { int num_threads = 128; int num_blocks = (((dat->dim * halo_blocklength / 4) * halo_count) - 1) / num_threads + 1; hipLaunchKernelGGL(( ops_cuda_unpacker_4), dim3(num_blocks), dim3(num_threads), 0, 0, (const int *)device_buf, (int *)dest, halo_count, halo_blocklength*dat->dim / 4, halo_stride*dat->dim / 4); cutilSafeCall(hipGetLastError()); } else { int num_threads = 128; int num_blocks = ((dat->dim * halo_blocklength * halo_count) - 1) / num_threads + 1; hipLaunchKernelGGL(( ops_cuda_unpacker_1), dim3(num_blocks), dim3(num_threads), 0, 0, device_buf, dest, halo_count, halo_blocklength*dat->dim, halo_stride*dat->dim); cutilSafeCall(hipGetLastError()); } 
dat->dirty_hd = 2; } char* ops_realloc_fast(char *ptr, size_t olds, size_t news) { if (OPS_gpu_direct) { if (ptr == NULL) { cutilSafeCall(hipMalloc((void **)&ptr, news)); return ptr; } else { if (OPS_diags>3) printf("Warning: cuda cache realloc\n"); char *ptr2; cutilSafeCall(hipMalloc((void **)&ptr2, news)); cutilSafeCall(hipMemcpy(ptr2, ptr, olds, hipMemcpyDeviceToDevice)); cutilSafeCall(hipFree(ptr)); return ptr2; } } else { char *ptr2; cutilSafeCall(hipHostMalloc((void**)&ptr2,news)); //TODO: is this aligned?? if (olds > 0) memcpy(ptr2, ptr, olds); if (ptr != NULL) cutilSafeCall(hipHostFree(ptr)); return ptr2; } } __global__ void copy_kernel_tobuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) { int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z); int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y); int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x); if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) && (y_step == 1 ? idx_y < ry_e : idx_y > ry_e) && (z_step == 1 ? 
idx_z < rz_e : idx_z > rz_e)) { if (OPS_soa) src += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size; else src += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim; dest += ((idx_z - rz_s) * z_step * buf_strides_z + (idx_y - ry_s) * y_step * buf_strides_y + (idx_x - rx_s) * x_step * buf_strides_x) * type_size * dim ; for (int d = 0; d < dim; d++) { memcpy(dest+d*type_size, src, type_size); if (OPS_soa) src += size_x * size_y * size_z * type_size; else src += type_size; } } } __global__ void copy_kernel_frombuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) { int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z); int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y); int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x); if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) && (y_step == 1 ? idx_y < ry_e : idx_y > ry_e) && (z_step == 1 ? 
idx_z < rz_e : idx_z > rz_e)) { if (OPS_soa) dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size; else dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim; src += ((idx_z - rz_s) * z_step * buf_strides_z + (idx_y - ry_s) * y_step * buf_strides_y + (idx_x - rx_s) * x_step * buf_strides_x) * type_size * dim; for (int d = 0; d < dim; d++) { memcpy(dest, src + d * type_size, type_size); if (OPS_soa) dest += size_x * size_y * size_z * type_size; else dest += type_size; } } } void ops_halo_copy_tobuf(char *dest, int dest_offset, ops_dat src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int buf_strides_x, int buf_strides_y, int buf_strides_z) { dest += dest_offset; int thr_x = abs(rx_s - rx_e); int blk_x = 1; if (abs(rx_s - rx_e) > 8) { blk_x = (thr_x - 1) / 8 + 1; thr_x = 8; } int thr_y = abs(ry_s - ry_e); int blk_y = 1; if (abs(ry_s - ry_e) > 8) { blk_y = (thr_y - 1) / 8 + 1; thr_y = 8; } int thr_z = abs(rz_s - rz_e); int blk_z = 1; if (abs(rz_s - rz_e) > 8) { blk_z = (thr_z - 1) / 8 + 1; thr_z = 8; } int size = abs(src->elem_size * (rx_e - rx_s) * (ry_e - ry_s) * (rz_e - rz_s)); char *gpu_ptr; if (OPS_gpu_direct) gpu_ptr = dest; else { if (halo_buffer_size < size) { if (halo_buffer_d != NULL) cutilSafeCall(hipFree(halo_buffer_d)); cutilSafeCall(hipMalloc((void **)&halo_buffer_d, size * sizeof(char))); halo_buffer_size = size; } gpu_ptr = halo_buffer_d; } if (src->dirty_hd == 1) { ops_upload_dat(src); src->dirty_hd = 0; } dim3 grid(blk_x, blk_y, blk_z); dim3 tblock(thr_x, thr_y, thr_z); hipLaunchKernelGGL(( copy_kernel_tobuf), dim3(grid), dim3(tblock), 0, 0, gpu_ptr, src->data_d, rx_s, rx_e, ry_s, ry_e, rz_s, rz_e, x_step, y_step, z_step, src->size[0], src->size[1], src->size[2], buf_strides_x, buf_strides_y, buf_strides_z, src->type_size, src->dim, OPS_soa); cutilSafeCall(hipGetLastError()); if (!OPS_gpu_direct) cutilSafeCall(hipMemcpy(dest, halo_buffer_d, size * 
sizeof(char), hipMemcpyDeviceToHost)); } void ops_halo_copy_frombuf(ops_dat dest, char *src, int src_offset, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int buf_strides_x, int buf_strides_y, int buf_strides_z) { src += src_offset; int thr_x = abs(rx_s - rx_e); int blk_x = 1; if (abs(rx_s - rx_e) > 8) { blk_x = (thr_x - 1) / 8 + 1; thr_x = 8; } int thr_y = abs(ry_s - ry_e); int blk_y = 1; if (abs(ry_s - ry_e) > 8) { blk_y = (thr_y - 1) / 8 + 1; thr_y = 8; } int thr_z = abs(rz_s - rz_e); int blk_z = 1; if (abs(rz_s - rz_e) > 8) { blk_z = (thr_z - 1) / 8 + 1; thr_z = 8; } int size = abs(dest->elem_size * (rx_e - rx_s) * (ry_e - ry_s) * (rz_e - rz_s)); char *gpu_ptr; if (OPS_gpu_direct) gpu_ptr = src; else { if (halo_buffer_size < size) { if (halo_buffer_d != NULL) cutilSafeCall(hipFree(halo_buffer_d)); cutilSafeCall(hipMalloc((void **)&halo_buffer_d, size * sizeof(char))); halo_buffer_size = size; } gpu_ptr = halo_buffer_d; cutilSafeCall(hipMemcpy(halo_buffer_d, src, size * sizeof(char), hipMemcpyHostToDevice)); } if (dest->dirty_hd == 1) { ops_upload_dat(dest); dest->dirty_hd = 0; } dim3 grid(blk_x, blk_y, blk_z); dim3 tblock(thr_x, thr_y, thr_z); hipLaunchKernelGGL(( copy_kernel_frombuf), dim3(grid), dim3(tblock), 0, 0, dest->data_d, gpu_ptr, rx_s, rx_e, ry_s, ry_e, rz_s, rz_e, x_step, y_step, z_step, dest->size[0], dest->size[1], dest->size[2], buf_strides_x, buf_strides_y, buf_strides_z, dest->type_size, dest->dim, OPS_soa); cutilSafeCall(hipGetLastError()); dest->dirty_hd = 2; } #ifdef __cplusplus } #endif
6df08e81049eb5723da5cfbe3b7efa4b9395d5c9.cu
/* * Open source copyright declaration based on BSD open source template: * http://www.opensource.org/licenses/bsd-license.php * * This file is part of the OPS distribution. * * Copyright (c) 2013, Mike Giles and others. Please see the AUTHORS file in * the main source directory for a full list of copyright holders. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The name of Mike Giles may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Mike Giles ''AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Mike Giles BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** @file * @brief OPS mpi+cuda run-time support routines * @author Gihan Mudalige, Istvan Reguly * @details Implements the runtime support routines for the OPS mpi+cuda * backend */ #include <ops_cuda_rt_support.h> #ifdef __cplusplus extern "C" { #endif int halo_buffer_size = 0; char *halo_buffer_d = NULL; __global__ void ops_cuda_packer_1(const char *__restrict src, char *__restrict dest, int count, int len, int stride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { dest[idx] = src[stride * block + idx % len]; } } __global__ void ops_cuda_packer_1_soa(const char *__restrict src, char *__restrict dest, int count, int len, int stride, int dim, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { for (int d=0; d<dim; d++) { dest[idx*dim+d] = src[stride * block + idx % len + d * size]; } } } __global__ void ops_cuda_unpacker_1(const char *__restrict src, char *__restrict dest, int count, int len, int stride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { dest[stride * block + idx % len] = src[idx]; } } __global__ void ops_cuda_unpacker_1_soa(const char *__restrict src, char *__restrict dest, int count, int len, int stride, int dim, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { for (int d=0; d<dim; d++) { dest[stride * block + idx % len + d * size] = src[idx*dim + d]; } } } __global__ void ops_cuda_packer_4(const int *__restrict src, int *__restrict dest, int count, int len, int stride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { dest[idx] = src[stride * block + idx % len]; } } __global__ void ops_cuda_unpacker_4(const int *__restrict src, int *__restrict dest, int count, int len, int stride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int block = idx / len; if (idx < count * len) { 
dest[stride * block + idx % len] = src[idx]; } } void ops_pack_cuda_internal(ops_dat dat, const int src_offset, char *__restrict dest, const int halo_blocklength, const int halo_stride, const int halo_count) { if (dat->dirty_hd == 1) { ops_upload_dat(dat); dat->dirty_hd = 0; } const char *__restrict src = dat->data_d + src_offset * (OPS_soa ? dat->type_size : dat->elem_size); if (halo_buffer_size < halo_count * halo_blocklength * dat->dim && !OPS_gpu_direct) { if (halo_buffer_d != NULL) cutilSafeCall(cudaFree(halo_buffer_d)); cutilSafeCall(cudaMalloc((void **)&halo_buffer_d, halo_count * halo_blocklength * dat->dim * 4)); halo_buffer_size = halo_count * halo_blocklength * dat->dim * 4; } char *device_buf = NULL; if (OPS_gpu_direct) device_buf = dest; else device_buf = halo_buffer_d; if (OPS_soa) { int num_threads = 128; int num_blocks = ((halo_blocklength * halo_count) - 1) / num_threads + 1; ops_cuda_packer_1_soa<<<num_blocks, num_threads>>>( src, device_buf, halo_count, halo_blocklength, halo_stride, dat->dim, dat->size[0]*dat->size[1]*dat->size[2]*dat->type_size); cutilSafeCall(cudaGetLastError()); } else if (halo_blocklength % 4 == 0) { int num_threads = 128; int num_blocks = (((dat->dim * halo_blocklength / 4) * halo_count) - 1) / num_threads + 1; ops_cuda_packer_4<<<num_blocks, num_threads>>>( (const int *)src, (int *)device_buf, halo_count, halo_blocklength*dat->dim / 4, halo_stride*dat->dim / 4); cutilSafeCall(cudaGetLastError()); } else { int num_threads = 128; int num_blocks = ((dat->dim * halo_blocklength * halo_count) - 1) / num_threads + 1; ops_cuda_packer_1<<<num_blocks, num_threads>>>( src, device_buf, halo_count, halo_blocklength*dat->dim, halo_stride*dat->dim); cutilSafeCall(cudaGetLastError()); } if (!OPS_gpu_direct) cutilSafeCall(cudaMemcpy(dest, halo_buffer_d, halo_count * halo_blocklength * dat->dim, cudaMemcpyDeviceToHost)); else cutilSafeCall(cudaDeviceSynchronize()); } void ops_unpack_cuda_internal(ops_dat dat, const int dest_offset, const 
char *__restrict src, const int halo_blocklength, const int halo_stride, const int halo_count) { if (dat->dirty_hd == 1) { ops_upload_dat(dat); dat->dirty_hd = 0; } char *__restrict dest = dat->data_d + dest_offset * (OPS_soa ? dat->type_size : dat->elem_size); if (halo_buffer_size < halo_count * halo_blocklength * dat->dim && !OPS_gpu_direct) { if (halo_buffer_d != NULL) cutilSafeCall(cudaFree(halo_buffer_d)); cutilSafeCall(cudaMalloc((void **)&halo_buffer_d, halo_count * halo_blocklength * dat->dim * 4)); halo_buffer_size = halo_count * halo_blocklength * dat->dim * 4; } const char *device_buf = NULL; if (OPS_gpu_direct) device_buf = src; else device_buf = halo_buffer_d; if (!OPS_gpu_direct) cutilSafeCall(cudaMemcpy(halo_buffer_d, src, halo_count * halo_blocklength * dat->dim, cudaMemcpyHostToDevice)); if (OPS_soa) { int num_threads = 128; int num_blocks = ((halo_blocklength * halo_count) - 1) / num_threads + 1; ops_cuda_unpacker_1_soa<<<num_blocks, num_threads>>>( device_buf, dest, halo_count, halo_blocklength, halo_stride, dat->dim, dat->size[0]*dat->size[1]*dat->size[2]*dat->type_size); cutilSafeCall(cudaGetLastError()); } else if (halo_blocklength % 4 == 0) { int num_threads = 128; int num_blocks = (((dat->dim * halo_blocklength / 4) * halo_count) - 1) / num_threads + 1; ops_cuda_unpacker_4<<<num_blocks, num_threads>>>( (const int *)device_buf, (int *)dest, halo_count, halo_blocklength*dat->dim / 4, halo_stride*dat->dim / 4); cutilSafeCall(cudaGetLastError()); } else { int num_threads = 128; int num_blocks = ((dat->dim * halo_blocklength * halo_count) - 1) / num_threads + 1; ops_cuda_unpacker_1<<<num_blocks, num_threads>>>( device_buf, dest, halo_count, halo_blocklength*dat->dim, halo_stride*dat->dim); cutilSafeCall(cudaGetLastError()); } dat->dirty_hd = 2; } char* ops_realloc_fast(char *ptr, size_t olds, size_t news) { if (OPS_gpu_direct) { if (ptr == NULL) { cutilSafeCall(cudaMalloc((void **)&ptr, news)); return ptr; } else { if (OPS_diags>3) 
printf("Warning: cuda cache realloc\n"); char *ptr2; cutilSafeCall(cudaMalloc((void **)&ptr2, news)); cutilSafeCall(cudaMemcpy(ptr2, ptr, olds, cudaMemcpyDeviceToDevice)); cutilSafeCall(cudaFree(ptr)); return ptr2; } } else { char *ptr2; cutilSafeCall(cudaMallocHost((void**)&ptr2,news)); //TODO: is this aligned?? if (olds > 0) memcpy(ptr2, ptr, olds); if (ptr != NULL) cutilSafeCall(cudaFreeHost(ptr)); return ptr2; } } __global__ void copy_kernel_tobuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) { int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z); int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y); int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x); if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) && (y_step == 1 ? idx_y < ry_e : idx_y > ry_e) && (z_step == 1 ? 
idx_z < rz_e : idx_z > rz_e)) { if (OPS_soa) src += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size; else src += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim; dest += ((idx_z - rz_s) * z_step * buf_strides_z + (idx_y - ry_s) * y_step * buf_strides_y + (idx_x - rx_s) * x_step * buf_strides_x) * type_size * dim ; for (int d = 0; d < dim; d++) { memcpy(dest+d*type_size, src, type_size); if (OPS_soa) src += size_x * size_y * size_z * type_size; else src += type_size; } } } __global__ void copy_kernel_frombuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) { int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z); int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y); int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x); if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) && (y_step == 1 ? idx_y < ry_e : idx_y > ry_e) && (z_step == 1 ? 
idx_z < rz_e : idx_z > rz_e)) { if (OPS_soa) dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size; else dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim; src += ((idx_z - rz_s) * z_step * buf_strides_z + (idx_y - ry_s) * y_step * buf_strides_y + (idx_x - rx_s) * x_step * buf_strides_x) * type_size * dim; for (int d = 0; d < dim; d++) { memcpy(dest, src + d * type_size, type_size); if (OPS_soa) dest += size_x * size_y * size_z * type_size; else dest += type_size; } } } void ops_halo_copy_tobuf(char *dest, int dest_offset, ops_dat src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int buf_strides_x, int buf_strides_y, int buf_strides_z) { dest += dest_offset; int thr_x = abs(rx_s - rx_e); int blk_x = 1; if (abs(rx_s - rx_e) > 8) { blk_x = (thr_x - 1) / 8 + 1; thr_x = 8; } int thr_y = abs(ry_s - ry_e); int blk_y = 1; if (abs(ry_s - ry_e) > 8) { blk_y = (thr_y - 1) / 8 + 1; thr_y = 8; } int thr_z = abs(rz_s - rz_e); int blk_z = 1; if (abs(rz_s - rz_e) > 8) { blk_z = (thr_z - 1) / 8 + 1; thr_z = 8; } int size = abs(src->elem_size * (rx_e - rx_s) * (ry_e - ry_s) * (rz_e - rz_s)); char *gpu_ptr; if (OPS_gpu_direct) gpu_ptr = dest; else { if (halo_buffer_size < size) { if (halo_buffer_d != NULL) cutilSafeCall(cudaFree(halo_buffer_d)); cutilSafeCall(cudaMalloc((void **)&halo_buffer_d, size * sizeof(char))); halo_buffer_size = size; } gpu_ptr = halo_buffer_d; } if (src->dirty_hd == 1) { ops_upload_dat(src); src->dirty_hd = 0; } dim3 grid(blk_x, blk_y, blk_z); dim3 tblock(thr_x, thr_y, thr_z); copy_kernel_tobuf<<<grid, tblock>>>( gpu_ptr, src->data_d, rx_s, rx_e, ry_s, ry_e, rz_s, rz_e, x_step, y_step, z_step, src->size[0], src->size[1], src->size[2], buf_strides_x, buf_strides_y, buf_strides_z, src->type_size, src->dim, OPS_soa); cutilSafeCall(cudaGetLastError()); if (!OPS_gpu_direct) cutilSafeCall(cudaMemcpy(dest, halo_buffer_d, size * sizeof(char), cudaMemcpyDeviceToHost)); } 
void ops_halo_copy_frombuf(ops_dat dest, char *src, int src_offset, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int buf_strides_x, int buf_strides_y, int buf_strides_z) { src += src_offset; int thr_x = abs(rx_s - rx_e); int blk_x = 1; if (abs(rx_s - rx_e) > 8) { blk_x = (thr_x - 1) / 8 + 1; thr_x = 8; } int thr_y = abs(ry_s - ry_e); int blk_y = 1; if (abs(ry_s - ry_e) > 8) { blk_y = (thr_y - 1) / 8 + 1; thr_y = 8; } int thr_z = abs(rz_s - rz_e); int blk_z = 1; if (abs(rz_s - rz_e) > 8) { blk_z = (thr_z - 1) / 8 + 1; thr_z = 8; } int size = abs(dest->elem_size * (rx_e - rx_s) * (ry_e - ry_s) * (rz_e - rz_s)); char *gpu_ptr; if (OPS_gpu_direct) gpu_ptr = src; else { if (halo_buffer_size < size) { if (halo_buffer_d != NULL) cutilSafeCall(cudaFree(halo_buffer_d)); cutilSafeCall(cudaMalloc((void **)&halo_buffer_d, size * sizeof(char))); halo_buffer_size = size; } gpu_ptr = halo_buffer_d; cutilSafeCall(cudaMemcpy(halo_buffer_d, src, size * sizeof(char), cudaMemcpyHostToDevice)); } if (dest->dirty_hd == 1) { ops_upload_dat(dest); dest->dirty_hd = 0; } dim3 grid(blk_x, blk_y, blk_z); dim3 tblock(thr_x, thr_y, thr_z); copy_kernel_frombuf<<<grid, tblock>>>( dest->data_d, gpu_ptr, rx_s, rx_e, ry_s, ry_e, rz_s, rz_e, x_step, y_step, z_step, dest->size[0], dest->size[1], dest->size[2], buf_strides_x, buf_strides_y, buf_strides_z, dest->type_size, dest->dim, OPS_soa); cutilSafeCall(cudaGetLastError()); dest->dirty_hd = 2; } #ifdef __cplusplus } #endif
8807eb2f3ed31f032b080a25357e81ff3eee5c89.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* nvcc StarBuilder.cu -o StarBuilder.exe -lglut -lGL -lGLU -lm -arch=sm_60 nvcc StarBuilder.cu -o StarBuilder.exe -lglut -lGL -lGLU -lm -arch=compute_60 -code=sm_60 nvcc StarBuilder.cu -o StarBuilder.exe -lglut -lGL -lGLU -lm --use_fast_math */ #include "../CommonCompileFiles/binaryStarCommonIncludes.h" #include "../CommonCompileFiles/binaryStarCommonDefines.h" #include "../CommonCompileFiles/binaryStarCommonGlobals.h" #include "../CommonCompileFiles/binaryStarCommonFunctions.h" //Globals read in from the BiuldSetup file all except the number of elements will need to be put into our units double MassOfStar1, DiameterStar1, MassOfCore1, DiameterCore1; double MassOfStar2, DiameterStar2, MassOfCore2, DiameterCore2; float4 InitialSpin1, InitialSpin2; //These next 4 do not need to be globals in the run files because they are store in the elements double PressurePlasma1; double PressurePlasma2; double PushBackCoreMult1; double PushBackCoreMult2; double MaxInitialPlasmaSpeed; double RedGiantVolumeGrowth; double RawStarDampAmount; double RawStarDampTime; int RawStarDampLevels; double RawStarRestTime; double DiameterTolerance; double DiameterAdjustmentSoftener; double DiameterAdjustmentDamp; double DiameterAdjustmentTime; double DiameterAdjustmentRestTime; double SpinRestTime; void createFolderForNewStars() { //Create output folder to store the stars time_t t = time(0); struct tm * now = localtime( & t ); int month = now->tm_mon + 1, day = now->tm_mday, curTimeHour = now->tm_hour, curTimeMin = now->tm_min; stringstream smonth, sday, stimeHour, stimeMin; smonth << month; sday << day; stimeHour << curTimeHour; stimeMin << curTimeMin; string monthday; if (curTimeMin <= 9) monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":0" + stimeMin.str(); else monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":" + stimeMin.str(); string foldernametemp = "Stars:" 
+ monthday; const char *starFolderName = foldernametemp.c_str(); mkdir(starFolderName , S_IRWXU|S_IRWXG|S_IRWXO); FILE *fileIn; FILE *fileOut; long sizeOfFile; char *buffer; //Moving into the new directory and creating folders. chdir(starFolderName); mkdir("FilesFromBuild" , S_IRWXU|S_IRWXG|S_IRWXO); mkdir("CommonCompileFiles" , S_IRWXU|S_IRWXG|S_IRWXO); mkdir("MainSourceFiles" , S_IRWXU|S_IRWXG|S_IRWXO); mkdir("ExecutableFiles" , S_IRWXU|S_IRWXG|S_IRWXO); mkdir("ContinueFiles" , S_IRWXU|S_IRWXG|S_IRWXO); //Copying the files that were used to build the raw stars into the star folder. //BuildSteup file fileIn = fopen("../BuildSetup", "rb"); if(fileIn == NULL) { printf("\n\n The BuildSetup file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("FilesFromBuild/BuildSetup", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../CommonCompileFiles/binaryStarCommonIncludes.h", "rb"); if(fileIn == NULL) { printf("\n\n The CommonCompileFiles/binaryStarCommonIncludes.h file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("CommonCompileFiles/binaryStarCommonIncludes.h", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../CommonCompileFiles/binaryStarCommonDefines.h", "rb"); if(fileIn == NULL) { printf("\n\n The CommonCompileFiles/binaryStarCommonDefines.h file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("CommonCompileFiles/binaryStarCommonDefines.h", "wb"); fwrite (buffer, 1, sizeOfFile, 
fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../CommonCompileFiles/binaryStarCommonGlobals.h", "rb"); if(fileIn == NULL) { printf("\n\n The CommonCompileFiles/binaryStarCommonGlobals.h file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("CommonCompileFiles/binaryStarCommonGlobals.h", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../CommonCompileFiles/binaryStarCommonFunctions.h", "rb"); if(fileIn == NULL) { printf("\n\n The CommonCompileFiles/binaryStarCommonFunctions.h file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("CommonCompileFiles/binaryStarCommonFunctions.h", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../CommonCompileFiles/binaryStarCommonRunGlobals.h", "rb"); if(fileIn == NULL) { printf("\n\n The CommonCompileFiles/binaryStarCommonRunGlobals.h file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("CommonCompileFiles/binaryStarCommonRunGlobals.h", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../CommonCompileFiles/binaryStarCommonRunFunctions.h", "rb"); if(fileIn == NULL) { printf("\n\n The CommonCompileFiles/binaryStarCommonRunFunctions.h file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = 
fopen("CommonCompileFiles/binaryStarCommonRunFunctions.h", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); //Main source files fileIn = fopen("../MainSourceFiles/StarBuilder.cu", "rb"); if(fileIn == NULL) { printf("\n\n The MainSourceFiles/StarBuilder.cu file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("MainSourceFiles/StarBuilder.cu", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../MainSourceFiles/StarBranchRun.cu", "rb"); if(fileIn == NULL) { printf("\n\n The MainSourceFiles/StarBranchRun.cu file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("MainSourceFiles/StarBranchRun.cu", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../MainSourceFiles/StarContinueRun.cu", "rb"); if(fileIn == NULL) { printf("\n\n The MainSourceFiles/StarContinueRun.cu file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("MainSourceFiles/StarContinueRun.cu", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../MainSourceFiles/Viewer.cu", "rb"); if(fileIn == NULL) { printf("\n\n The MainSourceFiles/Viewer.cu file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("MainSourceFiles/Viewer.cu", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); 
fclose(fileOut); //Executable files fileIn = fopen("../ExecutableFiles/StarBuilder.exe", "rb"); if(fileIn == NULL) { printf("\n\n The ExecutableFiles/StarBuilder.exe file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("ExecutableFiles/StarBuilder.exe", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../ExecutableFiles/StarBranchRun.exe", "rb"); if(fileIn == NULL) { printf("\n\n The ExecutableFiles/StarBranchRun.exe file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("ExecutableFiles/StarBranchRun.exe", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../ExecutableFiles/StarContinueRun.exe", "rb"); if(fileIn == NULL) { printf("\n\n The ExecutableFiles/StarContinueRun.exe file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("ExecutableFiles/StarContinueRun.exe", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../ExecutableFiles/Viewer.exe", "rb"); if(fileIn == NULL) { printf("\n\n The ExecutableFiles/Viewer.exe file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("ExecutableFiles/Viewer.exe", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); //Copying files into the main branch folder fileIn = fopen("../BranchAndContinueFiles/BranchRun", "rb"); 
if(fileIn == NULL) { printf("\n\n The BranchAndContinueFiles/BranchRun file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("BranchRun", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); //We will also copy the branch run source code and put it in the main branch folder and the BranchSetupTemplate. fileIn = fopen("../BranchAndContinueFiles/BranchSetupTemplate", "rb"); if(fileIn == NULL) { printf("\n\n The BranchAndContinueFiles/BranchSetupTemplate file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("BranchSetup", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); //Finally copying the continue run script and viewer into the ContinueFiles foldes fileIn = fopen("../BranchAndContinueFiles/ContinueRun", "rb"); if(fileIn == NULL) { printf("\n\n The BranchAndContinueFiles/ContinueRun file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("ContinueFiles/ContinueRun", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); fileIn = fopen("../BranchAndContinueFiles/Viewer", "rb"); if(fileIn == NULL) { printf("\n\n The BranchAndContinueFiles/Viewer file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell(fileIn); rewind (fileIn); buffer = (char*)malloc(sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("ContinueFiles/Viewer", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); free (buffer); //Giving the apropriate 
file execute permisions. I didn't give the starBuild execute permision because it //should only be used in the main folder. This file is only for reference. system("chmod 755 ./ExecutableFiles/StarBranchRun.exe"); system("chmod 755 ./ExecutableFiles/StarContinueRun.exe"); system("chmod 755 ./ExecutableFiles/Viewer.exe"); system("chmod 755 ./BranchRun"); } void readBuildParameters() { ifstream data; string name; data.open("../BuildSetup"); if(data.is_open() == 1) { getline(data,name,'='); data >> DiameterStar1; getline(data,name,'='); data >> DiameterCore1; getline(data,name,'='); data >> DiameterStar2; getline(data,name,'='); data >> DiameterCore2; getline(data,name,'='); data >> MassOfStar1; getline(data,name,'='); data >> MassOfCore1; getline(data,name,'='); data >> MassOfStar2; getline(data,name,'='); data >> MassOfCore2; getline(data,name,'='); data >> NumberElements; getline(data,name,'='); data >> InitialSpin1.x; getline(data,name,'='); data >> InitialSpin1.y; getline(data,name,'='); data >> InitialSpin1.z; getline(data,name,'='); data >> InitialSpin1.w; getline(data,name,'='); data >> InitialSpin2.x; getline(data,name,'='); data >> InitialSpin2.y; getline(data,name,'='); data >> InitialSpin2.z; getline(data,name,'='); data >> InitialSpin2.w; getline(data,name,'='); data >> PressurePlasma1; getline(data,name,'='); data >> PressurePlasma2; getline(data,name,'='); data >> PushBackCoreMult1; getline(data,name,'='); data >> PushBackCoreMult2; getline(data,name,'='); data >> CoreCorePushBackReduction; getline(data,name,'='); data >> CorePlasmaPushBackReduction; getline(data,name,'='); data >> PlasmaPlasmaPushBackReduction; getline(data,name,'='); data >> MaxInitialPlasmaSpeed; getline(data,name,'='); data >> RedGiantVolumeGrowth; getline(data,name,'='); data >> RawStarDampAmount; getline(data,name,'='); data >> RawStarDampTime; getline(data,name,'='); data >> RawStarDampLevels; getline(data,name,'='); data >> RawStarRestTime; getline(data,name,'='); data >> 
DiameterTolerance; getline(data,name,'='); data >> DiameterAdjustmentSoftener; getline(data,name,'='); data >> DiameterAdjustmentDamp; getline(data,name,'='); data >> DiameterAdjustmentTime; getline(data,name,'='); data >> DiameterAdjustmentRestTime; getline(data,name,'='); data >> SpinRestTime; getline(data,name,'='); data >> Dt; getline(data,name,'='); data >> ZoomFactor; getline(data,name,'='); data >> DrawRate; getline(data,name,'='); data >> PrintRate; getline(data,name,'='); data >> Core1Color.x; getline(data,name,'='); data >> Core1Color.y; getline(data,name,'='); data >> Core1Color.z; getline(data,name,'='); data >> Core2Color.x; getline(data,name,'='); data >> Core2Color.y; getline(data,name,'='); data >> Core2Color.z; getline(data,name,'='); data >> Envelope1Color.x; getline(data,name,'='); data >> Envelope1Color.y; getline(data,name,'='); data >> Envelope1Color.z; getline(data,name,'='); data >> Envelope2Color.x; getline(data,name,'='); data >> Envelope2Color.y; getline(data,name,'='); data >> Envelope2Color.z; } else { printf("\nTSU Error could not open BuildSetup file\n"); exit(0); } data.close(); } //This function sets the units such that the mass unit is the mass of a plasma element, //the length unit is the diameter of a plasma element and time unit such that G is 1. //It also splits the number of elements between the stars and creates convetion factors to standard units. void generateAndSaveRunParameters() { double massPlasmaElement; double diameterPlasmaElement; double totalMassPlasmaElements; MassOfStar1 *= MASS_SUN; MassOfStar2 *= MASS_SUN; MassOfCore1 *= MASS_SUN; MassOfCore2 *= MASS_SUN; DiameterStar1 *= DIAMETER_SUN; DiameterStar2 *= DIAMETER_SUN; DiameterCore1 *= DIAMETER_SUN; DiameterCore2 *= DIAMETER_SUN; totalMassPlasmaElements = (MassOfStar1 - MassOfCore1) + (MassOfStar2 - MassOfCore2); //The mass of a plasma element is just the total mass divided by the number of elements used. Need to subtract 2 because you have 2 cores. 
massPlasmaElement = totalMassPlasmaElements/((double)NumberElements - 2); //We will use the mass of a plasma element as one unit of mass. //The following constant will convert system masses up to kilograms by multipling //or convert kilograms down to system units by dividing. SystemMassConverterToKilograms = massPlasmaElement; //Dividing up the plasma elements between the 2 stars. //Need to subtract 2 because you have 2 core elements. NumberElementsStar1 = ((MassOfStar1 - MassOfCore1)/totalMassPlasmaElements)*((double)NumberElements - 2); NumberElementsStar2 = (NumberElements -2) - NumberElementsStar1; //Adding back the core elements. NumberElementsStar1 += 1; NumberElementsStar2 += 1; //Finding the diameter of the plasma elements is a bit more involved. First find the volume of the plasma Vpl = Vsun - Vcore. double volumePlasma = (4.0*PI/3.0)*( pow((DiameterStar1/2.0),3.0) - pow((DiameterCore1/2.0),3.0) ) + (4.0*PI/3.0)*( pow((DiameterStar2/2.0),3.0) - pow((DiameterCore2/2.0),3.0) ); //Now randum spheres only pack at 68 persent so to adjust for this we need to adjust for this. volumePlasma *= 0.68; //Now this is the volume the plasma but we would the star to grow in size by up 100 times. //I'm assuming when they this they mean volume. I will also make the amount it can grow a #define so it can be changed easily. volumePlasma = volumePlasma*RedGiantVolumeGrowth; //Now to find the volume of a plasma element divide this by the number of plasma elements. double volumePlasmaElement = volumePlasma/(NumberElements -2); //Now to find the diameter of a plasma element we need to find the diameter to make this volume. diameterPlasmaElement = pow(6.0*volumePlasmaElement/PI, (1.0/3.0)); //We will use the diameter of a plasma element as one unit of length. //The following constant will convert system lengths up to kilometers by multipling //or convert kilometers down to system units by dividing. 
SystemLengthConverterToKilometers = diameterPlasmaElement; //We will use a time unit so that the universal gravitational constant will be 1. //The following constant will convert system times up to seconds by multipling //or convert seconds down to system units by dividing. Make sure UniversalGravity is fed into the program in kilograms kilometers and seconds! SystemTimeConverterToSeconds = sqrt(pow(SystemLengthConverterToKilometers,3)/(SystemMassConverterToKilograms*UNIVERSAL_GRAVITY_CONSTANT)); //Putting things with mass into our units. Taking kilograms into our units. MassOfStar1 /= SystemMassConverterToKilograms; MassOfCore1 /= SystemMassConverterToKilograms; MassOfStar2 /= SystemMassConverterToKilograms; MassOfCore2 /= SystemMassConverterToKilograms; //Putting things with length into our units. Taking kilometers into our units. DiameterStar1 /= SystemLengthConverterToKilometers; DiameterCore1 /= SystemLengthConverterToKilometers; DiameterStar2 /= SystemLengthConverterToKilometers; DiameterCore2 /= SystemLengthConverterToKilometers; //Putting things with time into our units. Dt *= (3600.0/SystemTimeConverterToSeconds); //It was in hours so take it to seconds first. RawStarDampTime *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. RawStarRestTime *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. DiameterAdjustmentTime *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. DiameterAdjustmentRestTime *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. SpinRestTime *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. PrintRate *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. //Putting Angular Velocities into our units. Taking revolutions/hour into our units. Must take it to seconds first. 
InitialSpin1.w *= SystemTimeConverterToSeconds/3600.0; InitialSpin2.w *= SystemTimeConverterToSeconds/3600.0; //Putting push back parameters into our units. kilograms*kilometersE-1*secondsE-2 into our units. //This will be multiplied by an area to make it a force PressurePlasma1 /= SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers); PressurePlasma2 /= SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers); FILE *runParametersFile; runParametersFile = fopen("FilesFromBuild/RunParameters", "wb"); fprintf(runParametersFile, "\n SystemLengthConverterToKilometers = %e", SystemLengthConverterToKilometers); fprintf(runParametersFile, "\n SystemMassConverterToKilograms = %e", SystemMassConverterToKilograms); fprintf(runParametersFile, "\n SystemTimeConverterToSeconds = %e", SystemTimeConverterToSeconds); fprintf(runParametersFile, "\n NumberElementsStar1 = %d", NumberElementsStar1); fprintf(runParametersFile, "\n NumberElementsStar2 = %d", NumberElementsStar2); fprintf(runParametersFile, "\n Core Core Push Back Reduction = %f", CoreCorePushBackReduction); fprintf(runParametersFile, "\n Core Plasma Push Back Reduction = %f", CorePlasmaPushBackReduction); fprintf(runParametersFile, "\n Plasma Plasma Push Back Reduction = %f", PlasmaPlasmaPushBackReduction); fprintf(runParametersFile, "\n Time step Dt = %f", Dt); fprintf(runParametersFile, "\n Zoom factor = %f", ZoomFactor); fprintf(runParametersFile, "\n Print rate = %f", PrintRate); fprintf(runParametersFile, "\n Core1Color.x = %f", Core1Color.x); fprintf(runParametersFile, "\n Core1Color.y = %f", Core1Color.y); fprintf(runParametersFile, "\n Core1Color.z = %f", Core1Color.z); fprintf(runParametersFile, "\n Core2Color.x = %f", Core2Color.x); fprintf(runParametersFile, "\n Core2Color.y = %f", Core2Color.y); fprintf(runParametersFile, "\n Core2Color.z = %f", Core2Color.z); 
fprintf(runParametersFile, "\n Envelope1Color.x = %f", Envelope1Color.x); fprintf(runParametersFile, "\n Envelope1Color.y = %f", Envelope1Color.y); fprintf(runParametersFile, "\n Envelope1Color.z = %f", Envelope1Color.z); fprintf(runParametersFile, "\n Envelope2Color.x = %f", Envelope2Color.x); fprintf(runParametersFile, "\n Envelope2Color.y = %f", Envelope2Color.y); fprintf(runParametersFile, "\n Envelope2Color.z = %f", Envelope2Color.z); fprintf(runParametersFile, "\n RadiusCore1 = %f", DiameterCore1/2.0); fprintf(runParametersFile, "\n RadiusCore2 = %f", DiameterCore2/2.0); fclose(runParametersFile); } int createRawStar(int starNumber) { //int cubeStart; int elementStart, elementStop; int element, cubeLayer; int x, y, z; double elementMass, elementDiameter, elementPressure; double mag, speed, seperation; time_t t; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; elementMass = 1.0; // The mass unit was set so 1 is the mass of an element. elementDiameter = 1.0; // The length unit was set so 1 is the diameter of an element. elementPressure = PressurePlasma1; PosCPU[0].x = 0.0; PosCPU[0].y = 0.0; PosCPU[0].z = 0.0; PosCPU[0].w = MassOfCore1; VelCPU[0].x = 0.0; VelCPU[0].y = 0.0; VelCPU[0].z = 0.0; VelCPU[0].w = PushBackCoreMult1; ForceCPU[0].x = 0.0; ForceCPU[0].y = 0.0; ForceCPU[0].z = 0.0; ForceCPU[0].w = DiameterCore1; if(DiameterCore2 < elementDiameter) { cubeLayer = elementDiameter ; } else { cubeLayer = (int)DiameterCore1 + 1; // This is the size of the cube the core takes up. Added 1 to be safe. } element = elementStart + 1; //Add 1 because the core is the first element. } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; elementMass = 1.0; // The mass unit was set so 1 is the mass of an element. elementDiameter = 1.0; // The length unit was set so 1 is the diameter of an element. 
elementPressure = PressurePlasma2; PosCPU[NumberElementsStar1].x = 0.0; PosCPU[NumberElementsStar1].y = 0.0; PosCPU[NumberElementsStar1].z = 0.0; PosCPU[NumberElementsStar1].w = MassOfCore2; VelCPU[NumberElementsStar1].x = 0.0; VelCPU[NumberElementsStar1].y = 0.0; VelCPU[NumberElementsStar1].z = 0.0; VelCPU[NumberElementsStar1].w = PushBackCoreMult2; ForceCPU[NumberElementsStar1].x = 0.0; ForceCPU[NumberElementsStar1].y = 0.0; ForceCPU[NumberElementsStar1].z = 0.0; ForceCPU[NumberElementsStar1].w = DiameterCore2; if(DiameterCore2 < elementDiameter) { cubeLayer = elementDiameter; } else { cubeLayer = (int)DiameterCore2 + 1; // This is the size of the cube the core takes up. Added 1 to be safe. } element = elementStart + 1; //Add 1 because the core is the first element. } // The core is at (0,0,0) we then place elements in a cubic grid around it. Each element radius is 1 so we will walk out in units of 1. while(element < elementStop) { cubeLayer++; x = -cubeLayer; for(y = -cubeLayer; y <= cubeLayer; y++) { for(z = -cubeLayer; z <= cubeLayer; z++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y = (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } x = cubeLayer; for(y = -cubeLayer; y <= cubeLayer; y++) { for(z = -cubeLayer; z <= cubeLayer; z++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y = (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } y = -cubeLayer; for(x = -cubeLayer + 1; x <= cubeLayer - 1; x++) { for(z = -cubeLayer; z <= cubeLayer; z++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y = (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } y = cubeLayer; for(x = -cubeLayer + 1; x <= cubeLayer - 1; x++) { for(z = -cubeLayer; z <= cubeLayer; z++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y 
= (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } z = -cubeLayer; for(x = -cubeLayer + 1; x <= cubeLayer - 1; x++) { for(y = -cubeLayer + 1; y <= cubeLayer - 1; y++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y = (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } z = cubeLayer; for(x = -cubeLayer + 1; x <= cubeLayer - 1; x++) { for(y = -cubeLayer + 1; y <= cubeLayer - 1; y++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y = (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } } //Just checking to make sure I didn't put any elements on top of each other. for(int i = elementStart; i < elementStop; i++) { for(int j = elementStart; j < elementStop; j++) { if(i != j) { seperation = sqrt((PosCPU[i].x - PosCPU[j].x)*(PosCPU[i].x - PosCPU[j].x) + (PosCPU[i].y - PosCPU[j].y)*(PosCPU[i].y - PosCPU[j].y) + (PosCPU[i].z - PosCPU[j].z)*(PosCPU[i].z - PosCPU[j].z)); if(seperation < ASSUME_ZERO_DOUBLE) { printf("\n TSU error: Two elements are on top of each other in the creatRawStars function\n"); exit(0); } } else break; } } // Setting the randum number generater seed. srand((unsigned) time(&t)); // Giving each noncore particle a randium velocity to shake things up a little. Also setting the pushback and diameter of noncore particles. 
speed = MaxInitialPlasmaSpeed/SystemLengthConverterToKilometers/SystemTimeConverterToSeconds; for(int i = elementStart + 1; i < elementStop; i++) { VelCPU[i].x = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0;; VelCPU[i].y = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0;; VelCPU[i].z = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0;; mag = sqrt(VelCPU[i].x*VelCPU[i].x + VelCPU[i].y*VelCPU[i].y + VelCPU[i].z*VelCPU[i].z); speed = ((float)rand()/(float)RAND_MAX)*speed; VelCPU[i].x *= speed/mag; VelCPU[i].y *= speed/mag; VelCPU[i].z *= speed/mag; VelCPU[i].w = elementPressure; ForceCPU[i].x = 0.0; ForceCPU[i].y = 0.0; ForceCPU[i].z = 0.0; ForceCPU[i].w = elementDiameter; } return(1); } float3 getCenterOfMass(int starNumber) { double totalMass,cmx,cmy,cmz; float3 centerOfMass; int elementStart, elementStop; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; } cmx = 0.0; cmy = 0.0; cmz = 0.0; totalMass = 0.0; // This is asuming the mass of each element is 1. for(int i = elementStart; i < elementStop; i++) { cmx += PosCPU[i].x*PosCPU[i].w; cmy += PosCPU[i].y*PosCPU[i].w; cmz += PosCPU[i].z*PosCPU[i].w; totalMass += PosCPU[i].w; } centerOfMass.x = cmx/totalMass; centerOfMass.y = cmy/totalMass; centerOfMass.z = cmz/totalMass; return(centerOfMass); } float3 getAverageLinearVelocity(int starNumber) { double totalMass, avx, avy, avz; float3 averagelinearVelocity; int elementStart, elementStop; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; } avx = 0.0; avy = 0.0; avz = 0.0; totalMass = 0.0; // This is asuming the mass of each element is 1. 
for(int i = elementStart; i < elementStop; i++) { avx += VelCPU[i].x*PosCPU[i].w; avy += VelCPU[i].y*PosCPU[i].w; avz += VelCPU[i].z*PosCPU[i].w; totalMass += PosCPU[i].w; } averagelinearVelocity.x = avx/totalMass; averagelinearVelocity.y = avy/totalMass; averagelinearVelocity.z = avz/totalMass; return(averagelinearVelocity); } void setCenterOfMassToZero(int starNumber) { float3 centerOfMass; int elementStart, elementStop; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; } centerOfMass = getCenterOfMass(starNumber); for(int i = elementStart; i < elementStop; i++) { PosCPU[i].x -= centerOfMass.x; PosCPU[i].y -= centerOfMass.y; PosCPU[i].z -= centerOfMass.z; } } void setAverageVelocityToZero(int starNumber) { float3 averagelinearVelocity; int elementStart, elementStop; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; } averagelinearVelocity = getAverageLinearVelocity(starNumber); for(int i = elementStart; i < elementStop; i++) { VelCPU[i].x -= averagelinearVelocity.x; VelCPU[i].y -= averagelinearVelocity.y; VelCPU[i].z -= averagelinearVelocity.z; } } void spinStar(int starNumber) { double rx, ry, rz; //vector from center of mass to the position vector double nx, ny, nz; //Unit vector perpendicular to the plane of spin float3 centerOfMass; float4 spinVector; double mag; int elementStart, elementStop; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; spinVector.x = InitialSpin1.x; spinVector.y = InitialSpin1.y; spinVector.z = InitialSpin1.z; spinVector.w = InitialSpin1.w; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; spinVector.x = InitialSpin2.x; spinVector.y = InitialSpin2.y; spinVector.z = InitialSpin2.z; spinVector.w = InitialSpin2.w; } //Making sure the spin vector is 
a unit vector mag = sqrt(spinVector.x*spinVector.x + spinVector.y*spinVector.y + spinVector.z*spinVector.z); if(ASSUME_ZERO_DOUBLE < mag) { spinVector.x /= mag; spinVector.y /= mag; spinVector.z /= mag; } else { printf("\nTSU Error: In spinStar. The spin direction vector is zero.\n"); exit(0); } centerOfMass = getCenterOfMass(starNumber); for(int i = elementStart; i < elementStop; i++) { //Creating a vector from the center of mass to the point rx = PosCPU[i].x - centerOfMass.x; ry = PosCPU[i].y - centerOfMass.y; rz = PosCPU[i].z - centerOfMass.z; double magsquared = rx*rx + ry*ry + rz*rz; double spinDota = spinVector.x*rx + spinVector.y*ry + spinVector.z*rz; double perpendicularDistance = sqrt(magsquared - spinDota*spinDota); double perpendicularVelocity = spinVector.w*2.0*PI*perpendicularDistance; //finding unit vector perpendicular to both the position vector and the spin vector nx = (spinVector.y*rz - spinVector.z*ry); ny = -(spinVector.x*rz - spinVector.z*rx); nz = (spinVector.x*ry - spinVector.y*rx); mag = sqrt(nx*nx + ny*ny + nz*nz); if(mag != 0.0) { nx /= mag; ny /= mag; nz /= mag; //Spining the element VelCPU[i].x += perpendicularVelocity*nx; VelCPU[i].y += perpendicularVelocity*ny; VelCPU[i].z += perpendicularVelocity*nz; } } } double getStarRadius(int starNumber) { double max, radius, temp; double coreRadius; int elementStart, elementStop; int count; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; coreRadius = DiameterCore1/2.0; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; coreRadius = DiameterCore2/2.0; } if((elementStop - elementStart) == 1) { return(coreRadius); } else { radius = -1.0; for(int i = elementStart; i < elementStop; i++) { temp = sqrt(PosCPU[i].x*PosCPU[i].x + PosCPU[i].y*PosCPU[i].y + PosCPU[i].z*PosCPU[i].z); if(radius < temp) { radius = temp; } } max = radius; // At present the radius is the distance to the farthest element. 
I am going to reduce this radius by 1 percent // each iteration until 10 percent of the elements in the star are outside the radius. // Then average this with the farthest element. count = 0; while(count <= 0.1*elementStop) { radius = radius - radius*0.01; count = 0; for(int i = elementStart; i < elementStop; i++) { temp = sqrt(PosCPU[i].x*PosCPU[i].x + PosCPU[i].y*PosCPU[i].y + PosCPU[i].z*PosCPU[i].z); if(radius < temp) { count++; } } } return((radius+max)/2.0); } } void drawPictureSeperate() { double seperation; double diameterSun; double drawUnit; diameterSun = DIAMETER_SUN/SystemLengthConverterToKilometers; drawUnit = 1.0/(diameterSun/ZoomFactor); seperation = 3.0*diameterSun; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glColor3d(Core1Color.x,Core1Color.y,Core1Color.z); glPushMatrix(); glTranslatef(drawUnit*(PosCPU[0].x + seperation), drawUnit*PosCPU[0].y, drawUnit*PosCPU[0].z); glutSolidSphere(drawUnit*DiameterCore1/2.0,10,10); glPopMatrix(); glPointSize(2.0); glColor3d(Envelope1Color.x,Envelope1Color.y,Envelope1Color.z); glBegin(GL_POINTS); for(int i = 0 + 1; i < NumberElementsStar1; i++) { glVertex3f(drawUnit*(PosCPU[i].x + seperation), drawUnit*PosCPU[i].y, drawUnit*PosCPU[i].z); } glEnd(); glColor3d(1.0,1.0,1.0); glPushMatrix(); glTranslatef(drawUnit*seperation, 0.0, 0.0); glutWireSphere(drawUnit*DiameterStar1/2.0,10,10); glPopMatrix(); glColor3d(Core2Color.x,Core2Color.y,Core2Color.z); glPushMatrix(); glTranslatef(drawUnit*(PosCPU[NumberElementsStar1].x - seperation), drawUnit*PosCPU[NumberElementsStar1].y, drawUnit*PosCPU[NumberElementsStar1].z); glutSolidSphere(drawUnit*DiameterCore2/2.0,10,10); glPopMatrix(); glPointSize(2.0); glColor3d(Envelope2Color.x,Envelope2Color.y,Envelope2Color.z); glBegin(GL_POINTS); for(int i = NumberElementsStar1 + 1; i < NumberElements; i++) { glVertex3f(drawUnit*(PosCPU[i].x - seperation), drawUnit*PosCPU[i].y, drawUnit*PosCPU[i].z); } glEnd(); glColor3d(1.0,1.0,1.0); glPushMatrix(); 
glTranslatef(-drawUnit*seperation, 0.0, 0.0); glutWireSphere(drawUnit*DiameterStar2/2.0,10,10); glPopMatrix(); glutSwapBuffers(); } __global__ void getForcesSeperate(float4 *pos, float4 *vel, float4 *force, int numberElementsStar1, int NumberElements, float corePlasmaPushBackReduction, float plasmaPlasmaPushBackReduction, int gPUNumber, int gPUsUsed) { int id, ids, i, j, k; float4 posMe, velMe, forceMe; float4 partialForce; double forceSumX, forceSumY, forceSumZ; __shared__ float4 shPos[BLOCKSIZE]; __shared__ float4 shVel[BLOCKSIZE]; __shared__ float4 shForce[BLOCKSIZE]; //id = threadIdx.x + blockDim.x*blockIdx.x; id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber; if(NumberElements <= id) { printf("\n TSU error: id out of bounds in getForcesSeperate. \n"); } forceSumX = 0.0; forceSumY = 0.0; forceSumZ = 0.0; posMe.x = pos[id].x; posMe.y = pos[id].y; posMe.z = pos[id].z; posMe.w = pos[id].w; velMe.x = vel[id].x; velMe.y = vel[id].y; velMe.z = vel[id].z; velMe.w = vel[id].w; forceMe.x = force[id].x; forceMe.y = force[id].y; forceMe.z = force[id].z; forceMe.w = force[id].w; for(k =0; k < gPUsUsed; k++) { for(j = 0; j < gridDim.x; j++) { shPos[threadIdx.x] = pos [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k]; shVel[threadIdx.x] = vel [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k]; shForce[threadIdx.x] = force[threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k]; __syncthreads(); #pragma unroll 32 for(i = 0; i < blockDim.x; i++) { ids = i + blockDim.x*j + blockDim.x*gridDim.x*k; if((id < numberElementsStar1 && ids < numberElementsStar1) || (numberElementsStar1 <= id && numberElementsStar1 <= ids)) { if(id != ids) { if(id == 0 || id == numberElementsStar1) { partialForce = calculateCorePlasmaForce(0, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePlasmaPushBackReduction); } else if(ids == 0 || ids == numberElementsStar1) { partialForce = calculateCorePlasmaForce(1, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], 
corePlasmaPushBackReduction); // (continuation of the calculateCorePlasmaForce argument list from the previous tile code)
						}
						else
						{
							// Plain plasma-plasma interaction.
							partialForce = calculatePlasmaPlasmaForce(posMe, shPos[i], velMe, shVel[i], plasmaPlasmaPushBackReduction);
						}
						// Accumulate in double to reduce round-off over many pair contributions.
						forceSumX += partialForce.x;
						forceSumY += partialForce.y;
						forceSumZ += partialForce.z;
					}
				}
			}
			// All threads must finish reading this tile before the next iteration overwrites it.
			__syncthreads();
		}
	}
	// Write back the total force on this element (narrowed to float for storage).
	force[id].x = (float)forceSumX;
	force[id].y = (float)forceSumY;
	force[id].z = (float)forceSumZ;
}

// Damped leapfrog-style update: applies force minus a linear velocity drag
// (damp*v), then advances positions. pos.w holds the element mass.
// NOTE(review): no bounds check on id — assumes the launch exactly covers this
// GPU's slice of the elements; confirm against the launch configuration.
__global__ void moveBodiesDamped(float4 *pos, float4 *vel, float4 *force, float damp, float dt, int gPUNumber)
{
	int id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;

	vel[id].x += ((force[id].x-damp*vel[id].x)/pos[id].w)*dt;
	vel[id].y += ((force[id].y-damp*vel[id].y)/pos[id].w)*dt;
	vel[id].z += ((force[id].z-damp*vel[id].z)/pos[id].w)*dt;

	pos[id].x += vel[id].x*dt;
	pos[id].y += vel[id].y*dt;
	pos[id].z += vel[id].z*dt;
}

// Runs the N-body simulation for runTime (in system time units) with drag
// coefficient damp, stepping by dt, across gPUsUsed GPUs. Each step: compute
// forces on every GPU, move bodies, then broadcast each GPU's slice of
// positions/velocities to all other GPUs. Redraws every DrawRate steps and
// prints the elapsed time every PrintRate time units.
// NOTE(review): the slice size NumberElements/gPUsUsed uses integer division —
// presumably NumberElements is chosen divisible by gPUsUsed; otherwise the
// remainder elements would never be shared between GPUs. Verify in deviceSetup.
void starNbody(float runTime, float damp, float dt, int gPUsUsed)
{
	float time = 0.0;
	float printTime = 0.0;
	int tdraw = 0;
	int offSet = NumberElements/gPUsUsed;

	while(time < runTime)
	{
		//Finding the forces.
		for(int i = 0; i < gPUsUsed; i++)
		{
			hipSetDevice(i);
			errorCheck("hipSetDevice");
			hipLaunchKernelGGL(( getForcesSeperate), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosGPU[i], VelGPU[i], ForceGPU[i], NumberElementsStar1, NumberElements, CorePlasmaPushBackReduction, PlasmaPlasmaPushBackReduction, i, gPUsUsed);
			errorCheck("getForcesSeperate");
		}

		//Moving the elements.
		for(int i = 0; i < gPUsUsed; i++)
		{
			hipSetDevice(i);
			errorCheck("hipSetDevice");
			hipLaunchKernelGGL(( moveBodiesDamped), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosGPU[i], VelGPU[i], ForceGPU[i], damp, dt, i);
			errorCheck("moveBodiesDamped");
		}
		hipDeviceSynchronize();
		errorCheck("hipDeviceSynchronize");

		//Sharing memory: GPU i owns elements [i*offSet, (i+1)*offSet) and pushes
		//its updated slice to every other GPU.
		for(int i = 0; i < gPUsUsed; i++)
		{
			hipSetDevice(i);
			errorCheck("hipSetDevice");
			for(int j = 0; j < gPUsUsed; j++)
			{
				if(i != j)
				{
					hipMemcpyAsync(&PosGPU[j][i*offSet], &PosGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), hipMemcpyDeviceToDevice);
					errorCheck("hipMemcpy Pos A");
					hipMemcpyAsync(&VelGPU[j][i*offSet], &VelGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), hipMemcpyDeviceToDevice);
					errorCheck("hipMemcpy Vel");
				}
			}
		}
		hipDeviceSynchronize();
		errorCheck("hipDeviceSynchronize");

		time += dt;

		tdraw++;
		if(tdraw == DrawRate)
		{
			//Because it is shared above it will only need to be copied from one GPU.
			hipSetDevice(0);
			errorCheck("hipSetDevice");
			hipMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), hipMemcpyDeviceToHost);
			errorCheck("hipMemcpy Pos draw");
			drawPictureSeperate();
			tdraw = 0;
		}

		printTime += dt;
		if(PrintRate <= printTime)
		{
			printf("\n Time = %f days", time/(24.0*3600.0/SystemTimeConverterToSeconds));
			printTime = 0.0;
		}
	}
}

// Writes a binary snapshot (time stamp, then positions, velocities, forces of
// all elements) to FilesFromBuild/StartPosVelForce for later runs to read back.
void recordStartPosVelForceOfCreatedStars()
{
	FILE *startPosVelForceFile;
	float time = 0.0;

	startPosVelForceFile = fopen("FilesFromBuild/StartPosVelForce", "wb");
	fwrite(&time, sizeof(float), 1, startPosVelForceFile);
	fwrite(PosCPU, sizeof(float4), NumberElements, startPosVelForceFile);
	fwrite(VelCPU, sizeof(float4), NumberElements, startPosVelForceFile);
	fwrite(ForceCPU, sizeof(float4), NumberElements, startPosVelForceFile);
	fclose(startPosVelForceFile);
}

// Reads the binary snapshot written by recordStartPosVelForceOfCreatedStars
// back into the host-side arrays. Exits if the file is missing.
void readStarsBackIn()
{
	float time;
	FILE *startFile = fopen("FilesFromBuild/StartPosVelForce","rb");
	if(startFile == NULL)
	{
		printf("\n\n The StartPosVelForce file does not exist\n\n");
		exit(0);
	}
	fread(&time, sizeof(float), 1,
startFile); // (continuation of readStarsBackIn: completes fread(&time, ...))
	fread(PosCPU, sizeof(float4), NumberElements, startFile);
	fread(VelCPU, sizeof(float4), NumberElements, startFile);
	fread(ForceCPU, sizeof(float4), NumberElements, startFile);
	fclose(startFile);
}

// Returns the average plasma push-back value (stored in VelCPU[i].w) over the
// plasma elements of the given star. The first element of each star is its
// core and is excluded (hence start = 1 / NumberElementsStar1 + 1).
double getAveragePlasmaPressure(int star)
{
	int start, stop;
	double temp = 0.0;

	if(star == 1)
	{
		start = 1;
		stop = NumberElementsStar1;
	}
	else
	{
		start = NumberElementsStar1 + 1;
		stop = NumberElements;
	}

	for(int i = start; i < stop; i++)
	{
		temp += VelCPU[i].w;
	}
	return(temp/((double)stop - (double)start));
}

// Writes a human-readable summary of the built stars (unit conversion factors,
// time step, average plasma pressures, element counts, masses, diameters,
// densities) to FilesFromBuild/StarBuildStats.
void recordStarStats()
{
	FILE *starStatsFile;
	double massStar1, radiusStar1, densityStar1;
	double massStar2, radiusStar2, densityStar2;
	double averagePlasmaPressure1, averagePlasmaPressure2;

	// Each plasma element has mass 1 in system units, so element count + core mass
	// gives the star's total mass in system units.
	massStar1 = (NumberElementsStar1 + MassOfCore1)*SystemMassConverterToKilograms;
	radiusStar1 = getStarRadius(1);
	radiusStar1 *= SystemLengthConverterToKilometers;
	densityStar1 = massStar1/((4.0/3.0)*PI*radiusStar1*radiusStar1*radiusStar1);

	// BUG FIX: star 2's mass previously used MassOfCore1 instead of MassOfCore2.
	massStar2 = (NumberElementsStar2 + MassOfCore2)*SystemMassConverterToKilograms;
	radiusStar2 = getStarRadius(2);
	radiusStar2 *= SystemLengthConverterToKilometers;
	// BUG FIX: star 2's density previously used massStar1 instead of massStar2.
	densityStar2 = massStar2/((4.0/3.0)*PI*radiusStar2*radiusStar2*radiusStar2);

	// Note: opened "wb" but written with fprintf; on POSIX "wb" and "w" are identical.
	starStatsFile = fopen("FilesFromBuild/StarBuildStats", "wb");

	fprintf(starStatsFile, " The conversion parameters to take you to and from our units to kilograms, kilometers, seconds follow\n");
	fprintf(starStatsFile, " Mass in our units is the mass of an element. In other words the mass of an element is one.\n");
	fprintf(starStatsFile, " Length in our units is the diameter of an element. In other words the diameter of an element is one.\n");
	fprintf(starStatsFile, " Time in our units is set so that the universal gravitational constant is 1.");
	fprintf(starStatsFile, "\n ");
	fprintf(starStatsFile, "\n Our length unit is this many kilometers: %e", SystemLengthConverterToKilometers);
	fprintf(starStatsFile, "\n Our mass unit is this many kilograms: %e", SystemMassConverterToKilograms);
	fprintf(starStatsFile, "\n Our time unit is this many seconds: %e or days %e\n\n", SystemTimeConverterToSeconds, SystemTimeConverterToSeconds/(60*60*24));
	fprintf(starStatsFile, "\n Our time step is this many of our units %f", Dt);
	fprintf(starStatsFile, "\n Our time step is this many second: %e or hours: %e\n\n", Dt*SystemTimeConverterToSeconds, Dt*SystemTimeConverterToSeconds/(60.0*60.0));

	averagePlasmaPressure1 = getAveragePlasmaPressure(1);
	averagePlasmaPressure2 = getAveragePlasmaPressure(2);
	fprintf(starStatsFile, "\n Average PressurePlasma1 in our units is: %e", averagePlasmaPressure1);
	fprintf(starStatsFile, "\n Average PressurePlasma2 in our units is: %e", averagePlasmaPressure2);
	fprintf(starStatsFile, "\n Average PressurePlasma1 in our given units is: %e", averagePlasmaPressure1*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers)));
	fprintf(starStatsFile, "\n Average PressurePlasma2 in our given units is: %e", averagePlasmaPressure2*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers)));
	fprintf(starStatsFile, "\n ");
	fprintf(starStatsFile, "\n Total number of elements in star1: %d", NumberElementsStar1);
	fprintf(starStatsFile, "\n Total number of elements in star2: %d", NumberElementsStar2);
	fprintf(starStatsFile, "\n ");
	fprintf(starStatsFile, "\n Mass of Star1 = %e kilograms in Sun units = %e", massStar1, massStar1/MASS_SUN);
	fprintf(starStatsFile, "\n Diameter of Star1 = %e kilometers in Sun units = %e", 2.0*radiusStar1, 2.0*radiusStar1/DIAMETER_SUN);
	fprintf(starStatsFile, "\n Density of star1 = %e kilograms/(cubic kilometers)", densityStar1);
	fprintf(starStatsFile, "\n Mass of Star2 = %e kilograms in Sun units = %e", massStar2, massStar2/MASS_SUN);
	fprintf(starStatsFile, "\n Diameter of Star2 = %e kilometers in Sun units = %e", 2.0*radiusStar2, 2.0*radiusStar2/DIAMETER_SUN);
	fprintf(starStatsFile, "\n Density of star2 = %e kilograms/(cubic kilometers)", densityStar2);
	fclose(starStatsFile);
}

// SIGINT handler: prompts the user and terminates only after 666 is entered
// twice. Any other input lets the run continue.
static void signalHandler(int signum)
{
	int command;

	cout << "\n\n******************************************************" << endl;
	cout << "Enter:666 to kill the run." << endl;
	cout << "******************************************************\n\nCommand: ";
	cin >> command;

	if(command == 666)
	{
		cout << "\n\n******************************************************" << endl;
		cout << "Are you sure you want to terminate the run?" << endl;
		cout << "Enter:666 again if you are sure. Enter anything else to continue the run." << endl;
		cout << "******************************************************\n\nCommand: ";
		cin >> command;

		if(command == 666)
		{
			exit(0);
		}
	}
	else
	{
		cout <<"\n\n Invalid Command\n" << endl;
	}
	// BUG FIX: an unconditional exit(0) used to follow here, so ANY input killed
	// the run — contradicting the "Enter anything else to continue" prompt.
	// Returning lets the interrupted run resume (sigaction is installed with SA_RESTART).
}

// Top-level build driver: creates the output folders, reads the build setup,
// builds and relaxes both raw stars on the GPUs, adjusts their radii, spins
// them, and records the results. Installed as the GLUT idle callback.
void control()
{
	struct sigaction sa;
	float damp, time;
	int gPUsUsed;
	clock_t startTimer, endTimer;

	//Starting the timer.
	startTimer = clock();

	// Handling input from the screen.
	sa.sa_handler = signalHandler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART; // Restart functions if interrupted by handler
	if (sigaction(SIGINT, &sa, NULL) == -1)
	{
		printf("\nTSU Error: sigaction error\n");
	}

	// Creating folder to hold the newly created stars and moving into that folder. It also makes a copy of the BiuldSetup file in this folder.
	printf("\n Creating folders for new stars. \n");
	createFolderForNewStars();

	// Reading in the build parameters to a file.
	printf("\n Reading build parameters. \n");
	readBuildParameters();

	// Creating and saving the run parameters to a file.
printf("\n Saving run parameters. \n"); // (continuation of control(): runs after readBuildParameters)
	generateAndSaveRunParameters();

	// Allocating memory for CPU and GPU.
	printf("\n Allocating memory. \n");
	allocateCPUMemory();

	// Generating raw stars
	printf("\n Generating raw star1. \n");
	createRawStar(1);
	printf("\n Generating raw star2. \n");
	createRawStar(2);
	drawPictureSeperate();
	//while(1);

	// Seting up the GPU.
	printf("\n Setting up GPUs \n");
	gPUsUsed = deviceSetup();

	// The raw stars are in unnatural positions and have unnatural velocities.
	// The stars need to be run with a damping factor turned on
	// to let the stars move into naturl configurations. The damp will start high and be reduced to zero
	time = RawStarDampTime/RawStarDampLevels;
	printf("\n Damping raw stars for = %f hours, Dt = %f hours\n", time*SystemTimeConverterToSeconds/3600.0, Dt*SystemTimeConverterToSeconds/3600.0);
	copyStarsUpToGPU(gPUsUsed);
	// Damping schedule: damp decreases linearly from RawStarDampAmount toward zero
	// over RawStarDampLevels stages of equal duration.
	for(int i = 0; i < RawStarDampLevels; i++)
	{
		damp = RawStarDampAmount - float(i)*RawStarDampAmount/((float)RawStarDampLevels);
		printf("\n Damping raw stars interation %d out of %d", i+1, RawStarDampLevels);
		starNbody(time, damp, Dt, gPUsUsed);
	}

	// Letting any residue from the damping settle out.
	time = RawStarRestTime;
	printf("\n\n Resting raw damped stars for %f hours", time*SystemTimeConverterToSeconds/3600.0);
	starNbody(time, 0.0, Dt, gPUsUsed);

	// Centering the stars and taking out any drift.
	copyStarsDownFromGPU();
	setCenterOfMassToZero(1);
	setCenterOfMassToZero(2);
	setAverageVelocityToZero(1);
	setAverageVelocityToZero(2);

	// Now we need to set the push backs so that the radii of the stars is correct.
	printf("\n\n Running radius adjustment.");
	float corrector;
	float currentDiameterStar1 = 2.0*getStarRadius(1);
	float currentDiameterStar2 = 2.0*getStarRadius(2);
	printf("\n\n percent out1 = %f percent out2 = %f", (currentDiameterStar1 - DiameterStar1)/DiameterStar1, (currentDiameterStar2 - DiameterStar2)/DiameterStar2);
	printf("\n plasma pushback1 = %f or %e",VelCPU[2].w, VelCPU[2].w*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers)));
	printf("\n plasma pushback2 = %f or %e",VelCPU[NumberElementsStar1 +2].w, VelCPU[NumberElementsStar1 +2].w*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers)));
	// Feedback loop: scale each star's plasma push-back (VelCPU[i].w) up or down
	// until both measured diameters are within DiameterTolerance of target.
	// A star that is too SMALL gets a 10x stronger correction than one too large.
	// NOTE(review): abs() here is applied to float/double expressions — relies on
	// the C++ overloads being in scope via the common headers; confirm (plain C
	// abs() would truncate to int).
	while((DiameterTolerance < abs(currentDiameterStar1 - DiameterStar1)/DiameterStar1) || (DiameterTolerance < abs(currentDiameterStar2 - DiameterStar2)/DiameterStar2))
	{
		if(DiameterStar1 < currentDiameterStar1)
		{
			corrector = DiameterAdjustmentSoftener*(currentDiameterStar1 - DiameterStar1)/currentDiameterStar1;
		}
		else
		{
			corrector = 10.0*DiameterAdjustmentSoftener*(currentDiameterStar1 - DiameterStar1)/DiameterStar1;
		}
		for(int i = 0; i < NumberElementsStar1; i++)
		{
			VelCPU[i].w = VelCPU[i].w*(1.0 - corrector);
		}
		//damp = DiameterAdjustmentDamp*abs(1.0 - corrector);
		damp = DiameterAdjustmentDamp*DiameterStar1/currentDiameterStar1;

		if(DiameterStar2 < currentDiameterStar2)
		{
			corrector = DiameterAdjustmentSoftener*(currentDiameterStar2 - DiameterStar2)/currentDiameterStar2;
		}
		else
		{
			corrector = 10.0*DiameterAdjustmentSoftener*(currentDiameterStar2 - DiameterStar2)/DiameterStar2;
		}
		for(int i = NumberElementsStar1; i < NumberElements; i++)
		{
			VelCPU[i].w = VelCPU[i].w*(1.0 - corrector);
		}
		// Use the larger of the two per-star damp values for this relaxation run.
		if(damp < DiameterAdjustmentDamp*DiameterStar2/currentDiameterStar2)
		{
			//damp = DiameterAdjustmentDamp*abs(1.0 - corrector);
			damp = DiameterAdjustmentDamp*DiameterStar2/currentDiameterStar2;
		}

		copyStarsUpToGPU(gPUsUsed);
		time = DiameterAdjustmentTime;
		starNbody(time, damp, Dt, gPUsUsed);
		copyStarsDownFromGPU();
		currentDiameterStar1 = 2.0*getStarRadius(1);
		currentDiameterStar2 = 2.0*getStarRadius(2);
		printf("\n\n percent out1 = %f percent out2 = %f", (currentDiameterStar1 - DiameterStar1)/DiameterStar1, (currentDiameterStar2 - DiameterStar2)/DiameterStar2);
		printf("\n plasma pushback1 = %f or %e",VelCPU[2].w, VelCPU[2].w*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers)));
		printf("\n plasma pushback2 = %f or %e",VelCPU[NumberElementsStar1 +2].w, VelCPU[NumberElementsStar1 +2].w*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers)));
	}

	// Letting any residue from the radius adjustment settle out.
	time = DiameterAdjustmentRestTime;
	printf("\n\n Resting diameter adjustment for %f hours", time*SystemTimeConverterToSeconds/3600.0);
	damp = 0.0;
	starNbody(time, damp, Dt, gPUsUsed);

	// Spinning stars
	copyStarsDownFromGPU();
	setCenterOfMassToZero(1);
	setCenterOfMassToZero(2);
	setAverageVelocityToZero(1);
	setAverageVelocityToZero(2);
	printf("\n\n Spinning star1. \n");
	spinStar(1);
	printf("\n Spinning star2. \n");
	spinStar(2);

	// Letting any residue from the spinning settle out.
	copyStarsUpToGPU(gPUsUsed);
	time = SpinRestTime;
	damp = 0.0;
	printf("\n Running spin rest.");
	starNbody(time, 0.0, Dt, gPUsUsed);

	//Centering and removing any drift from stars.
	copyStarsDownFromGPU();
	setCenterOfMassToZero(1);
	setCenterOfMassToZero(2);
	setAverageVelocityToZero(1);
	setAverageVelocityToZero(2);

	// Saving the stars positions and velocities to a file.
	printf("\n\n Saving final positions, velocities, and forces \n");
	// Removing the fill that was used to hold temperaraly hold the stars before spinning.
	system("rm FilesFromBuild/StartPosVelForce");
	recordStartPosVelForceOfCreatedStars();

	printf("\n Recording stats \n");
	recordStarStats();

	// Freeing memory.
	printf("\n Cleaning up \n");
	cleanUp(gPUsUsed);

	// Stopping timer and printing out run time.
	endTimer = clock();
	int seconds = (endTimer - startTimer)/CLOCKS_PER_SEC;
	int hours = seconds/3600;
	int minutes = (seconds - hours*3600)/60;
	seconds = seconds - hours*3600 - minutes*60;
	printf("\n Total time taken for this run: %d hours %d minutes %d seconds\n", hours, minutes, seconds);
	printf("\n The run has finished successfully \n\n");
	exit(0);
}

// Program entry point: sets up the GLUT window and OpenGL state, then hands
// all real work to control() via the idle callback. glutMainLoop never returns.
int main(int argc, char** argv)
{
	glutInit(&argc,argv);
	glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
	glutInitWindowSize(XWindowSize,YWindowSize);
	glutInitWindowPosition(0,0);
	glutCreateWindow("Creating Stars");
	glutReshapeFunc(reshape);
	init();
	glShadeModel(GL_SMOOTH);
	glClearColor(0.0, 0.0, 0.0, 0.0);
	glutDisplayFunc(Display);
	glutReshapeFunc(reshape);
	glutIdleFunc(control);
	glutMainLoop();
	return 0;
}
8807eb2f3ed31f032b080a25357e81ff3eee5c89.cu
/*
	nvcc StarBuilder.cu -o StarBuilder.exe -lglut -lGL -lGLU -lm -arch=sm_60
	nvcc StarBuilder.cu -o StarBuilder.exe -lglut -lGL -lGLU -lm -arch=compute_60 -code=sm_60
	nvcc StarBuilder.cu -o StarBuilder.exe -lglut -lGL -lGLU -lm --use_fast_math
*/
#include "../CommonCompileFiles/binaryStarCommonIncludes.h"
#include "../CommonCompileFiles/binaryStarCommonDefines.h"
#include "../CommonCompileFiles/binaryStarCommonGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonFunctions.h"

//Globals read in from the BiuldSetup file all except the number of elements will need to be put into our units
double MassOfStar1, DiameterStar1, MassOfCore1, DiameterCore1;
double MassOfStar2, DiameterStar2, MassOfCore2, DiameterCore2;
float4 InitialSpin1, InitialSpin2;
//These next 4 do not need to be globals in the run files because they are store in the elements
double PressurePlasma1;
double PressurePlasma2;
double PushBackCoreMult1;
double PushBackCoreMult2;
double MaxInitialPlasmaSpeed;
double RedGiantVolumeGrowth;
double RawStarDampAmount;
double RawStarDampTime;
int RawStarDampLevels;
double RawStarRestTime;
double DiameterTolerance;
double DiameterAdjustmentSoftener;
double DiameterAdjustmentDamp;
double DiameterAdjustmentTime;
double DiameterAdjustmentRestTime;
double SpinRestTime;

// Copies the whole file src to dst, exiting with the original-style error
// message if src cannot be opened. The "../" prefix is stripped from the
// name shown in the message to match the messages the inline code printed.
// BUG FIX: the inline version malloc'd a fresh buffer for every copy but
// called free() only once at the very end, leaking every buffer but the last;
// here each buffer is freed as soon as the copy completes.
static void copyFileOrDie(const char *src, const char *dst)
{
	FILE *fileIn;
	FILE *fileOut;
	long sizeOfFile;
	char *buffer;
	const char *shownName;

	shownName = (src[0] == '.' && src[1] == '.' && src[2] == '/') ? src + 3 : src;

	fileIn = fopen(src, "rb");
	if(fileIn == NULL)
	{
		printf("\n\n The %s file does not exist\n\n", shownName);
		exit(0);
	}
	fseek(fileIn, 0, SEEK_END);
	sizeOfFile = ftell(fileIn);
	rewind(fileIn);
	buffer = (char*)malloc(sizeof(char)*sizeOfFile);
	fread(buffer, 1, sizeOfFile, fileIn);
	fileOut = fopen(dst, "wb");
	fwrite(buffer, 1, sizeOfFile, fileOut);
	fclose(fileIn);
	fclose(fileOut);
	free(buffer);
}

// Creates a time-stamped "Stars:M-D-H:MM" folder, chdir's into it, creates the
// sub-folder layout, and copies the setup file, common headers, source files,
// executables, and branch/continue scripts into place. Finally marks the
// scripts/executables that will be run from the new folder as executable.
void createFolderForNewStars()
{
	//Create output folder to store the stars
	time_t t = time(0);
	struct tm * now = localtime( & t );
	int month = now->tm_mon + 1, day = now->tm_mday, curTimeHour = now->tm_hour, curTimeMin = now->tm_min;
	stringstream smonth, sday, stimeHour, stimeMin;
	smonth << month;
	sday << day;
	stimeHour << curTimeHour;
	stimeMin << curTimeMin;
	string monthday;
	// Zero-pad single-digit minutes so "3:05" doesn't print as "3:5".
	if (curTimeMin <= 9)
		monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":0" + stimeMin.str();
	else
		monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":" + stimeMin.str();
	string foldernametemp = "Stars:" + monthday;
	const char *starFolderName = foldernametemp.c_str();
	mkdir(starFolderName , S_IRWXU|S_IRWXG|S_IRWXO);

	//Moving into the new directory and creating folders.
	chdir(starFolderName);
	mkdir("FilesFromBuild" , S_IRWXU|S_IRWXG|S_IRWXO);
	mkdir("CommonCompileFiles" , S_IRWXU|S_IRWXG|S_IRWXO);
	mkdir("MainSourceFiles" , S_IRWXU|S_IRWXG|S_IRWXO);
	mkdir("ExecutableFiles" , S_IRWXU|S_IRWXG|S_IRWXO);
	mkdir("ContinueFiles" , S_IRWXU|S_IRWXG|S_IRWXO);

	//Copying the files that were used to build the raw stars into the star folder.
	//BuildSteup file
	copyFileOrDie("../BuildSetup", "FilesFromBuild/BuildSetup");

	//Common compile files
	copyFileOrDie("../CommonCompileFiles/binaryStarCommonIncludes.h",     "CommonCompileFiles/binaryStarCommonIncludes.h");
	copyFileOrDie("../CommonCompileFiles/binaryStarCommonDefines.h",      "CommonCompileFiles/binaryStarCommonDefines.h");
	copyFileOrDie("../CommonCompileFiles/binaryStarCommonGlobals.h",      "CommonCompileFiles/binaryStarCommonGlobals.h");
	copyFileOrDie("../CommonCompileFiles/binaryStarCommonFunctions.h",    "CommonCompileFiles/binaryStarCommonFunctions.h");
	copyFileOrDie("../CommonCompileFiles/binaryStarCommonRunGlobals.h",   "CommonCompileFiles/binaryStarCommonRunGlobals.h");
	copyFileOrDie("../CommonCompileFiles/binaryStarCommonRunFunctions.h", "CommonCompileFiles/binaryStarCommonRunFunctions.h");

	//Main source files
	copyFileOrDie("../MainSourceFiles/StarBuilder.cu",     "MainSourceFiles/StarBuilder.cu");
	copyFileOrDie("../MainSourceFiles/StarBranchRun.cu",   "MainSourceFiles/StarBranchRun.cu");
	copyFileOrDie("../MainSourceFiles/StarContinueRun.cu", "MainSourceFiles/StarContinueRun.cu");
	copyFileOrDie("../MainSourceFiles/Viewer.cu",          "MainSourceFiles/Viewer.cu");

	//Executable files
	copyFileOrDie("../ExecutableFiles/StarBuilder.exe",     "ExecutableFiles/StarBuilder.exe");
	copyFileOrDie("../ExecutableFiles/StarBranchRun.exe",   "ExecutableFiles/StarBranchRun.exe");
	copyFileOrDie("../ExecutableFiles/StarContinueRun.exe", "ExecutableFiles/StarContinueRun.exe");
	copyFileOrDie("../ExecutableFiles/Viewer.exe",          "ExecutableFiles/Viewer.exe");

	//Copying files into the main branch folder
	copyFileOrDie("../BranchAndContinueFiles/BranchRun", "BranchRun");
	//We will also copy the branch run source code and put it in the main branch folder and the BranchSetupTemplate.
	copyFileOrDie("../BranchAndContinueFiles/BranchSetupTemplate", "BranchSetup");

	//Finally copying the continue run script and viewer into the ContinueFiles foldes
	copyFileOrDie("../BranchAndContinueFiles/ContinueRun", "ContinueFiles/ContinueRun");
	copyFileOrDie("../BranchAndContinueFiles/Viewer",      "ContinueFiles/Viewer");

	//Giving the apropriate file execute permisions. I didn't give the starBuild execute permision because it
	//should only be used in the main folder. This file is only for reference.
	system("chmod 755 ./ExecutableFiles/StarBranchRun.exe");
	system("chmod 755 ./ExecutableFiles/StarContinueRun.exe");
	system("chmod 755 ./ExecutableFiles/Viewer.exe");
	system("chmod 755 ./BranchRun");
}

// Reads every "name = value" pair from the BuildSetup file one directory up,
// in the fixed order the file is written in. Exits if the file cannot be
// opened. NOTE(review): values are consumed positionally — the names before
// each '=' are discarded — so the setup file's line order must match exactly.
void readBuildParameters()
{
	ifstream data;
	string name;

	data.open("../BuildSetup");
	if(data.is_open() == 1)
	{
		getline(data,name,'='); data >> DiameterStar1;
		getline(data,name,'='); data >> DiameterCore1;
		getline(data,name,'='); data >> DiameterStar2;
		getline(data,name,'='); data >> DiameterCore2;
		getline(data,name,'='); data >> MassOfStar1;
		getline(data,name,'='); data >> MassOfCore1;
		getline(data,name,'='); data >> MassOfStar2;
		getline(data,name,'='); data >> MassOfCore2;
		getline(data,name,'='); data >> NumberElements;
		getline(data,name,'='); data >> InitialSpin1.x;
		getline(data,name,'='); data >> InitialSpin1.y;
		getline(data,name,'='); data >> InitialSpin1.z;
		getline(data,name,'='); data >> InitialSpin1.w;
		getline(data,name,'='); data >> InitialSpin2.x;
		getline(data,name,'='); data >> InitialSpin2.y;
		getline(data,name,'='); data >> InitialSpin2.z;
		getline(data,name,'='); data >> InitialSpin2.w;
		getline(data,name,'='); data >> PressurePlasma1;
		getline(data,name,'='); data >> PressurePlasma2;
		getline(data,name,'='); data >> PushBackCoreMult1;
		getline(data,name,'='); data >> PushBackCoreMult2;
		getline(data,name,'='); data >> CoreCorePushBackReduction;
		getline(data,name,'='); data >> CorePlasmaPushBackReduction;
		getline(data,name,'='); data >> PlasmaPlasmaPushBackReduction;
		getline(data,name,'='); data >> MaxInitialPlasmaSpeed;
		getline(data,name,'='); data >> RedGiantVolumeGrowth;
		getline(data,name,'='); data >> RawStarDampAmount;
		getline(data,name,'='); data >> RawStarDampTime;
		getline(data,name,'='); data >> RawStarDampLevels;
		getline(data,name,'='); data >> RawStarRestTime;
		getline(data,name,'='); data >> DiameterTolerance;
		getline(data,name,'='); data >> DiameterAdjustmentSoftener;
		getline(data,name,'='); data >> DiameterAdjustmentDamp;
		getline(data,name,'='); data >> DiameterAdjustmentTime;
		getline(data,name,'='); data >> DiameterAdjustmentRestTime;
		getline(data,name,'='); data >> SpinRestTime;
		getline(data,name,'='); data >> Dt;
		getline(data,name,'='); data >> ZoomFactor;
		getline(data,name,'='); data >> DrawRate;
		getline(data,name,'='); data >> PrintRate;
		getline(data,name,'='); data >> Core1Color.x;
		getline(data,name,'='); data >> Core1Color.y;
		getline(data,name,'='); data >> Core1Color.z;
		getline(data,name,'='); data >> Core2Color.x;
		getline(data,name,'='); data >> Core2Color.y;
		getline(data,name,'='); data >> Core2Color.z;
		getline(data,name,'='); data >> Envelope1Color.x;
		getline(data,name,'='); data >> Envelope1Color.y;
		getline(data,name,'='); data >> Envelope1Color.z;
		getline(data,name,'='); data >> Envelope2Color.x;
		getline(data,name,'='); data >> Envelope2Color.y;
		getline(data,name,'='); data >> Envelope2Color.z;
	}
	else
	{
		printf("\nTSU Error could not open BuildSetup file\n");
		exit(0);
	}
	data.close();
}

//This function sets the units such that the mass unit is the mass of a plasma element,
//the length unit is the diameter of a plasma element and time unit such that G is 1.
//It also splits the number of elements between the stars and creates convetion factors to standard units.
void generateAndSaveRunParameters()
{
	double massPlasmaElement;
	double diameterPlasmaElement;
	double totalMassPlasmaElements;

	MassOfStar1 *= MASS_SUN;
	MassOfStar2 *= MASS_SUN;
	MassOfCore1 *= MASS_SUN;
	MassOfCore2 *= MASS_SUN;
	DiameterStar1 *= DIAMETER_SUN;
	DiameterStar2 *= DIAMETER_SUN;
	DiameterCore1 *= DIAMETER_SUN;
	DiameterCore2 *= DIAMETER_SUN;

	totalMassPlasmaElements = (MassOfStar1 - MassOfCore1) + (MassOfStar2 - MassOfCore2);
	//The mass of a plasma element is just the total mass divided by the number of elements used. Need to subtract 2 because you have 2 cores.
massPlasmaElement = totalMassPlasmaElements/((double)NumberElements - 2); //We will use the mass of a plasma element as one unit of mass. //The following constant will convert system masses up to kilograms by multipling //or convert kilograms down to system units by dividing. SystemMassConverterToKilograms = massPlasmaElement; //Dividing up the plasma elements between the 2 stars. //Need to subtract 2 because you have 2 core elements. NumberElementsStar1 = ((MassOfStar1 - MassOfCore1)/totalMassPlasmaElements)*((double)NumberElements - 2); NumberElementsStar2 = (NumberElements -2) - NumberElementsStar1; //Adding back the core elements. NumberElementsStar1 += 1; NumberElementsStar2 += 1; //Finding the diameter of the plasma elements is a bit more involved. First find the volume of the plasma Vpl = Vsun - Vcore. double volumePlasma = (4.0*PI/3.0)*( pow((DiameterStar1/2.0),3.0) - pow((DiameterCore1/2.0),3.0) ) + (4.0*PI/3.0)*( pow((DiameterStar2/2.0),3.0) - pow((DiameterCore2/2.0),3.0) ); //Now randum spheres only pack at 68 persent so to adjust for this we need to adjust for this. volumePlasma *= 0.68; //Now this is the volume the plasma but we would the star to grow in size by up 100 times. //I'm assuming when they this they mean volume. I will also make the amount it can grow a #define so it can be changed easily. volumePlasma = volumePlasma*RedGiantVolumeGrowth; //Now to find the volume of a plasma element divide this by the number of plasma elements. double volumePlasmaElement = volumePlasma/(NumberElements -2); //Now to find the diameter of a plasma element we need to find the diameter to make this volume. diameterPlasmaElement = pow(6.0*volumePlasmaElement/PI, (1.0/3.0)); //We will use the diameter of a plasma element as one unit of length. //The following constant will convert system lengths up to kilometers by multipling //or convert kilometers down to system units by dividing. 
SystemLengthConverterToKilometers = diameterPlasmaElement; //We will use a time unit so that the universal gravitational constant will be 1. //The following constant will convert system times up to seconds by multipling //or convert seconds down to system units by dividing. Make sure UniversalGravity is fed into the program in kilograms kilometers and seconds! SystemTimeConverterToSeconds = sqrt(pow(SystemLengthConverterToKilometers,3)/(SystemMassConverterToKilograms*UNIVERSAL_GRAVITY_CONSTANT)); //Putting things with mass into our units. Taking kilograms into our units. MassOfStar1 /= SystemMassConverterToKilograms; MassOfCore1 /= SystemMassConverterToKilograms; MassOfStar2 /= SystemMassConverterToKilograms; MassOfCore2 /= SystemMassConverterToKilograms; //Putting things with length into our units. Taking kilometers into our units. DiameterStar1 /= SystemLengthConverterToKilometers; DiameterCore1 /= SystemLengthConverterToKilometers; DiameterStar2 /= SystemLengthConverterToKilometers; DiameterCore2 /= SystemLengthConverterToKilometers; //Putting things with time into our units. Dt *= (3600.0/SystemTimeConverterToSeconds); //It was in hours so take it to seconds first. RawStarDampTime *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. RawStarRestTime *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. DiameterAdjustmentTime *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. DiameterAdjustmentRestTime *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. SpinRestTime *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. PrintRate *= (24.0*3600.0/SystemTimeConverterToSeconds); //It was in days so take it to seconds first. //Putting Angular Velocities into our units. Taking revolutions/hour into our units. Must take it to seconds first. 
InitialSpin1.w *= SystemTimeConverterToSeconds/3600.0; InitialSpin2.w *= SystemTimeConverterToSeconds/3600.0; //Putting push back parameters into our units. kilograms*kilometersE-1*secondsE-2 into our units. //This will be multiplied by an area to make it a force PressurePlasma1 /= SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers); PressurePlasma2 /= SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers); FILE *runParametersFile; runParametersFile = fopen("FilesFromBuild/RunParameters", "wb"); fprintf(runParametersFile, "\n SystemLengthConverterToKilometers = %e", SystemLengthConverterToKilometers); fprintf(runParametersFile, "\n SystemMassConverterToKilograms = %e", SystemMassConverterToKilograms); fprintf(runParametersFile, "\n SystemTimeConverterToSeconds = %e", SystemTimeConverterToSeconds); fprintf(runParametersFile, "\n NumberElementsStar1 = %d", NumberElementsStar1); fprintf(runParametersFile, "\n NumberElementsStar2 = %d", NumberElementsStar2); fprintf(runParametersFile, "\n Core Core Push Back Reduction = %f", CoreCorePushBackReduction); fprintf(runParametersFile, "\n Core Plasma Push Back Reduction = %f", CorePlasmaPushBackReduction); fprintf(runParametersFile, "\n Plasma Plasma Push Back Reduction = %f", PlasmaPlasmaPushBackReduction); fprintf(runParametersFile, "\n Time step Dt = %f", Dt); fprintf(runParametersFile, "\n Zoom factor = %f", ZoomFactor); fprintf(runParametersFile, "\n Print rate = %f", PrintRate); fprintf(runParametersFile, "\n Core1Color.x = %f", Core1Color.x); fprintf(runParametersFile, "\n Core1Color.y = %f", Core1Color.y); fprintf(runParametersFile, "\n Core1Color.z = %f", Core1Color.z); fprintf(runParametersFile, "\n Core2Color.x = %f", Core2Color.x); fprintf(runParametersFile, "\n Core2Color.y = %f", Core2Color.y); fprintf(runParametersFile, "\n Core2Color.z = %f", Core2Color.z); 
fprintf(runParametersFile, "\n Envelope1Color.x = %f", Envelope1Color.x); fprintf(runParametersFile, "\n Envelope1Color.y = %f", Envelope1Color.y); fprintf(runParametersFile, "\n Envelope1Color.z = %f", Envelope1Color.z); fprintf(runParametersFile, "\n Envelope2Color.x = %f", Envelope2Color.x); fprintf(runParametersFile, "\n Envelope2Color.y = %f", Envelope2Color.y); fprintf(runParametersFile, "\n Envelope2Color.z = %f", Envelope2Color.z); fprintf(runParametersFile, "\n RadiusCore1 = %f", DiameterCore1/2.0); fprintf(runParametersFile, "\n RadiusCore2 = %f", DiameterCore2/2.0); fclose(runParametersFile); } int createRawStar(int starNumber) { //int cubeStart; int elementStart, elementStop; int element, cubeLayer; int x, y, z; double elementMass, elementDiameter, elementPressure; double mag, speed, seperation; time_t t; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; elementMass = 1.0; // The mass unit was set so 1 is the mass of an element. elementDiameter = 1.0; // The length unit was set so 1 is the diameter of an element. elementPressure = PressurePlasma1; PosCPU[0].x = 0.0; PosCPU[0].y = 0.0; PosCPU[0].z = 0.0; PosCPU[0].w = MassOfCore1; VelCPU[0].x = 0.0; VelCPU[0].y = 0.0; VelCPU[0].z = 0.0; VelCPU[0].w = PushBackCoreMult1; ForceCPU[0].x = 0.0; ForceCPU[0].y = 0.0; ForceCPU[0].z = 0.0; ForceCPU[0].w = DiameterCore1; if(DiameterCore2 < elementDiameter) { cubeLayer = elementDiameter ; } else { cubeLayer = (int)DiameterCore1 + 1; // This is the size of the cube the core takes up. Added 1 to be safe. } element = elementStart + 1; //Add 1 because the core is the first element. } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; elementMass = 1.0; // The mass unit was set so 1 is the mass of an element. elementDiameter = 1.0; // The length unit was set so 1 is the diameter of an element. 
elementPressure = PressurePlasma2; PosCPU[NumberElementsStar1].x = 0.0; PosCPU[NumberElementsStar1].y = 0.0; PosCPU[NumberElementsStar1].z = 0.0; PosCPU[NumberElementsStar1].w = MassOfCore2; VelCPU[NumberElementsStar1].x = 0.0; VelCPU[NumberElementsStar1].y = 0.0; VelCPU[NumberElementsStar1].z = 0.0; VelCPU[NumberElementsStar1].w = PushBackCoreMult2; ForceCPU[NumberElementsStar1].x = 0.0; ForceCPU[NumberElementsStar1].y = 0.0; ForceCPU[NumberElementsStar1].z = 0.0; ForceCPU[NumberElementsStar1].w = DiameterCore2; if(DiameterCore2 < elementDiameter) { cubeLayer = elementDiameter; } else { cubeLayer = (int)DiameterCore2 + 1; // This is the size of the cube the core takes up. Added 1 to be safe. } element = elementStart + 1; //Add 1 because the core is the first element. } // The core is at (0,0,0) we then place elements in a cubic grid around it. Each element radius is 1 so we will walk out in units of 1. while(element < elementStop) { cubeLayer++; x = -cubeLayer; for(y = -cubeLayer; y <= cubeLayer; y++) { for(z = -cubeLayer; z <= cubeLayer; z++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y = (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } x = cubeLayer; for(y = -cubeLayer; y <= cubeLayer; y++) { for(z = -cubeLayer; z <= cubeLayer; z++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y = (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } y = -cubeLayer; for(x = -cubeLayer + 1; x <= cubeLayer - 1; x++) { for(z = -cubeLayer; z <= cubeLayer; z++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y = (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } y = cubeLayer; for(x = -cubeLayer + 1; x <= cubeLayer - 1; x++) { for(z = -cubeLayer; z <= cubeLayer; z++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y 
= (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } z = -cubeLayer; for(x = -cubeLayer + 1; x <= cubeLayer - 1; x++) { for(y = -cubeLayer + 1; y <= cubeLayer - 1; y++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y = (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } z = cubeLayer; for(x = -cubeLayer + 1; x <= cubeLayer - 1; x++) { for(y = -cubeLayer + 1; y <= cubeLayer - 1; y++) { if(element < elementStop) { PosCPU[element].x = (float)x; PosCPU[element].y = (float)y; PosCPU[element].z = (float)z; PosCPU[element].w = elementMass; element++; } else break; } } } //Just checking to make sure I didn't put any elements on top of each other. for(int i = elementStart; i < elementStop; i++) { for(int j = elementStart; j < elementStop; j++) { if(i != j) { seperation = sqrt((PosCPU[i].x - PosCPU[j].x)*(PosCPU[i].x - PosCPU[j].x) + (PosCPU[i].y - PosCPU[j].y)*(PosCPU[i].y - PosCPU[j].y) + (PosCPU[i].z - PosCPU[j].z)*(PosCPU[i].z - PosCPU[j].z)); if(seperation < ASSUME_ZERO_DOUBLE) { printf("\n TSU error: Two elements are on top of each other in the creatRawStars function\n"); exit(0); } } else break; } } // Setting the randum number generater seed. srand((unsigned) time(&t)); // Giving each noncore particle a randium velocity to shake things up a little. Also setting the pushback and diameter of noncore particles. 
speed = MaxInitialPlasmaSpeed/SystemLengthConverterToKilometers/SystemTimeConverterToSeconds; for(int i = elementStart + 1; i < elementStop; i++) { VelCPU[i].x = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0;; VelCPU[i].y = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0;; VelCPU[i].z = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0;; mag = sqrt(VelCPU[i].x*VelCPU[i].x + VelCPU[i].y*VelCPU[i].y + VelCPU[i].z*VelCPU[i].z); speed = ((float)rand()/(float)RAND_MAX)*speed; VelCPU[i].x *= speed/mag; VelCPU[i].y *= speed/mag; VelCPU[i].z *= speed/mag; VelCPU[i].w = elementPressure; ForceCPU[i].x = 0.0; ForceCPU[i].y = 0.0; ForceCPU[i].z = 0.0; ForceCPU[i].w = elementDiameter; } return(1); } float3 getCenterOfMass(int starNumber) { double totalMass,cmx,cmy,cmz; float3 centerOfMass; int elementStart, elementStop; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; } cmx = 0.0; cmy = 0.0; cmz = 0.0; totalMass = 0.0; // This is asuming the mass of each element is 1. for(int i = elementStart; i < elementStop; i++) { cmx += PosCPU[i].x*PosCPU[i].w; cmy += PosCPU[i].y*PosCPU[i].w; cmz += PosCPU[i].z*PosCPU[i].w; totalMass += PosCPU[i].w; } centerOfMass.x = cmx/totalMass; centerOfMass.y = cmy/totalMass; centerOfMass.z = cmz/totalMass; return(centerOfMass); } float3 getAverageLinearVelocity(int starNumber) { double totalMass, avx, avy, avz; float3 averagelinearVelocity; int elementStart, elementStop; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; } avx = 0.0; avy = 0.0; avz = 0.0; totalMass = 0.0; // This is asuming the mass of each element is 1. 
for(int i = elementStart; i < elementStop; i++) { avx += VelCPU[i].x*PosCPU[i].w; avy += VelCPU[i].y*PosCPU[i].w; avz += VelCPU[i].z*PosCPU[i].w; totalMass += PosCPU[i].w; } averagelinearVelocity.x = avx/totalMass; averagelinearVelocity.y = avy/totalMass; averagelinearVelocity.z = avz/totalMass; return(averagelinearVelocity); } void setCenterOfMassToZero(int starNumber) { float3 centerOfMass; int elementStart, elementStop; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; } centerOfMass = getCenterOfMass(starNumber); for(int i = elementStart; i < elementStop; i++) { PosCPU[i].x -= centerOfMass.x; PosCPU[i].y -= centerOfMass.y; PosCPU[i].z -= centerOfMass.z; } } void setAverageVelocityToZero(int starNumber) { float3 averagelinearVelocity; int elementStart, elementStop; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; } averagelinearVelocity = getAverageLinearVelocity(starNumber); for(int i = elementStart; i < elementStop; i++) { VelCPU[i].x -= averagelinearVelocity.x; VelCPU[i].y -= averagelinearVelocity.y; VelCPU[i].z -= averagelinearVelocity.z; } } void spinStar(int starNumber) { double rx, ry, rz; //vector from center of mass to the position vector double nx, ny, nz; //Unit vector perpendicular to the plane of spin float3 centerOfMass; float4 spinVector; double mag; int elementStart, elementStop; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; spinVector.x = InitialSpin1.x; spinVector.y = InitialSpin1.y; spinVector.z = InitialSpin1.z; spinVector.w = InitialSpin1.w; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; spinVector.x = InitialSpin2.x; spinVector.y = InitialSpin2.y; spinVector.z = InitialSpin2.z; spinVector.w = InitialSpin2.w; } //Making sure the spin vector is 
a unit vector mag = sqrt(spinVector.x*spinVector.x + spinVector.y*spinVector.y + spinVector.z*spinVector.z); if(ASSUME_ZERO_DOUBLE < mag) { spinVector.x /= mag; spinVector.y /= mag; spinVector.z /= mag; } else { printf("\nTSU Error: In spinStar. The spin direction vector is zero.\n"); exit(0); } centerOfMass = getCenterOfMass(starNumber); for(int i = elementStart; i < elementStop; i++) { //Creating a vector from the center of mass to the point rx = PosCPU[i].x - centerOfMass.x; ry = PosCPU[i].y - centerOfMass.y; rz = PosCPU[i].z - centerOfMass.z; double magsquared = rx*rx + ry*ry + rz*rz; double spinDota = spinVector.x*rx + spinVector.y*ry + spinVector.z*rz; double perpendicularDistance = sqrt(magsquared - spinDota*spinDota); double perpendicularVelocity = spinVector.w*2.0*PI*perpendicularDistance; //finding unit vector perpendicular to both the position vector and the spin vector nx = (spinVector.y*rz - spinVector.z*ry); ny = -(spinVector.x*rz - spinVector.z*rx); nz = (spinVector.x*ry - spinVector.y*rx); mag = sqrt(nx*nx + ny*ny + nz*nz); if(mag != 0.0) { nx /= mag; ny /= mag; nz /= mag; //Spining the element VelCPU[i].x += perpendicularVelocity*nx; VelCPU[i].y += perpendicularVelocity*ny; VelCPU[i].z += perpendicularVelocity*nz; } } } double getStarRadius(int starNumber) { double max, radius, temp; double coreRadius; int elementStart, elementStop; int count; if(starNumber == 1) { elementStart = 0; elementStop = NumberElementsStar1; coreRadius = DiameterCore1/2.0; } if(starNumber == 2) { elementStart = NumberElementsStar1; elementStop = NumberElements; coreRadius = DiameterCore2/2.0; } if((elementStop - elementStart) == 1) { return(coreRadius); } else { radius = -1.0; for(int i = elementStart; i < elementStop; i++) { temp = sqrt(PosCPU[i].x*PosCPU[i].x + PosCPU[i].y*PosCPU[i].y + PosCPU[i].z*PosCPU[i].z); if(radius < temp) { radius = temp; } } max = radius; // At present the radius is the distance to the farthest element. 
I am going to reduce this radius by 1 percent // each iteration until 10 percent of the elements in the star are outside the radius. // Then average this with the farthest element. count = 0; while(count <= 0.1*elementStop) { radius = radius - radius*0.01; count = 0; for(int i = elementStart; i < elementStop; i++) { temp = sqrt(PosCPU[i].x*PosCPU[i].x + PosCPU[i].y*PosCPU[i].y + PosCPU[i].z*PosCPU[i].z); if(radius < temp) { count++; } } } return((radius+max)/2.0); } } void drawPictureSeperate() { double seperation; double diameterSun; double drawUnit; diameterSun = DIAMETER_SUN/SystemLengthConverterToKilometers; drawUnit = 1.0/(diameterSun/ZoomFactor); seperation = 3.0*diameterSun; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glColor3d(Core1Color.x,Core1Color.y,Core1Color.z); glPushMatrix(); glTranslatef(drawUnit*(PosCPU[0].x + seperation), drawUnit*PosCPU[0].y, drawUnit*PosCPU[0].z); glutSolidSphere(drawUnit*DiameterCore1/2.0,10,10); glPopMatrix(); glPointSize(2.0); glColor3d(Envelope1Color.x,Envelope1Color.y,Envelope1Color.z); glBegin(GL_POINTS); for(int i = 0 + 1; i < NumberElementsStar1; i++) { glVertex3f(drawUnit*(PosCPU[i].x + seperation), drawUnit*PosCPU[i].y, drawUnit*PosCPU[i].z); } glEnd(); glColor3d(1.0,1.0,1.0); glPushMatrix(); glTranslatef(drawUnit*seperation, 0.0, 0.0); glutWireSphere(drawUnit*DiameterStar1/2.0,10,10); glPopMatrix(); glColor3d(Core2Color.x,Core2Color.y,Core2Color.z); glPushMatrix(); glTranslatef(drawUnit*(PosCPU[NumberElementsStar1].x - seperation), drawUnit*PosCPU[NumberElementsStar1].y, drawUnit*PosCPU[NumberElementsStar1].z); glutSolidSphere(drawUnit*DiameterCore2/2.0,10,10); glPopMatrix(); glPointSize(2.0); glColor3d(Envelope2Color.x,Envelope2Color.y,Envelope2Color.z); glBegin(GL_POINTS); for(int i = NumberElementsStar1 + 1; i < NumberElements; i++) { glVertex3f(drawUnit*(PosCPU[i].x - seperation), drawUnit*PosCPU[i].y, drawUnit*PosCPU[i].z); } glEnd(); glColor3d(1.0,1.0,1.0); glPushMatrix(); 
glTranslatef(-drawUnit*seperation, 0.0, 0.0); glutWireSphere(drawUnit*DiameterStar2/2.0,10,10); glPopMatrix(); glutSwapBuffers(); } __global__ void getForcesSeperate(float4 *pos, float4 *vel, float4 *force, int numberElementsStar1, int NumberElements, float corePlasmaPushBackReduction, float plasmaPlasmaPushBackReduction, int gPUNumber, int gPUsUsed) { int id, ids, i, j, k; float4 posMe, velMe, forceMe; float4 partialForce; double forceSumX, forceSumY, forceSumZ; __shared__ float4 shPos[BLOCKSIZE]; __shared__ float4 shVel[BLOCKSIZE]; __shared__ float4 shForce[BLOCKSIZE]; //id = threadIdx.x + blockDim.x*blockIdx.x; id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber; if(NumberElements <= id) { printf("\n TSU error: id out of bounds in getForcesSeperate. \n"); } forceSumX = 0.0; forceSumY = 0.0; forceSumZ = 0.0; posMe.x = pos[id].x; posMe.y = pos[id].y; posMe.z = pos[id].z; posMe.w = pos[id].w; velMe.x = vel[id].x; velMe.y = vel[id].y; velMe.z = vel[id].z; velMe.w = vel[id].w; forceMe.x = force[id].x; forceMe.y = force[id].y; forceMe.z = force[id].z; forceMe.w = force[id].w; for(k =0; k < gPUsUsed; k++) { for(j = 0; j < gridDim.x; j++) { shPos[threadIdx.x] = pos [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k]; shVel[threadIdx.x] = vel [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k]; shForce[threadIdx.x] = force[threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k]; __syncthreads(); #pragma unroll 32 for(i = 0; i < blockDim.x; i++) { ids = i + blockDim.x*j + blockDim.x*gridDim.x*k; if((id < numberElementsStar1 && ids < numberElementsStar1) || (numberElementsStar1 <= id && numberElementsStar1 <= ids)) { if(id != ids) { if(id == 0 || id == numberElementsStar1) { partialForce = calculateCorePlasmaForce(0, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePlasmaPushBackReduction); } else if(ids == 0 || ids == numberElementsStar1) { partialForce = calculateCorePlasmaForce(1, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], 
corePlasmaPushBackReduction); } else { partialForce = calculatePlasmaPlasmaForce(posMe, shPos[i], velMe, shVel[i], plasmaPlasmaPushBackReduction); } forceSumX += partialForce.x; forceSumY += partialForce.y; forceSumZ += partialForce.z; } } } __syncthreads(); } } force[id].x = (float)forceSumX; force[id].y = (float)forceSumY; force[id].z = (float)forceSumZ; } __global__ void moveBodiesDamped(float4 *pos, float4 *vel, float4 *force, float damp, float dt, int gPUNumber) { int id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber; vel[id].x += ((force[id].x-damp*vel[id].x)/pos[id].w)*dt; vel[id].y += ((force[id].y-damp*vel[id].y)/pos[id].w)*dt; vel[id].z += ((force[id].z-damp*vel[id].z)/pos[id].w)*dt; pos[id].x += vel[id].x*dt; pos[id].y += vel[id].y*dt; pos[id].z += vel[id].z*dt; } void starNbody(float runTime, float damp, float dt, int gPUsUsed) { float time = 0.0; float printTime = 0.0; int tdraw = 0; int offSet = NumberElements/gPUsUsed; while(time < runTime) { //Finding the forces. for(int i = 0; i < gPUsUsed; i++) { cudaSetDevice(i); errorCheck("cudaSetDevice"); getForcesSeperate<<<GridConfig, BlockConfig>>>(PosGPU[i], VelGPU[i], ForceGPU[i], NumberElementsStar1, NumberElements, CorePlasmaPushBackReduction, PlasmaPlasmaPushBackReduction, i, gPUsUsed); errorCheck("getForcesSeperate"); } //Moving the elements. 
for(int i = 0; i < gPUsUsed; i++) { cudaSetDevice(i); errorCheck("cudaSetDevice"); moveBodiesDamped<<<GridConfig, BlockConfig>>>(PosGPU[i], VelGPU[i], ForceGPU[i], damp, dt, i); errorCheck("moveBodiesDamped"); } cudaDeviceSynchronize(); errorCheck("cudaDeviceSynchronize"); //Sharing memory for(int i = 0; i < gPUsUsed; i++) { cudaSetDevice(i); errorCheck("cudaSetDevice"); for(int j = 0; j < gPUsUsed; j++) { if(i != j) { cudaMemcpyAsync(&PosGPU[j][i*offSet], &PosGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), cudaMemcpyDeviceToDevice); errorCheck("cudaMemcpy Pos A"); cudaMemcpyAsync(&VelGPU[j][i*offSet], &VelGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), cudaMemcpyDeviceToDevice); errorCheck("cudaMemcpy Vel"); } } } cudaDeviceSynchronize(); errorCheck("cudaDeviceSynchronize"); time += dt; tdraw++; if(tdraw == DrawRate) { //Because it is shared above it will only need to be copied from one GPU. cudaSetDevice(0); errorCheck("cudaSetDevice"); cudaMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), cudaMemcpyDeviceToHost); errorCheck("cudaMemcpy Pos draw"); drawPictureSeperate(); tdraw = 0; } printTime += dt; if(PrintRate <= printTime) { printf("\n Time = %f days", time/(24.0*3600.0/SystemTimeConverterToSeconds)); printTime = 0.0; } } } void recordStartPosVelForceOfCreatedStars() { FILE *startPosVelForceFile; float time = 0.0; startPosVelForceFile = fopen("FilesFromBuild/StartPosVelForce", "wb"); fwrite(&time, sizeof(float), 1, startPosVelForceFile); fwrite(PosCPU, sizeof(float4), NumberElements, startPosVelForceFile); fwrite(VelCPU, sizeof(float4), NumberElements, startPosVelForceFile); fwrite(ForceCPU, sizeof(float4), NumberElements, startPosVelForceFile); fclose(startPosVelForceFile); } void readStarsBackIn() { float time; FILE *startFile = fopen("FilesFromBuild/StartPosVelForce","rb"); if(startFile == NULL) { printf("\n\n The StartPosVelForce file does not exist\n\n"); exit(0); } fread(&time, sizeof(float), 1, startFile); fread(PosCPU, 
sizeof(float4), NumberElements, startFile); fread(VelCPU, sizeof(float4), NumberElements, startFile); fread(ForceCPU, sizeof(float4), NumberElements, startFile); fclose(startFile); } double getAveragePlasmaPressure(int star) { int start, stop; double temp = 0.0; if(star == 1) { start = 1; stop = NumberElementsStar1; } else { start = NumberElementsStar1 + 1; stop = NumberElements; } for(int i = start; i < stop; i++) { temp += VelCPU[i].w; } return(temp/((double)stop - (double)start)); } void recordStarStats() { FILE *starStatsFile; double massStar1, radiusStar1, densityStar1; double massStar2, radiusStar2, densityStar2; double averagePlasmaPressure1, averagePlasmaPressure2; massStar1 = (NumberElementsStar1 + MassOfCore1)*SystemMassConverterToKilograms; radiusStar1 = getStarRadius(1); radiusStar1 *= SystemLengthConverterToKilometers; densityStar1 = massStar1/((4.0/3.0)*PI*radiusStar1*radiusStar1*radiusStar1); massStar2 = (NumberElementsStar2 + MassOfCore1)*SystemMassConverterToKilograms; radiusStar2 = getStarRadius(2); radiusStar2 *= SystemLengthConverterToKilometers; densityStar2 = massStar1/((4.0/3.0)*PI*radiusStar2*radiusStar2*radiusStar2); starStatsFile = fopen("FilesFromBuild/StarBuildStats", "wb"); fprintf(starStatsFile, " The conversion parameters to take you to and from our units to kilograms, kilometers, seconds follow\n"); fprintf(starStatsFile, " Mass in our units is the mass of an element. In other words the mass of an element is one.\n"); fprintf(starStatsFile, " Length in our units is the diameter of an element. 
In other words the diameter of an element is one.\n"); fprintf(starStatsFile, " Time in our units is set so that the universal gravitational constant is 1."); fprintf(starStatsFile, "\n "); fprintf(starStatsFile, "\n Our length unit is this many kilometers: %e", SystemLengthConverterToKilometers); fprintf(starStatsFile, "\n Our mass unit is this many kilograms: %e", SystemMassConverterToKilograms); fprintf(starStatsFile, "\n Our time unit is this many seconds: %e or days %e\n\n", SystemTimeConverterToSeconds, SystemTimeConverterToSeconds/(60*60*24)); fprintf(starStatsFile, "\n Our time step is this many of our units %f", Dt); fprintf(starStatsFile, "\n Our time step is this many second: %e or hours: %e\n\n", Dt*SystemTimeConverterToSeconds, Dt*SystemTimeConverterToSeconds/(60.0*60.0)); averagePlasmaPressure1 = getAveragePlasmaPressure(1); averagePlasmaPressure2 = getAveragePlasmaPressure(2); fprintf(starStatsFile, "\n Average PressurePlasma1 in our units is: %e", averagePlasmaPressure1); fprintf(starStatsFile, "\n Average PressurePlasma2 in our units is: %e", averagePlasmaPressure2); fprintf(starStatsFile, "\n Average PressurePlasma1 in our given units is: %e", averagePlasmaPressure1*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers))); fprintf(starStatsFile, "\n Average PressurePlasma2 in our given units is: %e", averagePlasmaPressure2*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers))); fprintf(starStatsFile, "\n "); fprintf(starStatsFile, "\n Total number of elements in star1: %d", NumberElementsStar1); fprintf(starStatsFile, "\n Total number of elements in star2: %d", NumberElementsStar2); fprintf(starStatsFile, "\n "); fprintf(starStatsFile, "\n Mass of Star1 = %e kilograms in Sun units = %e", massStar1, massStar1/MASS_SUN); fprintf(starStatsFile, "\n Diameter of Star1 = %e kilometers in Sun units = %e", 
2.0*radiusStar1, 2.0*radiusStar1/DIAMETER_SUN); fprintf(starStatsFile, "\n Density of star1 = %e kilograms/(cubic kilometers)", densityStar1); fprintf(starStatsFile, "\n Mass of Star2 = %e kilograms in Sun units = %e", massStar2, massStar2/MASS_SUN); fprintf(starStatsFile, "\n Diameter of Star2 = %e kilometers in Sun units = %e", 2.0*radiusStar2, 2.0*radiusStar2/DIAMETER_SUN); fprintf(starStatsFile, "\n Density of star2 = %e kilograms/(cubic kilometers)", densityStar2); fclose(starStatsFile); } static void signalHandler(int signum) { int command; cout << "\n\n******************************************************" << endl; cout << "Enter:666 to kill the run." << endl; cout << "******************************************************\n\nCommand: "; cin >> command; if(command == 666) { cout << "\n\n******************************************************" << endl; cout << "Are you sure you want to terminate the run?" << endl; cout << "Enter:666 again if you are sure. Enter anything else to continue the run." << endl; cout << "******************************************************\n\nCommand: "; cin >> command; if(command == 666) { exit(0); } } else { cout <<"\n\n Invalid Command\n" << endl; } exit(0); } void control() { struct sigaction sa; float damp, time; int gPUsUsed; clock_t startTimer, endTimer; //Starting the timer. startTimer = clock(); // Handling input from the screen. sa.sa_handler = signalHandler; sigemptyset(&sa.sa_mask); sa.sa_flags = SA_RESTART; // Restart functions if interrupted by handler if (sigaction(SIGINT, &sa, NULL) == -1) { printf("\nTSU Error: sigaction error\n"); } // Creating folder to hold the newly created stars and moving into that folder. It also makes a copy of the BiuldSetup file in this folder. printf("\n Creating folders for new stars. \n"); createFolderForNewStars(); // Reading in the build parameters to a file. printf("\n Reading build parameters. \n"); readBuildParameters(); // Creating and saving the run parameters to a file. 
printf("\n Saving run parameters. \n"); generateAndSaveRunParameters(); // Allocating memory for CPU and GPU. printf("\n Allocating memory. \n"); allocateCPUMemory(); // Generating raw stars printf("\n Generating raw star1. \n"); createRawStar(1); printf("\n Generating raw star2. \n"); createRawStar(2); drawPictureSeperate(); //while(1); // Seting up the GPU. printf("\n Setting up GPUs \n"); gPUsUsed = deviceSetup(); // The raw stars are in unnatural positions and have unnatural velocities. // The stars need to be run with a damping factor turned on // to let the stars move into naturl configurations. The damp will start high and be reduced to zero time = RawStarDampTime/RawStarDampLevels; printf("\n Damping raw stars for = %f hours, Dt = %f hours\n", time*SystemTimeConverterToSeconds/3600.0, Dt*SystemTimeConverterToSeconds/3600.0); copyStarsUpToGPU(gPUsUsed); for(int i = 0; i < RawStarDampLevels; i++) { damp = RawStarDampAmount - float(i)*RawStarDampAmount/((float)RawStarDampLevels); printf("\n Damping raw stars interation %d out of %d", i+1, RawStarDampLevels); starNbody(time, damp, Dt, gPUsUsed); } // Letting any residue from the damping settle out. time = RawStarRestTime; printf("\n\n Resting raw damped stars for %f hours", time*SystemTimeConverterToSeconds/3600.0); starNbody(time, 0.0, Dt, gPUsUsed); // Centering the stars and taking out any drift. copyStarsDownFromGPU(); setCenterOfMassToZero(1); setCenterOfMassToZero(2); setAverageVelocityToZero(1); setAverageVelocityToZero(2); // Now we need to set the push backs so that the radii of the stars is correct. 
printf("\n\n Running radius adjustment."); float corrector; float currentDiameterStar1 = 2.0*getStarRadius(1); float currentDiameterStar2 = 2.0*getStarRadius(2); printf("\n\n percent out1 = %f percent out2 = %f", (currentDiameterStar1 - DiameterStar1)/DiameterStar1, (currentDiameterStar2 - DiameterStar2)/DiameterStar2); printf("\n plasma pushback1 = %f or %e",VelCPU[2].w, VelCPU[2].w*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers))); printf("\n plasma pushback2 = %f or %e",VelCPU[NumberElementsStar1 +2].w, VelCPU[NumberElementsStar1 +2].w*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers))); while((DiameterTolerance < abs(currentDiameterStar1 - DiameterStar1)/DiameterStar1) || (DiameterTolerance < abs(currentDiameterStar2 - DiameterStar2)/DiameterStar2)) { if(DiameterStar1 < currentDiameterStar1) { corrector = DiameterAdjustmentSoftener*(currentDiameterStar1 - DiameterStar1)/currentDiameterStar1; } else { corrector = 10.0*DiameterAdjustmentSoftener*(currentDiameterStar1 - DiameterStar1)/DiameterStar1; } for(int i = 0; i < NumberElementsStar1; i++) { VelCPU[i].w = VelCPU[i].w*(1.0 - corrector); } //damp = DiameterAdjustmentDamp*abs(1.0 - corrector); damp = DiameterAdjustmentDamp*DiameterStar1/currentDiameterStar1; if(DiameterStar2 < currentDiameterStar2) { corrector = DiameterAdjustmentSoftener*(currentDiameterStar2 - DiameterStar2)/currentDiameterStar2; } else { corrector = 10.0*DiameterAdjustmentSoftener*(currentDiameterStar2 - DiameterStar2)/DiameterStar2; } for(int i = NumberElementsStar1; i < NumberElements; i++) { VelCPU[i].w = VelCPU[i].w*(1.0 - corrector); } if(damp < DiameterAdjustmentDamp*DiameterStar2/currentDiameterStar2) { //damp = DiameterAdjustmentDamp*abs(1.0 - corrector); damp = DiameterAdjustmentDamp*DiameterStar2/currentDiameterStar2; } copyStarsUpToGPU(gPUsUsed); time = DiameterAdjustmentTime; 
starNbody(time, damp, Dt, gPUsUsed); copyStarsDownFromGPU(); currentDiameterStar1 = 2.0*getStarRadius(1); currentDiameterStar2 = 2.0*getStarRadius(2); printf("\n\n percent out1 = %f percent out2 = %f", (currentDiameterStar1 - DiameterStar1)/DiameterStar1, (currentDiameterStar2 - DiameterStar2)/DiameterStar2); printf("\n plasma pushback1 = %f or %e",VelCPU[2].w, VelCPU[2].w*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers))); printf("\n plasma pushback2 = %f or %e",VelCPU[NumberElementsStar1 +2].w, VelCPU[NumberElementsStar1 +2].w*(SystemMassConverterToKilograms/(SystemTimeConverterToSeconds*SystemTimeConverterToSeconds*SystemLengthConverterToKilometers))); } // Letting any residue from the radius adjustment settle out. time = DiameterAdjustmentRestTime; printf("\n\n Resting diameter adjustment for %f hours", time*SystemTimeConverterToSeconds/3600.0); damp = 0.0; starNbody(time, damp, Dt, gPUsUsed); // Spinning stars copyStarsDownFromGPU(); setCenterOfMassToZero(1); setCenterOfMassToZero(2); setAverageVelocityToZero(1); setAverageVelocityToZero(2); printf("\n\n Spinning star1. \n"); spinStar(1); printf("\n Spinning star2. \n"); spinStar(2); // Letting any residue from the spinning settle out. copyStarsUpToGPU(gPUsUsed); time = SpinRestTime; damp = 0.0; printf("\n Running spin rest."); starNbody(time, 0.0, Dt, gPUsUsed); //Centering and removing any drift from stars. copyStarsDownFromGPU(); setCenterOfMassToZero(1); setCenterOfMassToZero(2); setAverageVelocityToZero(1); setAverageVelocityToZero(2); // Saving the stars positions and velocities to a file. printf("\n\n Saving final positions, velocities, and forces \n"); // Removing the fill that was used to hold temperaraly hold the stars before spinning. system("rm FilesFromBuild/StartPosVelForce"); recordStartPosVelForceOfCreatedStars(); printf("\n Recording stats \n"); recordStarStats(); // Freeing memory. 
printf("\n Cleaning up \n"); cleanUp(gPUsUsed); // Stopping timer and printing out run time. endTimer = clock(); int seconds = (endTimer - startTimer)/CLOCKS_PER_SEC; int hours = seconds/3600; int minutes = (seconds - hours*3600)/60; seconds = seconds - hours*3600 - minutes*60; printf("\n Total time taken for this run: %d hours %d minutes %d seconds\n", hours, minutes, seconds); printf("\n The run has finished successfully \n\n"); exit(0); } int main(int argc, char** argv) { glutInit(&argc,argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB); glutInitWindowSize(XWindowSize,YWindowSize); glutInitWindowPosition(0,0); glutCreateWindow("Creating Stars"); glutReshapeFunc(reshape); init(); glShadeModel(GL_SMOOTH); glClearColor(0.0, 0.0, 0.0, 0.0); glutDisplayFunc(Display); glutReshapeFunc(reshape); glutIdleFunc(control); glutMainLoop(); return 0; }
8440c6be531cced7b663c83dfb99dd4296452974.hip
// !!! This is a file automatically generated by hipify!!! //pass //--blockDim=512 --gridDim=1 --no-inline #include <call_kernel.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #define N 512 __global__ void helloCUDA(int x) { /// __requires(x == 143); __shared__ float S[256*32]; __shared__ float F[256]; unsigned int idx; //initialise data on shared memory for(int i = 0; // __invariant(__implies(__write(S), ((__write_offset_bytes(S)/sizeof(float)) % blockDim.x) == threadIdx.x)), i < x; i += (blockDim.x/32)) /* translate: i = 0; i < 143; i+=16 , total de iteraes: 8*/ { if((i+(threadIdx.x/32)) < x){ idx = (i+(threadIdx.x/32))*32+(threadIdx.x%32); S[idx] = F[i+(threadIdx.x/32)]; } } } int main (){ //helloCUDA <<<1,N>>>(143); ESBMC_verify_kernel(helloCUDA,1,N, 143); hipDeviceSynchronize(); return 0; }
8440c6be531cced7b663c83dfb99dd4296452974.cu
//pass //--blockDim=512 --gridDim=1 --no-inline #include <call_kernel.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <stdio.h> #define N 512 __global__ void helloCUDA(int x) { /// __requires(x == 143); __shared__ float S[256*32]; __shared__ float F[256]; unsigned int idx; //initialise data on shared memory for(int i = 0; // __invariant(__implies(__write(S), ((__write_offset_bytes(S)/sizeof(float)) % blockDim.x) == threadIdx.x)), i < x; i += (blockDim.x/32)) /* translate: i = 0; i < 143; i+=16 , total de iterações: 8*/ { if((i+(threadIdx.x/32)) < x){ idx = (i+(threadIdx.x/32))*32+(threadIdx.x%32); S[idx] = F[i+(threadIdx.x/32)]; } } } int main (){ //helloCUDA <<<1,N>>>(143); ESBMC_verify_kernel(helloCUDA,1,N, 143); cudaThreadSynchronize(); return 0; }
ForceCompositeGPU.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2016 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "hoomd/VectorMath.h" #include "hoomd/Index1D.h" #include "hoomd/ParticleData.cuh" // Maintainer: jglaser /*! \file ForceComposite.cu \brief Defines GPU kernel code for the composite particle integration on the GPU. */ //! Shared memory for body force and torque reduction, required allocation when the kernel is called extern __shared__ char sum[]; extern __shared__ Scalar sum_virial[]; //! Calculates the body forces and torques by summing the constituent particle forces using a fixed sliding window size /* Compute the force and torque sum on all bodies in the system from their constituent particles. n_bodies_per_block bodies are handled within each block of execution on the GPU. The reason for this is to decrease over-parallelism and use the GPU cores more effectively when bodies are smaller than the block size. Otherwise, small bodies leave many threads in the block idle with nothing to do. On start, the properties common to each body are read in, computed, and stored in shared memory for all the threads working on that body to access. Then, the threads loop over all particles that are part of the body with a sliding window. Each loop of the window computes the force and torque for block_size/n_bodies_per_block particles in as many threads in parallel. These quantities are summed over enough windows to cover the whole body. The block_size/n_bodies_per_block partial sums are stored in shared memory. Then n_bodies_per_block partial reductions are performed in parallel using all threads to sum the total force and torque on each body. This looks just like a normal reduction, except that it terminates at a certain level in the tree. 
To make the math for the partial reduction work out, block_size must be a power of 2 as must n_bodies_per_block. Performance testing on GF100 with many different bodies of different sizes ranging from 4-256 particles per body has found that the optimum block size for most bodies is 64 threads. Performance increases for all body sizes as n_bodies_per_block is increased, but only up to 8. n_bodies_per_block=16 slows performance significantly. Based on these performance results, this kernel is hardcoded to handle only 1,2,4,8 n_bodies_per_block with a power of 2 block size (hardcoded to 64 in the kernel launch). */ __global__ void gpu_rigid_force_sliding_kernel(Scalar4* d_force, Scalar4* d_torque, const unsigned int *d_molecule_len, const unsigned int *d_molecule_list, Index2D molecule_indexer, const Scalar4 *d_postype, const Scalar4* d_orientation, Index2D body_indexer, Scalar3* d_body_pos, Scalar4* d_body_orientation, const unsigned int *d_body_len, const unsigned int *d_body, unsigned int *d_flag, Scalar4* d_net_force, Scalar4* d_net_torque, unsigned int n_mol, unsigned int N, unsigned int window_size, unsigned int thread_mask, unsigned int n_bodies_per_block, bool zero_force) { // determine which body (0 ... n_bodies_per_block-1) this thread is working on // assign threads 0, 1, 2, ... to body 0, n, n+1, n+2, ... to body 1, and so on. unsigned int m = threadIdx.x / (blockDim.x / n_bodies_per_block); // body_force and body_torque are each shared memory arrays with 1 element per threads Scalar4 *body_force = (Scalar4 *)sum; Scalar3 *body_torque = (Scalar3 *) (body_force + blockDim.x); // store body type, orientation and the index in molecule list in shared memory. Up to 16 bodies per block can // be handled. 
__shared__ unsigned int body_type[16]; __shared__ Scalar4 body_orientation[16]; __shared__ unsigned int mol_idx[16]; __shared__ unsigned int central_idx[16]; // each thread makes partial sums of force and torque of all the particles that this thread loops over Scalar4 sum_force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0),Scalar(0.0)); Scalar3 sum_torque = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0)); // thread_mask is a bitmask that masks out the high bits in threadIdx.x. // threadIdx.x & thread_mask is an index from 0 to block_size/n_bodies_per_block-1 and determines what offset // this thread is to use when accessing the particles in the body if ((threadIdx.x & thread_mask) == 0) { // thread 0 for this body reads in the body id and orientation and stores them in shared memory int group_idx = blockIdx.x*n_bodies_per_block + m; if (group_idx < n_mol) { mol_idx[m] = group_idx; // first ptl is central ptl central_idx[m] = d_molecule_list[molecule_indexer(group_idx, 0)]; body_type[m] = __scalar_as_int(d_postype[central_idx[m]].w); body_orientation[m] = d_orientation[central_idx[m]]; } else { mol_idx[m] = NO_BODY; } } __syncthreads(); if (mol_idx[m] != NO_BODY && central_idx[m] < N) { // compute the number of windows that we need to loop over unsigned int mol_len = d_molecule_len[mol_idx[m]]; unsigned int n_windows = mol_len / window_size + 1; if (mol_len != d_body_len[body_type[m]] + 1) { // incomplete molecule atomicMax(d_flag, d_body[central_idx[m]] + 1); } // slide the window throughout the block for (unsigned int start = 0; start < n_windows; start++) { // determine the index with this body that this particle should handle unsigned int k = start * window_size + (threadIdx.x & thread_mask); // if that index is in the body we are actually handling a real body if (k < mol_len) { // determine the particle idx of the particle unsigned int pidx = d_molecule_list[molecule_indexer(mol_idx[m],k)]; // if this particle is not the central particle if (pidx != 
central_idx[m]) { // calculate body force and torques vec3<Scalar> particle_pos(d_body_pos[body_indexer(body_type[m], k-1)]); Scalar4 fi = d_net_force[pidx]; //will likely need to rotate these components too vec3<Scalar> ti(d_net_torque[pidx]); // tally the force in the per thread counter sum_force.x += fi.x; sum_force.y += fi.y; sum_force.z += fi.z; // sum up energy sum_force.w += fi.w; // zero force only if we don't need it later if (zero_force) { // zero net energy on constituent ptls to avoid double counting // also zero net force for consistency d_net_force[pidx] = make_scalar4(0.0,0.0,0.0,0.0); } vec3<Scalar> ri = rotate(quat<Scalar>(body_orientation[m]), particle_pos); // torque = r x f vec3<Scalar> del_torque(cross(ri, vec3<Scalar>(fi))); // tally the torque in the per thread counter sum_torque.x += ti.x+del_torque.x; sum_torque.y += ti.y+del_torque.y; sum_torque.z += ti.z+del_torque.z; // zero net torque on constituent particles d_net_torque[pidx] = make_scalar4(0.0,0.0,0.0,0.0); } } } } __syncthreads(); // put the partial sums into shared memory body_force[threadIdx.x] = sum_force; body_torque[threadIdx.x] = sum_torque; __syncthreads(); // perform a set of partial reductions. 
Each block_size/n_bodies_per_block threads performs a sum reduction // just within its own group unsigned int offset = window_size >> 1; while (offset > 0) { if ((threadIdx.x & thread_mask) < offset) { body_force[threadIdx.x].x += body_force[threadIdx.x + offset].x; body_force[threadIdx.x].y += body_force[threadIdx.x + offset].y; body_force[threadIdx.x].z += body_force[threadIdx.x + offset].z; body_force[threadIdx.x].w += body_force[threadIdx.x + offset].w; body_torque[threadIdx.x].x += body_torque[threadIdx.x + offset].x; body_torque[threadIdx.x].y += body_torque[threadIdx.x + offset].y; body_torque[threadIdx.x].z += body_torque[threadIdx.x + offset].z; } offset >>= 1; __syncthreads(); } // thread 0 within this body writes out the total force and torque for the body if ((threadIdx.x & thread_mask) == 0 && mol_idx[m] != NO_BODY) { d_force[central_idx[m]] = body_force[threadIdx.x]; d_torque[central_idx[m]] = make_scalar4(body_torque[threadIdx.x].x, body_torque[threadIdx.x].y, body_torque[threadIdx.x].z, 0.0f); } } __global__ void gpu_rigid_virial_sliding_kernel(Scalar* d_virial, const unsigned int *d_molecule_len, const unsigned int *d_molecule_list, Index2D molecule_indexer, const Scalar4 *d_postype, const Scalar4* d_orientation, Index2D body_indexer, Scalar3* d_body_pos, Scalar4* d_body_orientation, Scalar4* d_net_force, Scalar* d_net_virial, unsigned int n_mol, unsigned int N, unsigned int net_virial_pitch, unsigned int virial_pitch, unsigned int window_size, unsigned int thread_mask, unsigned int n_bodies_per_block) { // determine which body (0 ... n_bodies_per_block-1) this thread is working on // assign threads 0, 1, 2, ... to body 0, n, n+1, n+2, ... to body 1, and so on. 
unsigned int m = threadIdx.x / (blockDim.x / n_bodies_per_block); // body_force and body_torque are each shared memory arrays with 1 element per threads Scalar *body_virial_xx = sum_virial; Scalar *body_virial_xy = &sum_virial[1*blockDim.x]; Scalar *body_virial_xz = &sum_virial[2*blockDim.x]; Scalar *body_virial_yy = &sum_virial[3*blockDim.x]; Scalar *body_virial_yz = &sum_virial[4*blockDim.x]; Scalar *body_virial_zz = &sum_virial[5*blockDim.x]; // store body type, orientation and the index in molecule list in shared memory. Up to 16 bodies per block can // be handled. __shared__ unsigned int body_type[16]; __shared__ Scalar4 body_orientation[16]; __shared__ unsigned int mol_idx[16]; __shared__ unsigned int central_idx[16]; // each thread makes partial sums of the virial of all the particles that this thread loops over Scalar sum_virial_xx(0.0); Scalar sum_virial_xy(0.0); Scalar sum_virial_xz(0.0); Scalar sum_virial_yy(0.0); Scalar sum_virial_yz(0.0); Scalar sum_virial_zz(0.0); // thread_mask is a bitmask that masks out the high bits in threadIdx.x. 
// threadIdx.x & thread_mask is an index from 0 to block_size/n_bodies_per_block-1 and determines what offset // this thread is to use when accessing the particles in the body if ((threadIdx.x & thread_mask) == 0) { // thread 0 for this body reads in the body id and orientation and stores them in shared memory int group_idx = blockIdx.x*n_bodies_per_block + m; if (group_idx < n_mol) { mol_idx[m] = group_idx; // first ptl is central ptl central_idx[m] = d_molecule_list[molecule_indexer(group_idx, 0)]; body_type[m] = __scalar_as_int(d_postype[central_idx[m]].w); body_orientation[m] = d_orientation[central_idx[m]]; } else { mol_idx[m] = NO_BODY; } } __syncthreads(); if (mol_idx[m] != NO_BODY && central_idx[m] < N) { // compute the number of windows that we need to loop over unsigned int mol_len = d_molecule_len[mol_idx[m]]; unsigned int n_windows = mol_len / window_size + 1; // slide the window throughout the block for (unsigned int start = 0; start < n_windows; start++) { // determine the index with this body that this particle should handle unsigned int k = start * window_size + (threadIdx.x & thread_mask); // if that index is in the body we are actually handling a real body if (k < mol_len) { // determine the particle idx of the particle unsigned int pidx = d_molecule_list[molecule_indexer(mol_idx[m],k)]; // if this particle is not the central particle if (pidx != central_idx[m]) { // calculate body force and torques vec3<Scalar> particle_pos(d_body_pos[body_indexer(body_type[m], k-1)]); Scalar4 fi = d_net_force[pidx]; vec3<Scalar> ri = rotate(quat<Scalar>(body_orientation[m]), particle_pos); // sum up virial Scalar virialxx = d_net_virial[0*net_virial_pitch+pidx]; Scalar virialxy = d_net_virial[1*net_virial_pitch+pidx]; Scalar virialxz = d_net_virial[2*net_virial_pitch+pidx]; Scalar virialyy = d_net_virial[3*net_virial_pitch+pidx]; Scalar virialyz = d_net_virial[4*net_virial_pitch+pidx]; Scalar virialzz = d_net_virial[5*net_virial_pitch+pidx]; // subtract 
intra-body virial prt sum_virial_xx += virialxx - fi.x*ri.x; sum_virial_xy += virialxy - fi.x*ri.y; sum_virial_xz += virialxz - fi.x*ri.z; sum_virial_yy += virialyy - fi.y*ri.y; sum_virial_yz += virialyz - fi.y*ri.z; sum_virial_zz += virialzz - fi.z*ri.z; // zero force and virial on constituent particles d_net_force[pidx] = make_scalar4(0.0,0.0,0.0,0.0); d_net_virial[0*net_virial_pitch+pidx] = Scalar(0.0); d_net_virial[1*net_virial_pitch+pidx] = Scalar(0.0); d_net_virial[2*net_virial_pitch+pidx] = Scalar(0.0); d_net_virial[3*net_virial_pitch+pidx] = Scalar(0.0); d_net_virial[4*net_virial_pitch+pidx] = Scalar(0.0); d_net_virial[5*net_virial_pitch+pidx] = Scalar(0.0); } } } } __syncthreads(); // put the partial sums into shared memory body_virial_xx[threadIdx.x] = sum_virial_xx; body_virial_xy[threadIdx.x] = sum_virial_xy; body_virial_xz[threadIdx.x] = sum_virial_xz; body_virial_yy[threadIdx.x] = sum_virial_yy; body_virial_yz[threadIdx.x] = sum_virial_yz; body_virial_zz[threadIdx.x] = sum_virial_zz; __syncthreads(); // perform a set of partial reductions. 
Each block_size/n_bodies_per_block threads performs a sum reduction // just within its own group unsigned int offset = window_size >> 1; while (offset > 0) { if ((threadIdx.x & thread_mask) < offset) { body_virial_xx[threadIdx.x] += body_virial_xx[threadIdx.x + offset]; body_virial_xy[threadIdx.x] += body_virial_xy[threadIdx.x + offset]; body_virial_xz[threadIdx.x] += body_virial_xz[threadIdx.x + offset]; body_virial_yy[threadIdx.x] += body_virial_yy[threadIdx.x + offset]; body_virial_yz[threadIdx.x] += body_virial_yz[threadIdx.x + offset]; body_virial_zz[threadIdx.x] += body_virial_zz[threadIdx.x + offset]; } offset >>= 1; __syncthreads(); } // thread 0 within this body writes out the total virial for the body if ((threadIdx.x & thread_mask) == 0 && mol_idx[m] != NO_BODY) { d_virial[0*virial_pitch+central_idx[m]] = body_virial_xx[threadIdx.x]; d_virial[1*virial_pitch+central_idx[m]] = body_virial_xy[threadIdx.x]; d_virial[2*virial_pitch+central_idx[m]] = body_virial_xz[threadIdx.x]; d_virial[3*virial_pitch+central_idx[m]] = body_virial_yy[threadIdx.x]; d_virial[4*virial_pitch+central_idx[m]] = body_virial_yz[threadIdx.x]; d_virial[5*virial_pitch+central_idx[m]] = body_virial_zz[threadIdx.x]; } } /*! 
*/ hipError_t gpu_rigid_force(Scalar4* d_force, Scalar4* d_torque, const unsigned int *d_molecule_len, const unsigned int *d_molecule_list, Index2D molecule_indexer, const Scalar4 *d_postype, const Scalar4* d_orientation, Index2D body_indexer, Scalar3* d_body_pos, Scalar4* d_body_orientation, const unsigned int *d_body_len, const unsigned int *d_body, unsigned int *d_flag, Scalar4* d_net_force, Scalar4* d_net_torque, unsigned int n_mol, unsigned int N, unsigned int n_bodies_per_block, unsigned int block_size, const hipDeviceProp_t& dev_prop, bool zero_force) { // reset force and torque hipMemset(d_force, 0, sizeof(Scalar4)*N); hipMemset(d_torque, 0, sizeof(Scalar4)*N); dim3 force_grid(n_mol / n_bodies_per_block + 1, 1, 1); static unsigned int max_block_size = UINT_MAX; static hipFuncAttributes attr; if (max_block_size == UINT_MAX) { hipFuncGetAttributes(&attr, (const void *) gpu_rigid_force_sliding_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = max_block_size < block_size ? 
max_block_size : block_size; // round down to nearest power of two unsigned int b = 1; while (b * 2 <= run_block_size) { b *= 2; } run_block_size = b; unsigned int window_size = run_block_size / n_bodies_per_block; unsigned int thread_mask = window_size - 1; unsigned int shared_bytes = run_block_size * (sizeof(Scalar4) + sizeof(Scalar3)); while (shared_bytes + attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock) { // block size is power of two run_block_size /= 2; shared_bytes = run_block_size * (sizeof(Scalar4) + sizeof(Scalar3)); window_size = run_block_size / n_bodies_per_block; thread_mask = window_size - 1; } hipLaunchKernelGGL(( gpu_rigid_force_sliding_kernel), dim3(force_grid), dim3(run_block_size), shared_bytes , 0, d_force, d_torque, d_molecule_len, d_molecule_list, molecule_indexer, d_postype, d_orientation, body_indexer, d_body_pos, d_body_orientation, d_body_len, d_body, d_flag, d_net_force, d_net_torque, n_mol, N, window_size, thread_mask, n_bodies_per_block, zero_force); return hipSuccess; } hipError_t gpu_rigid_virial(Scalar* d_virial, const unsigned int *d_molecule_len, const unsigned int *d_molecule_list, Index2D molecule_indexer, const Scalar4 *d_postype, const Scalar4* d_orientation, Index2D body_indexer, Scalar3* d_body_pos, Scalar4* d_body_orientation, Scalar4* d_net_force, Scalar* d_net_virial, unsigned int n_mol, unsigned int N, unsigned int n_bodies_per_block, unsigned int net_virial_pitch, unsigned int virial_pitch, unsigned int block_size, const hipDeviceProp_t& dev_prop) { // reset force and torque hipMemset(d_virial,0, sizeof(Scalar)*virial_pitch*6); dim3 force_grid(n_mol / n_bodies_per_block + 1, 1, 1); static unsigned int max_block_size = UINT_MAX; static hipFuncAttributes attr; if (max_block_size == UINT_MAX) { hipFuncGetAttributes(&attr, (const void *) gpu_rigid_virial_sliding_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = max_block_size < block_size ? 
max_block_size : block_size; // round down to nearest power of two unsigned int b = 1; while (b * 2 <= run_block_size) { b *= 2; } run_block_size = b; unsigned int window_size = run_block_size / n_bodies_per_block; unsigned int thread_mask = window_size - 1; unsigned int shared_bytes = 6 * run_block_size * sizeof(Scalar); while (shared_bytes + attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock) { // block size is power of two run_block_size /= 2; shared_bytes = 6 * run_block_size * sizeof(Scalar); window_size = run_block_size / n_bodies_per_block; thread_mask = window_size - 1; } hipLaunchKernelGGL(( gpu_rigid_virial_sliding_kernel), dim3(force_grid), dim3(run_block_size), shared_bytes , 0, d_virial, d_molecule_len, d_molecule_list, molecule_indexer, d_postype, d_orientation, body_indexer, d_body_pos, d_body_orientation, d_net_force, d_net_virial, n_mol, N, net_virial_pitch, virial_pitch, window_size, thread_mask, n_bodies_per_block); return hipSuccess; } __global__ void gpu_update_composite_kernel(unsigned int N, unsigned int n_ghost, const unsigned int *d_body, const unsigned int *d_rtag, Scalar4 *d_postype, Scalar4 *d_orientation, Index2D body_indexer, const Scalar3 *d_body_pos, const Scalar4 *d_body_orientation, const unsigned int *d_body_len, const unsigned int *d_molecule_order, const unsigned int *d_molecule_len, const unsigned int *d_molecule_idx, int3 *d_image, const BoxDim box, unsigned int *d_flag) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N+n_ghost) return; unsigned int central_tag = d_body[idx]; if (central_tag == NO_BODY) return; unsigned int central_idx = d_rtag[central_tag]; if (central_idx == NOT_LOCAL && idx >= N) return; // do not overwrite central ptl if (idx == central_idx) return; Scalar4 postype = d_postype[central_idx]; vec3<Scalar> pos(postype); quat<Scalar> orientation(d_orientation[central_idx]); unsigned int body_type = __scalar_as_int(postype.w); unsigned int body_len = d_body_len[body_type]; unsigned int 
mol_idx = d_molecule_idx[idx]; if (body_len != d_molecule_len[mol_idx]-1) { // if a molecule with a local member is incomplete, this is an error if (idx < N) { atomicMax(d_flag, central_tag+1); } // otherwise, ignore return; } int3 img = d_image[central_idx]; unsigned int idx_in_body = d_molecule_order[idx] - 1; vec3<Scalar> local_pos(d_body_pos[body_indexer(body_type, idx_in_body)]); vec3<Scalar> dr_space = rotate(orientation, local_pos); vec3<Scalar> updated_pos(pos); updated_pos += dr_space; quat<Scalar> local_orientation(d_body_orientation[body_indexer(body_type, idx_in_body)]); quat<Scalar> updated_orientation = orientation*local_orientation; int3 imgi = img; box.wrap(updated_pos, imgi); unsigned int type = __scalar_as_int(d_postype[idx].w); d_postype[idx] = make_scalar4(updated_pos.x, updated_pos.y, updated_pos.z, __int_as_scalar(type)); d_image[idx] = imgi; } void gpu_update_composite(unsigned int N, unsigned int n_ghost, const unsigned int *d_body, const unsigned int *d_rtag, Scalar4 *d_postype, Scalar4 *d_orientation, Index2D body_indexer, const Scalar3 *d_body_pos, const Scalar4 *d_body_orientation, const unsigned int *d_body_len, const unsigned int *d_molecule_order, const unsigned int *d_molecule_len, const unsigned int *d_molecule_idx, int3 *d_image, const BoxDim box, unsigned int block_size, unsigned int *d_flag) { unsigned int run_block_size = block_size; static unsigned int max_block_size = UINT_MAX; static hipFuncAttributes attr; if (max_block_size == UINT_MAX) { hipFuncGetAttributes(&attr, (const void *) gpu_update_composite_kernel); max_block_size = attr.maxThreadsPerBlock; } if (max_block_size <= run_block_size) { run_block_size = max_block_size; } unsigned int n_blocks = (N+n_ghost)/run_block_size + 1; hipLaunchKernelGGL(( gpu_update_composite_kernel), dim3(n_blocks),dim3(run_block_size), 0, 0, N, n_ghost, d_body, d_rtag, d_postype, d_orientation, body_indexer, d_body_pos, d_body_orientation, d_body_len, d_molecule_order, d_molecule_len, 
d_molecule_idx, d_image, box, d_flag); }
ForceCompositeGPU.cu
// Copyright (c) 2009-2016 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "hoomd/VectorMath.h" #include "hoomd/Index1D.h" #include "hoomd/ParticleData.cuh" // Maintainer: jglaser /*! \file ForceComposite.cu \brief Defines GPU kernel code for the composite particle integration on the GPU. */ //! Shared memory for body force and torque reduction, required allocation when the kernel is called extern __shared__ char sum[]; extern __shared__ Scalar sum_virial[]; //! Calculates the body forces and torques by summing the constituent particle forces using a fixed sliding window size /* Compute the force and torque sum on all bodies in the system from their constituent particles. n_bodies_per_block bodies are handled within each block of execution on the GPU. The reason for this is to decrease over-parallelism and use the GPU cores more effectively when bodies are smaller than the block size. Otherwise, small bodies leave many threads in the block idle with nothing to do. On start, the properties common to each body are read in, computed, and stored in shared memory for all the threads working on that body to access. Then, the threads loop over all particles that are part of the body with a sliding window. Each loop of the window computes the force and torque for block_size/n_bodies_per_block particles in as many threads in parallel. These quantities are summed over enough windows to cover the whole body. The block_size/n_bodies_per_block partial sums are stored in shared memory. Then n_bodies_per_block partial reductions are performed in parallel using all threads to sum the total force and torque on each body. This looks just like a normal reduction, except that it terminates at a certain level in the tree. To make the math for the partial reduction work out, block_size must be a power of 2 as must n_bodies_per_block. 
Performance testing on GF100 with many different bodies of different sizes ranging from 4-256 particles per body has found that the optimum block size for most bodies is 64 threads. Performance increases for all body sizes as n_bodies_per_block is increased, but only up to 8. n_bodies_per_block=16 slows performance significantly. Based on these performance results, this kernel is hardcoded to handle only 1,2,4,8 n_bodies_per_block with a power of 2 block size (hardcoded to 64 in the kernel launch). */ __global__ void gpu_rigid_force_sliding_kernel(Scalar4* d_force, Scalar4* d_torque, const unsigned int *d_molecule_len, const unsigned int *d_molecule_list, Index2D molecule_indexer, const Scalar4 *d_postype, const Scalar4* d_orientation, Index2D body_indexer, Scalar3* d_body_pos, Scalar4* d_body_orientation, const unsigned int *d_body_len, const unsigned int *d_body, unsigned int *d_flag, Scalar4* d_net_force, Scalar4* d_net_torque, unsigned int n_mol, unsigned int N, unsigned int window_size, unsigned int thread_mask, unsigned int n_bodies_per_block, bool zero_force) { // determine which body (0 ... n_bodies_per_block-1) this thread is working on // assign threads 0, 1, 2, ... to body 0, n, n+1, n+2, ... to body 1, and so on. unsigned int m = threadIdx.x / (blockDim.x / n_bodies_per_block); // body_force and body_torque are each shared memory arrays with 1 element per threads Scalar4 *body_force = (Scalar4 *)sum; Scalar3 *body_torque = (Scalar3 *) (body_force + blockDim.x); // store body type, orientation and the index in molecule list in shared memory. Up to 16 bodies per block can // be handled. 
__shared__ unsigned int body_type[16]; __shared__ Scalar4 body_orientation[16]; __shared__ unsigned int mol_idx[16]; __shared__ unsigned int central_idx[16]; // each thread makes partial sums of force and torque of all the particles that this thread loops over Scalar4 sum_force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0),Scalar(0.0)); Scalar3 sum_torque = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0)); // thread_mask is a bitmask that masks out the high bits in threadIdx.x. // threadIdx.x & thread_mask is an index from 0 to block_size/n_bodies_per_block-1 and determines what offset // this thread is to use when accessing the particles in the body if ((threadIdx.x & thread_mask) == 0) { // thread 0 for this body reads in the body id and orientation and stores them in shared memory int group_idx = blockIdx.x*n_bodies_per_block + m; if (group_idx < n_mol) { mol_idx[m] = group_idx; // first ptl is central ptl central_idx[m] = d_molecule_list[molecule_indexer(group_idx, 0)]; body_type[m] = __scalar_as_int(d_postype[central_idx[m]].w); body_orientation[m] = d_orientation[central_idx[m]]; } else { mol_idx[m] = NO_BODY; } } __syncthreads(); if (mol_idx[m] != NO_BODY && central_idx[m] < N) { // compute the number of windows that we need to loop over unsigned int mol_len = d_molecule_len[mol_idx[m]]; unsigned int n_windows = mol_len / window_size + 1; if (mol_len != d_body_len[body_type[m]] + 1) { // incomplete molecule atomicMax(d_flag, d_body[central_idx[m]] + 1); } // slide the window throughout the block for (unsigned int start = 0; start < n_windows; start++) { // determine the index with this body that this particle should handle unsigned int k = start * window_size + (threadIdx.x & thread_mask); // if that index is in the body we are actually handling a real body if (k < mol_len) { // determine the particle idx of the particle unsigned int pidx = d_molecule_list[molecule_indexer(mol_idx[m],k)]; // if this particle is not the central particle if (pidx != 
central_idx[m]) { // calculate body force and torques vec3<Scalar> particle_pos(d_body_pos[body_indexer(body_type[m], k-1)]); Scalar4 fi = d_net_force[pidx]; //will likely need to rotate these components too vec3<Scalar> ti(d_net_torque[pidx]); // tally the force in the per thread counter sum_force.x += fi.x; sum_force.y += fi.y; sum_force.z += fi.z; // sum up energy sum_force.w += fi.w; // zero force only if we don't need it later if (zero_force) { // zero net energy on constituent ptls to avoid double counting // also zero net force for consistency d_net_force[pidx] = make_scalar4(0.0,0.0,0.0,0.0); } vec3<Scalar> ri = rotate(quat<Scalar>(body_orientation[m]), particle_pos); // torque = r x f vec3<Scalar> del_torque(cross(ri, vec3<Scalar>(fi))); // tally the torque in the per thread counter sum_torque.x += ti.x+del_torque.x; sum_torque.y += ti.y+del_torque.y; sum_torque.z += ti.z+del_torque.z; // zero net torque on constituent particles d_net_torque[pidx] = make_scalar4(0.0,0.0,0.0,0.0); } } } } __syncthreads(); // put the partial sums into shared memory body_force[threadIdx.x] = sum_force; body_torque[threadIdx.x] = sum_torque; __syncthreads(); // perform a set of partial reductions. 
Each block_size/n_bodies_per_block threads performs a sum reduction // just within its own group unsigned int offset = window_size >> 1; while (offset > 0) { if ((threadIdx.x & thread_mask) < offset) { body_force[threadIdx.x].x += body_force[threadIdx.x + offset].x; body_force[threadIdx.x].y += body_force[threadIdx.x + offset].y; body_force[threadIdx.x].z += body_force[threadIdx.x + offset].z; body_force[threadIdx.x].w += body_force[threadIdx.x + offset].w; body_torque[threadIdx.x].x += body_torque[threadIdx.x + offset].x; body_torque[threadIdx.x].y += body_torque[threadIdx.x + offset].y; body_torque[threadIdx.x].z += body_torque[threadIdx.x + offset].z; } offset >>= 1; __syncthreads(); } // thread 0 within this body writes out the total force and torque for the body if ((threadIdx.x & thread_mask) == 0 && mol_idx[m] != NO_BODY) { d_force[central_idx[m]] = body_force[threadIdx.x]; d_torque[central_idx[m]] = make_scalar4(body_torque[threadIdx.x].x, body_torque[threadIdx.x].y, body_torque[threadIdx.x].z, 0.0f); } } __global__ void gpu_rigid_virial_sliding_kernel(Scalar* d_virial, const unsigned int *d_molecule_len, const unsigned int *d_molecule_list, Index2D molecule_indexer, const Scalar4 *d_postype, const Scalar4* d_orientation, Index2D body_indexer, Scalar3* d_body_pos, Scalar4* d_body_orientation, Scalar4* d_net_force, Scalar* d_net_virial, unsigned int n_mol, unsigned int N, unsigned int net_virial_pitch, unsigned int virial_pitch, unsigned int window_size, unsigned int thread_mask, unsigned int n_bodies_per_block) { // determine which body (0 ... n_bodies_per_block-1) this thread is working on // assign threads 0, 1, 2, ... to body 0, n, n+1, n+2, ... to body 1, and so on. 
unsigned int m = threadIdx.x / (blockDim.x / n_bodies_per_block); // body_force and body_torque are each shared memory arrays with 1 element per threads Scalar *body_virial_xx = sum_virial; Scalar *body_virial_xy = &sum_virial[1*blockDim.x]; Scalar *body_virial_xz = &sum_virial[2*blockDim.x]; Scalar *body_virial_yy = &sum_virial[3*blockDim.x]; Scalar *body_virial_yz = &sum_virial[4*blockDim.x]; Scalar *body_virial_zz = &sum_virial[5*blockDim.x]; // store body type, orientation and the index in molecule list in shared memory. Up to 16 bodies per block can // be handled. __shared__ unsigned int body_type[16]; __shared__ Scalar4 body_orientation[16]; __shared__ unsigned int mol_idx[16]; __shared__ unsigned int central_idx[16]; // each thread makes partial sums of the virial of all the particles that this thread loops over Scalar sum_virial_xx(0.0); Scalar sum_virial_xy(0.0); Scalar sum_virial_xz(0.0); Scalar sum_virial_yy(0.0); Scalar sum_virial_yz(0.0); Scalar sum_virial_zz(0.0); // thread_mask is a bitmask that masks out the high bits in threadIdx.x. 
// threadIdx.x & thread_mask is an index from 0 to block_size/n_bodies_per_block-1 and determines what offset // this thread is to use when accessing the particles in the body if ((threadIdx.x & thread_mask) == 0) { // thread 0 for this body reads in the body id and orientation and stores them in shared memory int group_idx = blockIdx.x*n_bodies_per_block + m; if (group_idx < n_mol) { mol_idx[m] = group_idx; // first ptl is central ptl central_idx[m] = d_molecule_list[molecule_indexer(group_idx, 0)]; body_type[m] = __scalar_as_int(d_postype[central_idx[m]].w); body_orientation[m] = d_orientation[central_idx[m]]; } else { mol_idx[m] = NO_BODY; } } __syncthreads(); if (mol_idx[m] != NO_BODY && central_idx[m] < N) { // compute the number of windows that we need to loop over unsigned int mol_len = d_molecule_len[mol_idx[m]]; unsigned int n_windows = mol_len / window_size + 1; // slide the window throughout the block for (unsigned int start = 0; start < n_windows; start++) { // determine the index with this body that this particle should handle unsigned int k = start * window_size + (threadIdx.x & thread_mask); // if that index is in the body we are actually handling a real body if (k < mol_len) { // determine the particle idx of the particle unsigned int pidx = d_molecule_list[molecule_indexer(mol_idx[m],k)]; // if this particle is not the central particle if (pidx != central_idx[m]) { // calculate body force and torques vec3<Scalar> particle_pos(d_body_pos[body_indexer(body_type[m], k-1)]); Scalar4 fi = d_net_force[pidx]; vec3<Scalar> ri = rotate(quat<Scalar>(body_orientation[m]), particle_pos); // sum up virial Scalar virialxx = d_net_virial[0*net_virial_pitch+pidx]; Scalar virialxy = d_net_virial[1*net_virial_pitch+pidx]; Scalar virialxz = d_net_virial[2*net_virial_pitch+pidx]; Scalar virialyy = d_net_virial[3*net_virial_pitch+pidx]; Scalar virialyz = d_net_virial[4*net_virial_pitch+pidx]; Scalar virialzz = d_net_virial[5*net_virial_pitch+pidx]; // subtract 
intra-body virial prt sum_virial_xx += virialxx - fi.x*ri.x; sum_virial_xy += virialxy - fi.x*ri.y; sum_virial_xz += virialxz - fi.x*ri.z; sum_virial_yy += virialyy - fi.y*ri.y; sum_virial_yz += virialyz - fi.y*ri.z; sum_virial_zz += virialzz - fi.z*ri.z; // zero force and virial on constituent particles d_net_force[pidx] = make_scalar4(0.0,0.0,0.0,0.0); d_net_virial[0*net_virial_pitch+pidx] = Scalar(0.0); d_net_virial[1*net_virial_pitch+pidx] = Scalar(0.0); d_net_virial[2*net_virial_pitch+pidx] = Scalar(0.0); d_net_virial[3*net_virial_pitch+pidx] = Scalar(0.0); d_net_virial[4*net_virial_pitch+pidx] = Scalar(0.0); d_net_virial[5*net_virial_pitch+pidx] = Scalar(0.0); } } } } __syncthreads(); // put the partial sums into shared memory body_virial_xx[threadIdx.x] = sum_virial_xx; body_virial_xy[threadIdx.x] = sum_virial_xy; body_virial_xz[threadIdx.x] = sum_virial_xz; body_virial_yy[threadIdx.x] = sum_virial_yy; body_virial_yz[threadIdx.x] = sum_virial_yz; body_virial_zz[threadIdx.x] = sum_virial_zz; __syncthreads(); // perform a set of partial reductions. 
Each block_size/n_bodies_per_block threads performs a sum reduction // just within its own group unsigned int offset = window_size >> 1; while (offset > 0) { if ((threadIdx.x & thread_mask) < offset) { body_virial_xx[threadIdx.x] += body_virial_xx[threadIdx.x + offset]; body_virial_xy[threadIdx.x] += body_virial_xy[threadIdx.x + offset]; body_virial_xz[threadIdx.x] += body_virial_xz[threadIdx.x + offset]; body_virial_yy[threadIdx.x] += body_virial_yy[threadIdx.x + offset]; body_virial_yz[threadIdx.x] += body_virial_yz[threadIdx.x + offset]; body_virial_zz[threadIdx.x] += body_virial_zz[threadIdx.x + offset]; } offset >>= 1; __syncthreads(); } // thread 0 within this body writes out the total virial for the body if ((threadIdx.x & thread_mask) == 0 && mol_idx[m] != NO_BODY) { d_virial[0*virial_pitch+central_idx[m]] = body_virial_xx[threadIdx.x]; d_virial[1*virial_pitch+central_idx[m]] = body_virial_xy[threadIdx.x]; d_virial[2*virial_pitch+central_idx[m]] = body_virial_xz[threadIdx.x]; d_virial[3*virial_pitch+central_idx[m]] = body_virial_yy[threadIdx.x]; d_virial[4*virial_pitch+central_idx[m]] = body_virial_yz[threadIdx.x]; d_virial[5*virial_pitch+central_idx[m]] = body_virial_zz[threadIdx.x]; } } /*! 
*/ cudaError_t gpu_rigid_force(Scalar4* d_force, Scalar4* d_torque, const unsigned int *d_molecule_len, const unsigned int *d_molecule_list, Index2D molecule_indexer, const Scalar4 *d_postype, const Scalar4* d_orientation, Index2D body_indexer, Scalar3* d_body_pos, Scalar4* d_body_orientation, const unsigned int *d_body_len, const unsigned int *d_body, unsigned int *d_flag, Scalar4* d_net_force, Scalar4* d_net_torque, unsigned int n_mol, unsigned int N, unsigned int n_bodies_per_block, unsigned int block_size, const cudaDeviceProp& dev_prop, bool zero_force) { // reset force and torque cudaMemset(d_force, 0, sizeof(Scalar4)*N); cudaMemset(d_torque, 0, sizeof(Scalar4)*N); dim3 force_grid(n_mol / n_bodies_per_block + 1, 1, 1); static unsigned int max_block_size = UINT_MAX; static cudaFuncAttributes attr; if (max_block_size == UINT_MAX) { cudaFuncGetAttributes(&attr, (const void *) gpu_rigid_force_sliding_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = max_block_size < block_size ? 
max_block_size : block_size; // round down to nearest power of two unsigned int b = 1; while (b * 2 <= run_block_size) { b *= 2; } run_block_size = b; unsigned int window_size = run_block_size / n_bodies_per_block; unsigned int thread_mask = window_size - 1; unsigned int shared_bytes = run_block_size * (sizeof(Scalar4) + sizeof(Scalar3)); while (shared_bytes + attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock) { // block size is power of two run_block_size /= 2; shared_bytes = run_block_size * (sizeof(Scalar4) + sizeof(Scalar3)); window_size = run_block_size / n_bodies_per_block; thread_mask = window_size - 1; } gpu_rigid_force_sliding_kernel<<< force_grid, run_block_size, shared_bytes >>>( d_force, d_torque, d_molecule_len, d_molecule_list, molecule_indexer, d_postype, d_orientation, body_indexer, d_body_pos, d_body_orientation, d_body_len, d_body, d_flag, d_net_force, d_net_torque, n_mol, N, window_size, thread_mask, n_bodies_per_block, zero_force); return cudaSuccess; } cudaError_t gpu_rigid_virial(Scalar* d_virial, const unsigned int *d_molecule_len, const unsigned int *d_molecule_list, Index2D molecule_indexer, const Scalar4 *d_postype, const Scalar4* d_orientation, Index2D body_indexer, Scalar3* d_body_pos, Scalar4* d_body_orientation, Scalar4* d_net_force, Scalar* d_net_virial, unsigned int n_mol, unsigned int N, unsigned int n_bodies_per_block, unsigned int net_virial_pitch, unsigned int virial_pitch, unsigned int block_size, const cudaDeviceProp& dev_prop) { // reset force and torque cudaMemset(d_virial,0, sizeof(Scalar)*virial_pitch*6); dim3 force_grid(n_mol / n_bodies_per_block + 1, 1, 1); static unsigned int max_block_size = UINT_MAX; static cudaFuncAttributes attr; if (max_block_size == UINT_MAX) { cudaFuncGetAttributes(&attr, (const void *) gpu_rigid_virial_sliding_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = max_block_size < block_size ? 
max_block_size : block_size; // round down to nearest power of two unsigned int b = 1; while (b * 2 <= run_block_size) { b *= 2; } run_block_size = b; unsigned int window_size = run_block_size / n_bodies_per_block; unsigned int thread_mask = window_size - 1; unsigned int shared_bytes = 6 * run_block_size * sizeof(Scalar); while (shared_bytes + attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock) { // block size is power of two run_block_size /= 2; shared_bytes = 6 * run_block_size * sizeof(Scalar); window_size = run_block_size / n_bodies_per_block; thread_mask = window_size - 1; } gpu_rigid_virial_sliding_kernel<<< force_grid, run_block_size, shared_bytes >>>( d_virial, d_molecule_len, d_molecule_list, molecule_indexer, d_postype, d_orientation, body_indexer, d_body_pos, d_body_orientation, d_net_force, d_net_virial, n_mol, N, net_virial_pitch, virial_pitch, window_size, thread_mask, n_bodies_per_block); return cudaSuccess; } __global__ void gpu_update_composite_kernel(unsigned int N, unsigned int n_ghost, const unsigned int *d_body, const unsigned int *d_rtag, Scalar4 *d_postype, Scalar4 *d_orientation, Index2D body_indexer, const Scalar3 *d_body_pos, const Scalar4 *d_body_orientation, const unsigned int *d_body_len, const unsigned int *d_molecule_order, const unsigned int *d_molecule_len, const unsigned int *d_molecule_idx, int3 *d_image, const BoxDim box, unsigned int *d_flag) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N+n_ghost) return; unsigned int central_tag = d_body[idx]; if (central_tag == NO_BODY) return; unsigned int central_idx = d_rtag[central_tag]; if (central_idx == NOT_LOCAL && idx >= N) return; // do not overwrite central ptl if (idx == central_idx) return; Scalar4 postype = d_postype[central_idx]; vec3<Scalar> pos(postype); quat<Scalar> orientation(d_orientation[central_idx]); unsigned int body_type = __scalar_as_int(postype.w); unsigned int body_len = d_body_len[body_type]; unsigned int mol_idx = d_molecule_idx[idx]; if 
(body_len != d_molecule_len[mol_idx]-1) { // if a molecule with a local member is incomplete, this is an error if (idx < N) { atomicMax(d_flag, central_tag+1); } // otherwise, ignore return; } int3 img = d_image[central_idx]; unsigned int idx_in_body = d_molecule_order[idx] - 1; vec3<Scalar> local_pos(d_body_pos[body_indexer(body_type, idx_in_body)]); vec3<Scalar> dr_space = rotate(orientation, local_pos); vec3<Scalar> updated_pos(pos); updated_pos += dr_space; quat<Scalar> local_orientation(d_body_orientation[body_indexer(body_type, idx_in_body)]); quat<Scalar> updated_orientation = orientation*local_orientation; int3 imgi = img; box.wrap(updated_pos, imgi); unsigned int type = __scalar_as_int(d_postype[idx].w); d_postype[idx] = make_scalar4(updated_pos.x, updated_pos.y, updated_pos.z, __int_as_scalar(type)); d_image[idx] = imgi; } void gpu_update_composite(unsigned int N, unsigned int n_ghost, const unsigned int *d_body, const unsigned int *d_rtag, Scalar4 *d_postype, Scalar4 *d_orientation, Index2D body_indexer, const Scalar3 *d_body_pos, const Scalar4 *d_body_orientation, const unsigned int *d_body_len, const unsigned int *d_molecule_order, const unsigned int *d_molecule_len, const unsigned int *d_molecule_idx, int3 *d_image, const BoxDim box, unsigned int block_size, unsigned int *d_flag) { unsigned int run_block_size = block_size; static unsigned int max_block_size = UINT_MAX; static cudaFuncAttributes attr; if (max_block_size == UINT_MAX) { cudaFuncGetAttributes(&attr, (const void *) gpu_update_composite_kernel); max_block_size = attr.maxThreadsPerBlock; } if (max_block_size <= run_block_size) { run_block_size = max_block_size; } unsigned int n_blocks = (N+n_ghost)/run_block_size + 1; gpu_update_composite_kernel<<<n_blocks,run_block_size>>>(N, n_ghost, d_body, d_rtag, d_postype, d_orientation, body_indexer, d_body_pos, d_body_orientation, d_body_len, d_molecule_order, d_molecule_len, d_molecule_idx, d_image, box, d_flag); }
100bb49a435173a48167fd00340b0e1ae297de15.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "mex.h" #include "gpu/mxGPUArray.h" __global__ void vec_add ( const double* src1, const double* src2, const double k1, const double k2, double* dst, int const N ) { // Calculate index const int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < N) { dst[tid] = k1*src1[tid] + k2*src2[tid]; } } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { // Define variables const mxGPUArray *src1; const mxGPUArray *src2; double k1; double k2; mxGPUArray *dst; const double *d_src1; const double *d_src2; double *d_dst; int N1, N2; // Check the number of arguments if ( nrhs != 4 ) { mexErrMsgIdAndTxt("MATLAB:vec_add","The number of input arguments must be 4."); } if ( nlhs != 1 ) { mexErrMsgIdAndTxt("MATLAB:vec_add","The number of output arguments must be 1."); } // Initialization mxInitGPU(); // Get data from *prhs[] src1 = mxGPUCreateFromMxArray(prhs[0]); src2 = mxGPUCreateFromMxArray(prhs[1]); k1 = mxGetScalar(prhs[2]); k2 = mxGetScalar(prhs[3]); // Check the dimension of src vectors N1 = (int)(mxGPUGetNumberOfElements(src1)); N2 = (int)(mxGPUGetNumberOfElements(src2)); if ( N1 != N2 ) { mxGPUDestroyGPUArray(src1); mxGPUDestroyGPUArray(src2); mexErrMsgIdAndTxt("MATLAB:vec_add","The dimension of input vectors must be same."); } // Get address of src1 and src2 d_src1 = (const double*)(mxGPUGetDataReadOnly(src1)); d_src2 = (const double*)(mxGPUGetDataReadOnly(src2)); // Allocate memory of the destination variable on device memory dst = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(src1), mxGPUGetDimensions(src1), mxGPUGetClassID(src1), mxGPUGetComplexity(src1), MX_GPU_DO_NOT_INITIALIZE); d_dst = (double *)(mxGPUGetData(dst)); // Call kernel function dim3 block(N1); dim3 grid((N1 + block.x - 1) / block.x); hipLaunchKernelGGL(( vec_add), dim3(grid), dim3(block), 0, 0, d_src1, d_src2, k1, k2, d_dst, N1); // Pass dst to plhs[0] plhs[0] = 
mxGPUCreateMxArrayOnGPU(dst); // Release memory mxGPUDestroyGPUArray(src1); mxGPUDestroyGPUArray(src2); }
100bb49a435173a48167fd00340b0e1ae297de15.cu
#include "mex.h" #include "gpu/mxGPUArray.h" __global__ void vec_add ( const double* src1, const double* src2, const double k1, const double k2, double* dst, int const N ) { // Calculate index const int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < N) { dst[tid] = k1*src1[tid] + k2*src2[tid]; } } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { // Define variables const mxGPUArray *src1; const mxGPUArray *src2; double k1; double k2; mxGPUArray *dst; const double *d_src1; const double *d_src2; double *d_dst; int N1, N2; // Check the number of arguments if ( nrhs != 4 ) { mexErrMsgIdAndTxt("MATLAB:vec_add","The number of input arguments must be 4."); } if ( nlhs != 1 ) { mexErrMsgIdAndTxt("MATLAB:vec_add","The number of output arguments must be 1."); } // Initialization mxInitGPU(); // Get data from *prhs[] src1 = mxGPUCreateFromMxArray(prhs[0]); src2 = mxGPUCreateFromMxArray(prhs[1]); k1 = mxGetScalar(prhs[2]); k2 = mxGetScalar(prhs[3]); // Check the dimension of src vectors N1 = (int)(mxGPUGetNumberOfElements(src1)); N2 = (int)(mxGPUGetNumberOfElements(src2)); if ( N1 != N2 ) { mxGPUDestroyGPUArray(src1); mxGPUDestroyGPUArray(src2); mexErrMsgIdAndTxt("MATLAB:vec_add","The dimension of input vectors must be same."); } // Get address of src1 and src2 d_src1 = (const double*)(mxGPUGetDataReadOnly(src1)); d_src2 = (const double*)(mxGPUGetDataReadOnly(src2)); // Allocate memory of the destination variable on device memory dst = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(src1), mxGPUGetDimensions(src1), mxGPUGetClassID(src1), mxGPUGetComplexity(src1), MX_GPU_DO_NOT_INITIALIZE); d_dst = (double *)(mxGPUGetData(dst)); // Call kernel function dim3 block(N1); dim3 grid((N1 + block.x - 1) / block.x); vec_add<<<grid, block>>>(d_src1, d_src2, k1, k2, d_dst, N1); // Pass dst to plhs[0] plhs[0] = mxGPUCreateMxArrayOnGPU(dst); // Release memory mxGPUDestroyGPUArray(src1); mxGPUDestroyGPUArray(src2); }
2432fc3052b5ef8aad8faf5e93bcdaa0d6fa52af.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <algorithm> #include <cstdlib> #include <ctime> #include <hip/hip_runtime.h> #include <stdio.h> #include <cassert> //define the chunk sizes that each threadblock will work on #define BLKXSIZE 32 #define BLKYSIZE 4 #define BLKZSIZE 4 #define Q 19 #define lx 10 #define ly 10 #define lz 5 // for cuda error checking #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ return 1; \ } \ } while (0) template <typename T> __device__ void swap ( T& a, T& b ) { T c(a); a=b; b=c; } __global__ void gpu_array_swap(int ptr_gpu[][ly][lz][Q]) { // int thread_id = thread_idx(grid_dim, block_dim); unsigned idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned idy = blockIdx.y * blockDim.y + threadIdx.y; unsigned idz = blockIdx.z * blockDim.z + threadIdx.z; if ((idx < lx) && (idy < ly) && (idz < lz)) { for (size_t i = 1; i <= 9; i++) swap(ptr_gpu[idx][idy][idz][i], ptr_gpu[idx][idy][idz][i + 9]); } } void set_array(int array[][ly][lz][Q]) { int m = 0; for (int l = 0; l < Q; ++l) { for (int i = 0; i < lz; ++i) { for (int j = 0; j < ly; ++j) { for (int k = 0; k < lx; ++k) { array[i][j][k][l] = ++m; } } } } } void print_array(int array[][ly][lz][Q]) { for (int i = 0; i < lx; ++i) { for (int j = 0; j < ly; ++j) { for (int k = 0; k < lz; ++k) { for (int l = 0; l < Q; ++l) { std::cout << array[i][j][k][l] << " "; if (l == (Q - 1)) std::cout << std::endl; } } } } } int main() { typedef int array_3d[ly][lz]; typedef int array_4d[ly][lz][Q]; const dim3 blockSize(BLKXSIZE, BLKYSIZE, BLKZSIZE); const dim3 gridSize(((lx + BLKXSIZE - 1) / BLKXSIZE), ((ly + BLKYSIZE - 1) / BLKYSIZE), ((lz + BLKZSIZE - 1) / BLKZSIZE)); // pointers for data set storage via malloc array_4d* c; // storage for result stored on 
host array_4d* d_c; // storage for result computed on device // allocate storage for data set if ((c = (array_4d*)malloc((lx * ly * lz * Q) * sizeof(int))) == 0) { fprintf(stderr, "malloc1 Fail \n"); return 1; } set_array(c); print_array(c); // allocate GPU device buffers hipMalloc((void**)&d_c, (lx * ly * lz * Q) * sizeof(int)); cudaCheckErrors("Failed to allocate device buffer"); hipMemcpy(d_c, c, ((lx * ly * lz * Q) * sizeof(int)), hipMemcpyHostToDevice); // compute result hipLaunchKernelGGL(( gpu_array_swap), dim3(gridSize), dim3(blockSize), 0, 0, d_c); cudaCheckErrors("Kernel launch failure"); // copy output data back to host hipMemcpy(c, d_c, ((lx * ly * lz * Q) * sizeof(int)), hipMemcpyDeviceToHost); cudaCheckErrors("CUDA memcpy failure"); free(c); hipFree(d_c); cudaCheckErrors("hipFree fail"); return 0; }
2432fc3052b5ef8aad8faf5e93bcdaa0d6fa52af.cu
#include <iostream> #include <algorithm> #include <cstdlib> #include <ctime> #include <cuda.h> #include <stdio.h> #include <cassert> //define the chunk sizes that each threadblock will work on #define BLKXSIZE 32 #define BLKYSIZE 4 #define BLKZSIZE 4 #define Q 19 #define lx 10 #define ly 10 #define lz 5 // for cuda error checking #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ return 1; \ } \ } while (0) template <typename T> __device__ void swap ( T& a, T& b ) { T c(a); a=b; b=c; } __global__ void gpu_array_swap(int ptr_gpu[][ly][lz][Q]) { // int thread_id = thread_idx(grid_dim, block_dim); unsigned idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned idy = blockIdx.y * blockDim.y + threadIdx.y; unsigned idz = blockIdx.z * blockDim.z + threadIdx.z; if ((idx < lx) && (idy < ly) && (idz < lz)) { for (size_t i = 1; i <= 9; i++) swap(ptr_gpu[idx][idy][idz][i], ptr_gpu[idx][idy][idz][i + 9]); } } void set_array(int array[][ly][lz][Q]) { int m = 0; for (int l = 0; l < Q; ++l) { for (int i = 0; i < lz; ++i) { for (int j = 0; j < ly; ++j) { for (int k = 0; k < lx; ++k) { array[i][j][k][l] = ++m; } } } } } void print_array(int array[][ly][lz][Q]) { for (int i = 0; i < lx; ++i) { for (int j = 0; j < ly; ++j) { for (int k = 0; k < lz; ++k) { for (int l = 0; l < Q; ++l) { std::cout << array[i][j][k][l] << " "; if (l == (Q - 1)) std::cout << std::endl; } } } } } int main() { typedef int array_3d[ly][lz]; typedef int array_4d[ly][lz][Q]; const dim3 blockSize(BLKXSIZE, BLKYSIZE, BLKZSIZE); const dim3 gridSize(((lx + BLKXSIZE - 1) / BLKXSIZE), ((ly + BLKYSIZE - 1) / BLKYSIZE), ((lz + BLKZSIZE - 1) / BLKZSIZE)); // pointers for data set storage via malloc array_4d* c; // storage for result stored on host array_4d* d_c; // storage for result computed on device // 
allocate storage for data set if ((c = (array_4d*)malloc((lx * ly * lz * Q) * sizeof(int))) == 0) { fprintf(stderr, "malloc1 Fail \n"); return 1; } set_array(c); print_array(c); // allocate GPU device buffers cudaMalloc((void**)&d_c, (lx * ly * lz * Q) * sizeof(int)); cudaCheckErrors("Failed to allocate device buffer"); cudaMemcpy(d_c, c, ((lx * ly * lz * Q) * sizeof(int)), cudaMemcpyHostToDevice); // compute result gpu_array_swap<<<gridSize, blockSize>>>(d_c); cudaCheckErrors("Kernel launch failure"); // copy output data back to host cudaMemcpy(c, d_c, ((lx * ly * lz * Q) * sizeof(int)), cudaMemcpyDeviceToHost); cudaCheckErrors("CUDA memcpy failure"); free(c); cudaFree(d_c); cudaCheckErrors("cudaFree fail"); return 0; }
542815a945de1f0524dcd2b8fde062a6ab99124d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>

#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
//gan added
#include "caffe/net.hpp"

namespace caffe {

// out[i] = in[i] * scale[(i / inner_dim) % scale_dim]
// (scale broadcast over outer and inner dimensions).
template <typename Dtype>
__global__ void ScaleForward(const int n, const Dtype* in,
    const Dtype* scale, const int scale_dim, const int inner_dim,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index];
  }
}

// out[i] = in[i] * scale[s] + bias[s], with s = (i / inner_dim) % scale_dim.
template <typename Dtype>
__global__ void ScaleBiasForward(const int n, const Dtype* in,
    const Dtype* scale, const Dtype* bias, const int scale_dim,
    const int inner_dim, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index] + bias[scale_index];
  }
}

template <typename Dtype>
void ScaleLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  if (bottom[0] == top[0]) {
    // in-place computation; need to store bottom data before overwriting it.
    // Note that this is only necessary for Backward; we could skip this if not
    // doing Backward, but Caffe currently provides no way of knowing whether
    // we'll need to do Backward at the time of the Forward call.
    caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
               temp_.mutable_gpu_data());
  }
  // The scale comes either from a second bottom blob or from this layer's
  // own learned parameter blob.
  const Dtype* scale_data = ((bottom.size() > 1) ?
      bottom[1] : this->blobs_[0].get())->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  if (bias_layer_) {
    const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data();
    // Fixed: hipify had spliced "hipLaunchKernelGGL((" into the NOLINT
    // comment, leaving "ScaleBiasForward<Dtype> , dim3(...)..." behind,
    // which is not a kernel launch.  Use the proper HIP launch macro;
    // HIP_KERNEL_NAME protects the templated kernel name's comma-free
    // parsing inside the macro.
    hipLaunchKernelGGL(HIP_KERNEL_NAME(ScaleBiasForward<Dtype>),
        dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
        top_data);
  } else {
    hipLaunchKernelGGL(HIP_KERNEL_NAME(ScaleForward<Dtype>),
        dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
  }
}

template <typename Dtype>
void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // gan_added --- suppress parameter updates depending on the global GAN
  // phase.  NOTE(review): the semantics of gan_mode_ == 3 are defined by
  // Net<Dtype>::get_gan_mode(), not visible here; this block only gates
  // whether gradients are accumulated.
  bool update_weight = true;
  gan_mode_ = Net<Dtype>::get_gan_mode();
  if (this->layer_param_.scale_param().gen_mode() && gan_mode_ != 3) {
    update_weight = false;
  }
  if (this->layer_param_.scale_param().dis_mode() && gan_mode_ == 3) {
    update_weight = false;
  }
  // ---
  if (bias_layer_ && update_weight &&
      this->param_propagate_down_[this->param_propagate_down_.size() - 1]) {
    bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_);
  }
  const bool scale_param = (bottom.size() == 1);
  Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1];
  if ((!scale_param && propagate_down[1] && update_weight) ||
      (scale_param && this->param_propagate_down_[0] && update_weight)) {
    const Dtype* top_diff = top[0]->gpu_diff();
    const bool in_place = (bottom[0] == top[0]);
    const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data();
    // Hack: store big eltwise product in bottom[0] diff, except in the special
    // case where this layer itself does the eltwise product, in which case we
    // can store it directly in the scale diff, and we're done.
    // If we're computing in-place (and not doing eltwise computation), this
    // hack doesn't work and we store the product in temp_.
    const bool is_eltwise = (bottom[0]->count() == scale->count());
    Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() :
        (in_place ? temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff()));
    caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product);
    if (!is_eltwise) {
      Dtype* sum_result = NULL;
      if (inner_dim_ == 1) {
        sum_result = product;
      } else if (sum_result_.count() == 1) {
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        Dtype* scale_diff = scale->mutable_cpu_diff();
        if (scale_param) {
          // Accumulate into the existing diff when the scale is a parameter.
          Dtype result;
          caffe_gpu_dot(inner_dim_, product, sum_mult, &result);
          *scale_diff += result;
        } else {
          caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff);
        }
      } else {
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        sum_result = (outer_dim_ == 1) ?
            scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data();
        caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
                       Dtype(1), product, sum_mult, Dtype(0), sum_result);
      }
      if (outer_dim_ != 1) {
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        if (scale_dim_ == 1) {
          Dtype* scale_diff = scale->mutable_cpu_diff();
          if (scale_param) {
            Dtype result;
            caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result);
            *scale_diff += result;
          } else {
            caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff);
          }
        } else {
          Dtype* scale_diff = scale->mutable_gpu_diff();
          // beta = scale_param: accumulate for parameters, overwrite for
          // bottom-blob scales.
          caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_, Dtype(1),
                         sum_result, sum_mult, Dtype(scale_param), scale_diff);
        }
      }
    }
  }
  if (propagate_down[0]) {
    const int count = top[0]->count();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* scale_data = scale->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    // d(bottom) = d(top) * scale, same broadcast as the forward pass.
    // Fixed: same hipify comment-mangling as in Forward_gpu.
    hipLaunchKernelGGL(HIP_KERNEL_NAME(ScaleForward<Dtype>),
        dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer);

}  // namespace caffe
542815a945de1f0524dcd2b8fde062a6ab99124d.cu
// CUDA implementation of Caffe's ScaleLayer: top = bottom * scale (+ bias),
// with the scale broadcast across an outer_dim_ x scale_dim_ x inner_dim_
// factorization of the blob.
#include <cfloat>
#include <vector>

#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
//gan added
#include "caffe/net.hpp"

namespace caffe {

// out[i] = in[i] * scale[(i / inner_dim) % scale_dim]
template <typename Dtype>
__global__ void ScaleForward(const int n, const Dtype* in,
    const Dtype* scale, const int scale_dim, const int inner_dim,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index];
  }
}

// out[i] = in[i] * scale[s] + bias[s], with s = (i / inner_dim) % scale_dim.
template <typename Dtype>
__global__ void ScaleBiasForward(const int n, const Dtype* in,
    const Dtype* scale, const Dtype* bias, const int scale_dim,
    const int inner_dim, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index] + bias[scale_index];
  }
}

template <typename Dtype>
void ScaleLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  if (bottom[0] == top[0]) {
    // in-place computation; need to store bottom data before overwriting it.
    // Note that this is only necessary for Backward; we could skip this if not
    // doing Backward, but Caffe currently provides no way of knowing whether
    // we'll need to do Backward at the time of the Forward call.
    caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
               temp_.mutable_gpu_data());
  }
  // The scale comes either from a second bottom blob or from this layer's
  // own learned parameter blob.
  const Dtype* scale_data = ((bottom.size() > 1) ?
      bottom[1] : this->blobs_[0].get())->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  if (bias_layer_) {
    const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data();
    ScaleBiasForward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
        top_data);
  } else {
    ScaleForward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
  }
}

template <typename Dtype>
void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // gan_added --- suppress parameter updates depending on the global GAN
  // phase.  NOTE(review): the semantics of gan_mode_ == 3 are defined by
  // Net<Dtype>::get_gan_mode(), not visible here; this block only gates
  // whether gradients are accumulated.
  bool update_weight = true;
  gan_mode_ = Net<Dtype>::get_gan_mode();
  if(this->layer_param_.scale_param().gen_mode() && gan_mode_ != 3) {
    update_weight = false;
  }
  if(this->layer_param_.scale_param().dis_mode() && gan_mode_ == 3) {
    update_weight = false;
  }
  // ---
  if (bias_layer_ && update_weight &&
      this->param_propagate_down_[this->param_propagate_down_.size() - 1]) {
    bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_);
  }
  const bool scale_param = (bottom.size() == 1);
  Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1];
  if ((!scale_param && propagate_down[1] && update_weight) ||
      (scale_param && this->param_propagate_down_[0] && update_weight)) {
    const Dtype* top_diff = top[0]->gpu_diff();
    const bool in_place = (bottom[0] == top[0]);
    // For in-place computation the pristine input was saved to temp_ in
    // Forward_gpu.
    const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data();
    // Hack: store big eltwise product in bottom[0] diff, except in the special
    // case where this layer itself does the eltwise product, in which case we
    // can store it directly in the scale diff, and we're done.
    // If we're computing in-place (and not doing eltwise computation), this
    // hack doesn't work and we store the product in temp_.
    const bool is_eltwise = (bottom[0]->count() == scale->count());
    Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() :
        (in_place ? temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff()));
    caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product);
    if (!is_eltwise) {
      // Reduce the full-size product down to the scale's shape, first over
      // the inner dimension, then over the outer dimension.
      Dtype* sum_result = NULL;
      if (inner_dim_ == 1) {
        sum_result = product;
      } else if (sum_result_.count() == 1) {
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        Dtype* scale_diff = scale->mutable_cpu_diff();
        if (scale_param) {
          // Accumulate into the existing diff when the scale is a parameter.
          Dtype result;
          caffe_gpu_dot(inner_dim_, product, sum_mult, &result);
          *scale_diff += result;
        } else {
          caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff);
        }
      } else {
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        sum_result = (outer_dim_ == 1) ?
            scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data();
        caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
                       Dtype(1), product, sum_mult, Dtype(0), sum_result);
      }
      if (outer_dim_ != 1) {
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        if (scale_dim_ == 1) {
          Dtype* scale_diff = scale->mutable_cpu_diff();
          if (scale_param) {
            Dtype result;
            caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result);
            *scale_diff += result;
          } else {
            caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff);
          }
        } else {
          Dtype* scale_diff = scale->mutable_gpu_diff();
          // beta = scale_param: accumulate for parameters, overwrite for
          // bottom-blob scales.
          caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_, Dtype(1),
                         sum_result, sum_mult, Dtype(scale_param), scale_diff);
        }
      }
    }
  }
  if (propagate_down[0]) {
    // d(bottom) = d(top) * scale, same broadcast as the forward pass.
    const int count = top[0]->count();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* scale_data = scale->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    ScaleForward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer);

}  // namespace caffe
5ec2b760c206dd4e0b896f02a70650353f9eaa40.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Cuckoo Cycle, a memory-hard proof-of-work
// Copyright (c) 2013-2015 John Tromp
// The edge-trimming time-memory trade-off is due to Dave Anderson:
// http://da-data.blogspot.com/2014/03/a-public-review-of-cuckoo-cycle.html

#include <stdint.h>
#include <string.h>
#include "cuckoo.h"

// Nonce/node width follows the graph size: 32-bit indices suffice for
// SIZESHIFT <= 32, otherwise 64-bit.
#if SIZESHIFT <= 32
typedef u32 nonce_t;
typedef u32 node_t;
#else
typedef u64 nonce_t;
typedef u64 node_t;
#endif
#include <openssl/sha.h>

// d(evice s)ipnode
#if (__CUDA_ARCH__ >= 320) // redefine ROTL to use funnel shifter, 3% speed gain

// uint2-based 64-bit helpers so the hash state lives in 32-bit halves.
static __device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b) {
  return make_uint2(a.x ^ b.x, a.y ^ b.y);
}
static __device__ __forceinline__ void operator^= (uint2 &a, uint2 b) {
  a.x ^= b.x, a.y ^= b.y;
}
// 64-bit add implemented as 32-bit add-with-carry in inline PTX.
static __device__ __forceinline__ void operator+= (uint2 &a, uint2 b) {
  asm("{\n\tadd.cc.u32 %0,%2,%4;\n\taddc.u32 %1,%3,%5;\n\t}\n\t"
    : "=r"(a.x), "=r"(a.y) : "r"(a.x), "r"(a.y), "r"(b.x), "r"(b.y));
}

#undef ROTL
// 64-bit rotate-left built from two shf.l.wrap funnel shifts.
__inline__ __device__ uint2 ROTL(const uint2 a, const int offset) {
  uint2 result;
  if (offset >= 32) {
    asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset));
    asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset));
  } else {
    asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset));
    asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset));
  }
  return result;
}
// Split a 64-bit value into a uint2.
__device__ __forceinline__ uint2 vectorize(const uint64_t x) {
  uint2 result;
  asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x));
  return result;
}
// Reassemble a uint2 into a 64-bit value.
__device__ __forceinline__ uint64_t devectorize(uint2 x) {
  uint64_t result;
  asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y));
  return result;
}

// Device-side sipnode: computes endpoint `uorv` (0 or 1) of edge `nce` by
// hashing 2*nce+uorv with the SIPROUND schedule from cuckoo.h, reduced
// modulo the node space by NODEMASK.
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
  uint2 nonce = vectorize(2*nce + uorv);
  uint2 v0 = ctx.v2[0], v1 = ctx.v2[1], v2 = ctx.v2[2], v3 = ctx.v2[3] ^ nonce;
  SIPROUND; SIPROUND;
  v0 ^= nonce;
  v2 ^= vectorize(0xff);
  SIPROUND; SIPROUND; SIPROUND; SIPROUND;
  return devectorize(v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#else
// Plain 64-bit fallback for devices without the funnel shifter.
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
  u64 nonce = 2*nce + uorv;
  u64 v0 = ctx.v[0], v1 = ctx.v[1], v2 = ctx.v[2], v3 = ctx.v[3] ^ nonce;
  SIPROUND; SIPROUND;
  v0 ^= nonce;
  v2 ^= 0xff;
  SIPROUND; SIPROUND; SIPROUND; SIPROUND;
  return (v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#endif

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <set>

// algorithm parameters
#ifndef PART_BITS
// #bits used to partition edge set processing to save memory
// a value of 0 does no partitioning and is fastest
// a value of 1 partitions in two, making twice_set the
// same size as shrinkingset at about 33% slowdown
// higher values are not that interesting
#define PART_BITS 0
#endif

#ifndef IDXSHIFT
// we want sizeof(cuckoo_hash) == sizeof(twice_set), so
// CUCKOO_SIZE * sizeof(u64)   == TWICE_WORDS * sizeof(u32)
// CUCKOO_SIZE * 2             == TWICE_WORDS
// (SIZE >> IDXSHIFT) * 2      == 2 * ONCE_BITS / 32
// SIZE >> IDXSHIFT            == HALFSIZE >> PART_BITS >> 5
// IDXSHIFT                    == 1 + PART_BITS + 5
#define IDXSHIFT (PART_BITS + 6)
#endif
// grow with cube root of size, hardly affected by trimming
#define MAXPATHLEN (8 << (SIZESHIFT/3))

// Report a HIP error and (by default) abort the process.
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) {
  if (code != hipSuccess) {
    fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort) exit(code);
  }
}

// set that starts out full and gets reset by threads on disjoint words
// The bitmap is stored inverted: a 1 bit means the edge has been trimmed.
class shrinkingset {
public:
  u32 *bits;

  // Kill edge n.
  __device__ void reset(nonce_t n) {
    bits[n/32] |= 1 << (n%32);
  }
  // True iff edge n is still alive.
  __device__ bool test(node_t n) const {
    return !((bits[n/32] >> (n%32)) & 1);
  }
  // 32 alive-flags (1 == alive) for the word containing edge n.
  __device__ u32 block(node_t n) const {
    return ~bits[n/32];
  }
};

#define PART_MASK ((1 << PART_BITS) - 1)
#define ONCE_BITS (HALFSIZE >> PART_BITS)
#define TWICE_WORDS ((2 * ONCE_BITS) / 32)

// Per-node 2-bit saturating counter, 16 nodes packed per u32 word:
// the low bit of a pair means "seen once", the high bit "seen twice".
class twice_set {
public:
  u32 *bits;

  __device__ void reset() {
    memset(bits, 0, TWICE_WORDS * sizeof(u32));
  }
  // Atomically bump the counter for node u, saturating at two: only a
  // thread that observes the once-bit already set promotes to twice.
  __device__ void set(node_t u) {
    node_t idx = u/16;
    u32 bit = 1 << (2 * (u%16));
    u32 old = atomicOr(&bits[idx], bit);
    u32 bit2 = bit<<1;
    if ((old & (bit2|bit)) == bit) atomicOr(&bits[idx], bit2);
  }
  // Nonzero iff node u was counted at least twice.
  __device__ u32 test(node_t u) const {
    return (bits[u/16] >> (2 * (u%16))) & 2;
  }
};

#define CUCKOO_SIZE (SIZE >> IDXSHIFT)
#define CUCKOO_MASK (CUCKOO_SIZE - 1)
// number of (least significant) key bits that survives leftshift by SIZESHIFT
#define KEYBITS (64-SIZESHIFT)
#define KEYMASK ((1L << KEYBITS) - 1)
#define MAXDRIFT (1L << (KEYBITS - IDXSHIFT))

// Host-side open-addressed hash map from node to node, linear probing.
// Each u64 slot keeps the key's surviving KEYBITS in the high bits and the
// value in the low SIZESHIFT bits; an all-zero slot is empty.
class cuckoo_hash {
public:
  u64 *cuckoo;

  cuckoo_hash() {
    cuckoo = (u64 *)calloc(CUCKOO_SIZE, sizeof(u64));
    assert(cuckoo != 0);
  }
  ~cuckoo_hash() {
    free(cuckoo);
  }
  // Insert (or overwrite) the directed edge u -> v.
  void set(node_t u, node_t v) {
    u64 niew = (u64)u << SIZESHIFT | v;
    for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
#ifdef ATOMIC
      u64 old = 0;
      if (cuckoo[ui].compare_exchange_strong(old, niew, std::memory_order_relaxed))
        return;
      if ((old >> SIZESHIFT) == (u & KEYMASK)) {
        cuckoo[ui].store(niew, std::memory_order_relaxed);
#else
      u64 old = cuckoo[ui];
      if (old == 0 || (old >> SIZESHIFT) == (u & KEYMASK)) {
        cuckoo[ui] = niew;
#endif
        return;
      }
    }
  }
  // Look up u; returns 0 when u has no stored successor.
  node_t operator[](node_t u) const {
    for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
#ifdef ATOMIC
      u64 cu = cuckoo[ui].load(std::memory_order_relaxed);
#else
      u64 cu = cuckoo[ui];
#endif
      if (!cu)
        return 0;
      if ((cu >> SIZESHIFT) == (u & KEYMASK)) {
        assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT);
        return (node_t)(cu & (SIZE-1));
      }
    }
  }
};

// All state the trimming kernels need; copied to the device by value.
class cuckoo_ctx {
public:
  siphash_ctx sip_ctx;
  shrinkingset alive;
  twice_set nonleaf;
  int nthreads;

  cuckoo_ctx(const char* header, u32 n_threads) {
    setheader(&sip_ctx, header);
    nthreads = n_threads;
  }
};

// For every live edge, mark its uorv-side endpoint (restricted to nodes in
// partition `part`) in the nonleaf counter; a node marked twice has degree
// of at least two.
__global__ void count_node_deg(cuckoo_ctx *ctx, u32 uorv, u32 part) {
  shrinkingset &alive = ctx->alive;
  twice_set &nonleaf = ctx->nonleaf;
  siphash_ctx sip_ctx = ctx->sip_ctx; // local copy sip context; 2.5% speed gain
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  for (nonce_t block = id*32; block < HALFSIZE; block += ctx->nthreads*32) {
    u32 alive32 = alive.block(block);
    for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs
      u32 ffs = __ffs(alive32);
      nonce += ffs; alive32 >>= ffs;
      node_t u = dipnode(sip_ctx, nonce, uorv);
      if ((u & PART_MASK) == part) {
        nonleaf.set(u >> PART_BITS);
      }
    }
  }
}

// Kill every live edge whose uorv-side endpoint was seen fewer than twice
// in the preceding count_node_deg pass (a degree-1 node cannot lie on a
// cycle).
__global__ void kill_leaf_edges(cuckoo_ctx *ctx, u32 uorv, u32 part) {
  shrinkingset &alive = ctx->alive;
  twice_set &nonleaf = ctx->nonleaf;
  siphash_ctx sip_ctx = ctx->sip_ctx;
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  for (nonce_t block = id*32; block < HALFSIZE; block += ctx->nthreads*32) {
    u32 alive32 = alive.block(block);
    for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs
      u32 ffs = __ffs(alive32);
      nonce += ffs; alive32 >>= ffs;
      node_t u = dipnode(sip_ctx, nonce, uorv);
      if ((u & PART_MASK) == part) {
        if (!nonleaf.test(u >> PART_BITS)) {
          alive.reset(nonce);
        }
      }
    }
  }
}

// Follow the cuckoo chain starting at u, recording visited nodes in us[];
// returns the index of the chain's final node.  Exits the process if the
// chain exceeds MAXPATHLEN (cycle in the stored forest).
u32 path(cuckoo_hash &cuckoo, node_t u, node_t *us) {
  u32 nu;
  for (nu = 0; u; u = cuckoo[u]) {
    if (++nu >= MAXPATHLEN) {
      while (nu-- && us[nu] != u) ;
      if (nu == ~0)
        printf("maximum path length exceeded\n");
      else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
      exit(0);
    }
    us[nu] = u;
  }
  return nu;
}

typedef std::pair<node_t,node_t> edge;

#include <unistd.h>

int main(int argc, char **argv) {
  // Parse -h header, -n trims, -t threads, -p threads-per-block.
  int nthreads = 1;
  int ntrims   = 1 + (PART_BITS+3)*(PART_BITS+4)/2;
  int tpb = 0;
  const char *header = "";
  int c;
  while ((c = getopt (argc, argv, "h:n:t:p:")) != -1) {
    switch (c) {
      case 'h': header = optarg; break;
      case 'n': ntrims = atoi(optarg); break;
      case 't': nthreads = atoi(optarg); break;
      case 'p': tpb = atoi(optarg); break;
    }
  }
  if (!tpb) // if not set, then default threads per block to roughly square root of threads
    for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ;
  printf("Looking for %d-cycle on cuckoo%d(\"%s\") with 50%% edges, %d trims, %d threads %d per block\n",
         PROOFSIZE, SIZESHIFT, header, ntrims, nthreads, tpb);
  u64 edgeBytes = HALFSIZE/8, nodeBytes = TWICE_WORDS*sizeof(u32);
  cuckoo_ctx ctx(header, nthreads);
  checkCudaErrors(hipMalloc((void**)&ctx.alive.bits, edgeBytes));
  checkCudaErrors(hipMemset(ctx.alive.bits, 0, edgeBytes));
  checkCudaErrors(hipMalloc((void**)&ctx.nonleaf.bits, nodeBytes));
  // Pretty-print the buffer sizes with a K/M/G/T unit.
  int edgeUnit=0, nodeUnit=0;
  u64 eb = edgeBytes, nb = nodeBytes;
  for (; eb >= 1024; eb>>=10) edgeUnit++;
  for (; nb >= 1024; nb>>=10) nodeUnit++;
  printf("Using %d%cB edge and %d%cB node memory.\n",
         (int)eb, " KMGT"[edgeUnit], (int)nb, " KMGT"[nodeUnit]);
  cuckoo_ctx *device_ctx;
  checkCudaErrors(hipMalloc((void**)&device_ctx, sizeof(cuckoo_ctx)));
  hipMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), hipMemcpyHostToDevice);
  // Edge trimming: repeatedly count node degrees and kill leaf edges, for
  // both edge endpoints (uorv) and every node partition.
  for (u32 round=0; round < ntrims; round++) {
    for (u32 uorv = 0; uorv < 2; uorv++) {
      for (u32 part = 0; part <= PART_MASK; part++) {
        checkCudaErrors(hipMemset(ctx.nonleaf.bits, 0, nodeBytes));
        hipLaunchKernelGGL(( count_node_deg), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_ctx, uorv, part);
        hipLaunchKernelGGL(( kill_leaf_edges), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_ctx, uorv, part);
      }
    }
  }
  // Pull the surviving-edge bitmap back to the host for cycle finding.
  u64 *bits;
  bits = (u64 *)calloc(HALFSIZE/64, sizeof(u64));
  assert(bits != 0);
  hipMemcpy(bits, ctx.alive.bits, (HALFSIZE/64) * sizeof(u64), hipMemcpyDeviceToHost);
  checkCudaErrors(hipFree(ctx.alive.bits));
  checkCudaErrors(hipFree(ctx.nonleaf.bits));
  u32 cnt = 0;
  for (int i = 0; i < HALFSIZE/64; i++)
    cnt += __builtin_popcountll(~bits[i]);
  u32 load = (u32)(100L * cnt / CUCKOO_SIZE);
  printf("final load %d%%\n", load);
  if (load >= 90) {
    printf("overloaded! exiting...");
    exit(0);
  }
  // Host-side cycle detection: grow a cuckoo forest edge by edge; an edge
  // whose two endpoint paths meet closes a cycle.
  cuckoo_hash &cuckoo = *(new cuckoo_hash());
  node_t us[MAXPATHLEN], vs[MAXPATHLEN];
  for (nonce_t block = 0; block < HALFSIZE; block += 64) {
    u64 alive64 = ~bits[block/64];
    for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
      u32 ffs = __builtin_ffsll(alive64);
      nonce += ffs; alive64 >>= ffs;
      node_t u0=sipnode(&ctx.sip_ctx, nonce, 0), v0=sipnode(&ctx.sip_ctx, nonce, 1);
      if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[]
        continue;
      node_t u = cuckoo[us[0] = u0], v = cuckoo[vs[0] = v0];
      u32 nu = path(cuckoo, u, us), nv = path(cuckoo, v, vs);
      if (us[nu] == vs[nv]) {
        // Both paths reach the same root: this edge closes a cycle of
        // length nu+nv+1 (after trimming the common suffix).
        u32 min = nu < nv ? nu : nv;
        for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ;
        u32 len = nu + nv + 1;
        printf("% 4d-cycle found at %d:%d%%\n", len, 0, (u32)(nonce*100L/HALFSIZE));
        if (len == PROOFSIZE) {
          // Recover the solution nonces by re-enumerating live edges and
          // matching them against the cycle's edge set.
          printf("Solution");
          std::set<edge> cycle;
          u32 n = 0;
          cycle.insert(edge(*us, *vs));
          while (nu--)
            cycle.insert(edge(us[(nu+1)&~1], us[nu|1])); // u's in even position; v's in odd
          while (nv--)
            cycle.insert(edge(vs[nv|1], vs[(nv+1)&~1])); // u's in odd position; v's in even
          for (nonce_t blk = 0; blk < HALFSIZE; blk += 64) {
            u64 alv64 = ~bits[blk/64];
            for (nonce_t nce = blk-1; alv64; ) { // -1 compensates for 1-based ffs
              u32 ffs = __builtin_ffsll(alv64);
              nce += ffs; alv64 >>= ffs;
              edge e(sipnode(&ctx.sip_ctx, nce, 0), sipnode(&ctx.sip_ctx, nce, 1));
              if (cycle.find(e) != cycle.end()) {
                printf(" %x", nce);
                if (PROOFSIZE > 2)
                  cycle.erase(e);
                n++;
              }
              if (ffs & 64) break; // can't shift by 64
            }
          }
          assert(n==PROOFSIZE);
          printf("\n");
        }
        continue;
      }
      // No cycle: reverse the shorter path and store the new edge.
      if (nu < nv) {
        while (nu--)
          cuckoo.set(us[nu+1], us[nu]);
        cuckoo.set(u0, v0);
      } else {
        while (nv--)
          cuckoo.set(vs[nv+1], vs[nv]);
        cuckoo.set(v0, u0);
      }
      if (ffs & 64) break; // can't shift by 64
    }
  }
  return 0;
}
5ec2b760c206dd4e0b896f02a70650353f9eaa40.cu
// Cuckoo Cycle, a memory-hard proof-of-work
// Copyright (c) 2013-2015 John Tromp
// The edge-trimming time-memory trade-off is due to Dave Anderson:
// http://da-data.blogspot.com/2014/03/a-public-review-of-cuckoo-cycle.html

#include <stdint.h>
#include <string.h>
#include "cuckoo.h"

// Nonce/node width follows the graph size: 32-bit indices suffice for
// SIZESHIFT <= 32, otherwise 64-bit.
#if SIZESHIFT <= 32
typedef u32 nonce_t;
typedef u32 node_t;
#else
typedef u64 nonce_t;
typedef u64 node_t;
#endif
#include <openssl/sha.h>

// d(evice s)ipnode
#if (__CUDA_ARCH__ >= 320) // redefine ROTL to use funnel shifter, 3% speed gain

// uint2-based 64-bit helpers so the hash state lives in 32-bit halves.
static __device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b) {
  return make_uint2(a.x ^ b.x, a.y ^ b.y);
}
static __device__ __forceinline__ void operator^= (uint2 &a, uint2 b) {
  a.x ^= b.x, a.y ^= b.y;
}
// 64-bit add implemented as 32-bit add-with-carry in inline PTX.
static __device__ __forceinline__ void operator+= (uint2 &a, uint2 b) {
  asm("{\n\tadd.cc.u32 %0,%2,%4;\n\taddc.u32 %1,%3,%5;\n\t}\n\t"
    : "=r"(a.x), "=r"(a.y) : "r"(a.x), "r"(a.y), "r"(b.x), "r"(b.y));
}

#undef ROTL
// 64-bit rotate-left built from two shf.l.wrap funnel shifts.
__inline__ __device__ uint2 ROTL(const uint2 a, const int offset) {
  uint2 result;
  if (offset >= 32) {
    asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset));
    asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset));
  } else {
    asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset));
    asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset));
  }
  return result;
}
// Split a 64-bit value into a uint2.
__device__ __forceinline__ uint2 vectorize(const uint64_t x) {
  uint2 result;
  asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x));
  return result;
}
// Reassemble a uint2 into a 64-bit value.
__device__ __forceinline__ uint64_t devectorize(uint2 x) {
  uint64_t result;
  asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y));
  return result;
}

// Device-side sipnode: computes endpoint `uorv` (0 or 1) of edge `nce` by
// hashing 2*nce+uorv with the SIPROUND schedule from cuckoo.h, reduced
// modulo the node space by NODEMASK.
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
  uint2 nonce = vectorize(2*nce + uorv);
  uint2 v0 = ctx.v2[0], v1 = ctx.v2[1], v2 = ctx.v2[2], v3 = ctx.v2[3] ^ nonce;
  SIPROUND; SIPROUND;
  v0 ^= nonce;
  v2 ^= vectorize(0xff);
  SIPROUND; SIPROUND; SIPROUND; SIPROUND;
  return devectorize(v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#else
// Plain 64-bit fallback for devices without the funnel shifter.
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
  u64 nonce = 2*nce + uorv;
  u64 v0 = ctx.v[0], v1 = ctx.v[1], v2 = ctx.v[2], v3 = ctx.v[3] ^ nonce;
  SIPROUND; SIPROUND;
  v0 ^= nonce;
  v2 ^= 0xff;
  SIPROUND; SIPROUND; SIPROUND; SIPROUND;
  return (v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#endif

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <set>

// algorithm parameters
#ifndef PART_BITS
// #bits used to partition edge set processing to save memory
// a value of 0 does no partitioning and is fastest
// a value of 1 partitions in two, making twice_set the
// same size as shrinkingset at about 33% slowdown
// higher values are not that interesting
#define PART_BITS 0
#endif

#ifndef IDXSHIFT
// we want sizeof(cuckoo_hash) == sizeof(twice_set), so
// CUCKOO_SIZE * sizeof(u64)   == TWICE_WORDS * sizeof(u32)
// CUCKOO_SIZE * 2             == TWICE_WORDS
// (SIZE >> IDXSHIFT) * 2      == 2 * ONCE_BITS / 32
// SIZE >> IDXSHIFT            == HALFSIZE >> PART_BITS >> 5
// IDXSHIFT                    == 1 + PART_BITS + 5
#define IDXSHIFT (PART_BITS + 6)
#endif
// grow with cube root of size, hardly affected by trimming
#define MAXPATHLEN (8 << (SIZESHIFT/3))

// Report a CUDA error and (by default) abort the process.
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
  if (code != cudaSuccess) {
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
  }
}

// set that starts out full and gets reset by threads on disjoint words
// The bitmap is stored inverted: a 1 bit means the edge has been trimmed.
class shrinkingset {
public:
  u32 *bits;

  // Kill edge n.
  __device__ void reset(nonce_t n) {
    bits[n/32] |= 1 << (n%32);
  }
  // True iff edge n is still alive.
  __device__ bool test(node_t n) const {
    return !((bits[n/32] >> (n%32)) & 1);
  }
  // 32 alive-flags (1 == alive) for the word containing edge n.
  __device__ u32 block(node_t n) const {
    return ~bits[n/32];
  }
};

#define PART_MASK ((1 << PART_BITS) - 1)
#define ONCE_BITS (HALFSIZE >> PART_BITS)
#define TWICE_WORDS ((2 * ONCE_BITS) / 32)

// Per-node 2-bit saturating counter, 16 nodes packed per u32 word:
// the low bit of a pair means "seen once", the high bit "seen twice".
class twice_set {
public:
  u32 *bits;

  __device__ void reset() {
    memset(bits, 0, TWICE_WORDS * sizeof(u32));
  }
  // Atomically bump the counter for node u, saturating at two: only a
  // thread that observes the once-bit already set promotes to twice.
  __device__ void set(node_t u) {
    node_t idx = u/16;
    u32 bit = 1 << (2 * (u%16));
    u32 old = atomicOr(&bits[idx], bit);
    u32 bit2 = bit<<1;
    if ((old & (bit2|bit)) == bit) atomicOr(&bits[idx], bit2);
  }
  // Nonzero iff node u was counted at least twice.
  __device__ u32 test(node_t u) const {
    return (bits[u/16] >> (2 * (u%16))) & 2;
  }
};

#define CUCKOO_SIZE (SIZE >> IDXSHIFT)
#define CUCKOO_MASK (CUCKOO_SIZE - 1)
// number of (least significant) key bits that survives leftshift by SIZESHIFT
#define KEYBITS (64-SIZESHIFT)
#define KEYMASK ((1L << KEYBITS) - 1)
#define MAXDRIFT (1L << (KEYBITS - IDXSHIFT))

// Host-side open-addressed hash map from node to node, linear probing.
// Each u64 slot keeps the key's surviving KEYBITS in the high bits and the
// value in the low SIZESHIFT bits; an all-zero slot is empty.
class cuckoo_hash {
public:
  u64 *cuckoo;

  cuckoo_hash() {
    cuckoo = (u64 *)calloc(CUCKOO_SIZE, sizeof(u64));
    assert(cuckoo != 0);
  }
  ~cuckoo_hash() {
    free(cuckoo);
  }
  // Insert (or overwrite) the directed edge u -> v.
  void set(node_t u, node_t v) {
    u64 niew = (u64)u << SIZESHIFT | v;
    for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
#ifdef ATOMIC
      u64 old = 0;
      if (cuckoo[ui].compare_exchange_strong(old, niew, std::memory_order_relaxed))
        return;
      if ((old >> SIZESHIFT) == (u & KEYMASK)) {
        cuckoo[ui].store(niew, std::memory_order_relaxed);
#else
      u64 old = cuckoo[ui];
      if (old == 0 || (old >> SIZESHIFT) == (u & KEYMASK)) {
        cuckoo[ui] = niew;
#endif
        return;
      }
    }
  }
  // Look up u; returns 0 when u has no stored successor.
  node_t operator[](node_t u) const {
    for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
#ifdef ATOMIC
      u64 cu = cuckoo[ui].load(std::memory_order_relaxed);
#else
      u64 cu = cuckoo[ui];
#endif
      if (!cu)
        return 0;
      if ((cu >> SIZESHIFT) == (u & KEYMASK)) {
        assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT);
        return (node_t)(cu & (SIZE-1));
      }
    }
  }
};

// All state the trimming kernels need; copied to the device by value.
class cuckoo_ctx {
public:
  siphash_ctx sip_ctx;
  shrinkingset alive;
  twice_set nonleaf;
  int nthreads;

  cuckoo_ctx(const char* header, u32 n_threads) {
    setheader(&sip_ctx, header);
    nthreads = n_threads;
  }
};

// For every live edge, mark its uorv-side endpoint (restricted to nodes in
// partition `part`) in the nonleaf counter; a node marked twice has degree
// of at least two.
__global__ void count_node_deg(cuckoo_ctx *ctx, u32 uorv, u32 part) {
  shrinkingset &alive = ctx->alive;
  twice_set &nonleaf = ctx->nonleaf;
  siphash_ctx sip_ctx = ctx->sip_ctx; // local copy sip context; 2.5% speed gain
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  for (nonce_t block = id*32; block < HALFSIZE; block += ctx->nthreads*32) {
    u32 alive32 = alive.block(block);
    for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs
      u32 ffs = __ffs(alive32);
      nonce += ffs; alive32 >>= ffs;
      node_t u = dipnode(sip_ctx, nonce, uorv);
      if ((u & PART_MASK) == part) {
        nonleaf.set(u >> PART_BITS);
      }
    }
  }
}

// Kill every live edge whose uorv-side endpoint was seen fewer than twice
// in the preceding count_node_deg pass (a degree-1 node cannot lie on a
// cycle).
__global__ void kill_leaf_edges(cuckoo_ctx *ctx, u32 uorv, u32 part) {
  shrinkingset &alive = ctx->alive;
  twice_set &nonleaf = ctx->nonleaf;
  siphash_ctx sip_ctx = ctx->sip_ctx;
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  for (nonce_t block = id*32; block < HALFSIZE; block += ctx->nthreads*32) {
    u32 alive32 = alive.block(block);
    for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs
      u32 ffs = __ffs(alive32);
      nonce += ffs; alive32 >>= ffs;
      node_t u = dipnode(sip_ctx, nonce, uorv);
      if ((u & PART_MASK) == part) {
        if (!nonleaf.test(u >> PART_BITS)) {
          alive.reset(nonce);
        }
      }
    }
  }
}

// Follow the cuckoo chain starting at u, recording visited nodes in us[];
// returns the index of the chain's final node.  Exits the process if the
// chain exceeds MAXPATHLEN (cycle in the stored forest).
u32 path(cuckoo_hash &cuckoo, node_t u, node_t *us) {
  u32 nu;
  for (nu = 0; u; u = cuckoo[u]) {
    if (++nu >= MAXPATHLEN) {
      while (nu-- && us[nu] != u) ;
      if (nu == ~0)
        printf("maximum path length exceeded\n");
      else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
      exit(0);
    }
    us[nu] = u;
  }
  return nu;
}

typedef std::pair<node_t,node_t> edge;

#include <unistd.h>

int main(int argc, char **argv) {
  // Parse -h header, -n trims, -t threads, -p threads-per-block.
  int nthreads = 1;
  int ntrims   = 1 + (PART_BITS+3)*(PART_BITS+4)/2;
  int tpb = 0;
  const char *header = "";
  int c;
  while ((c = getopt (argc, argv, "h:n:t:p:")) != -1) {
    switch (c) {
      case 'h': header = optarg; break;
      case 'n': ntrims = atoi(optarg); break;
      case 't': nthreads = atoi(optarg); break;
      case 'p': tpb = atoi(optarg); break;
    }
  }
  if (!tpb) // if not set, then default threads per block to roughly square root of threads
    for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ;
  printf("Looking for %d-cycle on cuckoo%d(\"%s\") with 50%% edges, %d trims, %d threads %d per block\n",
         PROOFSIZE, SIZESHIFT, header, ntrims, nthreads, tpb);
  u64 edgeBytes = HALFSIZE/8, nodeBytes = TWICE_WORDS*sizeof(u32);
  cuckoo_ctx ctx(header, nthreads);
  checkCudaErrors(cudaMalloc((void**)&ctx.alive.bits, edgeBytes));
  checkCudaErrors(cudaMemset(ctx.alive.bits, 0, edgeBytes));
  checkCudaErrors(cudaMalloc((void**)&ctx.nonleaf.bits, nodeBytes));
  // Pretty-print the buffer sizes with a K/M/G/T unit.
  int edgeUnit=0, nodeUnit=0;
  u64 eb = edgeBytes, nb = nodeBytes;
  for (; eb >= 1024; eb>>=10) edgeUnit++;
  for (; nb >= 1024; nb>>=10) nodeUnit++;
  printf("Using %d%cB edge and %d%cB node memory.\n",
         (int)eb, " KMGT"[edgeUnit], (int)nb, " KMGT"[nodeUnit]);
  cuckoo_ctx *device_ctx;
  checkCudaErrors(cudaMalloc((void**)&device_ctx, sizeof(cuckoo_ctx)));
  cudaMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), cudaMemcpyHostToDevice);
  // Edge trimming: repeatedly count node degrees and kill leaf edges, for
  // both edge endpoints (uorv) and every node partition.
  for (u32 round=0; round < ntrims; round++) {
    for (u32 uorv = 0; uorv < 2; uorv++) {
      for (u32 part = 0; part <= PART_MASK; part++) {
        checkCudaErrors(cudaMemset(ctx.nonleaf.bits, 0, nodeBytes));
        count_node_deg<<<nthreads/tpb,tpb >>>(device_ctx, uorv, part);
        kill_leaf_edges<<<nthreads/tpb,tpb >>>(device_ctx, uorv, part);
      }
    }
  }
  // Pull the surviving-edge bitmap back to the host for cycle finding.
  u64 *bits;
  bits = (u64 *)calloc(HALFSIZE/64, sizeof(u64));
  assert(bits != 0);
  cudaMemcpy(bits, ctx.alive.bits, (HALFSIZE/64) * sizeof(u64), cudaMemcpyDeviceToHost);
  checkCudaErrors(cudaFree(ctx.alive.bits));
  checkCudaErrors(cudaFree(ctx.nonleaf.bits));
  u32 cnt = 0;
  for (int i = 0; i < HALFSIZE/64; i++)
    cnt += __builtin_popcountll(~bits[i]);
  u32 load = (u32)(100L * cnt / CUCKOO_SIZE);
  printf("final load %d%%\n", load);
  if (load >= 90) {
    printf("overloaded! exiting...");
    exit(0);
  }
  // Host-side cycle detection: grow a cuckoo forest edge by edge; an edge
  // whose two endpoint paths meet closes a cycle.
  cuckoo_hash &cuckoo = *(new cuckoo_hash());
  node_t us[MAXPATHLEN], vs[MAXPATHLEN];
  for (nonce_t block = 0; block < HALFSIZE; block += 64) {
    u64 alive64 = ~bits[block/64];
    for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
      u32 ffs = __builtin_ffsll(alive64);
      nonce += ffs; alive64 >>= ffs;
      node_t u0=sipnode(&ctx.sip_ctx, nonce, 0), v0=sipnode(&ctx.sip_ctx, nonce, 1);
      if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[]
        continue;
      node_t u = cuckoo[us[0] = u0], v = cuckoo[vs[0] = v0];
      u32 nu = path(cuckoo, u, us), nv = path(cuckoo, v, vs);
      if (us[nu] == vs[nv]) {
        // Both paths reach the same root: this edge closes a cycle of
        // length nu+nv+1 (after trimming the common suffix).
        u32 min = nu < nv ? nu : nv;
        for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ;
        u32 len = nu + nv + 1;
        printf("% 4d-cycle found at %d:%d%%\n", len, 0, (u32)(nonce*100L/HALFSIZE));
        if (len == PROOFSIZE) {
          // Recover the solution nonces by re-enumerating live edges and
          // matching them against the cycle's edge set.
          printf("Solution");
          std::set<edge> cycle;
          u32 n = 0;
          cycle.insert(edge(*us, *vs));
          while (nu--)
            cycle.insert(edge(us[(nu+1)&~1], us[nu|1])); // u's in even position; v's in odd
          while (nv--)
            cycle.insert(edge(vs[nv|1], vs[(nv+1)&~1])); // u's in odd position; v's in even
          for (nonce_t blk = 0; blk < HALFSIZE; blk += 64) {
            u64 alv64 = ~bits[blk/64];
            for (nonce_t nce = blk-1; alv64; ) { // -1 compensates for 1-based ffs
              u32 ffs = __builtin_ffsll(alv64);
              nce += ffs; alv64 >>= ffs;
              edge e(sipnode(&ctx.sip_ctx, nce, 0), sipnode(&ctx.sip_ctx, nce, 1));
              if (cycle.find(e) != cycle.end()) {
                printf(" %x", nce);
                if (PROOFSIZE > 2)
                  cycle.erase(e);
                n++;
              }
              if (ffs & 64) break; // can't shift by 64
            }
          }
          assert(n==PROOFSIZE);
          printf("\n");
        }
        continue;
      }
      // No cycle: reverse the shorter path and store the new edge.
      if (nu < nv) {
        while (nu--)
          cuckoo.set(us[nu+1], us[nu]);
        cuckoo.set(u0, v0);
      } else {
        while (nv--)
          cuckoo.set(vs[nv+1], vs[nv]);
        cuckoo.set(v0, u0);
      }
      if (ffs & 64) break; // can't shift by 64
    }
  }
  return 0;
}
9803af9e72c2daca0d85e56e3422f1c66eb5ed83.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> using std::endl; __global__ void sum_kernel(double *A, double *B, double *C, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) { return; } double a = A[idx]; double b = B[idx]; C[idx] = a + b; } int main(int argc, char **argv) { // Size of vectors int n = 100000; // Host vectors double *h_a, *h_b, *h_c; // Size, in bytes, of each vector size_t bytes = n * sizeof(double); // Allocate memory for each vector on host h_a = (double *) malloc(bytes); h_b = (double *) malloc(bytes); h_c = (double *) malloc(bytes); int i; // Initialize vectors on host for (i = 0; i < n; i++) { h_a[i] = sin(i) * sin(i); h_b[i] = cos(i) * cos(i); } // Device input vectors double *d_a, *d_b, *d_c; // Allocate memory for each vector on GPU hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); // Copy host vectors to device hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice); int blockSize = 1024; int gridSize = (n - 1) / blockSize + 1; // Execute the kernel hipLaunchKernelGGL(( sum_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n); // Copy array back to host hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost); // Release device memory hipFree(d_a); hipFree(d_b); hipFree(d_c); double maxError = 0; for (int i = 0; i < n; ++i) { double error = abs(h_c[i] - 1.0); maxError = error > maxError ? error : maxError; } std::cout << "Max error = " << maxError << endl; }
9803af9e72c2daca0d85e56e3422f1c66eb5ed83.cu
#include <iostream> using std::endl; __global__ void sum_kernel(double *A, double *B, double *C, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) { return; } double a = A[idx]; double b = B[idx]; C[idx] = a + b; } int main(int argc, char **argv) { // Size of vectors int n = 100000; // Host vectors double *h_a, *h_b, *h_c; // Size, in bytes, of each vector size_t bytes = n * sizeof(double); // Allocate memory for each vector on host h_a = (double *) malloc(bytes); h_b = (double *) malloc(bytes); h_c = (double *) malloc(bytes); int i; // Initialize vectors on host for (i = 0; i < n; i++) { h_a[i] = sin(i) * sin(i); h_b[i] = cos(i) * cos(i); } // Device input vectors double *d_a, *d_b, *d_c; // Allocate memory for each vector on GPU cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); // Copy host vectors to device cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice); int blockSize = 1024; int gridSize = (n - 1) / blockSize + 1; // Execute the kernel sum_kernel<<<gridSize, blockSize>>>(d_a, d_b, d_c, n); // Copy array back to host cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost); // Release device memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); double maxError = 0; for (int i = 0; i < n; ++i) { double error = abs(h_c[i] - 1.0); maxError = error > maxError ? error : maxError; } std::cout << "Max error = " << maxError << endl; }
71b04a497393d6662f3c3ec92667235e74f4ec59.hip
// !!! This is a file automatically generated by hipify!!! /* -- KBLAS (version 1.0) -- Ahmad Abdelfattah, Center of Extreme Computing Hatem Ltaief, Supercomputing Laboratory David Keyes, Center of Extreme Computing King Abdullah University of Science and Technology (KAUST) June 2013 KBLAS is a subset of BLAS routines highly optimized for NVIDIA GPUs */ /** -- Center of Extreme Computing and Supercomputing Laboratory -- Division of Applied Mathematics and Computational Science -- King Abdullah University of Science and Technology -- (C) Copyright 2013 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University of Tennessee, Knoxville nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
**/ #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <rocblas.h> #include "gemv2_core.cuh" #if(SM >= 30) #define cgemvn_nb (32) #define cgemvn_ntcol (4) #define cgemvn_ept (2) #define cgemvn_width (cgemvn_ntcol*cgemvn_ept) #define cgemvn_by (16) #define cgemvt_nb (32) #define cgemvt_ntcol (2) #define cgemvt_ept (4) #define cgemvt_width (cgemvt_ntcol*cgemvt_ept) #define cgemvt_by (8) #else #define cgemvn_nb (64) #define cgemvn_ntcol (8) #define cgemvn_ept (2) #define cgemvn_width (cgemvn_ntcol*cgemvn_ept) #define cgemvn_by (1) #define cgemvt_nb (64) #define cgemvt_ntcol (8) #define cgemvt_ept (2) #define cgemvt_width (cgemvt_ntcol*cgemvt_ept) #define cgemvt_by (1) #endif extern "C" int kblas_cscal_async(int n, cuFloatComplex alpha, cuFloatComplex *x, int incx, hipStream_t stream); int kblas_cgemv2_driver( char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, hipStream_t stream) { if(trans == 'n' || trans == 'N') { // scaling with beta kblas_cscal_async(rows, beta, dY, incy, stream); int mod_r = rows % cgemvn_nb; int mod_c = cols % cgemvn_width; int blocks = rows/cgemvn_nb; if(mod_r != 0) blocks += 1; const int thread_x = cgemvn_nb; const int thread_y = cgemvn_ntcol; const int ept = cgemvn_ept; int threshold = mod_c / ept; int ept_ = mod_c % ept; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, cgemvn_by); switch(ept_) { case 0:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 1:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 2:hipLaunchKernelGGL(( 
gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 3:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 4:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 5:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 6:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 7:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 8:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; default: printf("irregular part %d is not supported, please extend the case statement of cgemv\n", ept_); exit(1); } } // end of non-transpose case else if(trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') { // scaling with beta kblas_cscal_async(cols, beta, dY, incy, stream); int mod_r = rows % cgemvt_nb; int mod_c = cols % cgemvt_width; int blocks = cols/cgemvt_width; if(mod_c != 0) blocks += 1; const int thread_x 
= cgemvt_nb; const int thread_y = cgemvt_ntcol; const int ept = cgemvt_ept; int threshold = mod_c / ept; int ept_ = mod_c % ept; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, cgemvt_by); int conj; if(trans == 'c' || trans == 'C')conj = 1; else conj = 0; //printf("modr = %d, modc = %d, threshold = %d, ept_ = %d \n", mod_r, mod_c, threshold, ept_); switch(ept_) { case 0:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 1:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 2:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 3:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 4:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 5:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 6:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, 
conj); break; case 7:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 8:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; default: printf("irregular part %d is not supported, please extend the case statement of cgemv\n", ept_); exit(1); } } else { printf("CGEMV error: Unrecognized transpose mode %c \n", trans); return -1; } return 0; } extern "C" int kblas_cgemv2(char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy) { return kblas_cgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, 0); } extern "C" int kblas_cgemv2_async( char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, hipStream_t stream) { return kblas_cgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, stream); }
71b04a497393d6662f3c3ec92667235e74f4ec59.cu
/* -- KBLAS (version 1.0) -- Ahmad Abdelfattah, Center of Extreme Computing Hatem Ltaief, Supercomputing Laboratory David Keyes, Center of Extreme Computing King Abdullah University of Science and Technology (KAUST) June 2013 KBLAS is a subset of BLAS routines highly optimized for NVIDIA GPUs */ /** -- Center of Extreme Computing and Supercomputing Laboratory -- Division of Applied Mathematics and Computational Science -- King Abdullah University of Science and Technology -- (C) Copyright 2013 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University of Tennessee, Knoxville nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
**/ #include <stdio.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <cublas.h> #include "gemv2_core.cuh" #if(SM >= 30) #define cgemvn_nb (32) #define cgemvn_ntcol (4) #define cgemvn_ept (2) #define cgemvn_width (cgemvn_ntcol*cgemvn_ept) #define cgemvn_by (16) #define cgemvt_nb (32) #define cgemvt_ntcol (2) #define cgemvt_ept (4) #define cgemvt_width (cgemvt_ntcol*cgemvt_ept) #define cgemvt_by (8) #else #define cgemvn_nb (64) #define cgemvn_ntcol (8) #define cgemvn_ept (2) #define cgemvn_width (cgemvn_ntcol*cgemvn_ept) #define cgemvn_by (1) #define cgemvt_nb (64) #define cgemvt_ntcol (8) #define cgemvt_ept (2) #define cgemvt_width (cgemvt_ntcol*cgemvt_ept) #define cgemvt_by (1) #endif extern "C" int kblas_cscal_async(int n, cuFloatComplex alpha, cuFloatComplex *x, int incx, cudaStream_t stream); int kblas_cgemv2_driver( char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, cudaStream_t stream) { if(trans == 'n' || trans == 'N') { // scaling with beta kblas_cscal_async(rows, beta, dY, incy, stream); int mod_r = rows % cgemvn_nb; int mod_c = cols % cgemvn_width; int blocks = rows/cgemvn_nb; if(mod_r != 0) blocks += 1; const int thread_x = cgemvn_nb; const int thread_y = cgemvn_ntcol; const int ept = cgemvn_ept; int threshold = mod_c / ept; int ept_ = mod_c % ept; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, cgemvn_by); switch(ept_) { case 0: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 1: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 2: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, 
cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 3: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 4: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 5: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 6: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 7: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 8: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; default: printf("irregular part %d is not supported, please extend the case statement of cgemv\n", ept_); exit(1); } } // end of non-transpose case else if(trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') { // scaling with beta kblas_cscal_async(cols, beta, dY, incy, stream); int mod_r = rows % cgemvt_nb; int mod_c = cols % cgemvt_width; int blocks = cols/cgemvt_width; if(mod_c != 0) blocks += 1; const int thread_x = cgemvt_nb; const int thread_y = cgemvt_ntcol; const int ept = cgemvt_ept; int threshold = mod_c / ept; int ept_ = mod_c % ept; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, cgemvt_by); int conj; if(trans == 'c' || trans == 'C')conj = 1; else conj = 0; //printf("modr = %d, modc = %d, 
threshold = %d, ept_ = %d \n", mod_r, mod_c, threshold, ept_); switch(ept_) { case 0: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 1: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 2: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 3: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 4: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 5: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 6: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 7: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 8: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; default: printf("irregular part %d is not supported, please extend the case statement 
of cgemv\n", ept_); exit(1); } } else { printf("CGEMV error: Unrecognized transpose mode %c \n", trans); return -1; } return 0; } extern "C" int kblas_cgemv2(char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy) { return kblas_cgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, 0); } extern "C" int kblas_cgemv2_async( char trans, int rows, int cols, cuFloatComplex alpha, cuFloatComplex *dA, int lda, cuFloatComplex *dX, int incx, cuFloatComplex beta, cuFloatComplex *dY, int incy, cudaStream_t stream) { return kblas_cgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, stream); }
7fd8fd9d07b641dac504ff4a8b276f355bcca4a6.hip
// !!! This is a file automatically generated by hipify!!! /* Collision detection using binary-tree based BVH. This code uses Nvidia's cuda tutorial as an utility, which refers to the book.h below. Author: Asichurter Date: 2021-11-01 */ #include <hip/hip_runtime.h> #include <vector> #include <string> #include <set> #include "load_obj.h" #include "collision.cuh" #include "check.cuh" #include "./common/book.h" void printElapsedTime(hipEvent_t* start, hipEvent_t* stop, const char* opname) { printf("\nTime of %s: ", opname); float elapsedTime; HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, *start, *stop)); printf("%3.1f ms\n", elapsedTime); } void printTriangleVertex(Triangle* t, vector<vec3f>& vs) { vec3f *v1 = &vs[t->vIdx[0]], *v2 = &vs[t->vIdx[1]], *v3 = &vs[t->vIdx[2]]; printf("# v1(%u)=(%.4f, %.4f, %.4f), v2(%u)=(%.4f, %.4f, %.4f), v3(%u)=(%.4f, %.4f, %.4f)\n", t->vIdx[0], v1->x, v1->y, v1->z, t->vIdx[1], v2->x, v2->y, v2->z, t->vIdx[2], v3->x, v3->y, v3->z); } void makeAndPrintSet(unsigned int* data, unsigned int num, const char* title) { set<unsigned int> dset; for (int i = 0; i < num; i++) { dset.insert(data[i]); } printf("\n\n%s%u points in total:\n", title, dset.size()); set<unsigned int>::iterator it; for (it = dset.begin(); it != dset.end(); it++) { printf("%u\n", *it); } } int main() { hipEvent_t start, stop, m_start, m_stop; HANDLE_ERROR(hipEventCreate(&start)); HANDLE_ERROR(hipEventCreate(&stop)); HANDLE_ERROR(hipEventCreate(&m_start)); HANDLE_ERROR(hipEventCreate(&m_stop)); HANDLE_ERROR(hipEventRecord(m_start, 0)); // Set your OBJ file path here const std::string file_path = "./resources/flag-2000-changed.obj"; std::vector<vec3f> vertexes; std::vector<Triangle> triangles; std::vector<unsigned long long int> mortons; loadObj(file_path, vertexes, triangles, mortons); vec3f* v_ptr; Triangle* t_ptr; unsigned long long int* m_ptr; Node* leaf_nodes; Node* internal_nodes; unsigned int* collision_list; unsigned int* test_val; unsigned int temp_nums[100]; 
unsigned int h_collision_list[1000]; Triangle* colTris; /* Allocate and copy GPU memory */ HANDLE_ERROR(hipMalloc((void**)&v_ptr, vertexes.size() * sizeof(vec3f))); HANDLE_ERROR(hipMalloc((void**)&t_ptr, triangles.size() * sizeof(Triangle))); HANDLE_ERROR(hipMalloc((void**)&m_ptr, mortons.size() * sizeof(unsigned long long int))); HANDLE_ERROR(hipMalloc((void**)&collision_list, 1000 * sizeof(unsigned int))); HANDLE_ERROR(hipMalloc((void**)&test_val, sizeof(unsigned int))); HANDLE_ERROR(hipMalloc((void**)&colTris, 100 * sizeof(Triangle))); HANDLE_ERROR(hipMalloc((void**)&leaf_nodes, mortons.size() * sizeof(Node))); HANDLE_ERROR(hipMalloc((void**)&internal_nodes, (mortons.size() - 1) * sizeof(Node))); HANDLE_ERROR(hipMemcpy(v_ptr, &vertexes[0], vertexes.size() * sizeof(vec3f), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(t_ptr, &triangles[0], triangles.size() * sizeof(Triangle), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(m_ptr, &mortons[0], mortons.size() * sizeof(unsigned long long int), hipMemcpyHostToDevice)); /* Fill leaf nodes with triangles */ HANDLE_ERROR(hipEventRecord(start, 0)); hipLaunchKernelGGL(( fillLeafNodes) , dim3(128), dim3(128) , 0, 0, t_ptr, mortons.size(), leaf_nodes); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); printElapsedTime(&start, &stop, "fillLeafNode"); /* Generate BVH parallel */ HANDLE_ERROR(hipMemset(collision_list, 0, sizeof(unsigned int) * 5)); HANDLE_ERROR(hipEventRecord(start, 0)); hipLaunchKernelGGL(( generateHierarchyParallel) , dim3(128), dim3(128) , 0, 0, m_ptr, mortons.size(), leaf_nodes, internal_nodes, &collision_list[0]); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); printElapsedTime(&start, &stop, "generateHierarchyParallel"); HANDLE_ERROR(hipMemcpy(temp_nums, collision_list, 5 * sizeof(unsigned int), hipMemcpyDeviceToHost)); printf("\n- generateHierarchyParallel check result: wrongParentNum = %u, with total nodes=%u\n\n", temp_nums[0], 
mortons.size() - 1); /* Calculate bounding box bottom-up */ HANDLE_ERROR(hipEventRecord(start, 0)); hipLaunchKernelGGL(( calBoundingBox) , dim3(128), dim3(128) , 0, 0, leaf_nodes, v_ptr, mortons.size()); //std::cout << "- calBoundingBox returned" << std::endl << std::endl; HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); printElapsedTime(&start, &stop, "calBoundingBox"); /* Self-check internal nodes and leaf nodes */ HANDLE_ERROR(hipMemset(collision_list, 0, sizeof(unsigned int) * 5)); HANDLE_ERROR(hipEventRecord(start, 0)); hipLaunchKernelGGL(( checkInternalNodes) , dim3(128), dim3(128) , 0, 0, internal_nodes, mortons.size()-1, &collision_list[0], &collision_list[1], &collision_list[2], &collision_list[3], &collision_list[4]); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); printElapsedTime(&start, &stop, "checkInternalNodes"); HANDLE_ERROR(hipMemcpy(temp_nums, collision_list, 5 * sizeof(unsigned int), hipMemcpyDeviceToHost)); printf("\n- Internal node check result: nullParentnum = %u, wrongBoundCount=%u, nullChildCount=%u, notInternalCount=%u, uninitBoxCount=%u, with total nodes=%u\n\n", temp_nums[0], temp_nums[1], temp_nums[2], temp_nums[3], temp_nums[4], mortons.size()-1); HANDLE_ERROR(hipMemset(collision_list, 0, sizeof(unsigned int) * 5)); HANDLE_ERROR(hipEventRecord(start, 0)); hipLaunchKernelGGL(( checkLeafNodes) , dim3(128), dim3(128) , 0, 0, leaf_nodes, mortons.size(), &collision_list[0], &collision_list[1], &collision_list[2], &collision_list[3]); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); printElapsedTime(&start, &stop, "checkLeafNodes"); HANDLE_ERROR(hipMemcpy(temp_nums, collision_list, 5 * sizeof(unsigned int), hipMemcpyDeviceToHost)); printf("\n- Leaf node check result: nullParentnum = %u, nullTriangle=%u, notLeafCount=%u, illegalBoxCount=%u, with total nodes=%u\n\n", temp_nums[0], temp_nums[1], temp_nums[2], temp_nums[3], mortons.size()); 
HANDLE_ERROR(hipMemset(collision_list, 0, sizeof(unsigned int) * 5)); HANDLE_ERROR(hipEventRecord(start, 0)); hipLaunchKernelGGL(( checkTriangleIdx) , dim3(128), dim3(128) , 0, 0, leaf_nodes, v_ptr, mortons.size(), 632674, &collision_list[0]); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); printElapsedTime(&start, &stop, "checkTriangleIdx"); HANDLE_ERROR(hipMemcpy(temp_nums, collision_list, sizeof(unsigned int), hipMemcpyDeviceToHost)); printf("\n- Triangle check result: illegal triangle vidx num = %u, with total triangles=%u\n\n", temp_nums[0], mortons.size()); printf("\n$ triangle num = %u, mortons num = %u, vertex num = %u\n\n", triangles.size(), mortons.size(), vertexes.size()); /* Find collision pairs */ HANDLE_ERROR(hipEventRecord(start, 0)); dim3 blocks(128, 128); dim3 threads(128); hipLaunchKernelGGL(( findCollisions) , dim3(blocks), dim3(threads), 0, 0, &internal_nodes[0], leaf_nodes, v_ptr, mortons.size(), test_val, collision_list); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); printElapsedTime(&start, &stop, "findCollisions"); HANDLE_ERROR(hipMemcpy(temp_nums, test_val, sizeof(unsigned int), hipMemcpyDeviceToHost)); HANDLE_ERROR(hipMemcpy(h_collision_list, collision_list, 1000 * sizeof(unsigned int), hipMemcpyDeviceToHost)); printf("\n\n- contact val = %u\n", temp_nums[0]); printf("\nCollision pair (%u triangle pairs in total):\n", temp_nums[0]); for (int i = 0; i < temp_nums[0]; i++) { printf("%07u - %07u\n", h_collision_list[2*i], h_collision_list[2*i+1]); } makeAndPrintSet(h_collision_list, 2 * temp_nums[0], "Collision Triangles:"); HANDLE_ERROR(hipFree(v_ptr)); HANDLE_ERROR(hipFree(t_ptr)); HANDLE_ERROR(hipFree(m_ptr)); HANDLE_ERROR(hipFree(leaf_nodes)); HANDLE_ERROR(hipFree(internal_nodes)); HANDLE_ERROR(hipFree(collision_list)); HANDLE_ERROR(hipFree(test_val)); HANDLE_ERROR(hipFree(colTris)); HANDLE_ERROR(hipEventDestroy(start)); HANDLE_ERROR(hipEventDestroy(stop)); std::cout << "- 
Successfully Return" << std::endl; HANDLE_ERROR(hipEventRecord(m_stop, 0)); HANDLE_ERROR(hipEventSynchronize(m_stop)); printElapsedTime(&m_start, &m_stop, "Total Time"); return 0; }
7fd8fd9d07b641dac504ff4a8b276f355bcca4a6.cu
/* Collision detection using binary-tree based BVH. This code uses Nvidia's cuda tutorial as an utility, which refers to the book.h below. Author: Asichurter Date: 2021-11-01 */ #include <cuda_runtime.h> #include <vector> #include <string> #include <set> #include "load_obj.h" #include "collision.cuh" #include "check.cuh" #include "./common/book.h" void printElapsedTime(cudaEvent_t* start, cudaEvent_t* stop, const char* opname) { printf("\nTime of %s: ", opname); float elapsedTime; HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, *start, *stop)); printf("%3.1f ms\n", elapsedTime); } void printTriangleVertex(Triangle* t, vector<vec3f>& vs) { vec3f *v1 = &vs[t->vIdx[0]], *v2 = &vs[t->vIdx[1]], *v3 = &vs[t->vIdx[2]]; printf("# v1(%u)=(%.4f, %.4f, %.4f), v2(%u)=(%.4f, %.4f, %.4f), v3(%u)=(%.4f, %.4f, %.4f)\n", t->vIdx[0], v1->x, v1->y, v1->z, t->vIdx[1], v2->x, v2->y, v2->z, t->vIdx[2], v3->x, v3->y, v3->z); } void makeAndPrintSet(unsigned int* data, unsigned int num, const char* title) { set<unsigned int> dset; for (int i = 0; i < num; i++) { dset.insert(data[i]); } printf("\n\n%s£¨%u points in total£©:\n", title, dset.size()); set<unsigned int>::iterator it; for (it = dset.begin(); it != dset.end(); it++) { printf("%u\n", *it); } } int main() { cudaEvent_t start, stop, m_start, m_stop; HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); HANDLE_ERROR(cudaEventCreate(&m_start)); HANDLE_ERROR(cudaEventCreate(&m_stop)); HANDLE_ERROR(cudaEventRecord(m_start, 0)); // Set your OBJ file path here const std::string file_path = "./resources/flag-2000-changed.obj"; std::vector<vec3f> vertexes; std::vector<Triangle> triangles; std::vector<unsigned long long int> mortons; loadObj(file_path, vertexes, triangles, mortons); vec3f* v_ptr; Triangle* t_ptr; unsigned long long int* m_ptr; Node* leaf_nodes; Node* internal_nodes; unsigned int* collision_list; unsigned int* test_val; unsigned int temp_nums[100]; unsigned int h_collision_list[1000]; Triangle* colTris; 
/* Allocate and copy GPU memory */ HANDLE_ERROR(cudaMalloc((void**)&v_ptr, vertexes.size() * sizeof(vec3f))); HANDLE_ERROR(cudaMalloc((void**)&t_ptr, triangles.size() * sizeof(Triangle))); HANDLE_ERROR(cudaMalloc((void**)&m_ptr, mortons.size() * sizeof(unsigned long long int))); HANDLE_ERROR(cudaMalloc((void**)&collision_list, 1000 * sizeof(unsigned int))); HANDLE_ERROR(cudaMalloc((void**)&test_val, sizeof(unsigned int))); HANDLE_ERROR(cudaMalloc((void**)&colTris, 100 * sizeof(Triangle))); HANDLE_ERROR(cudaMalloc((void**)&leaf_nodes, mortons.size() * sizeof(Node))); HANDLE_ERROR(cudaMalloc((void**)&internal_nodes, (mortons.size() - 1) * sizeof(Node))); HANDLE_ERROR(cudaMemcpy(v_ptr, &vertexes[0], vertexes.size() * sizeof(vec3f), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(t_ptr, &triangles[0], triangles.size() * sizeof(Triangle), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(m_ptr, &mortons[0], mortons.size() * sizeof(unsigned long long int), cudaMemcpyHostToDevice)); /* Fill leaf nodes with triangles */ HANDLE_ERROR(cudaEventRecord(start, 0)); fillLeafNodes <<< 128, 128 >>> (t_ptr, mortons.size(), leaf_nodes); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); printElapsedTime(&start, &stop, "fillLeafNode"); /* Generate BVH parallel */ HANDLE_ERROR(cudaMemset(collision_list, 0, sizeof(unsigned int) * 5)); HANDLE_ERROR(cudaEventRecord(start, 0)); generateHierarchyParallel <<< 128, 128 >>> (m_ptr, mortons.size(), leaf_nodes, internal_nodes, &collision_list[0]); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); printElapsedTime(&start, &stop, "generateHierarchyParallel"); HANDLE_ERROR(cudaMemcpy(temp_nums, collision_list, 5 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); printf("\n- generateHierarchyParallel check result: wrongParentNum = %u, with total nodes=%u\n\n", temp_nums[0], mortons.size() - 1); /* Calculate bounding box bottom-up */ HANDLE_ERROR(cudaEventRecord(start, 0)); 
calBoundingBox <<< 128, 128 >>> (leaf_nodes, v_ptr, mortons.size()); //std::cout << "- calBoundingBox returned" << std::endl << std::endl; HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); printElapsedTime(&start, &stop, "calBoundingBox"); /* Self-check internal nodes and leaf nodes */ HANDLE_ERROR(cudaMemset(collision_list, 0, sizeof(unsigned int) * 5)); HANDLE_ERROR(cudaEventRecord(start, 0)); checkInternalNodes <<< 128, 128 >>> (internal_nodes, mortons.size()-1, &collision_list[0], &collision_list[1], &collision_list[2], &collision_list[3], &collision_list[4]); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); printElapsedTime(&start, &stop, "checkInternalNodes"); HANDLE_ERROR(cudaMemcpy(temp_nums, collision_list, 5 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); printf("\n- Internal node check result: nullParentnum = %u, wrongBoundCount=%u, nullChildCount=%u, notInternalCount=%u, uninitBoxCount=%u, with total nodes=%u\n\n", temp_nums[0], temp_nums[1], temp_nums[2], temp_nums[3], temp_nums[4], mortons.size()-1); HANDLE_ERROR(cudaMemset(collision_list, 0, sizeof(unsigned int) * 5)); HANDLE_ERROR(cudaEventRecord(start, 0)); checkLeafNodes <<< 128, 128 >>> (leaf_nodes, mortons.size(), &collision_list[0], &collision_list[1], &collision_list[2], &collision_list[3]); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); printElapsedTime(&start, &stop, "checkLeafNodes"); HANDLE_ERROR(cudaMemcpy(temp_nums, collision_list, 5 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); printf("\n- Leaf node check result: nullParentnum = %u, nullTriangle=%u, notLeafCount=%u, illegalBoxCount=%u, with total nodes=%u\n\n", temp_nums[0], temp_nums[1], temp_nums[2], temp_nums[3], mortons.size()); HANDLE_ERROR(cudaMemset(collision_list, 0, sizeof(unsigned int) * 5)); HANDLE_ERROR(cudaEventRecord(start, 0)); checkTriangleIdx <<< 128, 128 >>> (leaf_nodes, v_ptr, mortons.size(), 632674, 
&collision_list[0]); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); printElapsedTime(&start, &stop, "checkTriangleIdx"); HANDLE_ERROR(cudaMemcpy(temp_nums, collision_list, sizeof(unsigned int), cudaMemcpyDeviceToHost)); printf("\n- Triangle check result: illegal triangle vidx num = %u, with total triangles=%u\n\n", temp_nums[0], mortons.size()); printf("\n$ triangle num = %u, mortons num = %u, vertex num = %u\n\n", triangles.size(), mortons.size(), vertexes.size()); /* Find collision pairs */ HANDLE_ERROR(cudaEventRecord(start, 0)); dim3 blocks(128, 128); dim3 threads(128); findCollisions <<< blocks, threads>>> (&internal_nodes[0], leaf_nodes, v_ptr, mortons.size(), test_val, collision_list); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); printElapsedTime(&start, &stop, "findCollisions"); HANDLE_ERROR(cudaMemcpy(temp_nums, test_val, sizeof(unsigned int), cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(h_collision_list, collision_list, 1000 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); printf("\n\n- contact val = %u\n", temp_nums[0]); printf("\nCollision pair (%u triangle pairs in total):\n", temp_nums[0]); for (int i = 0; i < temp_nums[0]; i++) { printf("%07u - %07u\n", h_collision_list[2*i], h_collision_list[2*i+1]); } makeAndPrintSet(h_collision_list, 2 * temp_nums[0], "Collision Triangles:"); HANDLE_ERROR(cudaFree(v_ptr)); HANDLE_ERROR(cudaFree(t_ptr)); HANDLE_ERROR(cudaFree(m_ptr)); HANDLE_ERROR(cudaFree(leaf_nodes)); HANDLE_ERROR(cudaFree(internal_nodes)); HANDLE_ERROR(cudaFree(collision_list)); HANDLE_ERROR(cudaFree(test_val)); HANDLE_ERROR(cudaFree(colTris)); HANDLE_ERROR(cudaEventDestroy(start)); HANDLE_ERROR(cudaEventDestroy(stop)); std::cout << "- Successfully Return" << std::endl; HANDLE_ERROR(cudaEventRecord(m_stop, 0)); HANDLE_ERROR(cudaEventSynchronize(m_stop)); printElapsedTime(&m_start, &m_stop, "Total Time"); return 0; }
8c82ff5cc74eacc23d541b8a944d903380b4fbed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip_helpers.cuh" #define float3 Float3 struct Float3 { float x,y,z; __device__ friend Float3 operator+(const Float3 a, const Float3 b) { Float3 c; c.x = a.x+b.x; c.y = a.y+b.y; c.z = a.z+b.z; return c; } __device__ friend Float3 operator-(const Float3 a, const Float3 b) { Float3 c; c.x = a.x-b.x; c.y = a.y-b.y; c.z = a.z-b.z; return c; } __device__ friend Float3 operator/(const Float3 a, const Float3 b) { Float3 c; c.x = a.x/b.x; c.y = a.y/b.y; c.z = a.z/b.z; return c; } __device__ friend Float3 operator/(const float a, const Float3 b) { Float3 c; c.x = a/b.x; c.y = a/b.y; c.z = a/b.z; return c; } __device__ friend Float3 operator*(const Float3 a, const Float3 b) { Float3 c; c.x = a.x*b.x; c.y = a.y*b.y; c.z = a.z*b.z; return c; } __device__ friend Float3 operator*(const Float3 a, const float b) { Float3 c; c.x = a.x*b; c.y = a.y*b; c.z = a.z*b; return c; } }; #define int8 char #define int16 short struct Ray { float3 origin, dir, invDir; unsigned int dirIsNeg0, dirIsNeg1, dirIsNeg2; float mint, maxt; int hitId; }; struct Triangle { float p[3][4]; int id; int pad[3]; }; struct LinearBVHNode { float bounds[2][3]; unsigned int offset; // num primitives for leaf, second child for interior unsigned int8 nPrimitives; unsigned int8 splitAxis; unsigned int16 pad; }; __device__ static inline float3 Cross(const float3 v1, const float3 v2) { float v1x = v1.x, v1y = v1.y, v1z = v1.z; float v2x = v2.x, v2y = v2.y, v2z = v2.z; float3 ret; ret.x = (v1y * v2z) - (v1z * v2y); ret.y = (v1z * v2x) - (v1x * v2z); ret.z = (v1x * v2y) - (v1y * v2x); return ret; } __device__ static inline float Dot(const float3 a, const float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } __device__ inline static void generateRay( const float raster2camera[4][4], const float camera2world[4][4], float x, float y, Ray &ray) { ray.mint = 0.f; ray.maxt = 1e30f; ray.hitId = 0; // transform raster coordinate 
(x, y, 0) to camera space float camx = raster2camera[0][0] * x + raster2camera[0][1] * y + raster2camera[0][3]; float camy = raster2camera[1][0] * x + raster2camera[1][1] * y + raster2camera[1][3]; float camz = raster2camera[2][3]; float camw = raster2camera[3][3]; camx /= camw; camy /= camw; camz /= camw; ray.dir.x = camera2world[0][0] * camx + camera2world[0][1] * camy + camera2world[0][2] * camz; ray.dir.y = camera2world[1][0] * camx + camera2world[1][1] * camy + camera2world[1][2] * camz; ray.dir.z = camera2world[2][0] * camx + camera2world[2][1] * camy + camera2world[2][2] * camz; ray.origin.x = camera2world[0][3] / camera2world[3][3]; ray.origin.y = camera2world[1][3] / camera2world[3][3]; ray.origin.z = camera2world[2][3] / camera2world[3][3]; ray.invDir = 1.f / ray.dir; #if 0 ray.dirIsNeg[0] = any(ray.invDir.x < 0) ? 1 : 0; ray.dirIsNeg[1] = any(ray.invDir.y < 0) ? 1 : 0; ray.dirIsNeg[2] = any(ray.invDir.z < 0) ? 1 : 0; #else ray.dirIsNeg0 = any(ray.invDir.x < 0) ? 1 : 0; ray.dirIsNeg1 = any(ray.invDir.y < 0) ? 1 : 0; ray.dirIsNeg2 = any(ray.invDir.z < 0) ? 1 : 0; #endif } __device__ inline static bool BBoxIntersect(const float bounds[2][3], const Ray &ray) { float3 bounds0 = { bounds[0][0], bounds[0][1], bounds[0][2] }; float3 bounds1 = { bounds[1][0], bounds[1][1], bounds[1][2] }; float t0 = ray.mint, t1 = ray.maxt; // Check all three axis-aligned slabs. 
Don't try to early out; it's // not worth the trouble float3 tNear = (bounds0 - ray.origin) * ray.invDir; float3 tFar = (bounds1 - ray.origin) * ray.invDir; if (tNear.x > tFar.x) { float tmp = tNear.x; tNear.x = tFar.x; tFar.x = tmp; } t0 = max(tNear.x, t0); t1 = min(tFar.x, t1); if (tNear.y > tFar.y) { float tmp = tNear.y; tNear.y = tFar.y; tFar.y = tmp; } t0 = max(tNear.y, t0); t1 = min(tFar.y, t1); if (tNear.z > tFar.z) { float tmp = tNear.z; tNear.z = tFar.z; tFar.z = tmp; } t0 = max(tNear.z, t0); t1 = min(tFar.z, t1); return (t0 <= t1); } __device__ inline static bool TriIntersect(const Triangle &tri, Ray &ray) { float3 p0 = { tri.p[0][0], tri.p[0][1], tri.p[0][2] }; float3 p1 = { tri.p[1][0], tri.p[1][1], tri.p[1][2] }; float3 p2 = { tri.p[2][0], tri.p[2][1], tri.p[2][2] }; float3 e1 = p1 - p0; float3 e2 = p2 - p0; float3 s1 = Cross(ray.dir, e2); float divisor = Dot(s1, e1); bool hit = true; if (divisor == 0.) hit = false; float invDivisor = 1.f / divisor; // Compute first barycentric coordinate float3 d = ray.origin - p0; float b1 = Dot(d, s1) * invDivisor; if (b1 < 0. || b1 > 1.) hit = false; // Compute second barycentric coordinate float3 s2 = Cross(d, e1); float b2 = Dot(ray.dir, s2) * invDivisor; if (b2 < 0. || b1 + b2 > 1.) 
hit = false; // Compute _t_ to intersection point float t = Dot(e2, s2) * invDivisor; if (t < ray.mint || t > ray.maxt) hit = false; if (hit) { ray.maxt = t; ray.hitId = tri.id; } return hit; } __device__ inline bool BVHIntersect(const LinearBVHNode nodes[], const Triangle tris[], Ray &r, int todo[]) { Ray ray = r; bool hit = false; // Follow ray through BVH nodes to find primitive intersections int todoOffset = 0, nodeNum = 0; while (true) { // Check ray against BVH node LinearBVHNode node = nodes[nodeNum]; if (any(BBoxIntersect(node.bounds, ray))) { unsigned int nPrimitives = node.nPrimitives; if (nPrimitives > 0) { // Intersect ray with primitives in leaf BVH node unsigned int primitivesOffset = node.offset; for ( unsigned int i = 0; i < nPrimitives; ++i) { if (TriIntersect(tris[primitivesOffset+i], ray)) hit = true; } if (todoOffset == 0) break; nodeNum = todo[--todoOffset]; } else { // Put far BVH node on _todo_ stack, advance to near node int dirIsNeg; if (node.splitAxis == 0) dirIsNeg = r.dirIsNeg0; if (node.splitAxis == 1) dirIsNeg = r.dirIsNeg1; if (node.splitAxis == 2) dirIsNeg = r.dirIsNeg2; if (dirIsNeg) { todo[todoOffset++] = nodeNum + 1; nodeNum = node.offset; } else { todo[todoOffset++] = node.offset; nodeNum = nodeNum + 1; } } } else { if (todoOffset == 0) break; nodeNum = todo[--todoOffset]; } } r.maxt = ray.maxt; r.hitId = ray.hitId; return hit; } __device__ inline static void raytrace_tile( int x0, int x1, int y0, int y1, int width, int height, int baseWidth, int baseHeight, const float raster2camera[4][4], const float camera2world[4][4], float image[], int id[], const LinearBVHNode nodes[], const Triangle triangles[]) { float widthScale = (float)(baseWidth) / (float)(width); float heightScale = (float)(baseHeight) / (float)(height); #if 0 int * todo = new int[64]; #define ALLOC #else int todo[64]; #endif for (int y = y0 ;y < y1; y++) for (int x = x0 + programIndex; x < x1; x += programCount) if (x < x1) { Ray ray; generateRay(raster2camera, 
camera2world, x*widthScale, y*heightScale, ray); BVHIntersect(nodes, triangles, ray, todo); int offset = y * width + x; image[offset] = ray.maxt; id[offset] = ray.hitId; } #ifdef ALLOC delete todo; #endif } __global__ void raytrace_tile_task( int width, int height, int baseWidth, int baseHeight, const float raster2camera[4][4], const float camera2world[4][4], float image[], int id[], const LinearBVHNode nodes[], const Triangle triangles[]) { int dx = 64, dy = 8; // must match dx, dy below int xBuckets = (width + (dx-1)) / dx; int x0 = (taskIndex % xBuckets) * dx; int x1 = min(x0 + dx, width); int y0 = (taskIndex / xBuckets) * dy; int y1 = min(y0 + dy, height); raytrace_tile(x0, x1, y0, y1, width, height, baseWidth, baseHeight, raster2camera, camera2world, image, id, nodes, triangles); } extern "C" __global__ void raytrace_ispc_tasks___export( int width, int height, int baseWidth, int baseHeight, const float raster2camera[4][4], const float camera2world[4][4], float image[], int id[], const LinearBVHNode nodes[], const Triangle triangles[]) { int dx = 64, dy = 8; int xBuckets = (width + (dx-1)) / dx; int yBuckets = (height + (dy-1)) / dy; int nTasks = xBuckets * yBuckets; launch(nTasks,1,1,raytrace_tile_task) (width, height, baseWidth, baseHeight, raster2camera, camera2world, image, id, nodes, triangles); hipDeviceSynchronize(); } extern "C" __host__ void raytrace_ispc_tasks( int width, int height, int baseWidth, int baseHeight, const float raster2camera[4][4], const float camera2world[4][4], float image[], int id[], const LinearBVHNode nodes[], const Triangle triangles[]) { hipLaunchKernelGGL(( raytrace_ispc_tasks___export), dim3(1),dim3(32), 0, 0, width, height, baseWidth, baseHeight, raster2camera, camera2world, image, id, nodes, triangles); hipDeviceSynchronize(); }
8c82ff5cc74eacc23d541b8a944d903380b4fbed.cu
#include "cuda_helpers.cuh" #define float3 Float3 struct Float3 { float x,y,z; __device__ friend Float3 operator+(const Float3 a, const Float3 b) { Float3 c; c.x = a.x+b.x; c.y = a.y+b.y; c.z = a.z+b.z; return c; } __device__ friend Float3 operator-(const Float3 a, const Float3 b) { Float3 c; c.x = a.x-b.x; c.y = a.y-b.y; c.z = a.z-b.z; return c; } __device__ friend Float3 operator/(const Float3 a, const Float3 b) { Float3 c; c.x = a.x/b.x; c.y = a.y/b.y; c.z = a.z/b.z; return c; } __device__ friend Float3 operator/(const float a, const Float3 b) { Float3 c; c.x = a/b.x; c.y = a/b.y; c.z = a/b.z; return c; } __device__ friend Float3 operator*(const Float3 a, const Float3 b) { Float3 c; c.x = a.x*b.x; c.y = a.y*b.y; c.z = a.z*b.z; return c; } __device__ friend Float3 operator*(const Float3 a, const float b) { Float3 c; c.x = a.x*b; c.y = a.y*b; c.z = a.z*b; return c; } }; #define int8 char #define int16 short struct Ray { float3 origin, dir, invDir; unsigned int dirIsNeg0, dirIsNeg1, dirIsNeg2; float mint, maxt; int hitId; }; struct Triangle { float p[3][4]; int id; int pad[3]; }; struct LinearBVHNode { float bounds[2][3]; unsigned int offset; // num primitives for leaf, second child for interior unsigned int8 nPrimitives; unsigned int8 splitAxis; unsigned int16 pad; }; __device__ static inline float3 Cross(const float3 v1, const float3 v2) { float v1x = v1.x, v1y = v1.y, v1z = v1.z; float v2x = v2.x, v2y = v2.y, v2z = v2.z; float3 ret; ret.x = (v1y * v2z) - (v1z * v2y); ret.y = (v1z * v2x) - (v1x * v2z); ret.z = (v1x * v2y) - (v1y * v2x); return ret; } __device__ static inline float Dot(const float3 a, const float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } __device__ inline static void generateRay( const float raster2camera[4][4], const float camera2world[4][4], float x, float y, Ray &ray) { ray.mint = 0.f; ray.maxt = 1e30f; ray.hitId = 0; // transform raster coordinate (x, y, 0) to camera space float camx = raster2camera[0][0] * x + raster2camera[0][1] * y 
+ raster2camera[0][3]; float camy = raster2camera[1][0] * x + raster2camera[1][1] * y + raster2camera[1][3]; float camz = raster2camera[2][3]; float camw = raster2camera[3][3]; camx /= camw; camy /= camw; camz /= camw; ray.dir.x = camera2world[0][0] * camx + camera2world[0][1] * camy + camera2world[0][2] * camz; ray.dir.y = camera2world[1][0] * camx + camera2world[1][1] * camy + camera2world[1][2] * camz; ray.dir.z = camera2world[2][0] * camx + camera2world[2][1] * camy + camera2world[2][2] * camz; ray.origin.x = camera2world[0][3] / camera2world[3][3]; ray.origin.y = camera2world[1][3] / camera2world[3][3]; ray.origin.z = camera2world[2][3] / camera2world[3][3]; ray.invDir = 1.f / ray.dir; #if 0 ray.dirIsNeg[0] = any(ray.invDir.x < 0) ? 1 : 0; ray.dirIsNeg[1] = any(ray.invDir.y < 0) ? 1 : 0; ray.dirIsNeg[2] = any(ray.invDir.z < 0) ? 1 : 0; #else ray.dirIsNeg0 = any(ray.invDir.x < 0) ? 1 : 0; ray.dirIsNeg1 = any(ray.invDir.y < 0) ? 1 : 0; ray.dirIsNeg2 = any(ray.invDir.z < 0) ? 1 : 0; #endif } __device__ inline static bool BBoxIntersect(const float bounds[2][3], const Ray &ray) { float3 bounds0 = { bounds[0][0], bounds[0][1], bounds[0][2] }; float3 bounds1 = { bounds[1][0], bounds[1][1], bounds[1][2] }; float t0 = ray.mint, t1 = ray.maxt; // Check all three axis-aligned slabs. 
Don't try to early out; it's // not worth the trouble float3 tNear = (bounds0 - ray.origin) * ray.invDir; float3 tFar = (bounds1 - ray.origin) * ray.invDir; if (tNear.x > tFar.x) { float tmp = tNear.x; tNear.x = tFar.x; tFar.x = tmp; } t0 = max(tNear.x, t0); t1 = min(tFar.x, t1); if (tNear.y > tFar.y) { float tmp = tNear.y; tNear.y = tFar.y; tFar.y = tmp; } t0 = max(tNear.y, t0); t1 = min(tFar.y, t1); if (tNear.z > tFar.z) { float tmp = tNear.z; tNear.z = tFar.z; tFar.z = tmp; } t0 = max(tNear.z, t0); t1 = min(tFar.z, t1); return (t0 <= t1); } __device__ inline static bool TriIntersect(const Triangle &tri, Ray &ray) { float3 p0 = { tri.p[0][0], tri.p[0][1], tri.p[0][2] }; float3 p1 = { tri.p[1][0], tri.p[1][1], tri.p[1][2] }; float3 p2 = { tri.p[2][0], tri.p[2][1], tri.p[2][2] }; float3 e1 = p1 - p0; float3 e2 = p2 - p0; float3 s1 = Cross(ray.dir, e2); float divisor = Dot(s1, e1); bool hit = true; if (divisor == 0.) hit = false; float invDivisor = 1.f / divisor; // Compute first barycentric coordinate float3 d = ray.origin - p0; float b1 = Dot(d, s1) * invDivisor; if (b1 < 0. || b1 > 1.) hit = false; // Compute second barycentric coordinate float3 s2 = Cross(d, e1); float b2 = Dot(ray.dir, s2) * invDivisor; if (b2 < 0. || b1 + b2 > 1.) 
hit = false; // Compute _t_ to intersection point float t = Dot(e2, s2) * invDivisor; if (t < ray.mint || t > ray.maxt) hit = false; if (hit) { ray.maxt = t; ray.hitId = tri.id; } return hit; } __device__ inline bool BVHIntersect(const LinearBVHNode nodes[], const Triangle tris[], Ray &r, int todo[]) { Ray ray = r; bool hit = false; // Follow ray through BVH nodes to find primitive intersections int todoOffset = 0, nodeNum = 0; while (true) { // Check ray against BVH node LinearBVHNode node = nodes[nodeNum]; if (any(BBoxIntersect(node.bounds, ray))) { unsigned int nPrimitives = node.nPrimitives; if (nPrimitives > 0) { // Intersect ray with primitives in leaf BVH node unsigned int primitivesOffset = node.offset; for ( unsigned int i = 0; i < nPrimitives; ++i) { if (TriIntersect(tris[primitivesOffset+i], ray)) hit = true; } if (todoOffset == 0) break; nodeNum = todo[--todoOffset]; } else { // Put far BVH node on _todo_ stack, advance to near node int dirIsNeg; if (node.splitAxis == 0) dirIsNeg = r.dirIsNeg0; if (node.splitAxis == 1) dirIsNeg = r.dirIsNeg1; if (node.splitAxis == 2) dirIsNeg = r.dirIsNeg2; if (dirIsNeg) { todo[todoOffset++] = nodeNum + 1; nodeNum = node.offset; } else { todo[todoOffset++] = node.offset; nodeNum = nodeNum + 1; } } } else { if (todoOffset == 0) break; nodeNum = todo[--todoOffset]; } } r.maxt = ray.maxt; r.hitId = ray.hitId; return hit; } __device__ inline static void raytrace_tile( int x0, int x1, int y0, int y1, int width, int height, int baseWidth, int baseHeight, const float raster2camera[4][4], const float camera2world[4][4], float image[], int id[], const LinearBVHNode nodes[], const Triangle triangles[]) { float widthScale = (float)(baseWidth) / (float)(width); float heightScale = (float)(baseHeight) / (float)(height); #if 0 int * todo = new int[64]; #define ALLOC #else int todo[64]; #endif for (int y = y0 ;y < y1; y++) for (int x = x0 + programIndex; x < x1; x += programCount) if (x < x1) { Ray ray; generateRay(raster2camera, 
camera2world, x*widthScale, y*heightScale, ray); BVHIntersect(nodes, triangles, ray, todo); int offset = y * width + x; image[offset] = ray.maxt; id[offset] = ray.hitId; } #ifdef ALLOC delete todo; #endif } __global__ void raytrace_tile_task( int width, int height, int baseWidth, int baseHeight, const float raster2camera[4][4], const float camera2world[4][4], float image[], int id[], const LinearBVHNode nodes[], const Triangle triangles[]) { int dx = 64, dy = 8; // must match dx, dy below int xBuckets = (width + (dx-1)) / dx; int x0 = (taskIndex % xBuckets) * dx; int x1 = min(x0 + dx, width); int y0 = (taskIndex / xBuckets) * dy; int y1 = min(y0 + dy, height); raytrace_tile(x0, x1, y0, y1, width, height, baseWidth, baseHeight, raster2camera, camera2world, image, id, nodes, triangles); } extern "C" __global__ void raytrace_ispc_tasks___export( int width, int height, int baseWidth, int baseHeight, const float raster2camera[4][4], const float camera2world[4][4], float image[], int id[], const LinearBVHNode nodes[], const Triangle triangles[]) { int dx = 64, dy = 8; int xBuckets = (width + (dx-1)) / dx; int yBuckets = (height + (dy-1)) / dy; int nTasks = xBuckets * yBuckets; launch(nTasks,1,1,raytrace_tile_task) (width, height, baseWidth, baseHeight, raster2camera, camera2world, image, id, nodes, triangles); cudaDeviceSynchronize(); } extern "C" __host__ void raytrace_ispc_tasks( int width, int height, int baseWidth, int baseHeight, const float raster2camera[4][4], const float camera2world[4][4], float image[], int id[], const LinearBVHNode nodes[], const Triangle triangles[]) { raytrace_ispc_tasks___export<<<1,32>>>( width, height, baseWidth, baseHeight, raster2camera, camera2world, image, id, nodes, triangles); cudaDeviceSynchronize(); }
728f19ca07a88c168d4060c96e9cc4c19296d853.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <chrono> #include <cmath> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "conv_kernel.h" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; /** * Main method that will profile our convolution function */ int main(int argc, const char* argv[]) { // Check to make sure we get an matrix size if(argc < 2) { cerr << "Please specify a size of the image matrix" << endl; cerr << "./conv_cuda <imgsize>" << endl; return EXIT_FAILURE; } // Size of our matrix and kernals const int imgSize = std::atoi(argv[1]); const int kernelSize = 10; // Allocate variables on stack double* imgIn = new double[imgSize*imgSize]; double* imgOut = new double[imgSize*imgSize]; double* kernel = new double[kernelSize*kernelSize]; // Total time int loopCt = 25; double sumTime = 0.0; double sumTimeCopy1 = 0.0; double sumTimeKernel = 0.0; double sumTimeCopy2 = 0.0; double sumTimeFree = 0.0; double* times = new double[loopCt]; // Startup the GPU device // https://devtalk.nvidia.com/default/topic/895513/cuda-programming-and-performance/cudamalloc-slow/post/4724457/#4724457 hipFree(0); // Send it to our function! 
for(int i=0; i<loopCt; ++i) { // Generate a image matrices for(int i=0; i<imgSize; ++i) { for(int j=0; j<imgSize; ++j) { imgIn[i*imgSize+j] = 5; imgOut[i*imgSize+j] = 0; } } // Generate kernel for(int i=0; i<kernelSize; ++i) { for(int j=0; j<kernelSize; ++j) { kernel[i*kernelSize+j] = 1.0/9.0; } } // Run the code std::chrono::high_resolution_clock::time_point t0 = std::chrono::high_resolution_clock::now(); // Allocate memory on the device double* imgInD, *imgOutD, *kernelD; gpuErrchk(hipMalloc((void**)&imgInD, imgSize*imgSize*sizeof(double))); gpuErrchk(hipMalloc((void**)&imgOutD, imgSize*imgSize*sizeof(double))); gpuErrchk(hipMalloc((void**)&kernelD, kernelSize*kernelSize*sizeof(double))); std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); // Copy our data to the device gpuErrchk(hipMemcpy(imgInD, imgIn, imgSize*imgSize*sizeof(double), hipMemcpyHostToDevice)); //gpuErrchk(hipMemcpy(imgOutD, imgOut, imgSize*imgSize*sizeof(double), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(kernelD, kernel, kernelSize*kernelSize*sizeof(double), hipMemcpyHostToDevice)); std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); // Calculate size of kernel int block_size = 32; int grid_sizex = 0; int grid_sizey = 0; int size_totalx = imgSize; int size_totaly = imgSize; //Calculate grid_size (add 1 if not evenly divided) if(size_totalx%block_size == 0) { grid_sizex = ceil(size_totalx/block_size); } else { grid_sizex = ceil(size_totalx/block_size) + 1; } if(size_totaly%block_size == 0) { grid_sizey = ceil(size_totaly/block_size); } else { grid_sizey = ceil(size_totaly/block_size) + 1; } // Create size objects dim3 DimGrid(grid_sizex,grid_sizey,1); dim3 DimBlock(block_size,block_size,1); // Debug //cout << "grid_size = " << grid_size << endl; //cout << "block_size = " << block_size << endl; // Launch the kernel hipLaunchKernelGGL(( perform_convolution), dim3(DimGrid), dim3(DimBlock), 0, 0, 
kernelD,kernelSize,kernelSize,imgInD,imgOutD,imgSize,imgSize); // Sync after the kernel is launched hipDeviceSynchronize(); std::chrono::high_resolution_clock::time_point t3 = std::chrono::high_resolution_clock::now(); // Copy back to host gpuErrchk(hipMemcpy(imgOut, imgOutD, imgSize*imgSize*sizeof(double), hipMemcpyDeviceToHost)); std::chrono::high_resolution_clock::time_point t4 = std::chrono::high_resolution_clock::now(); // Free the data gpuErrchk(hipFree(imgInD)); gpuErrchk(hipFree(kernelD)); gpuErrchk(hipFree(imgOutD)); std::chrono::high_resolution_clock::time_point t5 = std::chrono::high_resolution_clock::now(); double runTime = std::chrono::duration_cast<std::chrono::milliseconds>(t5 - t0).count(); // Store our results times[i] = runTime; sumTime += runTime; sumTimeCopy1 += std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count(); sumTimeKernel += std::chrono::duration_cast<std::chrono::milliseconds>(t3 - t2).count(); sumTimeCopy2 += std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3).count(); sumTimeFree += std::chrono::duration_cast<std::chrono::milliseconds>(t5 - t4).count(); // std::cout << "loop #" << i << " = " << runTime << " with " << imgOut[10] << " " << imgOut[1000] << std::endl; // std::cout << "\tmalloc: " << std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count() << std::endl; // std::cout << "\tcopy: " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() << std::endl; // std::cout << "\tkernel: " << std::chrono::duration_cast<std::chrono::milliseconds>(t3 - t2).count() << std::endl; // std::cout << "\tcopy: " << std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3).count() << std::endl; // std::cout << "\tfree: " << std::chrono::duration_cast<std::chrono::milliseconds>(t5 - t4).count() << std::endl; } // Print average std::cout << std::fixed << std::setprecision(5) << sumTime/loopCt << " ms average" << std::endl; // Calculate the std deviation double var = 0; for(int n=0; 
n<loopCt; ++n) { var += ::pow((times[n] - sumTime/loopCt),2); } var /= loopCt; double deviation = std::sqrt(var); std::cout << std::fixed << std::setprecision(5) << deviation << " sigma deviation" << std::endl; // Extra times for GPU computing std::cout << "\t" << std::fixed << std::setprecision(5) << sumTimeCopy1/loopCt << " ms average (copy1)" << std::endl; std::cout << "\t" << std::fixed << std::setprecision(5) << sumTimeKernel/loopCt << " ms average (kernel)" << std::endl; std::cout << "\t" << std::fixed << std::setprecision(5) << sumTimeCopy2/loopCt << " ms average (copy2)" << std::endl; std::cout << "\t" << std::fixed << std::setprecision(5) << sumTimeFree/loopCt << " ms average (free)" << std::endl; }
728f19ca07a88c168d4060c96e9cc4c19296d853.cu
#include <iostream> #include <iomanip> #include <chrono> #include <cmath> #include <cuda.h> #include <cuda_runtime_api.h> #include "conv_kernel.h" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; /** * Main method that will profile our convolution function */ int main(int argc, const char* argv[]) { // Check to make sure we get an matrix size if(argc < 2) { cerr << "Please specify a size of the image matrix" << endl; cerr << "./conv_cuda <imgsize>" << endl; return EXIT_FAILURE; } // Size of our matrix and kernals const int imgSize = std::atoi(argv[1]); const int kernelSize = 10; // Allocate variables on stack double* imgIn = new double[imgSize*imgSize]; double* imgOut = new double[imgSize*imgSize]; double* kernel = new double[kernelSize*kernelSize]; // Total time int loopCt = 25; double sumTime = 0.0; double sumTimeCopy1 = 0.0; double sumTimeKernel = 0.0; double sumTimeCopy2 = 0.0; double sumTimeFree = 0.0; double* times = new double[loopCt]; // Startup the GPU device // https://devtalk.nvidia.com/default/topic/895513/cuda-programming-and-performance/cudamalloc-slow/post/4724457/#4724457 cudaFree(0); // Send it to our function! 
for(int i=0; i<loopCt; ++i) { // Generate a image matrices for(int i=0; i<imgSize; ++i) { for(int j=0; j<imgSize; ++j) { imgIn[i*imgSize+j] = 5; imgOut[i*imgSize+j] = 0; } } // Generate kernel for(int i=0; i<kernelSize; ++i) { for(int j=0; j<kernelSize; ++j) { kernel[i*kernelSize+j] = 1.0/9.0; } } // Run the code std::chrono::high_resolution_clock::time_point t0 = std::chrono::high_resolution_clock::now(); // Allocate memory on the device double* imgInD, *imgOutD, *kernelD; gpuErrchk(cudaMalloc((void**)&imgInD, imgSize*imgSize*sizeof(double))); gpuErrchk(cudaMalloc((void**)&imgOutD, imgSize*imgSize*sizeof(double))); gpuErrchk(cudaMalloc((void**)&kernelD, kernelSize*kernelSize*sizeof(double))); std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); // Copy our data to the device gpuErrchk(cudaMemcpy(imgInD, imgIn, imgSize*imgSize*sizeof(double), cudaMemcpyHostToDevice)); //gpuErrchk(cudaMemcpy(imgOutD, imgOut, imgSize*imgSize*sizeof(double), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(kernelD, kernel, kernelSize*kernelSize*sizeof(double), cudaMemcpyHostToDevice)); std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); // Calculate size of kernel int block_size = 32; int grid_sizex = 0; int grid_sizey = 0; int size_totalx = imgSize; int size_totaly = imgSize; //Calculate grid_size (add 1 if not evenly divided) if(size_totalx%block_size == 0) { grid_sizex = ceil(size_totalx/block_size); } else { grid_sizex = ceil(size_totalx/block_size) + 1; } if(size_totaly%block_size == 0) { grid_sizey = ceil(size_totaly/block_size); } else { grid_sizey = ceil(size_totaly/block_size) + 1; } // Create size objects dim3 DimGrid(grid_sizex,grid_sizey,1); dim3 DimBlock(block_size,block_size,1); // Debug //cout << "grid_size = " << grid_size << endl; //cout << "block_size = " << block_size << endl; // Launch the kernel perform_convolution<<<DimGrid, 
DimBlock>>>(kernelD,kernelSize,kernelSize,imgInD,imgOutD,imgSize,imgSize); // Sync after the kernel is launched cudaDeviceSynchronize(); std::chrono::high_resolution_clock::time_point t3 = std::chrono::high_resolution_clock::now(); // Copy back to host gpuErrchk(cudaMemcpy(imgOut, imgOutD, imgSize*imgSize*sizeof(double), cudaMemcpyDeviceToHost)); std::chrono::high_resolution_clock::time_point t4 = std::chrono::high_resolution_clock::now(); // Free the data gpuErrchk(cudaFree(imgInD)); gpuErrchk(cudaFree(kernelD)); gpuErrchk(cudaFree(imgOutD)); std::chrono::high_resolution_clock::time_point t5 = std::chrono::high_resolution_clock::now(); double runTime = std::chrono::duration_cast<std::chrono::milliseconds>(t5 - t0).count(); // Store our results times[i] = runTime; sumTime += runTime; sumTimeCopy1 += std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count(); sumTimeKernel += std::chrono::duration_cast<std::chrono::milliseconds>(t3 - t2).count(); sumTimeCopy2 += std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3).count(); sumTimeFree += std::chrono::duration_cast<std::chrono::milliseconds>(t5 - t4).count(); // std::cout << "loop #" << i << " = " << runTime << " with " << imgOut[10] << " " << imgOut[1000] << std::endl; // std::cout << "\tmalloc: " << std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count() << std::endl; // std::cout << "\tcopy: " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() << std::endl; // std::cout << "\tkernel: " << std::chrono::duration_cast<std::chrono::milliseconds>(t3 - t2).count() << std::endl; // std::cout << "\tcopy: " << std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3).count() << std::endl; // std::cout << "\tfree: " << std::chrono::duration_cast<std::chrono::milliseconds>(t5 - t4).count() << std::endl; } // Print average std::cout << std::fixed << std::setprecision(5) << sumTime/loopCt << " ms average" << std::endl; // Calculate the std deviation double var = 
0; for(int n=0; n<loopCt; ++n) { var += std::pow((times[n] - sumTime/loopCt),2); } var /= loopCt; double deviation = std::sqrt(var); std::cout << std::fixed << std::setprecision(5) << deviation << " sigma deviation" << std::endl; // Extra times for GPU computing std::cout << "\t" << std::fixed << std::setprecision(5) << sumTimeCopy1/loopCt << " ms average (copy1)" << std::endl; std::cout << "\t" << std::fixed << std::setprecision(5) << sumTimeKernel/loopCt << " ms average (kernel)" << std::endl; std::cout << "\t" << std::fixed << std::setprecision(5) << sumTimeCopy2/loopCt << " ms average (copy2)" << std::endl; std::cout << "\t" << std::fixed << std::setprecision(5) << sumTimeFree/loopCt << " ms average (free)" << std::endl; }
ea56d5831c1370c96429e213acc2f133398fb1ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layers/convrelu_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ConvReLUForward(const int channels, const int len, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, channels) { Dtype sum(0); for (int i = 0; i < len; ++i) { sum += in[index * len + i]; } for (int i = 0; i < len; ++i) { out[i + index * len] = sum > 0 ? in[i + index * len] : 0; } } } template <typename Dtype> void ConvReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int channels_ = bottom[0]->channels(); int batch_size_ = bottom[0]->num(); const int count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int code_length_ = count / batch_size_; for (int i = 0; i < batch_size_; ++i) { hipLaunchKernelGGL(( ConvReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(channels_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, channels_, count / batch_size_ / channels_, bottom_data + i * code_length_, top_data + i * code_length_); } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ConvReLUBackward(const int channels, const int len, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, channels) { Dtype sum(0); for (int i = 0; i < len; ++i) { sum += in_data[i + index * len]; } for (int i = 0; i < len; ++i) { out_diff[i + index * len] = sum > 0 ? 
in_diff[i + index * len] : 0; } } } template <typename Dtype> void ConvReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); int channels_ = bottom[0]->channels(); int batch_size_ = bottom[0]->num(); const int code_length_ = count / batch_size_; for (int i = 0; i < batch_size_; ++i) { hipLaunchKernelGGL(( ConvReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(channels_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, channels_, count / batch_size_ / channels_, top_diff + i * code_length_, bottom_data + i * code_length_, bottom_diff + i * code_length_); } CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(ConvReLULayer); } // namespace caffe
ea56d5831c1370c96429e213acc2f133398fb1ee.cu
#include <algorithm> #include <vector> #include "caffe/layers/convrelu_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ConvReLUForward(const int channels, const int len, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, channels) { Dtype sum(0); for (int i = 0; i < len; ++i) { sum += in[index * len + i]; } for (int i = 0; i < len; ++i) { out[i + index * len] = sum > 0 ? in[i + index * len] : 0; } } } template <typename Dtype> void ConvReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int channels_ = bottom[0]->channels(); int batch_size_ = bottom[0]->num(); const int count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int code_length_ = count / batch_size_; for (int i = 0; i < batch_size_; ++i) { ConvReLUForward<Dtype><<<CAFFE_GET_BLOCKS(channels_), CAFFE_CUDA_NUM_THREADS>>>(channels_, count / batch_size_ / channels_, bottom_data + i * code_length_, top_data + i * code_length_); } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ConvReLUBackward(const int channels, const int len, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, channels) { Dtype sum(0); for (int i = 0; i < len; ++i) { sum += in_data[i + index * len]; } for (int i = 0; i < len; ++i) { out_diff[i + index * len] = sum > 0 ? 
in_diff[i + index * len] : 0; } } } template <typename Dtype> void ConvReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); int channels_ = bottom[0]->channels(); int batch_size_ = bottom[0]->num(); const int code_length_ = count / batch_size_; for (int i = 0; i < batch_size_; ++i) { ConvReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(channels_), CAFFE_CUDA_NUM_THREADS>>>(channels_, count / batch_size_ / channels_, top_diff + i * code_length_, bottom_data + i * code_length_, bottom_diff + i * code_length_); } CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(ConvReLULayer); } // namespace caffe
9609179e5535b3e413dfc3736101e0f86b10b6a9.hip
// !!! This is a file automatically generated by hipify!!! /* Shared memory speeds up performance when we need to access data frequently. Here, the 1D stencil kernel adds all its neighboring data within a radius. The C model is added to verify the stencil result on a GPU Developer: Zheming Jin */ #define LENGTH 1024 #define THREADS_PER_BLOCK 256 #define RADIUS 7 #define BLOCK_SIZE THREADS_PER_BLOCK #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> __global__ void stencil_1d(int *in, int *out) { __shared__ int temp[BLOCK_SIZE + 2 * RADIUS]; int gindex = threadIdx.x + blockIdx.x * blockDim.x; int lindex = threadIdx.x + RADIUS; // Read input elements into shared memory temp[lindex] = in[gindex]; // At both end of a block, the sliding window moves beyond the block boundary. if (threadIdx.x < RADIUS) { temp[lindex - RADIUS] = (gindex < RADIUS) ? 0 : in[gindex - RADIUS]; temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE]; } // Synchronize (ensure all the threads will be completed before continue) __syncthreads(); // Apply the 1D stencil int result = 0; for (int offset = -RADIUS ; offset <= RADIUS ; offset++) result += temp[lindex + offset]; // Store the result out[gindex] = result; } int main(void) { int size = LENGTH * sizeof(int); int pad_size = (LENGTH + RADIUS) * sizeof(int); int *a, *b; // Alloc space for host copies of a, b, c and setup input values a = (int *)malloc(pad_size); b = (int *)malloc(size); for (int i = 0; i < LENGTH+RADIUS; i++) a[i] = i; int *d_a, *d_b; // Alloc space for device copies of a, b, c hipMalloc((void **)&d_a, pad_size); hipMalloc((void **)&d_b, size); // Copy inputs to device hipMemcpy(d_a, a, pad_size, hipMemcpyHostToDevice); // Launch add() kernel on GPU hipLaunchKernelGGL(( stencil_1d) , dim3(dim3(LENGTH/THREADS_PER_BLOCK)), dim3(dim3(THREADS_PER_BLOCK)) , 0, 0, d_a, d_b); // Copy result back to host hipMemcpy(b, d_b, size, hipMemcpyDeviceToHost); // verification for (int i = 0; i < 2*RADIUS; i++) { int s = 0; for 
(int j = i; j <= i+2*RADIUS; j++) { s += j < RADIUS ? 0 : (a[j] - RADIUS); } if (s != b[i]) { printf("FAILED at %d: %d (cpu) != %d (gpu)\n", i, s, b[i]); return 1; } } for (int i = 2*RADIUS; i < LENGTH; i++) { int s = 0; for (int j = i-RADIUS; j <= i+RADIUS; j++) { s += a[j]; } if (s != b[i]) { printf("FAILED at %d: %d (cpu) != %d (gpu)\n", i, s, b[i]); return 1; } } // Cleanup free(a); free(b); hipFree(d_a); hipFree(d_b); printf("PASSED\n"); return 0; }
9609179e5535b3e413dfc3736101e0f86b10b6a9.cu
/* Shared memory speeds up performance when we need to access data frequently. Here, the 1D stencil kernel adds all its neighboring data within a radius. The C model is added to verify the stencil result on a GPU Developer: Zheming Jin */ #define LENGTH 1024 #define THREADS_PER_BLOCK 256 #define RADIUS 7 #define BLOCK_SIZE THREADS_PER_BLOCK #include <stdio.h> #include <assert.h> #include <cuda.h> __global__ void stencil_1d(int *in, int *out) { __shared__ int temp[BLOCK_SIZE + 2 * RADIUS]; int gindex = threadIdx.x + blockIdx.x * blockDim.x; int lindex = threadIdx.x + RADIUS; // Read input elements into shared memory temp[lindex] = in[gindex]; // At both end of a block, the sliding window moves beyond the block boundary. if (threadIdx.x < RADIUS) { temp[lindex - RADIUS] = (gindex < RADIUS) ? 0 : in[gindex - RADIUS]; temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE]; } // Synchronize (ensure all the threads will be completed before continue) __syncthreads(); // Apply the 1D stencil int result = 0; for (int offset = -RADIUS ; offset <= RADIUS ; offset++) result += temp[lindex + offset]; // Store the result out[gindex] = result; } int main(void) { int size = LENGTH * sizeof(int); int pad_size = (LENGTH + RADIUS) * sizeof(int); int *a, *b; // Alloc space for host copies of a, b, c and setup input values a = (int *)malloc(pad_size); b = (int *)malloc(size); for (int i = 0; i < LENGTH+RADIUS; i++) a[i] = i; int *d_a, *d_b; // Alloc space for device copies of a, b, c cudaMalloc((void **)&d_a, pad_size); cudaMalloc((void **)&d_b, size); // Copy inputs to device cudaMemcpy(d_a, a, pad_size, cudaMemcpyHostToDevice); // Launch add() kernel on GPU stencil_1d <<< dim3(LENGTH/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK) >>> (d_a, d_b); // Copy result back to host cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost); // verification for (int i = 0; i < 2*RADIUS; i++) { int s = 0; for (int j = i; j <= i+2*RADIUS; j++) { s += j < RADIUS ? 
0 : (a[j] - RADIUS); } if (s != b[i]) { printf("FAILED at %d: %d (cpu) != %d (gpu)\n", i, s, b[i]); return 1; } } for (int i = 2*RADIUS; i < LENGTH; i++) { int s = 0; for (int j = i-RADIUS; j <= i+RADIUS; j++) { s += a[j]; } if (s != b[i]) { printf("FAILED at %d: %d (cpu) != %d (gpu)\n", i, s, b[i]); return 1; } } // Cleanup free(a); free(b); cudaFree(d_a); cudaFree(d_b); printf("PASSED\n"); return 0; }
11bef6e107d158f8e683f5a56fe2bb0485f73d8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <math.h> #include <torch/extension.h> #include <cstdio> #include <sstream> #include <tuple> #include "rasterize_points/bitmask.cuh" #include "rasterize_points/rasterization_utils.cuh" namespace { // A little structure for holding details about a pixel. struct Pix { float z; // Depth of the reference point. int32_t idx; // Index of the reference point. float dist2; // Euclidean distance square to the reference point. }; __device__ inline bool operator<(const Pix& a, const Pix& b) { return a.z < b.z; } // This function checks if a pixel given by xy location pxy lies within the // point with index p and batch index n. One of the inputs is a list (q) // which contains Pixel structs with the indices of the points which intersect // with this pixel sorted by closest z distance. If the pixel pxy lies in the // point, the list (q) is updated and re-orderered in place. In addition // the auxillary variables q_size, q_max_z and q_max_idx are also modified. // This code is shared between RasterizePointsNaiveCudaKernel and // RasterizePointsFineCudaKernel. 
template <typename PointQ> __device__ void CheckPixelInsidePoint( const float* points, // (P, 3) const int p_idx, int& q_size, float& q_max_z, int& q_max_idx, PointQ& q, const float radius2, const float xf, const float yf, const int K) { const float px = points[p_idx * 3 + 0]; const float py = points[p_idx * 3 + 1]; const float pz = points[p_idx * 3 + 2]; if (pz < 0) return; // Don't render points behind the camera const float dx = xf - px; const float dy = yf - py; const float dist2 = dx * dx + dy * dy; if (dist2 < radius2) { if (q_size < K) { // Just insert it q[q_size] = {pz, p_idx, dist2}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max q[q_max_idx] = {pz, p_idx, dist2}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsNaiveCudaKernel( const float* points, // (P, 3) const int64_t* cloud_to_packed_first_idx, // (N) const int64_t* num_points_per_cloud, // (N) const float radius, const int N, const int S, const int K, int32_t* point_idxs, // (N, S, S, K) float* zbuf, // (N, S, S, K) float* pix_dists) { // (N, S, S, K) // Simple version: One thread per output pixel const int num_threads = gridDim.x * blockDim.x; const int tid = blockDim.x * blockIdx.x + threadIdx.x; const float radius2 = radius * radius; for (int i = tid; i < N * S * S; i += num_threads) { // Convert linear index to 3D index const int n = i / (S * S); // Batch index const int pix_idx = i % (S * S); // Reverse ordering of X and Y axes. 
const int yi = S - 1 - pix_idx / S; const int xi = S - 1 - pix_idx % S; const float xf = PixToNdc(xi, S); const float yf = PixToNdc(yi, S); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. // TODO(jcjohns) Abstract this out into a standalone data structure Pix q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the points. const int64_t point_start_idx = cloud_to_packed_first_idx[n]; const int64_t point_stop_idx = point_start_idx + num_points_per_cloud[n]; for (int p_idx = point_start_idx; p_idx < point_stop_idx; ++p_idx) { CheckPixelInsidePoint( points, p_idx, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, K); } BubbleSort(q, q_size); int idx = n * S * S * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { point_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist2; } } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsNaiveCuda( const torch::Tensor& points, // (P. 
3) const torch::Tensor& cloud_to_packed_first_idx, // (N) const torch::Tensor& num_points_per_cloud, // (N) const int image_size, const float radius, const int points_per_pixel) { if (points.ndimension() != 2 || points.size(1) != 3) { AT_ERROR("points must have dimensions (num_points, 3)"); } if (num_points_per_cloud.size(0) != cloud_to_packed_first_idx.size(0)) { AT_ERROR( "num_points_per_cloud must have same size first dimension as cloud_to_packed_first_idx"); } const int N = num_points_per_cloud.size(0); // batch size. const int S = image_size; const int K = points_per_pixel; if (K > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } auto int_opts = points.options().dtype(torch::kInt32); auto float_opts = points.options().dtype(torch::kFloat32); torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts); torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts); torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts); const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizePointsNaiveCudaKernel), dim3(blocks), dim3(threads), 0, 0, points.contiguous().data_ptr<float>(), cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(), num_points_per_cloud.contiguous().data_ptr<int64_t>(), radius, N, S, K, point_idxs.contiguous().data_ptr<int32_t>(), zbuf.contiguous().data_ptr<float>(), pix_dists.contiguous().data_ptr<float>()); return std::make_tuple(point_idxs, zbuf, pix_dists); } // **************************************************************************** // * COARSE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsCoarseCudaKernel( const float* points, // (P, 3) const int64_t* cloud_to_packed_first_idx, // (N) const int64_t* num_points_per_cloud, // (N) const float radius, const int N, const int P, const int S, const int bin_size, const int chunk_size, const 
int max_points_per_bin, int* points_per_bin, int* bin_points) { extern __shared__ char sbuf[]; const int M = max_points_per_bin; const int num_bins = 1 + (S - 1) / bin_size; // Integer divide round up const float half_pix = 1.0f / S; // Size of half a pixel in NDC units // This is a boolean array of shape (num_bins, num_bins, chunk_size) // stored in shared memory that will track whether each point in the chunk // falls into each bin of the image. BitMask binmask((unsigned int*)sbuf, num_bins, num_bins, chunk_size); // Have each block handle a chunk of points and build a 3D bitmask in // shared memory to mark which points hit which bins. In this first phase, // each thread processes one point at a time. After processing the chunk, // one thread is assigned per bin, and the thread counts and writes the // points for the bin out to global memory. const int chunks_per_batch = 1 + (P - 1) / chunk_size; const int num_chunks = N * chunks_per_batch; for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) { const int batch_idx = chunk / chunks_per_batch; const int chunk_idx = chunk % chunks_per_batch; const int point_start_idx = chunk_idx * chunk_size; binmask.block_clear(); // Using the batch index of the thread get the start and stop // indices for the points. const int64_t cloud_point_start_idx = cloud_to_packed_first_idx[batch_idx]; const int64_t cloud_point_stop_idx = cloud_point_start_idx + num_points_per_cloud[batch_idx]; // Have each thread handle a different point within the chunk for (int p = threadIdx.x; p < chunk_size; p += blockDim.x) { const int p_idx = point_start_idx + p; // Check if point index corresponds to the cloud in the batch given by // batch_idx. if (p_idx >= cloud_point_stop_idx || p_idx < cloud_point_start_idx) { continue; } const float px = points[p_idx * 3 + 0]; const float py = points[p_idx * 3 + 1]; const float pz = points[p_idx * 3 + 2]; if (pz < 0) continue; // Don't render points behind the camera. 
const float px0 = px - radius; const float px1 = px + radius; const float py0 = py - radius; const float py1 = py + radius; // Brute-force search over all bins; TODO something smarter? // For example we could compute the exact bin where the point falls, // then check neighboring bins. This way we wouldn't have to check // all bins (however then we might have more warp divergence?) for (int by = 0; by < num_bins; ++by) { // Get y extent for the bin. PixToNdc gives us the location of // the center of each pixel, so we need to add/subtract a half // pixel to get the true extent of the bin. // Reverse ordering of Y axis so that +Y is upwards in the image. const int yidx = num_bins - by; const float bin_y_max = PixToNdc(yidx * bin_size - 1, S) + half_pix; const float bin_y_min = PixToNdc((yidx - 1) * bin_size, S) - half_pix; const bool y_overlap = (py0 <= bin_y_max) && (bin_y_min <= py1); if (!y_overlap) { continue; } for (int bx = 0; bx < num_bins; ++bx) { // Get x extent for the bin; again we need to adjust the // output of PixToNdc by half a pixel. // Reverse ordering of x axis so that +X is left. const int xidx = num_bins - bx; const float bin_x_max = PixToNdc(xidx * bin_size - 1, S) + half_pix; const float bin_x_min = PixToNdc((xidx - 1) * bin_size, S) - half_pix; const bool x_overlap = (px0 <= bin_x_max) && (bin_x_min <= px1); if (x_overlap) { binmask.set(by, bx, p); } } } } __syncthreads(); // Now we have processed every point in the current chunk. We need to // count the number of points in each bin so we can write the indices // out to global memory. We have each thread handle a different bin. 
for (int byx = threadIdx.x; byx < num_bins * num_bins; byx += blockDim.x) { const int by = byx / num_bins; const int bx = byx % num_bins; const int count = binmask.count(by, bx); const int points_per_bin_idx = batch_idx * num_bins * num_bins + by * num_bins + bx; // This atomically increments the (global) number of points found // in the current bin, and gets the previous value of the counter; // this effectively allocates space in the bin_points array for the // points in the current chunk that fall into this bin. const int start = atomicAdd(points_per_bin + points_per_bin_idx, count); // Now loop over the binmask and write the active bits for this bin // out to bin_points. int next_idx = batch_idx * num_bins * num_bins * M + by * num_bins * M + bx * M + start; for (int p = 0; p < chunk_size; ++p) { if (binmask.get(by, bx, p)) { // TODO: Throw an error if next_idx >= M -- this means that // we got more than max_points_per_bin in this bin // TODO: check if atomicAdd is needed in line 265. bin_points[next_idx] = point_start_idx + p; next_idx++; } } } __syncthreads(); } } torch::Tensor RasterizePointsCoarseCuda( const torch::Tensor& points, // (P, 3) const torch::Tensor& cloud_to_packed_first_idx, // (N) const torch::Tensor& num_points_per_cloud, // (N) const int image_size, const float radius, const int bin_size, const int max_points_per_bin) { const int P = points.size(0); const int N = num_points_per_cloud.size(0); const int num_bins = 1 + (image_size - 1) / bin_size; // divide round up const int M = max_points_per_bin; if (points.ndimension() != 2 || points.size(1) != 3) { AT_ERROR("points must have dimensions (num_points, 3)"); } if (num_bins >= 22) { // Make sure we do not use too much shared memory. 
std::stringstream ss; ss << "Got " << num_bins << "; that's too many!"; AT_ERROR(ss.str()); } auto opts = points.options().dtype(torch::kInt32); torch::Tensor points_per_bin = torch::zeros({N, num_bins, num_bins}, opts); torch::Tensor bin_points = torch::full({N, num_bins, num_bins, M}, -1, opts); const int chunk_size = 512; const size_t shared_size = num_bins * num_bins * chunk_size / 8; const size_t blocks = 64; const size_t threads = 512; hipLaunchKernelGGL(( RasterizePointsCoarseCudaKernel), dim3(blocks), dim3(threads), shared_size, 0, points.contiguous().data_ptr<float>(), cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(), num_points_per_cloud.contiguous().data_ptr<int64_t>(), radius, N, P, image_size, bin_size, chunk_size, M, points_per_bin.contiguous().data_ptr<int32_t>(), bin_points.contiguous().data_ptr<int32_t>()); return bin_points; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsFineCudaKernel( const float* points, // (P, 3) const int32_t* bin_points, // (N, B, B, T) const float radius, const int bin_size, const int N, const int B, const int M, const int S, const int K, int32_t* point_idxs, // (N, S, S, K) float* zbuf, // (N, S, S, K) float* pix_dists) { // (N, S, S, K) // This can be more than S^2 if S is not dividable by bin_size. const int num_pixels = N * B * B * bin_size * bin_size; const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; const float radius2 = radius * radius; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from points and bin_points. 
int i = pid; const int n = i / (B * B * bin_size * bin_size); i %= B * B * bin_size * bin_size; const int by = i / (B * bin_size * bin_size); i %= B * bin_size * bin_size; const int bx = i / (bin_size * bin_size); i %= bin_size * bin_size; const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= S || xi >= S) continue; // Reverse ordering of the X and Y axis so that // in the image +Y is pointing up and +X is pointing left. const int yidx = S - 1 - yi; const int xidx = S - 1 - xi; const float xf = PixToNdc(xidx, S); const float yf = PixToNdc(yidx, S); // This part looks like the naive rasterization kernel, except we use // bin_points to only look at a subset of points already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pix q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; ++m) { const int p = bin_points[n * B * B * M + by * B * M + bx * M + m]; if (p < 0) { // bin_points uses -1 as a sentinal value continue; } CheckPixelInsidePoint( points, p, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, K); } // Now we've looked at all the points for this bin, so we can write // output for the current pixel. 
BubbleSort(q, q_size); const int pix_idx = n * S * S * K + yi * S * K + xi * K; for (int k = 0; k < q_size; ++k) { point_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist2; } } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFineCuda( const torch::Tensor& points, // (P, 3) const torch::Tensor& bin_points, const int image_size, const float radius, const int bin_size, const int points_per_pixel) { const int N = bin_points.size(0); const int B = bin_points.size(1); const int M = bin_points.size(3); const int S = image_size; const int K = points_per_pixel; if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 8"); } auto int_opts = points.options().dtype(torch::kInt32); auto float_opts = points.options().dtype(torch::kFloat32); torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts); torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts); torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts); const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizePointsFineCudaKernel), dim3(blocks), dim3(threads), 0, 0, points.contiguous().data_ptr<float>(), bin_points.contiguous().data_ptr<int32_t>(), radius, bin_size, N, B, M, S, K, point_idxs.contiguous().data_ptr<int32_t>(), zbuf.contiguous().data_ptr<float>(), pix_dists.contiguous().data_ptr<float>()); return std::make_tuple(point_idxs, zbuf, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO(T55115174) Add more documentation for backward kernel. 
__global__ void RasterizePointsBackwardCudaKernel( const float* points, // (P, 3) const int32_t* idxs, // (N, H, W, K) const int N, const int P, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_dists, // (N, H, W, K) float* grad_points) { // (P, 3) // Parallelized over each of K points per pixel, for each pixel in images of // size H * W, for each image in the batch of size N. int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = tid; i < N * H * W * K; i += num_threads) { // const int n = i / (H * W * K); // batch index (not needed). const int yxk = i % (H * W * K); const int yi = yxk / (W * K); const int xk = yxk % (W * K); const int xi = xk / K; // k = xk % K (We don't actually need k, but this would be it.) // Reverse ordering of X and Y axes. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const float xf = PixToNdc(xidx, W); const float yf = PixToNdc(yidx, H); const int p = idxs[i]; if (p < 0) continue; const float grad_dist2 = grad_dists[i]; const int p_ind = p * 3; // index into packed points tensor const float px = points[p_ind + 0]; const float py = points[p_ind + 1]; const float dx = px - xf; const float dy = py - yf; const float grad_px = 2.0f * grad_dist2 * dx; const float grad_py = 2.0f * grad_dist2 * dy; const float grad_pz = grad_zbuf[i]; atomicAdd(grad_points + p_ind + 0, grad_px); atomicAdd(grad_points + p_ind + 1, grad_py); atomicAdd(grad_points + p_ind + 2, grad_pz); } } torch::Tensor RasterizePointsBackwardCuda( const torch::Tensor& points, // (N, P, 3) const torch::Tensor& idxs, // (N, H, W, K) const torch::Tensor& grad_zbuf, // (N, H, W, K) const torch::Tensor& grad_dists) { // (N, H, W, K) const int P = points.size(0); const int N = idxs.size(0); const int H = idxs.size(1); const int W = idxs.size(2); const int K = idxs.size(3); torch::Tensor grad_points = torch::zeros({P, 3}, points.options()); const size_t blocks = 1024; const size_t 
threads = 64; hipLaunchKernelGGL(( RasterizePointsBackwardCudaKernel), dim3(blocks), dim3(threads), 0, 0, points.contiguous().data_ptr<float>(), idxs.contiguous().data_ptr<int32_t>(), N, P, H, W, K, grad_zbuf.contiguous().data_ptr<float>(), grad_dists.contiguous().data_ptr<float>(), grad_points.contiguous().data_ptr<float>()); return grad_points; }
11bef6e107d158f8e683f5a56fe2bb0485f73d8b.cu
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <math.h> #include <torch/extension.h> #include <cstdio> #include <sstream> #include <tuple> #include "rasterize_points/bitmask.cuh" #include "rasterize_points/rasterization_utils.cuh" namespace { // A little structure for holding details about a pixel. struct Pix { float z; // Depth of the reference point. int32_t idx; // Index of the reference point. float dist2; // Euclidean distance square to the reference point. }; __device__ inline bool operator<(const Pix& a, const Pix& b) { return a.z < b.z; } // This function checks if a pixel given by xy location pxy lies within the // point with index p and batch index n. One of the inputs is a list (q) // which contains Pixel structs with the indices of the points which intersect // with this pixel sorted by closest z distance. If the pixel pxy lies in the // point, the list (q) is updated and re-orderered in place. In addition // the auxillary variables q_size, q_max_z and q_max_idx are also modified. // This code is shared between RasterizePointsNaiveCudaKernel and // RasterizePointsFineCudaKernel. 
template <typename PointQ> __device__ void CheckPixelInsidePoint( const float* points, // (P, 3) const int p_idx, int& q_size, float& q_max_z, int& q_max_idx, PointQ& q, const float radius2, const float xf, const float yf, const int K) { const float px = points[p_idx * 3 + 0]; const float py = points[p_idx * 3 + 1]; const float pz = points[p_idx * 3 + 2]; if (pz < 0) return; // Don't render points behind the camera const float dx = xf - px; const float dy = yf - py; const float dist2 = dx * dx + dy * dy; if (dist2 < radius2) { if (q_size < K) { // Just insert it q[q_size] = {pz, p_idx, dist2}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max q[q_max_idx] = {pz, p_idx, dist2}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsNaiveCudaKernel( const float* points, // (P, 3) const int64_t* cloud_to_packed_first_idx, // (N) const int64_t* num_points_per_cloud, // (N) const float radius, const int N, const int S, const int K, int32_t* point_idxs, // (N, S, S, K) float* zbuf, // (N, S, S, K) float* pix_dists) { // (N, S, S, K) // Simple version: One thread per output pixel const int num_threads = gridDim.x * blockDim.x; const int tid = blockDim.x * blockIdx.x + threadIdx.x; const float radius2 = radius * radius; for (int i = tid; i < N * S * S; i += num_threads) { // Convert linear index to 3D index const int n = i / (S * S); // Batch index const int pix_idx = i % (S * S); // Reverse ordering of X and Y axes. 
const int yi = S - 1 - pix_idx / S; const int xi = S - 1 - pix_idx % S; const float xf = PixToNdc(xi, S); const float yf = PixToNdc(yi, S); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. // TODO(jcjohns) Abstract this out into a standalone data structure Pix q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the points. const int64_t point_start_idx = cloud_to_packed_first_idx[n]; const int64_t point_stop_idx = point_start_idx + num_points_per_cloud[n]; for (int p_idx = point_start_idx; p_idx < point_stop_idx; ++p_idx) { CheckPixelInsidePoint( points, p_idx, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, K); } BubbleSort(q, q_size); int idx = n * S * S * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { point_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist2; } } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsNaiveCuda( const torch::Tensor& points, // (P. 
3) const torch::Tensor& cloud_to_packed_first_idx, // (N) const torch::Tensor& num_points_per_cloud, // (N) const int image_size, const float radius, const int points_per_pixel) { if (points.ndimension() != 2 || points.size(1) != 3) { AT_ERROR("points must have dimensions (num_points, 3)"); } if (num_points_per_cloud.size(0) != cloud_to_packed_first_idx.size(0)) { AT_ERROR( "num_points_per_cloud must have same size first dimension as cloud_to_packed_first_idx"); } const int N = num_points_per_cloud.size(0); // batch size. const int S = image_size; const int K = points_per_pixel; if (K > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } auto int_opts = points.options().dtype(torch::kInt32); auto float_opts = points.options().dtype(torch::kFloat32); torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts); torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts); torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts); const size_t blocks = 1024; const size_t threads = 64; RasterizePointsNaiveCudaKernel<<<blocks, threads>>>( points.contiguous().data_ptr<float>(), cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(), num_points_per_cloud.contiguous().data_ptr<int64_t>(), radius, N, S, K, point_idxs.contiguous().data_ptr<int32_t>(), zbuf.contiguous().data_ptr<float>(), pix_dists.contiguous().data_ptr<float>()); return std::make_tuple(point_idxs, zbuf, pix_dists); } // **************************************************************************** // * COARSE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsCoarseCudaKernel( const float* points, // (P, 3) const int64_t* cloud_to_packed_first_idx, // (N) const int64_t* num_points_per_cloud, // (N) const float radius, const int N, const int P, const int S, const int bin_size, const int chunk_size, const int max_points_per_bin, int* 
points_per_bin, int* bin_points) { extern __shared__ char sbuf[]; const int M = max_points_per_bin; const int num_bins = 1 + (S - 1) / bin_size; // Integer divide round up const float half_pix = 1.0f / S; // Size of half a pixel in NDC units // This is a boolean array of shape (num_bins, num_bins, chunk_size) // stored in shared memory that will track whether each point in the chunk // falls into each bin of the image. BitMask binmask((unsigned int*)sbuf, num_bins, num_bins, chunk_size); // Have each block handle a chunk of points and build a 3D bitmask in // shared memory to mark which points hit which bins. In this first phase, // each thread processes one point at a time. After processing the chunk, // one thread is assigned per bin, and the thread counts and writes the // points for the bin out to global memory. const int chunks_per_batch = 1 + (P - 1) / chunk_size; const int num_chunks = N * chunks_per_batch; for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) { const int batch_idx = chunk / chunks_per_batch; const int chunk_idx = chunk % chunks_per_batch; const int point_start_idx = chunk_idx * chunk_size; binmask.block_clear(); // Using the batch index of the thread get the start and stop // indices for the points. const int64_t cloud_point_start_idx = cloud_to_packed_first_idx[batch_idx]; const int64_t cloud_point_stop_idx = cloud_point_start_idx + num_points_per_cloud[batch_idx]; // Have each thread handle a different point within the chunk for (int p = threadIdx.x; p < chunk_size; p += blockDim.x) { const int p_idx = point_start_idx + p; // Check if point index corresponds to the cloud in the batch given by // batch_idx. if (p_idx >= cloud_point_stop_idx || p_idx < cloud_point_start_idx) { continue; } const float px = points[p_idx * 3 + 0]; const float py = points[p_idx * 3 + 1]; const float pz = points[p_idx * 3 + 2]; if (pz < 0) continue; // Don't render points behind the camera. 
const float px0 = px - radius; const float px1 = px + radius; const float py0 = py - radius; const float py1 = py + radius; // Brute-force search over all bins; TODO something smarter? // For example we could compute the exact bin where the point falls, // then check neighboring bins. This way we wouldn't have to check // all bins (however then we might have more warp divergence?) for (int by = 0; by < num_bins; ++by) { // Get y extent for the bin. PixToNdc gives us the location of // the center of each pixel, so we need to add/subtract a half // pixel to get the true extent of the bin. // Reverse ordering of Y axis so that +Y is upwards in the image. const int yidx = num_bins - by; const float bin_y_max = PixToNdc(yidx * bin_size - 1, S) + half_pix; const float bin_y_min = PixToNdc((yidx - 1) * bin_size, S) - half_pix; const bool y_overlap = (py0 <= bin_y_max) && (bin_y_min <= py1); if (!y_overlap) { continue; } for (int bx = 0; bx < num_bins; ++bx) { // Get x extent for the bin; again we need to adjust the // output of PixToNdc by half a pixel. // Reverse ordering of x axis so that +X is left. const int xidx = num_bins - bx; const float bin_x_max = PixToNdc(xidx * bin_size - 1, S) + half_pix; const float bin_x_min = PixToNdc((xidx - 1) * bin_size, S) - half_pix; const bool x_overlap = (px0 <= bin_x_max) && (bin_x_min <= px1); if (x_overlap) { binmask.set(by, bx, p); } } } } __syncthreads(); // Now we have processed every point in the current chunk. We need to // count the number of points in each bin so we can write the indices // out to global memory. We have each thread handle a different bin. 
for (int byx = threadIdx.x; byx < num_bins * num_bins; byx += blockDim.x) { const int by = byx / num_bins; const int bx = byx % num_bins; const int count = binmask.count(by, bx); const int points_per_bin_idx = batch_idx * num_bins * num_bins + by * num_bins + bx; // This atomically increments the (global) number of points found // in the current bin, and gets the previous value of the counter; // this effectively allocates space in the bin_points array for the // points in the current chunk that fall into this bin. const int start = atomicAdd(points_per_bin + points_per_bin_idx, count); // Now loop over the binmask and write the active bits for this bin // out to bin_points. int next_idx = batch_idx * num_bins * num_bins * M + by * num_bins * M + bx * M + start; for (int p = 0; p < chunk_size; ++p) { if (binmask.get(by, bx, p)) { // TODO: Throw an error if next_idx >= M -- this means that // we got more than max_points_per_bin in this bin // TODO: check if atomicAdd is needed in line 265. bin_points[next_idx] = point_start_idx + p; next_idx++; } } } __syncthreads(); } } torch::Tensor RasterizePointsCoarseCuda( const torch::Tensor& points, // (P, 3) const torch::Tensor& cloud_to_packed_first_idx, // (N) const torch::Tensor& num_points_per_cloud, // (N) const int image_size, const float radius, const int bin_size, const int max_points_per_bin) { const int P = points.size(0); const int N = num_points_per_cloud.size(0); const int num_bins = 1 + (image_size - 1) / bin_size; // divide round up const int M = max_points_per_bin; if (points.ndimension() != 2 || points.size(1) != 3) { AT_ERROR("points must have dimensions (num_points, 3)"); } if (num_bins >= 22) { // Make sure we do not use too much shared memory. 
std::stringstream ss; ss << "Got " << num_bins << "; that's too many!"; AT_ERROR(ss.str()); } auto opts = points.options().dtype(torch::kInt32); torch::Tensor points_per_bin = torch::zeros({N, num_bins, num_bins}, opts); torch::Tensor bin_points = torch::full({N, num_bins, num_bins, M}, -1, opts); const int chunk_size = 512; const size_t shared_size = num_bins * num_bins * chunk_size / 8; const size_t blocks = 64; const size_t threads = 512; RasterizePointsCoarseCudaKernel<<<blocks, threads, shared_size>>>( points.contiguous().data_ptr<float>(), cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(), num_points_per_cloud.contiguous().data_ptr<int64_t>(), radius, N, P, image_size, bin_size, chunk_size, M, points_per_bin.contiguous().data_ptr<int32_t>(), bin_points.contiguous().data_ptr<int32_t>()); return bin_points; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsFineCudaKernel( const float* points, // (P, 3) const int32_t* bin_points, // (N, B, B, T) const float radius, const int bin_size, const int N, const int B, const int M, const int S, const int K, int32_t* point_idxs, // (N, S, S, K) float* zbuf, // (N, S, S, K) float* pix_dists) { // (N, S, S, K) // This can be more than S^2 if S is not dividable by bin_size. const int num_pixels = N * B * B * bin_size * bin_size; const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; const float radius2 = radius * radius; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from points and bin_points. 
int i = pid; const int n = i / (B * B * bin_size * bin_size); i %= B * B * bin_size * bin_size; const int by = i / (B * bin_size * bin_size); i %= B * bin_size * bin_size; const int bx = i / (bin_size * bin_size); i %= bin_size * bin_size; const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= S || xi >= S) continue; // Reverse ordering of the X and Y axis so that // in the image +Y is pointing up and +X is pointing left. const int yidx = S - 1 - yi; const int xidx = S - 1 - xi; const float xf = PixToNdc(xidx, S); const float yf = PixToNdc(yidx, S); // This part looks like the naive rasterization kernel, except we use // bin_points to only look at a subset of points already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pix q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; ++m) { const int p = bin_points[n * B * B * M + by * B * M + bx * M + m]; if (p < 0) { // bin_points uses -1 as a sentinal value continue; } CheckPixelInsidePoint( points, p, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, K); } // Now we've looked at all the points for this bin, so we can write // output for the current pixel. 
BubbleSort(q, q_size); const int pix_idx = n * S * S * K + yi * S * K + xi * K; for (int k = 0; k < q_size; ++k) { point_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist2; } } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFineCuda( const torch::Tensor& points, // (P, 3) const torch::Tensor& bin_points, const int image_size, const float radius, const int bin_size, const int points_per_pixel) { const int N = bin_points.size(0); const int B = bin_points.size(1); const int M = bin_points.size(3); const int S = image_size; const int K = points_per_pixel; if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 8"); } auto int_opts = points.options().dtype(torch::kInt32); auto float_opts = points.options().dtype(torch::kFloat32); torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts); torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts); torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts); const size_t blocks = 1024; const size_t threads = 64; RasterizePointsFineCudaKernel<<<blocks, threads>>>( points.contiguous().data_ptr<float>(), bin_points.contiguous().data_ptr<int32_t>(), radius, bin_size, N, B, M, S, K, point_idxs.contiguous().data_ptr<int32_t>(), zbuf.contiguous().data_ptr<float>(), pix_dists.contiguous().data_ptr<float>()); return std::make_tuple(point_idxs, zbuf, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO(T55115174) Add more documentation for backward kernel. 
__global__ void RasterizePointsBackwardCudaKernel( const float* points, // (P, 3) const int32_t* idxs, // (N, H, W, K) const int N, const int P, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_dists, // (N, H, W, K) float* grad_points) { // (P, 3) // Parallelized over each of K points per pixel, for each pixel in images of // size H * W, for each image in the batch of size N. int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = tid; i < N * H * W * K; i += num_threads) { // const int n = i / (H * W * K); // batch index (not needed). const int yxk = i % (H * W * K); const int yi = yxk / (W * K); const int xk = yxk % (W * K); const int xi = xk / K; // k = xk % K (We don't actually need k, but this would be it.) // Reverse ordering of X and Y axes. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const float xf = PixToNdc(xidx, W); const float yf = PixToNdc(yidx, H); const int p = idxs[i]; if (p < 0) continue; const float grad_dist2 = grad_dists[i]; const int p_ind = p * 3; // index into packed points tensor const float px = points[p_ind + 0]; const float py = points[p_ind + 1]; const float dx = px - xf; const float dy = py - yf; const float grad_px = 2.0f * grad_dist2 * dx; const float grad_py = 2.0f * grad_dist2 * dy; const float grad_pz = grad_zbuf[i]; atomicAdd(grad_points + p_ind + 0, grad_px); atomicAdd(grad_points + p_ind + 1, grad_py); atomicAdd(grad_points + p_ind + 2, grad_pz); } } torch::Tensor RasterizePointsBackwardCuda( const torch::Tensor& points, // (N, P, 3) const torch::Tensor& idxs, // (N, H, W, K) const torch::Tensor& grad_zbuf, // (N, H, W, K) const torch::Tensor& grad_dists) { // (N, H, W, K) const int P = points.size(0); const int N = idxs.size(0); const int H = idxs.size(1); const int W = idxs.size(2); const int K = idxs.size(3); torch::Tensor grad_points = torch::zeros({P, 3}, points.options()); const size_t blocks = 1024; const size_t 
threads = 64; RasterizePointsBackwardCudaKernel<<<blocks, threads>>>( points.contiguous().data_ptr<float>(), idxs.contiguous().data_ptr<int32_t>(), N, P, H, W, K, grad_zbuf.contiguous().data_ptr<float>(), grad_dists.contiguous().data_ptr<float>(), grad_points.contiguous().data_ptr<float>()); return grad_points; }
6d01b5c34f7e51b76fe693f2d40f28a7537b014b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2017-2018 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <hip/hip_runtime.h> #include <stdint.h> #include <stdio.h> static __global__ void ConvertUInt8ToUInt16Kernel(uint8_t *dpUInt8, uint16_t *dpUInt16, int nSrcPitch, int nDestPitch, int nWidth, int nHeight) { int x = blockIdx.x * blockDim.x + threadIdx.x, y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= nWidth || y >= nHeight) { return; } int destStrideInPixels = nDestPitch / (sizeof(uint16_t)); *(uchar2 *)&dpUInt16[y * destStrideInPixels + x] = uchar2{ 0, dpUInt8[y * nSrcPitch + x] }; } static __global__ void ConvertUInt16ToUInt8Kernel(uint16_t *dpUInt16, uint8_t *dpUInt8, int nSrcPitch, int nDestPitch, int nWidth, int nHeight) { int x = blockIdx.x * blockDim.x + threadIdx.x, y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= nWidth || y >= nHeight) { return; } int srcStrideInPixels = nSrcPitch / (sizeof(uint16_t)); dpUInt8[y * nDestPitch + x] = ((uchar2 *)&dpUInt16[y * srcStrideInPixels + x])->y; } void ConvertUInt8ToUInt16(uint8_t *dpUInt8, uint16_t *dpUInt16, int nSrcPitch, int nDestPitch, int nWidth, int nHeight) { dim3 blockSize(16, 16, 1); dim3 gridSize(((uint32_t)nWidth + blockSize.x - 1) / blockSize.x, ((uint32_t)nHeight + blockSize.y - 1) / blockSize.y, 1); hipLaunchKernelGGL(( ConvertUInt8ToUInt16Kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, dpUInt8, dpUInt16, nSrcPitch, nDestPitch, nWidth, nHeight); } void ConvertUInt16ToUInt8(uint16_t *dpUInt16, uint8_t *dpUInt8, int nSrcPitch, int nDestPitch, int nWidth, int nHeight) { dim3 blockSize(16, 16, 1); dim3 gridSize(((uint32_t)nWidth + blockSize.x - 1) / 
blockSize.x, ((uint32_t)nHeight + blockSize.y - 1) / blockSize.y, 1); hipLaunchKernelGGL(( ConvertUInt16ToUInt8Kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, dpUInt16, dpUInt8, nSrcPitch, nDestPitch, nWidth, nHeight); }
6d01b5c34f7e51b76fe693f2d40f28a7537b014b.cu
/* * Copyright 2017-2018 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <cuda_runtime.h> #include <stdint.h> #include <stdio.h> static __global__ void ConvertUInt8ToUInt16Kernel(uint8_t *dpUInt8, uint16_t *dpUInt16, int nSrcPitch, int nDestPitch, int nWidth, int nHeight) { int x = blockIdx.x * blockDim.x + threadIdx.x, y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= nWidth || y >= nHeight) { return; } int destStrideInPixels = nDestPitch / (sizeof(uint16_t)); *(uchar2 *)&dpUInt16[y * destStrideInPixels + x] = uchar2{ 0, dpUInt8[y * nSrcPitch + x] }; } static __global__ void ConvertUInt16ToUInt8Kernel(uint16_t *dpUInt16, uint8_t *dpUInt8, int nSrcPitch, int nDestPitch, int nWidth, int nHeight) { int x = blockIdx.x * blockDim.x + threadIdx.x, y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= nWidth || y >= nHeight) { return; } int srcStrideInPixels = nSrcPitch / (sizeof(uint16_t)); dpUInt8[y * nDestPitch + x] = ((uchar2 *)&dpUInt16[y * srcStrideInPixels + x])->y; } void ConvertUInt8ToUInt16(uint8_t *dpUInt8, uint16_t *dpUInt16, int nSrcPitch, int nDestPitch, int nWidth, int nHeight) { dim3 blockSize(16, 16, 1); dim3 gridSize(((uint32_t)nWidth + blockSize.x - 1) / blockSize.x, ((uint32_t)nHeight + blockSize.y - 1) / blockSize.y, 1); ConvertUInt8ToUInt16Kernel <<< gridSize, blockSize >>>(dpUInt8, dpUInt16, nSrcPitch, nDestPitch, nWidth, nHeight); } void ConvertUInt16ToUInt8(uint16_t *dpUInt16, uint8_t *dpUInt8, int nSrcPitch, int nDestPitch, int nWidth, int nHeight) { dim3 blockSize(16, 16, 1); dim3 gridSize(((uint32_t)nWidth + blockSize.x - 1) / blockSize.x, ((uint32_t)nHeight + blockSize.y - 1) / blockSize.y, 1); ConvertUInt16ToUInt8Kernel 
<<<gridSize, blockSize >>>(dpUInt16, dpUInt8, nSrcPitch, nDestPitch, nWidth, nHeight); }
1a71a4a9242288e1dc1c663bc003dc217087683c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <sys/time.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cudnn.h> #include <iostream> #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } int main(int argc, char * argv[] ) { // input: inputSize*inputSize*depth // kernel: kernelSize*kernelSize*depth // output: outputSize*outputSize int inputSize = 7; int depth = 3; int kernelSize = 3; int kernelNum = 3; int stride[3] = {1 , 2 , 3 }; int pad[3] = {0,0,0}; int outputSize[3]; // stridepaddingpadoutputoutputSize for(int i = 0; i < kernelNum; i++) { if((inputSize - kernelSize)%stride[i] != 0) { pad[i] = (stride[i] - ((inputSize - kernelSize)%stride[i])) / 2; } outputSize[i] = (inputSize - kernelSize + 2*pad[i] ) / stride[i] + 1; } // ============================= ========================= // ==== CPU // input:A kernel:kernel output:B float *A, *kernel[3], *B[3]; A = (float *)malloc(sizeof(float)*inputSize*inputSize*depth); for(int i = 0; i < 3; i++) { kernel[i] = (float *)malloc(sizeof(float)*kernelSize*kernelSize*depth); B[i] = (float *)malloc(sizeof(float)*outputSize[i]*outputSize[i]*depth); } // input A for(int d = 0; d < depth; d++) { for(int i=0; i<inputSize*inputSize; i++) { A[d*inputSize*inputSize + i] = i; } } // kernel for(int i = 0; i < 3; i++){ for(int j = 0; j < kernelSize*kernelSize*depth; j++) { kernel[i][j] = 1; } } // ==== GPU float *d_A, *d_kernel[3], *d_B[3]; hipMalloc((void**)&d_A,sizeof(float)*inputSize*inputSize*depth); for(int i = 0; i < 3; i++) { hipMalloc((void**)&d_kernel[i], sizeof(float)*kernelSize*kernelSize*depth); hipMalloc((void**)&d_B[i],sizeof(float)*outputSize[i]*outputSize[i]*depth); } 
hipMemcpy(d_A,A,sizeof(float)*inputSize*inputSize*depth,hipMemcpyHostToDevice); for(int i = 0; i < 3; i++) { hipMemcpy(d_kernel[i],kernel[i],sizeof(float)*kernelSize*kernelSize*depth,hipMemcpyHostToDevice); } // ========================== cuDNN =================== cudnnHandle_t cudnn[3]; for( int i = 0; i < 3; i++ ) checkCUDNN(cudnnCreate(&cudnn[i])); cudnnTensorDescriptor_t input_desc; checkCUDNN(cudnnCreateTensorDescriptor(&input_desc)); checkCUDNN(cudnnSetTensor4dDescriptor( input_desc,CUDNN_TENSOR_NCHW,CUDNN_DATA_FLOAT, 1,depth,inputSize,inputSize)); cudnnFilterDescriptor_t filter_desc; checkCUDNN(cudnnCreateFilterDescriptor(&filter_desc)); checkCUDNN(cudnnSetFilter4dDescriptor( filter_desc,CUDNN_DATA_FLOAT,CUDNN_TENSOR_NCHW, 1,depth,kernelSize,kernelSize)); cudnnConvolutionDescriptor_t conv_desc[3]; for( int i = 0; i < 3; i++) { checkCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc[i])); checkCUDNN(cudnnSetConvolution2dDescriptor( conv_desc[i], pad[i],pad[i],stride[i],stride[i],1,1, CUDNN_CONVOLUTION,CUDNN_DATA_FLOAT)); } cudnnTensorDescriptor_t output_desc[3]; for( int i = 0; i < 3; i++ ) { checkCUDNN(cudnnCreateTensorDescriptor(&output_desc[i])); checkCUDNN(cudnnSetTensor4dDescriptor( output_desc[i],CUDNN_TENSOR_NCHW,CUDNN_DATA_FLOAT, 1,1,outputSize[i],outputSize[i])); } cudnnConvolutionFwdAlgo_t algo[3]; size_t ws_size[3]; float *ws_data[3]; for( int i = 0; i < 3; i++ ) { cudnnGetConvolutionForwardAlgorithm( cudnn[i], input_desc,filter_desc,conv_desc[i],output_desc[i], CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo[i]); } for(int i = 0; i < 3; i++ ) { checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize( cudnn[i], input_desc,filter_desc,conv_desc[i],output_desc[i], algo[i],&ws_size[i])); hipMalloc((void**)&ws_data[i],ws_size[i]); } //printf("1: %d \n 2: %d \n 3: %d \n", ws_size[0],ws_size[1],ws_size[2]); struct timeval start, end; gettimeofday( &start, NULL ); float alpha = 1.0; float beta = 0.0; for(int i = 0; i < 3; i++ ) { 
checkCUDNN(cudnnConvolutionForward( cudnn[i], &alpha, input_desc,d_A, filter_desc,d_kernel[i], conv_desc[i],algo[i],ws_data[i],ws_size[i], &beta, output_desc[i],d_B[i])); } for( int i = 0; i < 3; i++ ) { hipMemcpy(B[i],d_B[i],sizeof(float)*outputSize[i]*outputSize[i]*depth,hipMemcpyDeviceToHost); } gettimeofday( &end, NULL ); int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec; //printf("Block(%d,%d) Grid(%d,%d).\n", Block.x, Block.y, Grid.x, Grid.y); printf("total time is %f ms\n", timeuse/(float)1000); FILE *b[3]; b[0] = fopen("matrixB31.m", "wb"); b[1] = fopen("matrixB32.m", "wb"); b[2] = fopen("matrixB33.m", "wb"); for(int k = 0; k < 3; k++ ) { fprintf(b[k], "B = [ \n"); for (int i = 0; i < outputSize[k]; i++) { for (int j = 0; j < outputSize[k]; j++) fprintf(b[k], "%f ", B[k][i * outputSize[k] + j]); fprintf(b[k], "\n"); } fprintf(b[k], "];"); } // ============================= ========================= free(A); hipFree(d_A); cudnnDestroyTensorDescriptor(input_desc); cudnnDestroyFilterDescriptor(filter_desc); for(int i = 0; i < 3; i++) { free(kernel[i]); free(B[i]); hipFree(d_B[i]); hipFree(d_kernel[i]); hipFree(ws_data[i]); cudnnDestroyTensorDescriptor(output_desc[i]); cudnnDestroyConvolutionDescriptor(conv_desc[i]); fclose(b[i]); } return 0; }
1a71a4a9242288e1dc1c663bc003dc217087683c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <sys/time.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cudnn.h> #include <iostream> #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } int main(int argc, char * argv[] ) { // input: inputSize*inputSize*depth // kernel: kernelSize*kernelSize*depth // output: outputSize*outputSize int inputSize = 7; int depth = 3; int kernelSize = 3; int kernelNum = 3; int stride[3] = {1 , 2 , 3 }; int pad[3] = {0,0,0}; int outputSize[3]; // 计算不同stride下需要的padding数量pad和output的规模outputSize for(int i = 0; i < kernelNum; i++) { if((inputSize - kernelSize)%stride[i] != 0) { pad[i] = (stride[i] - ((inputSize - kernelSize)%stride[i])) / 2; } outputSize[i] = (inputSize - kernelSize + 2*pad[i] ) / stride[i] + 1; } // ============================= 资源申请的初始化 ========================= // ==== CPU资源申请和初始化 // input:A kernel:kernel output:B float *A, *kernel[3], *B[3]; A = (float *)malloc(sizeof(float)*inputSize*inputSize*depth); for(int i = 0; i < 3; i++) { kernel[i] = (float *)malloc(sizeof(float)*kernelSize*kernelSize*depth); B[i] = (float *)malloc(sizeof(float)*outputSize[i]*outputSize[i]*depth); } // 初始化input A for(int d = 0; d < depth; d++) { for(int i=0; i<inputSize*inputSize; i++) { A[d*inputSize*inputSize + i] = i; } } // 初始化kernel for(int i = 0; i < 3; i++){ for(int j = 0; j < kernelSize*kernelSize*depth; j++) { kernel[i][j] = 1; } } // ==== GPU资源申请和初始化 float *d_A, *d_kernel[3], *d_B[3]; cudaMalloc((void**)&d_A,sizeof(float)*inputSize*inputSize*depth); for(int i = 0; i < 3; i++) { cudaMalloc((void**)&d_kernel[i], sizeof(float)*kernelSize*kernelSize*depth); cudaMalloc((void**)&d_B[i],sizeof(float)*outputSize[i]*outputSize[i]*depth); } 
cudaMemcpy(d_A,A,sizeof(float)*inputSize*inputSize*depth,cudaMemcpyHostToDevice); for(int i = 0; i < 3; i++) { cudaMemcpy(d_kernel[i],kernel[i],sizeof(float)*kernelSize*kernelSize*depth,cudaMemcpyHostToDevice); } // ========================== cuDNN 调用 =================== cudnnHandle_t cudnn[3]; for( int i = 0; i < 3; i++ ) checkCUDNN(cudnnCreate(&cudnn[i])); cudnnTensorDescriptor_t input_desc; checkCUDNN(cudnnCreateTensorDescriptor(&input_desc)); checkCUDNN(cudnnSetTensor4dDescriptor( input_desc,CUDNN_TENSOR_NCHW,CUDNN_DATA_FLOAT, 1,depth,inputSize,inputSize)); cudnnFilterDescriptor_t filter_desc; checkCUDNN(cudnnCreateFilterDescriptor(&filter_desc)); checkCUDNN(cudnnSetFilter4dDescriptor( filter_desc,CUDNN_DATA_FLOAT,CUDNN_TENSOR_NCHW, 1,depth,kernelSize,kernelSize)); cudnnConvolutionDescriptor_t conv_desc[3]; for( int i = 0; i < 3; i++) { checkCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc[i])); checkCUDNN(cudnnSetConvolution2dDescriptor( conv_desc[i], pad[i],pad[i],stride[i],stride[i],1,1, CUDNN_CONVOLUTION,CUDNN_DATA_FLOAT)); } cudnnTensorDescriptor_t output_desc[3]; for( int i = 0; i < 3; i++ ) { checkCUDNN(cudnnCreateTensorDescriptor(&output_desc[i])); checkCUDNN(cudnnSetTensor4dDescriptor( output_desc[i],CUDNN_TENSOR_NCHW,CUDNN_DATA_FLOAT, 1,1,outputSize[i],outputSize[i])); } cudnnConvolutionFwdAlgo_t algo[3]; size_t ws_size[3]; float *ws_data[3]; for( int i = 0; i < 3; i++ ) { cudnnGetConvolutionForwardAlgorithm( cudnn[i], input_desc,filter_desc,conv_desc[i],output_desc[i], CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo[i]); } for(int i = 0; i < 3; i++ ) { checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize( cudnn[i], input_desc,filter_desc,conv_desc[i],output_desc[i], algo[i],&ws_size[i])); cudaMalloc((void**)&ws_data[i],ws_size[i]); } //printf("1: %d \n 2: %d \n 3: %d \n", ws_size[0],ws_size[1],ws_size[2]); struct timeval start, end; gettimeofday( &start, NULL ); float alpha = 1.0; float beta = 0.0; for(int i = 0; i < 3; i++ ) { 
checkCUDNN(cudnnConvolutionForward( cudnn[i], &alpha, input_desc,d_A, filter_desc,d_kernel[i], conv_desc[i],algo[i],ws_data[i],ws_size[i], &beta, output_desc[i],d_B[i])); } for( int i = 0; i < 3; i++ ) { cudaMemcpy(B[i],d_B[i],sizeof(float)*outputSize[i]*outputSize[i]*depth,cudaMemcpyDeviceToHost); } gettimeofday( &end, NULL ); int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec; //printf("Block(%d,%d) Grid(%d,%d).\n", Block.x, Block.y, Grid.x, Grid.y); printf("total time is %f ms\n", timeuse/(float)1000); FILE *b[3]; b[0] = fopen("matrixB31.m", "wb"); b[1] = fopen("matrixB32.m", "wb"); b[2] = fopen("matrixB33.m", "wb"); for(int k = 0; k < 3; k++ ) { fprintf(b[k], "B = [ \n"); for (int i = 0; i < outputSize[k]; i++) { for (int j = 0; j < outputSize[k]; j++) fprintf(b[k], "%f ", B[k][i * outputSize[k] + j]); fprintf(b[k], "\n"); } fprintf(b[k], "];"); } // ============================= 资源释放 ========================= free(A); cudaFree(d_A); cudnnDestroyTensorDescriptor(input_desc); cudnnDestroyFilterDescriptor(filter_desc); for(int i = 0; i < 3; i++) { free(kernel[i]); free(B[i]); cudaFree(d_B[i]); cudaFree(d_kernel[i]); cudaFree(ws_data[i]); cudnnDestroyTensorDescriptor(output_desc[i]); cudnnDestroyConvolutionDescriptor(conv_desc[i]); fclose(b[i]); } return 0; }