hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
e253da75f9309d7756743642b3befda78c1c6a07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int cuda_test()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel) , dim3(1), dim3(size) , 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
else
{
//fprintf(stderr, "Cuda launch succeeded! \n");
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
if (cudaStatus != 0){
fprintf(stderr, "Cuda does not seem to be working properly.\n"); // This is not a good thing
}
else{
fprintf(stderr, "Cuda functionality test succeeded.\n"); // This is a good thing
}
return cudaStatus;
}
| e253da75f9309d7756743642b3befda78c1c6a07.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int cuda_test()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel <<<1, size >>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
else
{
//fprintf(stderr, "Cuda launch succeeded! \n");
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
if (cudaStatus != 0){
fprintf(stderr, "Cuda does not seem to be working properly.\n"); // This is not a good thing
}
else{
fprintf(stderr, "Cuda functionality test succeeded.\n"); // This is a good thing
}
return cudaStatus;
}
|
c9cdc6c433ba87055a2d44a722068b1bb465b43e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "cudaCommon.h"
/* THIS FUNCTION
We're trying to remove all these to proper functions and get rid of this routine.
*/
#define OP_SOUNDSPEED 1
#define OP_GASPRESSURE 2
#define OP_TOTALPRESSURE 3
#define OP_MAGPRESSURE 4
#define OP_TOTALANDSND 5
#define OP_WARRAYS 6
#define OP_RELAXINGFLUX 7
#define OP_SEPERATELRFLUX 8
__global__ void cukern_Soundspeed(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_GasPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_TotalPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_MagneticPressure(double *bx, double *by, double *bz, double *dout, int n);
__global__ void cukern_TotalAndSound(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *total, double *sound, double gam, int n);
__global__ void cukern_CalcWArrays(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *P, double *Cfreeze, double *rhoW, double *enerW, double *pxW, double *pyW, double *pzW, int dir, int n);
__global__ void cukern_SeperateLRFlux(double *arr, double *wArr, double *left, double *right, int n);
__global__ void cukern_PerformFlux(double *array0, double *Cfreeze, double *fluxRa, double *fluxRb, double *fluxLa, double *fluxLb, double *out, double lambda, int n);
#define BLOCKWIDTH 256
#define THREADLOOPS 1
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// Determine appropriate number of arguments for RHS
if (nrhs < 2) mexErrMsgTxt("Require at least (computation type, input argument)");
int operation = (int)*mxGetPr(prhs[0]);
dim3 blocksize; blocksize.x = BLOCKWIDTH; blocksize.y = blocksize.z = 1;
dim3 gridsize;
// Select the appropriate kernel to invoke
if((operation == OP_SOUNDSPEED) || (operation == OP_GASPRESSURE) || (operation == OP_TOTALPRESSURE)) {
if( (nlhs != 1) || (nrhs != 10)) { mexErrMsgTxt("Soundspeed operator is Cs = cudaMHDKernels(1, rho, E, px, py, pz, bx, by, bz, gamma)"); }
double gam = *mxGetPr(prhs[9]);
MGArray fluid[8];
int worked = MGA_accessMatlabArrays(prhs, 1, 8, fluid);
MGArray *dst = MGA_createReturnedArrays(plhs, 1, fluid);
gridsize.x = fluid->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < fluid->numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double *srcs[8]; pullMGAPointers(fluid, 8, 0, srcs);
//printf("%i %i %i %i %i %i\n", blocksize.x, blocksize.y, blocksize.z, gridsize.x, gridsize.y, gridsize.z);
switch(operation) {
case OP_SOUNDSPEED: hipLaunchKernelGGL(( cukern_Soundspeed), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], dst->devicePtr[0], gam, fluid->numel); break;
case OP_GASPRESSURE: hipLaunchKernelGGL(( cukern_GasPressure), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], dst->devicePtr[0], gam, fluid->numel); break;
case OP_TOTALPRESSURE:hipLaunchKernelGGL(( cukern_TotalPressure), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], dst->devicePtr[0], gam, fluid->numel); break;
}
free(dst);
} else if((operation == OP_MAGPRESSURE)) {
if( (nlhs != 1) || (nrhs != 4)) { mexErrMsgTxt("Magnetic pressure operator is Pm = cudaMHDKernels(4, bx, by, bz)"); }
MGArray mag[3];
int worked = MGA_accessMatlabArrays(prhs, 1, 3, mag);
MGArray *Pmag = MGA_createReturnedArrays(plhs, 1, mag);
gridsize.x = mag->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < mag->numel) gridsize.x++;
gridsize.y = gridsize.z =1;
hipLaunchKernelGGL(( cukern_MagneticPressure), dim3(gridsize), dim3(blocksize), 0, 0, mag[0].devicePtr[0], mag[1].devicePtr[0], mag[2].devicePtr[0], Pmag->devicePtr[0], mag->numel);
free(Pmag);
} else if((operation == OP_TOTALANDSND)) {
if( (nlhs != 2) || (nrhs != 10)) { mexErrMsgTxt("Soundspeed operator is [Ptot Cs] = cudaMHDKernels(5, rho, E, px, py, pz, bx, by, bz, gamma)"); }
double gam = *mxGetPr(prhs[9]);
MGArray fluid[8];
int worked = MGA_accessMatlabArrays(prhs, 1, 8, fluid);
gridsize.x = fluid->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < fluid->numel) gridsize.x++;
gridsize.y = gridsize.z = 1;
MGArray *out = MGA_createReturnedArrays(plhs, 2, fluid);
double *srcs[8]; pullMGAPointers(fluid, 8, 0, srcs);
hipLaunchKernelGGL(( cukern_TotalAndSound), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], out[0].devicePtr[0], out[1].devicePtr[0], gam, fluid->numel);
free(out);
} else if ((operation == OP_WARRAYS)) {
if( (nlhs != 5) || (nrhs != 12)) { mexErrMsgTxt("solving W operator is [rhoW enerW pxW pyW pzW] = cudaMHDKernels(6, rho, E, px, py, pz, bx, by, bz, P, cFreeze, direction)"); }
int dir = (int)*mxGetPr(prhs[11]);
MGArray fluid[10];
int worked = MGA_accessMatlabArrays(prhs, 1, 10, fluid);
MGArray *Wout = MGA_createReturnedArrays(plhs, 5, fluid);
gridsize.x = fluid->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < fluid->numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double *srcs[10]; pullMGAPointers(fluid, 10, 0, srcs);
double *dst[5]; pullMGAPointers(Wout, 5, 0, dst);
hipLaunchKernelGGL(( cukern_CalcWArrays), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], srcs[8], srcs[9], dst[0], dst[1], dst[2], dst[3], dst[4], dir, fluid->numel);
free(Wout);
} else if ((operation == OP_RELAXINGFLUX)) {
if( (nlhs != 1) || (nrhs != 8)) { mexErrMsgTxt("relaxing flux operator is fluxed = cudaMHDKernels(7, old, tempfreeze, right, right_shifted, left, left_shifted, lambda)"); }
double lambda = *mxGetPr(prhs[7]);
MGArray fluid[6];
int worked = MGA_accessMatlabArrays(prhs, 1, 6, fluid);
MGArray *dst = MGA_createReturnedArrays(plhs, 1, fluid);
gridsize.x = fluid->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < fluid->numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double *srcs[6]; pullMGAPointers(fluid, 6, 0, srcs);
hipLaunchKernelGGL(( cukern_PerformFlux), dim3(gridsize), dim3(blocksize), 0, 0, srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], dst->devicePtr[0], lambda, fluid->numel);
free(dst);
} else if ((operation == OP_SEPERATELRFLUX)) {
if ((nlhs != 2) || (nrhs != 3)) { mexErrMsgTxt("flux seperation operator is [Fl Fr] = cudaMHDKernels(8, array, wArray)"); }
MGArray in[2];
int worked = MGA_accessMatlabArrays(prhs, 1, 2, in);
MGArray *out = MGA_createReturnedArrays(plhs, 2, in);
gridsize.x = in->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < in->numel) gridsize.x++;
gridsize.y = gridsize.z =1;
hipLaunchKernelGGL(( cukern_SeperateLRFlux), dim3(gridsize), dim3(blocksize), 0, 0, in[0].devicePtr[0], in[1].devicePtr[0], out[0].devicePtr[0], out[1].devicePtr[0], in->numel);
free(out);
}
}
//#define KERNEL_PREAMBLE int x = THREADLOOPS*(threadIdx.x + blockDim.x*blockIdx.x); if (x >= n) {return;} int imax; ((x+THREADLOOPS) > n) ? imax = n : imax = x + THREADLOOPS; for(; x < imax; x++)
#define KERNEL_PREAMBLE int x = threadIdx.x + blockDim.x*blockIdx.x; if (x >= n) { return; }
// THIS KERNEL CALCULATES SOUNDSPEED
__global__ void cukern_Soundspeed(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
double gg1 = gam*(gam-1.0);
KERNEL_PREAMBLE
gg1 = ( (gg1*(E[x] - .5*(px[x]*px[x] + py[x]*py[x] + pz[x]*pz[x])/rho[x]) + (2.0 -.5*gg1)*(bx[x]*bx[x] + by[x]*by[x] + bz[x]*bz[x]))/rho[x] );
if(gg1 < 0.0) gg1 = 0.0;
dout[x] = sqrt(gg1);
}
// THIS KERNEL CALCULATES GAS PRESSURE
__global__ void cukern_GasPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
double pres;
KERNEL_PREAMBLE
pres = (gam-1.0)*(E[x] - .5*((px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x])/rho[x] + bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]));
if(pres < 0.0) pres = 0.0; // Deny existence of negative presure
dout[x] = pres;
}
// THIS KERNEL CALCULATES TOTAL PRESSURE
__global__ void cukern_TotalPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
double pres;
KERNEL_PREAMBLE
pres = (gam-1.0)*(E[x] - .5*((px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x])/rho[x])) + .5*(2.0-gam)*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
if(pres < 0.0) pres = 0.0;
dout[x] = pres;
}
// THIS KERNEL CALCULATES MAGNETIC PRESSURE
__global__ void cukern_MagneticPressure(double *bx, double *by, double *bz, double *dout, int n)
{
KERNEL_PREAMBLE
dout[x] = .5*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
}
// THIS KERNEL CALCULATE TOTAL PRESSURE AND SOUNDSPEED
__global__ void cukern_TotalAndSound(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *total, double *sound, double gam, int n)
{
double gg1 = gam*(gam-1.0);
double psqhf, bsqhf;
double p0;
KERNEL_PREAMBLE {
psqhf = .5*(px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x]);
bsqhf = .5*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
p0 = (gam-1.0)*(E[x] - psqhf/rho[x]) + (2.0-gam)*bsqhf;
if(p0 < 0.0) p0 = 0.0;
total[x] = p0;
p0 = ( (gg1*(E[x] - psqhf/rho[x]) + (4.0 - gg1)*bsqhf)/rho[x] );
if(p0 < 0.0) p0 = 0.0;
sound[x] = sqrt(p0);
}
}
__global__ void cukern_CalcWArrays(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *P, double *Cfreeze, double *rhoW, double *enerW, double *pxW, double *pyW, double *pzW, int dir, int n)
{
double Cinv, rhoinv;
KERNEL_PREAMBLE {
Cinv = 1.0/Cfreeze[x];
rhoinv = 1.0/rho[x];
switch(dir) {
case 1:
rhoW[x] = px[x] * Cinv;
enerW[x] = (px[x] * (E[x] + P[x]) - bx[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (px[x]*px[x]*rhoinv + P[x] - bx[x]*bx[x])*Cinv;
pyW[x] = (px[x]*py[x]*rhoinv - bx[x]*by[x])*Cinv;
pzW[x] = (px[x]*pz[x]*rhoinv - bx[x]*bz[x])*Cinv;
break;
case 2:
rhoW[x] = py[x] * Cinv;
enerW[x] = (py[x] * (E[x] + P[x]) - by[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (py[x]*px[x]*rhoinv - by[x]*bx[x])*Cinv;
pyW[x] = (py[x]*py[x]*rhoinv + P[x] - by[x]*by[x])*Cinv;
pzW[x] = (py[x]*pz[x]*rhoinv - by[x]*bz[x])*Cinv;
break;
case 3:
rhoW[x] = pz[x] * Cinv;
enerW[x] = (pz[x] * (E[x] + P[x]) - bz[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (pz[x]*px[x]*rhoinv - bz[x]*bx[x])*Cinv;
pyW[x] = (pz[x]*py[x]*rhoinv - bz[x]*by[x])*Cinv;
pzW[x] = (pz[x]*pz[x]*rhoinv + P[x] - bz[x]*bz[x])*Cinv;
break;
}
}
/*mass.wArray = mom(X).array ./ freezeSpd.array;
%--- ENERGY DENSITY ---%
ener.wArray = velocity .* (ener.array + press) - mag(X).cellMag.array .* ...
( mag(1).cellMag.array .* mom(1).array ...
+ mag(2).cellMag.array .* mom(2).array ...
+ mag(3).cellMag.array .* mom(3).array) ./ mass.array;
ener.wArray = ener.wArray ./ freezeSpd.array;
%--- MOMENTUM DENSITY ---%
for i=1:3
mom(i).wArray = (velocity .* mom(i).array + press*dirVec(i)...
- mag(X).cellMag.array .* mag(i).cellMag.array) ./ freezeSpd.array;
end*/
}
__global__ void cukern_PerformFlux(double *array0, double *Cfreeze, double *fluxRa, double *fluxRb, double *fluxLa, double *fluxLb, double *out, double lambda, int n)
{
KERNEL_PREAMBLE
out[x] = array0[x] - lambda*Cfreeze[x]*(fluxRa[x] - fluxRb[x] + fluxLa[x] - fluxLb[x]);
//v(i).store.array = v(i).array - 0.5*fluxFactor .* tempFreeze .* ...
// ( v(i).store.fluxR.array - v(i).store.fluxR.shift(X,-1) ...
// + v(i).store.fluxL.array - v(i).store.fluxL.shift(X,1) );
}
__global__ void cukern_SeperateLRFlux(double *arr, double *wArr, double *left, double *right, int n)
{
KERNEL_PREAMBLE {
left[x] = .5*(arr[x] - wArr[x]);
right[x] = .5*(arr[x] + wArr[x]);
}
}
| c9cdc6c433ba87055a2d44a722068b1bb465b43e.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "cudaCommon.h"
/* THIS FUNCTION
We're trying to remove all these to proper functions and get rid of this routine.
*/
#define OP_SOUNDSPEED 1
#define OP_GASPRESSURE 2
#define OP_TOTALPRESSURE 3
#define OP_MAGPRESSURE 4
#define OP_TOTALANDSND 5
#define OP_WARRAYS 6
#define OP_RELAXINGFLUX 7
#define OP_SEPERATELRFLUX 8
__global__ void cukern_Soundspeed(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_GasPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_TotalPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n);
__global__ void cukern_MagneticPressure(double *bx, double *by, double *bz, double *dout, int n);
__global__ void cukern_TotalAndSound(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *total, double *sound, double gam, int n);
__global__ void cukern_CalcWArrays(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *P, double *Cfreeze, double *rhoW, double *enerW, double *pxW, double *pyW, double *pzW, int dir, int n);
__global__ void cukern_SeperateLRFlux(double *arr, double *wArr, double *left, double *right, int n);
__global__ void cukern_PerformFlux(double *array0, double *Cfreeze, double *fluxRa, double *fluxRb, double *fluxLa, double *fluxLb, double *out, double lambda, int n);
#define BLOCKWIDTH 256
#define THREADLOOPS 1
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// Determine appropriate number of arguments for RHS
if (nrhs < 2) mexErrMsgTxt("Require at least (computation type, input argument)");
int operation = (int)*mxGetPr(prhs[0]);
dim3 blocksize; blocksize.x = BLOCKWIDTH; blocksize.y = blocksize.z = 1;
dim3 gridsize;
// Select the appropriate kernel to invoke
if((operation == OP_SOUNDSPEED) || (operation == OP_GASPRESSURE) || (operation == OP_TOTALPRESSURE)) {
if( (nlhs != 1) || (nrhs != 10)) { mexErrMsgTxt("Soundspeed operator is Cs = cudaMHDKernels(1, rho, E, px, py, pz, bx, by, bz, gamma)"); }
double gam = *mxGetPr(prhs[9]);
MGArray fluid[8];
int worked = MGA_accessMatlabArrays(prhs, 1, 8, fluid);
MGArray *dst = MGA_createReturnedArrays(plhs, 1, fluid);
gridsize.x = fluid->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < fluid->numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double *srcs[8]; pullMGAPointers(fluid, 8, 0, srcs);
//printf("%i %i %i %i %i %i\n", blocksize.x, blocksize.y, blocksize.z, gridsize.x, gridsize.y, gridsize.z);
switch(operation) {
case OP_SOUNDSPEED: cukern_Soundspeed<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], dst->devicePtr[0], gam, fluid->numel); break;
case OP_GASPRESSURE: cukern_GasPressure<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], dst->devicePtr[0], gam, fluid->numel); break;
case OP_TOTALPRESSURE: cukern_TotalPressure<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], dst->devicePtr[0], gam, fluid->numel); break;
}
free(dst);
} else if((operation == OP_MAGPRESSURE)) {
if( (nlhs != 1) || (nrhs != 4)) { mexErrMsgTxt("Magnetic pressure operator is Pm = cudaMHDKernels(4, bx, by, bz)"); }
MGArray mag[3];
int worked = MGA_accessMatlabArrays(prhs, 1, 3, mag);
MGArray *Pmag = MGA_createReturnedArrays(plhs, 1, mag);
gridsize.x = mag->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < mag->numel) gridsize.x++;
gridsize.y = gridsize.z =1;
cukern_MagneticPressure<<<gridsize, blocksize>>>(mag[0].devicePtr[0], mag[1].devicePtr[0], mag[2].devicePtr[0], Pmag->devicePtr[0], mag->numel);
free(Pmag);
} else if((operation == OP_TOTALANDSND)) {
if( (nlhs != 2) || (nrhs != 10)) { mexErrMsgTxt("Soundspeed operator is [Ptot Cs] = cudaMHDKernels(5, rho, E, px, py, pz, bx, by, bz, gamma)"); }
double gam = *mxGetPr(prhs[9]);
MGArray fluid[8];
int worked = MGA_accessMatlabArrays(prhs, 1, 8, fluid);
gridsize.x = fluid->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < fluid->numel) gridsize.x++;
gridsize.y = gridsize.z = 1;
MGArray *out = MGA_createReturnedArrays(plhs, 2, fluid);
double *srcs[8]; pullMGAPointers(fluid, 8, 0, srcs);
cukern_TotalAndSound<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], out[0].devicePtr[0], out[1].devicePtr[0], gam, fluid->numel);
free(out);
} else if ((operation == OP_WARRAYS)) {
if( (nlhs != 5) || (nrhs != 12)) { mexErrMsgTxt("solving W operator is [rhoW enerW pxW pyW pzW] = cudaMHDKernels(6, rho, E, px, py, pz, bx, by, bz, P, cFreeze, direction)"); }
int dir = (int)*mxGetPr(prhs[11]);
MGArray fluid[10];
int worked = MGA_accessMatlabArrays(prhs, 1, 10, fluid);
MGArray *Wout = MGA_createReturnedArrays(plhs, 5, fluid);
gridsize.x = fluid->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < fluid->numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double *srcs[10]; pullMGAPointers(fluid, 10, 0, srcs);
double *dst[5]; pullMGAPointers(Wout, 5, 0, dst);
cukern_CalcWArrays<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], srcs[6], srcs[7], srcs[8], srcs[9], dst[0], dst[1], dst[2], dst[3], dst[4], dir, fluid->numel);
free(Wout);
} else if ((operation == OP_RELAXINGFLUX)) {
if( (nlhs != 1) || (nrhs != 8)) { mexErrMsgTxt("relaxing flux operator is fluxed = cudaMHDKernels(7, old, tempfreeze, right, right_shifted, left, left_shifted, lambda)"); }
double lambda = *mxGetPr(prhs[7]);
MGArray fluid[6];
int worked = MGA_accessMatlabArrays(prhs, 1, 6, fluid);
MGArray *dst = MGA_createReturnedArrays(plhs, 1, fluid);
gridsize.x = fluid->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < fluid->numel) gridsize.x++;
gridsize.y = gridsize.z =1;
double *srcs[6]; pullMGAPointers(fluid, 6, 0, srcs);
cukern_PerformFlux<<<gridsize, blocksize>>>(srcs[0], srcs[1], srcs[2], srcs[3], srcs[4], srcs[5], dst->devicePtr[0], lambda, fluid->numel);
free(dst);
} else if ((operation == OP_SEPERATELRFLUX)) {
if ((nlhs != 2) || (nrhs != 3)) { mexErrMsgTxt("flux seperation operator is [Fl Fr] = cudaMHDKernels(8, array, wArray)"); }
MGArray in[2];
int worked = MGA_accessMatlabArrays(prhs, 1, 2, in);
MGArray *out = MGA_createReturnedArrays(plhs, 2, in);
gridsize.x = in->numel / (BLOCKWIDTH*THREADLOOPS); if(gridsize.x * (BLOCKWIDTH*THREADLOOPS) < in->numel) gridsize.x++;
gridsize.y = gridsize.z =1;
cukern_SeperateLRFlux<<<gridsize, blocksize>>>(in[0].devicePtr[0], in[1].devicePtr[0], out[0].devicePtr[0], out[1].devicePtr[0], in->numel);
free(out);
}
}
//#define KERNEL_PREAMBLE int x = THREADLOOPS*(threadIdx.x + blockDim.x*blockIdx.x); if (x >= n) {return;} int imax; ((x+THREADLOOPS) > n) ? imax = n : imax = x + THREADLOOPS; for(; x < imax; x++)
#define KERNEL_PREAMBLE int x = threadIdx.x + blockDim.x*blockIdx.x; if (x >= n) { return; }
// THIS KERNEL CALCULATES SOUNDSPEED
__global__ void cukern_Soundspeed(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
double gg1 = gam*(gam-1.0);
KERNEL_PREAMBLE
gg1 = ( (gg1*(E[x] - .5*(px[x]*px[x] + py[x]*py[x] + pz[x]*pz[x])/rho[x]) + (2.0 -.5*gg1)*(bx[x]*bx[x] + by[x]*by[x] + bz[x]*bz[x]))/rho[x] );
if(gg1 < 0.0) gg1 = 0.0;
dout[x] = sqrt(gg1);
}
// THIS KERNEL CALCULATES GAS PRESSURE
__global__ void cukern_GasPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
double pres;
KERNEL_PREAMBLE
pres = (gam-1.0)*(E[x] - .5*((px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x])/rho[x] + bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]));
if(pres < 0.0) pres = 0.0; // Deny existence of negative presure
dout[x] = pres;
}
// THIS KERNEL CALCULATES TOTAL PRESSURE
__global__ void cukern_TotalPressure(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *dout, double gam, int n)
{
double pres;
KERNEL_PREAMBLE
pres = (gam-1.0)*(E[x] - .5*((px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x])/rho[x])) + .5*(2.0-gam)*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
if(pres < 0.0) pres = 0.0;
dout[x] = pres;
}
// THIS KERNEL CALCULATES MAGNETIC PRESSURE
__global__ void cukern_MagneticPressure(double *bx, double *by, double *bz, double *dout, int n)
{
KERNEL_PREAMBLE
dout[x] = .5*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
}
// THIS KERNEL CALCULATE TOTAL PRESSURE AND SOUNDSPEED
__global__ void cukern_TotalAndSound(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *total, double *sound, double gam, int n)
{
double gg1 = gam*(gam-1.0);
double psqhf, bsqhf;
double p0;
KERNEL_PREAMBLE {
psqhf = .5*(px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x]);
bsqhf = .5*(bx[x]*bx[x]+by[x]*by[x]+bz[x]*bz[x]);
p0 = (gam-1.0)*(E[x] - psqhf/rho[x]) + (2.0-gam)*bsqhf;
if(p0 < 0.0) p0 = 0.0;
total[x] = p0;
p0 = ( (gg1*(E[x] - psqhf/rho[x]) + (4.0 - gg1)*bsqhf)/rho[x] );
if(p0 < 0.0) p0 = 0.0;
sound[x] = sqrt(p0);
}
}
__global__ void cukern_CalcWArrays(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *P, double *Cfreeze, double *rhoW, double *enerW, double *pxW, double *pyW, double *pzW, int dir, int n)
{
double Cinv, rhoinv;
KERNEL_PREAMBLE {
Cinv = 1.0/Cfreeze[x];
rhoinv = 1.0/rho[x];
switch(dir) {
case 1:
rhoW[x] = px[x] * Cinv;
enerW[x] = (px[x] * (E[x] + P[x]) - bx[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (px[x]*px[x]*rhoinv + P[x] - bx[x]*bx[x])*Cinv;
pyW[x] = (px[x]*py[x]*rhoinv - bx[x]*by[x])*Cinv;
pzW[x] = (px[x]*pz[x]*rhoinv - bx[x]*bz[x])*Cinv;
break;
case 2:
rhoW[x] = py[x] * Cinv;
enerW[x] = (py[x] * (E[x] + P[x]) - by[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (py[x]*px[x]*rhoinv - by[x]*bx[x])*Cinv;
pyW[x] = (py[x]*py[x]*rhoinv + P[x] - by[x]*by[x])*Cinv;
pzW[x] = (py[x]*pz[x]*rhoinv - by[x]*bz[x])*Cinv;
break;
case 3:
rhoW[x] = pz[x] * Cinv;
enerW[x] = (pz[x] * (E[x] + P[x]) - bz[x]*(px[x]*bx[x]+py[x]*by[x]+pz[x]*bz[x]) ) * (rhoinv*Cinv);
pxW[x] = (pz[x]*px[x]*rhoinv - bz[x]*bx[x])*Cinv;
pyW[x] = (pz[x]*py[x]*rhoinv - bz[x]*by[x])*Cinv;
pzW[x] = (pz[x]*pz[x]*rhoinv + P[x] - bz[x]*bz[x])*Cinv;
break;
}
}
/*mass.wArray = mom(X).array ./ freezeSpd.array;
%--- ENERGY DENSITY ---%
ener.wArray = velocity .* (ener.array + press) - mag(X).cellMag.array .* ...
( mag(1).cellMag.array .* mom(1).array ...
+ mag(2).cellMag.array .* mom(2).array ...
+ mag(3).cellMag.array .* mom(3).array) ./ mass.array;
ener.wArray = ener.wArray ./ freezeSpd.array;
%--- MOMENTUM DENSITY ---%
for i=1:3
mom(i).wArray = (velocity .* mom(i).array + press*dirVec(i)...
- mag(X).cellMag.array .* mag(i).cellMag.array) ./ freezeSpd.array;
end*/
}
__global__ void cukern_PerformFlux(double *array0, double *Cfreeze, double *fluxRa, double *fluxRb, double *fluxLa, double *fluxLb, double *out, double lambda, int n)
{
KERNEL_PREAMBLE
out[x] = array0[x] - lambda*Cfreeze[x]*(fluxRa[x] - fluxRb[x] + fluxLa[x] - fluxLb[x]);
//v(i).store.array = v(i).array - 0.5*fluxFactor .* tempFreeze .* ...
// ( v(i).store.fluxR.array - v(i).store.fluxR.shift(X,-1) ...
// + v(i).store.fluxL.array - v(i).store.fluxL.shift(X,1) );
}
__global__ void cukern_SeperateLRFlux(double *arr, double *wArr, double *left, double *right, int n)
{
KERNEL_PREAMBLE {
left[x] = .5*(arr[x] - wArr[x]);
right[x] = .5*(arr[x] + wArr[x]);
}
}
|
cdb08e2a286ff27862a2410e6d1c512eecda0d06.hip | // !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//main.cu: error: possible read-write race
//however, this didn't happen in the tests
//although in CUDA providing the inline keyword should still keep a copy of the function around,
//this kind of access is considered an error by ESBMC
//ps: the values from A[N-1-offset] to A[N-1] will always receive unpredictable values,
//because they access memory positions that were not initialized
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define tid threadIdx.x
#define N 2//1024
// Adds A[tid + offset] into A[tid] for the calling thread.
// NOTE: this is INTENTIONALLY racy (see the xfail header of this file):
// for 0 < offset < blockDim.x, thread t reads A[t + offset] while thread
// t + offset concurrently writes that same element.  Threads with
// tid + offset >= N also read past the initialized region.
__device__ inline void inlined(int *A, int offset)
{
	int temp = A[tid + offset];
	A[tid] += temp;
}
// Kernel entry point: forwards to the (intentionally racy) inlined() helper
// so the verifier sees the race through an inlined __device__ call.
__global__ void inline_test(int *A, int offset) {
	inlined(A, offset);
}
// Host driver for the race test: initializes N ints, launches one block of
// N threads, and copies the (unpredictable) result back.
int main(){
	int *a;
	int *dev_a;
	int size = N*sizeof(int);

	hipMalloc((void**)&dev_a, size);

	// BUGFIX: was malloc(N*size) == N*N*sizeof(int).  The host array only
	// ever holds N ints, so allocate exactly `size` bytes.
	a = (int*)malloc(size);

	for (int i = 0; i < N; i++)
		a[i] = i;

	hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);

	printf("a: ");
	//for (int i = 0; i < N; i++)
	//	printf("%d ", a[i]);

	hipLaunchKernelGGL(( inline_test), dim3(1),dim3(N), 0, 0, dev_a, 2); //you can change this offset for tests
	//ESBMC_verify_kernel_intt(inline_test, 1, N, dev_a, 2);

	hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);

	printf("\nFunction Results:\n ");
	//for (int i = 0; i < N; i++)
	//	printf("%d ", a[i]);

	free(a);
	hipFree(dev_a);

	return 0;
}
| cdb08e2a286ff27862a2410e6d1c512eecda0d06.cu | //xfail:BOOGIE_ERROR
//main.cu: error: possible read-write race
//however, this didn't happen in the tests
//although in CUDA providing the inline keyword should still keep a copy of the function around,
//this kind of access is considered an error by ESBMC
//ps: the values from A[N-1-offset] to A[N-1] will always receive unpredictable values,
//because they access memory positions that were not initialized
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#define tid threadIdx.x
#define N 2//1024
// Adds A[tid + offset] into A[tid] for the calling thread.
// NOTE: this is INTENTIONALLY racy (see the xfail header of this file):
// for 0 < offset < blockDim.x, thread t reads A[t + offset] while thread
// t + offset concurrently writes that same element.  Threads with
// tid + offset >= N also read past the initialized region.
__device__ inline void inlined(int *A, int offset)
{
	int temp = A[tid + offset];
	A[tid] += temp;
}
// Kernel entry point: forwards to the (intentionally racy) inlined() helper
// so the verifier sees the race through an inlined __device__ call.
__global__ void inline_test(int *A, int offset) {
	inlined(A, offset);
}
// Host driver for the race test: initializes N ints, launches one block of
// N threads, and copies the (unpredictable) result back.
int main(){
	int *a;
	int *dev_a;
	int size = N*sizeof(int);

	cudaMalloc((void**)&dev_a, size);

	// BUGFIX: was malloc(N*size) == N*N*sizeof(int).  The host array only
	// ever holds N ints, so allocate exactly `size` bytes.
	a = (int*)malloc(size);

	for (int i = 0; i < N; i++)
		a[i] = i;

	cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);

	printf("a: ");
	//for (int i = 0; i < N; i++)
	//	printf("%d ", a[i]);

	inline_test<<<1,N>>>(dev_a, 2); //you can change this offset for tests
	//ESBMC_verify_kernel_intt(inline_test, 1, N, dev_a, 2);

	cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);

	printf("\nFunction Results:\n ");
	//for (int i = 0; i < N; i++)
	//	printf("%d ", a[i]);

	free(a);
	cudaFree(dev_a);

	return 0;
}
|
12fcb2a48309f1969c975479dd30860b0068735a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix Multiplication in CUDA */
#include<stdio.h>
// Dense N x N matrix multiply, C = A * B, row-major; one thread computes one
// output element.  NOTE(review): there is no bounds guard, so the launch grid
// must tile N exactly (the host launches N/8 blocks of 8x8 threads).
__global__ void matrix_mul(float* ad, float* bd, float* cd, int N) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    float acc = 0.0f;
    for (int k = 0; k < N; ++k) {
        acc += ad[row * N + k] * bd[k * N + col];
    }
    cd[row * N + col] = acc;
}
// Multiplies two N x N all-ones matrices on the GPU and prints A, B and C
// (every element of C should equal N).
int main() {
    int N = 8, i, j; //N == size of square matrix
    float* a, * b, * c;
    float* ad, * bd, * cd;
    FILE* f;
    // NOTE(review): the file is opened but nothing is ever written to it;
    // all output goes to stdout via printf.
    f = fopen("Parallel Multiply.txt", "w");
    size_t size = sizeof(float) * N * N;
    a = (float*)malloc(size);
    b = (float*)malloc(size);
    c = (float*)malloc(size);
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            a[i * N + j] = 1.0; //(float)(i*N+j); //initializing each value with its own index
            b[i * N + j] = 1.0; //(float)(i*N+j); //random functions can be used alternatively
        }
    }
    hipMalloc(&ad, size);
    hipMalloc(&bd, size);
    hipMalloc(&cd, size);
    hipMemcpy(ad, a, size, hipMemcpyHostToDevice);
    hipMemcpy(bd, b, size, hipMemcpyHostToDevice);
    printf("\nAfter HostToDevice Memcpy\n%s\n",
        hipGetErrorString(hipGetLastError()));
    dim3 blocksize(8, 8); //each block contains 8 * 8 (=64) threads (comment fixed; it claimed 16*16)
    dim3 gridsize(N / 8, N / 8); //creating just sufficient no of blocks
    matrix_mul << <gridsize, blocksize >> > (ad, bd, cd, N);
    hipMemcpy(c, cd, sizeof(float) * N * N, hipMemcpyDeviceToHost);
    printf("\nAfter DeviceToHost Memcpy\n%s\n",
        hipGetErrorString(hipGetLastError()));
    printf("Array A was---\n");
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++)
            printf("%f ", a[i * N + j]);
        printf("\n");
    }
    printf("\nArray B was---\n");
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++)
            printf("%f ", b[i * N + j]);
        printf("\n");
    }
    printf("\nMultiplication of A and B gives C----\n");
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++)
            printf("%f ", c[i * N + j]); //if correctly computed, then all values must be N
        printf("\n");
    }
    // BUGFIX: message named "Parallel Mutiply.txt", which is not the file
    // opened above.
    printf("\nYou can see output in Parallel Multiply.txt file in project directory");
    fclose(f);
    // BUGFIX: the three device buffers were leaked.
    hipFree(ad);
    hipFree(bd);
    hipFree(cd);
    free(a);
    free(b);
    free(c);
    return 0; // BUGFIX: was `return 1`, which signals failure to the OS
}
/*
After HostToDevice Memcpy
no error
After DeviceToHost Memcpy
no error
Array A was---
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
Array B was---
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
Multiplication of A and B gives C----
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
You can see output in Parallel Mutiply.txt file in project directory
C:\Users\Vrushil\Desktop\ProgramSupplier\HPC running\Assignment2\matrix_mult_console\x64\Debug\matrix_mult_console.exe (process 15256) exited with code 1.
Press any key to close this window . . .
*/
| 12fcb2a48309f1969c975479dd30860b0068735a.cu | /* Matrix Multiplication in CUDA */
#include<stdio.h>
// Dense N x N matrix multiply, C = A * B, row-major; one thread computes one
// output element.  NOTE(review): there is no bounds guard, so the launch grid
// must tile N exactly (the host launches N/8 blocks of 8x8 threads).
__global__ void matrix_mul(float* ad, float* bd, float* cd, int N) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    float acc = 0.0f;
    for (int k = 0; k < N; ++k) {
        acc += ad[row * N + k] * bd[k * N + col];
    }
    cd[row * N + col] = acc;
}
// Multiplies two N x N all-ones matrices on the GPU and prints A, B and C
// (every element of C should equal N).
int main() {
    int N = 8, i, j; //N == size of square matrix
    float* a, * b, * c;
    float* ad, * bd, * cd;
    FILE* f;
    // NOTE(review): the file is opened but nothing is ever written to it;
    // all output goes to stdout via printf.
    f = fopen("Parallel Multiply.txt", "w");
    size_t size = sizeof(float) * N * N;
    a = (float*)malloc(size);
    b = (float*)malloc(size);
    c = (float*)malloc(size);
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            a[i * N + j] = 1.0; //(float)(i*N+j); //initializing each value with its own index
            b[i * N + j] = 1.0; //(float)(i*N+j); //random functions can be used alternatively
        }
    }
    cudaMalloc(&ad, size);
    cudaMalloc(&bd, size);
    cudaMalloc(&cd, size);
    cudaMemcpy(ad, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, size, cudaMemcpyHostToDevice);
    printf("\nAfter HostToDevice Memcpy\n%s\n",
        cudaGetErrorString(cudaGetLastError()));
    dim3 blocksize(8, 8); //each block contains 8 * 8 (=64) threads (comment fixed; it claimed 16*16)
    dim3 gridsize(N / 8, N / 8); //creating just sufficient no of blocks
    matrix_mul << <gridsize, blocksize >> > (ad, bd, cd, N);
    cudaMemcpy(c, cd, sizeof(float) * N * N, cudaMemcpyDeviceToHost);
    printf("\nAfter DeviceToHost Memcpy\n%s\n",
        cudaGetErrorString(cudaGetLastError()));
    printf("Array A was---\n");
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++)
            printf("%f ", a[i * N + j]);
        printf("\n");
    }
    printf("\nArray B was---\n");
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++)
            printf("%f ", b[i * N + j]);
        printf("\n");
    }
    printf("\nMultiplication of A and B gives C----\n");
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++)
            printf("%f ", c[i * N + j]); //if correctly computed, then all values must be N
        printf("\n");
    }
    // BUGFIX: message named "Parallel Mutiply.txt", which is not the file
    // opened above.
    printf("\nYou can see output in Parallel Multiply.txt file in project directory");
    fclose(f);
    // BUGFIX: the three device buffers were leaked.
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
    free(a);
    free(b);
    free(c);
    return 0; // BUGFIX: was `return 1`, which signals failure to the OS
}
/*
After HostToDevice Memcpy
no error
After DeviceToHost Memcpy
no error
Array A was---
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
Array B was---
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
Multiplication of A and B gives C----
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000 8.000000
You can see output in Parallel Mutiply.txt file in project directory
C:\Users\Vrushil\Desktop\ProgramSupplier\HPC running\Assignment2\matrix_mult_console\x64\Debug\matrix_mult_console.exe (process 15256) exited with code 1.
Press any key to close this window . . .
*/
|
a61f08977b361aa9ac6377e7fe7c892e01581820.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// // include files //
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//#include <cutil_inline.h>
//
// kernel routine
//
// Each thread writes its within-block index into its global slot of x,
// so after the launch x = {0..blockDim.x-1, 0..blockDim.x-1, ...}.
__global__ void my_first_kernel(float *x)
{
  x[threadIdx.x + blockDim.x*blockIdx.x] = (float) threadIdx.x;
}
//
// main code
//
// Allocates a small float array, fills it on the GPU with my_first_kernel,
// and prints the result.  BUGFIX: the original checked no HIP call at all;
// basic error checks added so failures are reported instead of printing
// garbage.
int main(int argc, char **argv)
{
  float *h_x, *d_x;
  int   nblocks, nthreads, nsize, n;

  // set number of blocks, and threads per block
  nblocks  = 2;
  nthreads = 8;
  nsize    = nblocks*nthreads ;

  // allocate memory for array
  h_x = (float *)malloc(nsize*sizeof(float));
  if (h_x == NULL) {
    fprintf(stderr, "host malloc failed\n");
    return 1;
  }
  if (hipMalloc((void **)&d_x, nsize*sizeof(float)) != hipSuccess) {
    fprintf(stderr, "hipMalloc failed\n");
    free(h_x);
    return 1;
  }

  // execute kernel
  hipLaunchKernelGGL(( my_first_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_x);
  if (hipGetLastError() != hipSuccess) {
    fprintf(stderr, "kernel launch failed\n");
  }

  // copy back results and print them out
  if (hipMemcpy(h_x,d_x,nsize*sizeof(float),hipMemcpyDeviceToHost) != hipSuccess) {
    fprintf(stderr, "hipMemcpy failed\n");
  }

  for ( n=0; n<nsize; n++ ) printf(" n, x = %d %f \n",n,h_x[n]);

  // free memory
  hipFree(d_x);
  free(h_x);

  return 0;
}
| a61f08977b361aa9ac6377e7fe7c892e01581820.cu | // // include files //
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//#include <cutil_inline.h>
//
// kernel routine
//
// Each thread writes its within-block index into its global slot of x,
// so after the launch x = {0..blockDim.x-1, 0..blockDim.x-1, ...}.
__global__ void my_first_kernel(float *x)
{
  x[threadIdx.x + blockDim.x*blockIdx.x] = (float) threadIdx.x;
}
//
// main code
//
// Allocates a small float array, fills it on the GPU with my_first_kernel,
// and prints the result.  BUGFIX: the original checked no CUDA call at all;
// basic error checks added so failures are reported instead of printing
// garbage.
int main(int argc, char **argv)
{
  float *h_x, *d_x;
  int   nblocks, nthreads, nsize, n;

  // set number of blocks, and threads per block
  nblocks  = 2;
  nthreads = 8;
  nsize    = nblocks*nthreads ;

  // allocate memory for array
  h_x = (float *)malloc(nsize*sizeof(float));
  if (h_x == NULL) {
    fprintf(stderr, "host malloc failed\n");
    return 1;
  }
  if (cudaMalloc((void **)&d_x, nsize*sizeof(float)) != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed\n");
    free(h_x);
    return 1;
  }

  // execute kernel
  my_first_kernel<<<nblocks,nthreads>>>(d_x);
  if (cudaGetLastError() != cudaSuccess) {
    fprintf(stderr, "kernel launch failed\n");
  }

  // copy back results and print them out
  if (cudaMemcpy(h_x,d_x,nsize*sizeof(float),cudaMemcpyDeviceToHost) != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed\n");
  }

  for ( n=0; n<nsize; n++ ) printf(" n, x = %d %f \n",n,h_x[n]);

  // free memory
  cudaFree(d_x);
  free(h_x);

  return 0;
}
|
2a0750e2be5ea7d406b49b226e9bebec19ac8528.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION &
AFFILIATES. All rights reserved. SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/plugin/group_norm_op_plugin.h"
#include "paddle/phi/kernels/group_norm_kernel.h"
#include <hipcub/hipcub.hpp>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
using DataLayout = phi::DataLayout;
static inline int32_t divUp(int32_t m, int32_t n) { return (m + n - 1) / n; }
// Logistic function 1 / (1 + e^-x), usable from host and device.
static inline __device__ __host__ float sigmoid(float x) {
  const float e = expf(-x);
  return 1.F / (1.F + e);
}
// Partial per-group statistics carried through the CUB segmented scan in the
// groupNorm sum kernels below.
struct GroupSums {
  // Is it the 1st element of the group?
  int32_t flag;
  // The sum.
  float sum;
  // The sum of squares.
  float sumSq;
};
// Combiner for the segmented inclusive scan: `flag` marks the first element
// of a group, which resets the running sums instead of accumulating into
// them; flags themselves accumulate.
struct GroupSumsOp {
  inline __device__ GroupSums operator()(GroupSums const &a,
                                         GroupSums const &b) {
    GroupSums dst;
    if (b.flag) {
      // b begins a new group: discard a's partial sums.
      dst.sum = b.sum;
      dst.sumSq = b.sumSq;
    } else {
      dst.sum = a.sum + b.sum;
      dst.sumSq = a.sumSq + b.sumSq;
    }
    dst.flag = a.flag + b.flag;
    return dst;
  }
};
// Returns the largest divisor of n that is strictly less than
// maxAllowedDivisor, or -1 if no such divisor exists.
//
// Divisors come in pairs (i, n / i), so scanning i up to sqrt(n) visits every
// divisor once.  BUGFIX/cleanup: the loop bound was `i <= std::sqrt(n)`,
// which re-evaluates a floating-point sqrt every iteration and mixes int and
// double in the comparison; `i <= n / i` is the equivalent overflow-safe
// integer condition.
static int32_t findMaxDivisor(int32_t n, int32_t maxAllowedDivisor) {
  int32_t maxDivisor = -1;
  for (int32_t i = 1; i <= n / i; i++) {
    if (n % i == 0) {
      int32_t divisor1 = n / i;
      int32_t divisor2 = i;

      if (divisor1 > maxDivisor && divisor1 < maxAllowedDivisor) {
        maxDivisor = divisor1;
      }
      if (divisor2 > maxDivisor && divisor2 < maxAllowedDivisor) {
        maxDivisor = divisor2;
      }
    }
  }
  return maxDivisor;
}
// Pass 1 of int8 (NCHW32-layout) group norm: dequantizes int8x2 loads into
// half2, accumulates per-thread (sum, sum^2), reduces them per group inside
// the block with a CUB segmented inclusive scan, and accumulates the group
// partials into params.redBuffer with global atomics.
// NOTE(review): assumes redBuffer was zeroed by the caller before launch
// (atomicAdd accumulates) — the fp16 enqueue path does a memsetAsync first.
// Launch shape (see groupNormNCHW32SumQDQ): blockDim.x == cPerBlock / 2,
// grid = (channel blocks, HW blocks, batch); each thread owns 2 channels.
template <int tTHREADS_PER_BLOCK>
__global__ void groupNormNCHW32SumKernelQDQ(
    const GroupNormNHWCParams<__half> params) {
  // The object in charge of doing the sums for the different blocks.
  typedef hipcub::BlockScan<GroupSums, tTHREADS_PER_BLOCK> BlockScan;

  // Allocate shared memory for BlockScan.
  __shared__ typename BlockScan::TempStorage tempStorage;
  // Allocate shared memory for the groups. We could reduce the amount of shared
  // memory reserved.
  __shared__ float2 smem[tTHREADS_PER_BLOCK];

  // The instance in the batch.
  int32_t ni = blockIdx.z;
  // The channel loaded by that thread (2 channels per thread for int8x2).
  int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2;

  // The first activation loaded by that block.
  int32_t hwBegin = blockIdx.y * params.hwPerBlock;
  // The last activation loaded by that block.
  int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);

  // The sums.
  float sum = 0.F;
  float sumSq = 0.F;

  const int8_t *src_ptr = reinterpret_cast<const int8_t *>(params.srcX);
  // nchw32 layout
  // batch offset + channel offset
  int nc_offset = static_cast<int64_t>(ni) * params.hwc +
                  ci / 32 * params.hw * 32 + ci % 32;

  // Iterate over the activations to compute the sums.
  for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
    // The offset.
    int64_t offset = nc_offset + static_cast<int64_t>(hwi) * 32;

    // Fetch two channels per thread; out-of-range channels contribute zeros.
    __half2 h2(0, 0);
    if (ci < params.c) {
      int8_t tmp_in[2];
      *reinterpret_cast<int16_t *>(tmp_in) =
          *reinterpret_cast<int16_t const *>(&src_ptr[offset]);
      // Dequantize: half = dqScaleIn * int8.
      h2.x = params.dqScaleIn * tmp_in[0];
      h2.y = params.dqScaleIn * tmp_in[1];
    }

    // Extract the two half values.
    float2 f2 = __half22float2(h2);

    // Update the sum.
    sum += f2.x + f2.y;
    // Update the sum of squares.
    sumSq += f2.x * f2.x + f2.y * f2.y;
  }

  // The group that thread works on and the channel in the group (modulus).
  int32_t gi = threadIdx.x * 2 / params.cPerGroup;
  int32_t cj = threadIdx.x * 2 - params.cPerGroup * gi;

  // The data for the summations.
  GroupSums inp{cj == 0 ? 1 : 0, sum, sumSq};

  // Do the segmented scan.
  GroupSums out;
  BlockScan(tempStorage).InclusiveScan(inp, out, GroupSumsOp());

  // Store the results for the groups in shared memory (to produce coalesced
  // stores later).
  // 2 channels per thread
  if (cj == params.cPerGroup - 2) {
    smem[gi] = make_float2(out.sum, out.sumSq);
  }

  // Make sure the data is in shared memory.
  __syncthreads();

  // The global group index.
  int32_t gj = blockIdx.x * params.groupsPerBlock + threadIdx.x;

  // Threads that have nothing left to do, exit.
  if (threadIdx.x >= params.groupsPerBlock || gj >= params.groups) {
    return;
  }

  // The first threads (those storing to global memory, load the values).
  float2 sums = smem[threadIdx.x];

  // Store to global memory.
  atomicAdd(&params.redBuffer[(2 * ni + 0) * params.groups + gj], sums.x);
  atomicAdd(&params.redBuffer[(2 * ni + 1) * params.groups + gj], sums.y);
}
// Host launcher for the int8 NCHW32 group-norm sum pass.  Picks the template
// instantiation matching params.cPerBlock (blockDim.x = cPerBlock / 2, since
// each thread handles 2 channels).
void groupNormNCHW32SumQDQ(const GroupNormNHWCParams<__half> &params,
                           hipStream_t stream) {
  dim3 grid;

  // The number of blocks to compute all the channels.
  grid.x = divUp(params.c, params.cPerBlock);
  // The number of blocks to compute all the activations in a given instance.
  grid.y = divUp(params.hw, params.hwPerBlock);
  // The number of instances.
  grid.z = params.n;

  switch (params.cPerBlock) {
    case 320:
      hipLaunchKernelGGL(( groupNormNCHW32SumKernelQDQ<160>), dim3(grid), dim3(160), 0, stream, params);
      break;
    case 480:
      hipLaunchKernelGGL(( groupNormNCHW32SumKernelQDQ<256>), dim3(grid), dim3(256), 0, stream, params);
      break;
    case 256:
      hipLaunchKernelGGL(( groupNormNCHW32SumKernelQDQ<128>), dim3(grid), dim3(128), 0, stream, params);
      break;
    case 128:
      hipLaunchKernelGGL(( groupNormNCHW32SumKernelQDQ<64>), dim3(grid), dim3(64), 0, stream, params);
      break;
    case 8:
      hipLaunchKernelGGL(( groupNormNCHW32SumKernelQDQ<4>), dim3(grid), dim3(4), 0, stream, params);
      break;
    default:
      // BUGFIX/consistency: the switch previously fell through silently on an
      // unsupported cPerBlock, skipping the reduction entirely;
      // groupNormNCHW32ScaleQDQ throws in its default case, so do the same.
      PADDLE_THROW(
          platform::errors::Fatal("The function groupNormNCHW32SumQDQ of "
                                  "GroupNorm TRT Plugin encounter error"));
  }
}
// Pass 2 of int8 (NCHW32-layout) group norm: reads the per-group (sum, sum^2)
// produced by the sum pass from params.redBuffer, derives mean / inv-stddev,
// then normalizes each dequantized element, applies gamma/beta (and optional
// Silu), and re-quantizes the result back to int8x2.
// Same launch shape as the sum pass: 2 channels per thread, blockDim.x ==
// cPerBlock / 2.
template <int tTHREADS_PER_BLOCK>
__global__ void groupNormNCHW32ScaleKernelQDQ(
    const GroupNormNHWCParams<__half> params) {
  // The instance in the batch.
  int32_t ni = blockIdx.z;
  // The channel loaded by that thread (2 channels per thread for F16x2).
  int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2;
  // The group that thread works on and the channel in the group (modulus).
  int32_t gi = ci / params.cPerGroup;

  const int8_t *src_ptr = reinterpret_cast<const int8_t *>(params.srcX);
  int8_t *dst_ptr = reinterpret_cast<int8_t *>(params.dst);

  // Load the sum and sum of squares for the group.
  float sum = 0.F, sumSq = 0.F;
  if (gi < params.groups) {
    sum = params.redBuffer[(2 * ni + 0) * params.groups + gi];
    sumSq = params.redBuffer[(2 * ni + 1) * params.groups + gi];
  }

  // Load gamma/beta.
  float2 gammaF2, betaF2;
  if (ci < params.c) {
    gammaF2 = __half22float2(*reinterpret_cast<half2 const *>(
        reinterpret_cast<half const *>(params.gamma) + ci));
    betaF2 = __half22float2(*reinterpret_cast<half2 const *>(
        reinterpret_cast<half const *>(params.beta) + ci));
  }

  // Compute the mean.
  float mean = sum * params.invHWC;
  // Compute the variance.
  float var = sumSq * params.invHWC - (mean * mean);
  // Compute the inverse of the stddev.
  float invStdDev = rsqrtf(var + params.eps);

  // The first activation loaded by that block.
  int32_t hwBegin = blockIdx.y * params.hwPerBlock;
  // The last activation loaded by that block.
  int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);

  // nchw32 layout
  int c_offset = ci / 32 * params.hw * 32 + ci % 32;

  // Iterate over the activations to compute the sums.
  for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
    // The src/dst offset.
    int64_t offset = static_cast<int64_t>(ni) * params.hwc + c_offset +
                     static_cast<int64_t>(hwi) * 32;

    // Fetch two channels per thread.
    __half2 h2(0, 0);
    if (ci < params.c) {
      int8_t tmp_in[2];
      *reinterpret_cast<int16_t *>(tmp_in) =
          *reinterpret_cast<int16_t const *>(&src_ptr[offset]);
      // Dequantize: half = dqScaleIn * int8.
      h2.x = params.dqScaleIn * tmp_in[0];
      h2.y = params.dqScaleIn * tmp_in[1];
    }

    // Extract the two half values.
    float2 f2 = __half22float2(h2);

    // Normalize the channels.
    f2.x = (f2.x - mean) * invStdDev;
    f2.y = (f2.y - mean) * invStdDev;

    // Scale by gamma and add beta.
    f2.x = gammaF2.x * f2.x + betaF2.x;
    f2.y = gammaF2.y * f2.y + betaF2.y;

    // Apply Silu if needed.
    if (params.withSilu) {
      f2.x = f2.x * sigmoid(f2.x);
      f2.y = f2.y * sigmoid(f2.y);
    }

    // Store the scaled values, re-quantized and saturated to [-128, 127].
    if (ci < params.c) {
      int8_t tmp_in[2];
      int32_t tmpq0 = __float2int_rn(params.inv_qScale * f2.x);
      int32_t tmpq1 = __float2int_rn(params.inv_qScale * f2.y);
      tmpq0 = max(-128, tmpq0);
      tmpq0 = min(127, tmpq0);
      tmpq1 = max(-128, tmpq1);
      tmpq1 = min(127, tmpq1);
      tmp_in[0] = tmpq0;
      tmp_in[1] = tmpq1;
      *reinterpret_cast<int16_t *>(&dst_ptr[offset]) =
          *reinterpret_cast<int16_t *>(tmp_in);
    }
  }
}
// Host launcher for the int8 NCHW32 group-norm normalize/scale pass.  Picks
// the template instantiation matching params.cPerBlock (blockDim.x ==
// cPerBlock / 2, 2 channels per thread) and throws on unsupported sizes.
void groupNormNCHW32ScaleQDQ(const GroupNormNHWCParams<__half> &params,
                             hipStream_t stream) {
  dim3 grid;

  // The number of blocks to compute all the channels.
  grid.x = divUp(params.c, params.cPerBlock);
  // The number of blocks to compute all the activations in a given instance.
  grid.y = divUp(params.hw, params.hwPerBlock);
  // The number of instances.
  grid.z = params.n;

  switch (params.cPerBlock) {
    case 320:
      hipLaunchKernelGGL(( groupNormNCHW32ScaleKernelQDQ<160>), dim3(grid), dim3(160), 0, stream, params);
      break;
    case 480:
      hipLaunchKernelGGL(( groupNormNCHW32ScaleKernelQDQ<256>), dim3(grid), dim3(256), 0, stream, params);
      break;
    case 256:
      hipLaunchKernelGGL(( groupNormNCHW32ScaleKernelQDQ<128>), dim3(grid), dim3(128), 0, stream, params);
      break;
    case 128:
      hipLaunchKernelGGL(( groupNormNCHW32ScaleKernelQDQ<64>), dim3(grid), dim3(64), 0, stream, params);
      break;
    case 8:
      hipLaunchKernelGGL(( groupNormNCHW32ScaleKernelQDQ<4>), dim3(grid), dim3(4), 0, stream, params);
      break;
    default:
      PADDLE_THROW(
          platform::errors::Fatal("The function groupNormNCHW32ScaleQDQ of "
                                  "GroupNorm TRT Plugin encounter error"));
  }
}
// Uploads the host-side scale_ / bias_ vectors to device memory, converting
// them to half first when the plugin runs in fp16 mode.  Always returns 0
// (TensorRT success).
// NOTE(review): the hipMalloc/hipMemcpy return codes are not checked — on
// allocation failure enqueue() would later read null device pointers; confirm
// whether upstream relies on this being best-effort.
int GroupNormPlugin::initialize() TRT_NOEXCEPT {
  if (!with_fp16_) {
    // if use fp32
    hipMalloc(&scale_gpu_, sizeof(float) * scale_.size());
    hipMalloc(&bias_gpu_, sizeof(float) * bias_.size());
    hipMemcpy(scale_gpu_,
              scale_.data(),
              scale_.size() * sizeof(float),
              hipMemcpyHostToDevice);
    hipMemcpy(bias_gpu_,
              bias_.data(),
              bias_.size() * sizeof(float),
              hipMemcpyHostToDevice);
  } else {
    // if use fp16: convert element-wise on the host, then upload.
    std::vector<half> scale_half(scale_.size());
    std::vector<half> bias_half(bias_.size());
    for (int i = 0; i < scale_.size(); ++i) {
      scale_half[i] = static_cast<half>(scale_[i]);
    }
    for (int i = 0; i < bias_.size(); ++i) {
      bias_half[i] = static_cast<half>(bias_[i]);
    }
    hipMalloc(&scale_gpu_, sizeof(half) * scale_half.size());
    hipMalloc(&bias_gpu_, sizeof(half) * bias_half.size());
    hipMemcpy(scale_gpu_,
              scale_half.data(),
              scale_half.size() * sizeof(half),
              hipMemcpyHostToDevice);
    hipMemcpy(bias_gpu_,
              bias_half.data(),
              bias_half.size() * sizeof(half),
              hipMemcpyHostToDevice);
  }
  return 0;
}
// Accepts only linear layouts; the data type must match the precision mode
// the plugin was configured with (kHALF when with_fp16_, kFLOAT otherwise).
bool GroupNormPlugin::supportsFormat(
    nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
  const nvinfer1::DataType wanted =
      with_fp16_ ? nvinfer1::DataType::kHALF : nvinfer1::DataType::kFLOAT;
  return (type == wanted) && (format == nvinfer1::PluginFormat::kLINEAR);
}
// Group norm is shape-preserving: the single output has the first input's
// dimensions.
nvinfer1::Dims GroupNormPlugin::getOutputDimensions(
    int index, const nvinfer1::Dims *inputDims, int nbInputs) TRT_NOEXCEPT {
  return inputDims[0];
}
// Runs group norm for the static-shape plugin.  Validates that scale_/bias_
// match the channel count, carves mean/variance/temp buffers out of the
// TensorRT-provided workspace, and dispatches to the fp32 or fp16 direct
// CUDA functor.  Returns nonzero if a CUDA error is pending afterwards.
int GroupNormPlugin::enqueue(int batch_size,
                             const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
                             void **outputs,
                             void *workspace,
#else
                             void *const *outputs,
                             void *workspace,
#endif
                             hipStream_t stream) TRT_NOEXCEPT {
  const auto &input_dims = this->getInputDims(0);
  int groups = groups_;
  float eps = eps_;
  // Rebuild the full NCHW shape: TRT's static dims exclude the batch.
  std::vector<int> input_shape;
  input_shape.push_back(batch_size);
  for (int i = 0; i < input_dims.nbDims; i++) {
    input_shape.push_back(input_dims.d[i]);
  }
  const auto input_ddim = phi::make_ddim(input_shape);

  int C = input_shape[1];

  PADDLE_ENFORCE_EQ(
      C,
      scale_.size(),
      platform::errors::InvalidArgument(
          "scale's size should be equal to the channel number in groupnorm,"
          "but got channel number:%d, scale's size:%d.",
          C,
          scale_.size()));
  PADDLE_ENFORCE_EQ(
      C,
      bias_.size(),
      platform::errors::InvalidArgument(
          "bias's size should be equal to the channel number in groupnorm,"
          "but got channel number:%d, bias's size:%d.",
          C,
          bias_.size()));

  // Workspace layout: [mean | variance | temp variance], each n * groups
  // floats.
  float *mean_d = static_cast<float *>(workspace);
  float *variance_d = mean_d + input_shape[0] * groups_;
  float *temp_variance_d = variance_d + input_shape[0] * groups_;
  auto input_type = getDataType();
  if (input_type == nvinfer1::DataType::kFLOAT) {
    VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32";
    const float *input = static_cast<const float *>(inputs[0]);
    float *output = static_cast<float *>(outputs[0]);
    // NOTE(review): single template argument here, vs
    // GroupNormDirectCUDAFunctor<float, float> in the dynamic plugin —
    // presumably the second parameter defaults to the first; confirm.
    phi::GroupNormDirectCUDAFunctor<float> group_norm;
    group_norm(stream,
               input,
               input_shape,
               reinterpret_cast<float *>(bias_gpu_),
               reinterpret_cast<float *>(scale_gpu_),
               temp_variance_d,
               groups_,
               eps_,
               output,
               mean_d,
               variance_d,
               DataLayout::kNCHW);
  } else if (input_type == nvinfer1::DataType::kHALF) {
    VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp16";
    const half *input = static_cast<const half *>(inputs[0]);
    half *output = static_cast<half *>(outputs[0]);

    phi::GroupNormDirectCUDAFunctor<half, float> group_norm;

    group_norm(stream,
               input,
               input_shape,
               reinterpret_cast<const half *>(bias_gpu_),
               reinterpret_cast<const half *>(scale_gpu_),
               temp_variance_d,
               groups_,
               eps_,
               output,
               mean_d,
               variance_d,
               DataLayout::kNCHW);
  } else {
    PADDLE_THROW(platform::errors::Fatal(
        "The GroupNorm TRT Plugin's input type should be float or half."));
  }
  return hipGetLastError() != hipSuccess;
}
// Group norm is shape-preserving: the single output has exactly the first
// input's (symbolic) dimensions.
nvinfer1::DimsExprs GroupNormPluginDynamic::getOutputDimensions(
    int output_index,
    const nvinfer1::DimsExprs *inputDims,
    int nb_inputs,
    nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
  return inputDims[0];
}
// Reports which (type, format) combinations this plugin accepts at slot
// `pos`.  Input (pos 0): int8 requires CHW32; fp16 requires HWC8, or LINEAR
// when Silu fusion is off; fp32 requires LINEAR.  The output (pos > 0) must
// mirror the input's type and format exactly.
bool GroupNormPluginDynamic::supportsFormatCombination(
    int pos,
    const nvinfer1::PluginTensorDesc *in_out,
    int nb_inputs,
    int nb_outputs) TRT_NOEXCEPT {
  PADDLE_ENFORCE_NOT_NULL(
      in_out,
      platform::errors::InvalidArgument(
          "The input of groupnorm plugin shoule not be nullptr."));
  PADDLE_ENFORCE_LT(
      pos,
      nb_inputs + nb_outputs,
      platform::errors::InvalidArgument("The pos(%d) should be less than the "
                                        "num(%d) of the input and the output.",
                                        pos,
                                        nb_inputs + nb_outputs));
  const nvinfer1::PluginTensorDesc &in = in_out[pos];

  bool int8_support = in.type == nvinfer1::DataType::kINT8 &&
                      in.format == nvinfer1::PluginFormat::kCHW32;
  bool fp16_support =
      (in.type == nvinfer1::DataType::kHALF) &&
      ((!with_silu_ && in.format == nvinfer1::PluginFormat::kLINEAR) ||
       in.format == nvinfer1::PluginFormat::kHWC8);
  if (pos == 0) {
    if (with_int8_) {
      // int8 engines may still fall back to the fp16 path.
      return int8_support || fp16_support;
    } else if (with_fp16_) {
      return fp16_support;
    } else {
      return (in.type == nvinfer1::DataType::kFLOAT) &&
             (in.format == nvinfer1::TensorFormat::kLINEAR);
    }
  }
  const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
  // output
  return in.type == prev.type && in.format == prev.format;
}
// The output tensor has the same data type as the (single) input; only float
// and half inputs are accepted here.
nvinfer1::DataType GroupNormPluginDynamic::getOutputDataType(
    int index,
    const nvinfer1::DataType *input_types,
    int nb_inputs) const TRT_NOEXCEPT {
  PADDLE_ENFORCE_EQ(index,
                    0,
                    platform::errors::InvalidArgument(
                        "The groupnorm Plugin only has one input, so the "
                        "index value should be 0, but get %d.",
                        index));
  PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
                     input_types[0] == nvinfer1::DataType::kHALF),
                    true,
                    platform::errors::InvalidArgument(
                        "The input type should be half or float"));

  return input_types[0];
}
// Uploads the host-side scale_ / bias_ vectors to device memory, converting
// them to half first when the plugin runs in fp16 mode.  Always returns 0
// (TensorRT success).  Mirrors GroupNormPlugin::initialize.
// NOTE(review): hipMalloc/hipMemcpy return codes are unchecked — confirm this
// best-effort behavior is intended.
int GroupNormPluginDynamic::initialize() TRT_NOEXCEPT {
  if (with_fp16_ == false) {
    // if use fp32
    hipMalloc(&scale_gpu_, sizeof(float) * scale_.size());
    hipMalloc(&bias_gpu_, sizeof(float) * bias_.size());
    hipMemcpy(scale_gpu_,
              scale_.data(),
              scale_.size() * sizeof(float),
              hipMemcpyHostToDevice);
    hipMemcpy(bias_gpu_,
              bias_.data(),
              bias_.size() * sizeof(float),
              hipMemcpyHostToDevice);
  } else {
    // if use fp16: convert element-wise on the host, then upload.
    std::vector<half> scale_half(scale_.size());
    std::vector<half> bias_half(bias_.size());
    for (int i = 0; i < scale_.size(); ++i) {
      scale_half[i] = static_cast<half>(scale_[i]);
    }
    for (int i = 0; i < bias_.size(); ++i) {
      bias_half[i] = static_cast<half>(bias_[i]);
    }
    hipMalloc(&scale_gpu_, sizeof(half) * scale_.size());
    hipMalloc(&bias_gpu_, sizeof(half) * bias_.size());
    hipMemcpy(scale_gpu_,
              scale_half.data(),
              scale_half.size() * sizeof(half),
              hipMemcpyHostToDevice);
    hipMemcpy(bias_gpu_,
              bias_half.data(),
              bias_half.size() * sizeof(half),
              hipMemcpyHostToDevice);
  }
  return 0;
}
int GroupNormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
const auto &input_dims = input_desc[0].dims;
int groups = groups_;
float eps = eps_;
std::vector<int> input_shape;
for (int i = 0; i < input_dims.nbDims; i++) {
input_shape.push_back(input_dims.d[i]);
}
const auto input_ddim = phi::make_ddim(input_shape);
int C = input_shape[1];
int image_size = input_shape[2] * input_shape[3];
int batchSize = input_shape[0];
PADDLE_ENFORCE_EQ(
C,
scale_.size(),
platform::errors::InvalidArgument(
"scale's size should be equal to the channel number in groupnorm,"
"but got feature_size:%d, scale's size:%d.",
C,
scale_.size()));
PADDLE_ENFORCE_EQ(
C,
bias_.size(),
platform::errors::InvalidArgument(
"bias's size should be equal to the channel number in groupnorm,"
"but got feature_size:%d, bias's size:%d.",
C,
bias_.size()));
float *mean_d = static_cast<float *>(workspace);
float *variance_d = mean_d + input_shape[0] * groups_;
float *temp_variance_d = variance_d + input_shape[0] * groups_;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32";
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
phi::GroupNormDirectCUDAFunctor<float, float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<float *>(bias_gpu_),
reinterpret_cast<float *>(scale_gpu_),
temp_variance_d,
groups,
eps,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp16";
const half *input = reinterpret_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
if (input_desc[0].format == nvinfer1::PluginFormat::kLINEAR) {
phi::GroupNormDirectCUDAFunctor<half, float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<half *>(bias_gpu_),
reinterpret_cast<half *>(scale_gpu_),
temp_variance_d,
groups,
eps,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else if (input_desc[0].format == nvinfer1::PluginFormat::kHWC8) {
int32_t cPerBlock = 320;
int32_t maxBlocksPerHW = 1024;
switch (input_desc[0].dims.d[1]) {
case 960:
case 1920:
cPerBlock = 480;
break;
case 512:
case 256:
cPerBlock = 256;
break;
case 128:
cPerBlock = 128;
break;
default:
cPerBlock = 320;
}
if (cPerBlock > input_desc[0].dims.d[1]) {
cPerBlock = 8;
}
params_.withSilu = with_silu_;
params_.dst = static_cast<half *>(outputs[0]);
params_.srcX = static_cast<half const *>(inputs[0]);
params_.gamma = reinterpret_cast<half *>(scale_gpu_);
params_.beta = reinterpret_cast<half *>(bias_gpu_);
params_.redBuffer = static_cast<float *>(workspace);
params_.var_data = nullptr;
params_.n = input_desc[0].dims.d[0];
params_.h = input_desc[0].dims.d[2];
params_.w = input_desc[0].dims.d[3];
params_.c = input_desc[0].dims.d[1];
params_.groups = groups_;
params_.hw = params_.h * params_.w;
const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW);
params_.hwPerBlock = divUp(params_.hw, blocksPerHW);
params_.cPerBlock = cPerBlock;
params_.cPerGroup = params_.c / params_.groups;
params_.hwc = params_.hw * params_.c;
params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup);
params_.groupsPerBlock = cPerBlock / params_.cPerGroup;
params_.eps = eps_;
params_.var_data = nullptr;
hipMemsetAsync(params_.redBuffer,
0,
2 * sizeof(float) * params_.n * groups_,
stream);
phi::groupNormNHWCSum<half> nhwc_sum;
nhwc_sum(¶ms_, stream);
phi::groupNormNHWCScale<half> nhwc_scale;
nhwc_scale(params_, stream);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Groupnorm TRT Plugin's only support nchw or nhwc8 input"));
}
} else if (input_type == nvinfer1::DataType::kINT8) {
const int8_t *input = reinterpret_cast<const int8_t *>(inputs[0]);
int8_t *output = static_cast<int8_t *>(outputs[0]);
if (input_desc[0].format == nvinfer1::PluginFormat::kCHW32) {
int32_t cPerBlock = 320;
int32_t maxBlocksPerHW = 1024;
switch (input_desc[0].dims.d[1]) {
case 960:
case 1920:
cPerBlock = 480;
break;
case 512:
case 256:
cPerBlock = 256;
break;
case 128:
cPerBlock = 128;
break;
default:
cPerBlock = 320;
}
if (cPerBlock > input_desc[0].dims.d[1]) {
cPerBlock = 8;
}
params_.withSilu = with_silu_;
params_.dst = static_cast<half *>(outputs[0]);
params_.srcX = static_cast<half const *>(inputs[0]);
params_.gamma = scale_gpu_;
params_.beta = bias_gpu_;
params_.redBuffer = static_cast<float *>(workspace);
params_.n = input_desc[0].dims.d[0];
params_.h = input_desc[0].dims.d[2];
params_.w = input_desc[0].dims.d[3];
params_.c = input_desc[0].dims.d[1];
params_.groups = groups_;
params_.hw = params_.h * params_.w;
const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW);
params_.hwPerBlock = divUp(params_.hw, blocksPerHW);
params_.cPerBlock = cPerBlock;
params_.cPerGroup = params_.c / params_.groups;
params_.hwc = params_.hw * params_.c;
params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup);
params_.groupsPerBlock = cPerBlock / params_.cPerGroup;
CHECK_EQ(cPerBlock % params_.cPerGroup, 0);
CHECK_EQ(params_.cPerGroup % 2, 0);
params_.eps = eps_;
params_.dqScaleIn = input_desc[0].scale;
params_.inv_qScale = 1.f / output_desc[0].scale;
// Just used for TensorRTDynamicShapeGNTes in test_dynamic_engine.cc
// Do not Edit it
// params_.dqScaleIn = 1.f;
// params_.inv_qScale = 1 / 0.05f;
hipMemsetAsync(params_.redBuffer,
0,
2 * sizeof(float) * params_.n * groups_,
stream);
groupNormNCHW32SumQDQ(params_, stream);
groupNormNCHW32ScaleQDQ(params_, stream);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Groupnorm TRT Plugin only support nchw32 input"));
}
} else {
// input not float
PADDLE_THROW(platform::errors::Fatal(
"The Groupnorm TRT Plugin's only support fp32, fp16 or int8 input"));
}
return hipGetLastError() != hipSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 2a0750e2be5ea7d406b49b226e9bebec19ac8528.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION &
AFFILIATES. All rights reserved. SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/plugin/group_norm_op_plugin.h"
#include "paddle/phi/kernels/group_norm_kernel.h"
#include <cub/cub.cuh>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
using DataLayout = phi::DataLayout;
static inline int32_t divUp(int32_t m, int32_t n) { return (m + n - 1) / n; }
static inline __device__ __host__ float sigmoid(float x) {
return 1.F / (1.F + expf(-x));
}
struct GroupSums {
// Is it the 1st element of the group?
int32_t flag;
// The sum.
float sum;
// The sum of squares.
float sumSq;
};
struct GroupSumsOp {
inline __device__ GroupSums operator()(GroupSums const &a,
GroupSums const &b) {
GroupSums dst;
dst.sum = b.flag ? b.sum : (a.sum + b.sum);
dst.sumSq = b.flag ? b.sumSq : (a.sumSq + b.sumSq);
dst.flag = a.flag + b.flag;
return dst;
}
};
static int32_t findMaxDivisor(int32_t n, int32_t maxAllowedDivisor) {
int32_t maxDivisor = -1;
for (int32_t i = 1; i <= std::sqrt(n); i++) {
if (n % i == 0) {
int32_t divisor1 = n / i;
int32_t divisor2 = i;
if (divisor1 > maxDivisor && divisor1 < maxAllowedDivisor) {
maxDivisor = divisor1;
}
if (divisor2 > maxDivisor && divisor2 < maxAllowedDivisor) {
maxDivisor = divisor2;
}
}
}
return maxDivisor;
}
template <int tTHREADS_PER_BLOCK>
__global__ void groupNormNCHW32SumKernelQDQ(
const GroupNormNHWCParams<__half> params) {
// The object in charge of doing the sums for the different blocks.
typedef cub::BlockScan<GroupSums, tTHREADS_PER_BLOCK> BlockScan;
// Allocate shared memory for BlockScan.
__shared__ typename BlockScan::TempStorage tempStorage;
// Allocate shared memory for the groups. We could reduce the amount of shared
// memory reserved.
__shared__ float2 smem[tTHREADS_PER_BLOCK];
// The instance in the batch.
int32_t ni = blockIdx.z;
// The channel loaded by that thread (2 channels per thread for int8x2).
int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2;
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
// The sums.
float sum = 0.F;
float sumSq = 0.F;
const int8_t *src_ptr = reinterpret_cast<const int8_t *>(params.srcX);
// nchw32 layout
// batch offset + channel offset
int nc_offset = static_cast<int64_t>(ni) * params.hwc +
ci / 32 * params.hw * 32 + ci % 32;
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The offset.
int64_t offset = nc_offset + static_cast<int64_t>(hwi) * 32;
// Fetch two channels per thread.
__half2 h2(0, 0);
if (ci < params.c) {
int8_t tmp_in[2];
*reinterpret_cast<int16_t *>(tmp_in) =
*reinterpret_cast<int16_t const *>(&src_ptr[offset]);
h2.x = params.dqScaleIn * tmp_in[0];
h2.y = params.dqScaleIn * tmp_in[1];
}
// Extract the two half values.
float2 f2 = __half22float2(h2);
// Update the sum.
sum += f2.x + f2.y;
// Update the sum of squares.
sumSq += f2.x * f2.x + f2.y * f2.y;
}
// The group that thread works on and the channel in the group (modulus).
int32_t gi = threadIdx.x * 2 / params.cPerGroup;
int32_t cj = threadIdx.x * 2 - params.cPerGroup * gi;
// The data for the summations.
GroupSums inp{cj == 0 ? 1 : 0, sum, sumSq};
// Do the segmented scan.
GroupSums out;
BlockScan(tempStorage).InclusiveScan(inp, out, GroupSumsOp());
// Store the results for the groups in shared memory (to produce coalesced
// stores later).
// 2 channels per thread
if (cj == params.cPerGroup - 2) {
smem[gi] = make_float2(out.sum, out.sumSq);
}
// Make sure the data is in shared memory.
__syncthreads();
// The global group index.
int32_t gj = blockIdx.x * params.groupsPerBlock + threadIdx.x;
// Threads that have nothing left to do, exit.
if (threadIdx.x >= params.groupsPerBlock || gj >= params.groups) {
return;
}
// The first threads (those storing to global memory, load the values).
float2 sums = smem[threadIdx.x];
// Store to global memory.
atomicAdd(¶ms.redBuffer[(2 * ni + 0) * params.groups + gj], sums.x);
atomicAdd(¶ms.redBuffer[(2 * ni + 1) * params.groups + gj], sums.y);
}
void groupNormNCHW32SumQDQ(const GroupNormNHWCParams<__half> ¶ms,
cudaStream_t stream) {
dim3 grid;
// The number of blocks to compute all the channels.
grid.x = divUp(params.c, params.cPerBlock);
// The number of blocks to compute all the activations in a given instance.
grid.y = divUp(params.hw, params.hwPerBlock);
// The number of instances.
grid.z = params.n;
switch (params.cPerBlock) {
case 320:
groupNormNCHW32SumKernelQDQ<160><<<grid, 160, 0, stream>>>(params);
break;
case 480:
groupNormNCHW32SumKernelQDQ<256><<<grid, 256, 0, stream>>>(params);
break;
case 256:
groupNormNCHW32SumKernelQDQ<128><<<grid, 128, 0, stream>>>(params);
break;
case 128:
groupNormNCHW32SumKernelQDQ<64><<<grid, 64, 0, stream>>>(params);
break;
case 8:
groupNormNCHW32SumKernelQDQ<4><<<grid, 4, 0, stream>>>(params);
break;
}
}
template <int tTHREADS_PER_BLOCK>
__global__ void groupNormNCHW32ScaleKernelQDQ(
const GroupNormNHWCParams<__half> params) {
// The instance in the batch.
int32_t ni = blockIdx.z;
// The channel loaded by that thread (2 channels per thread for F16x2).
int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2;
// The group that thread works on and the channel in the group (modulus).
int32_t gi = ci / params.cPerGroup;
const int8_t *src_ptr = reinterpret_cast<const int8_t *>(params.srcX);
int8_t *dst_ptr = reinterpret_cast<int8_t *>(params.dst);
// Load the sum and sum of squares for the group.
float sum = 0.F, sumSq = 0.F;
if (gi < params.groups) {
sum = params.redBuffer[(2 * ni + 0) * params.groups + gi];
sumSq = params.redBuffer[(2 * ni + 1) * params.groups + gi];
}
// Load gamma/beta.
float2 gammaF2, betaF2;
if (ci < params.c) {
gammaF2 = __half22float2(*reinterpret_cast<half2 const *>(
reinterpret_cast<half const *>(params.gamma) + ci));
betaF2 = __half22float2(*reinterpret_cast<half2 const *>(
reinterpret_cast<half const *>(params.beta) + ci));
}
// Compute the mean.
float mean = sum * params.invHWC;
// Compute the variance.
float var = sumSq * params.invHWC - (mean * mean);
// Compute the inverse of the stddev.
float invStdDev = rsqrtf(var + params.eps);
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
// nchw32 layout
int c_offset = ci / 32 * params.hw * 32 + ci % 32;
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The src/dst offset.
int64_t offset = static_cast<int64_t>(ni) * params.hwc + c_offset +
static_cast<int64_t>(hwi) * 32;
// Fetch two channels per thread.
__half2 h2(0, 0);
if (ci < params.c) {
int8_t tmp_in[2];
*reinterpret_cast<int16_t *>(tmp_in) =
*reinterpret_cast<int16_t const *>(&src_ptr[offset]);
h2.x = params.dqScaleIn * tmp_in[0];
h2.y = params.dqScaleIn * tmp_in[1];
}
// Extract the two half values.
float2 f2 = __half22float2(h2);
// Normalize the channels.
f2.x = (f2.x - mean) * invStdDev;
f2.y = (f2.y - mean) * invStdDev;
// Scale by gamma and add beta.
f2.x = gammaF2.x * f2.x + betaF2.x;
f2.y = gammaF2.y * f2.y + betaF2.y;
// Apply Silu if needed.
if (params.withSilu) {
f2.x = f2.x * sigmoid(f2.x);
f2.y = f2.y * sigmoid(f2.y);
}
// Store the scaled values.
if (ci < params.c) {
int8_t tmp_in[2];
int32_t tmpq0 = __float2int_rn(params.inv_qScale * f2.x);
int32_t tmpq1 = __float2int_rn(params.inv_qScale * f2.y);
tmpq0 = max(-128, tmpq0);
tmpq0 = min(127, tmpq0);
tmpq1 = max(-128, tmpq1);
tmpq1 = min(127, tmpq1);
tmp_in[0] = tmpq0;
tmp_in[1] = tmpq1;
*reinterpret_cast<int16_t *>(&dst_ptr[offset]) =
*reinterpret_cast<int16_t *>(tmp_in);
}
}
}
void groupNormNCHW32ScaleQDQ(const GroupNormNHWCParams<__half> ¶ms,
cudaStream_t stream) {
dim3 grid;
// The number of blocks to compute all the channels.
grid.x = divUp(params.c, params.cPerBlock);
// The number of blocks to compute all the activations in a given instance.
grid.y = divUp(params.hw, params.hwPerBlock);
// The number of instances.
grid.z = params.n;
switch (params.cPerBlock) {
case 320:
groupNormNCHW32ScaleKernelQDQ<160><<<grid, 160, 0, stream>>>(params);
break;
case 480:
groupNormNCHW32ScaleKernelQDQ<256><<<grid, 256, 0, stream>>>(params);
break;
case 256:
groupNormNCHW32ScaleKernelQDQ<128><<<grid, 128, 0, stream>>>(params);
break;
case 128:
groupNormNCHW32ScaleKernelQDQ<64><<<grid, 64, 0, stream>>>(params);
break;
case 8:
groupNormNCHW32ScaleKernelQDQ<4><<<grid, 4, 0, stream>>>(params);
break;
default:
PADDLE_THROW(
platform::errors::Fatal("The function groupNormNCHW32ScaleQDQ of "
"GroupNorm TRT Plugin encounter error"));
}
}
int GroupNormPlugin::initialize() TRT_NOEXCEPT {
if (!with_fp16_) {
// if use fp32
cudaMalloc(&scale_gpu_, sizeof(float) * scale_.size());
cudaMalloc(&bias_gpu_, sizeof(float) * bias_.size());
cudaMemcpy(scale_gpu_,
scale_.data(),
scale_.size() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(bias_gpu_,
bias_.data(),
bias_.size() * sizeof(float),
cudaMemcpyHostToDevice);
} else {
// if use fp16
std::vector<half> scale_half(scale_.size());
std::vector<half> bias_half(bias_.size());
for (int i = 0; i < scale_.size(); ++i) {
scale_half[i] = static_cast<half>(scale_[i]);
}
for (int i = 0; i < bias_.size(); ++i) {
bias_half[i] = static_cast<half>(bias_[i]);
}
cudaMalloc(&scale_gpu_, sizeof(half) * scale_half.size());
cudaMalloc(&bias_gpu_, sizeof(half) * bias_half.size());
cudaMemcpy(scale_gpu_,
scale_half.data(),
scale_half.size() * sizeof(half),
cudaMemcpyHostToDevice);
cudaMemcpy(bias_gpu_,
bias_half.data(),
bias_half.size() * sizeof(half),
cudaMemcpyHostToDevice);
}
return 0;
}
bool GroupNormPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
return ((type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::PluginFormat::kLINEAR));
} else {
return ((type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::PluginFormat::kLINEAR));
}
}
nvinfer1::Dims GroupNormPlugin::getOutputDimensions(
int index, const nvinfer1::Dims *inputDims, int nbInputs) TRT_NOEXCEPT {
return inputDims[0];
}
int GroupNormPlugin::enqueue(int batch_size,
const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs,
void *workspace,
#else
void *const *outputs,
void *workspace,
#endif
cudaStream_t stream) TRT_NOEXCEPT {
const auto &input_dims = this->getInputDims(0);
int groups = groups_;
float eps = eps_;
std::vector<int> input_shape;
input_shape.push_back(batch_size);
for (int i = 0; i < input_dims.nbDims; i++) {
input_shape.push_back(input_dims.d[i]);
}
const auto input_ddim = phi::make_ddim(input_shape);
int C = input_shape[1];
PADDLE_ENFORCE_EQ(
C,
scale_.size(),
platform::errors::InvalidArgument(
"scale's size should be equal to the channel number in groupnorm,"
"but got channel number:%d, scale's size:%d.",
C,
scale_.size()));
PADDLE_ENFORCE_EQ(
C,
bias_.size(),
platform::errors::InvalidArgument(
"bias's size should be equal to the channel number in groupnorm,"
"but got channel number:%d, bias's size:%d.",
C,
bias_.size()));
float *mean_d = static_cast<float *>(workspace);
float *variance_d = mean_d + input_shape[0] * groups_;
float *temp_variance_d = variance_d + input_shape[0] * groups_;
auto input_type = getDataType();
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32";
const float *input = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
phi::GroupNormDirectCUDAFunctor<float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<float *>(bias_gpu_),
reinterpret_cast<float *>(scale_gpu_),
temp_variance_d,
groups_,
eps_,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp16";
const half *input = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
phi::GroupNormDirectCUDAFunctor<half, float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<const half *>(bias_gpu_),
reinterpret_cast<const half *>(scale_gpu_),
temp_variance_d,
groups_,
eps_,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The GroupNorm TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
nvinfer1::DimsExprs GroupNormPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputDims,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
return inputDims[0];
}
bool GroupNormPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of groupnorm plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
bool int8_support = in.type == nvinfer1::DataType::kINT8 &&
in.format == nvinfer1::PluginFormat::kCHW32;
bool fp16_support =
(in.type == nvinfer1::DataType::kHALF) &&
((!with_silu_ && in.format == nvinfer1::PluginFormat::kLINEAR) ||
in.format == nvinfer1::PluginFormat::kHWC8);
if (pos == 0) {
if (with_int8_) {
return int8_support || fp16_support;
} else if (with_fp16_) {
return fp16_support;
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType GroupNormPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The groupnorm Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true,
platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int GroupNormPluginDynamic::initialize() TRT_NOEXCEPT {
if (with_fp16_ == false) {
// if use fp32
cudaMalloc(&scale_gpu_, sizeof(float) * scale_.size());
cudaMalloc(&bias_gpu_, sizeof(float) * bias_.size());
cudaMemcpy(scale_gpu_,
scale_.data(),
scale_.size() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(bias_gpu_,
bias_.data(),
bias_.size() * sizeof(float),
cudaMemcpyHostToDevice);
} else {
// if use fp16
std::vector<half> scale_half(scale_.size());
std::vector<half> bias_half(bias_.size());
for (int i = 0; i < scale_.size(); ++i) {
scale_half[i] = static_cast<half>(scale_[i]);
}
for (int i = 0; i < bias_.size(); ++i) {
bias_half[i] = static_cast<half>(bias_[i]);
}
cudaMalloc(&scale_gpu_, sizeof(half) * scale_.size());
cudaMalloc(&bias_gpu_, sizeof(half) * bias_.size());
cudaMemcpy(scale_gpu_,
scale_half.data(),
scale_half.size() * sizeof(half),
cudaMemcpyHostToDevice);
cudaMemcpy(bias_gpu_,
bias_half.data(),
bias_half.size() * sizeof(half),
cudaMemcpyHostToDevice);
}
return 0;
}
int GroupNormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
const auto &input_dims = input_desc[0].dims;
int groups = groups_;
float eps = eps_;
std::vector<int> input_shape;
for (int i = 0; i < input_dims.nbDims; i++) {
input_shape.push_back(input_dims.d[i]);
}
const auto input_ddim = phi::make_ddim(input_shape);
int C = input_shape[1];
int image_size = input_shape[2] * input_shape[3];
int batchSize = input_shape[0];
PADDLE_ENFORCE_EQ(
C,
scale_.size(),
platform::errors::InvalidArgument(
"scale's size should be equal to the channel number in groupnorm,"
"but got feature_size:%d, scale's size:%d.",
C,
scale_.size()));
PADDLE_ENFORCE_EQ(
C,
bias_.size(),
platform::errors::InvalidArgument(
"bias's size should be equal to the channel number in groupnorm,"
"but got feature_size:%d, bias's size:%d.",
C,
bias_.size()));
float *mean_d = static_cast<float *>(workspace);
float *variance_d = mean_d + input_shape[0] * groups_;
float *temp_variance_d = variance_d + input_shape[0] * groups_;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32";
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
phi::GroupNormDirectCUDAFunctor<float, float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<float *>(bias_gpu_),
reinterpret_cast<float *>(scale_gpu_),
temp_variance_d,
groups,
eps,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp16";
const half *input = reinterpret_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
if (input_desc[0].format == nvinfer1::PluginFormat::kLINEAR) {
phi::GroupNormDirectCUDAFunctor<half, float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<half *>(bias_gpu_),
reinterpret_cast<half *>(scale_gpu_),
temp_variance_d,
groups,
eps,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else if (input_desc[0].format == nvinfer1::PluginFormat::kHWC8) {
int32_t cPerBlock = 320;
int32_t maxBlocksPerHW = 1024;
switch (input_desc[0].dims.d[1]) {
case 960:
case 1920:
cPerBlock = 480;
break;
case 512:
case 256:
cPerBlock = 256;
break;
case 128:
cPerBlock = 128;
break;
default:
cPerBlock = 320;
}
if (cPerBlock > input_desc[0].dims.d[1]) {
cPerBlock = 8;
}
params_.withSilu = with_silu_;
params_.dst = static_cast<half *>(outputs[0]);
params_.srcX = static_cast<half const *>(inputs[0]);
params_.gamma = reinterpret_cast<half *>(scale_gpu_);
params_.beta = reinterpret_cast<half *>(bias_gpu_);
params_.redBuffer = static_cast<float *>(workspace);
params_.var_data = nullptr;
params_.n = input_desc[0].dims.d[0];
params_.h = input_desc[0].dims.d[2];
params_.w = input_desc[0].dims.d[3];
params_.c = input_desc[0].dims.d[1];
params_.groups = groups_;
params_.hw = params_.h * params_.w;
const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW);
params_.hwPerBlock = divUp(params_.hw, blocksPerHW);
params_.cPerBlock = cPerBlock;
params_.cPerGroup = params_.c / params_.groups;
params_.hwc = params_.hw * params_.c;
params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup);
params_.groupsPerBlock = cPerBlock / params_.cPerGroup;
params_.eps = eps_;
params_.var_data = nullptr;
cudaMemsetAsync(params_.redBuffer,
0,
2 * sizeof(float) * params_.n * groups_,
stream);
phi::groupNormNHWCSum<half> nhwc_sum;
nhwc_sum(¶ms_, stream);
phi::groupNormNHWCScale<half> nhwc_scale;
nhwc_scale(params_, stream);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Groupnorm TRT Plugin's only support nchw or nhwc8 input"));
}
} else if (input_type == nvinfer1::DataType::kINT8) {
const int8_t *input = reinterpret_cast<const int8_t *>(inputs[0]);
int8_t *output = static_cast<int8_t *>(outputs[0]);
if (input_desc[0].format == nvinfer1::PluginFormat::kCHW32) {
int32_t cPerBlock = 320;
int32_t maxBlocksPerHW = 1024;
switch (input_desc[0].dims.d[1]) {
case 960:
case 1920:
cPerBlock = 480;
break;
case 512:
case 256:
cPerBlock = 256;
break;
case 128:
cPerBlock = 128;
break;
default:
cPerBlock = 320;
}
if (cPerBlock > input_desc[0].dims.d[1]) {
cPerBlock = 8;
}
params_.withSilu = with_silu_;
params_.dst = static_cast<half *>(outputs[0]);
params_.srcX = static_cast<half const *>(inputs[0]);
params_.gamma = scale_gpu_;
params_.beta = bias_gpu_;
params_.redBuffer = static_cast<float *>(workspace);
params_.n = input_desc[0].dims.d[0];
params_.h = input_desc[0].dims.d[2];
params_.w = input_desc[0].dims.d[3];
params_.c = input_desc[0].dims.d[1];
params_.groups = groups_;
params_.hw = params_.h * params_.w;
const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW);
params_.hwPerBlock = divUp(params_.hw, blocksPerHW);
params_.cPerBlock = cPerBlock;
params_.cPerGroup = params_.c / params_.groups;
params_.hwc = params_.hw * params_.c;
params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup);
params_.groupsPerBlock = cPerBlock / params_.cPerGroup;
CHECK_EQ(cPerBlock % params_.cPerGroup, 0);
CHECK_EQ(params_.cPerGroup % 2, 0);
params_.eps = eps_;
params_.dqScaleIn = input_desc[0].scale;
params_.inv_qScale = 1.f / output_desc[0].scale;
// Just used for TensorRTDynamicShapeGNTes in test_dynamic_engine.cc
// Do not Edit it
// params_.dqScaleIn = 1.f;
// params_.inv_qScale = 1 / 0.05f;
cudaMemsetAsync(params_.redBuffer,
0,
2 * sizeof(float) * params_.n * groups_,
stream);
groupNormNCHW32SumQDQ(params_, stream);
groupNormNCHW32ScaleQDQ(params_, stream);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Groupnorm TRT Plugin only support nchw32 input"));
}
} else {
// input not float
PADDLE_THROW(platform::errors::Fatal(
"The Groupnorm TRT Plugin's only support fp32, fp16 or int8 input"));
}
return cudaGetLastError() != cudaSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
a702f75186b3c7e1b0cb7c3f0e3797f89274dfb0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <sys/time.h>
#include <sys/time.h>
#include <stdlib.h>
#include "matrixMult.h"
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
struct timeval tv1, tv2;
int main(int argc, char const *argv[])
{
int block_size_x = 16, block_size_y = 16;
hipError_t err = hipSuccess;
if(argc < 1) {
printf("Passe o tamanho da matriz como referencia.\n");
return -1;
}
if(argc > 2){
block_size_x = atoi(argv[2]), block_size_y = atoi(argv[3]);
}
printf("Bloco dim = %d %d\n", block_size_x, block_size_y);
printf("Qtd Threads/bloco = %d\n", block_size_x*block_size_y);
int N = atoi(argv[1]);
int size = N*N*sizeof(float);
printf("Dimensao da matriz %d\n", N);
float *h_A, *h_B;
h_A = (float *) malloc(size);
h_B = (float *) malloc(size);
inicMat(h_A, N, 1.0);
inicMat(h_B, N, 1.0);
float *d_A, *d_B, *d_C;
//Alocando vetores no cuda
hipMalloc((void **)&d_A, size);
hipMalloc((void **)&d_B, size);
hipMalloc((void **)&d_C, size);
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
dim3 threads(block_size_x, block_size_y, 1);
int grid_x = N/block_size_x + (N%block_size_x==0?0:1);
int grid_y = N/block_size_y + (N%block_size_y==0?0:1);
dim3 grid(grid_x, grid_y, 1);
hipLaunchKernelGGL(( matTrans), dim3(grid), dim3(threads), 0, 0, d_B, N);
gettimeofday(&tv1, NULL);
hipLaunchKernelGGL(( matMultTransCuda), dim3(grid), dim3(threads), 0, 0, d_A, d_B, d_C, N);
gettimeofday(&tv2, NULL);
printf ("Total time = %f seconds\n",
(double) (tv2.tv_usec - tv1.tv_usec) / 1000 +
(double) (tv2.tv_sec - tv1.tv_sec) * 1000);
free(h_A);
free(h_B);
// Free device global memory
err = hipFree(d_A);
err = hipFree(d_B);
err = hipFree(d_C);
return 0;
} | a702f75186b3c7e1b0cb7c3f0e3797f89274dfb0.cu |
#include <stdio.h>
#include <sys/time.h>
#include <sys/time.h>
#include <stdlib.h>
#include "matrixMult.h"
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
struct timeval tv1, tv2;
int main(int argc, char const *argv[])
{
int block_size_x = 16, block_size_y = 16;
cudaError_t err = cudaSuccess;
if(argc < 1) {
printf("Passe o tamanho da matriz como referencia.\n");
return -1;
}
if(argc > 2){
block_size_x = atoi(argv[2]), block_size_y = atoi(argv[3]);
}
printf("Bloco dim = %d %d\n", block_size_x, block_size_y);
printf("Qtd Threads/bloco = %d\n", block_size_x*block_size_y);
int N = atoi(argv[1]);
int size = N*N*sizeof(float);
printf("Dimensao da matriz %d\n", N);
float *h_A, *h_B;
h_A = (float *) malloc(size);
h_B = (float *) malloc(size);
inicMat(h_A, N, 1.0);
inicMat(h_B, N, 1.0);
float *d_A, *d_B, *d_C;
//Alocando vetores no cuda
cudaMalloc((void **)&d_A, size);
cudaMalloc((void **)&d_B, size);
cudaMalloc((void **)&d_C, size);
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
dim3 threads(block_size_x, block_size_y, 1);
int grid_x = N/block_size_x + (N%block_size_x==0?0:1);
int grid_y = N/block_size_y + (N%block_size_y==0?0:1);
dim3 grid(grid_x, grid_y, 1);
matTrans<<<grid, threads>>>(d_B, N);
gettimeofday(&tv1, NULL);
matMultTransCuda<<<grid, threads>>>(d_A, d_B, d_C, N);
gettimeofday(&tv2, NULL);
printf ("Total time = %f seconds\n",
(double) (tv2.tv_usec - tv1.tv_usec) / 1000 +
(double) (tv2.tv_sec - tv1.tv_sec) * 1000);
free(h_A);
free(h_B);
// Free device global memory
err = cudaFree(d_A);
err = cudaFree(d_B);
err = cudaFree(d_C);
return 0;
} |
3f0cf38ad95040cb1f5d6db2c81c8777c9612819.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_mergesort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
long *source = NULL;
hipMalloc(&source, XSIZE*YSIZE);
long *dest = NULL;
hipMalloc(&dest, XSIZE*YSIZE);
long size = XSIZE*YSIZE;
long width = XSIZE;
long slices = 1;
dim3 *threads = NULL;
hipMalloc(&threads, XSIZE*YSIZE);
dim3 *blocks = NULL;
hipMalloc(&blocks, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
gpu_mergesort), dim3(gridBlock),dim3(threadBlock), 0, 0, source,dest,size,width,slices,threads,blocks);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
gpu_mergesort), dim3(gridBlock),dim3(threadBlock), 0, 0, source,dest,size,width,slices,threads,blocks);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
gpu_mergesort), dim3(gridBlock),dim3(threadBlock), 0, 0, source,dest,size,width,slices,threads,blocks);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3f0cf38ad95040cb1f5d6db2c81c8777c9612819.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_mergesort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Sweeps the predefined matrix sizes x 20 block shapes, timing 1000 launches
// of gpu_mergesort for each configuration and printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per row.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // First CLI argument selects how many of the predefined matrix sizes to sweep.
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // NOTE(review): the generated harness sizes every buffer in BYTES
            // (XSIZE*YSIZE), not elements; kept as-is to preserve the benchmark.
            long *source = NULL;
            cudaMalloc(&source, XSIZE*YSIZE);
            long *dest = NULL;
            cudaMalloc(&dest, XSIZE*YSIZE);
            long size = XSIZE*YSIZE;
            long width = XSIZE;
            long slices = 1;
            dim3 *threads = NULL;
            cudaMalloc(&threads, XSIZE*YSIZE);
            dim3 *blocks = NULL;
            cudaMalloc(&blocks, XSIZE*YSIZE);
            // Round the problem size up so the grid divides evenly.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // establishes the CUDA context before any timing
            gpu_mergesort<<<gridBlock,threadBlock>>>(source,dest,size,width,slices,threads,blocks);
            cudaDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                gpu_mergesort<<<gridBlock,threadBlock>>>(source,dest,size,width,slices,threads,blocks);
            }
            // BUG FIX: kernel launches are asynchronous. Without synchronizing
            // before reading the clock, the loop below measured only launch
            // overhead (warm-up work could also bleed into the timed window).
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                gpu_mergesort<<<gridBlock,threadBlock>>>(source,dest,size,width,slices,threads,blocks);
            }
            cudaDeviceSynchronize();  // wait for the timed kernels to finish
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the four device buffers were leaked on every one of the
            // matrix_len*20 configurations; release them each iteration.
            cudaFree(source);
            cudaFree(dest);
            cudaFree(threads);
            cudaFree(blocks);
        }
    }
}
58c09b419786f48452c5f0c9f5da53bc23d1dd93.hip | // !!! This is a file automatically generated by hipify!!!
//Written by Helen Fan
#include <iostream>
#include <iomanip>
#include <string>
#include <sstream>
#include <Windows.h>
#include <tchar.h>
#include <stdio.h>
#include <strsafe.h>
#include <hip/hip_runtime_api.h>
#include "hf_siddon_recon.hpp"
//maybe I should include how long it took to run the program in the parameter file???
//call it .info file instead of .cfg
//.info - generated using siddon_recon class, saves the information that was used to create files
//.cfg - used as input for the siddon_recon class for recon/projection/backprojection
// Runs one MLEM CT reconstruction end to end and reports the wall-clock time.
// Profiler start/stop markers bracket the run so external profilers capture
// only the reconstruction itself.
int main()
{
    hipProfilerStart();
    double totalTime;
    // CUDA-samples stopwatch used to time the whole reconstruction.
    StopWatchInterface *timer;
    sdkCreateTimer(&timer);
    sdkResetTimer(&timer);
    sdkStartTimer(&timer);
    //---------------- how to use siddon_recon class --------------------------------
    siddon_recon recon;
    //recon.a1_FORWARD_PROJECTION("H:\\Visual Studio 2010\\CT_Recon\\CT_Recon", "CTParameters.cfg");
    recon.a0_RECON_MLEM("H:\\Visual Studio 2010\\CT_Recon\\CT_Recon\\", "CTParameters.cfg");
    //recon.a1_BACKWARD_PROJECTION("H:\\Visual Studio 2010\\CT_Recon\\CT_Recon", "MasterParameterFile.cfg", true);
    //-------------------------------------------------------------------
    sdkStopTimer(&timer);
    totalTime = sdkGetTimerValue(&timer)*1e-3;  // timer reports ms; convert to seconds
    printf("calculation time = %f seconds \n", totalTime);
    // BUG FIX: the stopwatch allocated by sdkCreateTimer was never released.
    sdkDeleteTimer(&timer);
    hipProfilerStop();
    system("PAUSE");  // keep the console window open on Windows
    return 0;         // explicit success status
}
| 58c09b419786f48452c5f0c9f5da53bc23d1dd93.cu | //Written by Helen Fan
#include <iostream>
#include <iomanip>
#include <string>
#include <sstream>
#include <Windows.h>
#include <tchar.h>
#include <stdio.h>
#include <strsafe.h>
#include <cuda_profiler_api.h>
#include "hf_siddon_recon.hpp"
//maybe I should include how long it took to run the program in the parameter file???
//call it .info file instead of .cfg
//.info - generated using siddon_recon class, saves the information that was used to create files
//.cfg - used as input for the siddon_recon class for recon/projection/backprojection
// Runs one MLEM CT reconstruction end to end and reports the wall-clock time.
// Profiler start/stop markers bracket the run so external profilers capture
// only the reconstruction itself.
int main()
{
    cudaProfilerStart();
    double totalTime;
    // CUDA-samples stopwatch used to time the whole reconstruction.
    StopWatchInterface *timer;
    sdkCreateTimer(&timer);
    sdkResetTimer(&timer);
    sdkStartTimer(&timer);
    //---------------- how to use siddon_recon class --------------------------------
    siddon_recon recon;
    //recon.a1_FORWARD_PROJECTION("H:\\Visual Studio 2010\\CT_Recon\\CT_Recon", "CTParameters.cfg");
    recon.a0_RECON_MLEM("H:\\Visual Studio 2010\\CT_Recon\\CT_Recon\\", "CTParameters.cfg");
    //recon.a1_BACKWARD_PROJECTION("H:\\Visual Studio 2010\\CT_Recon\\CT_Recon", "MasterParameterFile.cfg", true);
    //-------------------------------------------------------------------
    sdkStopTimer(&timer);
    totalTime = sdkGetTimerValue(&timer)*1e-3;  // timer reports ms; convert to seconds
    printf("calculation time = %f seconds \n", totalTime);
    // BUG FIX: the stopwatch allocated by sdkCreateTimer was never released.
    sdkDeleteTimer(&timer);
    cudaProfilerStop();
    system("PAUSE");  // keep the console window open on Windows
    return 0;         // explicit success status
}
|
2a611ee05ff72b032db8f2a01478036c0fc5cd90.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string>
#include <unistd.h>
#include "pgm.h"
#include "clock.h"
#include "kernels.h"
// The 5 * 5 Laplacian filter
const int8_t f[] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 24,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
};
/* Use this function to print the time of each of your kernels.
* The parameter names are intuitive, but don't hesitate to ask
* for clarifications.
* DO NOT modify this function.*/
/* Prints one benchmark row matching the header printed in main():
 * CPU time (ms), kernel id, GPU compute time (ms), host->device and
 * device->host transfer times (ms), then two speedups: compute-only
 * (CPU / GPU compute) and end-to-end (CPU / (compute + both transfers)).
 * Kept byte-identical per the "DO NOT modify" directive above. */
void print_run(float time_cpu, int kernel, float time_gpu_computation,
               float time_gpu_transfer_in, float time_gpu_transfer_out) {
  printf("%12.6f ", time_cpu);
  printf("%5d ", kernel);
  printf("%12.6f ", time_gpu_computation);
  printf("%14.6f ", time_gpu_transfer_in);
  printf("%15.6f ", time_gpu_transfer_out);
  printf("%13.2f ", time_cpu / time_gpu_computation);
  printf("%7.2f\n", time_cpu / (time_gpu_computation + time_gpu_transfer_in +
                                time_gpu_transfer_out));
}
int main(int argc, char **argv) {
int c;
std::string input_filename, cpu_output_filename, base_gpu_output_filename;
if (argc < 3) {
printf("Wrong usage. Expected -i <input_file> -o <output_file>\n");
return 0;
}
while ((c = getopt(argc, argv, "i:o:")) != -1) {
switch (c) {
case 'i':
input_filename = std::string(optarg);
break;
case 'o':
cpu_output_filename = std::string(optarg);
base_gpu_output_filename = std::string(optarg);
break;
default:
return 0;
}
}
pgm_image source_img;
init_pgm_image(&source_img);
if (load_pgm_from_file(input_filename.c_str(), &source_img) != NO_ERR) {
printf("Error loading source image.\n");
return 0;
}
/* Do not modify this printf */
printf("CPU_time(ms) Kernel GPU_time(ms) TransferIn(ms) TransferOut(ms) "
"Speedup_noTrf Speedup\n");
/* TODO: run your CPU implementation here and get its time. Don't include
* file IO in your measurement.*/
/* For example: */
float time_cpu;
{
std::string cpu_file = cpu_output_filename;
pgm_image cpu_output_img;
copy_pgm_image_size(&source_img, &cpu_output_img);
// Start time
Clock clock;
clock.start();
run_best_cpu(f, 5, source_img.matrix, cpu_output_img.matrix, source_img.width, source_img.height); // From kernels.h
// End time
time_cpu = clock.stop();
// print_run(args...) // Defined on the top of this file
save_pgm_to_file(cpu_file.c_str(), &cpu_output_img);
destroy_pgm_image(&cpu_output_img);
}
{
std::string gpu_file = "1" + base_gpu_output_filename;
pgm_image gpu_output_img;
copy_pgm_image_size(&source_img, &gpu_output_img);
int32_t *d_input = NULL;
int32_t *d_output = NULL;
int8_t *d_filter = NULL;
size_t size = source_img.width * source_img.height * sizeof(int32_t);
hipMalloc((void **)&d_filter, 25 * sizeof(int8_t));
hipMalloc((void **)&d_input, size);
hipMalloc((void **)&d_output, size);
float transfer_in, transfer_out, computation;
Clock clock;
clock.start();
hipMemcpy(d_filter, f, 25 * sizeof(int8_t), hipMemcpyHostToDevice);
hipMemcpy(d_input, source_img.matrix, size, hipMemcpyHostToDevice);
hipMemcpy(d_output, gpu_output_img.matrix, size, hipMemcpyHostToDevice);
transfer_in = clock.stop();
// Start time
clock.start();
run_kernel1(d_filter, 5, d_input, d_output, gpu_output_img.width, gpu_output_img.height);
// End time
computation = clock.stop();
clock.start();
hipMemcpy(gpu_output_img.matrix, d_output, size, hipMemcpyDeviceToHost);
transfer_out = clock.stop();
print_run(time_cpu, 1, computation, transfer_in, transfer_out);
save_pgm_to_file(gpu_file.c_str(), &gpu_output_img);
hipFree(d_filter);
hipFree(d_input);
hipFree(d_output);
destroy_pgm_image(&gpu_output_img);
}
{
std::string gpu_file = "2" + base_gpu_output_filename;
pgm_image gpu_output_img;
copy_pgm_image_size(&source_img, &gpu_output_img);
int32_t *d_input = NULL;
int32_t *d_output = NULL;
int8_t *d_filter = NULL;
size_t size = source_img.width * source_img.height * sizeof(int32_t);
hipMalloc((void **)&d_filter, 25 * sizeof(int8_t));
hipMalloc((void **)&d_input, size);
hipMalloc((void **)&d_output, size);
float transfer_in, transfer_out, computation;
Clock clock;
clock.start();
hipMemcpy(d_filter, f, 25 * sizeof(int8_t), hipMemcpyHostToDevice);
hipMemcpy(d_input, source_img.matrix, size, hipMemcpyHostToDevice);
hipMemcpy(d_output, gpu_output_img.matrix, size, hipMemcpyHostToDevice);
transfer_in = clock.stop();
// Start time
clock.start();
run_kernel2(d_filter, 5, d_input, d_output, gpu_output_img.width, gpu_output_img.height);
// End time
computation = clock.stop();
clock.start();
hipMemcpy(gpu_output_img.matrix, d_output, size, hipMemcpyDeviceToHost);
transfer_out = clock.stop();
print_run(time_cpu, 2, computation, transfer_in, transfer_out);
save_pgm_to_file(gpu_file.c_str(), &gpu_output_img);
hipFree(d_filter);
hipFree(d_input);
hipFree(d_output);
destroy_pgm_image(&gpu_output_img);
}
{
std::string gpu_file = "3" + base_gpu_output_filename;
pgm_image gpu_output_img;
copy_pgm_image_size(&source_img, &gpu_output_img);
int32_t *d_input = NULL;
int32_t *d_output = NULL;
int8_t *d_filter = NULL;
size_t size = source_img.width * source_img.height * sizeof(int32_t);
hipMalloc((void **)&d_filter, 25 * sizeof(int8_t));
hipMalloc((void **)&d_input, size);
hipMalloc((void **)&d_output, size);
float transfer_in, transfer_out, computation;
Clock k;
k.start();
hipMemcpy(d_filter, f, 25 * sizeof(int8_t), hipMemcpyHostToDevice);
hipMemcpy(d_input, source_img.matrix, size, hipMemcpyHostToDevice);
hipMemcpy(d_output, gpu_output_img.matrix, size, hipMemcpyHostToDevice);
transfer_in = k.stop();
// Start time
k.start();
run_kernel3(d_filter, 5, d_input, d_output, gpu_output_img.width, gpu_output_img.height);
// End time
computation = k.stop();
k.start();
hipMemcpy(gpu_output_img.matrix, d_output, size, hipMemcpyDeviceToHost);
transfer_out = k.stop();
print_run(time_cpu, 3, computation, transfer_in, transfer_out);
save_pgm_to_file(gpu_file.c_str(), &gpu_output_img);
hipFree(d_filter);
hipFree(d_input);
hipFree(d_output);
destroy_pgm_image(&gpu_output_img);
}
{
std::string gpu_file = "4" + base_gpu_output_filename;
pgm_image gpu_output_img;
copy_pgm_image_size(&source_img, &gpu_output_img);
int32_t *d_input = NULL;
int32_t *d_output = NULL;
int8_t *d_filter = NULL;
size_t size = source_img.width * source_img.height * sizeof(int32_t);
hipMalloc((void **)&d_filter, 25 * sizeof(int8_t));
hipMalloc((void **)&d_input, size);
hipMalloc((void **)&d_output, size);
float transfer_in, transfer_out, computation;
Clock k;
k.start();
hipMemcpy(d_filter, f, 25 * sizeof(int8_t), hipMemcpyHostToDevice);
hipMemcpy(d_input, source_img.matrix, size, hipMemcpyHostToDevice);
hipMemcpy(d_output, gpu_output_img.matrix, size, hipMemcpyHostToDevice);
transfer_in = k.stop();
// Start time
k.start();
run_kernel4(d_filter, 5, d_input, d_output, gpu_output_img.width, gpu_output_img.height);
// End time
computation = k.stop();
k.start();
hipMemcpy(gpu_output_img.matrix, d_output, size, hipMemcpyDeviceToHost);
transfer_out = k.stop();
print_run(time_cpu, 4, computation, transfer_in, transfer_out);
save_pgm_to_file(gpu_file.c_str(), &gpu_output_img);
hipFree(d_filter);
hipFree(d_input);
hipFree(d_output);
destroy_pgm_image(&gpu_output_img);
}
{
std::string gpu_file = "5" + base_gpu_output_filename;
pgm_image gpu_output_img;
copy_pgm_image_size(&source_img, &gpu_output_img);
int32_t *d_input = NULL;
int32_t *d_output = NULL;
int8_t *d_filter = NULL;
size_t size = source_img.width * source_img.height * sizeof(int32_t);
hipMalloc((void **)&d_filter, 25 * sizeof(int8_t));
hipMalloc((void **)&d_input, size);
hipMalloc((void **)&d_output, size);
float transfer_in, transfer_out, computation;
Clock clock;
clock.start();
hipMemcpy(d_filter, f, 25 * sizeof(int8_t), hipMemcpyHostToDevice);
hipMemcpy(d_input, source_img.matrix, size, hipMemcpyHostToDevice);
hipMemcpy(d_output, gpu_output_img.matrix, size, hipMemcpyHostToDevice);
transfer_in = clock.stop();
// Start time
clock.start();
run_kernel5(d_filter, 5, d_input, d_output, gpu_output_img.width, gpu_output_img.height);
// End time
computation = clock.stop();
clock.start();
hipMemcpy(gpu_output_img.matrix, d_output, size, hipMemcpyDeviceToHost);
transfer_out = clock.stop();
print_run(time_cpu, 5, computation, transfer_in, transfer_out);
save_pgm_to_file(gpu_file.c_str(), &gpu_output_img);
hipFree(d_filter);
hipFree(d_input);
hipFree(d_output);
destroy_pgm_image(&gpu_output_img);
}
}
| 2a611ee05ff72b032db8f2a01478036c0fc5cd90.cu | #include <stdio.h>
#include <string>
#include <unistd.h>
#include "pgm.h"
#include "clock.h"
#include "kernels.h"
// The 5 * 5 Laplacian filter
const int8_t f[] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 24,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
};
/* Use this function to print the time of each of your kernels.
* The parameter names are intuitive, but don't hesitate to ask
* for clarifications.
* DO NOT modify this function.*/
/* Prints one benchmark row matching the header printed in main():
 * CPU time (ms), kernel id, GPU compute time (ms), host->device and
 * device->host transfer times (ms), then two speedups: compute-only
 * (CPU / GPU compute) and end-to-end (CPU / (compute + both transfers)).
 * Kept byte-identical per the "DO NOT modify" directive above. */
void print_run(float time_cpu, int kernel, float time_gpu_computation,
               float time_gpu_transfer_in, float time_gpu_transfer_out) {
  printf("%12.6f ", time_cpu);
  printf("%5d ", kernel);
  printf("%12.6f ", time_gpu_computation);
  printf("%14.6f ", time_gpu_transfer_in);
  printf("%15.6f ", time_gpu_transfer_out);
  printf("%13.2f ", time_cpu / time_gpu_computation);
  printf("%7.2f\n", time_cpu / (time_gpu_computation + time_gpu_transfer_in +
                                time_gpu_transfer_out));
}
int main(int argc, char **argv) {
int c;
std::string input_filename, cpu_output_filename, base_gpu_output_filename;
if (argc < 3) {
printf("Wrong usage. Expected -i <input_file> -o <output_file>\n");
return 0;
}
while ((c = getopt(argc, argv, "i:o:")) != -1) {
switch (c) {
case 'i':
input_filename = std::string(optarg);
break;
case 'o':
cpu_output_filename = std::string(optarg);
base_gpu_output_filename = std::string(optarg);
break;
default:
return 0;
}
}
pgm_image source_img;
init_pgm_image(&source_img);
if (load_pgm_from_file(input_filename.c_str(), &source_img) != NO_ERR) {
printf("Error loading source image.\n");
return 0;
}
/* Do not modify this printf */
printf("CPU_time(ms) Kernel GPU_time(ms) TransferIn(ms) TransferOut(ms) "
"Speedup_noTrf Speedup\n");
/* TODO: run your CPU implementation here and get its time. Don't include
* file IO in your measurement.*/
/* For example: */
float time_cpu;
{
std::string cpu_file = cpu_output_filename;
pgm_image cpu_output_img;
copy_pgm_image_size(&source_img, &cpu_output_img);
// Start time
Clock clock;
clock.start();
run_best_cpu(f, 5, source_img.matrix, cpu_output_img.matrix, source_img.width, source_img.height); // From kernels.h
// End time
time_cpu = clock.stop();
// print_run(args...) // Defined on the top of this file
save_pgm_to_file(cpu_file.c_str(), &cpu_output_img);
destroy_pgm_image(&cpu_output_img);
}
{
std::string gpu_file = "1" + base_gpu_output_filename;
pgm_image gpu_output_img;
copy_pgm_image_size(&source_img, &gpu_output_img);
int32_t *d_input = NULL;
int32_t *d_output = NULL;
int8_t *d_filter = NULL;
size_t size = source_img.width * source_img.height * sizeof(int32_t);
cudaMalloc((void **)&d_filter, 25 * sizeof(int8_t));
cudaMalloc((void **)&d_input, size);
cudaMalloc((void **)&d_output, size);
float transfer_in, transfer_out, computation;
Clock clock;
clock.start();
cudaMemcpy(d_filter, f, 25 * sizeof(int8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_input, source_img.matrix, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_output, gpu_output_img.matrix, size, cudaMemcpyHostToDevice);
transfer_in = clock.stop();
// Start time
clock.start();
run_kernel1(d_filter, 5, d_input, d_output, gpu_output_img.width, gpu_output_img.height);
// End time
computation = clock.stop();
clock.start();
cudaMemcpy(gpu_output_img.matrix, d_output, size, cudaMemcpyDeviceToHost);
transfer_out = clock.stop();
print_run(time_cpu, 1, computation, transfer_in, transfer_out);
save_pgm_to_file(gpu_file.c_str(), &gpu_output_img);
cudaFree(d_filter);
cudaFree(d_input);
cudaFree(d_output);
destroy_pgm_image(&gpu_output_img);
}
{
std::string gpu_file = "2" + base_gpu_output_filename;
pgm_image gpu_output_img;
copy_pgm_image_size(&source_img, &gpu_output_img);
int32_t *d_input = NULL;
int32_t *d_output = NULL;
int8_t *d_filter = NULL;
size_t size = source_img.width * source_img.height * sizeof(int32_t);
cudaMalloc((void **)&d_filter, 25 * sizeof(int8_t));
cudaMalloc((void **)&d_input, size);
cudaMalloc((void **)&d_output, size);
float transfer_in, transfer_out, computation;
Clock clock;
clock.start();
cudaMemcpy(d_filter, f, 25 * sizeof(int8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_input, source_img.matrix, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_output, gpu_output_img.matrix, size, cudaMemcpyHostToDevice);
transfer_in = clock.stop();
// Start time
clock.start();
run_kernel2(d_filter, 5, d_input, d_output, gpu_output_img.width, gpu_output_img.height);
// End time
computation = clock.stop();
clock.start();
cudaMemcpy(gpu_output_img.matrix, d_output, size, cudaMemcpyDeviceToHost);
transfer_out = clock.stop();
print_run(time_cpu, 2, computation, transfer_in, transfer_out);
save_pgm_to_file(gpu_file.c_str(), &gpu_output_img);
cudaFree(d_filter);
cudaFree(d_input);
cudaFree(d_output);
destroy_pgm_image(&gpu_output_img);
}
{
std::string gpu_file = "3" + base_gpu_output_filename;
pgm_image gpu_output_img;
copy_pgm_image_size(&source_img, &gpu_output_img);
int32_t *d_input = NULL;
int32_t *d_output = NULL;
int8_t *d_filter = NULL;
size_t size = source_img.width * source_img.height * sizeof(int32_t);
cudaMalloc((void **)&d_filter, 25 * sizeof(int8_t));
cudaMalloc((void **)&d_input, size);
cudaMalloc((void **)&d_output, size);
float transfer_in, transfer_out, computation;
Clock k;
k.start();
cudaMemcpy(d_filter, f, 25 * sizeof(int8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_input, source_img.matrix, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_output, gpu_output_img.matrix, size, cudaMemcpyHostToDevice);
transfer_in = k.stop();
// Start time
k.start();
run_kernel3(d_filter, 5, d_input, d_output, gpu_output_img.width, gpu_output_img.height);
// End time
computation = k.stop();
k.start();
cudaMemcpy(gpu_output_img.matrix, d_output, size, cudaMemcpyDeviceToHost);
transfer_out = k.stop();
print_run(time_cpu, 3, computation, transfer_in, transfer_out);
save_pgm_to_file(gpu_file.c_str(), &gpu_output_img);
cudaFree(d_filter);
cudaFree(d_input);
cudaFree(d_output);
destroy_pgm_image(&gpu_output_img);
}
{
std::string gpu_file = "4" + base_gpu_output_filename;
pgm_image gpu_output_img;
copy_pgm_image_size(&source_img, &gpu_output_img);
int32_t *d_input = NULL;
int32_t *d_output = NULL;
int8_t *d_filter = NULL;
size_t size = source_img.width * source_img.height * sizeof(int32_t);
cudaMalloc((void **)&d_filter, 25 * sizeof(int8_t));
cudaMalloc((void **)&d_input, size);
cudaMalloc((void **)&d_output, size);
float transfer_in, transfer_out, computation;
Clock k;
k.start();
cudaMemcpy(d_filter, f, 25 * sizeof(int8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_input, source_img.matrix, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_output, gpu_output_img.matrix, size, cudaMemcpyHostToDevice);
transfer_in = k.stop();
// Start time
k.start();
run_kernel4(d_filter, 5, d_input, d_output, gpu_output_img.width, gpu_output_img.height);
// End time
computation = k.stop();
k.start();
cudaMemcpy(gpu_output_img.matrix, d_output, size, cudaMemcpyDeviceToHost);
transfer_out = k.stop();
print_run(time_cpu, 4, computation, transfer_in, transfer_out);
save_pgm_to_file(gpu_file.c_str(), &gpu_output_img);
cudaFree(d_filter);
cudaFree(d_input);
cudaFree(d_output);
destroy_pgm_image(&gpu_output_img);
}
{
std::string gpu_file = "5" + base_gpu_output_filename;
pgm_image gpu_output_img;
copy_pgm_image_size(&source_img, &gpu_output_img);
int32_t *d_input = NULL;
int32_t *d_output = NULL;
int8_t *d_filter = NULL;
size_t size = source_img.width * source_img.height * sizeof(int32_t);
cudaMalloc((void **)&d_filter, 25 * sizeof(int8_t));
cudaMalloc((void **)&d_input, size);
cudaMalloc((void **)&d_output, size);
float transfer_in, transfer_out, computation;
Clock clock;
clock.start();
cudaMemcpy(d_filter, f, 25 * sizeof(int8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_input, source_img.matrix, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_output, gpu_output_img.matrix, size, cudaMemcpyHostToDevice);
transfer_in = clock.stop();
// Start time
clock.start();
run_kernel5(d_filter, 5, d_input, d_output, gpu_output_img.width, gpu_output_img.height);
// End time
computation = clock.stop();
clock.start();
cudaMemcpy(gpu_output_img.matrix, d_output, size, cudaMemcpyDeviceToHost);
transfer_out = clock.stop();
print_run(time_cpu, 5, computation, transfer_in, transfer_out);
save_pgm_to_file(gpu_file.c_str(), &gpu_output_img);
cudaFree(d_filter);
cudaFree(d_input);
cudaFree(d_output);
destroy_pgm_image(&gpu_output_img);
}
}
|
854e34239f242d2476f8844aa458536274369c9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include "cuda_utils.cuh"
#include "nvidia/helper_math.h"
#include "Renderer.cuh"
// One thread per pixel: seeds an independent RNG stream for every pixel of
// the output image. Threads beyond the image bounds (grid overshoot) exit.
__global__
void initRandState_kernel(int imageWidth, int imageHeight, hiprandState_t* randState)
{
    const int px = blockIdx.x*blockDim.x + threadIdx.x;
    const int py = blockIdx.y*blockDim.y + threadIdx.y;
    if (px >= imageWidth || py >= imageHeight) {
        return;
    }
    const int pixelIdx = py*imageWidth + px;
    // Same seed everywhere; the per-pixel subsequence keeps streams distinct.
    hiprand_init(1984, pixelIdx, 0, &randState[pixelIdx]);
}
// Allocates one RNG state per pixel on the device and launches the seeding
// kernel over a 16x16-thread grid covering the image. 'status' receives the
// first failing HIP call's status; later steps are skipped after a failure.
void Renderer::initRandState(StatusCode &status)
{
    const int tx{16};
    const int ty{16};
    dim3 numThreads(tx, ty);
    // Ceiling division so partial tiles at the image edge are still covered.
    dim3 numBlocks((m_imageWidth + tx - 1)/tx,
                   (m_imageHeight + ty - 1)/ty);
    status = CCE(hipMalloc((void**)&m_randState, m_imageWidth*m_imageHeight*sizeof(hiprandState_t)));
    if (status != StatusCode::NoError) {
        return;
    }
    hipLaunchKernelGGL(( initRandState_kernel), dim3(numBlocks), dim3(numThreads), 0, 0, m_imageWidth, m_imageHeight, m_randState);
    // Launch-configuration errors are reported through hipGetLastError ...
    status = CCE(hipGetLastError());
    if (status != StatusCode::NoError) {
        return;
    }
    // ... while asynchronous execution errors surface at the synchronize.
    status = CCE(hipDeviceSynchronize());
    if (status != StatusCode::NoError) {
        return;
    }
}
// One-time device allocations: per-pixel RNG state plus the framebuffer the
// render kernel writes. 'status' receives the first failing call's status.
void Renderer::initBuffers(StatusCode &status)
{
    // initialize random state on device
    initRandState(status);
    if (status != StatusCode::NoError) {
        return;
    }
    // allocate buffer for the final image
    // Managed memory: written by the render kernel on the device and read
    // directly on the host in getImageOnHost, without an explicit copy.
    int framebufferSize = m_imageWidth*m_imageHeight*sizeof(float3);
    status = CCE(hipMallocManaged((void**)&m_framebuffer, framebufferSize));
    if (status != StatusCode::NoError) {
        return;
    }
}
#define MY_FLOAT_MAX 3.402823e+38
// Radiance for rays that escape the scene; this scene is lit purely by
// emissive objects, so the environment contributes no light.
__device__ float3 getBackgroundColor(const Ray& ray)
{
    const float3 black = make_float3(0.0f, 0.0f, 0.0f);
    return black;
}
// Traces a ray through the scene, accumulating surface attenuation over up to
// rayDepth scattering events. Rays absorbed by a material return its emitted
// radiance; rays that miss return the background; rays still bouncing after
// rayDepth hits contribute no light. The result is scaled by the accumulated
// attenuation of every bounce along the path.
__device__ float3 getColor(const Ray& ray, Object* world,
                           hiprandState_t* localRandState,
                           int rayDepth)
{
    HitRecord hitRec;
    // BUG FIX: 'color' was uninitialized; a ray that kept scattering for all
    // rayDepth bounces without terminating returned garbage. Depth-terminated
    // paths now contribute black, the conventional path-tracer choice.
    float3 color = make_float3(0.0f, 0.0f, 0.0f);
    Ray inRay = ray;
    float3 attenuationTotal = make_float3(1.0f, 1.0f, 1.0f);
    for (int i = 0; i < rayDepth; i++) {
        if (world->hit(inRay, 0.001f, MY_FLOAT_MAX, hitRec)) {
            float3 attenuation;
            Ray scattered;
            float3 emitted = hitRec.material->emitted(hitRec.u, hitRec.v, hitRec.p);
            if (hitRec.material->scatter(inRay, hitRec, attenuation,
                                         scattered, localRandState)) {
                // Surface scattered the ray: fold in its attenuation, continue.
                attenuationTotal *= attenuation;
                inRay = scattered;
            }
            else {
                // Ray absorbed: the material's emission terminates the path.
                color = emitted;
                break;
            }
        }
        else {
            // Ray escaped the scene.
            color = getBackgroundColor(inRay);
            break;
        }
    }
    color *= attenuationTotal;
    return color;
}
// One thread per pixel: averages sampleCount jittered camera rays per pixel
// and writes the result to the framebuffer. Threads outside the image bounds
// (grid overshoot) do nothing.
__global__
void renderScene_kernel(Camera* camera, Object** world,
                        hiprandState_t* randState, int imageWidth,
                        int imageHeight, int sampleCount,
                        int rayDepth, float3* framebuffer)
{
    int pixelX = threadIdx.x + blockIdx.x*blockDim.x;
    int pixelY = threadIdx.y + blockIdx.y*blockDim.y;
    if (pixelX < imageWidth && pixelY < imageHeight) {
        int pixelIdx = pixelX + pixelY*imageWidth;
        // Work on a local copy of the RNG state.
        // NOTE(review): the advanced state is never written back to
        // randState[pixelIdx], so a second render pass would replay the same
        // random sequence -- confirm this is intended.
        hiprandState_t localRandState = randState[pixelIdx];
        float3 color = make_float3(0.0f, 0.0f, 0.0f);
        for (int sample = 0; sample < sampleCount; sample++) {
            // Jitter the sample position within the pixel (anti-aliasing).
            float u = float(pixelX + hiprand_uniform(&localRandState))/float(imageWidth);
            float v = float(pixelY + hiprand_uniform(&localRandState))/float(imageHeight);
            Ray ray = camera->getRay(u, v, &localRandState);
            color += getColor(ray, *world, &localRandState, rayDepth);
        }
        // Box-filter average of all samples.
        framebuffer[pixelIdx] = color/float(sampleCount);
    }
}
// Launches the path-tracing kernel over the whole image (8x8 thread blocks)
// and blocks until it finishes. 'status' receives the first failing HIP
// call's status; later steps are skipped after a failure.
void Renderer::renderScene(const SceneDevice &sceneDevice, StatusCode &status)
{
    LOG_TRIVIAL(trace) << "Renderer::renderScene";
    const int tx = 8;
    const int ty = 8;
    dim3 numThreads(tx, ty);
    // Ceiling division so partial tiles at the image edge are still covered.
    dim3 numBlocks((m_imageWidth + tx - 1)/tx,
                   (m_imageHeight + ty - 1)/ty);
    hipLaunchKernelGGL(( renderScene_kernel), dim3(numBlocks), dim3(numThreads), 0, 0, sceneDevice.m_camera,
                                                  sceneDevice.m_world,
                                                  m_randState, m_imageWidth,
                                                  m_imageHeight, m_sampleCount,
                                                  m_rayDepth, m_framebuffer);
    // BUG FIX: launch-configuration errors are only reported through
    // hipGetLastError(); check it before synchronizing, matching the pattern
    // this class already uses in initRandState().
    status = CCE(hipGetLastError());
    if (status != StatusCode::NoError) {
        return;
    }
    // Asynchronous execution errors surface at the synchronize.
    status = CCE(hipDeviceSynchronize());
    if (status != StatusCode::NoError) {
        return;
    }
}
// Copies the rendered framebuffer into 'image'. The framebuffer was allocated
// with hipMallocManaged, so the host can read it directly. 'status' is
// accepted for interface symmetry with the other methods but not modified.
void Renderer::getImageOnHost(std::vector<float3>& image, StatusCode& status) const
{
    const int pixelCount = m_imageWidth*m_imageHeight;
    image.assign(m_framebuffer, m_framebuffer + pixelCount);
}
| 854e34239f242d2476f8844aa458536274369c9a.cu | #include <curand_kernel.h>
#include "cuda_utils.cuh"
#include "nvidia/helper_math.h"
#include "Renderer.cuh"
// One thread per pixel: seeds an independent RNG stream for every pixel of
// the output image. Threads beyond the image bounds (grid overshoot) exit.
__global__
void initRandState_kernel(int imageWidth, int imageHeight, curandState* randState)
{
    const int px = blockIdx.x*blockDim.x + threadIdx.x;
    const int py = blockIdx.y*blockDim.y + threadIdx.y;
    if (px >= imageWidth || py >= imageHeight) {
        return;
    }
    const int pixelIdx = py*imageWidth + px;
    // Same seed everywhere; the per-pixel subsequence keeps streams distinct.
    curand_init(1984, pixelIdx, 0, &randState[pixelIdx]);
}
void Renderer::initRandState(StatusCode &status)
{
const int tx{16};
const int ty{16};
dim3 numThreads(tx, ty);
dim3 numBlocks((m_imageWidth + tx - 1)/tx,
(m_imageHeight + ty - 1)/ty);
status = CCE(cudaMalloc((void**)&m_randState, m_imageWidth*m_imageHeight*sizeof(curandState)));
if (status != StatusCode::NoError) {
return;
}
initRandState_kernel<<<numBlocks, numThreads>>>(m_imageWidth, m_imageHeight, m_randState);
status = CCE(cudaGetLastError());
if (status != StatusCode::NoError) {
return;
}
status = CCE(cudaDeviceSynchronize());
if (status != StatusCode::NoError) {
return;
}
}
void Renderer::initBuffers(StatusCode &status)
{
// initialize random state on device
initRandState(status);
if (status != StatusCode::NoError) {
return;
}
// allocate buffer for the final image
int framebufferSize = m_imageWidth*m_imageHeight*sizeof(float3);
status = CCE(cudaMallocManaged((void**)&m_framebuffer, framebufferSize));
if (status != StatusCode::NoError) {
return;
}
}
#define MY_FLOAT_MAX 3.402823e+38
// Radiance for rays that escape the scene; this scene is lit purely by
// emissive objects, so the environment contributes no light.
__device__ float3 getBackgroundColor(const Ray& ray)
{
    const float3 black = make_float3(0.0f, 0.0f, 0.0f);
    return black;
}
// Traces a ray through the scene, accumulating surface attenuation over up to
// rayDepth scattering events. Rays absorbed by a material return its emitted
// radiance; rays that miss return the background; rays still bouncing after
// rayDepth hits contribute no light. The result is scaled by the accumulated
// attenuation of every bounce along the path.
__device__ float3 getColor(const Ray& ray, Object* world,
                           curandState* localRandState,
                           int rayDepth)
{
    HitRecord hitRec;
    // BUG FIX: 'color' was uninitialized; a ray that kept scattering for all
    // rayDepth bounces without terminating returned garbage. Depth-terminated
    // paths now contribute black, the conventional path-tracer choice.
    float3 color = make_float3(0.0f, 0.0f, 0.0f);
    Ray inRay = ray;
    float3 attenuationTotal = make_float3(1.0f, 1.0f, 1.0f);
    for (int i = 0; i < rayDepth; i++) {
        if (world->hit(inRay, 0.001f, MY_FLOAT_MAX, hitRec)) {
            float3 attenuation;
            Ray scattered;
            float3 emitted = hitRec.material->emitted(hitRec.u, hitRec.v, hitRec.p);
            if (hitRec.material->scatter(inRay, hitRec, attenuation,
                                         scattered, localRandState)) {
                // Surface scattered the ray: fold in its attenuation, continue.
                attenuationTotal *= attenuation;
                inRay = scattered;
            }
            else {
                // Ray absorbed: the material's emission terminates the path.
                color = emitted;
                break;
            }
        }
        else {
            // Ray escaped the scene.
            color = getBackgroundColor(inRay);
            break;
        }
    }
    color *= attenuationTotal;
    return color;
}
__global__
void renderScene_kernel(Camera* camera, Object** world,
curandState* randState, int imageWidth,
int imageHeight, int sampleCount,
int rayDepth, float3* framebuffer)
{
int pixelX = threadIdx.x + blockIdx.x*blockDim.x;
int pixelY = threadIdx.y + blockIdx.y*blockDim.y;
if (pixelX < imageWidth && pixelY < imageHeight) {
int pixelIdx = pixelX + pixelY*imageWidth;
curandState localRandState = randState[pixelIdx];
float3 color = make_float3(0.0f, 0.0f, 0.0f);
for (int sample = 0; sample < sampleCount; sample++) {
float u = float(pixelX + curand_uniform(&localRandState))/float(imageWidth);
float v = float(pixelY + curand_uniform(&localRandState))/float(imageHeight);
Ray ray = camera->getRay(u, v, &localRandState);
color += getColor(ray, *world, &localRandState, rayDepth);
}
framebuffer[pixelIdx] = color/float(sampleCount);
}
}
// Launches the path-tracing kernel over the whole image (8x8 thread blocks)
// and blocks until it finishes. 'status' receives the first failing CUDA
// call's status; later steps are skipped after a failure.
void Renderer::renderScene(const SceneDevice &sceneDevice, StatusCode &status)
{
    LOG_TRIVIAL(trace) << "Renderer::renderScene";
    const int tx = 8;
    const int ty = 8;
    dim3 numThreads(tx, ty);
    // Ceiling division so partial tiles at the image edge are still covered.
    dim3 numBlocks((m_imageWidth + tx - 1)/tx,
                   (m_imageHeight + ty - 1)/ty);
    renderScene_kernel<<<numBlocks, numThreads>>>(sceneDevice.m_camera,
                                                  sceneDevice.m_world,
                                                  m_randState, m_imageWidth,
                                                  m_imageHeight, m_sampleCount,
                                                  m_rayDepth, m_framebuffer);
    // BUG FIX: launch-configuration errors are only reported through
    // cudaGetLastError(); check it before synchronizing, matching the pattern
    // this class already uses in initRandState().
    status = CCE(cudaGetLastError());
    if (status != StatusCode::NoError) {
        return;
    }
    // Asynchronous execution errors surface at the synchronize.
    status = CCE(cudaDeviceSynchronize());
    if (status != StatusCode::NoError) {
        return;
    }
}
void Renderer::getImageOnHost(std::vector<float3>& image, StatusCode& status) const
{
int imageSize = m_imageWidth*m_imageHeight;
image.resize(imageSize);
for (int pixelIdx = 0; pixelIdx < imageSize; pixelIdx++) {
image[pixelIdx] = m_framebuffer[pixelIdx];
}
}
|
29b5b2870a7b70fd9f659d3ae6d70bcc398ebf89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHGeneral.h>
#include <THH/THHTensorSort.cuh>
#include <ATen/hip/HIPContext.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include <THH/THHAtomics.cuh>
#include <c10/macros/Macros.h>
namespace {
// Accumulates grad_output rows into grad_weight rows addressed by
// sorted_indices. Grid layout: blockIdx.x*blockDim.y+threadIdx.y selects the
// index element (one warp-row per element), blockIdx.y tiles the feature
// dimension, blockIdx.z walks the unindexed outer dimensions. SZ is the
// per-thread grain size: each thread handles up to SZ features per pass,
// spaced C10_WARP_SIZE apart.
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
  int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
  int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
  // Accumulate in a wider type (e.g. float for half) to limit rounding error.
  using accscalar_t = at::acc_type<scalar_t, true>;
  // Each warp is responsible for an input into the LookupTable.
  // If the preceding input has the same destination index as this input, then the warp
  // exits immediately. The warp also processes subsequent inputs with the
  // same value.
  //
  // Input Warp
  // 1 <warp 1>
  // 1 <warp 1> (<warp 2> exits without doing any work)
  // 5 <warp 3>
  // 8 <warp 4>
  // Number of values processed by each thread (grain size)
  for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
    int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
    // Only the owner of the FIRST element of a run of equal sorted indices
    // proceeds; it accumulates the whole run serially in the do-while below,
    // so no two warp-rows ever read-modify-write the same destination row
    // concurrently (no atomics needed).
    if (idx < numel
        && (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
      do {
        int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
        const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
        const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
        const accscalar_t scale = (accscalar_t)1.0;
        accscalar_t gradient[SZ];
        accscalar_t weight[SZ];
        // Sweep the feature dimension in tiles of gridDim.y*blockDim.x*SZ:
        // load SZ values, accumulate, store SZ values, advance.
        while (start_feature < stride) {
          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
            if (feature_dim < stride) {
              gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
              weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
            }
          }
          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            weight[ii] += gradient[ii] * scale;
          }
          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
            if (feature_dim < stride) {
              grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
            }
          }
          start_feature += gridDim.y * blockDim.x * SZ;
        }
        idx++;
      } while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
    }
  }
}
}
namespace at { namespace native {
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
  // Range checking is skipped in the backward pass: out-of-bounds indices
  // would already have raised an error during forward.
  if (check_range && index.numel() != 0) {
    const auto max_idx = index.max().item<int64_t>();
    const auto min_idx = index.min().item<int64_t>();
    if (max_idx >= dim_size) {
      AT_INDEX_ERROR("index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
    if (min_idx < -dim_size) {
      AT_INDEX_ERROR("index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
  }
  // Map negative indices into [0, dim_size).
  return index.remainder(dim_size);
}
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
  // Build the strides the tensor would have if it were contiguous: the
  // last dimension gets stride 1, and every earlier stride is the running
  // product of the sizes that follow it.
  const auto sizes = tensor.sizes();
  std::vector<int64_t> result(tensor.dim());
  result[tensor.dim() - 1] = 1;
  std::partial_sum(sizes.rbegin(), sizes.rend() - 1,
                   result.rbegin() + 1, std::multiplies<int64_t>());
  return result;
}
// Collapses the defined (broadcast-matched) indexing tensors into a single
// linear index over the indexed dimensions of `src`.
// Returns (linearIndex, nElemBefore, strideBefore, nElemAfter):
//  - nElemBefore / nElemAfter: element counts of the unindexed dimensions
//    before / after the indexed block;
//  - strideBefore: src's stride of the dimension immediately preceding the
//    first indexed dimension (0 when indexing starts at dim 0).
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& backend = src.type().backend();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being indexed.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (auto i = decltype(src.dim()){0}; i < src.dim(); i++) {
if (indices[i].defined()) {
// Cast index to the longType matching src's backend
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).toBackend(backend);
if (linearIndex.defined()) {
linearIndex += index;
} else {
// First indexed dimension: remember the stride just before it.
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
// Unindexed dimension after the indexed block.
emptyAfter++;
nElemAfter *= src.size(i);
} else {
// Unindexed dimension before the indexed block.
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
// Normalizes an arbitrary advanced-indexing spec into one flattened index
// tensor over `self`:
//  1. expand bool/byte masks into long index tensors,
//  2. broadcast all index tensors to a common shape,
//  3. pad with undefined tensors up to self.dim(),
//  4. if the indexed dims are not adjacent, transpose them to the front,
//  5. collapse them into a single linear index (computeLinearIndex).
// Returns (linearIndex, possibly-transposed self, nElemBefore, strideBefore,
// nElemAfter, inversePerm) where inversePerm undoes the transpose in step 4.
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, TensorList orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
namespace {
// Accumulating index_put_ (self[indices] += value) on the GPU.
// Strategy: flatten the indexed dims into a single linear index, sort the
// index values while carrying along their original positions, then launch
// indexing_backward_kernel, which assigns each run of equal indices to one
// warp so the read-modify-write on self needs no atomics.
// `unsafe` skips index range checking (used by the backward pass, where
// the forward has already validated the indices).
void index_put_accum_kernel(Tensor & self, TensorList indices, const Tensor & value, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
AT_INDEX_ERROR("too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
auto value_ = value.contiguous();
Tensor linearIndex, expandedValue, src;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.view(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
using device_ptr = thrust::device_ptr<int64_t>;
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Reduce per-element offsets to per-slice ids so duplicate targets
// compare equal after the sort.
linearIndex.div_(sliceSize);
{
// Sort the slice ids, keeping each id's original position in
// orig_indices so the kernel can find its row in value_.
sorted_indices.copy_(linearIndex);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
const auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
// Sort; a stable sort is not required
// NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>());
}
TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel());
const int UNROLL = 4;
const int indices_per_block = 4;
// grid.x: groups of indices_per_block indices; grid.y: chunks of each
// slice (C10_WARP_SIZE*UNROLL elements per warp pass); grid.z: leading
// unindexed dimensions.
dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(C10_WARP_SIZE, indices_per_block);
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::Bool,
value_.scalar_type(), "indexing_backward", [&] {
hipLaunchKernelGGL(( indexing_backward_kernel<scalar_t, UNROLL>), dim3(grid), dim3(block), 0, stream,
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
value_.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore);
});
THCudaCheck(hipGetLastError());
// If we had to materialize a contiguous copy, permute it back into self.
if (permuted)
self.copy_(src_.permute(inversePerm));
}
}
REGISTER_CUDA_DISPATCH(index_put_accum_stub, &index_put_accum_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
// Validates dst/src/index shapes for an index operation along `dim` and
// returns the number of elements in one dst slice (the product of all dst
// sizes except `dim`).
//
// Checks performed:
//  - `index` must be 0-D or 1-D;
//  - `dim` must be a valid dimension of both `dst` and `src`;
//  - `index` must have one entry per indexed slice of `src`;
//  - dst and src slices must contain the same number of elements.
// A shape (rather than element-count) mismatch between slices only warns,
// for backward compatibility.
static ptrdiff_t getSliceSize(const Tensor & dst,
                              int dim,
                              const Tensor & index,
                              const Tensor & src)
{
  int dstDims = dst.dim();
  int srcDims = src.dim();

  TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");

  ptrdiff_t dstSliceSize = 1;
  TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
  for (int d = 0; d < dstDims; d++) {
    if (d != dim) {
      dstSliceSize *= dst.size(d);
    }
  }

  TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
  TORCH_CHECK(index.numel() == src.size(dim),
              "length of src.size[dim] is not equal to length of indices");

  ptrdiff_t srcSliceSize = 1;
  bool mismatch = false;

  if (dstDims != srcDims) mismatch = true;

  for (int d = 0; d < srcDims; d++) {
    if (d != dim) {
      srcSliceSize *= src.size(d);
      if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
    }
  }

  // Fix: TORCH_CHECK streams its arguments into the message instead of
  // applying printf-style formatting, so the former "(%ld vs %ld)"
  // placeholders appeared literally in the error text with the two sizes
  // appended afterwards. Stream the values directly.
  TORCH_CHECK(dstSliceSize == srcSliceSize,
              "Source/destination tensor have different slice sizes (",
              dstSliceSize, " vs ", srcSliceSize, ")");

  if (mismatch) {
    TORCH_WARN_ONCE(
        "Warning: source/destination slices have same size but different "
        "shape for an index operation. This behavior is deprecated.\n");
  }

  return dstSliceSize;
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
// dst[indices[i]] += src[i] along the add dimension, one gpuAtomicAdd per
// element. Every thread loops over ALL indices in order and grid-strides
// over the innerSize elements of a slice, so each index value is loaded
// only once per thread — efficient only when the number of indices is
// small (see indexAddLargeIndex for the high-index-count layout).
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Destination slice selected by this source slice (indices are 0-based).
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
// Atomic: several src slices may target the same dst slice.
gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
// dst[indices[i]] += src[i] along the add dimension, parallelized over the
// full (index, elementInSlice) product: each thread grid-strides over
// totalSize elements and decodes its own (srcIndex, elementInSlice) pair.
// IndexIsMajor selects the decode order (index-major vs element-major) to
// match the more contiguous traversal; see indexShouldBeMajor.
// Preferred over indexAddSmallIndex when the number of indices is large.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Destination slice selected by this source slice (indices are 0-based).
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
// Atomic: several src slices may target the same dst slice.
gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
                        int sliceDim)
{
  // Stride between adjacent slices (e.g. between element #0 of slice #100
  // and element #0 of slice #101).
  const unsigned int sliceStride = info.strides[sliceDim];
  for (int d = 0; d < info.dims; ++d) {
    const bool isInsideSlice = (d != sliceDim);
    if (isInsideSlice && info.sizes[d] > 1 && info.strides[d] < sliceStride) {
      // Some intra-slice dimension is more contiguous than the slice
      // dimension itself, so traverse in index-major order.
      return true;
    }
  }
  return false;
}
// In-place self.index_add_(dim, index, source) for CUDA/HIP tensors:
// self[index[i]] += source[i] along `dim`. Validates shapes/dtypes, then
// dispatches to indexAddSmallIndex (few indices: each thread re-reads the
// whole index list) or indexAddLargeIndex (many indices: parallelize over
// the index x slice-element product), specialized on collapsed dimension
// counts and 32- vs 64-bit index math. Returns `self`.
Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) {
dim = maybe_wrap_dim(dim, self.dim());
TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4};
checkAllSameGPU("index_add", {self_arg, index_arg, source_arg});
TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector");
TORCH_CHECK(index.scalar_type() == ScalarType::Long, "index_add_(): Expected dtype int64 for index");
TORCH_CHECK(self.scalar_type() == source.scalar_type(),
"index_add_(): self and source must have the same scalar type");
TORCH_CHECK(dim == 0 || dim < source.dim(),
"index_add_(): Indexing dim ", dim, " is out of bounds of tensor");
TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)),
"index_add_(): Number of indices should be equal to self.size(dim)");
// Scalars are treated as 1-d tensor
Tensor self_ = (self.dim() == 0) ? self.view(1) : self;
Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
TORCH_CHECK(self.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(source.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
// The `source` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of index we are choosing, which is the total size
// of the tensor `index`.
ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
ptrdiff_t sourceTotalSize = source.numel();
int64_t selfAddDimSize = self_.size(dim);
ptrdiff_t numIndex = index.numel();
// Empty slices: nothing to accumulate.
if (sliceSize == 0) {
return self;
}
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
bool indContig = index.is_contiguous();
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
// Launch helpers; selfInfo/sourceInfo/indexInfo/selfAddDim/sourceAddDim
// are captured from the enclosing dispatch lambda below.
#define SMALL_INDEX(TENSOR_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sliceSize, selfAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sourceTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndex, \
selfAddDimSize);
// 128-thread blocks; grid capped at 8 blocks per multiprocessor.
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(sourceTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(source) &&
cuda::detail::canUse32BitIndexMath(index)) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto sourceInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
auto indexInfo =
cuda::detail::getTensorInfo<int64_t, unsigned int>(index);
indexInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// index to choose
if (numIndex <= 16) {
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2);
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false);
}
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true);
}
}
});
} else {
// 64-bit index math fallback: generic (-1-dim) large-index kernel only.
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
cuda::detail::TensorInfo<int64_t, uint64_t> indexInfo =
cuda::detail::getTensorInfo<int64_t, uint64_t>(index);
indexInfo.collapseDims();
LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true);
});
}
return self;
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} //at
} //native
| 29b5b2870a7b70fd9f659d3ae6d70bcc398ebf89.cu | #include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCGeneral.h>
#include <THC/THCTensorSort.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include <THC/THCAtomics.cuh>
#include <c10/macros/Macros.h>
namespace {
// Accumulation kernel for index_put_ with accumulate=true.
// sorted_indices: destination slice ids sorted ascending.
// indices: for each sorted position, the original (pre-sort) position of
// that id — used to locate the matching row of grad_output.
// grad_weight is updated in place; runs of equal slice ids are processed
// by a single warp (the dedup check below), so the plain read-modify-write
// on grad_weight needs no atomics.
// SZ is the per-thread unroll factor (grain size) along the slice.
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
// Dedup: only the warp that sees the FIRST occurrence of an id works;
// it then also consumes the following duplicates in the do/while below.
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
// Load SZ elements per lane, accumulate in accscalar_t, store back.
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
  // Range checking is skipped in the backward pass: out-of-bounds indices
  // would already have raised an error during forward.
  if (check_range && index.numel() != 0) {
    const auto max_idx = index.max().item<int64_t>();
    const auto min_idx = index.min().item<int64_t>();
    if (max_idx >= dim_size) {
      AT_INDEX_ERROR("index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
    if (min_idx < -dim_size) {
      AT_INDEX_ERROR("index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
  }
  // Map negative indices into [0, dim_size).
  return index.remainder(dim_size);
}
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
  // Build the strides the tensor would have if it were contiguous: the
  // last dimension gets stride 1, and every earlier stride is the running
  // product of the sizes that follow it.
  const auto sizes = tensor.sizes();
  std::vector<int64_t> result(tensor.dim());
  result[tensor.dim() - 1] = 1;
  std::partial_sum(sizes.rbegin(), sizes.rend() - 1,
                   result.rbegin() + 1, std::multiplies<int64_t>());
  return result;
}
// Collapses the defined (broadcast-matched) indexing tensors into a single
// linear index over the indexed dimensions of `src`.
// Returns (linearIndex, nElemBefore, strideBefore, nElemAfter):
//  - nElemBefore / nElemAfter: element counts of the unindexed dimensions
//    before / after the indexed block;
//  - strideBefore: src's stride of the dimension immediately preceding the
//    first indexed dimension (0 when indexing starts at dim 0).
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& backend = src.type().backend();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being indexed.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (auto i = decltype(src.dim()){0}; i < src.dim(); i++) {
if (indices[i].defined()) {
// Cast index to the longType matching src's backend
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).toBackend(backend);
if (linearIndex.defined()) {
linearIndex += index;
} else {
// First indexed dimension: remember the stride just before it.
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
// Unindexed dimension after the indexed block.
emptyAfter++;
nElemAfter *= src.size(i);
} else {
// Unindexed dimension before the indexed block.
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
// Normalizes an arbitrary advanced-indexing spec into one flattened index
// tensor over `self`:
//  1. expand bool/byte masks into long index tensors,
//  2. broadcast all index tensors to a common shape,
//  3. pad with undefined tensors up to self.dim(),
//  4. if the indexed dims are not adjacent, transpose them to the front,
//  5. collapse them into a single linear index (computeLinearIndex).
// Returns (linearIndex, possibly-transposed self, nElemBefore, strideBefore,
// nElemAfter, inversePerm) where inversePerm undoes the transpose in step 4.
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, TensorList orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
namespace {
// Accumulating index_put_ (self[indices] += value) on the GPU.
// Strategy: flatten the indexed dims into a single linear index, sort the
// index values while carrying along their original positions, then launch
// indexing_backward_kernel, which assigns each run of equal indices to one
// warp so the read-modify-write on self needs no atomics.
// `unsafe` skips index range checking (used by the backward pass, where
// the forward has already validated the indices).
void index_put_accum_kernel(Tensor & self, TensorList indices, const Tensor & value, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
AT_INDEX_ERROR("too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
auto value_ = value.contiguous();
Tensor linearIndex, expandedValue, src;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.view(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
using device_ptr = thrust::device_ptr<int64_t>;
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Reduce per-element offsets to per-slice ids so duplicate targets
// compare equal after the sort.
linearIndex.div_(sliceSize);
{
// Sort the slice ids, keeping each id's original position in
// orig_indices so the kernel can find its row in value_.
sorted_indices.copy_(linearIndex);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
const auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
// Sort; a stable sort is not required
// NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>());
}
TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel());
const int UNROLL = 4;
const int indices_per_block = 4;
// grid.x: groups of indices_per_block indices; grid.y: chunks of each
// slice (C10_WARP_SIZE*UNROLL elements per warp pass); grid.z: leading
// unindexed dimensions.
dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
std::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(C10_WARP_SIZE, indices_per_block);
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::Bool,
value_.scalar_type(), "indexing_backward", [&] {
indexing_backward_kernel<scalar_t, UNROLL><<<grid, block, 0, stream>>>(
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
value_.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore);
});
THCudaCheck(cudaGetLastError());
// If we had to materialize a contiguous copy, permute it back into self.
if (permuted)
self.copy_(src_.permute(inversePerm));
}
}
REGISTER_CUDA_DISPATCH(index_put_accum_stub, &index_put_accum_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
// Validates dst/src/index shapes for an index operation along `dim` and
// returns the number of elements in one dst slice (the product of all dst
// sizes except `dim`).
//
// Checks performed:
//  - `index` must be 0-D or 1-D;
//  - `dim` must be a valid dimension of both `dst` and `src`;
//  - `index` must have one entry per indexed slice of `src`;
//  - dst and src slices must contain the same number of elements.
// A shape (rather than element-count) mismatch between slices only warns,
// for backward compatibility.
static ptrdiff_t getSliceSize(const Tensor & dst,
                              int dim,
                              const Tensor & index,
                              const Tensor & src)
{
  int dstDims = dst.dim();
  int srcDims = src.dim();

  TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");

  ptrdiff_t dstSliceSize = 1;
  TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
  for (int d = 0; d < dstDims; d++) {
    if (d != dim) {
      dstSliceSize *= dst.size(d);
    }
  }

  TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
  TORCH_CHECK(index.numel() == src.size(dim),
              "length of src.size[dim] is not equal to length of indices");

  ptrdiff_t srcSliceSize = 1;
  bool mismatch = false;

  if (dstDims != srcDims) mismatch = true;

  for (int d = 0; d < srcDims; d++) {
    if (d != dim) {
      srcSliceSize *= src.size(d);
      if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
    }
  }

  // Fix: TORCH_CHECK streams its arguments into the message instead of
  // applying printf-style formatting, so the former "(%ld vs %ld)"
  // placeholders appeared literally in the error text with the two sizes
  // appended afterwards. Stream the values directly.
  TORCH_CHECK(dstSliceSize == srcSliceSize,
              "Source/destination tensor have different slice sizes (",
              dstSliceSize, " vs ", srcSliceSize, ")");

  if (mismatch) {
    TORCH_WARN_ONCE(
        "Warning: source/destination slices have same size but different "
        "shape for an index operation. This behavior is deprecated.\n");
  }

  return dstSliceSize;
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
// dst[indices[i]] += src[i] along the add dimension, one gpuAtomicAdd per
// element. Every thread loops over ALL indices in order and grid-strides
// over the innerSize elements of a slice, so each index value is loaded
// only once per thread — efficient only when the number of indices is
// small (see indexAddLargeIndex for the high-index-count layout).
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Destination slice selected by this source slice (indices are 0-based).
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
// Atomic: several src slices may target the same dst slice.
gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
// dst[indices[i]] += src[i] along the add dimension, parallelized over the
// full (index, elementInSlice) product: each thread grid-strides over
// totalSize elements and decodes its own (srcIndex, elementInSlice) pair.
// IndexIsMajor selects the decode order (index-major vs element-major) to
// match the more contiguous traversal; see indexShouldBeMajor.
// Preferred over indexAddSmallIndex when the number of indices is large.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Destination slice selected by this source slice (indices are 0-based).
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
// Atomic: several src slices may target the same dst slice.
gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
// Decides between the "index-major" and "elementInSlice-major" kernels by
// comparing the stride between adjacent slices against the strides used
// *inside* each slice. Returns true when some other non-degenerate
// dimension has a smaller stride than the slice stride (index-major order
// then gives better memory access), false when the slice stride itself is
// the smallest.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
                        int sliceDim)
{
  // Stride between element #k of slice #i and element #k of slice #i+1.
  const unsigned int strideBetweenSlices = info.strides[sliceDim];
  bool innerStrideIsSmaller = false;
  for (int d = 0; d < info.dims; ++d) {
    if (d == sliceDim) {
      continue;  // only dimensions inside the slice matter
    }
    if (info.sizes[d] > 1 && info.strides[d] < strideBetweenSlices) {
      innerStrideIsSmaller = true;
      break;
    }
  }
  return innerStrideIsSmaller;
}
// In-place index_add for CUDA tensors: self[..., index[i], ...] += source[..., i, ...]
// along dimension `dim`. Validates shapes/dtypes, then dispatches to one of
// two kernels: indexAddSmallIndex (each block iterates over the few indices)
// when numIndex <= 16, otherwise indexAddLargeIndex (parallelism spread
// across all source elements). 32-bit indexing is used when every tensor
// fits; otherwise a generic 64-bit fallback is taken.
Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) {
  dim = maybe_wrap_dim(dim, self.dim());
  TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4};
  checkAllSameGPU("index_add", {self_arg, index_arg, source_arg});
  TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector");
  TORCH_CHECK(index.scalar_type() == ScalarType::Long, "index_add_(): Expected dtype int64 for index");
  TORCH_CHECK(self.scalar_type() == source.scalar_type(),
              "index_add_(): self and source must have the same scalar type");
  TORCH_CHECK(dim == 0 || dim < source.dim(),
              "index_add_(): Indexing dim ", dim, " is out of bounds of tensor");
  TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)),
              "index_add_(): Number of indices should be equal to self.size(dim)");
  // Scalars are treated as 1-d tensor
  Tensor self_ = (self.dim() == 0) ? self.view(1) : self;
  Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
  TORCH_CHECK(self.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
  TORCH_CHECK(source.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
  TORCH_CHECK(index.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
  // The `source` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of index we are choosing, which is the total size
  // of the tensor `index`.
  ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
  ptrdiff_t sourceTotalSize = source.numel();
  int64_t selfAddDimSize = self_.size(dim);
  ptrdiff_t numIndex = index.numel();
  // Nothing to add when slices are empty.
  if (sliceSize == 0) {
    return self;
  }
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  bool indContig = index.is_contiguous();
  int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
  // Launch helpers: grids are capped at 8 blocks per SM; block size is at
  // most 128 threads.
#define SMALL_INDEX(TENSOR_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM)     \
  indexAddSmallIndex<TENSOR_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>    \
    <<<smallIndexGrid, smallIndexBlock, 0, stream>>>(                     \
      selfInfo, sourceInfo, indexInfo,                                    \
      selfAddDim, sourceAddDim, sliceSize, selfAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE,                        \
                    SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR)  \
  indexAddLargeIndex<TENSOR_TYPE, TYPE,                       \
                     SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR> \
    <<<largeIndexGrid, largeIndexBlock, 0, stream>>>(         \
      selfInfo, sourceInfo, indexInfo,                        \
      selfAddDim, sourceAddDim, sourceTotalSize,              \
      (IDX_IS_MAJOR) ? sliceSize : numIndex,                  \
      selfAddDimSize);
  dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
  dim3 largeIndexGrid(std::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(std::min(sourceTotalSize, (ptrdiff_t)128));
  if (cuda::detail::canUse32BitIndexMath(self) &&
      cuda::detail::canUse32BitIndexMath(source) &&
      cuda::detail::canUse32BitIndexMath(index)) {
    AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "index_add", [&] {
      cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
          cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
      int selfAddDim = selfInfo.collapseDims(dim);
      selfInfo.reduceDim(selfAddDim);
      auto sourceInfo =
        cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
      int sourceAddDim = sourceInfo.collapseDims(dim);
      sourceInfo.reduceDim(sourceAddDim);
      auto indexInfo =
        cuda::detail::getTensorInfo<int64_t, unsigned int>(index);
      indexInfo.collapseDims();
      // A reasonable choice for when to have each thread iterate over
      // index to choose
      if (numIndex <= 16) {
        if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
          SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2);
        } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
          SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2);
        } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
          SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2);
        } else {
          SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1);
        }
      } else {
        // Pick the linear ordering that yields the better access pattern
        // for this layout (see indexShouldBeMajor).
        bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
        if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
          LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true);
        } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true);
          } else {
            LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false);
          }
        } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true);
          } else {
            LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false);
          }
        } else {
          LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true);
        }
      }
    });
  } else {
    // 64-bit indexing fallback: only the fully generic large-index kernel.
    AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "index_add", [&] {
      cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
        cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
      int selfAddDim = selfInfo.collapseDims(dim);
      selfInfo.reduceDim(selfAddDim);
      cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
        cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
      int sourceAddDim = sourceInfo.collapseDims(dim);
      sourceInfo.reduceDim(sourceAddDim);
      cuda::detail::TensorInfo<int64_t, uint64_t> indexInfo =
        cuda::detail::getTensorInfo<int64_t, uint64_t>(index);
      indexInfo.collapseDims();
      LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true);
    });
  }
  return self;
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} //at
} //native
|
b8a2375290fdb9da5ba0f48378beb2fc4bed309f.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2012 Christopher Lux <christopherlux@gmail.com>
// Distributed under the Modified BSD License, see license.txt.
#include "volume_ray_cast.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cutil/cutil_math.h>
#include <renderer/volume_uniform_data.h>
#define SCM_LDATA_CUDA_VIS_PROFILE_CLOCK 0
#define SCM_LDATA_CUDA_VIS_ITER_COUNT 0
#define SCM_LDATA_CUDA_VIS_DEBUG 0
// cuda globals
surface<void, cudaSurfaceType2D> out_image;
texture<unsigned char, hipTextureType3D, hipReadModeNormalizedFloat> volume_texture;
texture<uchar4, hipTextureType1D, hipReadModeNormalizedFloat> colormap_texture;
//texture<float, hipTextureType3D, hipReadModeElementType> volume_texture;
//texture<float4, hipTextureType1D, hipReadModeElementType> colormap_texture;
__device__ __constant__ volume_uniform_data uniform_data;
// helpers
// Component-wise min/max overloads for float3/float4 (built on the scalar
// min/max), used by the slab-based ray/box intersection below.
inline __device__ float4 min(float4 a, float4 b) { return make_float4(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z), min(a.w, b.w)); }
inline __device__ float4 max(float4 a, float4 b) { return make_float4(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z), max(a.w, b.w)); }
inline __device__ float3 min(float3 a, float3 b) { return make_float3(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z)); }
inline __device__ float3 max(float3 a, float3 b) { return make_float3(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z)); }
// Matrix-vector product: component i of the result is dot(v, m.rows[i]).
inline
__device__
float4
mul_matrix4(const float4x4 m, const float4 v)
{
    const float c0 = dot(v, m.rows[0]);
    const float c1 = dot(v, m.rows[1]);
    const float c2 = dot(v, m.rows[2]);
    const float c3 = dot(v, m.rows[3]);
    return make_float4(c0, c1, c2, c3);
}
// A ray with a cached per-component reciprocal of its direction; the
// reciprocal lets ray_box_intersection() use multiplies instead of divides.
struct ray
{
    float3 origin;        // ray start point
    float3 direction;     // ray direction (normalized by make_ray)
    float3 direction_rec; // 1.0 / direction, component-wise
}; // struct ray
// Builds the eye ray for screen pixel `spos` on a viewport of size `ssize`:
// the pixel is mapped to normalized device coordinates on the near plane
// (z = -1), unprojected to object space via the inverse MVP matrix from
// uniform_data, and the ray runs from the object-space camera position
// through that point.
inline
__device__
void
make_ray(struct ray*const r,
         const int2 spos,
         const int2 ssize)
{
    // pixel -> NDC in [-1, 1] on the near plane
    float4 spos_nrm = make_float4(((float)spos.x / (float)ssize.x) * 2.0 - 1.0,
                                  ((float)spos.y / (float)ssize.y) * 2.0 - 1.0,
                                  -1.0,
                                  1.0);
    //float4 spos_os = mul_matrix4_ptr(&(vdata->_mvp_matrix_inverse), &spos_nrm);
    float4 spos_os = mul_matrix4(uniform_data._mvp_matrix_inverse, spos_nrm);
    // perspective divide after the inverse projection
    spos_os /= spos_os.w;
    r->origin = make_float3(uniform_data._os_camera_position);//.xyz;
    r->direction = normalize(make_float3(spos_os) - r->origin);//vdata->_mvp_matrix_inverse.s012;//spos_os.xyz;//
    r->direction_rec = 1.0 / r->direction;
}
// Slab-method ray/AABB intersection. On a hit, *tmin and *tmax receive the
// entry and exit ray parameters. Returns true only when the box is hit and
// lies entirely in front of the ray origin (*tmin > 0); a ray starting
// inside the box therefore reports a miss.
bool
__device__
ray_box_intersection(const struct ray*const r,
                     float3 bbmin,
                     float3 bbmax,
                     float* tmin,
                     float* tmax)
{
#if 1
    // compute intersection of ray with all six bbox planes
    float3 tbot = r->direction_rec * (bbmin - r->origin);
    float3 ttop = r->direction_rec * (bbmax - r->origin);
    // re-order intersections to find smallest and largest on each axis
    float3 tmin_a = min(ttop, tbot);
    float3 tmax_a = max(ttop, tbot);
    // find the largest tmin and the smallest tmax
    // (.x appears twice in each reduction; the result is still the
    // max/min over all three components)
    *tmin = max(max(tmin_a.x, tmin_a.y), max(tmin_a.x, tmin_a.z));
    *tmax = min(min(tmax_a.x, tmax_a.y), min(tmax_a.x, tmax_a.z));
#else
    // scalar per-axis variant of the same slab test (kept for reference)
    float l1 = (bbmin.x - r->origin.x) * r->direction_rec.x;
    float l2 = (bbmax.x - r->origin.x) * r->direction_rec.x;
    *tmin = min(l1,l2);
    *tmax = max(l1,l2);
    l1 = (bbmin.y - r->origin.y) * r->direction_rec.y;
    l2 = (bbmax.y - r->origin.y) * r->direction_rec.y;
    *tmin = max(min(l1,l2), *tmin);
    *tmax = min(max(l1,l2), *tmax);
    l1 = (bbmin.z - r->origin.z) * r->direction_rec.z;
    l2 = (bbmax.z - r->origin.z) * r->direction_rec.z;
    *tmin = max(min(l1,l2), *tmin);
    *tmax = min(max(l1,l2), *tmax);
    //return ((lmax > 0.f) & (lmax >= lmin));
    //return ((lmax > 0.f) & (lmax > lmin));
#endif
    return ((*tmin > 0.0) && (*tmax > *tmin));
}
// Squared Euclidean distance between points a and b; avoids the sqrt that
// a full length() would need, since callers only compare distances.
inline
__device__
float
length_sqr(const float3 a, const float3 b)
{
    const float3 diff = b - a;
    return dot(diff, diff);
}
// Per-pixel volume ray casting kernel: one thread per output pixel.
// Each thread builds an eye ray, intersects it with the volume bounding
// box, and front-to-back composites color-mapped volume samples until the
// ray exits or the accumulated opacity saturates; the 8-bit RGBA result is
// written to the out_image surface. The SCM_LDATA_* switches select
// clock-based profiling, iteration-count, or ray-exit debug output instead.
extern "C"
void
__global__
main_vrc(unsigned out_image_w, unsigned out_image_h)
{
#if SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
    clock_t thread_start;
    clock_t thread_stop;
#endif // SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
    int2 osize = make_int2(out_image_w, out_image_h);
    int2 opos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
                          blockIdx.y * blockDim.y + threadIdx.y);
    // guard: the launch grid may overshoot the image extents
    if (opos.x < osize.x && opos.y < osize.y) {
#if SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
        thread_start = clock();
#endif // SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
        float4 out_color;
        struct ray cur_ray;
        make_ray(&cur_ray, opos, osize);
        float tmin = 0.0;
        float tmax = 0.0;
        if (ray_box_intersection(&cur_ray, make_float3(0.0), make_float3(uniform_data._volume_extends), &tmin, &tmax)) {
            float3 cam_pos = make_float3(uniform_data._os_camera_position);
            float3 ray_entry = tmin * cur_ray.direction + cur_ray.origin;
            float3 ray_exit = tmax * cur_ray.direction + cur_ray.origin;
            float3 ray_increment = cur_ray.direction * uniform_data._sampling_distance.x;
            float3 sampling_pos = ray_entry + ray_increment; // test, increment just to be sure we are in the volume
            float3 to_tex = make_float3(uniform_data._scale_obj_to_tex);
            // march by comparing squared camera distances (cheaper than sqrt)
            float smpl_sqr_dist = length_sqr(cam_pos, sampling_pos);
            float exit_sqr_dist = length_sqr(cam_pos, ray_exit);
            float4 dst = make_float4(0.0f);
            float opc = uniform_data._sampling_distance.y;
            int loop_count = 0;
#if SCM_LDATA_CUDA_VIS_DEBUG == 1
            out_color = make_float4(ray_exit, 1.0);
#else // SCM_LDATA_CUDA_VIS_DEBUG == 1
            // front-to-back compositing with early ray termination (~0.99 alpha)
            while ((exit_sqr_dist - smpl_sqr_dist) > 0.0f && dst.w < 0.99f) {
                ++loop_count;
                float3 tc_vol = sampling_pos * to_tex;
                float s = tex3D(volume_texture, tc_vol.x, tc_vol.y, tc_vol.z);// texture(volume_raw, sampling_pos * volume_data.scale_obj_to_tex.xyz).r;
                float4 src = tex1D(colormap_texture, s);
                //float4 src = read_imagef(volume_image, vol_smpl, tc_vol).xxxx;//(float4)(s);//texture(color_map, s);
                //float4 src = (float4)(s, s, s, 0.1);
                // increment ray
                sampling_pos += ray_increment;
                smpl_sqr_dist = length_sqr(cam_pos, sampling_pos);
                //float3 d = cam_pos - sampling_pos;
                //smpl_sqr_dist = dot(d, d);
                //inside_volume = inside_volume_bounds(sampling_pos) && (dst.a < 0.99);
                // opacity correction
                src.w = 1.0f - pow(1.0f - src.w, opc);
                // compositing
                float omda_sa = (1.0 - dst.w) * src.w;
                dst.x += omda_sa * src.x;
                dst.y += omda_sa * src.y;
                dst.z += omda_sa * src.z;
                dst.w += omda_sa;
            }
#if SCM_LDATA_CUDA_VIS_ITER_COUNT == 1
            out_color = tex1D(colormap_texture, (float)(loop_count) / 1500.0f);
#else // SCM_LDATA_CUDA_VIS_ITER_COUNT == 1
            out_color = dst;
#endif // SCM_LDATA_CUDA_VIS_ITER_COUNT == 1
#endif // SCM_LDATA_CUDA_VIS_DEBUG == 1
        }
        else {
            // ray misses the volume: transparent black background
            out_color = make_float4(0.0);
        }
#if SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
        thread_stop = clock();
        out_color = tex1D(colormap_texture, (float)(thread_stop - thread_start) / 3000000.0f);
#endif // SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
        // pack to 8-bit RGBA and write to the output surface
        uchar4 out_col_data;
        out_col_data.x = (unsigned char)(out_color.x * 255.0f);
        out_col_data.y = (unsigned char)(out_color.y * 255.0f);
        out_col_data.z = (unsigned char)(out_color.z * 255.0f);
        out_col_data.w = (unsigned char)(out_color.w * 255.0f);
        surf2Dwrite(out_col_data, out_image, opos.x * sizeof(uchar4), opos.y);
    }
}
// Binds the mapped graphics resources to this module's surface/texture
// references, configures sampling state, and launches the main_vrc ray
// casting kernel over an out_image_w x out_image_h pixel grid on the
// given stream.
// NOTE(review): the hip* return codes collected in cu_err are never
// checked here; a failed mapping/bind only shows up as a corrupted image.
extern "C"
void
startup_ray_cast_kernel(unsigned out_image_w, unsigned out_image_h,
                        cudaGraphicsResource_t output_image_res,
                        cudaGraphicsResource_t volume_image_res,
                        cudaGraphicsResource_t cmap_image_res,
                        const thrust::device_ptr<volume_uniform_data>& uniform_data,
                        hipStream_t cuda_stream)
{
    hipError_t cu_err = hipSuccess;
    // output image
    hipArray* cu_oi_array = 0;
    cu_err = hipGraphicsSubResourceGetMappedArray(&cu_oi_array, output_image_res, 0, 0);
    cu_err = hipBindSurfaceToArray(out_image, cu_oi_array);
    // volume texture: clamped, linearly filtered, normalized coordinates
    volume_texture.addressMode[0] = hipAddressModeClamp;
    volume_texture.addressMode[1] = hipAddressModeClamp;
    volume_texture.addressMode[2] = hipAddressModeClamp;
    volume_texture.filterMode     = hipFilterModeLinear;
    volume_texture.normalized     = true;
    hipArray* cu_vi_array = 0;
    cu_err = hipGraphicsSubResourceGetMappedArray(&cu_vi_array, volume_image_res, 0, 0);
    cu_err = hipBindTextureToArray(volume_texture, cu_vi_array);
    // color map texture
    colormap_texture.addressMode[0] = hipAddressModeClamp;
    colormap_texture.filterMode     = hipFilterModeLinear;
    colormap_texture.normalized     = true;
    hipArray* cu_ci_array = 0;
    cu_err = hipGraphicsSubResourceGetMappedArray(&cu_ci_array, cmap_image_res, 0, 0);
    cu_err = hipBindTextureToArray(colormap_texture, cu_ci_array);
    // uniform data (raw pointer currently unused; the kernel reads the
    // __constant__ copy instead -- see the commented-out launch argument)
    volume_uniform_data* uniform_data_raw = uniform_data.get();
    // calculate the grid and block sizes
    // NOTE(review): looking up attributes by name string is deprecated and
    // the result is unused; kept only for parity with the original code.
    hipFuncAttributes cu_krnl_attr;
    cu_err = hipFuncGetAttributes(&cu_krnl_attr, "main_vrc");
    dim3 vsize = dim3(out_image_w, out_image_h, 1);
    //dim3 bsize = dim3(32, cu_krnl_attr.maxThreadsPerBlock / 32, 1);
    dim3 bsize = dim3(8, 24, 1);
    // BUG FIX: the y grid dimension previously fell back to vsize.x / bsize.x
    // when the height divided evenly, launching a wrong-sized grid for any
    // non-square image. Use a plain ceiling division per axis instead.
    dim3 grid_size((vsize.x + bsize.x - 1) / bsize.x,
                   (vsize.y + bsize.y - 1) / bsize.y,
                   1);
    dim3 block_size(bsize.x, bsize.y, 1);
    hipLaunchKernelGGL(( main_vrc), dim3(grid_size), dim3(block_size), 0, cuda_stream, out_image_w, out_image_h);//, uniform_data_raw);
}
| b8a2375290fdb9da5ba0f48378beb2fc4bed309f.cu |
// Copyright (c) 2012 Christopher Lux <christopherlux@gmail.com>
// Distributed under the Modified BSD License, see license.txt.
#include "volume_ray_cast.h"
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cutil/cutil_math.h>
#include <renderer/volume_uniform_data.h>
#define SCM_LDATA_CUDA_VIS_PROFILE_CLOCK 0
#define SCM_LDATA_CUDA_VIS_ITER_COUNT 0
#define SCM_LDATA_CUDA_VIS_DEBUG 0
// cuda globals
surface<void, cudaSurfaceType2D> out_image;
texture<unsigned char, cudaTextureType3D, cudaReadModeNormalizedFloat> volume_texture;
texture<uchar4, cudaTextureType1D, cudaReadModeNormalizedFloat> colormap_texture;
//texture<float, cudaTextureType3D, cudaReadModeElementType> volume_texture;
//texture<float4, cudaTextureType1D, cudaReadModeElementType> colormap_texture;
__device__ __constant__ volume_uniform_data uniform_data;
// helpers
// Component-wise min/max overloads for float3/float4 (built on the scalar
// min/max), used by the slab-based ray/box intersection below.
inline __device__ float4 min(float4 a, float4 b) { return make_float4(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z), min(a.w, b.w)); }
inline __device__ float4 max(float4 a, float4 b) { return make_float4(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z), max(a.w, b.w)); }
inline __device__ float3 min(float3 a, float3 b) { return make_float3(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z)); }
inline __device__ float3 max(float3 a, float3 b) { return make_float3(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z)); }
// Matrix-vector product: component i of the result is dot(v, m.rows[i]).
inline
__device__
float4
mul_matrix4(const float4x4 m, const float4 v)
{
    const float c0 = dot(v, m.rows[0]);
    const float c1 = dot(v, m.rows[1]);
    const float c2 = dot(v, m.rows[2]);
    const float c3 = dot(v, m.rows[3]);
    return make_float4(c0, c1, c2, c3);
}
// A ray with a cached per-component reciprocal of its direction; the
// reciprocal lets ray_box_intersection() use multiplies instead of divides.
struct ray
{
    float3 origin;        // ray start point
    float3 direction;     // ray direction (normalized by make_ray)
    float3 direction_rec; // 1.0 / direction, component-wise
}; // struct ray
// Builds the eye ray for screen pixel `spos` on a viewport of size `ssize`:
// the pixel is mapped to normalized device coordinates on the near plane
// (z = -1), unprojected to object space via the inverse MVP matrix from
// uniform_data, and the ray runs from the object-space camera position
// through that point.
inline
__device__
void
make_ray(struct ray*const r,
         const int2 spos,
         const int2 ssize)
{
    // pixel -> NDC in [-1, 1] on the near plane
    float4 spos_nrm = make_float4(((float)spos.x / (float)ssize.x) * 2.0 - 1.0,
                                  ((float)spos.y / (float)ssize.y) * 2.0 - 1.0,
                                  -1.0,
                                  1.0);
    //float4 spos_os = mul_matrix4_ptr(&(vdata->_mvp_matrix_inverse), &spos_nrm);
    float4 spos_os = mul_matrix4(uniform_data._mvp_matrix_inverse, spos_nrm);
    // perspective divide after the inverse projection
    spos_os /= spos_os.w;
    r->origin = make_float3(uniform_data._os_camera_position);//.xyz;
    r->direction = normalize(make_float3(spos_os) - r->origin);//vdata->_mvp_matrix_inverse.s012;//spos_os.xyz;//
    r->direction_rec = 1.0 / r->direction;
}
// Slab-method ray/AABB intersection. On a hit, *tmin and *tmax receive the
// entry and exit ray parameters. Returns true only when the box is hit and
// lies entirely in front of the ray origin (*tmin > 0); a ray starting
// inside the box therefore reports a miss.
bool
__device__
ray_box_intersection(const struct ray*const r,
                     float3 bbmin,
                     float3 bbmax,
                     float* tmin,
                     float* tmax)
{
#if 1
    // compute intersection of ray with all six bbox planes
    float3 tbot = r->direction_rec * (bbmin - r->origin);
    float3 ttop = r->direction_rec * (bbmax - r->origin);
    // re-order intersections to find smallest and largest on each axis
    float3 tmin_a = min(ttop, tbot);
    float3 tmax_a = max(ttop, tbot);
    // find the largest tmin and the smallest tmax
    // (.x appears twice in each reduction; the result is still the
    // max/min over all three components)
    *tmin = max(max(tmin_a.x, tmin_a.y), max(tmin_a.x, tmin_a.z));
    *tmax = min(min(tmax_a.x, tmax_a.y), min(tmax_a.x, tmax_a.z));
#else
    // scalar per-axis variant of the same slab test (kept for reference)
    float l1 = (bbmin.x - r->origin.x) * r->direction_rec.x;
    float l2 = (bbmax.x - r->origin.x) * r->direction_rec.x;
    *tmin = min(l1,l2);
    *tmax = max(l1,l2);
    l1 = (bbmin.y - r->origin.y) * r->direction_rec.y;
    l2 = (bbmax.y - r->origin.y) * r->direction_rec.y;
    *tmin = max(min(l1,l2), *tmin);
    *tmax = min(max(l1,l2), *tmax);
    l1 = (bbmin.z - r->origin.z) * r->direction_rec.z;
    l2 = (bbmax.z - r->origin.z) * r->direction_rec.z;
    *tmin = max(min(l1,l2), *tmin);
    *tmax = min(max(l1,l2), *tmax);
    //return ((lmax > 0.f) & (lmax >= lmin));
    //return ((lmax > 0.f) & (lmax > lmin));
#endif
    return ((*tmin > 0.0) && (*tmax > *tmin));
}
// Squared Euclidean distance between points a and b; avoids the sqrt that
// a full length() would need, since callers only compare distances.
inline
__device__
float
length_sqr(const float3 a, const float3 b)
{
    const float3 diff = b - a;
    return dot(diff, diff);
}
// Per-pixel volume ray casting kernel: one thread per output pixel.
// Each thread builds an eye ray, intersects it with the volume bounding
// box, and front-to-back composites color-mapped volume samples until the
// ray exits or the accumulated opacity saturates; the 8-bit RGBA result is
// written to the out_image surface. The SCM_LDATA_* switches select
// clock-based profiling, iteration-count, or ray-exit debug output instead.
extern "C"
void
__global__
main_vrc(unsigned out_image_w, unsigned out_image_h)
{
#if SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
    clock_t thread_start;
    clock_t thread_stop;
#endif // SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
    int2 osize = make_int2(out_image_w, out_image_h);
    int2 opos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
                          blockIdx.y * blockDim.y + threadIdx.y);
    // guard: the launch grid may overshoot the image extents
    if (opos.x < osize.x && opos.y < osize.y) {
#if SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
        thread_start = clock();
#endif // SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
        float4 out_color;
        struct ray cur_ray;
        make_ray(&cur_ray, opos, osize);
        float tmin = 0.0;
        float tmax = 0.0;
        if (ray_box_intersection(&cur_ray, make_float3(0.0), make_float3(uniform_data._volume_extends), &tmin, &tmax)) {
            float3 cam_pos = make_float3(uniform_data._os_camera_position);
            float3 ray_entry = tmin * cur_ray.direction + cur_ray.origin;
            float3 ray_exit = tmax * cur_ray.direction + cur_ray.origin;
            float3 ray_increment = cur_ray.direction * uniform_data._sampling_distance.x;
            float3 sampling_pos = ray_entry + ray_increment; // test, increment just to be sure we are in the volume
            float3 to_tex = make_float3(uniform_data._scale_obj_to_tex);
            // march by comparing squared camera distances (cheaper than sqrt)
            float smpl_sqr_dist = length_sqr(cam_pos, sampling_pos);
            float exit_sqr_dist = length_sqr(cam_pos, ray_exit);
            float4 dst = make_float4(0.0f);
            float opc = uniform_data._sampling_distance.y;
            int loop_count = 0;
#if SCM_LDATA_CUDA_VIS_DEBUG == 1
            out_color = make_float4(ray_exit, 1.0);
#else // SCM_LDATA_CUDA_VIS_DEBUG == 1
            // front-to-back compositing with early ray termination (~0.99 alpha)
            while ((exit_sqr_dist - smpl_sqr_dist) > 0.0f && dst.w < 0.99f) {
                ++loop_count;
                float3 tc_vol = sampling_pos * to_tex;
                float s = tex3D(volume_texture, tc_vol.x, tc_vol.y, tc_vol.z);// texture(volume_raw, sampling_pos * volume_data.scale_obj_to_tex.xyz).r;
                float4 src = tex1D(colormap_texture, s);
                //float4 src = read_imagef(volume_image, vol_smpl, tc_vol).xxxx;//(float4)(s);//texture(color_map, s);
                //float4 src = (float4)(s, s, s, 0.1);
                // increment ray
                sampling_pos += ray_increment;
                smpl_sqr_dist = length_sqr(cam_pos, sampling_pos);
                //float3 d = cam_pos - sampling_pos;
                //smpl_sqr_dist = dot(d, d);
                //inside_volume = inside_volume_bounds(sampling_pos) && (dst.a < 0.99);
                // opacity correction
                src.w = 1.0f - pow(1.0f - src.w, opc);
                // compositing
                float omda_sa = (1.0 - dst.w) * src.w;
                dst.x += omda_sa * src.x;
                dst.y += omda_sa * src.y;
                dst.z += omda_sa * src.z;
                dst.w += omda_sa;
            }
#if SCM_LDATA_CUDA_VIS_ITER_COUNT == 1
            out_color = tex1D(colormap_texture, (float)(loop_count) / 1500.0f);
#else // SCM_LDATA_CUDA_VIS_ITER_COUNT == 1
            out_color = dst;
#endif // SCM_LDATA_CUDA_VIS_ITER_COUNT == 1
#endif // SCM_LDATA_CUDA_VIS_DEBUG == 1
        }
        else {
            // ray misses the volume: transparent black background
            out_color = make_float4(0.0);
        }
#if SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
        thread_stop = clock();
        out_color = tex1D(colormap_texture, (float)(thread_stop - thread_start) / 3000000.0f);
#endif // SCM_LDATA_CUDA_VIS_PROFILE_CLOCK == 1
        // pack to 8-bit RGBA and write to the output surface
        uchar4 out_col_data;
        out_col_data.x = (unsigned char)(out_color.x * 255.0f);
        out_col_data.y = (unsigned char)(out_color.y * 255.0f);
        out_col_data.z = (unsigned char)(out_color.z * 255.0f);
        out_col_data.w = (unsigned char)(out_color.w * 255.0f);
        surf2Dwrite(out_col_data, out_image, opos.x * sizeof(uchar4), opos.y);
    }
}
// Binds the mapped graphics resources to this module's surface/texture
// references, configures sampling state, and launches the main_vrc ray
// casting kernel over an out_image_w x out_image_h pixel grid on the
// given stream.
// NOTE(review): the cuda* return codes collected in cu_err are never
// checked here; a failed mapping/bind only shows up as a corrupted image.
extern "C"
void
startup_ray_cast_kernel(unsigned out_image_w, unsigned out_image_h,
                        cudaGraphicsResource_t output_image_res,
                        cudaGraphicsResource_t volume_image_res,
                        cudaGraphicsResource_t cmap_image_res,
                        const thrust::device_ptr<volume_uniform_data>& uniform_data,
                        cudaStream_t cuda_stream)
{
    cudaError cu_err = cudaSuccess;
    // output image
    cudaArray* cu_oi_array = 0;
    cu_err = cudaGraphicsSubResourceGetMappedArray(&cu_oi_array, output_image_res, 0, 0);
    cu_err = cudaBindSurfaceToArray(out_image, cu_oi_array);
    // volume texture: clamped, linearly filtered, normalized coordinates
    volume_texture.addressMode[0] = cudaAddressModeClamp;
    volume_texture.addressMode[1] = cudaAddressModeClamp;
    volume_texture.addressMode[2] = cudaAddressModeClamp;
    volume_texture.filterMode     = cudaFilterModeLinear;
    volume_texture.normalized     = true;
    cudaArray* cu_vi_array = 0;
    cu_err = cudaGraphicsSubResourceGetMappedArray(&cu_vi_array, volume_image_res, 0, 0);
    cu_err = cudaBindTextureToArray(volume_texture, cu_vi_array);
    // color map texture
    colormap_texture.addressMode[0] = cudaAddressModeClamp;
    colormap_texture.filterMode     = cudaFilterModeLinear;
    colormap_texture.normalized     = true;
    cudaArray* cu_ci_array = 0;
    cu_err = cudaGraphicsSubResourceGetMappedArray(&cu_ci_array, cmap_image_res, 0, 0);
    cu_err = cudaBindTextureToArray(colormap_texture, cu_ci_array);
    // uniform data (raw pointer currently unused; the kernel reads the
    // __constant__ copy instead -- see the commented-out launch argument)
    volume_uniform_data* uniform_data_raw = uniform_data.get();
    // calculate the grid and block sizes
    // NOTE(review): looking up attributes by name string is deprecated and
    // the result is unused; kept only for parity with the original code.
    cudaFuncAttributes cu_krnl_attr;
    cu_err = cudaFuncGetAttributes(&cu_krnl_attr, "main_vrc");
    dim3 vsize = dim3(out_image_w, out_image_h, 1);
    //dim3 bsize = dim3(32, cu_krnl_attr.maxThreadsPerBlock / 32, 1);
    dim3 bsize = dim3(8, 24, 1);
    // BUG FIX: the y grid dimension previously fell back to vsize.x / bsize.x
    // when the height divided evenly, launching a wrong-sized grid for any
    // non-square image. Use a plain ceiling division per axis instead.
    dim3 grid_size((vsize.x + bsize.x - 1) / bsize.x,
                   (vsize.y + bsize.y - 1) / bsize.y,
                   1);
    dim3 block_size(bsize.x, bsize.y, 1);
    main_vrc<<<grid_size, block_size, 0, cuda_stream>>>(out_image_w, out_image_h);//, uniform_data_raw);
}
|
5145779058440cc6a6dc3740a41713c41e17eb3b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <gameOfLife.h>
#include "aux.h"
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
// Calculate number of blocks
// Computes the 2-D launch grid that covers a w x h board with one thread
// per cell: each dimension is rounded up to a whole number of blocks.
dim3 get_numBlocks(size_t w, size_t h, dim3 threadsPerBlock) {
    size_t blocksX = (w + threadsPerBlock.x - 1) / threadsPerBlock.x;
    size_t blocksY = (h + threadsPerBlock.y - 1) / threadsPerBlock.y;
    return dim3(blocksX, blocksY);
}
__global__ void gameOfLifeKernel(unsigned char* d_src, unsigned char* d_dst, const size_t width, const size_t height) {
    /**
     * One Game of Life iteration: reads the board at time 't' from d_src
     * (0 = dead, 1 = alive) and writes the board at time 't + 1' to d_dst,
     * one thread per cell. Cells outside the board count as dead.
     *
     * BUG FIX: a dead cell that stays dead previously never wrote
     * d_dst[ind], so the output depended on d_dst's prior contents
     * (wrong under ping-pong buffering). Every in-bounds cell is now
     * written exactly once.
     */
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    // Threads launched beyond the board do nothing.
    if (x >= (int)width || y >= (int)height) {
        return;
    }
    size_t ind = (size_t)x + (size_t)y * width;
    // Count the live neighbours of (x, y), excluding the cell itself.
    int neighbours = 0;
    for (int dx = -1; dx <= 1; dx++) {
        for (int dy = -1; dy <= 1; dy++) {
            if (dx == 0 && dy == 0) continue; // skip the cell itself
            int nx = x + dx;
            int ny = y + dy;
            if (nx >= 0 && nx < (int)width && ny >= 0 && ny < (int)height) {
                neighbours += d_src[(size_t)nx + (size_t)ny * width];
            }
        }
    }
    // Conway's rules: a live cell survives with 2 or 3 neighbours; a dead
    // cell is born with exactly 3; every other cell is dead at t + 1.
    if (d_src[ind] == 1) {
        d_dst[ind] = (neighbours == 2 || neighbours == 3) ? 1 : 0;
    } else {
        d_dst[ind] = (neighbours == 3) ? 1 : 0;
    }
}
void runGameOfLifeIteration(unsigned char* d_src, unsigned char* d_dst, const size_t width, const size_t height) {
    /**
     * Launches one iteration of the game of life over a width x height
     * board: BLOCK_SIZE_X x BLOCK_SIZE_Y threads per block, grid rounded
     * up by get_numBlocks so every cell gets a thread.
     *
     * NOTE(review): the launch is asynchronous and unchecked -- the caller
     * must synchronize before reading d_dst and should check for launch
     * errors (e.g. hipGetLastError).
     */
    dim3 block = dim3(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1);
    dim3 grid = get_numBlocks(width, height, block);
    hipLaunchKernelGGL(( gameOfLifeKernel), dim3(grid), dim3(block), 0, 0, d_src, d_dst, width, height);
}
| 5145779058440cc6a6dc3740a41713c41e17eb3b.cu | #include <cuda_runtime.h>
#include <gameOfLife.h>
#include "aux.h"
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
// Calculate number of blocks
// Computes the 2-D launch grid that covers a w x h board with one thread
// per cell: each dimension is rounded up to a whole number of blocks.
dim3 get_numBlocks(size_t w, size_t h, dim3 threadsPerBlock) {
    size_t blocksX = (w + threadsPerBlock.x - 1) / threadsPerBlock.x;
    size_t blocksY = (h + threadsPerBlock.y - 1) / threadsPerBlock.y;
    return dim3(blocksX, blocksY);
}
__global__ void gameOfLifeKernel(unsigned char* d_src, unsigned char* d_dst, const size_t width, const size_t height) {
    /**
     * One Game of Life iteration: reads the board at time 't' from d_src
     * (0 = dead, 1 = alive) and writes the board at time 't + 1' to d_dst,
     * one thread per cell. Cells outside the board count as dead.
     *
     * BUG FIX: a dead cell that stays dead previously never wrote
     * d_dst[ind], so the output depended on d_dst's prior contents
     * (wrong under ping-pong buffering). Every in-bounds cell is now
     * written exactly once.
     */
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    // Threads launched beyond the board do nothing.
    if (x >= (int)width || y >= (int)height) {
        return;
    }
    size_t ind = (size_t)x + (size_t)y * width;
    // Count the live neighbours of (x, y), excluding the cell itself.
    int neighbours = 0;
    for (int dx = -1; dx <= 1; dx++) {
        for (int dy = -1; dy <= 1; dy++) {
            if (dx == 0 && dy == 0) continue; // skip the cell itself
            int nx = x + dx;
            int ny = y + dy;
            if (nx >= 0 && nx < (int)width && ny >= 0 && ny < (int)height) {
                neighbours += d_src[(size_t)nx + (size_t)ny * width];
            }
        }
    }
    // Conway's rules: a live cell survives with 2 or 3 neighbours; a dead
    // cell is born with exactly 3; every other cell is dead at t + 1.
    if (d_src[ind] == 1) {
        d_dst[ind] = (neighbours == 2 || neighbours == 3) ? 1 : 0;
    } else {
        d_dst[ind] = (neighbours == 3) ? 1 : 0;
    }
}
void runGameOfLifeIteration(unsigned char* d_src, unsigned char* d_dst, const size_t width, const size_t height) {
    /**
     * Launches one iteration of the game of life over a width x height
     * board: BLOCK_SIZE_X x BLOCK_SIZE_Y threads per block, grid rounded
     * up by get_numBlocks so every cell gets a thread.
     *
     * NOTE(review): the launch is asynchronous and unchecked -- the caller
     * must synchronize before reading d_dst and should check for launch
     * errors (e.g. cudaGetLastError).
     */
    dim3 block = dim3(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1);
    dim3 grid = get_numBlocks(width, height, block);
    gameOfLifeKernel<<<grid, block>>>(d_src, d_dst, width, height);
}
|
e12fd399827ae7a1deaafecf41860940ab7f0b4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include "error_check.h"
// Demo kernel: scalar argument passed by value -- every thread prints its
// threadIdx.x together with the same copy of x.
__global__ void func1(int x)
{
    int tid = threadIdx.x;
    printf("thread: %d, parameter:%d \n", tid, x);
}
// Demo kernel: dereferences its pointer argument on the device, so x must
// point to device-accessible memory.
// NOTE(review): main() below passes the address of a host stack variable
// here -- that is an invalid device access; allocate with hipMalloc and
// copy the value over instead.
__global__ void func2(int *x)
{
    int tid = threadIdx.x;
    printf("thread: %d, parameter:%d \n", tid, *x);
}
// Demo kernel: indexes into a device array with threadIdx.x.
// NOTE(review): the length parameter n is unused and x[tid] is not bounds
// checked -- safe only while blockDim.x <= n; also main() below passes a
// host stack array here, which is an invalid device access.
__global__ void func3(int x[], int n)
{
    int tid = threadIdx.x;
    printf("thread: %d, parameter:%d \n", tid, x[tid]);
}
// Driver for the argument-passing demo kernels: 2 blocks x 2 threads each.
int main(){
    const int gx = 2, bx = 2;
    int a = 42, aa = 88;
    int *b = &aa;
    int c[] = {1, 1, 2, 3, 5, 8};
    // Scalar pass-by-value: the host value can be handed straight to the kernel.
    hipLaunchKernelGGL(( func1), dim3(gx), dim3(bx), 0, 0, a);
    CHECK(hipDeviceSynchronize());
    CHECK(hipGetLastError());
    // BUG FIX: func2/func3 dereference their pointer arguments on the
    // device, so the data must live in device memory. Previously the host
    // addresses b and c were passed directly, which is an invalid access.
    int *d_b = NULL;
    int *d_c = NULL;
    CHECK(hipMalloc((void **)&d_b, sizeof(int)));
    CHECK(hipMalloc((void **)&d_c, sizeof(c)));
    CHECK(hipMemcpy(d_b, b, sizeof(int), hipMemcpyHostToDevice));
    CHECK(hipMemcpy(d_c, c, sizeof(c), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( func2), dim3(gx), dim3(bx), 0, 0, d_b);
    CHECK(hipDeviceSynchronize());
    CHECK(hipGetLastError());
    hipLaunchKernelGGL(( func3), dim3(gx), dim3(bx), 0, 0, d_c, 6);
    CHECK(hipDeviceSynchronize());
    CHECK(hipGetLastError());
    CHECK(hipFree(d_b));
    CHECK(hipFree(d_c));
    hipDeviceReset();
    return 0;
}
| e12fd399827ae7a1deaafecf41860940ab7f0b4f.cu | #include<stdio.h>
#include "error_check.h"
// Demo kernel: scalar argument passed by value -- every thread prints its
// threadIdx.x together with the same copy of x.
__global__ void func1(int x)
{
    int tid = threadIdx.x;
    printf("thread: %d, parameter:%d \n", tid, x);
}
// Demo kernel: dereferences its pointer argument on the device, so x must
// point to device-accessible memory.
// NOTE(review): main() below passes the address of a host stack variable
// here -- that is an invalid device access; allocate with cudaMalloc and
// copy the value over instead.
__global__ void func2(int *x)
{
    int tid = threadIdx.x;
    printf("thread: %d, parameter:%d \n", tid, *x);
}
// Demo kernel: indexes into a device array with threadIdx.x.
// NOTE(review): the length parameter n is unused and x[tid] is not bounds
// checked -- safe only while blockDim.x <= n; also main() below passes a
// host stack array here, which is an invalid device access.
__global__ void func3(int x[], int n)
{
    int tid = threadIdx.x;
    printf("thread: %d, parameter:%d \n", tid, x[tid]);
}
// Driver for the argument-passing demo kernels: 2 blocks x 2 threads each.
int main(){
    const int gx = 2, bx = 2;
    int a = 42, aa = 88;
    int *b = &aa;
    int c[] = {1, 1, 2, 3, 5, 8};
    // Scalar pass-by-value: the host value can be handed straight to the kernel.
    func1<<<gx, bx>>>(a);
    CHECK(cudaDeviceSynchronize());
    CHECK(cudaGetLastError());
    // BUG FIX: func2/func3 dereference their pointer arguments on the
    // device, so the data must live in device memory. Previously the host
    // addresses b and c were passed directly, which is an invalid access.
    int *d_b = NULL;
    int *d_c = NULL;
    CHECK(cudaMalloc((void **)&d_b, sizeof(int)));
    CHECK(cudaMalloc((void **)&d_c, sizeof(c)));
    CHECK(cudaMemcpy(d_b, b, sizeof(int), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_c, c, sizeof(c), cudaMemcpyHostToDevice));
    func2<<<gx, bx>>>(d_b);
    CHECK(cudaDeviceSynchronize());
    CHECK(cudaGetLastError());
    func3<<<gx, bx>>>(d_c, 6);
    CHECK(cudaDeviceSynchronize());
    CHECK(cudaGetLastError());
    CHECK(cudaFree(d_b));
    CHECK(cudaFree(d_c));
    cudaDeviceReset();
    return 0;
}
}
|
f3f9f9ea68521b61b58df61396de794f3b1d8b51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "utils.h"
// Copies each element of the 1-D packed tensor accessor into the raw
// device array `weights` (one thread per element, guarded so extra
// threads in the last block do nothing).
__global__
void packed_accessor_kernel(torch::PackedTensorAccessor64<float, 1, torch::RestrictPtrTraits> weights_a, float *weights, int n){
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if(i < n) weights[i] = weights_a[i];
}
// Copies the elements of a 1-D float CUDA tensor into the caller-provided
// host buffer ft_weights (which must hold at least at_weights.sizes()[0]
// floats), staging them through a temporary device array filled by
// packed_accessor_kernel.
void tensor_accessor(at::Tensor at_weights, float * ft_weights){
	int n = at_weights.sizes()[0];
	int size = n * sizeof(float);
	float * device_weights;
	cuda_check(hipMalloc((void **) &device_weights, size));
	auto weights_a = at_weights.packed_accessor64<float, 1, torch::RestrictPtrTraits>();
	hipLaunchKernelGGL(( packed_accessor_kernel), dim3(ceil(n/256.0)), dim3(256), 0, 0, weights_a, device_weights, n);
	// Consistency fix: the launch, copy, and free are now checked too --
	// previously only the allocation was wrapped in cuda_check.
	cuda_check(hipGetLastError());
	cuda_check(hipMemcpy(ft_weights, device_weights, size, hipMemcpyDeviceToHost));
	cuda_check(hipFree(device_weights));
}
| f3f9f9ea68521b61b58df61396de794f3b1d8b51.cu | #include <math.h>
#include "utils.h"
__global__
void packed_accessor_kernel(torch::PackedTensorAccessor64<float, 1, torch::RestrictPtrTraits> weights_a, float *weights, int n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n) weights[i] = weights_a[i];
}
void tensor_accessor(at::Tensor at_weights, float * ft_weights){
int n = at_weights.sizes()[0];
int size = n * sizeof(float);
float * device_weights;
cuda_check(cudaMalloc((void **) &device_weights, size));
auto weights_a = at_weights.packed_accessor64<float, 1, torch::RestrictPtrTraits>();
packed_accessor_kernel<<<ceil(n/256.0), 256>>>(weights_a, device_weights, n);
cudaMemcpy(ft_weights, device_weights, size, cudaMemcpyDeviceToHost);
cudaFree(device_weights);
}
|
4c739f65a77528b5efa2890ff228b1525fd03d29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
/* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */
int index = blockDim.x*blockIdx.x + threadIdx.x;
c[index] = a[index] + b[index];
}
/* experiment with N */
/* how large can it be? */ // Assumes that the dimension fits inside RAM and each dimension is divisible by 32
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
hipMalloc( (void **) &d_a, size );
hipMalloc( (void **) &d_b, size );
hipMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
hipMemcpy( d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy( d_b, b, size, hipMemcpyHostToDevice);
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
hipLaunchKernelGGL(( add), dim3((N/THREADS_PER_BLOCK)), dim3(512), 0, 0, d_a, d_b, d_c);
/* copy result back to host */
hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost );
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
hipFree( d_a );
hipFree(d_b);
hipFree(d_c);
return 0;
} /* end main */
| 4c739f65a77528b5efa2890ff228b1525fd03d29.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
/* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */
int index = blockDim.x*blockIdx.x + threadIdx.x;
c[index] = a[index] + b[index];
}
/* experiment with N */
/* how large can it be? */ // Assumes that the dimension fits inside RAM and each dimension is divisible by 32
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
cudaMalloc( (void **) &d_a, size );
cudaMalloc( (void **) &d_b, size );
cudaMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice);
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
add<<<(N/THREADS_PER_BLOCK), 512>>>(d_a, d_b, d_c);
/* copy result back to host */
cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
cudaFree( d_a );
cudaFree(d_b);
cudaFree(d_c);
return 0;
} /* end main */
|
b2545e85cb2d748f6cf44ab6bf0333ea1ef54b18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
* and stores in res_ij.
*
* Shared memory is necessary here because we are reading and writing
* to memory many times...
*
* Note that __syncthreads is not needed here because each row in shared
* memory is exclusively read and written to by a single warp.
*/
__global__ void pow_rows(const float *a, uint pow, float *res) {
// store entire matrix in shared memory for fast reads.
__shared__ float s_a[32 * 32];
// store result in shared memory for fast writes.
__shared__ float s_res[32 * 32];
// assign each thread an index so that threads in the same warp process
// elements in the same row.
const uint row_i = threadIdx.x + 32 * threadIdx.y;
// copy matrix from global memory to shared memory in a coalesced fashion.
s_a[row_i] = a[row_i];
// intialize result as a matrix where each element is 1.0.
s_res[row_i] = 1.0;
// a single block computes the power of the entire matrix.
// each warp in the block computes the power of a single row.
// each thread in the warp computes the power of a single element.
while (pow > 0) {
s_res[row_i] *= s_a[row_i];
pow -= 1;
}
// copy result from shared memory to global memory in a coalesced fashion.
res[row_i] = s_res[row_i];
};
/**
* For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
* and stores in res_ij.
*
* After reading the matrix a into local memory row by row, we
* compute the power of each element on a column by column basis
* in order to cause a bank conflict.
*
* Note that __syncthreads is necessary here because the same shared
* memory is accessed by multiple warps.
*/
__global__ void pow_cols(const float *a, uint pow, float *res) {
// store entire matrix in shared memory for fast reads.
__shared__ float s_a[32 * 32];
// store result in shared memory for fast writes.
__shared__ float s_res[32 * 32];
// assign each thread an index so that threads in the same warp process
// elements in the same row.
const uint row_i = threadIdx.x + 32 * threadIdx.y;
// copy matrix from global memory to shared memory in a coalesced fashion.
s_a[row_i] = a[row_i];
// intialize result as a matrix where each element is 1.0.
s_res[row_i] = 1.0;
// in order to process the matrix column-by-column... all warps must
// finish initializing shared memory row-by-row.
__syncthreads();
// assign each thread an index so that threads in the same warp process
// elements in the same column.
const uint col_i = threadIdx.y + 32 * threadIdx.x;
// a single block computes the power of the entire matrix.
// each warp in the block computes the power of a single column.
// each thread in the warp computes the power of a single element.
while (pow > 0) {
// Note that col_i % 32 = threadIdx.y.
// Since all threads in the same warp have the same threadIdx.y, this
// is a 32-way bank conflict!
s_res[col_i] *= s_a[col_i];
pow -= 1;
}
// in order to read the matrix row-by-row... all warps must
// finish initializing shared memory column-by-column.
__syncthreads();
// copy result from shared memory to global memory in a coalesced fashion.
res[row_i] = s_res[row_i];
};
/**
* For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
* and stores in res_ij.
*
* After reading the matrix a into local memory row by row, we
* compute the power of each element on a column by column basis.
* Due to zero padding, we don't have a bank conflict.
*
* Note that __syncthreads is necessary here because the same shared
* memory is accessed by multiple warps.
*/
__global__ void pow_cols_pad(const float *a, uint pow, float *res) {
// store entire matrix in shared memory for fast reads.
__shared__ float s_a[33 * 33];
// store result in shared memory for fast writes.
__shared__ float s_res[33 * 33];
// assign each thread an index so that threads in the same warp process
// elements in the same column.
const uint row_i = threadIdx.x + 32 * threadIdx.y;
// copy matrix from global memory to shared memory in a coalesced fashion.
s_a[row_i] = a[row_i];
// intialize result as a matrix where each element is 1.0.
s_res[row_i] = 1.0;
// assign each thread an index so that threads in the same warp process
// elements in the same column.
const uint col_i = threadIdx.y + 33 * threadIdx.x;
// in order to process the matrix column-by-column... all warps must
// finish initializing shared memory row-by-row.
__syncthreads();
// a single block computes the power of the entire matrix.
// each warp in the block computes the power of a single column.
// each thread in the warp computes the power of a single element.
while (pow > 0) {
// Results from number theory: Additive group of integers mod n is
// generated by all integers m relatively prime to n. A warp conflict occurs
// if two threads in a warp access the same address mod 32. We
// minimize bank conflicts by reading and writing data to shared memory
// with a stride m relatively prime to n.
//
// Even though we are reading data column-by-column, we don't have
// bank conflicts since our stride is relatively prime to 32.
// For larger matrices (size n), we should choose a stride that is
// relatively prime to 32. It is useful to note that for any integer n,
// gcd(n, n + 1) = 1.
s_res[col_i] *= s_a[col_i];
pow -= 1;
}
// in order to read the matrix row-by-row... all warps must
// finish initializing shared memory column-by-column.
__syncthreads();
// copy result from shared memory to global memory in a coalesced fashion.
res[row_i] = s_res[row_i];
};
| b2545e85cb2d748f6cf44ab6bf0333ea1ef54b18.cu | /**
* For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
* and stores in res_ij.
*
* Shared memory is necessary here because we are reading and writing
* to memory many times...
*
* Note that __syncthreads is not needed here because each row in shared
* memory is exclusively read and written to by a single warp.
*/
__global__ void pow_rows(const float *a, uint pow, float *res) {
// store entire matrix in shared memory for fast reads.
__shared__ float s_a[32 * 32];
// store result in shared memory for fast writes.
__shared__ float s_res[32 * 32];
// assign each thread an index so that threads in the same warp process
// elements in the same row.
const uint row_i = threadIdx.x + 32 * threadIdx.y;
// copy matrix from global memory to shared memory in a coalesced fashion.
s_a[row_i] = a[row_i];
// intialize result as a matrix where each element is 1.0.
s_res[row_i] = 1.0;
// a single block computes the power of the entire matrix.
// each warp in the block computes the power of a single row.
// each thread in the warp computes the power of a single element.
while (pow > 0) {
s_res[row_i] *= s_a[row_i];
pow -= 1;
}
// copy result from shared memory to global memory in a coalesced fashion.
res[row_i] = s_res[row_i];
};
/**
* For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
* and stores in res_ij.
*
* After reading the matrix a into local memory row by row, we
* compute the power of each element on a column by column basis
* in order to cause a bank conflict.
*
* Note that __syncthreads is necessary here because the same shared
* memory is accessed by multiple warps.
*/
__global__ void pow_cols(const float *a, uint pow, float *res) {
// store entire matrix in shared memory for fast reads.
__shared__ float s_a[32 * 32];
// store result in shared memory for fast writes.
__shared__ float s_res[32 * 32];
// assign each thread an index so that threads in the same warp process
// elements in the same row.
const uint row_i = threadIdx.x + 32 * threadIdx.y;
// copy matrix from global memory to shared memory in a coalesced fashion.
s_a[row_i] = a[row_i];
// intialize result as a matrix where each element is 1.0.
s_res[row_i] = 1.0;
// in order to process the matrix column-by-column... all warps must
// finish initializing shared memory row-by-row.
__syncthreads();
// assign each thread an index so that threads in the same warp process
// elements in the same column.
const uint col_i = threadIdx.y + 32 * threadIdx.x;
// a single block computes the power of the entire matrix.
// each warp in the block computes the power of a single column.
// each thread in the warp computes the power of a single element.
while (pow > 0) {
// Note that col_i % 32 = threadIdx.y.
// Since all threads in the same warp have the same threadIdx.y, this
// is a 32-way bank conflict!
s_res[col_i] *= s_a[col_i];
pow -= 1;
}
// in order to read the matrix row-by-row... all warps must
// finish initializing shared memory column-by-column.
__syncthreads();
// copy result from shared memory to global memory in a coalesced fashion.
res[row_i] = s_res[row_i];
};
/**
* For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
* and stores in res_ij.
*
* After reading the matrix a into local memory row by row, we
* compute the power of each element on a column by column basis.
* Due to zero padding, we don't have a bank conflict.
*
* Note that __syncthreads is necessary here because the same shared
* memory is accessed by multiple warps.
*/
__global__ void pow_cols_pad(const float *a, uint pow, float *res) {
// store entire matrix in shared memory for fast reads.
__shared__ float s_a[33 * 33];
// store result in shared memory for fast writes.
__shared__ float s_res[33 * 33];
// assign each thread an index so that threads in the same warp process
// elements in the same column.
const uint row_i = threadIdx.x + 32 * threadIdx.y;
// copy matrix from global memory to shared memory in a coalesced fashion.
s_a[row_i] = a[row_i];
// intialize result as a matrix where each element is 1.0.
s_res[row_i] = 1.0;
// assign each thread an index so that threads in the same warp process
// elements in the same column.
const uint col_i = threadIdx.y + 33 * threadIdx.x;
// in order to process the matrix column-by-column... all warps must
// finish initializing shared memory row-by-row.
__syncthreads();
// a single block computes the power of the entire matrix.
// each warp in the block computes the power of a single column.
// each thread in the warp computes the power of a single element.
while (pow > 0) {
// Results from number theory: Additive group of integers mod n is
// generated by all integers m relatively prime to n. A warp conflict occurs
// if two threads in a warp access the same address mod 32. We
// minimize bank conflicts by reading and writing data to shared memory
// with a stride m relatively prime to n.
//
// Even though we are reading data column-by-column, we don't have
// bank conflicts since our stride is relatively prime to 32.
// For larger matrices (size n), we should choose a stride that is
// relatively prime to 32. It is useful to note that for any integer n,
// gcd(n, n + 1) = 1.
s_res[col_i] *= s_a[col_i];
pow -= 1;
}
// in order to read the matrix row-by-row... all warps must
// finish initializing shared memory column-by-column.
__syncthreads();
// copy result from shared memory to global memory in a coalesced fashion.
res[row_i] = s_res[row_i];
};
|
0476e061da0070d1e67c95b0f09c1de08cb5a025.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
// columns [bS, iC, kD, kH, kW, oD, oH, oW] to be de-convoluted to volume [bS, iC, iD, iH, iW]
template <typename T>
static __global__ void col2volCuda(const void* columns, const Nd4jLong* colShapeInfo, void* volume, const Nd4jLong* volShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
const T* col = reinterpret_cast<const T*>(columns);
T* vol = reinterpret_cast<T*>(volume);
__shared__ uint kD, kH, kW, oD, oH, oW, *sharedMem;
__shared__ Nd4jLong volLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<uint*>(shmem);
oD = colShapeInfo[6];
oH = colShapeInfo[7];
oW = colShapeInfo[8];
kD = dD * (colShapeInfo[3] - 1) + 1;
kH = dH * (colShapeInfo[4] - 1) + 1;
kW = dW * (colShapeInfo[5] - 1) + 1;
volLen = shape::length(volShapeInfo);
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * 8;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < volLen; i += gridDim.x * blockDim.x) {
shape::index2coords(i, volShapeInfo, coords);
const auto volOffset = shape::getOffset(volShapeInfo, coords);
const auto bSiCoffset = coords[0] * colShapeInfo[9] + coords[1] * colShapeInfo[10];
const uint imD = coords[2] + pD;
const uint imH = coords[3] + pH;
const uint imW = coords[4] + pW;
const uint colDstart = (imD < kD) ? 0 : (imD - kD) / sD + 1;
const uint colHstart = (imH < kH) ? 0 : (imH - kH) / sH + 1;
const uint colWstart = (imW < kW) ? 0 : (imW - kW) / sW + 1;
const uint colDend = sd::math::nd4j_min<uint>(imD / sD + 1, oD);
const uint colHend = sd::math::nd4j_min<uint>(imH / sH + 1, oH);
const uint colWend = sd::math::nd4j_min<uint>(imW / sW + 1, oW);
T val = 0;
for(uint colD = colDstart; colD < colDend; ++colD) {
coords[2] = imD - colD * sD;
if(coords[2] % dD != 0) continue;
for(uint colH = colHstart; colH < colHend; ++colH) {
coords[3] = imH - colH * sH;
if(coords[3] % dH != 0) continue;
for(uint colW = colWstart; colW < colWend; ++colW) {
coords[4] = imW - colW * sW;
if(coords[4] % dW != 0) continue;
val += col[bSiCoffset + (coords[2]/dD)*colShapeInfo[11] + (coords[3]/dH)*colShapeInfo[12] + (coords[4]/dW)*colShapeInfo[13] + colD*colShapeInfo[14] + colH*colShapeInfo[15] + colW*colShapeInfo[16]];
}
}
}
vol[volOffset] = val;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void col2volCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* columns, const Nd4jLong* colShapeInfo,
void* volume, const Nd4jLong* volShapeInfo,
const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
hipLaunchKernelGGL(( col2volCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, columns, colShapeInfo, volume, volShapeInfo, sD, sH, sW, pD, pH, pW, dD, dH, dW);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::col2vol(sd::graph::Context& block, const NDArray& col, NDArray& vol, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
PointersManager manager(block.launchContext(), "col2vol");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (vol.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = col.rankOf() * sizeof(uint) * threadsPerBlock + 256;
NDArray::prepareSpecialUse({&vol}, {&col});
BUILD_SINGLE_SELECTOR(vol.dataType(), col2volCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), col.getSpecialBuffer(), col.getSpecialShapeInfo(), vol.specialBuffer(), vol.specialShapeInfo(), sD, sH, sW, pD, pH, pW, dD, dH, dW), FLOAT_TYPES);
NDArray::registerSpecialUse({&vol}, {&col});
manager.synchronize();
}
}
}
| 0476e061da0070d1e67c95b0f09c1de08cb5a025.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
// columns [bS, iC, kD, kH, kW, oD, oH, oW] to be de-convoluted to volume [bS, iC, iD, iH, iW]
template <typename T>
static __global__ void col2volCuda(const void* columns, const Nd4jLong* colShapeInfo, void* volume, const Nd4jLong* volShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
const T* col = reinterpret_cast<const T*>(columns);
T* vol = reinterpret_cast<T*>(volume);
__shared__ uint kD, kH, kW, oD, oH, oW, *sharedMem;
__shared__ Nd4jLong volLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<uint*>(shmem);
oD = colShapeInfo[6];
oH = colShapeInfo[7];
oW = colShapeInfo[8];
kD = dD * (colShapeInfo[3] - 1) + 1;
kH = dH * (colShapeInfo[4] - 1) + 1;
kW = dW * (colShapeInfo[5] - 1) + 1;
volLen = shape::length(volShapeInfo);
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * 8;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < volLen; i += gridDim.x * blockDim.x) {
shape::index2coords(i, volShapeInfo, coords);
const auto volOffset = shape::getOffset(volShapeInfo, coords);
const auto bSiCoffset = coords[0] * colShapeInfo[9] + coords[1] * colShapeInfo[10];
const uint imD = coords[2] + pD;
const uint imH = coords[3] + pH;
const uint imW = coords[4] + pW;
const uint colDstart = (imD < kD) ? 0 : (imD - kD) / sD + 1;
const uint colHstart = (imH < kH) ? 0 : (imH - kH) / sH + 1;
const uint colWstart = (imW < kW) ? 0 : (imW - kW) / sW + 1;
const uint colDend = sd::math::nd4j_min<uint>(imD / sD + 1, oD);
const uint colHend = sd::math::nd4j_min<uint>(imH / sH + 1, oH);
const uint colWend = sd::math::nd4j_min<uint>(imW / sW + 1, oW);
T val = 0;
for(uint colD = colDstart; colD < colDend; ++colD) {
coords[2] = imD - colD * sD;
if(coords[2] % dD != 0) continue;
for(uint colH = colHstart; colH < colHend; ++colH) {
coords[3] = imH - colH * sH;
if(coords[3] % dH != 0) continue;
for(uint colW = colWstart; colW < colWend; ++colW) {
coords[4] = imW - colW * sW;
if(coords[4] % dW != 0) continue;
val += col[bSiCoffset + (coords[2]/dD)*colShapeInfo[11] + (coords[3]/dH)*colShapeInfo[12] + (coords[4]/dW)*colShapeInfo[13] + colD*colShapeInfo[14] + colH*colShapeInfo[15] + colW*colShapeInfo[16]];
}
}
}
vol[volOffset] = val;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void col2volCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* columns, const Nd4jLong* colShapeInfo,
void* volume, const Nd4jLong* volShapeInfo,
const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
col2volCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(columns, colShapeInfo, volume, volShapeInfo, sD, sH, sW, pD, pH, pW, dD, dH, dW);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::col2vol(sd::graph::Context& block, const NDArray& col, NDArray& vol, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
PointersManager manager(block.launchContext(), "col2vol");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (vol.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = col.rankOf() * sizeof(uint) * threadsPerBlock + 256;
NDArray::prepareSpecialUse({&vol}, {&col});
BUILD_SINGLE_SELECTOR(vol.dataType(), col2volCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), col.getSpecialBuffer(), col.getSpecialShapeInfo(), vol.specialBuffer(), vol.specialShapeInfo(), sD, sH, sW, pD, pH, pW, dD, dH, dW), FLOAT_TYPES);
NDArray::registerSpecialUse({&vol}, {&col});
manager.synchronize();
}
}
}
|
4faf74db94654651ba249f7d31c9cd6489271cbb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../gtest.h"
#include <backends/gpu/kernels/stack.hpp>
#include <backends/gpu/stack.hpp>
#include <backends/gpu/managed_ptr.hpp>
using namespace arb;
TEST(stack, construction) {
using T = int;
gpu::stack<T> s(10);
EXPECT_EQ(0u, s.size());
EXPECT_EQ(10u, s.capacity());
}
// kernel and functors for testing push_back functionality
namespace kernels {
template <typename F>
__global__
void push_back(gpu::stack_storage<int>& s, F f) {
if (f(threadIdx.x)) {
arb::gpu::push_back(s, int(threadIdx.x));
}
}
struct all_ftor {
__host__ __device__
bool operator() (int i) {
return true;
}
};
struct even_ftor {
__host__ __device__
bool operator() (int i) {
return (i%2)==0;
}
};
struct odd_ftor {
__host__ __device__
bool operator() (int i) {
return i%2;
}
};
}
TEST(stack, push_back) {
using T = int;
using stack = gpu::stack<T>;
const unsigned n = 10;
EXPECT_TRUE(n%2 == 0); // require n is even for tests to work
auto s = stack(n);
auto& sstorage = s.storage();
hipLaunchKernelGGL(( kernels::push_back), dim3(1), dim3(n), 0, 0, sstorage, kernels::all_ftor());
hipDeviceSynchronize();
EXPECT_EQ(n, s.size());
for (auto i=0; i<int(s.size()); ++i) {
EXPECT_EQ(i, s[i]);
}
s.clear();
hipLaunchKernelGGL(( kernels::push_back), dim3(1), dim3(n), 0, 0, sstorage, kernels::even_ftor());
hipDeviceSynchronize();
EXPECT_EQ(n/2, s.size());
for (auto i=0; i<int(s.size())/2; ++i) {
EXPECT_EQ(2*i, s[i]);
}
s.clear();
hipLaunchKernelGGL(( kernels::push_back), dim3(1), dim3(n), 0, 0, sstorage, kernels::odd_ftor());
hipDeviceSynchronize();
EXPECT_EQ(n/2, s.size());
for (auto i=0; i<int(s.size())/2; ++i) {
EXPECT_EQ(2*i+1, s[i]);
}
}
TEST(stack, overflow) {
using T = int;
using stack = gpu::stack<T>;
const unsigned n = 10;
auto s = stack(n);
auto& sstorage = s.storage();
EXPECT_FALSE(s.overflow());
// push 2n items into a stack of size n
hipLaunchKernelGGL(( kernels::push_back), dim3(1), dim3(2*n), 0, 0, sstorage, kernels::all_ftor());
hipDeviceSynchronize();
EXPECT_EQ(n, s.size());
EXPECT_EQ(2*n, s.pushes());
EXPECT_TRUE(s.overflow());
}
TEST(stack, empty) {
using T = int;
using stack = gpu::stack<T>;
stack s(0u);
EXPECT_EQ(s.size(), 0u);
EXPECT_EQ(s.capacity(), 0u);
EXPECT_EQ(s.storage().data, nullptr);
}
| 4faf74db94654651ba249f7d31c9cd6489271cbb.cu | #include "../gtest.h"
#include <backends/gpu/kernels/stack.hpp>
#include <backends/gpu/stack.hpp>
#include <backends/gpu/managed_ptr.hpp>
using namespace arb;
TEST(stack, construction) {
using T = int;
gpu::stack<T> s(10);
EXPECT_EQ(0u, s.size());
EXPECT_EQ(10u, s.capacity());
}
// kernel and functors for testing push_back functionality
namespace kernels {
template <typename F>
__global__
void push_back(gpu::stack_storage<int>& s, F f) {
if (f(threadIdx.x)) {
arb::gpu::push_back(s, int(threadIdx.x));
}
}
struct all_ftor {
__host__ __device__
bool operator() (int i) {
return true;
}
};
struct even_ftor {
__host__ __device__
bool operator() (int i) {
return (i%2)==0;
}
};
struct odd_ftor {
__host__ __device__
bool operator() (int i) {
return i%2;
}
};
}
TEST(stack, push_back) {
using T = int;
using stack = gpu::stack<T>;
const unsigned n = 10;
EXPECT_TRUE(n%2 == 0); // require n is even for tests to work
auto s = stack(n);
auto& sstorage = s.storage();
kernels::push_back<<<1, n>>>(sstorage, kernels::all_ftor());
cudaDeviceSynchronize();
EXPECT_EQ(n, s.size());
for (auto i=0; i<int(s.size()); ++i) {
EXPECT_EQ(i, s[i]);
}
s.clear();
kernels::push_back<<<1, n>>>(sstorage, kernels::even_ftor());
cudaDeviceSynchronize();
EXPECT_EQ(n/2, s.size());
for (auto i=0; i<int(s.size())/2; ++i) {
EXPECT_EQ(2*i, s[i]);
}
s.clear();
kernels::push_back<<<1, n>>>(sstorage, kernels::odd_ftor());
cudaDeviceSynchronize();
EXPECT_EQ(n/2, s.size());
for (auto i=0; i<int(s.size())/2; ++i) {
EXPECT_EQ(2*i+1, s[i]);
}
}
TEST(stack, overflow) {
using T = int;
using stack = gpu::stack<T>;
const unsigned n = 10;
auto s = stack(n);
auto& sstorage = s.storage();
EXPECT_FALSE(s.overflow());
// push 2n items into a stack of size n
kernels::push_back<<<1, 2*n>>>(sstorage, kernels::all_ftor());
cudaDeviceSynchronize();
EXPECT_EQ(n, s.size());
EXPECT_EQ(2*n, s.pushes());
EXPECT_TRUE(s.overflow());
}
TEST(stack, empty) {
using T = int;
using stack = gpu::stack<T>;
stack s(0u);
EXPECT_EQ(s.size(), 0u);
EXPECT_EQ(s.capacity(), 0u);
EXPECT_EQ(s.storage().data, nullptr);
}
|
58379859cfda1a21d423faa5aa06101814867cc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/AccumulateType.h>
#include <ATen/native/Pool.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
__device__ inline int max(int a, int b) {
return a >= b ? a : b;
}
// 3-D average-pooling forward: one thread computes one output element.
//
// Grid layout: (x, y) tile the output width/height; blockIdx.z + offsetZ
// jointly encodes the output frame and the collapsed batch*channel slice
// (offsetZ is the running base used when the host splits the launch into
// chunks of at most 65535 z-blocks). There is no explicit bound check on
// slice/oFrame — the host sizes the grid so they stay in range.
//
// divisor_override == 0 means "no override"; otherwise it replaces the
// computed divisor. With no override, the divisor is the full window volume
// (including padding cells) when count_include_pad, else the volume of the
// window clipped to the input bounds.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool3d_cuda_update_output(
  PackedTensorAccessor64<scalar_t, 4> input,
  PackedTensorAccessor64<scalar_t, 4> output,
  int kT, int kH, int kW,
  int dT, int dH, int dW,
  int padT, int padH, int padW,
  bool count_include_pad,
  int offsetZ, int divisor_override)
{
  int oCol = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
  int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
  // Guard against partially filled tiles along width/height.
  if (oRow < output.size(2) && oCol < output.size(3))
  {
    accscalar_t sum = 0.0;
    // Pooling window in input coordinates, before clipping to the tensor.
    int tstart = oFrame * dT - padT;
    int hstart = oRow * dH - padH;
    int wstart = oCol * dW - padW;
    int tend = min(tstart + kT, input.size(1) + padT);
    int hend = min(hstart + kH, input.size(2) + padH);
    int wend = min(wstart + kW, input.size(3) + padW);
    // Window volume including padding cells (window clipped only to the
    // padded extent) — used when count_include_pad is true.
    int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
    // Clip the window to the real input bounds for the accumulation loops.
    tstart = max(tstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    tend = min(tend, input.size(1));
    hend = min(hend, input.size(2));
    wend = min(wend, input.size(3));
    // A window lying entirely in the padding contributes zero.
    if (tstart >= tend || hstart >= hend || wstart >= wend) {
      output[slice][oFrame][oRow][oCol] = scalar_t(0);
      return;
    }
    accscalar_t divide_factor;
    if (divisor_override) {
      divide_factor = static_cast<accscalar_t>(divisor_override);
    } else {
      if(count_include_pad) {
        divide_factor = static_cast<accscalar_t>(pool_size);
      } else {
        // Count only the cells of the clipped (in-bounds) window.
        divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart));
      }
    }
    // Accumulate in accscalar_t (wider than scalar_t for half/bfloat16).
    int ti, hi, wi;
    for (ti = tstart; ti < tend; ++ti)
    {
      for (hi = hstart; hi < hend; ++hi)
      {
        for (wi = wstart; wi < wend; ++wi)
        {
          scalar_t val = input[slice][ti][hi][wi];
          sum += val;
        }
      }
    }
    // Convert the wide accumulator back to the storage type on write-out.
    output[slice][oFrame][oRow][oCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum / divide_factor);
  }
}
// Inner-most loop size (kW) passed as template parameter for
// performance reasons.
//
// Specialization of the forward kernel with the kernel width baked in as a
// compile-time constant (KERNEL_WIDTH), so the compiler can unroll the
// inner-most width loop. Semantics are otherwise identical to the runtime-kW
// overload above; the host dispatches to this one for kW in [1, 7].
template<int KERNEL_WIDTH, typename scalar_t, typename accscalar_t>
__global__ void avg_pool3d_cuda_update_output(
  PackedTensorAccessor64<scalar_t, 4> input,
  PackedTensorAccessor64<scalar_t, 4> output,
  int kT, int kH,
  int dT, int dH, int dW,
  int padT, int padH, int padW,
  bool count_include_pad,
  int offsetZ, int divisor_override)
{
  int oCol = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
  int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
  // Guard against partially filled tiles along width/height.
  if (oRow < output.size(2) && oCol < output.size(3))
  {
    accscalar_t sum = 0.0;
    // Pooling window in input coordinates, before clipping to the tensor.
    int tstart = oFrame * dT - padT;
    int hstart = oRow * dH - padH;
    int wstart = oCol * dW - padW;
    int tend = min(tstart + kT, input.size(1) + padT);
    int hend = min(hstart + kH, input.size(2) + padH);
    int wend = min(wstart + KERNEL_WIDTH, input.size(3) + padW);
    // Window volume including padding cells; used when count_include_pad.
    int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
    // Clip the window to the real input bounds for the accumulation loops.
    tstart = max(tstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    tend = min(tend, input.size(1));
    hend = min(hend, input.size(2));
    wend = min(wend, input.size(3));
    // A window lying entirely in the padding contributes zero.
    if (tstart >= tend || hstart >= hend || wstart >= wend) {
      output[slice][oFrame][oRow][oCol] = scalar_t(0);
      return;
    }
    accscalar_t divide_factor;
    // divisor_override == 0 means "no override".
    if (divisor_override) {
      divide_factor = static_cast<accscalar_t>(divisor_override);
    } else {
      if(count_include_pad) {
        divide_factor = static_cast<accscalar_t>(pool_size);
      } else {
        // Count only the cells of the clipped (in-bounds) window.
        divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart));
      }
    }
    // Accumulate in accscalar_t; the wi loop has a compile-time trip bound.
    int ti, hi, wi;
    for (ti = tstart; ti < tend; ++ti)
    {
      for (hi = hstart; hi < hend; ++hi)
      {
        for (wi = wstart; wi < wend; ++wi)
        {
          scalar_t val = input[slice][ti][hi][wi];
          sum += val;
        }
      }
    }
    output[slice][oFrame][oRow][oCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum / divide_factor);
  }
}
// Backward specialization for stride == 1 and zero padding: each thread
// computes one gradInput element directly (gather, no atomics) by summing
// the gradOutput positions whose pooling window covers it, then scaling by
// normFactor (the host passes 1 / divisor).
//
// The inner loops walk raw pointer offsets from gOut, assuming the last
// three dimensions of gradOutput are densely packed — the host passes a
// .contiguous() tensor. blockIdx.z + offsetZ encodes (slice, iFrame) the
// same way as the forward kernels.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool3d_single_backward_out_frame_stride1(
  PackedTensorAccessor64<scalar_t, 4> gradOutput,
  PackedTensorAccessor64<scalar_t, 4> gradInput,
  int kT, int kH, int kW,
  accscalar_t normFactor,
  int offsetZ)
{
  int iCol = blockIdx.x * blockDim.x + threadIdx.x;
  int iRow = blockIdx.y * blockDim.y + threadIdx.y;
  int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // input frame/time
  int slice = (blockIdx.z + offsetZ) / gradInput.size(1); // input slice/feature
  // guard against over-tiled threads
  if (iRow < gradInput.size(2) && iCol < gradInput.size(3))
  {
    accscalar_t sum = 0.0;
    // First gradOutput element whose window can include (iFrame, iRow, iCol).
    scalar_t *gOut = &gradOutput[slice][max(0, iFrame - kT + 1)]
                        [max(0, iRow - kH + 1)][max(0, iCol - kW + 1)];
    int frameOffset = 0;
    for (int oFrame = max(0, iFrame - kT + 1);
         oFrame < min(iFrame + 1, gradOutput.size(1));
         ++oFrame)
    {
      int rowOffset = frameOffset;
      for (int oRow = max(0, iRow - kH + 1);
           oRow < min(iRow + 1, gradOutput.size(2));
           ++oRow)
      {
        int colOffset = rowOffset;
        for (int oCol = max(0, iCol - kW + 1);
             oCol < min(iCol + 1, gradOutput.size(3));
             ++oCol)
        {
          sum += gOut[colOffset];
          ++colOffset;
        }
        // Advance one gradOutput row (width elements).
        rowOffset += gradOutput.size(3);
      }
      // Advance one gradOutput frame (height * width elements).
      frameOffset += gradOutput.size(2) * gradOutput.size(3);
    }
    gradInput[slice][iFrame][iRow][iCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum * normFactor);
  }
}
// Backward (scatter, atomic variant): each thread takes one gradOutput
// element, divides it by the forward divisor, and adds the result into every
// gradInput cell of its pooling window via gpuAtomicAdd. Used by the host
// when windows overlap (stride < kernel in any dimension), so multiple
// threads may update the same gradInput cell. Output order of the atomic
// adds is unspecified — this path is nondeterministic for floating point.
//
// Window/divisor computation mirrors the forward kernel exactly so the
// gradient matches the forward average.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool3d_cuda_update_grad_input_atomic(
  PackedTensorAccessor64<scalar_t, 4> gradOutput,
  PackedTensorAccessor64<scalar_t, 4> gradInput,
  int kT, int kH, int kW,
  int dT, int dH, int dW,
  int padT, int padH, int padW,
  bool count_include_pad,
  int offsetZ, int divisor_override)
{
  int oCol = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // gradOutput frame/time
  int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // gradOutput slice/feature
  // guard against over-tiled threads
  if (oRow < gradOutput.size(2) && oCol < gradOutput.size(3))
  {
    // Reconstruct the forward pooling window and its divisor.
    int tstart = oFrame * dT - padT;
    int hstart = oRow * dH - padH;
    int wstart = oCol * dW - padW;
    int tend = min(tstart + kT, gradInput.size(1) + padT);
    int hend = min(hstart + kH, gradInput.size(2) + padH);
    int wend = min(wstart + kW, gradInput.size(3) + padW);
    int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
    tstart = max(tstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    tend = min(tend, gradInput.size(1));
    hend = min(hend, gradInput.size(2));
    wend = min(wend, gradInput.size(3));
    accscalar_t divide_factor;
    // divisor_override == 0 means "no override".
    if (divisor_override) {
      divide_factor = static_cast<accscalar_t>(divisor_override);
    } else {
      if(count_include_pad) {
        divide_factor = static_cast<accscalar_t>(pool_size);
      } else {
        divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart));
      }
    }
    // Per-cell gradient contribution from this output position.
    scalar_t val = ScalarConvert<accscalar_t, scalar_t>::to(
      ScalarConvert<scalar_t, accscalar_t>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
    for (int iFrame = tstart; iFrame < tend; ++iFrame)
    {
      for (int iRow = hstart; iRow < hend; ++iRow)
      {
        for (int iCol = wstart; iCol < wend; ++iCol)
        {
          gpuAtomicAdd(&gradInput[slice][iFrame][iRow][iCol], val);
        }
      }
    }
  }
}
// Backward (scatter, non-atomic variant): identical to the atomic version
// except that it uses plain stores. The host selects this kernel only when
// pooling windows do not overlap (stride >= kernel in every dimension), so
// each gradInput cell is written by at most one thread and no atomics are
// needed. Note it assigns rather than accumulates — correct because the
// host zero-initializes gradInput and windows are disjoint.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool3d_cuda_update_grad_input(
  PackedTensorAccessor64<scalar_t, 4> gradOutput,
  PackedTensorAccessor64<scalar_t, 4> gradInput,
  int kT, int kH, int kW,
  int dT, int dH, int dW,
  int padT, int padH, int padW,
  bool count_include_pad, int offsetZ, int divisor_override)
{
  int oCol = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // gradOutput frame/time
  int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // gradOutput slice/feature
  // guard against over-tiled threads
  if (oRow < gradOutput.size(2) && oCol < gradOutput.size(3))
  {
    // Reconstruct the forward pooling window and its divisor.
    int tstart = oFrame * dT - padT;
    int hstart = oRow * dH - padH;
    int wstart = oCol * dW - padW;
    int tend = min(tstart + kT, gradInput.size(1) + padT);
    int hend = min(hstart + kH, gradInput.size(2) + padH);
    int wend = min(wstart + kW, gradInput.size(3) + padW);
    int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
    tstart = max(tstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    tend = min(tend, gradInput.size(1));
    hend = min(hend, gradInput.size(2));
    wend = min(wend, gradInput.size(3));
    accscalar_t divide_factor;
    // divisor_override == 0 means "no override".
    if (divisor_override) {
      divide_factor = static_cast<accscalar_t>(divisor_override);
    } else {
      if(count_include_pad) {
        divide_factor = static_cast<accscalar_t>(pool_size);
      } else {
        divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart));
      }
    }
    // Per-cell gradient contribution from this output position.
    scalar_t val = ScalarConvert<accscalar_t, scalar_t>::to(
      ScalarConvert<scalar_t, accscalar_t>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
    for (int iFrame = tstart; iFrame < tend; ++iFrame)
    {
      for (int iRow = hstart; iRow < hend; ++iRow)
      {
        for (int iCol = wstart; iCol < wend; ++iCol)
        {
          gradInput[slice][iFrame][iRow][iCol] = val;
        }
      }
    }
  }
}
// Emits one `case KW:` launching the kernel-width-templated forward kernel,
// so the inner width loop is unrolled at compile time for kW in [1, 7].
// Expands inside the AT_DISPATCH lambda below and relies on its local names
// (grid, block, work_input, work_output, pooling params, offsetZ, divisor).
// No comments inside the macro body: a // comment before a '\' continuation
// would swallow the following line.
#define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
  hipLaunchKernelGGL(( avg_pool3d_cuda_update_output<KW, scalar_t, accscalar_t>) \
    , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
       work_input.packed_accessor64<scalar_t, 4>(), \
       work_output.packed_accessor64<scalar_t, 4>(), \
       kT, kH, \
       dT, dH, dW, \
       padT, padH, padW, \
       count_include_pad, \
       offsetZ, divisor); \
  C10_HIP_KERNEL_LAUNCH_CHECK(); \
  break
// Host driver for the 3-D average-pool forward pass.
//
// Normalizes kernel_size/stride/padding (each may be 1 or 3 ints), computes
// the output shape, runs the shared shape check, resizes `output`, collapses
// batch and channel into one leading dimension, and launches the forward
// kernel over z-chunks of at most 65535 (the CUDA gridDim.z limit).
// For kW in [1, 7] it dispatches to the width-templated kernel via the
// LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH macro; otherwise the generic kernel.
void avg_pool3d_out_cuda_template(
  Tensor& output,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override)
{
  TensorArg output_arg{ output, "output", 1 };
  TensorArg input_arg{ input, "input", 2 };
  checkAllSameGPU("avg_pool3d_out_cuda", {output_arg, input_arg});
  // #20866, #22032: Guarantee this for the official C++ API?
  TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
    "avg_pool3d: kernel_size must be a single int, or a tuple of three ints");
  const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
  const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
  const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
  TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 3,
    "avg_pool3d: stride must be omitted, a single int, or a tuple of three ints");
  // Omitted stride defaults to the kernel size (non-overlapping windows).
  const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
  const int dH = stride.empty() ? kH :
                 stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
  const int dW = stride.empty() ? kW :
                 stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
  TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
    "avg_pool3d: padding must be a single int, or a tuple of three ints");
  const int padT = safe_downcast<int, int64_t>(padding[0]);
  const int padH = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[1]);
  const int padW = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[2]);
  TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
    "non-empty 4D or 5D (batch mode) tensor expected for input");
  // if divisor==0 then we will ignore it
  int64_t divisor = 0;
  if (divisor_override.has_value()) {
    TORCH_CHECK(divisor_override.value() != 0, "divisor must be not zero");
    divisor = divisor_override.value();
  }
  // Negative indexing handles both 4-D (C,T,H,W) and 5-D (N,C,T,H,W) input.
  const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
  const int64_t nslices = input.size(-4);
  const int64_t itime = input.size(-3);
  const int64_t iheight = input.size(-2);
  const int64_t iwidth = input.size(-1);
  const int64_t otime = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode);
  const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode);
  const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode);
  pool3d_shape_check(
    input,
    nslices,
    kT, kH, kW,
    dT, dH, dW,
    padT, padH, padW,
    1, 1, 1,
    itime, iheight, iwidth,
    otime, oheight, owidth,
    /*check_input_size=*/ true);
  if (input.ndimension() == 4) {
    output.resize_({ nslices, otime, oheight, owidth});
  }
  else {
    output.resize_({nbatch, nslices, otime, oheight, owidth});
  }
  Tensor work_input = input.contiguous();
  Tensor work_output = output;
  if (input.ndimension() == 5) {
    // Collapse batch and feature dimensions.
    work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth});
    work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth});
  }
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
    input.scalar_type(),
    "avg_pool3d_out_cuda",
    [&] {
      using accscalar_t = acc_type<scalar_t, true>;
      // totalZ = frames * slices; launched in chunks <= 65535 (gridDim.z cap).
      int64_t totalZ = otime * nslices * nbatch;
      int64_t offsetZ = 0;
      dim3 block(32, 8);
      while (totalZ > 0) {
        dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int64_t>(block.x)),
                  cuda::ATenCeilDiv(oheight, static_cast<int64_t>(block.y)),
                  totalZ > 65535 ? 65535 : totalZ);
        switch (kW) {
          LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1);
          LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2);
          LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3);
          LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4);
          LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(5);
          LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6);
          LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7);
        default:
          // NOTE(review): divisor is int64_t but the kernel takes int —
          // assumes any override fits in int; confirm against callers.
          hipLaunchKernelGGL(( avg_pool3d_cuda_update_output<scalar_t, accscalar_t>)
            , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
               work_input.packed_accessor64<scalar_t, 4>(),
               work_output.packed_accessor64<scalar_t, 4>(),
               kT, kH, kW,
               dT, dH, dW,
               padT, padH, padW,
               count_include_pad,
               offsetZ, divisor);
          C10_HIP_KERNEL_LAUNCH_CHECK();
          break;
        }
        totalZ -= 65535;
        offsetZ += 65535;
      }
    }
  );
}
#undef LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH
// Host driver for the 3-D average-pool backward pass.
//
// Normalizes the pooling parameters exactly like the forward driver, zeroes
// gradInput, collapses batch and channel into one leading dimension, then
// picks one of three kernels:
//   * stride-1 / zero-padding: gather kernel (one thread per gradInput
//     element, no atomics) — fastest path;
//   * overlapping windows (stride < kernel in any dim): atomic scatter
//     kernel (nondeterministic for floating point);
//   * non-overlapping windows: plain scatter kernel (race-free stores).
// Launches iterate over z in chunks of 65535 (the CUDA gridDim.z limit).
//
// Fix vs. original: local `owidth_for_chape_check` renamed to
// `owidth_for_shape_check` (typo in the identifier; the variable is unused
// beyond documenting the TH-era shape-check behavior, like its siblings).
void avg_pool3d_backward_out_cuda_template(
  Tensor& gradInput,
  const Tensor& gradOutput,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override)
{
  TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
  TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 };
  TensorArg input_arg{ input, "input", 3 };
  checkAllSameGPU("avg_pool3d_backward_out_cuda",
                  {gradInput_arg, gradOutput_arg, input_arg});
  // #20866, #22032: Guarantee this for the official C++ API?
  TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
    "avg_pool3d: kernel_size must be a single int, or a tuple of three ints");
  const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
  const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
  const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
  TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 3,
    "avg_pool3d: stride must be omitted, a single int, or a tuple of three ints");
  // Omitted stride defaults to the kernel size (non-overlapping windows).
  const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
  const int dH = stride.empty() ? kH :
                 stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
  const int dW = stride.empty() ? kW :
                 stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
  TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
    "avg_pool3d: padding must be a single int, or a tuple of three ints");
  const int padT = safe_downcast<int, int64_t>(padding[0]);
  const int padH = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[1]);
  const int padW = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[2]);
  TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
    "non-empty 4D or 5D (batch mode) tensor expected for input");
  TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5),
    "non-empty 4D or 5D (batch mode) tensor expected for gradOutput");
  // if divisor==0 then we will ignore it
  int64_t divisor = 0;
  if (divisor_override.has_value()) {
    TORCH_CHECK(divisor_override.value() != 0, "divisor must be not zero");
    divisor = divisor_override.value();
  }
  // Resize and initialize result tensor.
  gradInput.resize_as_(input);
  gradInput.zero_();
  // Negative indexing handles both 4-D (C,T,H,W) and 5-D (N,C,T,H,W) input.
  const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
  const int64_t nslices = input.size(-4);
  const int64_t itime = input.size(-3);
  const int64_t iheight = input.size(-2);
  const int64_t iwidth = input.size(-1);
  const int64_t otime = gradOutput.size(-3);
  const int64_t oheight = gradOutput.size(-2);
  const int64_t owidth = gradOutput.size(-1);
  /* XXX shape check behavior from TH */
  const int64_t otime_for_shape_check = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode);
  const int64_t oheight_for_shape_check = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode);
  const int64_t owidth_for_shape_check = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode);
  // Overlapping windows mean two output cells can back-prop into the same
  // input cell, which forces the atomic kernel below.
  const bool kernelsOverlap = (dT < kT) || (dH < kH) || (dW < kW);
  avg_pool3d_backward_shape_check(
    input,
    gradOutput,
    nslices,
    kT, kH, kW,
    dT, dH, dW,
    padT, padH, padW,
    itime, iheight, iwidth,
    otime, oheight, owidth);
  Tensor work_grad_input = gradInput;
  Tensor work_grad_output = gradOutput.contiguous();
  if (input.ndimension() == 5) {
    // Collapse batch and feature dimensions.
    work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth});
    work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth});
  }
  // Optimizing for stride 1 is probably only of limited value, but this
  // specialization yields 3x speedup over the gpuAtomicAdd implementation.
  // Padding must be 0, otherwise, pool size may change.
  if (dT == 1 && dH == 1 && dW == 1 && padT == 0 && padH == 0 && padW == 0) {
    AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
      "avg_pool3d_backward_out_frame_stride1",
      [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        int64_t totalZ = itime * nslices * nbatch;
        int64_t offsetZ = 0;
        dim3 block(32, 8);
        // With stride 1 and no padding the divisor is constant per window,
        // so it can be folded into a single normalization factor.
        accscalar_t divide_factor;
        if (divisor) {
          divide_factor = static_cast<accscalar_t>(divisor);
        } else {
          divide_factor = static_cast<accscalar_t>(kT * kH * kW);
        }
        while (totalZ > 0) {
          dim3 grid(cuda::ATenCeilDiv(iwidth, static_cast<int64_t>(block.x)),
                    cuda::ATenCeilDiv(iheight, static_cast<int64_t>(block.y)),
                    totalZ > 65535 ? 65535 : totalZ);
          hipLaunchKernelGGL(( avg_pool3d_single_backward_out_frame_stride1<scalar_t, accscalar_t>)
            , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
               work_grad_output.packed_accessor64<scalar_t, 4>(),
               work_grad_input.packed_accessor64<scalar_t, 4>(),
               kT, kH, kW,
               1.0f/divide_factor,
               offsetZ);
          C10_HIP_KERNEL_LAUNCH_CHECK();
          totalZ -= 65535;
          offsetZ += 65535;
        }
      }
    );
  }
  else {
    AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
      "avg_pool3d_backward_out_frame",
      [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        int64_t totalZ = otime * nslices * nbatch;
        int64_t offsetZ = 0;
        dim3 block(32, 8);
        while (totalZ > 0) {
          dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int64_t>(block.x)),
                    cuda::ATenCeilDiv(oheight, static_cast<int64_t>(block.y)),
                    totalZ > 65535 ? 65535 : totalZ);
          if (kernelsOverlap) {
            hipLaunchKernelGGL(( avg_pool3d_cuda_update_grad_input_atomic<scalar_t, accscalar_t>)
              , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                 work_grad_output.packed_accessor64<scalar_t, 4>(),
                 work_grad_input.packed_accessor64<scalar_t, 4>(),
                 kT, kH, kW,
                 dT, dH, dW,
                 padT, padH, padW,
                 count_include_pad,
                 offsetZ, divisor);
            C10_HIP_KERNEL_LAUNCH_CHECK();
          }
          else {
            hipLaunchKernelGGL(( avg_pool3d_cuda_update_grad_input<scalar_t, accscalar_t>)
              , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                 work_grad_output.packed_accessor64<scalar_t, 4>(),
                 work_grad_input.packed_accessor64<scalar_t, 4>(),
                 kT, kH, kW,
                 dT, dH, dW,
                 padT, padH, padW,
                 count_include_pad,
                 offsetZ, divisor);
            C10_HIP_KERNEL_LAUNCH_CHECK();
          }
          totalZ -= 65535;
          offsetZ += 65535;
        }
      }
    );
  }
}
} // namespace
// Out-variant entry point registered for avg_pool3d on CUDA: thin
// pass-through to the shared template, which resizes and fills `output`.
Tensor& avg_pool3d_out_cuda(const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override,
  Tensor& output)
{
  avg_pool3d_out_cuda_template(
    output,
    input,
    kernel_size,
    stride,
    padding,
    ceil_mode,
    count_include_pad,
    divisor_override);
  return output;
}
// Allocating entry point for avg_pool3d on CUDA: creates an empty result
// tensor with the input's options and delegates to the shared out-template,
// which resizes it to the pooled shape and fills it.
Tensor avg_pool3d_cuda(
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override)
{
  Tensor result = at::empty({0}, input.options());
  avg_pool3d_out_cuda_template(
      result, input, kernel_size, stride, padding,
      ceil_mode, count_include_pad, divisor_override);
  return result;
}
// Out-variant backward entry point: flags nondeterminism (the overlapping-
// window path uses atomicAdd) and passes through to the shared template,
// which resizes, zeroes, and fills `gradInput`.
Tensor& avg_pool3d_backward_out_cuda(const Tensor& gradOutput_,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override,
  Tensor& gradInput)
{
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage
  globalContext().alertNotDeterministic("avg_pool3d_backward_out_cuda");
  avg_pool3d_backward_out_cuda_template(
    gradInput,
    gradOutput_,
    input,
    kernel_size,
    stride,
    padding,
    ceil_mode,
    count_include_pad,
    divisor_override);
  return gradInput;
}
// Allocating backward entry point: flags nondeterminism (the overlapping-
// window path uses atomicAdd), allocates a zeroed gradient tensor matching
// the input, and delegates to the shared backward template.
Tensor avg_pool3d_backward_cuda(
  const Tensor& gradOutput_,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override)
{
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage
  globalContext().alertNotDeterministic("avg_pool3d_backward_cuda");
  auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  avg_pool3d_backward_out_cuda_template(
      grad_input, gradOutput_, input, kernel_size, stride, padding,
      ceil_mode, count_include_pad, divisor_override);
  return grad_input;
}
} // at::native
} // at
| 58379859cfda1a21d423faa5aa06101814867cc6.cu | #include <ATen/AccumulateType.h>
#include <ATen/native/Pool.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
// Device-side integer minimum; shadows host ::min inside this TU's kernels.
__device__ inline int min(int a, int b) {
  return (b < a) ? b : a;
}
// Device-side integer maximum; shadows host ::max inside this TU's kernels.
__device__ inline int max(int a, int b) {
  return (a < b) ? b : a;
}
// 3-D average-pooling forward: one thread computes one output element.
//
// Grid layout: (x, y) tile the output width/height; blockIdx.z + offsetZ
// jointly encodes the output frame and the collapsed batch*channel slice
// (offsetZ is the running base used when the host splits the launch into
// chunks of at most 65535 z-blocks). There is no explicit bound check on
// slice/oFrame — the host sizes the grid so they stay in range.
//
// divisor_override == 0 means "no override"; otherwise it replaces the
// computed divisor. With no override, the divisor is the full window volume
// (including padding cells) when count_include_pad, else the volume of the
// window clipped to the input bounds.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool3d_cuda_update_output(
  PackedTensorAccessor64<scalar_t, 4> input,
  PackedTensorAccessor64<scalar_t, 4> output,
  int kT, int kH, int kW,
  int dT, int dH, int dW,
  int padT, int padH, int padW,
  bool count_include_pad,
  int offsetZ, int divisor_override)
{
  int oCol = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
  int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
  // Guard against partially filled tiles along width/height.
  if (oRow < output.size(2) && oCol < output.size(3))
  {
    accscalar_t sum = 0.0;
    // Pooling window in input coordinates, before clipping to the tensor.
    int tstart = oFrame * dT - padT;
    int hstart = oRow * dH - padH;
    int wstart = oCol * dW - padW;
    int tend = min(tstart + kT, input.size(1) + padT);
    int hend = min(hstart + kH, input.size(2) + padH);
    int wend = min(wstart + kW, input.size(3) + padW);
    // Window volume including padding cells; used when count_include_pad.
    int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
    // Clip the window to the real input bounds for the accumulation loops.
    tstart = max(tstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    tend = min(tend, input.size(1));
    hend = min(hend, input.size(2));
    wend = min(wend, input.size(3));
    // A window lying entirely in the padding contributes zero.
    if (tstart >= tend || hstart >= hend || wstart >= wend) {
      output[slice][oFrame][oRow][oCol] = scalar_t(0);
      return;
    }
    accscalar_t divide_factor;
    if (divisor_override) {
      divide_factor = static_cast<accscalar_t>(divisor_override);
    } else {
      if(count_include_pad) {
        divide_factor = static_cast<accscalar_t>(pool_size);
      } else {
        // Count only the cells of the clipped (in-bounds) window.
        divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart));
      }
    }
    // Accumulate in accscalar_t (wider than scalar_t for half/bfloat16).
    int ti, hi, wi;
    for (ti = tstart; ti < tend; ++ti)
    {
      for (hi = hstart; hi < hend; ++hi)
      {
        for (wi = wstart; wi < wend; ++wi)
        {
          scalar_t val = input[slice][ti][hi][wi];
          sum += val;
        }
      }
    }
    // Convert the wide accumulator back to the storage type on write-out.
    output[slice][oFrame][oRow][oCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum / divide_factor);
  }
}
// Inner-most loop size (kW) passed as template parameter for
// performance reasons.
//
// Specialization of the forward kernel with the kernel width baked in as a
// compile-time constant (KERNEL_WIDTH), so the compiler can unroll the
// inner-most width loop. Semantics are otherwise identical to the runtime-kW
// overload above; the host dispatches to this one for kW in [1, 7].
template<int KERNEL_WIDTH, typename scalar_t, typename accscalar_t>
__global__ void avg_pool3d_cuda_update_output(
  PackedTensorAccessor64<scalar_t, 4> input,
  PackedTensorAccessor64<scalar_t, 4> output,
  int kT, int kH,
  int dT, int dH, int dW,
  int padT, int padH, int padW,
  bool count_include_pad,
  int offsetZ, int divisor_override)
{
  int oCol = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
  int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
  // Guard against partially filled tiles along width/height.
  if (oRow < output.size(2) && oCol < output.size(3))
  {
    accscalar_t sum = 0.0;
    // Pooling window in input coordinates, before clipping to the tensor.
    int tstart = oFrame * dT - padT;
    int hstart = oRow * dH - padH;
    int wstart = oCol * dW - padW;
    int tend = min(tstart + kT, input.size(1) + padT);
    int hend = min(hstart + kH, input.size(2) + padH);
    int wend = min(wstart + KERNEL_WIDTH, input.size(3) + padW);
    // Window volume including padding cells; used when count_include_pad.
    int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
    // Clip the window to the real input bounds for the accumulation loops.
    tstart = max(tstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    tend = min(tend, input.size(1));
    hend = min(hend, input.size(2));
    wend = min(wend, input.size(3));
    // A window lying entirely in the padding contributes zero.
    if (tstart >= tend || hstart >= hend || wstart >= wend) {
      output[slice][oFrame][oRow][oCol] = scalar_t(0);
      return;
    }
    accscalar_t divide_factor;
    // divisor_override == 0 means "no override".
    if (divisor_override) {
      divide_factor = static_cast<accscalar_t>(divisor_override);
    } else {
      if(count_include_pad) {
        divide_factor = static_cast<accscalar_t>(pool_size);
      } else {
        // Count only the cells of the clipped (in-bounds) window.
        divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart));
      }
    }
    // Accumulate in accscalar_t; the wi loop has a compile-time trip bound.
    int ti, hi, wi;
    for (ti = tstart; ti < tend; ++ti)
    {
      for (hi = hstart; hi < hend; ++hi)
      {
        for (wi = wstart; wi < wend; ++wi)
        {
          scalar_t val = input[slice][ti][hi][wi];
          sum += val;
        }
      }
    }
    output[slice][oFrame][oRow][oCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum / divide_factor);
  }
}
// Backward specialization for stride == 1 and zero padding: each thread
// computes one gradInput element directly (gather, no atomics) by summing
// the gradOutput positions whose pooling window covers it, then scaling by
// normFactor (the host passes 1 / divisor).
//
// The inner loops walk raw pointer offsets from gOut, assuming the last
// three dimensions of gradOutput are densely packed — the host passes a
// .contiguous() tensor. blockIdx.z + offsetZ encodes (slice, iFrame) the
// same way as the forward kernels.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool3d_single_backward_out_frame_stride1(
  PackedTensorAccessor64<scalar_t, 4> gradOutput,
  PackedTensorAccessor64<scalar_t, 4> gradInput,
  int kT, int kH, int kW,
  accscalar_t normFactor,
  int offsetZ)
{
  int iCol = blockIdx.x * blockDim.x + threadIdx.x;
  int iRow = blockIdx.y * blockDim.y + threadIdx.y;
  int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // input frame/time
  int slice = (blockIdx.z + offsetZ) / gradInput.size(1); // input slice/feature
  // guard against over-tiled threads
  if (iRow < gradInput.size(2) && iCol < gradInput.size(3))
  {
    accscalar_t sum = 0.0;
    // First gradOutput element whose window can include (iFrame, iRow, iCol).
    scalar_t *gOut = &gradOutput[slice][max(0, iFrame - kT + 1)]
                        [max(0, iRow - kH + 1)][max(0, iCol - kW + 1)];
    int frameOffset = 0;
    for (int oFrame = max(0, iFrame - kT + 1);
         oFrame < min(iFrame + 1, gradOutput.size(1));
         ++oFrame)
    {
      int rowOffset = frameOffset;
      for (int oRow = max(0, iRow - kH + 1);
           oRow < min(iRow + 1, gradOutput.size(2));
           ++oRow)
      {
        int colOffset = rowOffset;
        for (int oCol = max(0, iCol - kW + 1);
             oCol < min(iCol + 1, gradOutput.size(3));
             ++oCol)
        {
          sum += gOut[colOffset];
          ++colOffset;
        }
        // Advance one gradOutput row (width elements).
        rowOffset += gradOutput.size(3);
      }
      // Advance one gradOutput frame (height * width elements).
      frameOffset += gradOutput.size(2) * gradOutput.size(3);
    }
    gradInput[slice][iFrame][iRow][iCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum * normFactor);
  }
}
// Backward (scatter, atomic variant): each thread takes one gradOutput
// element, divides it by the forward divisor, and adds the result into every
// gradInput cell of its pooling window via gpuAtomicAdd. Used by the host
// when windows overlap (stride < kernel in any dimension), so multiple
// threads may update the same gradInput cell. Output order of the atomic
// adds is unspecified — this path is nondeterministic for floating point.
//
// Window/divisor computation mirrors the forward kernel exactly so the
// gradient matches the forward average.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool3d_cuda_update_grad_input_atomic(
  PackedTensorAccessor64<scalar_t, 4> gradOutput,
  PackedTensorAccessor64<scalar_t, 4> gradInput,
  int kT, int kH, int kW,
  int dT, int dH, int dW,
  int padT, int padH, int padW,
  bool count_include_pad,
  int offsetZ, int divisor_override)
{
  int oCol = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // gradOutput frame/time
  int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // gradOutput slice/feature
  // guard against over-tiled threads
  if (oRow < gradOutput.size(2) && oCol < gradOutput.size(3))
  {
    // Reconstruct the forward pooling window and its divisor.
    int tstart = oFrame * dT - padT;
    int hstart = oRow * dH - padH;
    int wstart = oCol * dW - padW;
    int tend = min(tstart + kT, gradInput.size(1) + padT);
    int hend = min(hstart + kH, gradInput.size(2) + padH);
    int wend = min(wstart + kW, gradInput.size(3) + padW);
    int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
    tstart = max(tstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    tend = min(tend, gradInput.size(1));
    hend = min(hend, gradInput.size(2));
    wend = min(wend, gradInput.size(3));
    accscalar_t divide_factor;
    // divisor_override == 0 means "no override".
    if (divisor_override) {
      divide_factor = static_cast<accscalar_t>(divisor_override);
    } else {
      if(count_include_pad) {
        divide_factor = static_cast<accscalar_t>(pool_size);
      } else {
        divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart));
      }
    }
    // Per-cell gradient contribution from this output position.
    scalar_t val = ScalarConvert<accscalar_t, scalar_t>::to(
      ScalarConvert<scalar_t, accscalar_t>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
    for (int iFrame = tstart; iFrame < tend; ++iFrame)
    {
      for (int iRow = hstart; iRow < hend; ++iRow)
      {
        for (int iCol = wstart; iCol < wend; ++iCol)
        {
          gpuAtomicAdd(&gradInput[slice][iFrame][iRow][iCol], val);
        }
      }
    }
  }
}
// Backward (scatter, non-atomic variant): identical to the atomic version
// except that it uses plain stores. The host selects this kernel only when
// pooling windows do not overlap (stride >= kernel in every dimension), so
// each gradInput cell is written by at most one thread and no atomics are
// needed. Note it assigns rather than accumulates — correct because the
// host zero-initializes gradInput and windows are disjoint.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool3d_cuda_update_grad_input(
  PackedTensorAccessor64<scalar_t, 4> gradOutput,
  PackedTensorAccessor64<scalar_t, 4> gradInput,
  int kT, int kH, int kW,
  int dT, int dH, int dW,
  int padT, int padH, int padW,
  bool count_include_pad, int offsetZ, int divisor_override)
{
  int oCol = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // gradOutput frame/time
  int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // gradOutput slice/feature
  // guard against over-tiled threads
  if (oRow < gradOutput.size(2) && oCol < gradOutput.size(3))
  {
    // Reconstruct the forward pooling window and its divisor.
    int tstart = oFrame * dT - padT;
    int hstart = oRow * dH - padH;
    int wstart = oCol * dW - padW;
    int tend = min(tstart + kT, gradInput.size(1) + padT);
    int hend = min(hstart + kH, gradInput.size(2) + padH);
    int wend = min(wstart + kW, gradInput.size(3) + padW);
    int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
    tstart = max(tstart, 0);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    tend = min(tend, gradInput.size(1));
    hend = min(hend, gradInput.size(2));
    wend = min(wend, gradInput.size(3));
    accscalar_t divide_factor;
    // divisor_override == 0 means "no override".
    if (divisor_override) {
      divide_factor = static_cast<accscalar_t>(divisor_override);
    } else {
      if(count_include_pad) {
        divide_factor = static_cast<accscalar_t>(pool_size);
      } else {
        divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart));
      }
    }
    // Per-cell gradient contribution from this output position.
    scalar_t val = ScalarConvert<accscalar_t, scalar_t>::to(
      ScalarConvert<scalar_t, accscalar_t>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
    for (int iFrame = tstart; iFrame < tend; ++iFrame)
    {
      for (int iRow = hstart; iRow < hend; ++iRow)
      {
        for (int iCol = wstart; iCol < wend; ++iCol)
        {
          gradInput[slice][iFrame][iRow][iCol] = val;
        }
      }
    }
  }
}
// Emits one `case KW:` launching the kernel-width-templated forward kernel,
// so the inner width loop is unrolled at compile time for kW in [1, 7].
// Expands inside the AT_DISPATCH lambda below and relies on its local names
// (grid, block, work_input, work_output, pooling params, offsetZ, divisor).
// No comments inside the macro body: a // comment before a '\' continuation
// would swallow the following line.
#define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
  avg_pool3d_cuda_update_output<KW, scalar_t, accscalar_t> \
    <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \
       work_input.packed_accessor64<scalar_t, 4>(), \
       work_output.packed_accessor64<scalar_t, 4>(), \
       kT, kH, \
       dT, dH, dW, \
       padT, padH, padW, \
       count_include_pad, \
       offsetZ, divisor); \
  C10_CUDA_KERNEL_LAUNCH_CHECK(); \
  break
// Forward CUDA implementation of 3-D average pooling.
// Validates kernel_size/stride/padding, resizes `output` to the pooled shape,
// collapses batch and channel dims into one, and launches the update-output
// kernel in chunks of at most 65535 grid-z slices per launch (the CUDA limit
// on gridDim.z for these kernels).
void avg_pool3d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg input_arg{ input, "input", 2 };
checkAllSameGPU("avg_pool3d_out_cuda", {output_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
"avg_pool3d: kernel_size must be a single int, or a tuple of three ints");
// A single kernel-size value is broadcast to all three spatial dims (T, H, W).
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 3,
"avg_pool3d: stride must be omitted, a single int, or a tuple of three ints");
// Omitted stride defaults to the kernel size (non-overlapping windows).
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
"avg_pool3d: padding must be a single int, or a tuple of three ints");
const int padT = safe_downcast<int, int64_t>(padding[0]);
const int padH = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[1]);
const int padW = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[2]);
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
// if divisor==0 then we will ignore it
int64_t divisor = 0;
if (divisor_override.has_value()) {
TORCH_CHECK(divisor_override.value() != 0, "divisor must be not zero");
divisor = divisor_override.value();
}
// Negative indexing handles both the 4D (C, T, H, W) and the 5D
// (N, C, T, H, W) layouts uniformly.
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t itime = input.size(-3);
const int64_t iheight = input.size(-2);
const int64_t iwidth = input.size(-1);
// Output spatial extent (dilation fixed at 1 for average pooling).
const int64_t otime = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode);
const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode);
const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode);
pool3d_shape_check(
input,
nslices,
kT, kH, kW,
dT, dH, dW,
padT, padH, padW,
1, 1, 1,
itime, iheight, iwidth,
otime, oheight, owidth,
/*check_input_size=*/ true);
if (input.ndimension() == 4) {
output.resize_({ nslices, otime, oheight, owidth});
}
else {
output.resize_({nbatch, nslices, otime, oheight, owidth});
}
Tensor work_input = input.contiguous();
Tensor work_output = output;
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(),
"avg_pool3d_out_cuda",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
// One grid-z slice per (time, channel, batch) plane; chunk the launch so
// gridDim.z never exceeds the 65535 hardware limit. offsetZ tells the
// kernel which plane a chunk starts at.
int64_t totalZ = otime * nslices * nbatch;
int64_t offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int64_t>(block.x)),
cuda::ATenCeilDiv(oheight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
// Specialized kernels for small kW (1..7); the default case is the
// generic-width kernel. LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH is defined
// earlier in this file (undef'd right after this function).
switch (kW) {
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(5);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7);
default:
avg_pool3d_cuda_update_output<scalar_t, accscalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
work_input.packed_accessor64<scalar_t, 4>(),
work_output.packed_accessor64<scalar_t, 4>(),
kT, kH, kW,
dT, dH, dW,
padT, padH, padW,
count_include_pad,
offsetZ, divisor);
C10_CUDA_KERNEL_LAUNCH_CHECK();
break;
}
totalZ -= 65535;
offsetZ += 65535;
}
}
);
}
#undef LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH
// Backward CUDA implementation of 3-D average pooling.
// Resizes/zeroes gradInput, validates shapes against gradOutput, then picks
// one of three kernels: a specialized stride-1/no-padding kernel, an atomic
// kernel when pooling windows overlap, or the plain scatter kernel otherwise.
void avg_pool3d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 };
TensorArg input_arg{ input, "input", 3 };
checkAllSameGPU("avg_pool3d_backward_out_cuda",
{gradInput_arg, gradOutput_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
"avg_pool3d: kernel_size must be a single int, or a tuple of three ints");
// Same argument broadcasting rules as the forward pass.
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 3,
"avg_pool3d: stride must be omitted, a single int, or a tuple of three ints");
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
"avg_pool3d: padding must be a single int, or a tuple of three ints");
const int padT = safe_downcast<int, int64_t>(padding[0]);
const int padH = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[1]);
const int padW = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[2]);
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for gradOutput");
// if divisor==0 then we will ignore it
int64_t divisor = 0;
if (divisor_override.has_value()) {
TORCH_CHECK(divisor_override.value() != 0, "divisor must be not zero");
divisor = divisor_override.value();
}
// Resize and initialize result tensor.
gradInput.resize_as_(input);
gradInput.zero_();
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t itime = input.size(-3);
const int64_t iheight = input.size(-2);
const int64_t iwidth = input.size(-1);
const int64_t otime = gradOutput.size(-3);
const int64_t oheight = gradOutput.size(-2);
const int64_t owidth = gradOutput.size(-1);
/* XXX shape check behavior from TH */
// NOTE(review): these *_for_shape_check values are computed but not used
// below ("chape" is a pre-existing typo); presumably pooling_output_shape
// performs internal validity checks as a side effect — confirm before
// removing.
const int64_t otime_for_shape_check = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode);
const int64_t oheight_for_shape_check = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode);
const int64_t owidth_for_chape_check = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode);
// Overlapping windows mean several output cells scatter into the same input
// cell, which requires atomic accumulation.
const bool kernelsOverlap = (dT < kT) || (dH < kH) || (dW < kW);
avg_pool3d_backward_shape_check(
input,
gradOutput,
nslices,
kT, kH, kW,
dT, dH, dW,
padT, padH, padW,
itime, iheight, iwidth,
otime, oheight, owidth);
Tensor work_grad_input = gradInput;
Tensor work_grad_output = gradOutput.contiguous();
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth});
}
// Optimizing for stride 1 is probably only of limited value, but this
// specialization yields 3x speedup over the gpuAtomicAdd implementation.
// Padding must be 0, otherwise, pool size may change.
if (dT == 1 && dH == 1 && dW == 1 && padT == 0 && padH == 0 && padW == 0) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool3d_backward_out_frame_stride1",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
// Grid-z covers (time, channel, batch) planes, chunked to the 65535
// gridDim.z limit; offsetZ is the plane index of the chunk start.
int64_t totalZ = itime * nslices * nbatch;
int64_t offsetZ = 0;
dim3 block(32, 8);
// With stride 1 and no padding every input cell belongs to a fixed
// number of windows, so the divide factor is constant.
accscalar_t divide_factor;
if (divisor) {
divide_factor = static_cast<accscalar_t>(divisor);
} else {
divide_factor = static_cast<accscalar_t>(kT * kH * kW);
}
while (totalZ > 0) {
dim3 grid(cuda::ATenCeilDiv(iwidth, static_cast<int64_t>(block.x)),
cuda::ATenCeilDiv(iheight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
avg_pool3d_single_backward_out_frame_stride1<scalar_t, accscalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
work_grad_output.packed_accessor64<scalar_t, 4>(),
work_grad_input.packed_accessor64<scalar_t, 4>(),
kT, kH, kW,
1.0f/divide_factor,
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}
);
}
else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool3d_backward_out_frame",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
int64_t totalZ = otime * nslices * nbatch;
int64_t offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int64_t>(block.x)),
cuda::ATenCeilDiv(oheight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
if (kernelsOverlap) {
// Overlapping windows: use the atomic-accumulation kernel.
avg_pool3d_cuda_update_grad_input_atomic<scalar_t, accscalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
work_grad_output.packed_accessor64<scalar_t, 4>(),
work_grad_input.packed_accessor64<scalar_t, 4>(),
kT, kH, kW,
dT, dH, dW,
padT, padH, padW,
count_include_pad,
offsetZ, divisor);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
else {
avg_pool3d_cuda_update_grad_input<scalar_t, accscalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
work_grad_output.packed_accessor64<scalar_t, 4>(),
work_grad_input.packed_accessor64<scalar_t, 4>(),
kT, kH, kW,
dT, dH, dW,
padT, padH, padW,
count_include_pad,
offsetZ, divisor);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
totalZ -= 65535;
offsetZ += 65535;
}
}
);
}
}
} // namespace
// Public out-variant entry point for avg_pool3d on CUDA.
// All of the work — validation, resizing `output`, and kernel dispatch —
// happens in the shared template above; this is just a thin shim.
Tensor& avg_pool3d_out_cuda(const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override,
Tensor& output)
{
avg_pool3d_out_cuda_template(
output, input, kernel_size, stride, padding,
ceil_mode, count_include_pad, divisor_override);
return output;
}
// Allocating entry point for avg_pool3d on CUDA: creates an empty result
// tensor (the template resizes it to the pooled shape) and delegates.
Tensor avg_pool3d_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
auto output = at::empty({0}, input.options());
avg_pool3d_out_cuda_template(
output, input, kernel_size, stride, padding,
ceil_mode, count_include_pad, divisor_override);
return output;
}
// Public out-variant backward entry point. Flags non-determinism (the
// overlapping-window path accumulates with atomicAdd) and delegates to the
// shared template, which resizes and fills `gradInput`.
Tensor& avg_pool3d_backward_out_cuda(const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override,
Tensor& gradInput)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("avg_pool3d_backward_out_cuda");
avg_pool3d_backward_out_cuda_template(
gradInput, gradOutput_, input, kernel_size, stride, padding,
ceil_mode, count_include_pad, divisor_override);
return gradInput;
}
// Allocating backward entry point: zero-initializes gradInput in the legacy
// contiguous layout, flags non-determinism, and delegates to the template.
Tensor avg_pool3d_backward_cuda(
const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("avg_pool3d_backward_cuda");
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
avg_pool3d_backward_out_cuda_template(
gradInput, gradOutput_, input, kernel_size, stride, padding,
ceil_mode, count_include_pad, divisor_override);
return gradInput;
}
} // at::native
} // at
|
d52a8e81aae5f6f1acbda05ba6479b98f4331a6b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include<iostream>
#define imin(a,b) (a<b?a:b)
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N + threadsPerBlock - 1) / threadsPerBlock);
__global__
void dot(float* a, float* b, float* c)
{
// Per-block partial dot product: each thread accumulates a grid-stride
// partial sum, the block tree-reduces it in shared memory, and thread 0
// writes one partial result per block to c[blockIdx.x].
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < N)
{
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = temp;
// All threads must publish their partial sum before the reduction reads it.
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
{
cache[cacheIndex] += cache[cacheIndex + i];
}
// Fix: the barrier must sit OUTSIDE the divergent branch. __syncthreads()
// has to be reached by every thread in the block (placing it inside
// `if (cacheIndex < i)` is undefined behavior), and it must separate each
// round's writes from the next round's reads to avoid a data race.
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
{
c[blockIdx.x] = cache[0];
}
}
// Host driver: fills a[i]=i, b[i]=2i, launches the partial-dot kernel, and
// finishes the reduction over the per-block partials on the CPU. The result
// is checked against the closed form 2 * sum of squares of 0..N-1.
int main(void)
{
float c;
float* a, * b, * partial_c;   // host buffers; partial_c holds one sum per block
float* dev_a, * dev_b, * dev_partial_c;
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
partial_c = (float*)malloc(sizeof(float) * blocksPerGrid);
// NOTE(review): none of the hipMalloc/hipMemcpy return codes are checked;
// a failed allocation would surface only as wrong output.
hipMalloc((void**)&dev_a, sizeof(float) * N);
hipMalloc((void**)&dev_b, sizeof(float) * N);
hipMalloc((void**)&dev_partial_c, sizeof(float) * blocksPerGrid);
for (int i = 0; i < N; i++)
{
a[i] = i;
b[i] = i * 2;
}
hipMemcpy(dev_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, sizeof(float) * N, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( dot) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c);
// The blocking device-to-host copy also synchronizes with the kernel.
hipMemcpy(partial_c, dev_partial_c, sizeof(float) * blocksPerGrid, hipMemcpyDeviceToHost);
// Final stage of the reduction: sum the per-block partials on the host.
c = 0;
for (int i = 0; i < blocksPerGrid; i++)
{
c += partial_c[i];
}
printf("Does GPU value %.6g = %.6g?\n", c, sum_squares((float)(N - 1)));
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_partial_c);
free(a);
free(b);
free(partial_c);
return 0;
} | d52a8e81aae5f6f1acbda05ba6479b98f4331a6b.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include<iostream>
#define imin(a,b) (a<b?a:b)
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N + threadsPerBlock - 1) / threadsPerBlock);
__global__
void dot(float* a, float* b, float* c)
{
// Per-block partial dot product: each thread accumulates a grid-stride
// partial sum, the block tree-reduces it in shared memory, and thread 0
// writes one partial result per block to c[blockIdx.x].
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < N)
{
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = temp;
// All threads must publish their partial sum before the reduction reads it.
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
{
cache[cacheIndex] += cache[cacheIndex + i];
}
// Fix: the barrier must sit OUTSIDE the divergent branch. __syncthreads()
// has to be reached by every thread in the block (placing it inside
// `if (cacheIndex < i)` is undefined behavior), and it must separate each
// round's writes from the next round's reads to avoid a data race.
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
{
c[blockIdx.x] = cache[0];
}
}
// Host driver: fills a[i]=i, b[i]=2i, launches the partial-dot kernel, and
// finishes the reduction over the per-block partials on the CPU. The result
// is checked against the closed form 2 * sum of squares of 0..N-1.
int main(void)
{
float c;
float* a, * b, * partial_c;   // host buffers; partial_c holds one sum per block
float* dev_a, * dev_b, * dev_partial_c;
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
partial_c = (float*)malloc(sizeof(float) * blocksPerGrid);
// NOTE(review): none of the cudaMalloc/cudaMemcpy return codes are checked;
// a failed allocation would surface only as wrong output.
cudaMalloc((void**)&dev_a, sizeof(float) * N);
cudaMalloc((void**)&dev_b, sizeof(float) * N);
cudaMalloc((void**)&dev_partial_c, sizeof(float) * blocksPerGrid);
for (int i = 0; i < N; i++)
{
a[i] = i;
b[i] = i * 2;
}
cudaMemcpy(dev_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
dot <<<blocksPerGrid, threadsPerBlock>>> (dev_a, dev_b, dev_partial_c);
// The blocking device-to-host copy also synchronizes with the kernel.
cudaMemcpy(partial_c, dev_partial_c, sizeof(float) * blocksPerGrid, cudaMemcpyDeviceToHost);
// Final stage of the reduction: sum the per-block partials on the host.
c = 0;
for (int i = 0; i < blocksPerGrid; i++)
{
c += partial_c[i];
}
printf("Does GPU value %.6g = %.6g?\n", c, sum_squares((float)(N - 1)));
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_partial_c);
free(a);
free(b);
free(partial_c);
return 0;
} |
d724b0b94c9ffe25e03b468dd7dd022f8c91e524.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Simulate randomly migrating cell
#include "../include/dtypes.cuh"
#include "../include/inits.cuh"
#include "../include/polarity.cuh"
#include "../include/solvers.cuh"
#include "../include/utils.cuh"
#include "../include/vtk.cuh"
const auto r_max = 1;
const auto n_cells = 500;
const auto n_time_steps = 150;
const auto dt = 0.05;
// Pairwise interaction force between cells i and j plus the migration force.
// No self-interaction; interactions are cut off beyond r_max. The radial
// profile is piecewise linear: repulsion below 0.7, adhesion above 0.8.
__device__ Po_cell relu_w_migration(
Po_cell Xi, Po_cell r, float dist, int i, int j)
{
Po_cell dF{0};
if (i == j || dist > r_max) return dF;
auto strength = fmaxf(0.7 - dist, 0) * 2 - fmaxf(dist - 0.8, 0);
// Project the scalar force onto the separation vector r.
dF.x = r.x * strength / dist;
dF.y = r.y * strength / dist;
dF.z = r.z * strength / dist;
dF += migration_force(Xi, r, dist);
return dF;
}
// Randomly re-orient the polarity of cell 0 (the single migrating cell):
// draw a perturbation in a cone around the z axis, then rotate it so the
// cone axis aligns with the cell's current polarity direction.
__global__ void update_polarities(Po_cell* d_X, hiprandState_t* d_state)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
// Only cell 0 migrates in this simulation.
if (i != 0) return;
// Pick random perturbation in cone around z axis
Polarity perturbation{hiprand_normal(&d_state[i]),
2.f * static_cast<float>(M_PI) * hiprand_uniform(&d_state[i])};
// Rotate perturbation such that z axis would be in direction of migration
auto dir = pol_to_float3(perturbation);
// Rotation axis u lies in the xy-plane, perpendicular to the cell's phi.
auto u_phi = d_X[i].phi + M_PI / 2.;
float3 u{cosf(u_phi), sinf(u_phi), 0};
auto sin_theta = sinf(d_X[i].theta);
auto cos_theta = cosf(d_X[i].theta);
// Expanded axis-angle rotation matrix about unit axis u (u.z == 0) by theta.
float3 new_dir;
new_dir.x = (cos_theta + u.x * u.x * (1 - cos_theta)) * dir.x +
u.x * u.y * (1 - cos_theta) * dir.y + u.y * sin_theta * dir.z;
new_dir.y = u.x * u.y * (1 - cos_theta) * dir.x +
(cos_theta + u.y * u.y * (1 - cos_theta)) * dir.y -
u.x * sin_theta * dir.z;
new_dir.z =
-u.y * sin_theta * dir.x + u.x * sin_theta * dir.y + cos_theta * dir.z;
// Store the rotated direction back in spherical (theta, phi) form.
auto new_polarity = pt_to_pol(new_dir);
d_X[i].theta = new_polarity.theta;
d_X[i].phi = new_polarity.phi;
}
// Simulation driver: relax a sphere of cells, single out cell 0 as the
// migrating cell, and integrate while perturbing its polarity each step,
// writing VTK output for every time step.
int main(int argc, const char* argv[])
{
// Prepare initial state
Solution<Po_cell, Tile_solver> cells{n_cells};
relaxed_sphere(0.75, cells);
// Reset cell 0 and give it a small, nonzero initial polarity.
cells.h_X[0] = Po_cell{0};
cells.h_X[0].phi = 0.01;
cells.copy_to_device();
// One RNG state per cell for the polarity perturbations.
hiprandState_t* d_state;
hipMalloc(&d_state, n_cells * sizeof(hiprandState_t));   // NOTE(review): return code unchecked
auto seed = time(NULL);
hipLaunchKernelGGL(( setup_rand_states), dim3((n_cells + 128 - 1) / 128), dim3(128), 0, 0,
n_cells, seed, d_state);
// Integrate cell positions
Vtk_output output{"random_walk"};
for (auto time_step = 0; time_step <= n_time_steps; time_step++) {
cells.copy_to_host();
hipLaunchKernelGGL(( update_polarities), dim3((n_cells + 32 - 1) / 32), dim3(32), 0, 0, cells.d_X, d_state);
cells.take_step<relu_w_migration>(dt);
output.write_positions(cells);
output.write_polarity(cells);
}
return 0;
}
| d724b0b94c9ffe25e03b468dd7dd022f8c91e524.cu | // Simulate randomly migrating cell
#include "../include/dtypes.cuh"
#include "../include/inits.cuh"
#include "../include/polarity.cuh"
#include "../include/solvers.cuh"
#include "../include/utils.cuh"
#include "../include/vtk.cuh"
const auto r_max = 1;
const auto n_cells = 500;
const auto n_time_steps = 150;
const auto dt = 0.05;
// Pairwise interaction force between cells i and j plus the migration force.
// No self-interaction; interactions are cut off beyond r_max. The radial
// profile is piecewise linear: repulsion below 0.7, adhesion above 0.8.
__device__ Po_cell relu_w_migration(
Po_cell Xi, Po_cell r, float dist, int i, int j)
{
Po_cell dF{0};
if (i == j || dist > r_max) return dF;
auto strength = fmaxf(0.7 - dist, 0) * 2 - fmaxf(dist - 0.8, 0);
// Project the scalar force onto the separation vector r.
dF.x = r.x * strength / dist;
dF.y = r.y * strength / dist;
dF.z = r.z * strength / dist;
dF += migration_force(Xi, r, dist);
return dF;
}
// Randomly re-orient the polarity of cell 0 (the single migrating cell):
// draw a perturbation in a cone around the z axis, then rotate it so the
// cone axis aligns with the cell's current polarity direction.
__global__ void update_polarities(Po_cell* d_X, curandState* d_state)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
// Only cell 0 migrates in this simulation.
if (i != 0) return;
// Pick random perturbation in cone around z axis
Polarity perturbation{curand_normal(&d_state[i]),
2.f * static_cast<float>(M_PI) * curand_uniform(&d_state[i])};
// Rotate perturbation such that z axis would be in direction of migration
auto dir = pol_to_float3(perturbation);
// Rotation axis u lies in the xy-plane, perpendicular to the cell's phi.
auto u_phi = d_X[i].phi + M_PI / 2.;
float3 u{cosf(u_phi), sinf(u_phi), 0};
auto sin_theta = sinf(d_X[i].theta);
auto cos_theta = cosf(d_X[i].theta);
// Expanded axis-angle rotation matrix about unit axis u (u.z == 0) by theta.
float3 new_dir;
new_dir.x = (cos_theta + u.x * u.x * (1 - cos_theta)) * dir.x +
u.x * u.y * (1 - cos_theta) * dir.y + u.y * sin_theta * dir.z;
new_dir.y = u.x * u.y * (1 - cos_theta) * dir.x +
(cos_theta + u.y * u.y * (1 - cos_theta)) * dir.y -
u.x * sin_theta * dir.z;
new_dir.z =
-u.y * sin_theta * dir.x + u.x * sin_theta * dir.y + cos_theta * dir.z;
// Store the rotated direction back in spherical (theta, phi) form.
auto new_polarity = pt_to_pol(new_dir);
d_X[i].theta = new_polarity.theta;
d_X[i].phi = new_polarity.phi;
}
// Simulation driver: relax a sphere of cells, single out cell 0 as the
// migrating cell, and integrate while perturbing its polarity each step,
// writing VTK output for every time step.
int main(int argc, const char* argv[])
{
// Prepare initial state
Solution<Po_cell, Tile_solver> cells{n_cells};
relaxed_sphere(0.75, cells);
// Reset cell 0 and give it a small, nonzero initial polarity.
cells.h_X[0] = Po_cell{0};
cells.h_X[0].phi = 0.01;
cells.copy_to_device();
// One RNG state per cell for the polarity perturbations.
curandState* d_state;
cudaMalloc(&d_state, n_cells * sizeof(curandState));   // NOTE(review): return code unchecked
auto seed = time(NULL);
setup_rand_states<<<(n_cells + 128 - 1) / 128, 128>>>(
n_cells, seed, d_state);
// Integrate cell positions
Vtk_output output{"random_walk"};
for (auto time_step = 0; time_step <= n_time_steps; time_step++) {
cells.copy_to_host();
update_polarities<<<(n_cells + 32 - 1) / 32, 32>>>(cells.d_X, d_state);
cells.take_step<relu_w_migration>(dt);
output.write_positions(cells);
output.write_polarity(cells);
}
return 0;
}
|
59a181b7bf0ba3729e94df74b7a6e9d19e04e854.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "bertCommon.h"
#include "common_hip.cuh"
#include <cassert>
#include <cstring>
#include <hip/hip_runtime.h>
#include <type_traits>
#include <vector>
using namespace nvinfer1;
namespace bert
{
// Dequantize two packed int8x4 words (main path + residual/skip) and write
// the four summed float lanes into hdata.
inline __device__ void res_add(
    float (&hdata)[4], const uint32_t idata, const uint32_t ires, const float dqData, const float dqRes)
{
    const char4 d4 = reinterpret_cast<const char4&>(idata);
    const char4 r4 = reinterpret_cast<const char4&>(ires);
    hdata[0] = dqData * float(d4.x) + dqRes * float(r4.x);
    hdata[1] = dqData * float(d4.y) + dqRes * float(r4.y);
    hdata[2] = dqData * float(d4.z) + dqRes * float(r4.z);
    hdata[3] = dqData * float(d4.w) + dqRes * float(r4.w);
}
// Fused int8 skip-connection + LayerNorm kernel ("mtron" variant) for the
// interleaved 32-byte-per-token layout with HEAD_SIZE = 64.
// In one pass it: dequantizes input and skip, adds them, stores the
// re-quantized pre-LayerNorm sum to `preln`, normalizes with beta/gamma
// (loaded to shared memory as halves), and stores the re-quantized
// normalized result to `output`. Two threads cooperate on each token
// position; mean/variance are reduced first within a warp via shuffles,
// then across warps through smem_red.
template <int32_t WARPS, int32_t HEADS, int32_t THREADS_PER_ROW>
__global__ void skipln_vec32_mtron(const int8_t* input, const int8_t* skip, int8_t* output, int8_t* preln,
const half* beta, const half* gamma, const float dqScaleIn, const float dqScaleSkip, const float qScale,
const float qSkipScale, const int32_t total)
{
// clang-format off
enum { HEAD_SIZE = 64 };
enum { BYTES_PER_LDG = 16 };
enum { THREADS_PER_CTA = WARPS * 32 };
enum { ROWS_PER_LDG = THREADS_PER_CTA / THREADS_PER_ROW };
enum { VECS_PER_CTA = THREADS_PER_ROW / 2 };
enum { PARAM_BYTES = HEADS * HEAD_SIZE * 2 };
enum { PARAM_LDGS = PARAM_BYTES / (THREADS_PER_CTA * BYTES_PER_LDG) };
enum { LDGS = HEADS * 2 / ROWS_PER_LDG };
// clang-format on
// Compile-time checks pin the geometry this kernel was tuned for.
static_assert(VECS_PER_CTA == 4, "");
static_assert(PARAM_LDGS == 1, "");
static_assert(ROWS_PER_LDG == HEADS, "");
static_assert(LDGS == 2, "");
static_assert(LDGS * ROWS_PER_LDG == HEADS * 2, "");
static_assert(THREADS_PER_CTA * BYTES_PER_LDG == PARAM_BYTES, "");
static_assert(PARAM_LDGS == 1, "");
// Dynamic shared memory holds the beta/gamma parameters (PARAM_BYTES each).
extern __shared__ char smem_[];
// space for CTA-wide reduction
__shared__ half2 smem_red[VECS_PER_CTA][WARPS];
// 1 / hidden_size, used to fold the mean into the running sums.
constexpr float rld = 1.f / (float(HEADS) * float(HEAD_SIZE));
const int32_t bidx = blockIdx.x;
const int32_t tidx = threadIdx.x;
const int32_t row = tidx / THREADS_PER_ROW;
const int32_t col = tidx % THREADS_PER_ROW;
const int32_t lane = tidx % 32;
const int32_t warp = tidx / 32;
// "lead" threads: the even lane of each token-position pair.
const bool is_warp_lead = (lane < THREADS_PER_ROW) && ((lane & 1) == 0);
const bool is_cta_lead = (tidx < THREADS_PER_ROW) && ((tidx & 1) == 0);
// token position: every two threads load together the 32B at one token
// position
const int32_t pos = col / 2;
const int32_t pos_offset = bidx * VECS_PER_CTA + pos; // for token positions per block, disabling 2 threads per pos
// Guard against the tail: the last block may cover fewer than VECS_PER_CTA
// token positions.
const bool my_pred = pos_offset < total;
const int32_t row_stride_bytes = total * 32;
uint4 in_data[LDGS];
uint4 in_skip[LDGS];
float hdata[LDGS * 4][4];
const int32_t gmem_offset = row * row_stride_bytes + (bidx * THREADS_PER_ROW + col) * BYTES_PER_LDG;
// 16B vectorized loads of input and skip; zero-filled when out of range.
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
in_data[ii] = {0, 0, 0, 0};
in_skip[ii] = {0, 0, 0, 0};
if (my_pred)
{
ldg(input + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]);
ldg(skip + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_skip[ii]);
}
}
// Stage beta and gamma into shared memory (one 16B load per thread each).
uint4* smem_b = reinterpret_cast<uint4*>(&smem_[0]) + tidx;
uint4* smem_g = reinterpret_cast<uint4*>(&smem_[PARAM_BYTES]) + tidx;
const int8_t* beta_ptr = reinterpret_cast<const int8_t*>(beta) + tidx * BYTES_PER_LDG;
const int8_t* gamma_ptr = reinterpret_cast<const int8_t*>(gamma) + tidx * BYTES_PER_LDG;
ldg(beta_ptr, *smem_b);
ldg(gamma_ptr, *smem_g);
half* b = reinterpret_cast<half*>(&smem_[0]);
half* g = reinterpret_cast<half*>(&smem_[PARAM_BYTES]);
// Dequantize and add the residual: hdata now holds float(input + skip).
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
res_add(hdata[ii * 4 + 0], in_data[ii].x, in_skip[ii].x, dqScaleIn, dqScaleSkip);
res_add(hdata[ii * 4 + 1], in_data[ii].y, in_skip[ii].y, dqScaleIn, dqScaleSkip);
res_add(hdata[ii * 4 + 2], in_data[ii].z, in_skip[ii].z, dqScaleIn, dqScaleSkip);
res_add(hdata[ii * 4 + 3], in_data[ii].w, in_skip[ii].w, dqScaleIn, dqScaleSkip);
}
// Accumulate (sum(x)/n, sum(x^2)/n) as a half2 pair per thread.
half2 stats_local = {0, 0};
#pragma unroll
for (int32_t ii = 0; ii < LDGS * 4; ii++)
{
#pragma unroll
for (int32_t jj = 0; jj < 4; jj++)
{
const float tmp = hdata[ii][jj] * (rld);
stats_local = stats_local + __floats2half2_rn(tmp, tmp * hdata[ii][jj]);
}
}
// Butterfly reduction over the lanes that share a token position. The xor
// distances skipped depend on VECS_PER_CTA (how positions map to lanes).
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 1);
__syncwarp();
if (VECS_PER_CTA == 1)
{
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 2);
__syncwarp();
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 4);
__syncwarp();
}
else if (VECS_PER_CTA == 2)
{
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 4);
__syncwarp();
}
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 8);
__syncwarp();
stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 16);
__syncwarp();
// Cross-warp reduction through shared memory, finalized by the CTA leads.
if (is_warp_lead)
{
smem_red[pos][warp] = stats_local;
}
__syncthreads();
if (is_cta_lead)
{
for (int32_t ii = 1; ii < WARPS; ii++)
{
stats_local = stats_local + smem_red[pos][ii];
}
// mean and 1/stddev from the two running sums; broadcast via smem_red.
float mu = __low2float(stats_local);
float sos = __high2float(stats_local);
float rsigma = rsqrtf(sos - mu * mu);
smem_red[pos][0] = __floats2half2_rn(mu, rsigma);
}
__syncthreads();
// load params into smem: 2x Headsx32x2x2B
const float2 statsf = __half22float2(smem_red[pos][0]);
// Copy skip connection output before Layer Norm
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
in_data[ii].x = pack4(hdata[ii * 4 + 0], qSkipScale);
in_data[ii].y = pack4(hdata[ii * 4 + 1], qSkipScale);
in_data[ii].z = pack4(hdata[ii * 4 + 2], qSkipScale);
in_data[ii].w = pack4(hdata[ii * 4 + 3], qSkipScale);
}
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
if (my_pred)
{
stg(preln + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]);
}
}
// Apply LayerNorm: x_norm = gamma * (x - mu) * rsigma + beta, with the
// parameter index derived from this thread's slot in the 32B token row.
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
#pragma unroll
for (int32_t jj = 0; jj < 4; jj++)
{
#pragma unroll
for (int32_t kk = 0; kk < 4; kk++)
{
const int32_t param_idx = (ii * ROWS_PER_LDG + row) * 32 + (jj * 4 + kk) + (tidx & 1) * 16;
const float bb = b[param_idx];
const float gg = g[param_idx];
hdata[ii * 4 + jj][kk] = gg * statsf.y * (hdata[ii * 4 + jj][kk] - statsf.x) + bb;
}
}
}
// Re-quantize the normalized values and store to `output`.
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
in_data[ii].x = pack4(hdata[ii * 4 + 0], qScale);
in_data[ii].y = pack4(hdata[ii * 4 + 1], qScale);
in_data[ii].z = pack4(hdata[ii * 4 + 2], qScale);
in_data[ii].w = pack4(hdata[ii * 4 + 3], qScale);
}
#pragma unroll
for (int32_t ii = 0; ii < LDGS; ii++)
{
if (my_pred)
{
stg(output + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]);
}
}
// store
}
// Launcher for the vectorized skipln_vec32_mtron kernel. Supports hidden
// sizes ld == 1024 (16 heads) and ld == 768 (12 heads) only; the dynamic
// shared-memory size carries the half-precision beta/gamma parameters.
// Returns STATUS_FAILURE for unsupported ld, otherwise the launch status.
int32_t launch_large_mtron(hipStream_t stream, const int32_t ld, const int32_t total, const int8_t* input,
const int8_t* skip, const half* beta, const half* gamma, int8_t* output, int8_t* preln, const float dqScaleIn,
const float dqScaleSkip, const float qScale, const float qSkipScale)
{
if (ld == 1024)
{
constexpr int32_t WARPS = 4;
constexpr int32_t THREADS_PER_ROW = 8;
constexpr int32_t HEADS = 16;
constexpr int32_t PARAM_BYTES = HEADS * 64 * 2 * sizeof(half);
constexpr int32_t VECS_PER_CTA = THREADS_PER_ROW / 2;
// Each CTA handles VECS_PER_CTA token positions.
const int32_t blocks = (total + VECS_PER_CTA - 1) / VECS_PER_CTA;
hipLaunchKernelGGL(( skipln_vec32_mtron<WARPS, HEADS, THREADS_PER_ROW>), dim3(blocks), dim3(WARPS * 32), PARAM_BYTES, stream,
input, skip, output, preln, beta, gamma, dqScaleIn, dqScaleSkip, qScale, qSkipScale, total);
}
else if (ld == 768)
{
constexpr int32_t WARPS = 3;
constexpr int32_t THREADS_PER_ROW = 8;
constexpr int32_t HEADS = 12;
constexpr int32_t PARAM_BYTES = HEADS * 64 * 2 * sizeof(half);
constexpr int32_t VECS_PER_CTA = THREADS_PER_ROW / 2;
const int32_t blocks = (total + VECS_PER_CTA - 1) / VECS_PER_CTA;
hipLaunchKernelGGL(( skipln_vec32_mtron<WARPS, HEADS, THREADS_PER_ROW>), dim3(blocks), dim3(WARPS * 32), PARAM_BYTES, stream,
input, skip, output, preln, beta, gamma, dqScaleIn, dqScaleSkip, qScale, qSkipScale, total);
}
else
{
return STATUS_FAILURE;
}
// Reports launch-configuration errors without clearing the error state.
return hipPeekAtLastError();
}
// naive kernel that only changes the addressing seems to be faster for small problem sizes
// Fused int8 skip + LayerNorm (with pre-LN output) over the same interleaved
// 32B-per-token layout, one CTA per token, VPT (= 8) elements per thread.
// Mean/variance are reduced with cub/hipcub BlockReduce in half precision.
template <int32_t TPB, int32_t VPT>
__global__ void skiplnDQQ_vec4(const int32_t ld, const int8_t* input, const int8_t* skip, int8_t* output, int8_t* preln,
const half* beta, const half* gamma, const float dqScaleIn, const float dqScaleSkip, const float qScale,
const float qSkipScale, const int32_t total)
{
// Map the thread onto the interleaved layout: houter selects the 32B row
// group, hinner the VPT-wide slot inside a token's 32B chunk.
const int32_t hinner = threadIdx.x % 4;
const int32_t houter = threadIdx.x / 4;
const int32_t tidx = threadIdx.x;
const int32_t bidx = blockIdx.x;
const int32_t idx = houter * total * 32 + bidx * 32 + hinner * VPT;
// 4 * 1024 * 4 * 2 Bytes = 16KB per block
int8_t in_local[VPT];
int8_t skip_local[VPT];
half in_local_dq[VPT]; // dequantized input + skip
half beta_local[VPT];
half gamma_local[VPT];
// load input tensors
copy<sizeof(int8_t) * VPT>(&input[idx], in_local);
copy<sizeof(int8_t) * VPT>(&skip[idx], skip_local);
// load parameters
copy<sizeof(half) * VPT>(&beta[tidx * VPT], beta_local);
copy<sizeof(half) * VPT>(&gamma[tidx * VPT], gamma_local);
half2 stats_local = __floats2half2_rn(0.f, 0.f); // accumulator
const half rld = half(1.f) / half(ld);
#pragma unroll
for (int32_t it = 0; it < VPT; it++)
{
// DQ input and skip
const float tmp_in = in_local[it];
const float tmp_skip = skip_local[it];
in_local_dq[it] = dqScaleIn * tmp_in + dqScaleSkip * tmp_skip;
// Accumulate (x/ld, x^2/ld) pairwise in a single half2.
const half tmp = rld * in_local_dq[it];
const half2 tmp2 = __halves2half2(tmp, tmp * in_local_dq[it]);
stats_local = stats_local + tmp2;
}
using BlockReduce = hipcub::BlockReduce<half2, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ half mu; // mean
__shared__ half rsigma; // 1 / std.dev.
// Only thread 0 receives the valid reduction result.
const half2 sum2 = BlockReduce(temp_storage).Reduce(stats_local, hipcub::Sum());
// Copy skip connection output before Layer Norm
#pragma unroll
for (int32_t it = 0; it < VPT; it++)
{
in_local[it] = quantize(in_local_dq[it], qSkipScale);
}
copy<sizeof(int8_t) * VPT>(in_local, &preln[idx]);
if (tidx == 0)
{
mu = __low2half(sum2);
rsigma = rsqrtf(__high2half(sum2) - mu * mu);
}
// Make mu/rsigma visible to the whole block before normalizing.
__syncthreads();
static_assert(VPT % 4 == 0, "");
uint32_t out_local[VPT/4];
// Normalize, scale/shift, and re-quantize four elements per packed word.
#pragma unroll
for (int it = 0; it < VPT / 4; it++)
{
const float tmp0 = gamma_local[it*4+0] * (in_local_dq[it*4+0] - mu) * rsigma + beta_local[it*4+0];
const float tmp1 = gamma_local[it*4+1] * (in_local_dq[it*4+1] - mu) * rsigma + beta_local[it*4+1];
const float tmp2 = gamma_local[it*4+2] * (in_local_dq[it*4+2] - mu) * rsigma + beta_local[it*4+2];
const float tmp3 = gamma_local[it*4+3] * (in_local_dq[it*4+3] - mu) * rsigma + beta_local[it*4+3];
out_local[it] = float4_to_char4(tmp0 * qScale, tmp1 * qScale, tmp2 * qScale, tmp3 * qScale);
}
copy<sizeof(int8_t) * VPT>(out_local, &output[idx]);
}
// Launcher for the naive per-token skiplnDQQ_vec4 kernel (one CTA per token).
// Supports hidden sizes 768 and 1024 only; logs and returns STATUS_FAILURE
// otherwise, else returns the launch status.
int32_t launch_small_mtron(hipStream_t stream, const int32_t ld, const int total, const int8_t* input,
const int8_t* skip, const half* beta, const half* gamma, int8_t* output, int8_t* preln, const float dqScaleIn,
const float dqScaleSkip, const float qScale, const float qSkipScale)
{
const int32_t gridSize = total;
// we align reads with the number of parameters, i.e. 8-wide instead of 16
constexpr int32_t VPT = 16 / sizeof(half); // 8
if (ld == 768)
{
constexpr int32_t TPB = 768 / VPT;
hipLaunchKernelGGL(( skiplnDQQ_vec4<TPB, VPT>), dim3(gridSize), dim3(TPB), 0, stream,
ld, input, skip, output, preln, beta, gamma, dqScaleIn, dqScaleSkip, qScale, qSkipScale, total);
}
else if (ld == 1024)
{
constexpr int32_t TPB = 1024 / VPT; // 128
hipLaunchKernelGGL(( skiplnDQQ_vec4<TPB, VPT>), dim3(gridSize), dim3(TPB), 0, stream,
ld, input, skip, output, preln, beta, gamma, dqScaleIn, dqScaleSkip, qScale, qSkipScale, total);
}
else
{
std::cout << "SkipLayerNormDQQ - FATAL: unsupported hidden layer size: " << ld << std::endl;
return STATUS_FAILURE;
}
// Reports launch-configuration errors without clearing the error state.
return hipPeekAtLastError();
}
} // namespace bert
| 59a181b7bf0ba3729e94df74b7a6e9d19e04e854.cu |
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "bertCommon.h"
#include "common.cuh"
#include <cassert>
#include <cstring>
#include <cuda.h>
#include <type_traits>
#include <vector>
using namespace nvinfer1;
namespace bert
{
// Dequantizes one packed int8x4 word of the main input and one of the
// residual (skip) input and writes their float sums into hdata:
//   hdata[j] = idata.lane[j] * dqData + ires.lane[j] * dqRes
// The uint32_t arguments are reinterpreted as four signed 8-bit lanes (char4).
inline __device__ void res_add(
    float (&hdata)[4], const uint32_t idata, const uint32_t ires, const float dqData, const float dqRes)
{
    char4 ires4 = reinterpret_cast<const char4&>(ires);
    char4 idata4 = reinterpret_cast<const char4&>(idata);
    hdata[0] = float(idata4.x) * dqData + float(ires4.x) * dqRes;
    hdata[1] = float(idata4.y) * dqData + float(ires4.y) * dqRes;
    hdata[2] = float(idata4.z) * dqData + float(ires4.z) * dqRes;
    hdata[3] = float(idata4.w) * dqData + float(ires4.w) * dqRes;
}
// Vectorized int8 skip-LayerNorm kernel ("mtron" variant):
//   1. dequantize input + skip and add them (res_add),
//   2. requantize the pre-LayerNorm sum with qSkipScale and store it to preln,
//   3. layer-normalize with gamma/beta, requantize with qScale, store to output.
// Each pair of threads cooperates on the 32 bytes of one token position
// (VECS_PER_CTA token positions per block). beta/gamma are staged into
// dynamic shared memory (smem_, 2 * PARAM_BYTES bytes expected by the
// launcher). Mean/variance are reduced first within a warp via
// __shfl_xor_sync, then across warps through smem_red.
// NOTE(review): the addressing implies the int8 tensors are laid out as
// [2*HEADS rows][total][32B] with row stride total*32 — confirm against caller.
template <int32_t WARPS, int32_t HEADS, int32_t THREADS_PER_ROW>
__global__ void skipln_vec32_mtron(const int8_t* input, const int8_t* skip, int8_t* output, int8_t* preln,
    const half* beta, const half* gamma, const float dqScaleIn, const float dqScaleSkip, const float qScale,
    const float qSkipScale, const int32_t total)
{
    // clang-format off
    enum { HEAD_SIZE = 64 };
    enum { BYTES_PER_LDG = 16 };
    enum { THREADS_PER_CTA = WARPS * 32 };
    enum { ROWS_PER_LDG = THREADS_PER_CTA / THREADS_PER_ROW };
    enum { VECS_PER_CTA = THREADS_PER_ROW / 2 };
    enum { PARAM_BYTES = HEADS * HEAD_SIZE * 2 };
    enum { PARAM_LDGS = PARAM_BYTES / (THREADS_PER_CTA * BYTES_PER_LDG) };
    enum { LDGS = HEADS * 2 / ROWS_PER_LDG };
    // clang-format on
    // Compile-time sanity checks on the launch-parameter combinations this
    // kernel was written for (see launch_large_mtron).
    static_assert(VECS_PER_CTA == 4, "");
    static_assert(PARAM_LDGS == 1, "");
    static_assert(ROWS_PER_LDG == HEADS, "");
    static_assert(LDGS == 2, "");
    static_assert(LDGS * ROWS_PER_LDG == HEADS * 2, "");
    static_assert(THREADS_PER_CTA * BYTES_PER_LDG == PARAM_BYTES, "");
    static_assert(PARAM_LDGS == 1, "");
    extern __shared__ char smem_[];
    // space for CTA-wide reduction
    __shared__ half2 smem_red[VECS_PER_CTA][WARPS];
    // 1 / hidden_size, used to fold the mean into the running sums
    constexpr float rld = 1.f / (float(HEADS) * float(HEAD_SIZE));
    const int32_t bidx = blockIdx.x;
    const int32_t tidx = threadIdx.x;
    const int32_t row = tidx / THREADS_PER_ROW;
    const int32_t col = tidx % THREADS_PER_ROW;
    const int32_t lane = tidx % 32;
    const int32_t warp = tidx / 32;
    // Leaders: one thread per token position per warp / per CTA for reductions.
    const bool is_warp_lead = (lane < THREADS_PER_ROW) && ((lane & 1) == 0);
    const bool is_cta_lead = (tidx < THREADS_PER_ROW) && ((tidx & 1) == 0);
    // token position: every two threads load together the 32B at one token
    // position
    const int32_t pos = col / 2;
    const int32_t pos_offset = bidx * VECS_PER_CTA + pos; // for token positions per block, disabling 2 threads per pos
    const bool my_pred = pos_offset < total;
    const int32_t row_stride_bytes = total * 32;
    uint4 in_data[LDGS];
    uint4 in_skip[LDGS];
    float hdata[LDGS * 4][4];
    const int32_t gmem_offset = row * row_stride_bytes + (bidx * THREADS_PER_ROW + col) * BYTES_PER_LDG;
    // Predicated 16B vector loads of input and skip; out-of-range positions
    // contribute zeros.
    #pragma unroll
    for (int32_t ii = 0; ii < LDGS; ii++)
    {
        in_data[ii] = {0, 0, 0, 0};
        in_skip[ii] = {0, 0, 0, 0};
        if (my_pred)
        {
            ldg(input + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]);
            ldg(skip + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_skip[ii]);
        }
    }
    // Stage beta (first PARAM_BYTES of smem_) and gamma (second half) into
    // dynamic shared memory; one 16B load per thread covers the whole arrays.
    uint4* smem_b = reinterpret_cast<uint4*>(&smem_[0]) + tidx;
    uint4* smem_g = reinterpret_cast<uint4*>(&smem_[PARAM_BYTES]) + tidx;
    const int8_t* beta_ptr = reinterpret_cast<const int8_t*>(beta) + tidx * BYTES_PER_LDG;
    const int8_t* gamma_ptr = reinterpret_cast<const int8_t*>(gamma) + tidx * BYTES_PER_LDG;
    ldg(beta_ptr, *smem_b);
    ldg(gamma_ptr, *smem_g);
    half* b = reinterpret_cast<half*>(&smem_[0]);
    half* g = reinterpret_cast<half*>(&smem_[PARAM_BYTES]);
    // Dequantize and add the residual: hdata holds float(input)+float(skip).
    #pragma unroll
    for (int32_t ii = 0; ii < LDGS; ii++)
    {
        res_add(hdata[ii * 4 + 0], in_data[ii].x, in_skip[ii].x, dqScaleIn, dqScaleSkip);
        res_add(hdata[ii * 4 + 1], in_data[ii].y, in_skip[ii].y, dqScaleIn, dqScaleSkip);
        res_add(hdata[ii * 4 + 2], in_data[ii].z, in_skip[ii].z, dqScaleIn, dqScaleSkip);
        res_add(hdata[ii * 4 + 3], in_data[ii].w, in_skip[ii].w, dqScaleIn, dqScaleSkip);
    }
    // stats_local packs (sum(x)/ld, sum(x*x)/ld) into one half2 accumulator.
    half2 stats_local = {0, 0};
    #pragma unroll
    for (int32_t ii = 0; ii < LDGS * 4; ii++)
    {
        #pragma unroll
        for (int32_t jj = 0; jj < 4; jj++)
        {
            const float tmp = hdata[ii][jj] * (rld);
            stats_local = stats_local + __floats2half2_rn(tmp, tmp * hdata[ii][jj]);
        }
    }
    // Butterfly reduction within the warp; the xor distances skipped depend on
    // how many token positions share a warp (VECS_PER_CTA).
    stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 1);
    __syncwarp();
    if (VECS_PER_CTA == 1)
    {
        stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 2);
        __syncwarp();
        stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 4);
        __syncwarp();
    }
    else if (VECS_PER_CTA == 2)
    {
        stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 4);
        __syncwarp();
    }
    stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 8);
    __syncwarp();
    stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 16);
    __syncwarp();
    // Cross-warp reduction through shared memory; CTA leader computes the
    // final mean and 1/stddev and republishes them in smem_red[pos][0].
    if (is_warp_lead)
    {
        smem_red[pos][warp] = stats_local;
    }
    __syncthreads();
    if (is_cta_lead)
    {
        for (int32_t ii = 1; ii < WARPS; ii++)
        {
            stats_local = stats_local + smem_red[pos][ii];
        }
        float mu = __low2float(stats_local);
        float sos = __high2float(stats_local);
        float rsigma = rsqrtf(sos - mu * mu);
        smem_red[pos][0] = __floats2half2_rn(mu, rsigma);
    }
    __syncthreads();
    // load params into smem: 2x Headsx32x2x2B
    const float2 statsf = __half22float2(smem_red[pos][0]);
    // Copy skip connection output before Layer Norm
    #pragma unroll
    for (int32_t ii = 0; ii < LDGS; ii++)
    {
        in_data[ii].x = pack4(hdata[ii * 4 + 0], qSkipScale);
        in_data[ii].y = pack4(hdata[ii * 4 + 1], qSkipScale);
        in_data[ii].z = pack4(hdata[ii * 4 + 2], qSkipScale);
        in_data[ii].w = pack4(hdata[ii * 4 + 3], qSkipScale);
    }
    #pragma unroll
    for (int32_t ii = 0; ii < LDGS; ii++)
    {
        if (my_pred)
        {
            stg(preln + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]);
        }
    }
    // Apply LayerNorm: y = gamma * (x - mu) * rsigma + beta, reading the
    // parameters for this thread's 16 hidden elements from shared memory.
    #pragma unroll
    for (int32_t ii = 0; ii < LDGS; ii++)
    {
        #pragma unroll
        for (int32_t jj = 0; jj < 4; jj++)
        {
            #pragma unroll
            for (int32_t kk = 0; kk < 4; kk++)
            {
                const int32_t param_idx = (ii * ROWS_PER_LDG + row) * 32 + (jj * 4 + kk) + (tidx & 1) * 16;
                const float bb = b[param_idx];
                const float gg = g[param_idx];
                hdata[ii * 4 + jj][kk] = gg * statsf.y * (hdata[ii * 4 + jj][kk] - statsf.x) + bb;
            }
        }
    }
    // Requantize the normalized values and store them.
    #pragma unroll
    for (int32_t ii = 0; ii < LDGS; ii++)
    {
        in_data[ii].x = pack4(hdata[ii * 4 + 0], qScale);
        in_data[ii].y = pack4(hdata[ii * 4 + 1], qScale);
        in_data[ii].z = pack4(hdata[ii * 4 + 2], qScale);
        in_data[ii].w = pack4(hdata[ii * 4 + 3], qScale);
    }
    #pragma unroll
    for (int32_t ii = 0; ii < LDGS; ii++)
    {
        if (my_pred)
        {
            stg(output + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]);
        }
    }
    // store
}
// Launches the vectorized skip-LayerNorm kernel for large problem sizes.
// Hidden size selects the template configuration (warps / heads / threads per
// row); each block handles VECS_PER_CTA token positions, and the dynamic
// shared memory carries both beta and gamma (PARAM_BYTES covers the two
// half-precision parameter arrays staged by the kernel). Unsupported hidden
// sizes return STATUS_FAILURE; otherwise cudaPeekAtLastError() is returned so
// launch errors surface without clearing the sticky error state.
int32_t launch_large_mtron(cudaStream_t stream, const int32_t ld, const int32_t total, const int8_t* input,
    const int8_t* skip, const half* beta, const half* gamma, int8_t* output, int8_t* preln, const float dqScaleIn,
    const float dqScaleSkip, const float qScale, const float qSkipScale)
{
    if (ld == 1024)
    {
        constexpr int32_t WARPS = 4;
        constexpr int32_t THREADS_PER_ROW = 8;
        constexpr int32_t HEADS = 16;
        constexpr int32_t PARAM_BYTES = HEADS * 64 * 2 * sizeof(half);
        constexpr int32_t VECS_PER_CTA = THREADS_PER_ROW / 2;
        const int32_t blocks = (total + VECS_PER_CTA - 1) / VECS_PER_CTA; // ceil-div over token positions
        skipln_vec32_mtron<WARPS, HEADS, THREADS_PER_ROW><<<blocks, WARPS * 32, PARAM_BYTES, stream>>>(
            input, skip, output, preln, beta, gamma, dqScaleIn, dqScaleSkip, qScale, qSkipScale, total);
    }
    else if (ld == 768)
    {
        constexpr int32_t WARPS = 3;
        constexpr int32_t THREADS_PER_ROW = 8;
        constexpr int32_t HEADS = 12;
        constexpr int32_t PARAM_BYTES = HEADS * 64 * 2 * sizeof(half);
        constexpr int32_t VECS_PER_CTA = THREADS_PER_ROW / 2;
        const int32_t blocks = (total + VECS_PER_CTA - 1) / VECS_PER_CTA;
        skipln_vec32_mtron<WARPS, HEADS, THREADS_PER_ROW><<<blocks, WARPS * 32, PARAM_BYTES, stream>>>(
            input, skip, output, preln, beta, gamma, dqScaleIn, dqScaleSkip, qScale, qSkipScale, total);
    }
    else
    {
        return STATUS_FAILURE;
    }
    return cudaPeekAtLastError();
}
// naive kernel that only changes the addressing seems to be faster for small problem sizes
//
// Skip-LayerNorm with dequantize-in / quantize-out for one token per block:
//   - dequantize int8 input + skip with dqScaleIn/dqScaleSkip and add them,
//   - write the requantized (qSkipScale) pre-LayerNorm sum to preln,
//   - layer-normalize with gamma/beta and write the requantized (qScale)
//     result to output.
// TPB threads each process VPT elements; the mean/sum-of-squares pair is
// reduced block-wide with cub::BlockReduce in half2 precision.
// NOTE(review): the idx arithmetic implies a [ld/32][total][32] int8 layout
// and VPT == 8 (hinner in [0,4)) — confirm against the launcher.
template <int32_t TPB, int32_t VPT>
__global__ void skiplnDQQ_vec4(const int32_t ld, const int8_t* input, const int8_t* skip, int8_t* output, int8_t* preln,
    const half* beta, const half* gamma, const float dqScaleIn, const float dqScaleSkip, const float qScale,
    const float qSkipScale, const int32_t total)
{
    const int32_t hinner = threadIdx.x % 4;
    const int32_t houter = threadIdx.x / 4;
    const int32_t tidx = threadIdx.x;
    const int32_t bidx = blockIdx.x;
    const int32_t idx = houter * total * 32 + bidx * 32 + hinner * VPT;
    // 4 * 1024 * 4 * 2 Bytes = 16KB per block
    int8_t in_local[VPT];
    int8_t skip_local[VPT];
    half in_local_dq[VPT]; // dequantized input + skip
    half beta_local[VPT];
    half gamma_local[VPT];
    // load input tensors
    copy<sizeof(int8_t) * VPT>(&input[idx], in_local);
    copy<sizeof(int8_t) * VPT>(&skip[idx], skip_local);
    // load parameters
    copy<sizeof(half) * VPT>(&beta[tidx * VPT], beta_local);
    copy<sizeof(half) * VPT>(&gamma[tidx * VPT], gamma_local);
    half2 stats_local = __floats2half2_rn(0.f, 0.f); // accumulator
    const half rld = half(1.f) / half(ld);
    #pragma unroll
    for (int32_t it = 0; it < VPT; it++)
    {
        // DQ input and skip
        const float tmp_in = in_local[it];
        const float tmp_skip = skip_local[it];
        in_local_dq[it] = dqScaleIn * tmp_in + dqScaleSkip * tmp_skip;
        // accumulate (x/ld, x*x/ld) so the reduce yields (mean, E[x^2])
        const half tmp = rld * in_local_dq[it];
        const half2 tmp2 = __halves2half2(tmp, tmp * in_local_dq[it]);
        stats_local = stats_local + tmp2;
    }
    using BlockReduce = cub::BlockReduce<half2, TPB>;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    __shared__ half mu;     // mean
    __shared__ half rsigma; // 1 / std.dev.
    // Block-wide reduction: only thread 0 receives the valid result.
    const half2 sum2 = BlockReduce(temp_storage).Reduce(stats_local, cub::Sum());
    // Copy skip connection output before Layer Norm
    #pragma unroll
    for (int32_t it = 0; it < VPT; it++)
    {
        in_local[it] = quantize(in_local_dq[it], qSkipScale);
    }
    copy<sizeof(int8_t) * VPT>(in_local, &preln[idx]);
    if (tidx == 0)
    {
        mu = __low2half(sum2);
        rsigma = rsqrtf(__high2half(sum2) - mu * mu);
    }
    __syncthreads(); // publish mu/rsigma to the whole block
    static_assert(VPT % 4 == 0, "");
    uint32_t out_local[VPT/4];
    #pragma unroll
    for (int it = 0; it < VPT / 4; it++)
    {
        // y = gamma * (x - mu) * rsigma + beta, then requantize 4 lanes at once
        const float tmp0 = gamma_local[it*4+0] * (in_local_dq[it*4+0] - mu) * rsigma + beta_local[it*4+0];
        const float tmp1 = gamma_local[it*4+1] * (in_local_dq[it*4+1] - mu) * rsigma + beta_local[it*4+1];
        const float tmp2 = gamma_local[it*4+2] * (in_local_dq[it*4+2] - mu) * rsigma + beta_local[it*4+2];
        const float tmp3 = gamma_local[it*4+3] * (in_local_dq[it*4+3] - mu) * rsigma + beta_local[it*4+3];
        out_local[it] = float4_to_char4(tmp0 * qScale, tmp1 * qScale, tmp2 * qScale, tmp3 * qScale);
    }
    copy<sizeof(int8_t) * VPT>(out_local, &output[idx]);
}
// Launches the naive skip-LayerNorm DQQ kernel for small problem sizes:
// one block per token (gridSize == total), TPB threads each handling VPT
// hidden elements. Only hidden sizes 768 and 1024 are supported; any other
// size logs a fatal message and returns STATUS_FAILURE. On success returns
// cudaPeekAtLastError() so launch-configuration errors reach the caller
// without clearing the sticky error state.
int32_t launch_small_mtron(cudaStream_t stream, const int32_t ld, const int total, const int8_t* input,
    const int8_t* skip, const half* beta, const half* gamma, int8_t* output, int8_t* preln, const float dqScaleIn,
    const float dqScaleSkip, const float qScale, const float qSkipScale)
{
    const int32_t gridSize = total;
    // we align reads with the number of parameters, i.e. 8-wide instead of 16
    constexpr int32_t VPT = 16 / sizeof(half); // 8
    if (ld == 768)
    {
        constexpr int32_t TPB = 768 / VPT;
        skiplnDQQ_vec4<TPB, VPT><<<gridSize, TPB, 0, stream>>>(
            ld, input, skip, output, preln, beta, gamma, dqScaleIn, dqScaleSkip, qScale, qSkipScale, total);
    }
    else if (ld == 1024)
    {
        constexpr int32_t TPB = 1024 / VPT; // 128
        skiplnDQQ_vec4<TPB, VPT><<<gridSize, TPB, 0, stream>>>(
            ld, input, skip, output, preln, beta, gamma, dqScaleIn, dqScaleSkip, qScale, qSkipScale, total);
    }
    else
    {
        std::cout << "SkipLayerNormDQQ - FATAL: unsupported hidden layer size: " << ld << std::endl;
        return STATUS_FAILURE;
    }
    return cudaPeekAtLastError();
}
} // namespace bert
|
4f514aaf0872ba11fc42db2a63373c7ee1b4c329.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// Reverses an N-element device array: b[N-1-i] = a[i], one thread per element.
// Threads whose global index falls past the end of the array exit immediately.
__global__ void revArray(int N, float *a, float *b) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) {
        return; // grid may be padded beyond N
    }
    b[N - 1 - gid] = a[gid];
}
int main(int argc, char **argv) {
int N = 100;
//Host memory allocation
float *h_a = (float*) malloc(N*sizeof(float));
float *h_b = (float*) malloc(N*sizeof(float));
int n;
for(n=0;n<N;n++) {
h_a[n] = 1+n;
}
// Device memory allocation
float *d_a, *d_b;
hipMalloc(&d_a, N*sizeof(float));
hipMalloc(&d_b, N*sizeof(float));
// Copy data from host to device
hipMemcpy(d_a, h_a, N*sizeof(float), hipMemcpyHostToDevice);
//save this for later
int NthreadsPerBlock = 10;
int NthreadBlocks = (N+NthreadsPerBlock-1)/NthreadsPerBlock ;
hipLaunchKernelGGL(( revArray), dim3(NthreadBlocks), dim3(NthreadsPerBlock), 0, 0, N,d_a,d_b);
//copy result from device to host
hipMemcpy(h_b, d_b, N*sizeof(float), hipMemcpyDeviceToHost);
for(n=0;n<N;++n) {
printf("h_b[%d] = %g\n",n,h_b[n]);
}
free(h_a);
free(h_b);
hipFree(d_a);
hipFree(d_b);
return 0;
}
| 4f514aaf0872ba11fc42db2a63373c7ee1b4c329.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Reverses an N-element device array: b[N-1-n] = a[n], one thread per element.
__global__ void revArray(int N, float *a, float *b) {
    int n = threadIdx.x + blockIdx.x*blockDim.x;
    // guard: the grid may contain more threads than array elements
    if(n<N) {
        b[N-1-n] = a[n];
    }
}
// Reports a failed CUDA API call; returns nonzero on error so callers can bail out.
static int cudaCheck(cudaError_t err, const char *what) {
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        return 1;
    }
    return 0;
}

// Fills an N-element array on the host, reverses it on the GPU, and prints the
// result. Every allocation and CUDA API call is now checked (the original
// ignored all return codes, so device failures went unnoticed).
int main(int argc, char **argv) {
    int N = 100;

    // Host memory allocation
    float *h_a = (float*) malloc(N*sizeof(float));
    float *h_b = (float*) malloc(N*sizeof(float));
    if (h_a == NULL || h_b == NULL) {
        fprintf(stderr, "host allocation failed\n");
        free(h_a);
        free(h_b);
        return 1;
    }

    int n;
    for (n = 0; n < N; n++) {
        h_a[n] = 1 + n;
    }

    // Device memory allocation
    float *d_a, *d_b;
    if (cudaCheck(cudaMalloc(&d_a, N*sizeof(float)), "cudaMalloc d_a") ||
        cudaCheck(cudaMalloc(&d_b, N*sizeof(float)), "cudaMalloc d_b")) {
        free(h_a);
        free(h_b);
        return 1;
    }

    // Copy data from host to device
    if (cudaCheck(cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice), "cudaMemcpy H2D")) {
        return 1;
    }

    // One thread per element, rounded up to whole blocks
    int NthreadsPerBlock = 10;
    int NthreadBlocks = (N + NthreadsPerBlock - 1) / NthreadsPerBlock;

    revArray<<<NthreadBlocks, NthreadsPerBlock>>>(N, d_a, d_b);
    if (cudaCheck(cudaGetLastError(), "revArray launch")) {
        return 1;
    }

    // Copy result from device to host (blocking, so no explicit sync needed)
    if (cudaCheck(cudaMemcpy(h_b, d_b, N*sizeof(float), cudaMemcpyDeviceToHost), "cudaMemcpy D2H")) {
        return 1;
    }

    for (n = 0; n < N; ++n) {
        printf("h_b[%d] = %g\n", n, h_b[n]);
    }

    free(h_a);
    free(h_b);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
|
5fc7788fc13680ad9ccc98414fc59d36ab2320c3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "rocblas.h"
#include "matrix_mul.h"
#include <rocblas.h>
// Host multiplication function
// Compute C = A * B
// hA is the height of A
// wA is the width of A
// wB is the width of B
extern "C"
// Computes C = A * B on the GPU via hipBLAS, where all matrices are stored
// row-major on the host:
//   A is hA x wA, B is wA x wB, C is hA x wB.
// BUG FIX: hipblasSgemm uses column-major storage, and the original call
// passed (m=wA, n=hA, k=wB) with A as the left operand and ldc=wB, which is
// dimensionally inconsistent whenever wA != wB and computes the wrong product
// otherwise. Row-major C = A*B maps to the column-major identity
// C^T = B^T * A^T, i.e. sgemm(N, N, m=wB, n=hA, k=wA, B(ld=wB), A(ld=wA),
// C(ld=wB)).
void Mul(float* A, float* B, int hA, int wA, int wB,
         float* C)
{
    int size;
    const float alpha = 1;
    const float beta = 0;

    // Load A and B to the device
    float* Ad;
    size = hA * wA * sizeof(float);
    hipMalloc((void**)&Ad, size);
    hipMemcpy(Ad, A, size, hipMemcpyHostToDevice);
    float* Bd;
    size = wA * wB * sizeof(float);
    hipMalloc((void**)&Bd, size);
    hipMemcpy(Bd, B, size, hipMemcpyHostToDevice);

    // Allocate C on the device (uploading C is only needed because callers may
    // rely on it; with beta == 0 the GEMM overwrites it entirely)
    float* Cd;
    size = hA * wB * sizeof(float);
    hipMalloc((void**)&Cd, size);
    hipMemcpy(Cd, C, size, hipMemcpyHostToDevice);

    hipblasHandle_t handle;
    hipblasCreate(&handle);

    // Column-major view: compute C^T = B^T * A^T
    hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
        wB,      /* m: rows of C^T (= columns of C) */
        hA,      /* n: columns of C^T (= rows of C) */
        wA,      /* k: shared dimension */
        &alpha,
        Bd, wB,  /* B^T is wB x wA, leading dimension wB */
        Ad, wA,  /* A^T is wA x hA, leading dimension wA */
        &beta,
        Cd, wB   /* C^T is wB x hA, leading dimension wB */
        );

    // Read C from the device (blocking copy also synchronizes with the GEMM)
    hipMemcpy(C, Cd, size, hipMemcpyDeviceToHost);

    // Free device memory
    hipFree(Ad);
    hipFree(Bd);
    hipFree(Cd);
    hipblasDestroy(handle);
}
| 5fc7788fc13680ad9ccc98414fc59d36ab2320c3.cu | #include <stdio.h>
#include "cublas.h"
#include "matrix_mul.h"
#include <cublas_v2.h>
// Host multiplication function
// Compute C = A * B
// hA is the height of A
// wA is the width of A
// wB is the width of B
extern "C"
// Computes C = A * B on the GPU via cuBLAS, where all matrices are stored
// row-major on the host:
//   A is hA x wA, B is wA x wB, C is hA x wB.
// BUG FIX: cublasSgemm uses column-major storage, and the original call
// passed (m=wA, n=hA, k=wB) with A as the left operand and ldc=wB, which is
// dimensionally inconsistent whenever wA != wB and computes the wrong product
// otherwise. Row-major C = A*B maps to the column-major identity
// C^T = B^T * A^T, i.e. sgemm(N, N, m=wB, n=hA, k=wA, B(ld=wB), A(ld=wA),
// C(ld=wB)).
void Mul(float* A, float* B, int hA, int wA, int wB,
         float* C)
{
    int size;
    const float alpha = 1;
    const float beta = 0;

    // Load A and B to the device
    float* Ad;
    size = hA * wA * sizeof(float);
    cudaMalloc((void**)&Ad, size);
    cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
    float* Bd;
    size = wA * wB * sizeof(float);
    cudaMalloc((void**)&Bd, size);
    cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);

    // Allocate C on the device (uploading C is only needed because callers may
    // rely on it; with beta == 0 the GEMM overwrites it entirely)
    float* Cd;
    size = hA * wB * sizeof(float);
    cudaMalloc((void**)&Cd, size);
    cudaMemcpy(Cd, C, size, cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);

    // Column-major view: compute C^T = B^T * A^T
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
        wB,      /* m: rows of C^T (= columns of C) */
        hA,      /* n: columns of C^T (= rows of C) */
        wA,      /* k: shared dimension */
        &alpha,
        Bd, wB,  /* B^T is wB x wA, leading dimension wB */
        Ad, wA,  /* A^T is wA x hA, leading dimension wA */
        &beta,
        Cd, wB   /* C^T is wB x hA, leading dimension wB */
        );

    // Read C from the device (blocking copy also synchronizes with the GEMM)
    cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
    cublasDestroy(handle);
}
|
193ac1a7f8baf642a747a15a223cacb07a305a5a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
// Up-sweep (reduce) phase of the Blelloch work-efficient scan at depth d:
// every 2^(d+1)-aligned segment adds its left half's partial sum into its
// right end: data[k + 2^(d+1) - 1] += data[k + 2^d - 1].
// FIX: compute 2^d / 2^(d+1) with integer shifts instead of double-precision
// pow(), which was needlessly expensive inside a kernel (values identical for
// the depths used here, d < 31).
__global__ void up_sweep(int n, int d, int *data) {
    int k = threadIdx.x + (blockIdx.x * blockDim.x);
    if (k < n) {
        int p2d = 1 << d;
        int p2da1 = 1 << (d + 1);
        if (k % p2da1 == 0) {
            data[k + p2da1 - 1] += data[k + p2d - 1];
        }
    }
}
// Down-sweep phase of the Blelloch scan at depth d: each 2^(d+1)-aligned
// segment moves its right-end prefix down to the left half and accumulates,
// turning the reduced tree into an exclusive prefix sum.
// FIX: compute 2^d / 2^(d+1) with integer shifts instead of double-precision
// pow(), which was needlessly expensive inside a kernel (values identical for
// the depths used here, d < 31).
__global__ void down_sweep(int n, int d, int *data) {
    int k = threadIdx.x + (blockIdx.x * blockDim.x);
    if (k < n) {
        int p2d = 1 << d;
        int p2da1 = 1 << (d + 1);
        if (k % p2da1 == 0) {
            int temp = data[k + p2d - 1];
            data[k + p2d - 1] = data[k + p2da1 - 1];
            data[k + p2da1 - 1] += temp;
        }
    }
}
// Zero-fills a[start .. end) — used to pad the scan buffer out to the next
// power-of-two length so the up-/down-sweep kernels see a complete tree.
void padArrayRange(int start, int end, int *a) {
    int idx = start;
    while (idx < end) {
        a[idx] = 0;
        ++idx;
    }
}
/**
 * Performs prefix-sum (aka scan) on idata, storing the result into odata.
 *
 * Exclusive scan via the Blelloch work-efficient algorithm: the input is
 * copied into a zero-padded buffer of the next power-of-two length m, the
 * up-sweep and down-sweep kernels are run for ilog2ceil(n) depths each, and
 * the first n results are copied back. Returns the GPU time of the two sweep
 * phases in milliseconds, measured with hip events (the host-side copies and
 * padding are not included in the timing).
 */
float scan(int n, int *odata, const int *idata) {
    int m = pow(2, ilog2ceil(n)); // padded length: next power of two >= n
    int *new_idata = (int*)malloc(m * sizeof(int));
    dim3 fullBlocksPerGrid((m + blockSize - 1) / blockSize);
    dim3 threadsPerBlock(blockSize);
    hipEvent_t start, stop;
    float ms_time = 0.0f;
    float ms_total_time = 0.0f;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // Expand array to next power of 2 size
    for (int i = 0; i < n; i++) {
        new_idata[i] = idata[i];
    }
    padArrayRange(n, m, new_idata);

    // Can use one array for input and output in this implementation
    int *dev_data;
    hipMalloc((void**)&dev_data, m * sizeof(int));
    hipMemcpy(dev_data, new_idata, m * sizeof(int), hipMemcpyHostToDevice);

    // Execute scan on device
    hipEventRecord(start);
    for (int d = 0; d < ilog2ceil(n); d++) {
        hipLaunchKernelGGL(( up_sweep), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, n, d, dev_data);
    }
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&ms_time, start, stop);
    ms_total_time += ms_time;
    ms_time = 0.0f;

    // Clear the root before the down-sweep (exclusive-scan identity)
    hipMemset((void*)&dev_data[m - 1], 0, sizeof(int));
    hipEventRecord(start);
    for (int d = ilog2ceil(n) - 1; d >= 0; d--) {
        hipLaunchKernelGGL(( down_sweep), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, n, d, dev_data);
    }
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&ms_time, start, stop);
    ms_total_time += ms_time;

    hipMemcpy(odata, dev_data, n * sizeof(int), hipMemcpyDeviceToHost);
    hipFree(dev_data);
    free(new_idata);
    return ms_total_time;
}
/**
 * Performs stream compaction on idata, storing the result into odata.
 * All zeroes are discarded.
 *
 * Pipeline: map each element to a 0/1 flag (kernMapToBoolean), exclusive-scan
 * the flags to get output indices (scan), then scatter the non-zero elements
 * (kernScatter). Prints the accumulated GPU time of the three phases.
 *
 * @param n The number of elements in idata.
 * @param odata The array into which to store elements.
 * @param idata The array of elements to compact.
 * @returns The number of elements remaining after compaction.
 */
int compact(int n, int *odata, const int *idata) {
    int *bools = (int*)malloc(n * sizeof(int));
    int *scan_data = (int*)malloc(n * sizeof(int));
    int num_remaining = -1;
    dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
    dim3 threadsPerBlock(blockSize);
    hipEvent_t start, stop;
    float ms_time = 0.0f;
    float ms_total_time = 0.0f;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    int *dev_bools;
    int *dev_idata;
    int *dev_odata;
    int *dev_scan_data;
    hipMalloc((void**)&dev_bools, n * sizeof(int));
    hipMalloc((void**)&dev_idata, n * sizeof(int));
    hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
    hipMalloc((void**)&dev_odata, n * sizeof(int));
    hipMalloc((void**)&dev_scan_data, n * sizeof(int));

    // Map to boolean
    hipEventRecord(start);
    hipLaunchKernelGGL(( StreamCompaction::Common::kernMapToBoolean), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, n, dev_bools, dev_idata);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&ms_time, start, stop);
    ms_total_time += ms_time;
    ms_time = 0.0f;
    hipMemcpy(bools, dev_bools, n * sizeof(int), hipMemcpyDeviceToHost);

    // Execute the scan (round-trips through the host arrays)
    ms_total_time += scan(n, scan_data, bools);
    // exclusive scan: last index + last flag == count of surviving elements
    num_remaining = scan_data[n - 1] + bools[n - 1];

    // Execute the scatter
    hipMemcpy(dev_scan_data, scan_data, n * sizeof(int), hipMemcpyHostToDevice);
    hipEventRecord(start);
    hipLaunchKernelGGL(( StreamCompaction::Common::kernScatter), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, n, dev_odata, dev_idata, dev_bools, dev_scan_data);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&ms_time, start, stop);
    ms_total_time += ms_time;
    printf("CUDA execution time for stream compaction: %.5fms\n", ms_total_time);

    hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost);
    hipFree(dev_bools);
    hipFree(dev_idata);
    hipFree(dev_odata);
    hipFree(dev_scan_data);
    free(bools);
    free(scan_data);
    return num_remaining;
}
}
}
| 193ac1a7f8baf642a747a15a223cacb07a305a5a.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
// Up-sweep (reduce) phase of the Blelloch work-efficient scan at depth d:
// every 2^(d+1)-aligned segment adds its left half's partial sum into its
// right end: data[k + 2^(d+1) - 1] += data[k + 2^d - 1].
// FIX: compute 2^d / 2^(d+1) with integer shifts instead of double-precision
// pow(), which was needlessly expensive inside a kernel (values identical for
// the depths used here, d < 31).
__global__ void up_sweep(int n, int d, int *data) {
    int k = threadIdx.x + (blockIdx.x * blockDim.x);
    if (k < n) {
        int p2d = 1 << d;
        int p2da1 = 1 << (d + 1);
        if (k % p2da1 == 0) {
            data[k + p2da1 - 1] += data[k + p2d - 1];
        }
    }
}
// Down-sweep phase of the Blelloch scan at depth d: each 2^(d+1)-aligned
// segment moves its right-end prefix down to the left half and accumulates,
// turning the reduced tree into an exclusive prefix sum.
// FIX: compute 2^d / 2^(d+1) with integer shifts instead of double-precision
// pow(), which was needlessly expensive inside a kernel (values identical for
// the depths used here, d < 31).
__global__ void down_sweep(int n, int d, int *data) {
    int k = threadIdx.x + (blockIdx.x * blockDim.x);
    if (k < n) {
        int p2d = 1 << d;
        int p2da1 = 1 << (d + 1);
        if (k % p2da1 == 0) {
            int temp = data[k + p2d - 1];
            data[k + p2d - 1] = data[k + p2da1 - 1];
            data[k + p2da1 - 1] += temp;
        }
    }
}
// Zero-fills a[start .. end) — used to pad the scan buffer out to the next
// power-of-two length so the up-/down-sweep kernels see a complete tree.
void padArrayRange(int start, int end, int *a) {
    int idx = start;
    while (idx < end) {
        a[idx] = 0;
        ++idx;
    }
}
/**
 * Performs prefix-sum (aka scan) on idata, storing the result into odata.
 *
 * Exclusive scan via the Blelloch work-efficient algorithm: the input is
 * copied into a zero-padded buffer of the next power-of-two length m, the
 * up-sweep and down-sweep kernels are run for ilog2ceil(n) depths each, and
 * the first n results are copied back. Returns the GPU time of the two sweep
 * phases in milliseconds, measured with CUDA events (the host-side copies and
 * padding are not included in the timing).
 */
float scan(int n, int *odata, const int *idata) {
    int m = pow(2, ilog2ceil(n)); // padded length: next power of two >= n
    int *new_idata = (int*)malloc(m * sizeof(int));
    dim3 fullBlocksPerGrid((m + blockSize - 1) / blockSize);
    dim3 threadsPerBlock(blockSize);
    cudaEvent_t start, stop;
    float ms_time = 0.0f;
    float ms_total_time = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Expand array to next power of 2 size
    for (int i = 0; i < n; i++) {
        new_idata[i] = idata[i];
    }
    padArrayRange(n, m, new_idata);

    // Can use one array for input and output in this implementation
    int *dev_data;
    cudaMalloc((void**)&dev_data, m * sizeof(int));
    cudaMemcpy(dev_data, new_idata, m * sizeof(int), cudaMemcpyHostToDevice);

    // Execute scan on device
    cudaEventRecord(start);
    for (int d = 0; d < ilog2ceil(n); d++) {
        up_sweep<<<fullBlocksPerGrid, threadsPerBlock>>>(n, d, dev_data);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms_time, start, stop);
    ms_total_time += ms_time;
    ms_time = 0.0f;

    // Clear the root before the down-sweep (exclusive-scan identity)
    cudaMemset((void*)&dev_data[m - 1], 0, sizeof(int));
    cudaEventRecord(start);
    for (int d = ilog2ceil(n) - 1; d >= 0; d--) {
        down_sweep<<<fullBlocksPerGrid, threadsPerBlock>>>(n, d, dev_data);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms_time, start, stop);
    ms_total_time += ms_time;

    cudaMemcpy(odata, dev_data, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_data);
    free(new_idata);
    return ms_total_time;
}
/**
 * Performs stream compaction on idata, storing the result into odata.
 * All zeroes are discarded.
 *
 * Pipeline: map each element to a 0/1 flag (kernMapToBoolean), exclusive-scan
 * the flags to get output indices (scan), then scatter the non-zero elements
 * (kernScatter). Prints the accumulated GPU time of the three phases.
 *
 * @param n The number of elements in idata.
 * @param odata The array into which to store elements.
 * @param idata The array of elements to compact.
 * @returns The number of elements remaining after compaction.
 */
int compact(int n, int *odata, const int *idata) {
    int *bools = (int*)malloc(n * sizeof(int));
    int *scan_data = (int*)malloc(n * sizeof(int));
    int num_remaining = -1;
    dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
    dim3 threadsPerBlock(blockSize);
    cudaEvent_t start, stop;
    float ms_time = 0.0f;
    float ms_total_time = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int *dev_bools;
    int *dev_idata;
    int *dev_odata;
    int *dev_scan_data;
    cudaMalloc((void**)&dev_bools, n * sizeof(int));
    cudaMalloc((void**)&dev_idata, n * sizeof(int));
    cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&dev_odata, n * sizeof(int));
    cudaMalloc((void**)&dev_scan_data, n * sizeof(int));

    // Map to boolean
    cudaEventRecord(start);
    StreamCompaction::Common::kernMapToBoolean<<<fullBlocksPerGrid, threadsPerBlock>>>(n, dev_bools, dev_idata);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms_time, start, stop);
    ms_total_time += ms_time;
    ms_time = 0.0f;
    cudaMemcpy(bools, dev_bools, n * sizeof(int), cudaMemcpyDeviceToHost);

    // Execute the scan (round-trips through the host arrays)
    ms_total_time += scan(n, scan_data, bools);
    // exclusive scan: last index + last flag == count of surviving elements
    num_remaining = scan_data[n - 1] + bools[n - 1];

    // Execute the scatter
    cudaMemcpy(dev_scan_data, scan_data, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaEventRecord(start);
    StreamCompaction::Common::kernScatter<<<fullBlocksPerGrid, threadsPerBlock>>>(n, dev_odata, dev_idata, dev_bools, dev_scan_data);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms_time, start, stop);
    ms_total_time += ms_time;
    printf("CUDA execution time for stream compaction: %.5fms\n", ms_total_time);

    cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_bools);
    cudaFree(dev_idata);
    cudaFree(dev_odata);
    cudaFree(dev_scan_data);
    free(bools);
    free(scan_data);
    return num_remaining;
}
}
}
|
a721520104b31f157943d40c3dc60f25a727a2fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Elementwise scale: out[i] = in[i] * scale[(i / inner_dim) % scale_dim].
// The modular index broadcasts one scale value over inner_dim consecutive
// elements, repeating every scale_dim groups (e.g. per-channel scaling).
template <typename Dtype>
__global__ void ScaleForward(const int n, const Dtype* in,
    const Dtype* scale, const int scale_dim, const int inner_dim,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index];
  }
}
// Fused scale + bias: out[i] = in[i] * scale[s] + bias[s] where
// s = (i / inner_dim) % scale_dim, broadcasting one (scale, bias) pair over
// inner_dim consecutive elements.
template <typename Dtype>
__global__ void ScaleBiasForward(const int n, const Dtype* in,
    const Dtype* scale, const Dtype* bias,
    const int scale_dim, const int inner_dim, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index] + bias[scale_index];
  }
}
// Forward pass of the Scale layer: top = bottom * scale (+ bias when a bias
// layer is attached). The scale factors come either from a second bottom blob
// or from this layer's learned parameters (blobs_[0]).
template <typename Dtype>
void ScaleLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  if (bottom[0] == top[0] && this->layer_param_.phase()==TRAIN) {
    // in-place computation; need to store bottom data before overwriting it.
    // Note that this is only necessary for Backward; we could skip this if not
    // doing Backward, but Caffe currently provides no way of knowing whether
    // we'll need to do Backward at the time of the Forward call.
    caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
               temp_.mutable_gpu_data());
  }
  // Scale comes from a second input blob if given, otherwise from the
  // layer's own learnable parameter.
  const Dtype* scale_data =
      ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  if (bias_layer_) {
    const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data();
    ScaleBiasForward<Dtype>  // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
        , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
        top_data);
  } else {
    ScaleForward<Dtype>  // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
        , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
  }
}
// Backward pass of the Scale layer. Computes, as requested via
// propagate_down / param_propagate_down_:
//   - the bias gradient (delegated to the attached bias layer),
//   - the scale gradient: sum over the broadcast dimensions of
//     top_diff * bottom_data, reduced with dot products / GEMVs,
//   - the bottom gradient: top_diff * scale (reuses the ScaleForward kernel).
template <typename Dtype>
void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (bias_layer_ &&
      this->param_propagate_down_[this->param_propagate_down_.size() - 1]) {
    bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_);
  }
  // scale lives in blobs_[0] when learned, otherwise it is the second bottom.
  const bool scale_param = (bottom.size() == 1);
  Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1];
  if ((!scale_param && propagate_down[1]) ||
      (scale_param && this->param_propagate_down_[0])) {
    const Dtype* top_diff = top[0]->gpu_diff();
    const bool in_place = (bottom[0] == top[0]);
    const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data();
    // Hack: store big eltwise product in bottom[0] diff, except in the special
    // case where this layer itself does the eltwise product, in which case we
    // can store it directly in the scale diff, and we're done.
    // If we're computing in-place (and not doing eltwise computation), this
    // hack doesn't work and we store the product in temp_.
    const bool is_eltwise = (bottom[0]->count() == scale->count());
    Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() :
        (in_place ? temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff()));
    caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product);
    if (!is_eltwise) {
      Dtype* sum_result = NULL;
      if (inner_dim_ == 1) {
        // nothing to reduce along the inner dimension
        sum_result = product;
      } else if (sum_result_.count() == 1) {
        // single scale value: reduce everything to one scalar via a dot
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        Dtype* scale_diff = scale->mutable_cpu_diff();
        if (scale_param) {
          // accumulate into the parameter gradient instead of overwriting
          Dtype result;
          caffe_gpu_dot(inner_dim_, product, sum_mult, &result);
          *scale_diff += result;
        } else {
          caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff);
        }
      } else {
        // reduce the inner dimension with a GEMV against a ones vector
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        sum_result = (outer_dim_ == 1) ?
            scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data();
        caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
                       Dtype(1), product, sum_mult, Dtype(0), sum_result);
      }
      if (outer_dim_ != 1) {
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        if (scale_dim_ == 1) {
          Dtype* scale_diff = scale->mutable_cpu_diff();
          if (scale_param) {
            Dtype result;
            caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result);
            *scale_diff += result;
          } else {
            caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff);
          }
        } else {
          // reduce the outer dimension; beta = scale_param accumulates when
          // the scale is a learned parameter
          Dtype* scale_diff = scale->mutable_gpu_diff();
          caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_,
                         Dtype(1), sum_result, sum_mult, Dtype(scale_param),
                         scale_diff);
        }
      }
    }
  }
  if (propagate_down[0]) {
    // d(bottom) = top_diff * scale — same broadcast as the forward pass
    const int count = top[0]->count();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* scale_data = scale->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    ScaleForward<Dtype>  // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
        , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer);
} // namespace caffe
| a721520104b31f157943d40c3dc60f25a727a2fb.cu | #include <cfloat>
#include <vector>
#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Elementwise scale: out[i] = in[i] * scale[(i / inner_dim) % scale_dim].
// The modular index broadcasts one scale value over inner_dim consecutive
// elements, repeating every scale_dim groups (e.g. per-channel scaling).
template <typename Dtype>
__global__ void ScaleForward(const int n, const Dtype* in,
    const Dtype* scale, const int scale_dim, const int inner_dim,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index];
  }
}
// Fused scale + bias: out[i] = in[i] * scale[s] + bias[s] where
// s = (i / inner_dim) % scale_dim, broadcasting one (scale, bias) pair over
// inner_dim consecutive elements.
template <typename Dtype>
__global__ void ScaleBiasForward(const int n, const Dtype* in,
    const Dtype* scale, const Dtype* bias,
    const int scale_dim, const int inner_dim, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index] + bias[scale_index];
  }
}
// Forward pass of the Scale layer: top = bottom * scale (+ bias when a bias
// layer is attached). The scale factors come either from a second bottom blob
// or from this layer's learned parameters (blobs_[0]).
template <typename Dtype>
void ScaleLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  if (bottom[0] == top[0] && this->layer_param_.phase()==TRAIN) {
    // in-place computation; need to store bottom data before overwriting it.
    // Note that this is only necessary for Backward; we could skip this if not
    // doing Backward, but Caffe currently provides no way of knowing whether
    // we'll need to do Backward at the time of the Forward call.
    caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
               temp_.mutable_gpu_data());
  }
  // Scale comes from a second input blob if given, otherwise from the
  // layer's own learnable parameter.
  const Dtype* scale_data =
      ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  if (bias_layer_) {
    const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data();
    ScaleBiasForward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
        top_data);
  } else {
    ScaleForward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
  }
}
// GPU backward pass.  Computes, as requested by propagate_down /
// param_propagate_down_:
//   - the bias gradient (delegated to the internal bias layer),
//   - the scale gradient: sum over outer/inner dims of d(top) * bottom,
//   - the bottom gradient: d(top) * scale (same kernel as forward).
// The scale-gradient reduction is staged: first an elementwise product,
// then a reduction over inner_dim_, then (if needed) over outer_dim_.
template <typename Dtype>
void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (bias_layer_ &&
      this->param_propagate_down_[this->param_propagate_down_.size() - 1]) {
    bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_);
  }
  // scale_param: scale is a learned parameter (one input blob) rather than a
  // second bottom blob.  Affects where the gradient goes and whether it is
  // accumulated (+=) or overwritten.
  const bool scale_param = (bottom.size() == 1);
  Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1];
  if ((!scale_param && propagate_down[1]) ||
      (scale_param && this->param_propagate_down_[0])) {
    const Dtype* top_diff = top[0]->gpu_diff();
    const bool in_place = (bottom[0] == top[0]);
    // If computed in place, Forward saved the original bottom data in temp_.
    const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data();
    // Hack: store big eltwise product in bottom[0] diff, except in the special
    // case where this layer itself does the eltwise product, in which case we
    // can store it directly in the scale diff, and we're done.
    // If we're computing in-place (and not doing eltwise computation), this
    // hack doesn't work and we store the product in temp_.
    const bool is_eltwise = (bottom[0]->count() == scale->count());
    Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() :
        (in_place ? temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff()));
    caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product);
    if (!is_eltwise) {
      Dtype* sum_result = NULL;
      if (inner_dim_ == 1) {
        // Nothing to reduce over the inner dimension.
        sum_result = product;
      } else if (sum_result_.count() == 1) {
        // Fully reduced to a single scalar: one dot product suffices.
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        Dtype* scale_diff = scale->mutable_cpu_diff();
        if (scale_param) {
          // Accumulate into the existing parameter gradient.
          Dtype result;
          caffe_gpu_dot(inner_dim_, product, sum_mult, &result);
          *scale_diff += result;
        } else {
          caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff);
        }
      } else {
        // Reduce over inner_dim_ via GEMV against a vector of ones; write
        // directly into the scale diff when no outer reduction remains.
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        sum_result = (outer_dim_ == 1) ?
            scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data();
        caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
                       Dtype(1), product, sum_mult, Dtype(0), sum_result);
      }
      if (outer_dim_ != 1) {
        // Second reduction stage, over the outer dimension.
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        if (scale_dim_ == 1) {
          Dtype* scale_diff = scale->mutable_cpu_diff();
          if (scale_param) {
            Dtype result;
            caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result);
            *scale_diff += result;
          } else {
            caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff);
          }
        } else {
          // beta = scale_param: accumulate (1) for a learned parameter,
          // overwrite (0) for a bottom-blob gradient.
          Dtype* scale_diff = scale->mutable_gpu_diff();
          caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_,
                         Dtype(1), sum_result, sum_mult, Dtype(scale_param),
                         scale_diff);
        }
      }
    }
  }
  if (propagate_down[0]) {
    // d(bottom) = d(top) * scale, elementwise; reuses the forward kernel.
    const int count = top[0]->count();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* scale_data = scale->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    ScaleForward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer);
} // namespace caffe
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/transform.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>

#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_list_utilities.hpp>
#include <cudf_test/type_lists.hpp>

#include <rmm/exec_policy.hpp>

#include <thrust/execution_policy.h>
#include <thrust/sequence.h>

#include <algorithm>
#include <random>
// Test fixture providing a randomly-sized device data buffer and null-mask
// buffers for constructing cudf::column objects.
template <typename T>
struct TypedColumnTest : public cudf::test::BaseFixture {
  // The cudf data type corresponding to the fixture's element type T.
  cudf::data_type type() { return cudf::data_type{cudf::type_to_id<T>()}; }
  TypedColumnTest()
    : data{_num_elements * cudf::size_of(type()), cudf::get_default_stream()},
      mask{cudf::bitmask_allocation_size_bytes(_num_elements), cudf::get_default_stream()}
  {
    // Fill both buffers with a deterministic byte sequence (0, 1, 2, ...)
    // so copies can later be compared byte-for-byte.
    auto typed_data = static_cast<char*>(data.data());
    auto typed_mask = static_cast<char*>(mask.data());
    thrust::sequence(
      rmm::exec_policy(cudf::get_default_stream()), typed_data, typed_data + data.size());
    thrust::sequence(
      rmm::exec_policy(cudf::get_default_stream()), typed_mask, typed_mask + mask.size());
  }
  cudf::size_type num_elements() { return _num_elements; }
  // NOTE: declaration order matters — _num_elements (and the RNG members it
  // depends on) must be declared before the buffers, since members are
  // initialized in declaration order.
  std::random_device r;
  std::default_random_engine generator{r()};
  std::uniform_int_distribution<cudf::size_type> distribution{200, 1000};
  // Randomized element count in [200, 1000] so tests don't bake in a size.
  cudf::size_type _num_elements{distribution(generator)};
  rmm::device_buffer data{};
  rmm::device_buffer mask{};
  // Pre-built masks sized for num_elements(), moved into columns by tests.
  rmm::device_buffer all_valid_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_VALID)};
  rmm::device_buffer all_null_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_NULL)};
};
TYPED_TEST_SUITE(TypedColumnTest, cudf::test::Types<int32_t>);
/**
* @brief Verifies equality of the properties and data of a `column`'s views.
*
* @param col The `column` to verify
*/
/**
 * @brief Verifies equality of the properties and data of a `column`'s views.
 *
 * Converts the column to both a `column_view` and a `mutable_column_view`
 * and checks that type, size, null count, nullability, child count, data
 * pointers, and offset all agree between the column and its two views.
 *
 * @param col The `column` to verify (taken by value: the test exercises the
 *            views of this local copy, not of the caller's column)
 */
void verify_column_views(cudf::column col)
{
  cudf::column_view view = col;
  cudf::mutable_column_view mutable_view = col;
  EXPECT_EQ(col.type(), view.type());
  EXPECT_EQ(col.type(), mutable_view.type());
  EXPECT_EQ(col.size(), view.size());
  EXPECT_EQ(col.size(), mutable_view.size());
  EXPECT_EQ(col.null_count(), view.null_count());
  EXPECT_EQ(col.null_count(), mutable_view.null_count());
  EXPECT_EQ(col.nullable(), view.nullable());
  EXPECT_EQ(col.nullable(), mutable_view.nullable());
  EXPECT_EQ(col.num_children(), view.num_children());
  EXPECT_EQ(col.num_children(), mutable_view.num_children());
  // Both views must alias the same underlying buffers.
  EXPECT_EQ(view.head(), mutable_view.head());
  EXPECT_EQ(view.data<char>(), mutable_view.data<char>());
  EXPECT_EQ(view.offset(), mutable_view.offset());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountNoMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_FALSE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountEmptyMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}};
EXPECT_FALSE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
EXPECT_TRUE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, ExplicitNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask), 0};
EXPECT_TRUE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountAllNull)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)};
EXPECT_TRUE(col.nullable());
EXPECT_TRUE(col.has_nulls());
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ExplicitNullCountAllNull)
{
cudf::column col{this->type(),
this->num_elements(),
std::move(this->data),
std::move(this->all_null_mask),
this->num_elements()};
EXPECT_TRUE(col.nullable());
EXPECT_TRUE(col.has_nulls());
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, SetNullCountNoMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_THROW(col.set_null_count(1), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetEmptyNullMaskNonZeroNullCount)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
rmm::device_buffer empty_null_mask{};
EXPECT_THROW(col.set_null_mask(empty_null_mask, this->num_elements()), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetInvalidSizeNullMaskNonZeroNullCount)
{
  cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
  // num_elements() is always >= 200, so std::min(num_elements() - 50, 0)
  // yields 0: the mask is sized for zero rows and cannot cover the column,
  // so attaching it with a non-zero null count must throw.
  // (hipify had stripped the namespace, leaving unresolved `::min`; the CUDA
  // original uses std::min — restored here.)
  auto invalid_size_null_mask =
    create_null_mask(std::min(this->num_elements() - 50, 0), cudf::mask_state::ALL_VALID);
  EXPECT_THROW(col.set_null_mask(invalid_size_null_mask, this->num_elements()), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetNullCountEmptyMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}};
EXPECT_THROW(col.set_null_count(1), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
EXPECT_NO_THROW(col.set_null_count(0));
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, SetNullCountAllNull)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)};
EXPECT_NO_THROW(col.set_null_count(this->num_elements()));
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ResetNullCountAllNull)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)};
EXPECT_EQ(this->num_elements(), col.null_count());
EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT));
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ResetNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
EXPECT_EQ(0, col.null_count());
EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT));
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, CopyDataNoMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_EQ(this->type(), col.type());
EXPECT_FALSE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify deep copy
cudf::column_view v = col;
EXPECT_NE(v.head(), this->data.data());
CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size());
}
TYPED_TEST(TypedColumnTest, MoveDataNoMask)
{
void* original_data = this->data.data();
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_EQ(this->type(), col.type());
EXPECT_FALSE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify shallow copy
cudf::column_view v = col;
EXPECT_EQ(v.head(), original_data);
}
TYPED_TEST(TypedColumnTest, CopyDataAndMask)
{
cudf::column col{this->type(),
this->num_elements(),
rmm::device_buffer{this->data, cudf::get_default_stream()},
rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()}};
EXPECT_EQ(this->type(), col.type());
EXPECT_TRUE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify deep copy
cudf::column_view v = col;
EXPECT_NE(v.head(), this->data.data());
EXPECT_NE(v.null_mask(), this->all_valid_mask.data());
CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size());
CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.null_mask(), this->all_valid_mask.data(), this->mask.size());
}
TYPED_TEST(TypedColumnTest, MoveDataAndMask)
{
void* original_data = this->data.data();
void* original_mask = this->all_valid_mask.data();
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
EXPECT_EQ(this->type(), col.type());
EXPECT_TRUE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify shallow copy
cudf::column_view v = col;
EXPECT_EQ(v.head(), original_data);
EXPECT_EQ(v.null_mask(), original_mask);
}
TYPED_TEST(TypedColumnTest, CopyConstructorNoMask)
{
cudf::column original{this->type(), this->num_elements(), std::move(this->data)};
cudf::column copy{original};
verify_column_views(copy);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy);
// Verify deep copy
cudf::column_view original_view = original;
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
}
TYPED_TEST(TypedColumnTest, CopyConstructorWithMask)
{
cudf::column original{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
cudf::column copy{original};
verify_column_views(copy);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy);
// Verify deep copy
cudf::column_view original_view = original;
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
EXPECT_NE(original_view.null_mask(), copy_view.null_mask());
}
TYPED_TEST(TypedColumnTest, MoveConstructorNoMask)
{
cudf::column original{this->type(), this->num_elements(), std::move(this->data)};
auto original_data = original.view().head();
cudf::column moved_to{std::move(original)};
EXPECT_EQ(0, original.size());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type());
verify_column_views(moved_to);
// Verify move
cudf::column_view moved_to_view = moved_to;
EXPECT_EQ(original_data, moved_to_view.head());
}
TYPED_TEST(TypedColumnTest, MoveConstructorWithMask)
{
cudf::column original{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
auto original_data = original.view().head();
auto original_mask = original.view().null_mask();
cudf::column moved_to{std::move(original)};
verify_column_views(moved_to);
EXPECT_EQ(0, original.size());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type());
// Verify move
cudf::column_view moved_to_view = moved_to;
EXPECT_EQ(original_data, moved_to_view.head());
EXPECT_EQ(original_mask, moved_to_view.null_mask());
}
TYPED_TEST(TypedColumnTest, DeviceUvectorConstructorNoMask)
{
rmm::device_uvector<TypeParam> original{static_cast<std::size_t>(this->num_elements()),
cudf::get_default_stream()};
thrust::copy(rmm::exec_policy(cudf::get_default_stream()),
static_cast<TypeParam*>(this->data.data()),
static_cast<TypeParam*>(this->data.data()) + this->num_elements(),
original.begin());
auto original_data = original.data();
cudf::column moved_to{std::move(original)};
verify_column_views(moved_to);
// Verify move
cudf::column_view moved_to_view = moved_to;
EXPECT_EQ(original_data, moved_to_view.head());
}
TYPED_TEST(TypedColumnTest, DeviceUvectorConstructorWithMask)
{
rmm::device_uvector<TypeParam> original{static_cast<std::size_t>(this->num_elements()),
cudf::get_default_stream()};
thrust::copy(rmm::exec_policy(cudf::get_default_stream()),
static_cast<TypeParam*>(this->data.data()),
static_cast<TypeParam*>(this->data.data()) + this->num_elements(),
original.begin());
auto original_data = original.data();
auto original_mask = this->all_valid_mask.data();
cudf::column moved_to{std::move(original), std::move(this->all_valid_mask)};
verify_column_views(moved_to);
// Verify move
cudf::column_view moved_to_view = moved_to;
EXPECT_EQ(original_data, moved_to_view.head());
EXPECT_EQ(original_mask, moved_to_view.null_mask());
}
TYPED_TEST(TypedColumnTest, ConstructWithChildren)
{
  // Verify that a column constructed with child columns exposes them, in
  // order, through num_children()/child().  (Removed a stray empty
  // statement `;` that followed the vector declaration.)
  std::vector<std::unique_ptr<cudf::column>> children;
  children.emplace_back(std::make_unique<cudf::column>(
    cudf::data_type{cudf::type_id::INT8},
    42,
    rmm::device_buffer{this->data, cudf::get_default_stream()},
    rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()}));
  children.emplace_back(std::make_unique<cudf::column>(
    cudf::data_type{cudf::type_id::FLOAT64},
    314,
    rmm::device_buffer{this->data, cudf::get_default_stream()},
    rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()}));
  cudf::column col{this->type(),
                   this->num_elements(),
                   rmm::device_buffer{this->data, cudf::get_default_stream()},
                   rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()},
                   cudf::UNKNOWN_NULL_COUNT,
                   std::move(children)};
  verify_column_views(col);
  EXPECT_EQ(2, col.num_children());
  EXPECT_EQ(cudf::data_type{cudf::type_id::INT8}, col.child(0).type());
  EXPECT_EQ(42, col.child(0).size());
  EXPECT_EQ(cudf::data_type{cudf::type_id::FLOAT64}, col.child(1).type());
  EXPECT_EQ(314, col.child(1).size());
}
TYPED_TEST(TypedColumnTest, ReleaseNoChildren)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
auto original_data = col.view().head();
auto original_mask = col.view().null_mask();
cudf::column::contents contents = col.release();
EXPECT_EQ(original_data, contents.data->data());
EXPECT_EQ(original_mask, contents.null_mask->data());
EXPECT_EQ(0u, contents.children.size());
EXPECT_EQ(0, col.size());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type());
EXPECT_EQ(0, col.num_children());
}
TYPED_TEST(TypedColumnTest, ReleaseWithChildren)
{
std::vector<std::unique_ptr<cudf::column>> children;
children.emplace_back(std::make_unique<cudf::column>(
this->type(),
this->num_elements(),
rmm::device_buffer{this->data, cudf::get_default_stream()},
rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()}));
children.emplace_back(std::make_unique<cudf::column>(
this->type(),
this->num_elements(),
rmm::device_buffer{this->data, cudf::get_default_stream()},
rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()}));
cudf::column col{this->type(),
this->num_elements(),
rmm::device_buffer{this->data, cudf::get_default_stream()},
rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()},
cudf::UNKNOWN_NULL_COUNT,
std::move(children)};
auto original_data = col.view().head();
auto original_mask = col.view().null_mask();
cudf::column::contents contents = col.release();
EXPECT_EQ(original_data, contents.data->data());
EXPECT_EQ(original_mask, contents.null_mask->data());
EXPECT_EQ(2u, contents.children.size());
EXPECT_EQ(0, col.size());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type());
EXPECT_EQ(0, col.num_children());
}
TYPED_TEST(TypedColumnTest, ColumnViewConstructorWithMask)
{
cudf::column original{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
cudf::column_view original_view = original;
cudf::column copy{original_view};
verify_column_views(copy);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy);
// Verify deep copy
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
EXPECT_NE(original_view.null_mask(), copy_view.null_mask());
}
template <typename T>
struct ListsColumnTest : public cudf::test::BaseFixture {
};
using NumericTypesNotBool =
cudf::test::Concat<cudf::test::IntegralTypesNotBool, cudf::test::FloatingPointTypes>;
TYPED_TEST_SUITE(ListsColumnTest, NumericTypesNotBool);
TYPED_TEST(ListsColumnTest, ListsColumnViewConstructor)
{
cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}};
auto result = std::make_unique<cudf::column>(list);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(list, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructor)
{
cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}};
cudf::test::lists_column_wrapper<TypeParam> expect{{3, 4}, {5, 6, 7}};
auto sliced = cudf::slice(list, {1, 3}).front();
auto result = std::make_unique<cudf::column>(sliced);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedIncludesEmpty)
{
cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {}, {3, 4}, {8, 9}};
cudf::test::lists_column_wrapper<TypeParam> expect{{}, {3, 4}};
auto sliced = cudf::slice(list, {1, 3}).front();
auto result = std::make_unique<cudf::column>(sliced);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedNonNestedEmpty)
{
using LCW = cudf::test::lists_column_wrapper<TypeParam>;
// Column of List<int>
LCW list{{1, 2}, {}, {3, 4}, {8, 9}};
// Column of 1 row, an empty List<int>
LCW expect{LCW{}};
auto sliced = cudf::slice(list, {1, 2}).front();
auto result = std::make_unique<cudf::column>(sliced);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedNestedEmpty)
{
using LCW = cudf::test::lists_column_wrapper<TypeParam>;
using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>;
// Column of List<List<int>>, with incomplete hierarchy
LCW list{{LCW{1}, LCW{2}},
{}, // < ----------- empty List<List<int>>, slice this
{LCW{3}, LCW{4, 5}}};
// Make 1-row column of type List<List<int>>, the row data contains 0 element.
// Well-formed memory layout:
// type: List<List<int>>
// Length: 1
// Mask: 1
// Offsets: 0, 0
// List<int>
// Length: 0
// Offset:
// INT
// Length: 0
auto leaf = std::make_unique<cudf::column>(cudf::column(LCW{}));
auto offset = std::make_unique<cudf::column>(cudf::column(FWCW_SZ{0, 0}));
auto null_mask = cudf::create_null_mask(0, cudf::mask_state::UNALLOCATED);
auto expect =
cudf::make_lists_column(1, std::move(offset), std::move(leaf), 0, std::move(null_mask));
auto sliced = cudf::slice(list, {1, 2}).front();
auto result = std::make_unique<cudf::column>(sliced);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNested)
{
using LCW = cudf::test::lists_column_wrapper<TypeParam>;
using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>;
// Column of List<List<int>>, with incomplete hierarchy
LCW list{{LCW{1}, LCW{2}}, {}, {LCW{3}, LCW{4, 5}}};
auto expect = cudf::empty_like(list);
auto sliced = cudf::slice(list, {0, 0}).front();
auto result = std::make_unique<cudf::column>(sliced);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNonNested)
{
using LCW = cudf::test::lists_column_wrapper<TypeParam>;
using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>;
LCW list{{1, 2}, {}, {3, 4}, {8, 9}};
auto expect = cudf::empty_like(list);
auto sliced = cudf::slice(list, {0, 0}).front();
auto result = std::make_unique<cudf::column>(sliced);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructorWithNulls)
{
auto valids =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0; });
auto expect_valids =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 != 0; });
using LCW = cudf::test::lists_column_wrapper<TypeParam>;
cudf::test::lists_column_wrapper<TypeParam> list{
{{{{1, 2}, {3, 4}}, valids}, LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}},
valids};
cudf::test::lists_column_wrapper<TypeParam> expect{
{LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}}, expect_valids};
auto sliced = cudf::slice(list, {1, 5}).front();
auto result = std::make_unique<cudf::column>(sliced);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, result->view());
// TODO: null mask equality is being checked separately because
// expect_columns_equal doesn't do the check for lists columns.
// This is fixed in https://github.com/rapidsai/cudf/pull/5904,
// so we should remove this check after that's merged:
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
cudf::mask_to_bools(result->view().null_mask(), 0, 4)->view(),
cudf::mask_to_bools(static_cast<cudf::column_view>(expect).null_mask(), 0, 4)->view());
}
CUDF_TEST_PROGRAM_MAIN()
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/transform.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_list_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <random>
template <typename T>
struct TypedColumnTest : public cudf::test::BaseFixture {
cudf::data_type type() { return cudf::data_type{cudf::type_to_id<T>()}; }
TypedColumnTest()
: data{_num_elements * cudf::size_of(type()), cudf::get_default_stream()},
mask{cudf::bitmask_allocation_size_bytes(_num_elements), cudf::get_default_stream()}
{
auto typed_data = static_cast<char*>(data.data());
auto typed_mask = static_cast<char*>(mask.data());
thrust::sequence(
rmm::exec_policy(cudf::get_default_stream()), typed_data, typed_data + data.size());
thrust::sequence(
rmm::exec_policy(cudf::get_default_stream()), typed_mask, typed_mask + mask.size());
}
cudf::size_type num_elements() { return _num_elements; }
std::random_device r;
std::default_random_engine generator{r()};
std::uniform_int_distribution<cudf::size_type> distribution{200, 1000};
cudf::size_type _num_elements{distribution(generator)};
rmm::device_buffer data{};
rmm::device_buffer mask{};
rmm::device_buffer all_valid_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_VALID)};
rmm::device_buffer all_null_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_NULL)};
};
TYPED_TEST_SUITE(TypedColumnTest, cudf::test::Types<int32_t>);
/**
* @brief Verifies equality of the properties and data of a `column`'s views.
*
* @param col The `column` to verify
*/
void verify_column_views(cudf::column col)
{
cudf::column_view view = col;
cudf::mutable_column_view mutable_view = col;
EXPECT_EQ(col.type(), view.type());
EXPECT_EQ(col.type(), mutable_view.type());
EXPECT_EQ(col.size(), view.size());
EXPECT_EQ(col.size(), mutable_view.size());
EXPECT_EQ(col.null_count(), view.null_count());
EXPECT_EQ(col.null_count(), mutable_view.null_count());
EXPECT_EQ(col.nullable(), view.nullable());
EXPECT_EQ(col.nullable(), mutable_view.nullable());
EXPECT_EQ(col.num_children(), view.num_children());
EXPECT_EQ(col.num_children(), mutable_view.num_children());
EXPECT_EQ(view.head(), mutable_view.head());
EXPECT_EQ(view.data<char>(), mutable_view.data<char>());
EXPECT_EQ(view.offset(), mutable_view.offset());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountNoMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_FALSE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountEmptyMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}};
EXPECT_FALSE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
EXPECT_TRUE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, ExplicitNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask), 0};
EXPECT_TRUE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountAllNull)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)};
EXPECT_TRUE(col.nullable());
EXPECT_TRUE(col.has_nulls());
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ExplicitNullCountAllNull)
{
cudf::column col{this->type(),
this->num_elements(),
std::move(this->data),
std::move(this->all_null_mask),
this->num_elements()};
EXPECT_TRUE(col.nullable());
EXPECT_TRUE(col.has_nulls());
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, SetNullCountNoMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_THROW(col.set_null_count(1), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetEmptyNullMaskNonZeroNullCount)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
rmm::device_buffer empty_null_mask{};
EXPECT_THROW(col.set_null_mask(empty_null_mask, this->num_elements()), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetInvalidSizeNullMaskNonZeroNullCount)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
auto invalid_size_null_mask =
create_null_mask(std::min(this->num_elements() - 50, 0), cudf::mask_state::ALL_VALID);
EXPECT_THROW(col.set_null_mask(invalid_size_null_mask, this->num_elements()), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetNullCountEmptyMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}};
EXPECT_THROW(col.set_null_count(1), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
EXPECT_NO_THROW(col.set_null_count(0));
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, SetNullCountAllNull)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)};
EXPECT_NO_THROW(col.set_null_count(this->num_elements()));
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ResetNullCountAllNull)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)};
EXPECT_EQ(this->num_elements(), col.null_count());
EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT));
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ResetNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
EXPECT_EQ(0, col.null_count());
EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT));
EXPECT_EQ(0, col.null_count());
}
// Constructing from a copy of the fixture's data buffer must deep-copy:
// same bytes, different allocation.
// BUGFIX: the original moved `this->data` into the column, which made the
// deep-copy checks below vacuous — after the move `this->data` no longer
// owns the bytes, so EXPECT_NE compared against a hollow buffer and the
// byte comparison ran over its (now meaningless) contents. Pass a
// device_buffer copy instead, matching CopyDataAndMask below.
TYPED_TEST(TypedColumnTest, CopyDataNoMask)
{
  cudf::column col{this->type(),
                   this->num_elements(),
                   rmm::device_buffer{this->data, cudf::get_default_stream()}};
  EXPECT_EQ(this->type(), col.type());
  EXPECT_FALSE(col.nullable());
  EXPECT_EQ(0, col.null_count());
  EXPECT_EQ(this->num_elements(), col.size());
  EXPECT_EQ(0, col.num_children());
  verify_column_views(col);
  // Verify deep copy
  cudf::column_view v = col;
  EXPECT_NE(v.head(), this->data.data());
  CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size());
}

// Constructing by moving the data buffer must be zero-copy: the view's head
// pointer is the original allocation.
TYPED_TEST(TypedColumnTest, MoveDataNoMask)
{
  void* original_data = this->data.data();
  cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
  EXPECT_EQ(this->type(), col.type());
  EXPECT_FALSE(col.nullable());
  EXPECT_EQ(0, col.null_count());
  EXPECT_EQ(this->num_elements(), col.size());
  EXPECT_EQ(0, col.num_children());
  verify_column_views(col);
  // Verify shallow copy
  cudf::column_view v = col;
  EXPECT_EQ(v.head(), original_data);
}

// Copy-constructed data and mask buffers must both be deep copies with
// identical contents.
TYPED_TEST(TypedColumnTest, CopyDataAndMask)
{
  cudf::column col{this->type(),
                   this->num_elements(),
                   rmm::device_buffer{this->data, cudf::get_default_stream()},
                   rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()}};
  EXPECT_EQ(this->type(), col.type());
  EXPECT_TRUE(col.nullable());
  EXPECT_EQ(0, col.null_count());
  EXPECT_EQ(this->num_elements(), col.size());
  EXPECT_EQ(0, col.num_children());
  verify_column_views(col);
  // Verify deep copy
  cudf::column_view v = col;
  EXPECT_NE(v.head(), this->data.data());
  EXPECT_NE(v.null_mask(), this->all_valid_mask.data());
  CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size());
  CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.null_mask(), this->all_valid_mask.data(), this->mask.size());
}

// Moving both buffers must be zero-copy for data and mask alike.
TYPED_TEST(TypedColumnTest, MoveDataAndMask)
{
  void* original_data = this->data.data();
  void* original_mask = this->all_valid_mask.data();
  cudf::column col{
    this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
  EXPECT_EQ(this->type(), col.type());
  EXPECT_TRUE(col.nullable());
  EXPECT_EQ(0, col.null_count());
  EXPECT_EQ(this->num_elements(), col.size());
  EXPECT_EQ(0, col.num_children());
  verify_column_views(col);
  // Verify shallow copy
  cudf::column_view v = col;
  EXPECT_EQ(v.head(), original_data);
  EXPECT_EQ(v.null_mask(), original_mask);
}
// Copy-constructing a column (no mask) must deep-copy: contents equal,
// allocations distinct.
TYPED_TEST(TypedColumnTest, CopyConstructorNoMask)
{
  cudf::column original{this->type(), this->num_elements(), std::move(this->data)};
  cudf::column copy{original};
  verify_column_views(copy);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy);
  // Verify deep copy
  cudf::column_view original_view = original;
  cudf::column_view copy_view = copy;
  EXPECT_NE(original_view.head(), copy_view.head());
}

// Copy-constructing a column with a mask must deep-copy data AND mask.
TYPED_TEST(TypedColumnTest, CopyConstructorWithMask)
{
  cudf::column original{
    this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
  cudf::column copy{original};
  verify_column_views(copy);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy);
  // Verify deep copy
  cudf::column_view original_view = original;
  cudf::column_view copy_view = copy;
  EXPECT_NE(original_view.head(), copy_view.head());
  EXPECT_NE(original_view.null_mask(), copy_view.null_mask());
}

// Move-constructing must steal the allocation and leave the source empty
// with type EMPTY.
TYPED_TEST(TypedColumnTest, MoveConstructorNoMask)
{
  cudf::column original{this->type(), this->num_elements(), std::move(this->data)};
  auto original_data = original.view().head();
  cudf::column moved_to{std::move(original)};
  EXPECT_EQ(0, original.size());
  EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type());
  verify_column_views(moved_to);
  // Verify move
  cudf::column_view moved_to_view = moved_to;
  EXPECT_EQ(original_data, moved_to_view.head());
}

// As above, and the mask allocation must be stolen too.
TYPED_TEST(TypedColumnTest, MoveConstructorWithMask)
{
  cudf::column original{
    this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
  auto original_data = original.view().head();
  auto original_mask = original.view().null_mask();
  cudf::column moved_to{std::move(original)};
  verify_column_views(moved_to);
  EXPECT_EQ(0, original.size());
  EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type());
  // Verify move
  cudf::column_view moved_to_view = moved_to;
  EXPECT_EQ(original_data, moved_to_view.head());
  EXPECT_EQ(original_mask, moved_to_view.null_mask());
}
// Constructing a column from a moved device_uvector (no mask) must be
// zero-copy: the column's head pointer is the uvector's allocation.
TYPED_TEST(TypedColumnTest, DeviceUvectorConstructorNoMask)
{
  rmm::device_uvector<TypeParam> original{static_cast<std::size_t>(this->num_elements()),
                                          cudf::get_default_stream()};
  // Seed the uvector with the fixture's device data.
  thrust::copy(rmm::exec_policy(cudf::get_default_stream()),
               static_cast<TypeParam*>(this->data.data()),
               static_cast<TypeParam*>(this->data.data()) + this->num_elements(),
               original.begin());
  auto original_data = original.data();
  cudf::column moved_to{std::move(original)};
  verify_column_views(moved_to);
  // Verify move
  cudf::column_view moved_to_view = moved_to;
  EXPECT_EQ(original_data, moved_to_view.head());
}

// As above, with a null mask moved in alongside the data: both allocations
// must be adopted without copying.
TYPED_TEST(TypedColumnTest, DeviceUvectorConstructorWithMask)
{
  rmm::device_uvector<TypeParam> original{static_cast<std::size_t>(this->num_elements()),
                                          cudf::get_default_stream()};
  thrust::copy(rmm::exec_policy(cudf::get_default_stream()),
               static_cast<TypeParam*>(this->data.data()),
               static_cast<TypeParam*>(this->data.data()) + this->num_elements(),
               original.begin());
  auto original_data = original.data();
  auto original_mask = this->all_valid_mask.data();
  cudf::column moved_to{std::move(original), std::move(this->all_valid_mask)};
  verify_column_views(moved_to);
  // Verify move
  cudf::column_view moved_to_view = moved_to;
  EXPECT_EQ(original_data, moved_to_view.head());
  EXPECT_EQ(original_mask, moved_to_view.null_mask());
}
// A column constructed with child columns must report the children's count,
// types, and sizes through child().
// BUGFIX: removed a stray empty statement (`;`) that followed the vector
// declaration in the original.
TYPED_TEST(TypedColumnTest, ConstructWithChildren)
{
  std::vector<std::unique_ptr<cudf::column>> children;
  // Two children of deliberately different type/size than the parent.
  children.emplace_back(std::make_unique<cudf::column>(
    cudf::data_type{cudf::type_id::INT8},
    42,
    rmm::device_buffer{this->data, cudf::get_default_stream()},
    rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()}));
  children.emplace_back(std::make_unique<cudf::column>(
    cudf::data_type{cudf::type_id::FLOAT64},
    314,
    rmm::device_buffer{this->data, cudf::get_default_stream()},
    rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()}));
  cudf::column col{this->type(),
                   this->num_elements(),
                   rmm::device_buffer{this->data, cudf::get_default_stream()},
                   rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()},
                   cudf::UNKNOWN_NULL_COUNT,
                   std::move(children)};
  verify_column_views(col);
  EXPECT_EQ(2, col.num_children());
  EXPECT_EQ(cudf::data_type{cudf::type_id::INT8}, col.child(0).type());
  EXPECT_EQ(42, col.child(0).size());
  EXPECT_EQ(cudf::data_type{cudf::type_id::FLOAT64}, col.child(1).type());
  EXPECT_EQ(314, col.child(1).size());
}
// release() must hand back the exact data/mask allocations and leave the
// column empty: size 0, null count 0, type EMPTY, no children.
TYPED_TEST(TypedColumnTest, ReleaseNoChildren)
{
  cudf::column col{
    this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
  auto original_data = col.view().head();
  auto original_mask = col.view().null_mask();
  cudf::column::contents contents = col.release();
  EXPECT_EQ(original_data, contents.data->data());
  EXPECT_EQ(original_mask, contents.null_mask->data());
  EXPECT_EQ(0u, contents.children.size());
  EXPECT_EQ(0, col.size());
  EXPECT_EQ(0, col.null_count());
  EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type());
  EXPECT_EQ(0, col.num_children());
}

// As above, and the released contents must also carry the two child columns.
TYPED_TEST(TypedColumnTest, ReleaseWithChildren)
{
  std::vector<std::unique_ptr<cudf::column>> children;
  children.emplace_back(std::make_unique<cudf::column>(
    this->type(),
    this->num_elements(),
    rmm::device_buffer{this->data, cudf::get_default_stream()},
    rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()}));
  children.emplace_back(std::make_unique<cudf::column>(
    this->type(),
    this->num_elements(),
    rmm::device_buffer{this->data, cudf::get_default_stream()},
    rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()}));
  cudf::column col{this->type(),
                   this->num_elements(),
                   rmm::device_buffer{this->data, cudf::get_default_stream()},
                   rmm::device_buffer{this->all_valid_mask, cudf::get_default_stream()},
                   cudf::UNKNOWN_NULL_COUNT,
                   std::move(children)};
  auto original_data = col.view().head();
  auto original_mask = col.view().null_mask();
  cudf::column::contents contents = col.release();
  EXPECT_EQ(original_data, contents.data->data());
  EXPECT_EQ(original_mask, contents.null_mask->data());
  EXPECT_EQ(2u, contents.children.size());
  EXPECT_EQ(0, col.size());
  EXPECT_EQ(0, col.null_count());
  EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type());
  EXPECT_EQ(0, col.num_children());
}
// Constructing a column from a column_view must deep-copy data and mask.
TYPED_TEST(TypedColumnTest, ColumnViewConstructorWithMask)
{
  cudf::column original{
    this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
  cudf::column_view original_view = original;
  cudf::column copy{original_view};
  verify_column_views(copy);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy);
  // Verify deep copy
  cudf::column_view copy_view = copy;
  EXPECT_NE(original_view.head(), copy_view.head());
  EXPECT_NE(original_view.null_mask(), copy_view.null_mask());
}

// Typed fixture for lists-column construction tests.
template <typename T>
struct ListsColumnTest : public cudf::test::BaseFixture {
};

// All numeric element types except bool.
using NumericTypesNotBool =
  cudf::test::Concat<cudf::test::IntegralTypesNotBool, cudf::test::FloatingPointTypes>;
TYPED_TEST_SUITE(ListsColumnTest, NumericTypesNotBool);

// Round-tripping a lists column through the column_view constructor must
// preserve its contents.
TYPED_TEST(ListsColumnTest, ListsColumnViewConstructor)
{
  cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}};
  auto result = std::make_unique<cudf::column>(list);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(list, result->view());
}
// Materializing a sliced lists view into a column must keep only the rows
// in the slice range [1, 3).
TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructor)
{
  cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}};
  cudf::test::lists_column_wrapper<TypeParam> expect{{3, 4}, {5, 6, 7}};
  auto sliced = cudf::slice(list, {1, 3}).front();
  auto result = std::make_unique<cudf::column>(sliced);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, result->view());
}

// A slice that covers an empty list row must preserve that empty row.
TYPED_TEST(ListsColumnTest, ListsSlicedIncludesEmpty)
{
  cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {}, {3, 4}, {8, 9}};
  cudf::test::lists_column_wrapper<TypeParam> expect{{}, {3, 4}};
  auto sliced = cudf::slice(list, {1, 3}).front();
  auto result = std::make_unique<cudf::column>(sliced);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, result->view());
}

// A one-row slice containing only an empty list must materialize as a
// single-row column whose row is an empty List<int>.
TYPED_TEST(ListsColumnTest, ListsSlicedNonNestedEmpty)
{
  using LCW = cudf::test::lists_column_wrapper<TypeParam>;
  // Column of List<int>
  LCW list{{1, 2}, {}, {3, 4}, {8, 9}};
  // Column of 1 row, an empty List<int>
  LCW expect{LCW{}};
  auto sliced = cudf::slice(list, {1, 2}).front();
  auto result = std::make_unique<cudf::column>(sliced);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, result->view());
}
// Slicing an empty List<List<int>> row must produce a well-formed one-row
// nested column (offsets {0, 0} and an empty child hierarchy), built here
// explicitly via make_lists_column for comparison.
TYPED_TEST(ListsColumnTest, ListsSlicedNestedEmpty)
{
  using LCW     = cudf::test::lists_column_wrapper<TypeParam>;
  using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>;
  // Column of List<List<int>>, with incomplete hierarchy
  LCW list{{LCW{1}, LCW{2}},
           {},  // < ----------- empty List<List<int>>, slice this
           {LCW{3}, LCW{4, 5}}};
  // Make 1-row column of type List<List<int>>, the row data contains 0 element.
  // Well-formed memory layout:
  // type: List<List<int>>
  // Length: 1
  // Mask: 1
  // Offsets: 0, 0
  //    List<int>
  //    Length: 0
  //    Offset:
  //        INT
  //        Length: 0
  auto leaf      = std::make_unique<cudf::column>(cudf::column(LCW{}));
  auto offset    = std::make_unique<cudf::column>(cudf::column(FWCW_SZ{0, 0}));
  auto null_mask = cudf::create_null_mask(0, cudf::mask_state::UNALLOCATED);
  auto expect =
    cudf::make_lists_column(1, std::move(offset), std::move(leaf), 0, std::move(null_mask));
  auto sliced = cudf::slice(list, {1, 2}).front();
  auto result = std::make_unique<cudf::column>(sliced);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expect, result->view());
}
// A zero-length slice of a nested lists column must materialize as a column
// equivalent to empty_like() of the input.
// CLEANUP: removed the unused `FWCW_SZ` alias that both of these tests
// declared but never referenced.
TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNested)
{
  using LCW = cudf::test::lists_column_wrapper<TypeParam>;
  // Column of List<List<int>>, with incomplete hierarchy
  LCW list{{LCW{1}, LCW{2}}, {}, {LCW{3}, LCW{4, 5}}};
  auto expect = cudf::empty_like(list);
  auto sliced = cudf::slice(list, {0, 0}).front();
  auto result = std::make_unique<cudf::column>(sliced);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expect, result->view());
}

// Same contract for a non-nested lists column.
TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNonNested)
{
  using LCW = cudf::test::lists_column_wrapper<TypeParam>;
  LCW list{{1, 2}, {}, {3, 4}, {8, 9}};
  auto expect = cudf::empty_like(list);
  auto sliced = cudf::slice(list, {0, 0}).front();
  auto result = std::make_unique<cudf::column>(sliced);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expect, result->view());
}
// Slicing a nulls-bearing lists column must shift the validity pattern to
// the new row origin: rows were valid at even indices, so after dropping
// row 0 the surviving rows are valid at odd indices.
TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructorWithNulls)
{
  auto valids =
    cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0; });
  auto expect_valids =
    cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 != 0; });
  using LCW = cudf::test::lists_column_wrapper<TypeParam>;
  cudf::test::lists_column_wrapper<TypeParam> list{
    {{{{1, 2}, {3, 4}}, valids}, LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}},
    valids};
  cudf::test::lists_column_wrapper<TypeParam> expect{
    {LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}}, expect_valids};
  auto sliced = cudf::slice(list, {1, 5}).front();
  auto result = std::make_unique<cudf::column>(sliced);
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, result->view());
  // TODO: null mask equality is being checked separately because
  // expect_columns_equal doesn't do the check for lists columns.
  // This is fixed in https://github.com/rapidsai/cudf/pull/5904,
  // so we should remove this check after that's merged:
  CUDF_TEST_EXPECT_COLUMNS_EQUAL(
    cudf::mask_to_bools(result->view().null_mask(), 0, 4)->view(),
    cudf::mask_to_bools(static_cast<cudf::column_view>(expect).null_mask(), 0, 4)->view());
}
CUDF_TEST_PROGRAM_MAIN()
|
a813b0fe6eb206b185c792b1e154bf81bb128efe.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "boxFilter.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *srcImage = NULL;
hipMalloc(&srcImage, XSIZE*YSIZE);
unsigned char *dstImage = NULL;
hipMalloc(&dstImage, XSIZE*YSIZE);
unsigned int width = 1;
unsigned int height = 1;
int channel = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
boxFilter), dim3(gridBlock),dim3(threadBlock), 0, 0, srcImage,dstImage,width,height,channel);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
boxFilter), dim3(gridBlock),dim3(threadBlock), 0, 0, srcImage,dstImage,width,height,channel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
boxFilter), dim3(gridBlock),dim3(threadBlock), 0, 0, srcImage,dstImage,width,height,channel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a813b0fe6eb206b185c792b1e154bf81bb128efe.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "boxFilter.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmarks the boxFilter kernel over 7 matrix sizes x 20 launch
// configurations; argv[1] selects how many matrix sizes to run.
// Prints one "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" line per config.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // BUGFIX: guard argv[1] (was read unconditionally — UB when run with no
    // arguments) and clamp to the 7 entries actually present in matrices_
    // (larger values read past the end of the array).
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_count (1..7)>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    if (matrix_len > 7) matrix_len = 7;
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            unsigned char *srcImage = NULL;
            cudaMalloc(&srcImage, XSIZE*YSIZE);
            unsigned char *dstImage = NULL;
            cudaMalloc(&dstImage, XSIZE*YSIZE);
            unsigned int width = 1;
            unsigned int height = 1;
            int channel = 1;
            // Round the image dimensions up to whole blocks.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // no-op that forces context creation before timing
            boxFilter<<<gridBlock,threadBlock>>>(srcImage,dstImage,width,height,channel);
            cudaDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                boxFilter<<<gridBlock,threadBlock>>>(srcImage,dstImage,width,height,channel);
            }
            // BUGFIX: synchronize before and after the timed loop. Kernel
            // launches are asynchronous, so without the trailing sync the
            // timer measured only launch overhead, not execution time, and
            // without the leading sync it included leftover warm-up work.
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                boxFilter<<<gridBlock,threadBlock>>>(srcImage,dstImage,width,height,channel);
            }
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUGFIX: free the per-configuration buffers (the original
            // leaked both allocations on every one of the 140 iterations).
            cudaFree(srcImage);
            cudaFree(dstImage);
        }
    }
}
a033d83fcceed1ac49ad98085c80b35d5c808ae7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#define N 2048 * 2048 // Number of elements in each vector
/*
* Optimize this already-accelerated codebase. Work iteratively,
* and use nsys to support your work.
*
* Aim to profile `saxpy` (without modifying `N`) running under
* 20us.
*
* Some bugs have been placed in this codebase for your edification.
*/
// Passes `result` through unchanged; on failure prints the runtime error
// string to stderr and trips an assert (a no-op in NDEBUG builds).
inline hipError_t checkCuda(hipError_t result)
{
  if (result == hipSuccess)
    return result;
  fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
  assert(result == hipSuccess);
  return result;
}
// One thread per element: computes c[i] = 2*a[i] + b[i] for i < N.
// The guard handles the partial tail block when N is not a multiple of
// the block size.
__global__ void saxpy(float * a, float * b, float * c)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if ( tid >= N )
    return;
  c[tid] = (2 * a[tid]) + b[tid];
}
// Allocates three N-element float vectors in managed memory, prefetches
// them to the device, runs saxpy, and prints the first/last five results
// (every element should be 2*2 + 1 = 5).
int main()
{
  float *a, *b, *c;
  // BUGFIX: the vectors hold floats, so size them with sizeof(float); the
  // original used sizeof(int) — numerically equal on common ABIs, but the
  // wrong type and fragile.
  int size = N * sizeof (float); // The total number of bytes per vector
  // BUGFIX: check the allocation results with the checkCuda helper this
  // file already defines (the original ignored them).
  checkCuda(hipMallocManaged(&a, size));
  checkCuda(hipMallocManaged(&b, size));
  checkCuda(hipMallocManaged(&c, size));
  // Initialize memory
  for( int i = 0; i < N; ++i )
  {
    a[i] = 2;
    b[i] = 1;
    c[i] = 0;
  }
  /*
   * Tuning history: 128 threads/block with a plain ceil-div grid ran in
   * ~16.3 ms; rounding the grid up to a multiple of the SM count improved
   * that to ~15.7 ms; prefetching below brought it to ~69 us.
   */
  int deviceId;
  checkCuda(hipGetDevice(&deviceId));
  hipDeviceProp_t props;
  checkCuda(hipGetDeviceProperties(&props, deviceId));
  size_t threads_per_block = 256;
  size_t number_of_blocks;
  // Number of blocks needed to cover all N elements.
  int BlockNum = (N + threads_per_block - 1) / threads_per_block;
  // Round up to the closest multiple of the streaming-multiprocessor count.
  number_of_blocks = (((BlockNum - 1) / props.multiProcessorCount) + 1) * props.multiProcessorCount;
  // Prefetch all three managed buffers to the device so the kernel does not
  // page-fault on first touch.
  checkCuda(hipMemPrefetchAsync(a, size, deviceId));
  checkCuda(hipMemPrefetchAsync(b, size, deviceId));
  checkCuda(hipMemPrefetchAsync(c, size, deviceId));
  hipLaunchKernelGGL(( saxpy), dim3(number_of_blocks),dim3(threads_per_block), 0, 0, a, b, c);
  checkCuda(hipGetLastError());
  checkCuda(hipDeviceSynchronize());
  // Print out the first and last 5 values of c for a quality check
  for( int i = 0; i < 5; ++i )
    printf("c[%d] = %f, ", i, c[i]);
  printf ("\n");
  for( int i = N-5; i < N; ++i )
    printf("c[%d] = %f, ", i, c[i]);
  printf ("\n");
  checkCuda(hipFree( a ));
  checkCuda(hipFree( b ));
  checkCuda(hipFree( c ));
}
| a033d83fcceed1ac49ad98085c80b35d5c808ae7.cu | #include <stdio.h>
#include <assert.h>
#define N 2048 * 2048 // Number of elements in each vector
/*
* Optimize this already-accelerated codebase. Work iteratively,
* and use nsys to support your work.
*
* Aim to profile `saxpy` (without modifying `N`) running under
* 20us.
*
* Some bugs have been placed in this codebase for your edification.
*/
// Passes `result` through unchanged; on failure prints the runtime error
// string to stderr and trips an assert (a no-op in NDEBUG builds).
inline cudaError_t checkCuda(cudaError_t result)
{
  if (result == cudaSuccess)
    return result;
  fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
  assert(result == cudaSuccess);
  return result;
}
// One thread per element: computes c[i] = 2*a[i] + b[i] for i < N.
// The guard handles the partial tail block when N is not a multiple of
// the block size.
__global__ void saxpy(float * a, float * b, float * c)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if ( tid >= N )
    return;
  c[tid] = (2 * a[tid]) + b[tid];
}
// Allocates three N-element float vectors in managed memory, prefetches
// them to the device, runs saxpy, and prints the first/last five results
// (every element should be 2*2 + 1 = 5).
int main()
{
  float *a, *b, *c;
  // BUGFIX: the vectors hold floats, so size them with sizeof(float); the
  // original used sizeof(int) — numerically equal on common ABIs, but the
  // wrong type and fragile.
  int size = N * sizeof (float); // The total number of bytes per vector
  // BUGFIX: check the allocation results with the checkCuda helper this
  // file already defines (the original ignored them).
  checkCuda(cudaMallocManaged(&a, size));
  checkCuda(cudaMallocManaged(&b, size));
  checkCuda(cudaMallocManaged(&c, size));
  // Initialize memory
  for( int i = 0; i < N; ++i )
  {
    a[i] = 2;
    b[i] = 1;
    c[i] = 0;
  }
  /*
   * Tuning history: 128 threads/block with a plain ceil-div grid ran in
   * ~16.3 ms; rounding the grid up to a multiple of the SM count improved
   * that to ~15.7 ms; prefetching below brought it to ~69 us.
   */
  int deviceId;
  checkCuda(cudaGetDevice(&deviceId));
  cudaDeviceProp props;
  checkCuda(cudaGetDeviceProperties(&props, deviceId));
  size_t threads_per_block = 256;
  size_t number_of_blocks;
  // Number of blocks needed to cover all N elements.
  int BlockNum = (N + threads_per_block - 1) / threads_per_block;
  // Round up to the closest multiple of the streaming-multiprocessor count.
  number_of_blocks = (((BlockNum - 1) / props.multiProcessorCount) + 1) * props.multiProcessorCount;
  // Prefetch all three managed buffers to the device so the kernel does not
  // page-fault on first touch.
  checkCuda(cudaMemPrefetchAsync(a, size, deviceId));
  checkCuda(cudaMemPrefetchAsync(b, size, deviceId));
  checkCuda(cudaMemPrefetchAsync(c, size, deviceId));
  saxpy<<<number_of_blocks,threads_per_block>>>(a, b, c);
  checkCuda(cudaGetLastError());
  checkCuda(cudaDeviceSynchronize());
  // Print out the first and last 5 values of c for a quality check
  for( int i = 0; i < 5; ++i )
    printf("c[%d] = %f, ", i, c[i]);
  printf ("\n");
  for( int i = N-5; i < N; ++i )
    printf("c[%d] = %f, ", i, c[i]);
  printf ("\n");
  checkCuda(cudaFree( a ));
  checkCuda(cudaFree( b ));
  checkCuda(cudaFree( c ));
}
|
77152b3a9f7c67e1b30ce6ed3e5ebd739ffa6524.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include "lab1.h"
#include "PerlinNoise.h"
#include "particle.h"
#include <iostream>
// Output geometry and length: 640x480, 1440 frames (30 s at 48 fps).
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 1440;

// Private implementation: t counts the frames generated so far.
struct Lab1VideoGenerator::Impl {
    int t = 0;
};

Lab1VideoGenerator::Lab1VideoGenerator(): impl(new Impl) {
}

Lab1VideoGenerator::~Lab1VideoGenerator() {}

// Reports the output video parameters.
void Lab1VideoGenerator::get_info(Lab1VideoInfo &info) {
    info.w = W;
    info.h = H;
    info.n_frame = NFRAME;
    // fps = fps_n / fps_d = 48/1 = 48
    // (the original comment said 24 fps, but the values below are 48/1)
    info.fps_n = 48;
    info.fps_d = 1;
};
// 8-bit RGB pixel, zero-initialized to black.
struct RGB {
    uint8_t r = 0;
    uint8_t g = 0;
    uint8_t b = 0;
};

/*
Standard RGB -> YUV conversion used by mapRGB2YUV below:
  y =  0.299r + 0.587g + 0.114b
  u = -0.169r - 0.331g + 0.5b   + 128
  v =  0.5r   - 0.419g - 0.081b + 128
(only the luma row is actually computed; chroma is fixed at 128)
*/
// Converts a W*H RGB frame into a freshly allocated W*H*3/2-byte YUV
// buffer (caller owns the returned new[] allocation). The first W*H bytes
// receive the luma from the formula above; the remaining bytes are set to
// 128 — i.e. neutral chroma, so the output renders as grayscale.
uint8_t* mapRGB2YUV(RGB* arr){
    const int lumaCount  = W * H;
    const int totalCount = W * H * 3 / 2;
    uint8_t* out = new uint8_t[totalCount];
    for (int px = 0; px < lumaCount; ++px) {
        const RGB& pixel = arr[px];
        out[px] = 0.299 * pixel.r + 0.587 * pixel.g + 0.114 * pixel.b;
    }
    // Neutral chroma plane (U = V = 128).
    for (int px = lumaCount; px < totalCount; ++px) {
        out[px] = 128;
    }
    return out;
}
// Simulation state shared across frames (allocated once for the whole run).
int particle_num = 1000;                        // number of traced particles
unsigned int scl = 10;                          // flow-field cell size in pixels
double PI = 3.14159265359;
Particle* particles = new Particle[particle_num];
double* flowfield = new double[H/scl * W/scl];  // one angle per scl x scl cell
PerlinNoise pn(512);                            // noise source for the flow field
uint8_t* yuvArr = new uint8_t[W*H];             // persistent host-side luma plane
// Wraps a (possibly negative) luma index onto [0, W*H).
// BUGFIX: the original computed `idx % (W*H)` directly; W*H is unsigned, so
// a negative idx (particle on the top row / left column) was converted to a
// huge unsigned value and wrapped to an arbitrary pixel instead of the
// intended toroidal neighbour.
static inline int wrapLumaIndex(int idx)
{
    const int n = (int)(W * H);
    return ((idx % n) + n) % n;
}

// Darkens buf[idx] by amount, clamping at 0.
static inline void fadePixel(uint8_t *buf, int idx, int amount)
{
    if (buf[idx] < amount)
        buf[idx] = 0;
    else
        buf[idx] -= amount;
}

// Renders one frame: advects every particle through a Perlin-noise flow
// field and darkens the luma plane under it, then uploads the plane into
// the device buffer `yuv` (luma in yuv[0, W*H); chroma stays at 128).
void Lab1VideoGenerator::Generate(uint8_t *yuv) {
    if (impl->t == 0) {
        // First frame: white luma, neutral chroma, random particle starts.
        for (int i = 0; i < (int)(W * H); ++i) {
            yuvArr[i] = 255;
        }
        hipMemset(yuv + W * H, 128, W * H / 2);
        for (int i = 0; i < particle_num; ++i) {
            particles[i].setPosition(rand() % W, rand() % H);
        }
    }
    // Rebuild the flow field: one angle in [-8*PI, 8*PI] per cell, animated
    // over time through the noise function's third coordinate.
    unsigned int cell = 0;
    unsigned t = impl->t;
    for (unsigned int i = 0; i < H / scl; ++i) {
        for (unsigned int j = 0; j < W / scl; ++j) {
            double x = (double)j / W * scl;
            double y = (double)i / H * scl;
            double n = pn.noise(x, y, 0.0005 * (t + 1));
            flowfield[cell] = n * 8 * PI;
            ++cell;
        }
    }
    // 3x3 neighbourhood offsets around a particle and how much each pixel
    // is darkened (3 for centre/plus neighbours, 2 for diagonals) — same
    // per-pixel amounts as the original unrolled code.
    const int offs[9] = {0, 1, -1, (int)W, (int)W + 1, (int)W - 1,
                         -(int)W, -(int)W + 1, -(int)W - 1};
    const int amt[9] = {3, 3, 3, 3, 2, 2, 3, 2, 2};
    for (int i = 0; i < particle_num; ++i) {
        // NOTE(review): assumes Particle keeps its position inside
        // [0, W) x [0, H) — verify Particle::update() clamps, otherwise the
        // flow-field lookup below can index out of range.
        int x = particles[i].getX();
        int y = particles[i].getY();
        int kk = y * W + x;
        // Sample the flow-field cell under the particle and push it along
        // the field's angle with a fixed magnitude.
        int fx = x / scl;
        int fy = y / scl;
        int fkk = fy * (W / scl) + fx;
        double angle = flowfield[fkk];
        double mag = 5;
        particles[i].applyForce(cos(angle) * mag, sin(angle) * mag);
        particles[i].update();
        // Leave a trail at the pre-update position.
        for (int j = 0; j < 9; ++j) {
            fadePixel(yuvArr, wrapLumaIndex(kk + offs[j]), amt[j]);
        }
    }
    hipMemcpy(yuv, yuvArr, W * H, hipMemcpyHostToDevice);
    ++(impl->t);
}
| 77152b3a9f7c67e1b30ce6ed3e5ebd739ffa6524.cu | #include <cmath>
#include "lab1.h"
#include "PerlinNoise.h"
#include "particle.h"
#include <iostream>
// Output geometry and length: 640x480, 1440 frames (30 s at 48 fps).
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 1440;

// Private implementation: t counts the frames generated so far.
struct Lab1VideoGenerator::Impl {
    int t = 0;
};

Lab1VideoGenerator::Lab1VideoGenerator(): impl(new Impl) {
}

Lab1VideoGenerator::~Lab1VideoGenerator() {}

// Reports the output video parameters.
void Lab1VideoGenerator::get_info(Lab1VideoInfo &info) {
    info.w = W;
    info.h = H;
    info.n_frame = NFRAME;
    // fps = fps_n / fps_d = 48/1 = 48
    // (the original comment said 24 fps, but the values below are 48/1)
    info.fps_n = 48;
    info.fps_d = 1;
};
// 8-bit RGB pixel, zero-initialized to black.
struct RGB {
    uint8_t r = 0;
    uint8_t g = 0;
    uint8_t b = 0;
};

/*
Standard RGB -> YUV conversion used by mapRGB2YUV below:
  y =  0.299r + 0.587g + 0.114b
  u = -0.169r - 0.331g + 0.5b   + 128
  v =  0.5r   - 0.419g - 0.081b + 128
(only the luma row is actually computed; chroma is fixed at 128)
*/
// Converts a W*H RGB frame into a freshly allocated W*H*3/2-byte YUV
// buffer (caller owns the returned new[] allocation). The first W*H bytes
// receive the luma from the formula above; the remaining bytes are set to
// 128 — i.e. neutral chroma, so the output renders as grayscale.
uint8_t* mapRGB2YUV(RGB* arr){
    const int lumaCount  = W * H;
    const int totalCount = W * H * 3 / 2;
    uint8_t* out = new uint8_t[totalCount];
    for (int px = 0; px < lumaCount; ++px) {
        const RGB& pixel = arr[px];
        out[px] = 0.299 * pixel.r + 0.587 * pixel.g + 0.114 * pixel.b;
    }
    // Neutral chroma plane (U = V = 128).
    for (int px = lumaCount; px < totalCount; ++px) {
        out[px] = 128;
    }
    return out;
}
// Simulation state shared across frames (allocated once for the whole run).
int particle_num = 1000;                        // number of traced particles
unsigned int scl = 10;                          // flow-field cell size in pixels
double PI = 3.14159265359;
Particle* particles = new Particle[particle_num];
double* flowfield = new double[H/scl * W/scl];  // one angle per scl x scl cell
PerlinNoise pn(512);                            // noise source for the flow field
uint8_t* yuvArr = new uint8_t[W*H];             // persistent host-side luma plane
// Wraps a (possibly negative) luma index onto [0, W*H).
// BUGFIX: the original computed `idx % (W*H)` directly; W*H is unsigned, so
// a negative idx (particle on the top row / left column) was converted to a
// huge unsigned value and wrapped to an arbitrary pixel instead of the
// intended toroidal neighbour.
static inline int wrapLumaIndex(int idx)
{
    const int n = (int)(W * H);
    return ((idx % n) + n) % n;
}

// Darkens buf[idx] by amount, clamping at 0.
static inline void fadePixel(uint8_t *buf, int idx, int amount)
{
    if (buf[idx] < amount)
        buf[idx] = 0;
    else
        buf[idx] -= amount;
}

// Renders one frame: advects every particle through a Perlin-noise flow
// field and darkens the luma plane under it, then uploads the plane into
// the device buffer `yuv` (luma in yuv[0, W*H); chroma stays at 128).
void Lab1VideoGenerator::Generate(uint8_t *yuv) {
    if (impl->t == 0) {
        // First frame: white luma, neutral chroma, random particle starts.
        for (int i = 0; i < (int)(W * H); ++i) {
            yuvArr[i] = 255;
        }
        cudaMemset(yuv + W * H, 128, W * H / 2);
        for (int i = 0; i < particle_num; ++i) {
            particles[i].setPosition(rand() % W, rand() % H);
        }
    }
    // Rebuild the flow field: one angle in [-8*PI, 8*PI] per cell, animated
    // over time through the noise function's third coordinate.
    unsigned int cell = 0;
    unsigned t = impl->t;
    for (unsigned int i = 0; i < H / scl; ++i) {
        for (unsigned int j = 0; j < W / scl; ++j) {
            double x = (double)j / W * scl;
            double y = (double)i / H * scl;
            double n = pn.noise(x, y, 0.0005 * (t + 1));
            flowfield[cell] = n * 8 * PI;
            ++cell;
        }
    }
    // 3x3 neighbourhood offsets around a particle and how much each pixel
    // is darkened (3 for centre/plus neighbours, 2 for diagonals) — same
    // per-pixel amounts as the original unrolled code.
    const int offs[9] = {0, 1, -1, (int)W, (int)W + 1, (int)W - 1,
                         -(int)W, -(int)W + 1, -(int)W - 1};
    const int amt[9] = {3, 3, 3, 3, 2, 2, 3, 2, 2};
    for (int i = 0; i < particle_num; ++i) {
        // NOTE(review): assumes Particle keeps its position inside
        // [0, W) x [0, H) — verify Particle::update() clamps, otherwise the
        // flow-field lookup below can index out of range.
        int x = particles[i].getX();
        int y = particles[i].getY();
        int kk = y * W + x;
        // Sample the flow-field cell under the particle and push it along
        // the field's angle with a fixed magnitude.
        int fx = x / scl;
        int fy = y / scl;
        int fkk = fy * (W / scl) + fx;
        double angle = flowfield[fkk];
        double mag = 5;
        particles[i].applyForce(cos(angle) * mag, sin(angle) * mag);
        particles[i].update();
        // Leave a trail at the pre-update position.
        for (int j = 0; j < 9; ++j) {
            fadePixel(yuvArr, wrapLumaIndex(kk + offs[j]), amt[j]);
        }
    }
    cudaMemcpy(yuv, yuvArr, W * H, cudaMemcpyHostToDevice);
    ++(impl->t);
}
|
85ba7a710e11a92ff65269d79aad0cd89a0d33f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// LocalCluster.cu
//
#include "LocalCluster.h"
#include "Image.h"
#include "ErrorCode.h"
#include <iostream>
#include <cmath>
#include "stdio.h"
using namespace std;
// DEF_BLOCK_X DEF_BLOCK_Y DEF_BLOCK_Z
//
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 1
#define DEF_BLOCK_Z 8
// Device
//
const int static __device__ _idxDev[8][2] = {
// [0][ ], [1][ ]
{ 1, 0}, { 1, -1},
// [2][ ], [3][ ]
{ 0, -1}, {-1, -1},
// [4][ ], [5][ ]
{-1, 0}, {-1, 1},
// [6][ ], [7][ ]
{ 0, 1}, { 1, 1}
};
// Host helper _adjustRoiSize: clamps the two ROI sub-images to their common
// width and height so input and output cover exactly the same region.
static __host__ void
_adjustRoiSize(
        ImageCuda *inimg,   // input sub-image
        ImageCuda *outimg   // output sub-image
);

// Kernel _localCluKer: local-clustering smoothing filter.
// For each pixel inside [sx, ex] x [sy, ey] whose value lies strictly
// between problack and prowhite, eight threads (threadIdx.z = 0..7) each
// walk up to pntrange steps along one of the 8 directions in _idxDev,
// accumulating pixel values while the centre-jump (gapthred) and
// side-difference (diffethred) thresholds hold. The per-direction means are
// collected in dynamic shared memory, and the pntCount means closest to the
// centre value are averaged into the output pixel. Pixels outside the
// region or thresholds are copied through unchanged.
static __global__ void
_localCluKer(
        ImageCuda inimg,          // input image
        ImageCuda outimg,         // output image
        const int pntrange,       // maximum walk length per direction
                                  // (typical reference value: 100)
        unsigned char gapthred,   // max allowed difference between the
                                  // centre pixel and a walked pixel
        unsigned char diffethred, // max allowed difference between the two
                                  // running side sums while walking
        unsigned char problack,   // values <= this count as black and are
                                  // copied through (e.g. 0)
        unsigned char prowhite,   // values >= this count as white and are
                                  // copied through (e.g. 250)
        int pntCount,             // how many of the 8 per-direction means
                                  // (held in shared memory) are averaged;
                                  // must be in (0, 8]
        int sx,                   // processed region: left bound (incl.)
        int ex,                   // processed region: right bound (incl.)
        int sy,                   // processed region: top bound (incl.)
        int ey                    // processed region: bottom bound (incl.)
);
// Kernel _localCluKer: local-clustering smoothing (see the declaration
// comment for the full parameter contract).
static __global__ void _localCluKer(
        ImageCuda inimg, ImageCuda outimg, const int pntrange,
        unsigned char gapthred, unsigned char diffethred,
        unsigned char problack, unsigned char prowhite,
        int pntcount, int sx, int ex, int sy, int ey)
{
    // Dynamic shared memory: 8 floats per in-block pixel, one slot per
    // walk direction (sized by the host at launch time).
    extern __shared__ float temp[];
    // x/y locate the pixel; z (threadIdx.z = 0..7) selects the direction.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;
    // This thread's slot in the shared-memory scratch area.
    int idx = (threadIdx.y * blockDim.x + threadIdx.x) * 8 + z;
    float *curtemp = temp + idx;
    // Threads outside the image quit immediately.
    // NOTE(review): threads that return here or in the copy-through branch
    // below never reach the __syncthreads() further down; this relies on
    // all 8 z-threads of a pixel diverging together and on the hardware
    // tolerating a partial barrier — verify on the target architecture.
    if (x >= inimg.imgMeta.width || y >= inimg.imgMeta.height)
        return;
    // Load the centre pixel value.
    int idxcv;
    unsigned char cv;
    idxcv = y * inimg.pitchBytes + x;
    cv = inimg.imgMeta.imgData[idxcv];
    // Outside the processed region, or near-black / near-white pixels:
    // copy the input value straight to the output and stop.
    if (x < sx || x > ex || y < sy || y > ey ||
        cv <= problack || cv >= prowhite) {
        outimg.imgMeta.imgData[idxcv] = cv;
        return;
    }
    // Walk state: cuv is the current pixel on the walk.
    unsigned char cuv;
    int idxcuv = idxcv;
    // Running sums of the two "sides" used by the difference test.
    float side1, side2;
    // sum accumulates the walked pixel values (starting from the centre);
    // i counts how many values sum holds when the loop ends.
    float sum = cv;
    int i = 0;
    // dx/dy track the look-ahead position two steps out along direction z.
    int dx = x, dy = y;
    // First neighbour along direction z.
    idxcuv = (y + _idxDev[z][1]) * inimg.pitchBytes +
             (x + _idxDev[z][0]);
    cuv = inimg.imgMeta.imgData[idxcuv];
    // pre1/pre2: the two pixels trailing the walk front (opposite
    // neighbour and centre at the start).
    // NOTE(review): these neighbour reads are unguarded; they assume
    // sx/ex/sy/ey keep the walk at least pntrange pixels inside the image —
    // confirm in the host wrapper.
    unsigned char pre1, pre2;
    pre1 = inimg.imgMeta.imgData[(y - _idxDev[z][1]) * inimg.pitchBytes +
                                 (x - _idxDev[z][0])];
    pre2 = cv;
    // latt1/latt2: the two pixels leading the walk front.
    unsigned char latt1, latt2;
    latt1 = inimg.imgMeta.imgData[_idxDev[z][0] + idxcuv +
                                  _idxDev[z][1] * inimg.pitchBytes];
    // Walk up to pntrange pixels along direction z.
    for (i = 1; i < pntrange; i++) {
        // Stop when the value jumps too far from the centre value.
        if (abs((float)cv -(float) cuv) > gapthred)
            break;
        // Advance the look-ahead position by two pixels and fetch the next
        // leading value.
        dx += _idxDev[z][0] * 2;
        dy += _idxDev[z][1] * 2;
        int idxsid;
        idxsid = dy * inimg.pitchBytes + dx;
        latt2 = inimg.imgMeta.imgData[idxsid];
        side1 = pre1 + pre2;
        side2 = latt1 + latt2;
        // Stop when the trailing and leading pair sums differ too much.
        if (abs(side1 - side2) > diffethred)
            break;
        // Slide the four-pixel window one step forward.
        pre1 = pre2;
        pre2 = cuv;
        cuv = latt1;
        latt1 = latt2;
        // Accumulate the newly accepted value.
        sum += (float)cuv;
    }
    // Publish this direction's mean (sum holds exactly i values here).
    *curtemp = sum / i;
    // Wait until all 8 direction threads of this pixel have written.
    __syncthreads();
    // Only the z == 0 thread performs the reduction for this pixel.
    if (z != 0)
        return;
    float sumag = 0.0f;
    curtemp = &(temp[(threadIdx.y * blockDim.x + threadIdx.x) * 8]);
    // Replace the (8 - pntcount) means farthest from the centre value with
    // cv itself, so that after summing all 8 slots and subtracting
    // (8 - pntcount)*cv only the pntcount closest means remain.
    int mark = 0;
    for (int j = 8 - pntcount; j > 0; j--) {
        for (int i = 0; i < 8; i++) {
            if (abs(curtemp[i] - cv) >= abs(curtemp[mark] - cv) )
                mark = i;
        }
        curtemp[mark] = cv;
    }
    for (int j = 0; j < 8; j ++)
        sumag += curtemp[j];
    sumag -= (8 - pntcount) * cv;
    // Write the clamped average of the retained direction means.
    outimg.imgMeta.imgData[idxcv] =
        (sumag / pntcount > 255) ? 255 :
        (unsigned char)(sumag / pntcount);
}
// Host function: _adjustRoiSize (unify the ROI sizes of the two images)
// Shrinks both images' logical width and height to the common minimum so
// that the input and output ROI sub-images cover exactly the same area.
inline static __host__ void _adjustRoiSize(ImageCuda *inimg,
                                           ImageCuda *outimg)
{
    // Clamp both widths to the smaller of the two.
    int minwidth = (inimg->imgMeta.width < outimg->imgMeta.width) ?
                   inimg->imgMeta.width : outimg->imgMeta.width;
    inimg->imgMeta.width = minwidth;
    outimg->imgMeta.width = minwidth;

    // Clamp both heights to the smaller of the two.
    int minheight = (inimg->imgMeta.height < outimg->imgMeta.height) ?
                    inimg->imgMeta.height : outimg->imgMeta.height;
    inimg->imgMeta.height = minheight;
    outimg->imgMeta.height = minheight;
}
// Host member method: localCluster (perform local clustering on an image)
// Copies both images to the current device (creating the output from the
// input ROI size if it has no device data yet), extracts and size-matches
// their ROI sub-images, then launches the _localCluKer kernel.
// Returns NO_ERROR on success or the first error code encountered.
__host__ int LocalCluster::localCluster(Image *inimg, Image *outimg)
{
    // Reject NULL images up front.  (Fix: the output image was previously
    // not checked, so a NULL outimg only failed later inside the image
    // operations.)
    if (inimg == NULL || outimg == NULL || inimg->imgData == NULL)
        return NULL_POINTER;

    // Size of the input image's ROI region; used when the output image has
    // to be created from scratch.
    int imgroix = inimg->roiX2 - inimg->roiX1;
    int imgroiy = inimg->roiY2 - inimg->roiY1;

    // Copy the input image to the current device.
    int errcode;
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Copy the output image to the current device; when it holds no data
    // the copy fails and an image matching the input ROI is created.
    errcode = ImageBasicOp::copyToCurrentDevice(outimg);
    if (errcode != NO_ERROR) {
        errcode = ImageBasicOp::makeAtCurrentDevice(
                outimg, imgroix, imgroiy);
        // If creating the image also fails, report the error.
        if (errcode != NO_ERROR)
            return errcode;
    }

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the output image.
    ImageCuda outsubimgCud;
    errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Make both ROI sub-images the same size.
    _adjustRoiSize(&insubimgCud, &outsubimgCud);

    // Kernel launch configuration; blockDim.z covers the 8 directions.
    dim3 blockdim;
    dim3 griddim;
    blockdim.x = DEF_BLOCK_X;
    blockdim.y = DEF_BLOCK_Y;
    blockdim.z = DEF_BLOCK_Z;
    griddim.x = (insubimgCud.imgMeta.width + blockdim.x - 1) /
            blockdim.x;
    griddim.y = (insubimgCud.imgMeta.height + blockdim.y - 1) /
            blockdim.y;
    griddim.z = 1;

    // Processing bounds: pixels outside [sx, ex] x [sy, ey] keep their
    // original value (the kernel may read pntRange + 2 pixels outwards).
    int sx, ex, sy, ey;
    sx = this->pntRange + 2;
    ex = insubimgCud.imgMeta.width - this->pntRange - 2;
    sy = this->pntRange + 2;
    ey = insubimgCud.imgMeta.height - this->pntRange - 2;

    // Dynamic shared memory: one float per (pixel, direction) pair.
    int size = DEF_BLOCK_X * DEF_BLOCK_Y *
            DEF_BLOCK_Z * sizeof (float);

    // Launch the kernel.
    hipLaunchKernelGGL(( _localCluKer), dim3(griddim), dim3(blockdim), size, 0,
            insubimgCud,outsubimgCud,
            this->pntRange, this->gapThred, this->diffeThred,
            this->proBlack, this->proWhite, this->pntCount,
            sx, ex, sy, ey);

    // Report kernel launch failures.
    if (hipGetLastError() != hipSuccess)
        return CUDA_ERROR;

    // Success.
    return NO_ERROR;
}
| 85ba7a710e11a92ff65269d79aad0cd89a0d33f9.cu | // LocalCluster.cu
// 局部聚类
#include "LocalCluster.h"
#include "Image.h"
#include "ErrorCode.h"
#include <iostream>
#include <cmath>
#include "stdio.h"
using namespace std;
// Macros: DEF_BLOCK_X, DEF_BLOCK_Y and DEF_BLOCK_Z
// Default thread-block dimensions (z covers the 8 directions).
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 1
#define DEF_BLOCK_Z 8

// Device global constant:
// Offset table used to derive the coordinates of neighbouring points;
// row z holds the (dx, dy) unit step of direction z.
const int static __device__ _idxDev[8][2] = {
    // [0][ ],  [1][ ]
    { 1, 0}, { 1, -1},
    // [2][ ],  [3][ ]
    { 0, -1}, {-1, -1},
    // [4][ ],  [5][ ]
    {-1, 0}, {-1, 1},
    // [6][ ],  [7][ ]
    { 0, 1}, { 1, 1}
};

// Host function: _adjustRoiSize (adjust the sizes of the ROI sub-images)
// Unifies the ROI sizes of the input and output images.
static __host__ void      // no return value
_adjustRoiSize(
        ImageCuda *inimg,   // input image
        ImageCuda *outimg   // output image
);

// Kernel function: _localCluKer (local clustering of every pixel)
// Given an image, skip the border band; in each of the 8 directions of a
// pixel average up to pntRange points (per the serial reference
// implementation pntRange does not exceed 100) and store the averages in
// the shared array temp.  By comparing each directional average with the
// current pixel value, the pntCount closest values (pntCount does not
// exceed 8) are averaged to produce the new pixel value.
static __global__ void      // kernel, no return value
_localCluKer(
        ImageCuda inimg,           // image to process
        ImageCuda outimg,          // output image
        const int pntrange,        // number of points examined in each of
                                   // the 8 directions; per the serial
                                   // reference code, at most 100
        unsigned char gapthred,    // threshold on the gray difference
                                   // between the centre and a neighbour
        unsigned char diffethred,  // threshold on the difference of the
                                   // two-pixel sums on either side
        unsigned char problack,    // "almost black" level, default 0
        unsigned char prowhite,    // "almost white" level, default 250
        int pntCount,              // number of directional averages kept
                                   // when combining; per the serial
                                   // reference code, at most 8
        int sx,                    // pixels with x < sx keep their value
        int ex,                    // pixels with x > ex keep their value
        int sy,                    // pixels with y < sy keep their value
        int ey                     // pixels with y > ey keep their value
);
// Kernel function: _localCluKer (local clustering of every pixel)
static __global__ void _localCluKer(
        ImageCuda inimg, ImageCuda outimg, const int pntrange,
        unsigned char gapthred, unsigned char diffethred,
        unsigned char problack, unsigned char prowhite,
        int pntcount, int sx, int ex, int sy, int ey)
{
    // Dynamic (extern) shared array holding, for every pixel of the block,
    // the average computed in each of its 8 directions.
    extern __shared__ float temp[];

    // Pixel coordinates (x, y) handled by this thread;
    // z selects the processing direction.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;

    // Shared-memory slot used by this thread.
    int idx = (threadIdx.y * blockDim.x + threadIdx.x) * 8 + z;
    float *curtemp = temp + idx;

    // Skip out-of-range pixels: saves work and prevents out-of-bounds
    // accesses.
    // NOTE(review): threads returning here (and at the pass-through return
    // below) never reach the __syncthreads() further down, making it a
    // divergent barrier for blocks that straddle the image border --
    // confirm this is safe on the targeted architectures.
    if (x >= inimg.imgMeta.width || y >= inimg.imgMeta.height)
        return;

    // Linear index and gray value of the current (centre) pixel.
    int idxcv;
    unsigned char cv;
    idxcv = y * inimg.pitchBytes + x;
    cv = inimg.imgMeta.imgData[idxcv];

    // Pixels in the border band and pixels that are almost pure black or
    // almost pure white keep their original value.
    if (x < sx || x > ex || y < sy || y > ey ||
        cv <= problack || cv >= prowhite) {
        outimg.imgMeta.imgData[idxcv] = cv;
        return;
    }

    // Pixel currently visited while walking in direction z.
    // Note: the visited pixel differs from the centre pixel; it lies along
    // one of the centre pixel's 8 directions.
    unsigned char cuv;
    int idxcuv = idxcv;

    // Sum of the two pixels on either side of the visited pixel.
    float side1, side2;

    // sum accumulates up to pntrange accepted pixel values;
    // i counts the accumulated samples.
    float sum = cv;
    int i = 0;

    // Coordinates used while stepping along the direction.
    int dx = x, dy = y;

    // Fetch the first neighbour in direction z.
    idxcuv = (y + _idxDev[z][1]) * inimg.pitchBytes +
             (x + _idxDev[z][0]);
    cuv = inimg.imgMeta.imgData[idxcuv];

    // The two pixels behind the first visited pixel in direction z.
    unsigned char pre1, pre2;
    pre1 = inimg.imgMeta.imgData[(y - _idxDev[z][1]) * inimg.pitchBytes +
                                 (x - _idxDev[z][0])];
    pre2 = cv;

    // The two pixels ahead of the visited pixel in direction z.
    unsigned char latt1, latt2;
    latt1 = inimg.imgMeta.imgData[_idxDev[z][0] + idxcuv +
                                  _idxDev[z][1] * inimg.pitchBytes];

    // Walk at most pntrange pixels along direction z.
    for (i = 1; i < pntrange; i++) {
        // Stop accumulating in this direction when the visited pixel
        // differs from the centre by more than gapthred.
        if (abs((float)cv -(float) cuv) > gapthred)
            break;

        // Two pixels are taken on each side:
        // compute the index of the second pixel ahead of the visited
        // pixel and fetch its value.
        dx += _idxDev[z][0] * 2;
        dy += _idxDev[z][1] * 2;
        int idxsid;
        idxsid = dy * inimg.pitchBytes + dx;

        // Fetch the second pixel ahead and form the two side sums.
        latt2 = inimg.imgMeta.imgData[idxsid];
        side1 = pre1 + pre2;
        side2 = latt1 + latt2;

        // Stop accumulating when side1 and side2 differ by more than
        // diffethred.
        if (abs(side1 - side2) > diffethred)
            break;

        // Update pre1, pre2, cuv, latt1 for the next step: the window
        // slides one pixel forward along the direction.
        pre1 = pre2;
        pre2 = cuv;
        cuv = latt1;
        latt1 = latt2;

        // Accumulate the accepted pixel value into sum.
        sum += (float)cuv;
    }

    // Average of the accepted pixels in this direction.
    *curtemp = sum / i;

    // Synchronize the threads of the block.
    __syncthreads();

    // The remaining work per pixel needs a single thread; the z == 0
    // thread is chosen, all others return.
    if (z != 0)
        return;
    float sumag = 0.0f;
    curtemp = &(temp[(threadIdx.y * blockDim.x + threadIdx.x) * 8]);

    // Select the pntCount directional averages closest to the centre
    // value.  Instead of sorting, the farthest entry is repeatedly found
    // and overwritten with cv (a dummy value), 8 - pntCount times; the
    // remaining entries are then averaged.
    int mark = 0;
    for (int j = 8 - pntcount; j > 0; j--) {
        for (int i = 0; i < 8; i++) {
            if (abs(curtemp[i] - cv) >= abs(curtemp[mark] - cv) )
                mark = i;
        }
        curtemp[mark] = cv;
    }
    for (int j = 0; j < 8; j ++)
        sumag += curtemp[j];

    // The dummy value was cv (i.e. 8 - pntcount extra copies of cv were
    // added), so subtract them here.
    sumag -= (8 - pntcount) * cv;

    // Clamp the result and write it to the output image.
    outimg.imgMeta.imgData[idxcv] =
            (sumag / pntcount > 255) ? 255 :
            (unsigned char)(sumag / pntcount);
}
// Host function: _adjustRoiSize (adjust the ROI sizes of the two images)
// Shrinks both images' width and height to the common minimum so the
// input and output ROI sub-images cover exactly the same area.
inline static __host__ void _adjustRoiSize(ImageCuda *inimg,
                                           ImageCuda *outimg)
{
    // Keep the smaller of the two widths on both images.
    if (inimg->imgMeta.width > outimg->imgMeta.width)
        inimg->imgMeta.width = outimg->imgMeta.width;
    else
        outimg->imgMeta.width = inimg->imgMeta.width;

    // Keep the smaller of the two heights on both images.
    if (inimg->imgMeta.height > outimg->imgMeta.height)
        inimg->imgMeta.height = outimg->imgMeta.height;
    else
        outimg->imgMeta.height = inimg->imgMeta.height;
}
// Host member method: localCluster (local clustering)
// Copies both images to the current device, extracts and size-matches
// their ROI sub-images and launches the _localCluKer kernel.
__host__ int LocalCluster::localCluster(Image *inimg, Image *outimg)
{
    // Check whether the input image is NULL.
    // NOTE(review): outimg is not checked here; a NULL output image would
    // only fail later inside the image operations -- confirm intended.
    if (inimg == NULL || inimg->imgData == NULL)
        return NULL_POINTER;

    // Size of the ROI region of the input image.
    int imgroix = inimg->roiX2 - inimg->roiX1;
    int imgroiy = inimg->roiY2 - inimg->roiY1;

    // Copy the input image to the current device.
    int errcode;
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Copy outimg to the current device.
    errcode = ImageBasicOp::copyToCurrentDevice(outimg);
    // If the output image holds no data (so the copy above failed),
    // create an image matching the input ROI size instead.
    if (errcode != NO_ERROR) {
        errcode = ImageBasicOp::makeAtCurrentDevice(
                outimg, imgroix, imgroiy);
        // If creating the image also fails, report the error.
        if (errcode != NO_ERROR)
            return errcode;
    }

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the output image.
    ImageCuda outsubimgCud;
    errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Unify the sizes of the two ROI sub-images.
    _adjustRoiSize(&insubimgCud, &outsubimgCud);

    // Thread layout for the kernel.
    dim3 blockdim;
    dim3 griddim;
    blockdim.x = DEF_BLOCK_X;
    blockdim.y = DEF_BLOCK_Y;
    blockdim.z = DEF_BLOCK_Z;
    griddim.x = (insubimgCud.imgMeta.width + blockdim.x - 1) /
            blockdim.x;
    griddim.y = (insubimgCud.imgMeta.height + blockdim.y - 1) /
            blockdim.y;
    griddim.z = 1;

    // Processing bounds: pixels outside [sx, ex] x [sy, ey] keep their
    // original value.
    int sx, ex, sy, ey;
    sx = this->pntRange + 2;
    ex = insubimgCud.imgMeta.width - this->pntRange - 2;
    sy = this->pntRange + 2;
    ey = insubimgCud.imgMeta.height - this->pntRange - 2;

    // Dynamic shared-memory size required by the kernel.
    int size = DEF_BLOCK_X * DEF_BLOCK_Y *
            DEF_BLOCK_Z * sizeof (float);

    // Launch the kernel.
    _localCluKer<<<griddim, blockdim, size>>>(
            insubimgCud,outsubimgCud,
            this->pntRange, this->gapThred, this->diffeThred,
            this->proBlack, this->proWhite, this->pntCount,
            sx, ex, sy, ey);

    // Return an error code if the CUDA calls failed.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;

    // Done.
    return NO_ERROR;
}
|
c0795859126e192b85021e6a59beb28b950db490.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mg_louvain_helper.hpp"
#include <experimental/graph.hpp>
#include <utilities/device_comm.cuh>
#include <utilities/error.hpp>
#include <utilities/host_scalar_comm.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/for_each.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
namespace cugraph {
namespace test {
// Gathers a device vector that is distributed over all ranks onto rank 0.
// d_input/size describe this rank's local chunk; on rank 0 the returned
// vector holds the concatenation of every rank's chunk, ordered by rank.
// NOTE(review): on non-root ranks the contents of rx_sizes (and hence the
// size/contents of the returned vector) depend on host_scalar_gather's
// behaviour for non-root callers -- only the rank-0 result should be used.
template <typename T>
rmm::device_uvector<T> gather_distributed_vector(raft::handle_t const &handle,
                                                 T const *d_input,
                                                 size_t size)
{
  // Collect every rank's chunk size (on rank 0).
  auto rx_sizes =
    cugraph::experimental::host_scalar_gather(handle.get_comms(), size, 0, handle.get_stream());
  // Receive displacements (exclusive prefix sums of the chunk sizes);
  // only rank 0 allocates and fills them.
  std::vector<size_t> rx_displs(static_cast<size_t>(handle.get_comms().get_rank()) == 0
                                  ? handle.get_comms().get_size()
                                  : int{0},
                                size_t{0});

  if (static_cast<size_t>(handle.get_comms().get_rank()) == 0) {
    std::partial_sum(rx_sizes.begin(), rx_sizes.end() - 1, rx_displs.begin() + 1);
  }

  // Total number of gathered elements.
  auto total_size = thrust::reduce(thrust::host, rx_sizes.begin(), rx_sizes.end());
  rmm::device_uvector<T> gathered_v(total_size, handle.get_stream());

  // Device-side gather of all chunks to rank 0.
  cugraph::experimental::device_gatherv(handle.get_comms(),
                                        d_input,
                                        gathered_v.data(),
                                        size,
                                        rx_sizes,
                                        rx_displs,
                                        0,
                                        handle.get_stream());

  return gathered_v;
}
// Checks whether two renumbered vertex vectors are consistent, i.e.
// whether a single mapping m with m[v1[i]] == v2[i] holds for every
// position i.  Returns true when no position contradicts the mapping.
template <typename vertex_t>
bool compare_renumbered_vectors(raft::handle_t const &handle,
                                rmm::device_uvector<vertex_t> const &v1,
                                rmm::device_uvector<vertex_t> const &v2)
{
  // Size of the lookup table: one slot per possible id in v1.
  // (Fix: use thrust::maximum as the reduction operator -- as coarsen_graph
  // below already does -- instead of the default plus, which summed all the
  // ids, grossly over-allocating the table and risking overflow.)
  vertex_t max = 1 + thrust::reduce(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                                    v1.begin(),
                                    v1.end(),
                                    vertex_t{0},
                                    thrust::maximum<vertex_t>());

  rmm::device_uvector<size_t> map(max, size_t{0});

  // Record the mapping v1[i] -> v2[i]; if positions disagree, the last
  // writer wins and the count below reports the mismatch.
  auto iter = thrust::make_zip_iterator(thrust::make_tuple(v1.begin(), v2.begin()));

  thrust::for_each(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                   iter,
                   iter + v1.size(),
                   [d_map = map.data()] __device__(auto pair) {
                     vertex_t e1 = thrust::get<0>(pair);
                     vertex_t e2 = thrust::get<1>(pair);
                     d_map[e1]   = e2;
                   });

  // Count positions whose pair contradicts the recorded mapping.
  auto error_count =
    thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                     iter,
                     iter + v1.size(),
                     [d_map = map.data()] __device__(auto pair) {
                       vertex_t e1 = thrust::get<0>(pair);
                       vertex_t e2 = thrust::get<1>(pair);
                       return (d_map[e1] != e2);
                     });

  return (error_count == 0);
}
// Relabels an edge list in place given a gathered renumber map, where
// renumber_map_gathered_v[new_id] == old_id: the map is first inverted
// on the device and every endpoint's old id is replaced by its new id.
template <typename T>
void single_gpu_renumber_edgelist_given_number_map(raft::handle_t const &handle,
                                                   rmm::device_uvector<T> &edgelist_rows_v,
                                                   rmm::device_uvector<T> &edgelist_cols_v,
                                                   rmm::device_uvector<T> &renumber_map_gathered_v)
{
  auto stream = handle.get_stream();

  // Build the inverse map: old_to_new[old_id] = new_id.
  rmm::device_uvector<T> old_to_new_v(renumber_map_gathered_v.size(), stream);
  thrust::for_each(
    rmm::exec_policy(stream)->on(stream),
    thrust::make_counting_iterator<size_t>(0),
    thrust::make_counting_iterator<size_t>(renumber_map_gathered_v.size()),
    [d_gathered = renumber_map_gathered_v.data(),
     d_old_to_new = old_to_new_v.data()] __device__(auto new_id) {
      d_old_to_new[d_gathered[new_id]] = new_id;
    });

  // Rewrite the source endpoints through the inverse map.
  thrust::transform(rmm::exec_policy(stream)->on(stream),
                    edgelist_rows_v.begin(),
                    edgelist_rows_v.end(),
                    edgelist_rows_v.begin(),
                    [d_old_to_new = old_to_new_v.data()] __device__(auto old_id) {
                      return d_old_to_new[old_id];
                    });

  // Rewrite the destination endpoints through the inverse map.
  thrust::transform(rmm::exec_policy(stream)->on(stream),
                    edgelist_cols_v.begin(),
                    edgelist_cols_v.end(),
                    edgelist_cols_v.begin(),
                    [d_old_to_new = old_to_new_v.data()] __device__(auto old_id) {
                      return d_old_to_new[old_id];
                    });
}
// Expands a CSR/CSC slice covering majors [major_first, major_last) into a
// COO edge list (majors, minors, optional weights).  When
// compressed_sparse_weights is nullptr the returned weights vector is
// empty.
template <typename vertex_t, typename edge_t, typename weight_t>
std::
  tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, rmm::device_uvector<weight_t>>
  compressed_sparse_to_edgelist(edge_t const *compressed_sparse_offsets,
                                vertex_t const *compressed_sparse_indices,
                                weight_t const *compressed_sparse_weights,
                                vertex_t major_first,
                                vertex_t major_last,
                                hipStream_t stream)
{
  // The last offset is the total edge count of this slice.
  edge_t number_of_edges{0};
  raft::update_host(
    &number_of_edges, compressed_sparse_offsets + (major_last - major_first), 1, stream);
  CUDA_TRY(hipStreamSynchronize(stream));
  rmm::device_uvector<vertex_t> edgelist_major_vertices(number_of_edges, stream);
  rmm::device_uvector<vertex_t> edgelist_minor_vertices(number_of_edges, stream);
  rmm::device_uvector<weight_t> edgelist_weights(
    compressed_sparse_weights != nullptr ? number_of_edges : 0, stream);

  // FIXME: this is highly inefficient for very high-degree vertices, for better performance, we can
  // fill high-degree vertices using one CUDA block per vertex, mid-degree vertices using one CUDA
  // warp per vertex, and low-degree vertices using one CUDA thread per block
  // Fill the major endpoint for every edge of vertex v.
  thrust::for_each(rmm::exec_policy(stream)->on(stream),
                   thrust::make_counting_iterator(major_first),
                   thrust::make_counting_iterator(major_last),
                   [compressed_sparse_offsets,
                    major_first,
                    p_majors = edgelist_major_vertices.begin()] __device__(auto v) {
                     auto first = compressed_sparse_offsets[v - major_first];
                     auto last  = compressed_sparse_offsets[v - major_first + 1];
                     thrust::fill(thrust::seq, p_majors + first, p_majors + last, v);
                   });
  // Minors are simply the CSR indices.
  thrust::copy(rmm::exec_policy(stream)->on(stream),
               compressed_sparse_indices,
               compressed_sparse_indices + number_of_edges,
               edgelist_minor_vertices.begin());
  if (compressed_sparse_weights != nullptr) {
    thrust::copy(rmm::exec_policy(stream)->on(stream),
                 compressed_sparse_weights,
                 compressed_sparse_weights + number_of_edges,
                 edgelist_weights.data());
  }

  return std::make_tuple(std::move(edgelist_major_vertices),
                         std::move(edgelist_minor_vertices),
                         std::move(edgelist_weights));
}
// Sorts the edge list by (major, minor) and merges duplicate edges:
// with weights, parallel edges are combined by summing their weights
// (reduce_by_key); without weights, duplicates are removed (unique).
// All three vectors are shrunk in place to the coarsened edge count.
// NOTE(review): in the unweighted branch edgelist_weights (size 0) is
// still resized below to number_of_edges uninitialized elements -- confirm
// callers ignore weights when the input had none.
template <typename vertex_t, typename weight_t>
void sort_and_coarsen_edgelist(rmm::device_uvector<vertex_t> &edgelist_major_vertices /* [INOUT] */,
                               rmm::device_uvector<vertex_t> &edgelist_minor_vertices /* [INOUT] */,
                               rmm::device_uvector<weight_t> &edgelist_weights /* [INOUT] */,
                               hipStream_t stream)
{
  auto pair_first = thrust::make_zip_iterator(
    thrust::make_tuple(edgelist_major_vertices.begin(), edgelist_minor_vertices.begin()));

  size_t number_of_edges{0};
  if (edgelist_weights.size() > 0) {
    // Sort edges, then sum the weights of identical (major, minor) pairs.
    thrust::sort_by_key(rmm::exec_policy(stream)->on(stream),
                        pair_first,
                        pair_first + edgelist_major_vertices.size(),
                        edgelist_weights.begin());

    rmm::device_uvector<vertex_t> tmp_edgelist_major_vertices(edgelist_major_vertices.size(),
                                                              stream);
    rmm::device_uvector<vertex_t> tmp_edgelist_minor_vertices(tmp_edgelist_major_vertices.size(),
                                                              stream);
    rmm::device_uvector<weight_t> tmp_edgelist_weights(tmp_edgelist_major_vertices.size(), stream);
    auto it = thrust::reduce_by_key(
      rmm::exec_policy(stream)->on(stream),
      pair_first,
      pair_first + edgelist_major_vertices.size(),
      edgelist_weights.begin(),
      thrust::make_zip_iterator(thrust::make_tuple(tmp_edgelist_major_vertices.begin(),
                                                   tmp_edgelist_minor_vertices.begin())),
      tmp_edgelist_weights.begin());
    number_of_edges = thrust::distance(tmp_edgelist_weights.begin(), thrust::get<1>(it));

    edgelist_major_vertices = std::move(tmp_edgelist_major_vertices);
    edgelist_minor_vertices = std::move(tmp_edgelist_minor_vertices);
    edgelist_weights        = std::move(tmp_edgelist_weights);
  } else {
    // No weights: sort, then drop exact duplicates.
    thrust::sort(rmm::exec_policy(stream)->on(stream),
                 pair_first,
                 pair_first + edgelist_major_vertices.size());
    auto it         = thrust::unique(rmm::exec_policy(stream)->on(stream),
                             pair_first,
                             pair_first + edgelist_major_vertices.size());
    number_of_edges = thrust::distance(pair_first, it);
  }

  // Trim all vectors to the coarsened edge count and release slack memory.
  edgelist_major_vertices.resize(number_of_edges, stream);
  edgelist_minor_vertices.resize(number_of_edges, stream);
  edgelist_weights.resize(number_of_edges, stream);
  edgelist_major_vertices.shrink_to_fit(stream);
  edgelist_minor_vertices.shrink_to_fit(stream);
  edgelist_weights.shrink_to_fit(stream);
}
// Expands the CSR slice to a COO edge list, relabels both endpoints with
// the provided label arrays (old id -> cluster/label id), and coarsens the
// result by sorting and merging duplicate edges.
template <typename vertex_t, typename edge_t, typename weight_t>
std::
  tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, rmm::device_uvector<weight_t>>
  compressed_sparse_to_relabeled_and_sorted_and_coarsened_edgelist(
    edge_t const *compressed_sparse_offsets,
    vertex_t const *compressed_sparse_indices,
    weight_t const *compressed_sparse_weights,
    vertex_t const *p_major_labels,
    vertex_t const *p_minor_labels,
    vertex_t major_first,
    vertex_t major_last,
    vertex_t minor_first,
    vertex_t minor_last,
    hipStream_t stream)
{
  // FIXME: it might be possible to directly create relabled & coarsened edgelist from the
  // compressed sparse format to save memory

  // Expand CSR to COO.
  rmm::device_uvector<vertex_t> edgelist_major_vertices(0, stream);
  rmm::device_uvector<vertex_t> edgelist_minor_vertices(0, stream);
  rmm::device_uvector<weight_t> edgelist_weights(0, stream);
  std::tie(edgelist_major_vertices, edgelist_minor_vertices, edgelist_weights) =
    compressed_sparse_to_edgelist(compressed_sparse_offsets,
                                  compressed_sparse_indices,
                                  compressed_sparse_weights,
                                  major_first,
                                  major_last,
                                  stream);

  // Relabel both endpoints in place (label arrays are indexed relative to
  // major_first / minor_first).
  auto pair_first = thrust::make_zip_iterator(
    thrust::make_tuple(edgelist_major_vertices.begin(), edgelist_minor_vertices.begin()));
  thrust::transform(
    rmm::exec_policy(stream)->on(stream),
    pair_first,
    pair_first + edgelist_major_vertices.size(),
    pair_first,
    [p_major_labels, p_minor_labels, major_first, minor_first] __device__(auto val) {
      return thrust::make_tuple(p_major_labels[thrust::get<0>(val) - major_first],
                                p_minor_labels[thrust::get<1>(val) - minor_first]);
    });

  // Merge parallel edges produced by the relabeling.
  sort_and_coarsen_edgelist(
    edgelist_major_vertices, edgelist_minor_vertices, edgelist_weights, stream);

  return std::make_tuple(std::move(edgelist_major_vertices),
                         std::move(edgelist_minor_vertices),
                         std::move(edgelist_weights));
}
// single-GPU version
// Builds the coarsened graph induced by 'labels' (one label per vertex):
// every vertex is collapsed onto its label, parallel edges are merged
// (weights summed), and a new graph_t with max(labels) + 1 vertices is
// constructed.
template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed>
std::unique_ptr<cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, store_transposed, false>>
coarsen_graph(
  raft::handle_t const &handle,
  cugraph::experimental::graph_view_t<vertex_t, edge_t, weight_t, store_transposed, false> const
    &graph_view,
  vertex_t const *labels)
{
  // Expand, relabel and coarsen the graph's adjacency structure.
  rmm::device_uvector<vertex_t> coarsened_edgelist_major_vertices(0, handle.get_stream());
  rmm::device_uvector<vertex_t> coarsened_edgelist_minor_vertices(0, handle.get_stream());
  rmm::device_uvector<weight_t> coarsened_edgelist_weights(0, handle.get_stream());
  std::tie(coarsened_edgelist_major_vertices,
           coarsened_edgelist_minor_vertices,
           coarsened_edgelist_weights) =
    compressed_sparse_to_relabeled_and_sorted_and_coarsened_edgelist(
      graph_view.offsets(),
      graph_view.indices(),
      graph_view.weights(),
      labels,
      labels,
      vertex_t{0},
      graph_view.get_number_of_vertices(),
      vertex_t{0},
      graph_view.get_number_of_vertices(),
      handle.get_stream());

  // Wrap the coarsened COO in an edgelist_t; src/dst swap depends on the
  // requested storage orientation.
  cugraph::experimental::edgelist_t<vertex_t, edge_t, weight_t> edgelist{};
  edgelist.p_src_vertices = store_transposed ? coarsened_edgelist_minor_vertices.data()
                                             : coarsened_edgelist_major_vertices.data();
  edgelist.p_dst_vertices = store_transposed ? coarsened_edgelist_major_vertices.data()
                                             : coarsened_edgelist_minor_vertices.data();
  edgelist.p_edge_weights  = coarsened_edgelist_weights.data();
  edgelist.number_of_edges = static_cast<edge_t>(coarsened_edgelist_major_vertices.size());

  // The coarsened vertex count is the largest label + 1.
  vertex_t new_number_of_vertices =
    1 + thrust::reduce(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                       labels,
                       labels + graph_view.get_number_of_vertices(),
                       vertex_t{0},
                       thrust::maximum<vertex_t>());

  return std::make_unique<
    cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, store_transposed, false>>(
    handle,
    edgelist,
    new_number_of_vertices,
    cugraph::experimental::graph_properties_t{
      graph_view.is_symmetric(), false, graph_view.is_weighted()},
    true);
}
// explicit instantiation
// Instantiations used by the MG Louvain tests: int32_t vertex/edge ids,
// float weights, non-transposed single-GPU graph.
template void single_gpu_renumber_edgelist_given_number_map(
  raft::handle_t const &handle,
  rmm::device_uvector<int> &d_edgelist_rows,
  rmm::device_uvector<int> &d_edgelist_cols,
  rmm::device_uvector<int> &d_renumber_map_gathered_v);

template rmm::device_uvector<int> gather_distributed_vector(raft::handle_t const &handle,
                                                            int const *d_input,
                                                            size_t size);

template bool compare_renumbered_vectors(raft::handle_t const &handle,
                                         rmm::device_uvector<int> const &v1,
                                         rmm::device_uvector<int> const &v2);

template std::unique_ptr<cugraph::experimental::graph_t<int32_t, int32_t, float, false, false>>
coarsen_graph(
  raft::handle_t const &handle,
  cugraph::experimental::graph_view_t<int32_t, int32_t, float, false, false> const &graph_view,
  int32_t const *labels);

}  // namespace test
}  // namespace cugraph
| c0795859126e192b85021e6a59beb28b950db490.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mg_louvain_helper.hpp"
#include <experimental/graph.hpp>
#include <utilities/device_comm.cuh>
#include <utilities/error.hpp>
#include <utilities/host_scalar_comm.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/for_each.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
namespace cugraph {
namespace test {
// Gathers a distributed device vector onto rank 0; the rank-0 result is
// the concatenation of every rank's chunk, ordered by rank.
template <typename T>
rmm::device_uvector<T> gather_distributed_vector(raft::handle_t const &handle,
                                                 T const *d_input,
                                                 size_t size)
{
  // Collect per-rank chunk sizes, then prefix-sum them into displacements
  // (rank 0 only).
  auto rx_sizes =
    cugraph::experimental::host_scalar_gather(handle.get_comms(), size, 0, handle.get_stream());
  std::vector<size_t> rx_displs(static_cast<size_t>(handle.get_comms().get_rank()) == 0
                                  ? handle.get_comms().get_size()
                                  : int{0},
                                size_t{0});

  if (static_cast<size_t>(handle.get_comms().get_rank()) == 0) {
    std::partial_sum(rx_sizes.begin(), rx_sizes.end() - 1, rx_displs.begin() + 1);
  }

  // Allocate the destination and gather all chunks to rank 0.
  auto total_size = thrust::reduce(thrust::host, rx_sizes.begin(), rx_sizes.end());
  rmm::device_uvector<T> gathered_v(total_size, handle.get_stream());

  cugraph::experimental::device_gatherv(handle.get_comms(),
                                        d_input,
                                        gathered_v.data(),
                                        size,
                                        rx_sizes,
                                        rx_displs,
                                        0,
                                        handle.get_stream());

  return gathered_v;
}

// Checks whether two renumbered vertex vectors are consistent, i.e.
// whether a single mapping m with m[v1[i]] == v2[i] holds everywhere.
// NOTE(review): the thrust::reduce below uses the default plus operator,
// so 'max' is actually the SUM of v1's ids -- an upper bound on the
// maximum, over-allocating 'map' and risking overflow for large inputs;
// compare with coarsen_graph, which passes thrust::maximum.
template <typename vertex_t>
bool compare_renumbered_vectors(raft::handle_t const &handle,
                                rmm::device_uvector<vertex_t> const &v1,
                                rmm::device_uvector<vertex_t> const &v2)
{
  vertex_t max = 1 + thrust::reduce(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                                    v1.begin(),
                                    v1.end(),
                                    vertex_t{0});

  rmm::device_uvector<size_t> map(max, size_t{0});

  // Record v1[i] -> v2[i], then count positions contradicting the map.
  auto iter = thrust::make_zip_iterator(thrust::make_tuple(v1.begin(), v2.begin()));

  thrust::for_each(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                   iter,
                   iter + v1.size(),
                   [d_map = map.data()] __device__(auto pair) {
                     vertex_t e1 = thrust::get<0>(pair);
                     vertex_t e2 = thrust::get<1>(pair);
                     d_map[e1]   = e2;
                   });

  auto error_count =
    thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                     iter,
                     iter + v1.size(),
                     [d_map = map.data()] __device__(auto pair) {
                       vertex_t e1 = thrust::get<0>(pair);
                       vertex_t e2 = thrust::get<1>(pair);
                       return (d_map[e1] != e2);
                     });

  return (error_count == 0);
}

// Relabels an edge list in place given a gathered renumber map
// (renumber_map_gathered_v[new_id] == old_id): the map is inverted on the
// device, then both endpoint arrays are rewritten through the inverse.
template <typename T>
void single_gpu_renumber_edgelist_given_number_map(raft::handle_t const &handle,
                                                   rmm::device_uvector<T> &edgelist_rows_v,
                                                   rmm::device_uvector<T> &edgelist_cols_v,
                                                   rmm::device_uvector<T> &renumber_map_gathered_v)
{
  // index_v is the inverse map: index_v[old_id] = new_id.
  rmm::device_uvector<T> index_v(renumber_map_gathered_v.size(), handle.get_stream());

  thrust::for_each(
    rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
    thrust::make_counting_iterator<size_t>(0),
    thrust::make_counting_iterator<size_t>(renumber_map_gathered_v.size()),
    [d_renumber_map_gathered = renumber_map_gathered_v.data(), d_index = index_v.data()] __device__(
      auto idx) { d_index[d_renumber_map_gathered[idx]] = idx; });

  thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                    edgelist_rows_v.begin(),
                    edgelist_rows_v.end(),
                    edgelist_rows_v.begin(),
                    [d_index = index_v.data()] __device__(auto v) { return d_index[v]; });

  thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                    edgelist_cols_v.begin(),
                    edgelist_cols_v.end(),
                    edgelist_cols_v.begin(),
                    [d_index = index_v.data()] __device__(auto v) { return d_index[v]; });
}
// Expands a CSR/CSC slice covering majors [major_first, major_last) into a
// COO edge list; the weights vector stays empty when no weights are given.
template <typename vertex_t, typename edge_t, typename weight_t>
std::
  tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, rmm::device_uvector<weight_t>>
  compressed_sparse_to_edgelist(edge_t const *compressed_sparse_offsets,
                                vertex_t const *compressed_sparse_indices,
                                weight_t const *compressed_sparse_weights,
                                vertex_t major_first,
                                vertex_t major_last,
                                cudaStream_t stream)
{
  // The last offset is the total edge count of this slice.
  edge_t number_of_edges{0};
  raft::update_host(
    &number_of_edges, compressed_sparse_offsets + (major_last - major_first), 1, stream);
  CUDA_TRY(cudaStreamSynchronize(stream));
  rmm::device_uvector<vertex_t> edgelist_major_vertices(number_of_edges, stream);
  rmm::device_uvector<vertex_t> edgelist_minor_vertices(number_of_edges, stream);
  rmm::device_uvector<weight_t> edgelist_weights(
    compressed_sparse_weights != nullptr ? number_of_edges : 0, stream);

  // FIXME: this is highly inefficient for very high-degree vertices, for better performance, we can
  // fill high-degree vertices using one CUDA block per vertex, mid-degree vertices using one CUDA
  // warp per vertex, and low-degree vertices using one CUDA thread per block
  thrust::for_each(rmm::exec_policy(stream)->on(stream),
                   thrust::make_counting_iterator(major_first),
                   thrust::make_counting_iterator(major_last),
                   [compressed_sparse_offsets,
                    major_first,
                    p_majors = edgelist_major_vertices.begin()] __device__(auto v) {
                     auto first = compressed_sparse_offsets[v - major_first];
                     auto last  = compressed_sparse_offsets[v - major_first + 1];
                     thrust::fill(thrust::seq, p_majors + first, p_majors + last, v);
                   });
  // Minors are the CSR indices; weights are copied through when present.
  thrust::copy(rmm::exec_policy(stream)->on(stream),
               compressed_sparse_indices,
               compressed_sparse_indices + number_of_edges,
               edgelist_minor_vertices.begin());
  if (compressed_sparse_weights != nullptr) {
    thrust::copy(rmm::exec_policy(stream)->on(stream),
                 compressed_sparse_weights,
                 compressed_sparse_weights + number_of_edges,
                 edgelist_weights.data());
  }

  return std::make_tuple(std::move(edgelist_major_vertices),
                         std::move(edgelist_minor_vertices),
                         std::move(edgelist_weights));
}

// Sorts the edge list by (major, minor) and merges duplicates: weighted
// edges are combined by summing weights, unweighted ones are de-duplicated.
// NOTE(review): the unweighted branch still resizes the empty weights
// vector to number_of_edges uninitialized elements below -- confirm
// callers ignore weights when the input had none.
template <typename vertex_t, typename weight_t>
void sort_and_coarsen_edgelist(rmm::device_uvector<vertex_t> &edgelist_major_vertices /* [INOUT] */,
                               rmm::device_uvector<vertex_t> &edgelist_minor_vertices /* [INOUT] */,
                               rmm::device_uvector<weight_t> &edgelist_weights /* [INOUT] */,
                               cudaStream_t stream)
{
  auto pair_first = thrust::make_zip_iterator(
    thrust::make_tuple(edgelist_major_vertices.begin(), edgelist_minor_vertices.begin()));

  size_t number_of_edges{0};
  if (edgelist_weights.size() > 0) {
    // Sort, then sum the weights of identical (major, minor) pairs.
    thrust::sort_by_key(rmm::exec_policy(stream)->on(stream),
                        pair_first,
                        pair_first + edgelist_major_vertices.size(),
                        edgelist_weights.begin());

    rmm::device_uvector<vertex_t> tmp_edgelist_major_vertices(edgelist_major_vertices.size(),
                                                              stream);
    rmm::device_uvector<vertex_t> tmp_edgelist_minor_vertices(tmp_edgelist_major_vertices.size(),
                                                              stream);
    rmm::device_uvector<weight_t> tmp_edgelist_weights(tmp_edgelist_major_vertices.size(), stream);
    auto it = thrust::reduce_by_key(
      rmm::exec_policy(stream)->on(stream),
      pair_first,
      pair_first + edgelist_major_vertices.size(),
      edgelist_weights.begin(),
      thrust::make_zip_iterator(thrust::make_tuple(tmp_edgelist_major_vertices.begin(),
                                                   tmp_edgelist_minor_vertices.begin())),
      tmp_edgelist_weights.begin());
    number_of_edges = thrust::distance(tmp_edgelist_weights.begin(), thrust::get<1>(it));

    edgelist_major_vertices = std::move(tmp_edgelist_major_vertices);
    edgelist_minor_vertices = std::move(tmp_edgelist_minor_vertices);
    edgelist_weights        = std::move(tmp_edgelist_weights);
  } else {
    // No weights: sort, then drop exact duplicates.
    thrust::sort(rmm::exec_policy(stream)->on(stream),
                 pair_first,
                 pair_first + edgelist_major_vertices.size());
    auto it         = thrust::unique(rmm::exec_policy(stream)->on(stream),
                             pair_first,
                             pair_first + edgelist_major_vertices.size());
    number_of_edges = thrust::distance(pair_first, it);
  }

  // Trim to the coarsened edge count and release slack memory.
  edgelist_major_vertices.resize(number_of_edges, stream);
  edgelist_minor_vertices.resize(number_of_edges, stream);
  edgelist_weights.resize(number_of_edges, stream);
  edgelist_major_vertices.shrink_to_fit(stream);
  edgelist_minor_vertices.shrink_to_fit(stream);
  edgelist_weights.shrink_to_fit(stream);
}
// Expands the CSR slice to COO, relabels both endpoints via the given
// label arrays, and coarsens the result by merging duplicate edges.
template <typename vertex_t, typename edge_t, typename weight_t>
std::
  tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, rmm::device_uvector<weight_t>>
  compressed_sparse_to_relabeled_and_sorted_and_coarsened_edgelist(
    edge_t const *compressed_sparse_offsets,
    vertex_t const *compressed_sparse_indices,
    weight_t const *compressed_sparse_weights,
    vertex_t const *p_major_labels,
    vertex_t const *p_minor_labels,
    vertex_t major_first,
    vertex_t major_last,
    vertex_t minor_first,
    vertex_t minor_last,
    cudaStream_t stream)
{
  // FIXME: it might be possible to directly create relabled & coarsened edgelist from the
  // compressed sparse format to save memory

  // Expand CSR to COO.
  rmm::device_uvector<vertex_t> edgelist_major_vertices(0, stream);
  rmm::device_uvector<vertex_t> edgelist_minor_vertices(0, stream);
  rmm::device_uvector<weight_t> edgelist_weights(0, stream);
  std::tie(edgelist_major_vertices, edgelist_minor_vertices, edgelist_weights) =
    compressed_sparse_to_edgelist(compressed_sparse_offsets,
                                  compressed_sparse_indices,
                                  compressed_sparse_weights,
                                  major_first,
                                  major_last,
                                  stream);

  // Relabel both endpoints in place (labels are indexed relative to
  // major_first / minor_first).
  auto pair_first = thrust::make_zip_iterator(
    thrust::make_tuple(edgelist_major_vertices.begin(), edgelist_minor_vertices.begin()));
  thrust::transform(
    rmm::exec_policy(stream)->on(stream),
    pair_first,
    pair_first + edgelist_major_vertices.size(),
    pair_first,
    [p_major_labels, p_minor_labels, major_first, minor_first] __device__(auto val) {
      return thrust::make_tuple(p_major_labels[thrust::get<0>(val) - major_first],
                                p_minor_labels[thrust::get<1>(val) - minor_first]);
    });

  // Merge parallel edges produced by the relabeling.
  sort_and_coarsen_edgelist(
    edgelist_major_vertices, edgelist_minor_vertices, edgelist_weights, stream);

  return std::make_tuple(std::move(edgelist_major_vertices),
                         std::move(edgelist_minor_vertices),
                         std::move(edgelist_weights));
}
// single-GPU version
// Contract the graph by `labels`: vertices sharing a label collapse into one
// coarse vertex, and parallel edges are merged (weights combined) by the
// coarsening helper. `labels` must be valid for every vertex of graph_view.
// The coarse vertex count is max(labels) + 1 (so labels are assumed to be
// non-negative — the reduction's initial value is vertex_t{0}).
template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed>
std::unique_ptr<cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, store_transposed, false>>
coarsen_graph(
raft::handle_t const &handle,
cugraph::experimental::graph_view_t<vertex_t, edge_t, weight_t, store_transposed, false> const
&graph_view,
vertex_t const *labels)
{
rmm::device_uvector<vertex_t> coarsened_edgelist_major_vertices(0, handle.get_stream());
rmm::device_uvector<vertex_t> coarsened_edgelist_minor_vertices(0, handle.get_stream());
rmm::device_uvector<weight_t> coarsened_edgelist_weights(0, handle.get_stream());
// Both endpoints are relabeled with the same `labels` array (single GPU:
// majors and minors cover the same vertex range [0, V)).
std::tie(coarsened_edgelist_major_vertices,
coarsened_edgelist_minor_vertices,
coarsened_edgelist_weights) =
compressed_sparse_to_relabeled_and_sorted_and_coarsened_edgelist(
graph_view.offsets(),
graph_view.indices(),
graph_view.weights(),
labels,
labels,
vertex_t{0},
graph_view.get_number_of_vertices(),
vertex_t{0},
graph_view.get_number_of_vertices(),
handle.get_stream());
// Wrap the coarsened edge list; src/dst swap depending on transposition.
cugraph::experimental::edgelist_t<vertex_t, edge_t, weight_t> edgelist{};
edgelist.p_src_vertices = store_transposed ? coarsened_edgelist_minor_vertices.data()
: coarsened_edgelist_major_vertices.data();
edgelist.p_dst_vertices = store_transposed ? coarsened_edgelist_major_vertices.data()
: coarsened_edgelist_minor_vertices.data();
edgelist.p_edge_weights = coarsened_edgelist_weights.data();
edgelist.number_of_edges = static_cast<edge_t>(coarsened_edgelist_major_vertices.size());
// Number of coarse vertices = largest label + 1.
vertex_t new_number_of_vertices =
1 + thrust::reduce(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + graph_view.get_number_of_vertices(),
vertex_t{0},
thrust::maximum<vertex_t>());
return std::make_unique<
cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, store_transposed, false>>(
handle,
edgelist,
new_number_of_vertices,
cugraph::experimental::graph_properties_t{
graph_view.is_symmetric(), false, graph_view.is_weighted()},
true);
}
// explicit instantiation
// (int32_t vertex/edge ids, float weights — the only combination used here)
template void single_gpu_renumber_edgelist_given_number_map(
raft::handle_t const &handle,
rmm::device_uvector<int> &d_edgelist_rows,
rmm::device_uvector<int> &d_edgelist_cols,
rmm::device_uvector<int> &d_renumber_map_gathered_v);
template rmm::device_uvector<int> gather_distributed_vector(raft::handle_t const &handle,
int const *d_input,
size_t size);
template bool compare_renumbered_vectors(raft::handle_t const &handle,
rmm::device_uvector<int> const &v1,
rmm::device_uvector<int> const &v2);
// Non-transposed, single-GPU coarsen_graph.
template std::unique_ptr<cugraph::experimental::graph_t<int32_t, int32_t, float, false, false>>
coarsen_graph(
raft::handle_t const &handle,
cugraph::experimental::graph_view_t<int32_t, int32_t, float, false, false> const &graph_view,
int32_t const *labels);
} // namespace test
} // namespace cugraph
|
56ab454af89d59be7c5ca2ea168139cfc694d080.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// One thread per column: computes the Shannon entropy (base 2) of the
// per-bin probability distribution of column col_x and writes it to
// entropies[col_x].
// Layout: each column starts at bin_scores + col_x * pitch_bin_scores;
// within a column, sample j / bin i lives at in_col[j * nbins + i].
// Uses the fast __log2f intrinsic; prob == 0 contributes 0 (0*log 0 := 0).
__global__ void get_entropy(int nbins, int nsamples, int nx, float * bin_scores, int pitch_bin_scores, float * entropies)
{
    int
    col_x = blockDim.x * blockIdx.x + threadIdx.x;
    if(col_x >= nx)
        return;
    float
    * in_col = bin_scores + col_x * pitch_bin_scores,
    entropy = 0.f, prob, logp;
    for(int i = 0; i < nbins; i++) {
        prob = 0.f;
        for(int j = 0; j < nsamples; j++)
            prob += in_col[j * nbins + i];
        // Average over samples. Fix: divide by a float — the original cast to
        // double forced a slow double-precision divide (and double round-trip)
        // inside an otherwise float-only kernel.
        prob /= (float) nsamples;
        if(prob <= 0.f)
            logp = 0.f;
        else
            logp = __log2f(prob);
        entropy += prob * logp;
    }
    entropies[col_x] = -entropy;
} | 56ab454af89d59be7c5ca2ea168139cfc694d080.cu | #include "includes.h"
// One thread per column: computes the Shannon entropy (base 2) of the
// per-bin probability distribution of column col_x and writes it to
// entropies[col_x].
// Layout: each column starts at bin_scores + col_x * pitch_bin_scores;
// within a column, sample j / bin i lives at in_col[j * nbins + i].
// Uses the fast __log2f intrinsic; prob == 0 contributes 0 (0*log 0 := 0).
__global__ void get_entropy(int nbins, int nsamples, int nx, float * bin_scores, int pitch_bin_scores, float * entropies)
{
    int
    col_x = blockDim.x * blockIdx.x + threadIdx.x;
    if(col_x >= nx)
        return;
    float
    * in_col = bin_scores + col_x * pitch_bin_scores,
    entropy = 0.f, prob, logp;
    for(int i = 0; i < nbins; i++) {
        prob = 0.f;
        for(int j = 0; j < nsamples; j++)
            prob += in_col[j * nbins + i];
        // Average over samples. Fix: divide by a float — the original cast to
        // double forced a slow double-precision divide (and double round-trip)
        // inside an otherwise float-only kernel.
        prob /= (float) nsamples;
        if(prob <= 0.f)
            logp = 0.f;
        else
            logp = __log2f(prob);
        entropy += prob * logp;
    }
    entropies[col_x] = -entropy;
} |
e6bdba949d975d15a115d339b6616e656381689f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <iostream>
#include "maxLayer.h"
namespace Tn
{
    // Copy `val` into the byte buffer and advance the cursor past it.
    template<typename T>
    void write(char* &buffer, const T& val)
    {
        T* slot = reinterpret_cast<T*>(buffer);
        *slot = val;
        buffer += sizeof(T);
    }
    // Read a value of type T from the byte buffer into `val` and advance
    // the cursor past it (mirror of write()).
    template<typename T>
    void read(const char* &buffer, T& val)
    {
        const T* slot = reinterpret_cast<const T*>(buffer);
        val = *slot;
        buffer += sizeof(T);
    }
}
using namespace Inception;
namespace nvinfer1
{
// Default construction: class count comes from the compile-time CLASS_NUM.
MaxLayerPlugin::MaxLayerPlugin()
{
mClassCount = CLASS_NUM;
}
MaxLayerPlugin::~MaxLayerPlugin()
{
}
// Deserializing constructor. Read order must mirror serialize()'s write
// order exactly: mClassCount, then mThreadCount.
MaxLayerPlugin::MaxLayerPlugin(const void *data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char*>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
assert(d == a + length);
}
// Serialize plugin state; byte layout must match the deserializing ctor.
void MaxLayerPlugin::serialize(void *buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
assert(d == a + getSerializationSize());
}
size_t MaxLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount);
}
int MaxLayerPlugin::initialize()
{
return 0;
}
Dims MaxLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
// output the result to channel;
return Dims3(1, 1, mClassCount);
}
// set plugin namespace
void MaxLayerPlugin::setPluginNamespace(const char *pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* MaxLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType MaxLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool MaxLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if output tensor is broadcast across a batch.
bool MaxLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void MaxLayerPlugin::configurePlugin(const PluginTensorDesc *in, int nbInput, const PluginTensorDesc *out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void MaxLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void MaxLayerPlugin::detachFromContext()
{
}
const char* MaxLayerPlugin::getPluginType() const
{
return "MaxLayer_TRT";
}
const char* MaxLayerPlugin::getPluginVersion() const
{
return "1";
}
void MaxLayerPlugin::destroy()
{
delete this;
}
// clone the plugin
// NOTE(review): clone() default-constructs (mClassCount = CLASS_NUM) instead
// of copying this instance's fields — if a deserialized plugin ever carried a
// different mClassCount/mThreadCount, the clone would lose it. Confirm intent.
IPluginV2IOExt* MaxLayerPlugin::clone() const
{
MaxLayerPlugin *p = new MaxLayerPlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
// Element-wise max over 4 consecutive blocks of `classes` floats:
// output[idx] = max(input[idx], input[idx+classes], ..., input[idx+3*classes]).
// Assumes `input` holds at least 4*classes elements.
__global__ void Max(const float *input, float *output, int classes) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= classes) return;
output[idx] = input[idx];
for (int i = 1; i < 4; i++) {
if (output[idx] < input[idx + i * classes]) {
output[idx] = input[idx + i * classes];
}
}
}
// NOTE(review): the `stream` parameter is unused — the memsets and the kernel
// run on the default stream. Also each per-batch memset clears only
// sizeof(float) (one element), and the launch uses a single block of
// mClassCount threads (fails if mClassCount exceeds the device block limit).
void MaxLayerPlugin::forwardGpu(const float *const *inputs, float *output, hipStream_t stream, int batchSize)
{
int outputElem = mClassCount;
for (int idx = 0; idx < batchSize; ++idx) {
hipMemset(output + idx * outputElem, 0, sizeof(float));
}
hipLaunchKernelGGL(( Max), dim3(1), dim3(mClassCount), 0, 0, inputs[0], output, mClassCount);
}
// TensorRT entry point; only batchSize == 1 is supported (asserted).
int MaxLayerPlugin::enqueue(int batchSize, const void* const *inputs, void** outputs, void* workspace, hipStream_t stream)
{
assert(batchSize == 1);
forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection MaxLayerPluginCreator::mFC{};
std::vector<PluginField> MaxLayerPluginCreator::mPluginAttributes;
// Creator registers no plugin fields (createPlugin ignores `fc`).
MaxLayerPluginCreator::MaxLayerPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* MaxLayerPluginCreator::getPluginName() const
{
return "MaxLayer_TRT";
}
const char* MaxLayerPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* MaxLayerPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* MaxLayerPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
MaxLayerPlugin* obj = new MaxLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* MaxLayerPluginCreator::deserializePlugin(const char *name, const void *serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call MaxLayerPlugin::destroy()
MaxLayerPlugin* obj = new MaxLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| e6bdba949d975d15a115d339b6616e656381689f.cu | #include <assert.h>
#include <iostream>
#include "maxLayer.h"
namespace Tn
{
    // Copy `val` into the byte buffer and advance the cursor past it.
    template<typename T>
    void write(char* &buffer, const T& val)
    {
        T* slot = reinterpret_cast<T*>(buffer);
        *slot = val;
        buffer += sizeof(T);
    }
    // Read a value of type T from the byte buffer into `val` and advance
    // the cursor past it (mirror of write()).
    template<typename T>
    void read(const char* &buffer, T& val)
    {
        const T* slot = reinterpret_cast<const T*>(buffer);
        val = *slot;
        buffer += sizeof(T);
    }
}
using namespace Inception;
namespace nvinfer1
{
// Default construction: class count comes from the compile-time CLASS_NUM.
MaxLayerPlugin::MaxLayerPlugin()
{
mClassCount = CLASS_NUM;
}
MaxLayerPlugin::~MaxLayerPlugin()
{
}
// Deserializing constructor. Read order must mirror serialize()'s write
// order exactly: mClassCount, then mThreadCount.
MaxLayerPlugin::MaxLayerPlugin(const void *data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char*>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
assert(d == a + length);
}
// Serialize plugin state; byte layout must match the deserializing ctor.
void MaxLayerPlugin::serialize(void *buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
assert(d == a + getSerializationSize());
}
size_t MaxLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount);
}
int MaxLayerPlugin::initialize()
{
return 0;
}
Dims MaxLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
// output the result to channel;
return Dims3(1, 1, mClassCount);
}
// set plugin namespace
void MaxLayerPlugin::setPluginNamespace(const char *pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* MaxLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType MaxLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool MaxLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if output tensor is broadcast across a batch.
bool MaxLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void MaxLayerPlugin::configurePlugin(const PluginTensorDesc *in, int nbInput, const PluginTensorDesc *out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void MaxLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void MaxLayerPlugin::detachFromContext()
{
}
const char* MaxLayerPlugin::getPluginType() const
{
return "MaxLayer_TRT";
}
const char* MaxLayerPlugin::getPluginVersion() const
{
return "1";
}
void MaxLayerPlugin::destroy()
{
delete this;
}
// clone the plugin
// NOTE(review): clone() default-constructs (mClassCount = CLASS_NUM) instead
// of copying this instance's fields — if a deserialized plugin ever carried a
// different mClassCount/mThreadCount, the clone would lose it. Confirm intent.
IPluginV2IOExt* MaxLayerPlugin::clone() const
{
MaxLayerPlugin *p = new MaxLayerPlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
// Element-wise max over 4 consecutive blocks of `classes` floats:
// output[idx] = max(input[idx], input[idx+classes], ..., input[idx+3*classes]).
// Assumes `input` holds at least 4*classes elements.
__global__ void Max(const float *input, float *output, int classes) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= classes) return;
output[idx] = input[idx];
for (int i = 1; i < 4; i++) {
if (output[idx] < input[idx + i * classes]) {
output[idx] = input[idx + i * classes];
}
}
}
// NOTE(review): the `stream` parameter is unused — the memsets and the kernel
// run on the default stream. Also each per-batch memset clears only
// sizeof(float) (one element), and the launch uses a single block of
// mClassCount threads (fails if mClassCount exceeds the device block limit).
void MaxLayerPlugin::forwardGpu(const float *const *inputs, float *output, cudaStream_t stream, int batchSize)
{
int outputElem = mClassCount;
for (int idx = 0; idx < batchSize; ++idx) {
cudaMemset(output + idx * outputElem, 0, sizeof(float));
}
Max<<<1, mClassCount>>>(inputs[0], output, mClassCount);
}
// TensorRT entry point; only batchSize == 1 is supported (asserted).
int MaxLayerPlugin::enqueue(int batchSize, const void* const *inputs, void** outputs, void* workspace, cudaStream_t stream)
{
assert(batchSize == 1);
forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection MaxLayerPluginCreator::mFC{};
std::vector<PluginField> MaxLayerPluginCreator::mPluginAttributes;
// Creator registers no plugin fields (createPlugin ignores `fc`).
MaxLayerPluginCreator::MaxLayerPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* MaxLayerPluginCreator::getPluginName() const
{
return "MaxLayer_TRT";
}
const char* MaxLayerPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* MaxLayerPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* MaxLayerPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
MaxLayerPlugin* obj = new MaxLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* MaxLayerPluginCreator::deserializePlugin(const char *name, const void *serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call MaxLayerPlugin::destroy()
MaxLayerPlugin* obj = new MaxLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
eb6d39af856f8ef23164f6b86d8dac8ff72d4bde.hip | // !!! This is a file automatically generated by hipify!!!
// Insert the begin and end event.
// NOTE(review): start and stop are recorded back-to-back with no work in
// between, so elapsedTime measures ~0 ms — the timed workload presumably
// belongs between the two hipEventRecord calls; confirm against the
// original file (this is only a fragment; `p` is declared elsewhere).
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventRecord(start,0);
float elapsedTime;
hipEventCreate(&stop);
hipEventRecord(stop,0);
// Block until the stop event has completed before reading the timer.
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
p->time = elapsedTime; | eb6d39af856f8ef23164f6b86d8dac8ff72d4bde.cu | // Insert the begin and end event.
// NOTE(review): start and stop are recorded back-to-back with no work in
// between, so elapsedTime measures ~0 ms — the timed workload presumably
// belongs between the two cudaEventRecord calls; confirm against the
// original file (this is only a fragment; `p` is declared elsewhere).
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventRecord(start,0);
float elapsedTime;
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
// Block until the stop event has completed before reading the timer.
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
p->time = elapsedTime; |
41e858a21ad6871c081f75dd188a10bd5818a8b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<stdio.h>
#include<math.h>
#include<cuda_runtime.h>
#include<stdlib.h>
#include<stdio.h>
#include<string.h>
#include<math.h>
#include <sys/time.h>
#include "rnd.c"
#include "rnd.h"
// Per-bucket insertion sort: block i sorts, in place and ascending, the
// first b[i] elements of row i of the 2D array `a` (row pitch n_c floats).
// NOTE(review): launched with one thread per block by the host code, so
// there is no intra-block parallelism here — each bucket is sorted serially.
__global__ void sort(int n_c,float* a,int* b)
{
int i = blockIdx.x;
int len_of_block = b[i];
// Classic O(k^2) insertion sort over a[i*n_c .. i*n_c + len_of_block).
for(int j=0;j<len_of_block;j++)
{
int k=j;
while(k>0 && *((float*)a+i*n_c+k) < *((float*)a+i*n_c+k-1))
{
float temp = *((float*)a+i*n_c+k);
*((float*)a+i*n_c+k) = *((float*)a+i*n_c+k-1);
*((float*)a+i*n_c+k-1) = temp;
k--;
}
}
}
// Print each of the first `length` elements on its own line, followed by a
// trailing blank line.
void display(float* array,int length)
{
    for (int idx = 0; idx < length; ++idx)
        printf("%f \n", array[idx]);
    printf("\n");
}
// Print the first `length` elements space-separated on one line, then a
// newline.
void display_(float* array,int length)
{
    for (int idx = 0; idx < length; ++idx)
        printf("%f ", array[idx]);
    printf("\n");
}
float* input;
float* bucket;
float* cuda_bucket;
int* cuda_count;
float* output;
// Bucket sort driver: distribute n random floats into 4 buckets on the host,
// insertion-sort each bucket on the GPU (one block per bucket, see sort()),
// then concatenate the sorted buckets into `output` in bucket order.
// Usage: ./GPU_Prime -t Problem_Size  (args[2] is the element count n).
int main(int argc,char *args[])
{
    if(argc!=3)
    {
        printf("./GPU_Prime -t Problem_Size\n");
        return 0;
    }
    int n = atoi(args[2]);
    int max_num = n*10;
    input = (float*)malloc(sizeof(float)*n);
    output = (float*)malloc(sizeof(float)*n);
    // Fill `input` with n values (helper from rnd.c; presumably in [0, max_num) — confirm).
    random_number_generator_normal(input,n,max_num);
    int no_of_buckets = 4;
    bucket = (float *)malloc(sizeof(float)*no_of_buckets*n);
    int* count;
    count = (int *)malloc(sizeof(int)*no_of_buckets);
    for(int k=0;k<no_of_buckets;k++)
    {
        count[k] = 0;
    }
    printf("Before :\n");
    display(input,n);
    int bucket_no = 0;
    int limit = max_num/no_of_buckets;
    // Fix: guard against a zero divisor (n == 0 makes max_num, and thus limit, 0).
    if (limit < 1) limit = 1;
    printf("limit %d\n",limit);
    printf("putting in bucket\n");
    for(int i=0;i<n;i++)
    {
        bucket_no = floor(input[i]/limit);
        // Fix: clamp the bucket index. Integer truncation of `limit` means
        // values near max_num could map to index no_of_buckets, which would
        // write past the end of `bucket`.
        if (bucket_no < 0) bucket_no = 0;
        if (bucket_no >= no_of_buckets) bucket_no = no_of_buckets - 1;
        *((float *)bucket+bucket_no*n+count[bucket_no])=input[i] ;
        count[bucket_no] += 1;
    }
    for(int j=0;j<no_of_buckets;j++)
    {
        printf("j %d %d ",j,count[j]);
        display_(((float*)bucket+j*n),count[j]);
    }
    struct timeval time;
    gettimeofday(&time,NULL);
    double t1 = time.tv_sec + (time.tv_usec/1000000.0);
    hipMalloc((void**)&cuda_bucket,sizeof(float)*no_of_buckets*n);
    hipMalloc((void**)&cuda_count,sizeof(int)*no_of_buckets);
    hipMemcpy(cuda_bucket,bucket,sizeof(float)*no_of_buckets*n,hipMemcpyHostToDevice);
    hipMemcpy(cuda_count,count,sizeof(int)*no_of_buckets,hipMemcpyHostToDevice);
    // One block (of one thread) per bucket; row pitch is n floats.
    hipLaunchKernelGGL(( sort), dim3(no_of_buckets),dim3(1), 0, 0, n,cuda_bucket,cuda_count);
    hipMemcpy(bucket,cuda_bucket,sizeof(float)*no_of_buckets*n,hipMemcpyDeviceToHost);
    printf("Bucket After :\n");
    int o_index = 0;
    for(int j=0;j<no_of_buckets;j++)
    {
        printf("j %d %d ",j,count[j]);
        display_(((float*)bucket+j*n),count[j]);
        for(int k=0;k<count[j];k++)
        {
            output[o_index] = *((float*)bucket+j*n+k);
            o_index++;
        }
    }
    printf("After :\n");
    display(output,n);
    gettimeofday(&time,NULL);
    double t2 = time.tv_sec + (time.tv_usec/1000000.0);
    printf("Time Taken %f \n",t2-t1);
    // Fix: release device and host memory (the original leaked all of these;
    // its commented-out frees referenced variables that do not exist).
    hipFree(cuda_bucket);
    hipFree(cuda_count);
    free(count);
    free(bucket);
    free(input);
    free(output);
    return 0;
}
| 41e858a21ad6871c081f75dd188a10bd5818a8b7.cu | #include<cuda.h>
#include<stdio.h>
#include<math.h>
#include<cuda_runtime.h>
#include<stdlib.h>
#include<stdio.h>
#include<string.h>
#include<math.h>
#include <sys/time.h>
#include "rnd.c"
#include "rnd.h"
// Per-bucket insertion sort: block i sorts, in place and ascending, the
// first b[i] elements of row i of the 2D array `a` (row pitch n_c floats).
// NOTE(review): launched with one thread per block by the host code, so
// there is no intra-block parallelism here — each bucket is sorted serially.
__global__ void sort(int n_c,float* a,int* b)
{
int i = blockIdx.x;
int len_of_block = b[i];
// Classic O(k^2) insertion sort over a[i*n_c .. i*n_c + len_of_block).
for(int j=0;j<len_of_block;j++)
{
int k=j;
while(k>0 && *((float*)a+i*n_c+k) < *((float*)a+i*n_c+k-1))
{
float temp = *((float*)a+i*n_c+k);
*((float*)a+i*n_c+k) = *((float*)a+i*n_c+k-1);
*((float*)a+i*n_c+k-1) = temp;
k--;
}
}
}
// Print each of the first `length` elements on its own line, followed by a
// trailing blank line.
void display(float* array,int length)
{
    for (int idx = 0; idx < length; ++idx)
        printf("%f \n", array[idx]);
    printf("\n");
}
// Print the first `length` elements space-separated on one line, then a
// newline.
void display_(float* array,int length)
{
    for (int idx = 0; idx < length; ++idx)
        printf("%f ", array[idx]);
    printf("\n");
}
float* input;
float* bucket;
float* cuda_bucket;
int* cuda_count;
float* output;
// Bucket sort driver: distribute n random floats into 4 buckets on the host,
// insertion-sort each bucket on the GPU (one block per bucket, see sort()),
// then concatenate the sorted buckets into `output` in bucket order.
// Usage: ./GPU_Prime -t Problem_Size  (args[2] is the element count n).
int main(int argc,char *args[])
{
    if(argc!=3)
    {
        printf("./GPU_Prime -t Problem_Size\n");
        return 0;
    }
    int n = atoi(args[2]);
    int max_num = n*10;
    input = (float*)malloc(sizeof(float)*n);
    output = (float*)malloc(sizeof(float)*n);
    // Fill `input` with n values (helper from rnd.c; presumably in [0, max_num) — confirm).
    random_number_generator_normal(input,n,max_num);
    int no_of_buckets = 4;
    bucket = (float *)malloc(sizeof(float)*no_of_buckets*n);
    int* count;
    count = (int *)malloc(sizeof(int)*no_of_buckets);
    for(int k=0;k<no_of_buckets;k++)
    {
        count[k] = 0;
    }
    printf("Before :\n");
    display(input,n);
    int bucket_no = 0;
    int limit = max_num/no_of_buckets;
    // Fix: guard against a zero divisor (n == 0 makes max_num, and thus limit, 0).
    if (limit < 1) limit = 1;
    printf("limit %d\n",limit);
    printf("putting in bucket\n");
    for(int i=0;i<n;i++)
    {
        bucket_no = floor(input[i]/limit);
        // Fix: clamp the bucket index. Integer truncation of `limit` means
        // values near max_num could map to index no_of_buckets, which would
        // write past the end of `bucket`.
        if (bucket_no < 0) bucket_no = 0;
        if (bucket_no >= no_of_buckets) bucket_no = no_of_buckets - 1;
        *((float *)bucket+bucket_no*n+count[bucket_no])=input[i] ;
        count[bucket_no] += 1;
    }
    for(int j=0;j<no_of_buckets;j++)
    {
        printf("j %d %d ",j,count[j]);
        display_(((float*)bucket+j*n),count[j]);
    }
    struct timeval time;
    gettimeofday(&time,NULL);
    double t1 = time.tv_sec + (time.tv_usec/1000000.0);
    cudaMalloc((void**)&cuda_bucket,sizeof(float)*no_of_buckets*n);
    cudaMalloc((void**)&cuda_count,sizeof(int)*no_of_buckets);
    cudaMemcpy(cuda_bucket,bucket,sizeof(float)*no_of_buckets*n,cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_count,count,sizeof(int)*no_of_buckets,cudaMemcpyHostToDevice);
    // One block (of one thread) per bucket; row pitch is n floats.
    sort<<<no_of_buckets,1>>>(n,cuda_bucket,cuda_count);
    cudaMemcpy(bucket,cuda_bucket,sizeof(float)*no_of_buckets*n,cudaMemcpyDeviceToHost);
    printf("Bucket After :\n");
    int o_index = 0;
    for(int j=0;j<no_of_buckets;j++)
    {
        printf("j %d %d ",j,count[j]);
        display_(((float*)bucket+j*n),count[j]);
        for(int k=0;k<count[j];k++)
        {
            output[o_index] = *((float*)bucket+j*n+k);
            o_index++;
        }
    }
    printf("After :\n");
    display(output,n);
    gettimeofday(&time,NULL);
    double t2 = time.tv_sec + (time.tv_usec/1000000.0);
    printf("Time Taken %f \n",t2-t1);
    // Fix: release device and host memory (the original leaked all of these;
    // its commented-out frees referenced variables that do not exist).
    cudaFree(cuda_bucket);
    cudaFree(cuda_count);
    free(count);
    free(bucket);
    free(input);
    free(output);
    return 0;
}
|
980dbfb27e6cd22e9be7db8e221ebf5e0b20cba6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <windows.h>
#include <wchar.h>
#define NUM_PARTICLES 100000000
#define NUM_ITERATIONS 100
#define BLOCK_SIZE 256 //128 //256
#define NUM_DIMENSIONS 3
typedef struct Particle {
float3 position;
float3 velocity;
} Particle;
// Print the current local wall-clock time (Windows API) — used below as a
// crude timestamp around the GPU/CPU phases. Always returns 0.
int wmain(void) {
SYSTEMTIME lt = {0};
GetLocalTime(&lt);
wprintf(L"The local time is: %02d:%02d:%02d:%04d\n",
lt.wHour, lt.wMinute, lt.wSecond, lt.wMilliseconds);
return 0;
}
// Absolute difference of two scalars, computed as sqrt((x2 - x1)^2).
float distance1d(float x1, float x2)
{
    float diff = x2 - x1;
    return sqrt(diff * diff);
}
// Component-wise sum of absolute differences (L1 distance) between two
// float3 points; each component goes through distance1d.
float distance3d(float3 x1, float3 x2){
    float total = 0.0f;
    total += distance1d(x1.x, x2.x);
    total += distance1d(x1.y, x2.y);
    total += distance1d(x1.z, x2.z);
    return total;
}
// One Euler step (dt = 1) for all n particles. Grid-stride loop, so any
// launch configuration covers the whole array.
// Note: iter_randoms is indexed by `iter` only — every particle receives the
// SAME (x,y,z) velocity for a given iteration.
__global__
void timestep(Particle *particles, int n, float *iter_randoms, int iter)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
float dt = 1.0;
int iter_index = iter*3;
for (int i = index; i < n; i = i + stride){
particles[i].velocity.x = iter_randoms[iter_index];
particles[i].velocity.y = iter_randoms[iter_index+1];
particles[i].velocity.z = iter_randoms[iter_index+2];
particles[i].position.x = particles[i].position.x + dt*particles[i].velocity.x;
particles[i].position.y = particles[i].position.y + dt*particles[i].velocity.y;
particles[i].position.z = particles[i].position.z + dt*particles[i].velocity.z;
}
}
// CPU reference implementation of timestep(): identical update, serial loop.
// Kept for validating the GPU result (see total_error in main).
void timestep_cpu(Particle *particles, int n, float *iter_randoms, int iter){
float dt = 1.0;
int iter_index = iter*3;
for (int i = 0; i < n; i++){
//printf("%d", i);
particles[i].velocity.x = iter_randoms[iter_index];
particles[i].velocity.y = iter_randoms[iter_index+1];
particles[i].velocity.z = iter_randoms[iter_index+2];
particles[i].position.x = particles[i].position.x + dt*particles[i].velocity.x;
particles[i].position.y = particles[i].position.y + dt*particles[i].velocity.y;
particles[i].position.z = particles[i].position.z + dt*particles[i].velocity.z;
}
}
// Initialize particle i's position with one-decimal random coordinates in
// [-0.5, 0.4] (rand()%10 / 10 - 0.5) and mirror the same values into the
// CPU reference array so both simulations start identically.
void init_random(Particle *particles, Particle *cpu_particles, int i){
particles[i].position.x = (rand() % 10)/10.0 - 0.5;
//printf("%f %f %f\n", particles[i].position.x, particles[i].position.y, particles[i].position.z);
particles[i].position.y = (rand() % 10)/10.0 - 0.5;
particles[i].position.z = (rand() % 10)/10.0 - 0.5;
cpu_particles[i].position.x = particles[i].position.x;
//printf("%f %f %f\n", particles[i].position.x, particles[i].position.y, particles[i].position.z);
cpu_particles[i].position.y = particles[i].position.y;
cpu_particles[i].position.z = particles[i].position.z;
}
// Driver: run NUM_ITERATIONS GPU timesteps over NUM_PARTICLES particles in
// managed memory and compare against a CPU reference.
// NOTE(review): the CPU reference loop is commented out below, so
// cpu_particles still holds the initial positions and total_error does NOT
// measure GPU/CPU agreement. Also: cudaMallocManaged return codes are
// unchecked, the per-iteration hipDeviceSynchronize is unnecessary (same-
// stream kernels already serialize), and cpu_particles is never freed.
int main(){
wmain();
srand(1337);
//bool useGPU = true;
Particle *particles;
Particle *cpu_particles;
float *iter_randoms;
hipMallocManaged(&iter_randoms, NUM_DIMENSIONS*NUM_ITERATIONS*sizeof(float));
// One shared (x,y,z) velocity per iteration, values in [-0.5, 0.4].
for(int i = 0; i < NUM_DIMENSIONS*NUM_ITERATIONS; i++){
iter_randoms[i] = (rand() % 10)/10.0 - 0.5;
}
hipMallocManaged(&particles, NUM_PARTICLES*sizeof(Particle));
hipMallocManaged(&cpu_particles, NUM_PARTICLES*sizeof(Particle));
for(int i = 0; i < NUM_PARTICLES; i++){
init_random(particles, cpu_particles, i);
}
printf("Before GPU\n");
wmain();
// Ceil-div launch: enough blocks to cover every particle once.
int numBlocks = (NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE;
for(int iter = 0; iter < NUM_ITERATIONS; iter++){
//printf("iter %d", iter);
hipLaunchKernelGGL(( timestep), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, particles, NUM_PARTICLES, iter_randoms, iter);
hipDeviceSynchronize();
}
printf("After GPU\n");
wmain();
/*
printf("Before CPU\n");
wmain();
//hipDeviceSynchronize();
for(int iter = 0; iter < NUM_ITERATIONS; iter++)
timestep_cpu(cpu_particles, NUM_PARTICLES, iter_randoms, iter);
printf("After CPU\n");
wmain();
*/
// Sum of per-particle L1 position differences (GPU vs CPU arrays).
float total_error = 0.0f;
for(int i = 0; i < NUM_PARTICLES; i++){
total_error += distance3d(particles[i].position, cpu_particles[i].position);
}
printf("total_error %f\n", total_error);
for(int i = NUM_PARTICLES-5; i < NUM_PARTICLES; i++){
printf("%f %f %f\n", particles[i].position.x, particles[i].position.y, particles[i].position.z);
printf("%f %f %f\n", particles[i].velocity.x, particles[i].velocity.y, particles[i].velocity.z);
}
printf("\n");
for(int i = NUM_PARTICLES-5; i < NUM_PARTICLES; i++){
printf("%f %f %f\n", cpu_particles[i].position.x, cpu_particles[i].position.y, cpu_particles[i].position.z);
printf("%f %f %f\n", cpu_particles[i].velocity.x, cpu_particles[i].velocity.y, cpu_particles[i].velocity.z);
}
hipFree(iter_randoms);
hipFree(particles);
wmain();
return 0;
} | 980dbfb27e6cd22e9be7db8e221ebf5e0b20cba6.cu | #include <iostream>
#include <math.h>
#include <windows.h>
#include <wchar.h>
#define NUM_PARTICLES 100000000
#define NUM_ITERATIONS 100
#define BLOCK_SIZE 256 //128 //256
#define NUM_DIMENSIONS 3
typedef struct Particle {
float3 position;
float3 velocity;
} Particle;
// Print the current local wall-clock time (Windows API) — used below as a
// crude timestamp around the GPU/CPU phases. Always returns 0.
int wmain(void) {
SYSTEMTIME lt = {0};
GetLocalTime(&lt);
wprintf(L"The local time is: %02d:%02d:%02d:%04d\n",
lt.wHour, lt.wMinute, lt.wSecond, lt.wMilliseconds);
return 0;
}
// Absolute difference of two scalars, computed as sqrt((x2 - x1)^2).
float distance1d(float x1, float x2)
{
    float diff = x2 - x1;
    return sqrt(diff * diff);
}
// Component-wise sum of absolute differences (L1 distance) between two
// float3 points; each component goes through distance1d.
float distance3d(float3 x1, float3 x2){
    float total = 0.0f;
    total += distance1d(x1.x, x2.x);
    total += distance1d(x1.y, x2.y);
    total += distance1d(x1.z, x2.z);
    return total;
}
// One Euler step (dt = 1) for all n particles. Grid-stride loop, so any
// launch configuration covers the whole array.
// Note: iter_randoms is indexed by `iter` only — every particle receives the
// SAME (x,y,z) velocity for a given iteration.
__global__
void timestep(Particle *particles, int n, float *iter_randoms, int iter)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
float dt = 1.0;
int iter_index = iter*3;
for (int i = index; i < n; i = i + stride){
particles[i].velocity.x = iter_randoms[iter_index];
particles[i].velocity.y = iter_randoms[iter_index+1];
particles[i].velocity.z = iter_randoms[iter_index+2];
particles[i].position.x = particles[i].position.x + dt*particles[i].velocity.x;
particles[i].position.y = particles[i].position.y + dt*particles[i].velocity.y;
particles[i].position.z = particles[i].position.z + dt*particles[i].velocity.z;
}
}
// CPU reference implementation of timestep(): identical update, serial loop.
// Kept for validating the GPU result (see total_error in main).
void timestep_cpu(Particle *particles, int n, float *iter_randoms, int iter){
float dt = 1.0;
int iter_index = iter*3;
for (int i = 0; i < n; i++){
//printf("%d", i);
particles[i].velocity.x = iter_randoms[iter_index];
particles[i].velocity.y = iter_randoms[iter_index+1];
particles[i].velocity.z = iter_randoms[iter_index+2];
particles[i].position.x = particles[i].position.x + dt*particles[i].velocity.x;
particles[i].position.y = particles[i].position.y + dt*particles[i].velocity.y;
particles[i].position.z = particles[i].position.z + dt*particles[i].velocity.z;
}
}
// Initialize particle i's position with one-decimal random coordinates in
// [-0.5, 0.4] (rand()%10 / 10 - 0.5) and mirror the same values into the
// CPU reference array so both simulations start identically.
void init_random(Particle *particles, Particle *cpu_particles, int i){
particles[i].position.x = (rand() % 10)/10.0 - 0.5;
//printf("%f %f %f\n", particles[i].position.x, particles[i].position.y, particles[i].position.z);
particles[i].position.y = (rand() % 10)/10.0 - 0.5;
particles[i].position.z = (rand() % 10)/10.0 - 0.5;
cpu_particles[i].position.x = particles[i].position.x;
//printf("%f %f %f\n", particles[i].position.x, particles[i].position.y, particles[i].position.z);
cpu_particles[i].position.y = particles[i].position.y;
cpu_particles[i].position.z = particles[i].position.z;
}
// Driver: run NUM_ITERATIONS GPU timesteps over NUM_PARTICLES particles in
// managed memory and compare against a CPU reference.
// NOTE(review): the CPU reference loop is commented out below, so
// cpu_particles still holds the initial positions and total_error does NOT
// measure GPU/CPU agreement. Also: cudaMallocManaged return codes are
// unchecked, the per-iteration cudaDeviceSynchronize is unnecessary (same-
// stream kernels already serialize), and cpu_particles is never freed.
int main(){
wmain();
srand(1337);
//bool useGPU = true;
Particle *particles;
Particle *cpu_particles;
float *iter_randoms;
cudaMallocManaged(&iter_randoms, NUM_DIMENSIONS*NUM_ITERATIONS*sizeof(float));
// One shared (x,y,z) velocity per iteration, values in [-0.5, 0.4].
for(int i = 0; i < NUM_DIMENSIONS*NUM_ITERATIONS; i++){
iter_randoms[i] = (rand() % 10)/10.0 - 0.5;
}
cudaMallocManaged(&particles, NUM_PARTICLES*sizeof(Particle));
cudaMallocManaged(&cpu_particles, NUM_PARTICLES*sizeof(Particle));
for(int i = 0; i < NUM_PARTICLES; i++){
init_random(particles, cpu_particles, i);
}
printf("Before GPU\n");
wmain();
// Ceil-div launch: enough blocks to cover every particle once.
int numBlocks = (NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE;
for(int iter = 0; iter < NUM_ITERATIONS; iter++){
//printf("iter %d", iter);
timestep<<<numBlocks, BLOCK_SIZE>>>(particles, NUM_PARTICLES, iter_randoms, iter);
cudaDeviceSynchronize();
}
printf("After GPU\n");
wmain();
/*
printf("Before CPU\n");
wmain();
//cudaDeviceSynchronize();
for(int iter = 0; iter < NUM_ITERATIONS; iter++)
timestep_cpu(cpu_particles, NUM_PARTICLES, iter_randoms, iter);
printf("After CPU\n");
wmain();
*/
// Sum of per-particle L1 position differences (GPU vs CPU arrays).
float total_error = 0.0f;
for(int i = 0; i < NUM_PARTICLES; i++){
total_error += distance3d(particles[i].position, cpu_particles[i].position);
}
printf("total_error %f\n", total_error);
for(int i = NUM_PARTICLES-5; i < NUM_PARTICLES; i++){
printf("%f %f %f\n", particles[i].position.x, particles[i].position.y, particles[i].position.z);
printf("%f %f %f\n", particles[i].velocity.x, particles[i].velocity.y, particles[i].velocity.z);
}
printf("\n");
for(int i = NUM_PARTICLES-5; i < NUM_PARTICLES; i++){
printf("%f %f %f\n", cpu_particles[i].position.x, cpu_particles[i].position.y, cpu_particles[i].position.z);
printf("%f %f %f\n", cpu_particles[i].velocity.x, cpu_particles[i].velocity.y, cpu_particles[i].velocity.z);
}
cudaFree(iter_randoms);
cudaFree(particles);
wmain();
return 0;
}
cea2a7a60e7addbcfa1feed2c5a83d5c92e93804.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Created by A. Aichert on Wed Jan 10th 2018
#include <iostream>
#include <LibUtilsCuda/CudaMemory.h>
#include <LibUtilsCuda/UtilsCuda.hxx>
#include <LibEpipolarConsistency/RectifiedFBCC.h>
// Make sure a point is within rectangle [0,0]-[n_u n_v]
// Make sure a point is within rectangle [0,0]-[n_u n_v]
// (both bounds inclusive; also returns false for NaN inputs, since every
// comparison fails).
__device__ inline bool inBounds(float u, float v, float n_u, float n_v)
{
if (u<=n_u&&v<=n_v&&u>=0&&v>=0) return true;
return false;
}
// A basic sorting algorithm of four values.
// Bubble sort of exactly four floats in place, ascending; three passes of
// three adjacent compare-swaps are sufficient for four elements.
__device__ __host__ void sort4(float *v)
{
for (int j=0;j<3;j++)
for (int i=0;i<3;i++)
if (v[i]>v[i+1])
{
float tmp=v[i];
v[i]=v[i+1];
v[i+1]=tmp;
}
}
// Compute ECC or FBCC for certain epipolar lines on the image.
// One thread per line. Each line is given in Hessian normal form
// (l[0],l[1],l[2]); the thread clips it against the image rectangle, then
// integrates along it by ray casting through texture `tex`.
// If fbcc_d is non-null, an FBCC-weighted integral is computed; otherwise a
// central-difference approximation of the derivative orthogonal to the line
// (ECC) is returned in out[idx].
__global__ void kernel_computeLineIntegrals(
    float* lines, short line_stride, short n_lines,
    float *fbcc_d, short fbcc_stride,
    hipTextureObject_t tex, short n_u, short n_v,
    float *out)
{
    // Find index of current thread
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx>=n_lines) return;
    // The line
    float l[]={lines[idx*line_stride+0], lines[idx*line_stride+1], lines[idx*line_stride+2]};
    // Establish 1D line coordinate system (line coordinate t)
    // Line Origin : Closest point to the origin (t=0)
    float o[]={-l[2]*l[0],-l[2]*l[1]};
    // Line direction
    float d[]={l[1],-l[0]};
    // Compute range at which the line intersects the image
    float t_min,t_max;
    {
        float ts[]={
            ( 1-o[0])/d[0],
            (n_u-1-o[0])/d[0],
            ( 1-o[1])/d[1],
            (n_v-1-o[1])/d[1]
        };
        // Avoid Inf/NaN (fix: compare against a float literal — the double
        // 1e-12 forced a double-precision compare in a float-only kernel)
        if (d[0]*d[0]<1e-12f) ts[0]=-(ts[1]=1e10f);
        if (d[1]*d[1]<1e-12f) ts[2]=-(ts[3]=1e10f);
        // Sort and the middle two are image edges
        sort4(ts);
        t_min=ts[1];
        t_max=ts[2];
    }
    // Early exit if the point for t_min (and hence the whole clipped segment)
    // is NOT within bounds, or the line was invalid
    if (!inBounds(o[0]+t_min*d[0],o[1]+t_min*d[1],n_u,n_v))
    {
        out[idx]=0;
        return;
    }
    // Pixel step in image for raycasting (fix: float literal, was double 0.4).
    const float step=0.4f;
    // Account for half-pixel offset when sampling textures
    o[0]+=.5f;
    o[1]+=.5f;
    if (fbcc_d)
    {
        FBCC_weighting_info fbcc=*((FBCC_weighting_info*)(fbcc_d+fbcc_stride*idx));
        // Rectified by Weighting
        float sum=0;
        for (float t=t_min; t<=t_max; t+=step)
        {
            // Compute virtual detector's u-coordinate relative to closest point on line to source
            float u_prime=fbcc.phi.transform(t)-fbcc.t_prime_ak;
            // Fix: 1.0f/ keeps the reciprocal square root in single precision
            // (1.0/ promoted to a slow double divide per sample).
            float fbcc_weight=1.0f/sqrtf(u_prime*u_prime+fbcc.d_l_kappa_C_sq);
            float Jphi=fbcc.phi.derivative(t);
            // Weighted integral
            sum+=step*tex2D<float>(tex,o[0]+t*d[0],o[1]+t*d[1])*Jphi*fbcc_weight;
        }
        out[idx]=sum;
    }
    else
    {
        // Usual ECC. Numeric differenciation in direction l[0],l[1]
        l[0]*=0.5f;
        l[1]*=0.5f;
        // Start summation over two parallel lines
        float sump=0, summ=0;
        for (float t=t_min; t<=t_max; t+=step)
        {
            sump+=tex2D<float>(tex,o[0]+t*d[0]+l[0],o[1]+t*d[1]+l[1])*step;
            summ+=tex2D<float>(tex,o[0]+t*d[0]-l[0],o[1]+t*d[1]-l[1])*step;
        }
        // and return difference (approximation to derivative)
        out[idx]=(sump-summ);
    }
}
// Compute ECC or FBCC for certain epipolar lines on the image.
// Host wrapper: compute ECC or FBCC for certain epipolar lines on the image.
// All pointers are device pointers; integrals_out_d must hold n_lines floats.
void cuda_computeLineIntegrals(
    float* lines_d, short line_stride, short n_lines, // Lines in Hessian normal form, number of float values to next line and number of lines
    float *fbcc_d, short fbcc_stride, // Optional: FBCC_weighting_info
    hipTextureObject_t I, short n_u, short n_v, // The image and its size
    float *integrals_out_d) // Output memory: the integrals (size is n_lines)
{
    // Threads per block and problem size (1D launch, one thread per line)
    dim3 block_size;
    block_size.x=32;
    dim3 grid_size;
    grid_size.x = iDivUp(n_lines,block_size.x);
    // Launch kernel (radon transform).
    // BUG FIX: the image height was previously passed as n_u twice; the kernel
    // now receives n_v as its height argument.
    hipLaunchKernelGGL(( kernel_computeLineIntegrals), dim3(grid_size), dim3(block_size), 0, 0,
        lines_d, line_stride, n_lines,
        fbcc_d, fbcc_stride,
        I, n_u, n_v,
        integrals_out_d
        );
    hipDeviceSynchronize();
    cudaCheckState
}
| cea2a7a60e7addbcfa1feed2c5a83d5c92e93804.cu |
// Created by A. Aichert on Wed Jan 10th 2018
#include <iostream>
#include <LibUtilsCuda/CudaMemory.h>
#include <LibUtilsCuda/UtilsCuda.hxx>
#include <LibEpipolarConsistency/RectifiedFBCC.h>
// Make sure a point is within rectangle [0,0]-[n_u n_v]
// Check that point (u,v) falls within the rectangle [0,0]-[n_u,n_v]
// (boundary values count as inside).
__device__ inline bool inBounds(float u, float v, float n_u, float n_v)
{
    bool insideU = (u >= 0) && (u <= n_u);
    bool insideV = (v >= 0) && (v <= n_v);
    return insideU && insideV;
}
// A basic sorting algorithm of four values.
// In-place ascending sort of four floats via repeated adjacent swaps
// (three full passes guarantee order for four elements).
__device__ __host__ void sort4(float *v)
{
    for (int p = 0; p < 3; ++p)
        for (int q = 0; q < 3; ++q)
        {
            if (v[q + 1] < v[q])
            {
                float smaller = v[q + 1];
                v[q + 1] = v[q];
                v[q] = smaller;
            }
        }
}
// Compute ECC or FBCC for certain epipolar lines on the image.
// Compute ECC or FBCC line integrals over the image for n_lines epipolar lines.
// One thread handles one line; launch with a 1D grid covering n_lines threads.
//   lines       : lines in Hessian normal form (l0,l1,l2), line_stride floats apart
//   fbcc_d      : optional per-line FBCC_weighting_info (fbcc_stride floats apart);
//                 when null, plain ECC (difference of two offset integrals) is computed
//   tex         : 2D image texture of size n_u x n_v
//   out         : one integral per line (zero for lines missing the image)
// Fixes vs. previous revision: all literals are single precision (0.4f, 1.0f/sqrtf,
// 1e-12f) so no computation is silently promoted to double on the device.
__global__ void kernel_computeLineIntegrals(
    float* lines, short line_stride, short n_lines,
    float *fbcc_d, short fbcc_stride,
    cudaTextureObject_t tex, short n_u, short n_v,
    float *out)
{
    // Find index of current thread; one thread per line.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx>=n_lines) return;
    // The line in Hessian normal form: (l[0],l[1]) unit normal, l[2] signed distance.
    float l[]={lines[idx*line_stride+0], lines[idx*line_stride+1], lines[idx*line_stride+2]};
    // Establish 1D line coordinate system (line coordinate t)
    // Line Origin : Closest point of the line to the image origin (t=0)
    float o[]={-l[2]*l[0],-l[2]*l[1]};
    // Line direction (normal rotated by 90 degrees)
    float d[]={l[1],-l[0]};
    // Compute parameter range [t_min,t_max] over which the line crosses the image
    float t_min,t_max;
    {
        // Intersections with the four image border lines
        float ts[]={
            (    1-o[0])/d[0],
            (n_u-1-o[0])/d[0],
            (    1-o[1])/d[1],
            (n_v-1-o[1])/d[1]
        };
        // Avoid Inf/NaN for (near-)axis-aligned lines
        if (d[0]*d[0]<1e-12f) ts[0]=-(ts[1]=1e10f);
        if (d[1]*d[1]<1e-12f) ts[2]=-(ts[3]=1e10f);
        // After sorting, the middle two intersections bound the visible segment
        sort4(ts);
        t_min=ts[1];
        t_max=ts[2];
    }
    // Early exit if the segment start (and hence the whole segment) lies outside
    // the image, or the line was invalid.
    if (!inBounds(o[0]+t_min*d[0],o[1]+t_min*d[1],n_u,n_v))
    {
        out[idx]=0;
        return;
    }
    // Pixel step along the line for ray casting.
    const float step=0.4f;
    // Account for half-pixel offset when sampling textures
    o[0]+=.5f;
    o[1]+=.5f;
    if (fbcc_d)
    {
        FBCC_weighting_info fbcc=*((FBCC_weighting_info*)(fbcc_d+fbcc_stride*idx));
        // Fan-beam consistency: rectification by weighting
        float sum=0;
        for (float t=t_min; t<=t_max; t+=step)
        {
            // Compute virtual detector's u-coordinate relative to closest point on line to source
            float u_prime=fbcc.phi.transform(t)-fbcc.t_prime_ak;
            // 1.0f keeps the divide in single precision (was a double divide)
            float fbcc_weight=1.0f/sqrtf(u_prime*u_prime+fbcc.d_l_kappa_C_sq);
            float Jphi=fbcc.phi.derivative(t);
            // Weighted integral: sample * reparametrization Jacobian * distance weight
            sum+=step*tex2D<float>(tex,o[0]+t*d[0],o[1]+t*d[1])*Jphi*fbcc_weight;
        }
        out[idx]=sum;
    }
    else
    {
        // Usual ECC. Numeric differentiation in direction l[0],l[1]
        l[0]*=0.5f;
        l[1]*=0.5f;
        // Integrate over two lines offset by +/- half a pixel in normal direction
        float sump=0, summ=0;
        for (float t=t_min; t<=t_max; t+=step)
        {
            sump+=tex2D<float>(tex,o[0]+t*d[0]+l[0],o[1]+t*d[1]+l[1])*step;
            summ+=tex2D<float>(tex,o[0]+t*d[0]-l[0],o[1]+t*d[1]-l[1])*step;
        }
        // and return difference (central-difference approximation to the derivative)
        out[idx]=(sump-summ);
    }
}
// Compute ECC or FBCC for certain epipolar lines on the image.
// Host wrapper: compute ECC or FBCC for certain epipolar lines on the image.
// All pointers are device pointers; integrals_out_d must hold n_lines floats.
void cuda_computeLineIntegrals(
    float* lines_d, short line_stride, short n_lines, // Lines in Hessian normal form, number of float values to next line and number of lines
    float *fbcc_d, short fbcc_stride, // Optional: FBCC_weighting_info
    cudaTextureObject_t I, short n_u, short n_v, // The image and its size
    float *integrals_out_d) // Output memory: the integrals (size is n_lines)
{
    // Threads per block and problem size (1D launch, one thread per line)
    dim3 block_size;
    block_size.x=32;
    dim3 grid_size;
    grid_size.x = iDivUp(n_lines,block_size.x);
    // Launch kernel (radon transform).
    // BUG FIX: the image height was previously passed as n_u twice; the kernel
    // now receives n_v as its height argument.
    kernel_computeLineIntegrals<<<grid_size, block_size>>>(
        lines_d, line_stride, n_lines,
        fbcc_d, fbcc_stride,
        I, n_u, n_v,
        integrals_out_d
        );
    cudaDeviceSynchronize();
    cudaCheckState
}
|
29a77c5cd0d718e4777d38c77e7c35658a22e7e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
// Element-wise vector addition: out[i] = in1[i] + in2[i] for every i in [0, len).
// Launch with at least len threads; threads past the tail exit immediately.
__global__ void vecAdd(float *in1, float *in2, float *out, int len)
{
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= len)
        return; // guard the grid tail
    out[gid] = in1[gid] + in2[gid];
}
// Vector-addition driver for the wb course harness: imports two input vectors,
// adds them on the GPU with vecAdd, and hands the result to wbSolution for checking.
int main(int argc, char **argv)
{
    wbArg_t args;
    int inputLength;        // number of float elements per input vector
    float *hostInput1;
    float *hostInput2;
    float *hostOutput;
    float *deviceInput1;
    float *deviceInput2;
    float *deviceOutput;
    args = wbArg_read(argc, argv);
    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput1 = ( float * )wbImport(wbArg_getInputFile(args, 0), &inputLength);
    hostInput2 = ( float * )wbImport(wbArg_getInputFile(args, 1), &inputLength);
    hostOutput = ( float * )malloc(inputLength * sizeof(float));
    wbTime_stop(Generic, "Importing data and creating memory on host");
    wbLog(TRACE, "The input length is ", inputLength);
    wbTime_start(GPU, "Allocating GPU memory.");
    //@@ Allocate GPU memory here
    //@@----------------------------------------------------------------
    int size = inputLength *sizeof(float) ;   // byte count of one vector
    // NOTE(review): return codes of hipMalloc/hipMemcpy/kernel launch below are
    // never checked -- consider wrapping them in an error-check macro.
    hipMalloc((void**)&deviceInput1,size);
    hipMalloc((void**)&deviceInput2,size);
    hipMalloc((void**)&deviceOutput,size);
    //@@----------------------------------------------------------------
    wbTime_stop(GPU, "Allocating GPU memory.");
    wbTime_start(GPU, "Copying input memory to the GPU.");
    //@@ Copy memory to the GPU here
    //@@----------------------------------------------------------------
    hipMemcpy(deviceInput1,hostInput1,size,hipMemcpyHostToDevice);
    hipMemcpy(deviceInput2,hostInput2,size,hipMemcpyHostToDevice);
    //@@----------------------------------------------------------------
    wbTime_stop(GPU, "Copying input memory to the GPU.");
    //@@ Initialize the grid and block dimensions here
    //@@----------------------------------------------------------------
    // ceil(inputLength / 256) blocks of 256 threads cover every element
    dim3 DimGrid((inputLength-1)/256+1,1,1);
    dim3 DimBlock(256,1,1);
    //@@----------------------------------------------------------------
    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Launch the GPU Kernel here
    //@@----------------------------------------------------------------
    hipLaunchKernelGGL(( vecAdd), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceInput1,deviceInput2,deviceOutput,inputLength);
    //@@----------------------------------------------------------------
    // block until the kernel has finished so the Compute timer is meaningful
    hipDeviceSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");
    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU here
    //@@----------------------------------------------------------------
    hipMemcpy(hostOutput,deviceOutput,size,hipMemcpyDeviceToHost);
    //@@----------------------------------------------------------------
    wbTime_stop(Copy, "Copying output memory to the CPU");
    wbTime_start(GPU, "Freeing GPU Memory");
    //@@ Free the GPU memory here
    //@@----------------------------------------------------------------
    hipFree(deviceInput1);
    hipFree(deviceInput2);
    hipFree(deviceOutput);
    wbTime_stop(GPU, "Freeing GPU Memory");
    //@@----------------------------------------------------------------
    wbSolution(args, hostOutput, inputLength);
    free(hostInput1);
    free(hostInput2);
    free(hostOutput);
    return 0;
}
| 29a77c5cd0d718e4777d38c77e7c35658a22e7e2.cu | #include <wb.h>
// Element-wise vector addition: out[i] = in1[i] + in2[i] for every i in [0, len).
// Launch with at least len threads; threads past the tail do nothing.
__global__ void vecAdd(float *in1, float *in2, float *out, int len)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < len)
    {
        out[gid] = in1[gid] + in2[gid];
    }
}
// Vector-addition driver for the wb course harness: imports two input vectors,
// adds them on the GPU with vecAdd, and hands the result to wbSolution for checking.
int main(int argc, char **argv)
{
    wbArg_t args;
    int inputLength;        // number of float elements per input vector
    float *hostInput1;
    float *hostInput2;
    float *hostOutput;
    float *deviceInput1;
    float *deviceInput2;
    float *deviceOutput;
    args = wbArg_read(argc, argv);
    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput1 = ( float * )wbImport(wbArg_getInputFile(args, 0), &inputLength);
    hostInput2 = ( float * )wbImport(wbArg_getInputFile(args, 1), &inputLength);
    hostOutput = ( float * )malloc(inputLength * sizeof(float));
    wbTime_stop(Generic, "Importing data and creating memory on host");
    wbLog(TRACE, "The input length is ", inputLength);
    wbTime_start(GPU, "Allocating GPU memory.");
    //@@ Allocate GPU memory here
    //@@----------------------------------------------------------------
    int size = inputLength *sizeof(float) ;   // byte count of one vector
    // NOTE(review): return codes of cudaMalloc/cudaMemcpy/kernel launch below are
    // never checked -- consider wrapping them in an error-check macro.
    cudaMalloc((void**)&deviceInput1,size);
    cudaMalloc((void**)&deviceInput2,size);
    cudaMalloc((void**)&deviceOutput,size);
    //@@----------------------------------------------------------------
    wbTime_stop(GPU, "Allocating GPU memory.");
    wbTime_start(GPU, "Copying input memory to the GPU.");
    //@@ Copy memory to the GPU here
    //@@----------------------------------------------------------------
    cudaMemcpy(deviceInput1,hostInput1,size,cudaMemcpyHostToDevice);
    cudaMemcpy(deviceInput2,hostInput2,size,cudaMemcpyHostToDevice);
    //@@----------------------------------------------------------------
    wbTime_stop(GPU, "Copying input memory to the GPU.");
    //@@ Initialize the grid and block dimensions here
    //@@----------------------------------------------------------------
    // ceil(inputLength / 256) blocks of 256 threads cover every element
    dim3 DimGrid((inputLength-1)/256+1,1,1);
    dim3 DimBlock(256,1,1);
    //@@----------------------------------------------------------------
    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Launch the GPU Kernel here
    //@@----------------------------------------------------------------
    vecAdd<<<DimGrid,DimBlock>>>(deviceInput1,deviceInput2,deviceOutput,inputLength);
    //@@----------------------------------------------------------------
    // block until the kernel has finished so the Compute timer is meaningful
    cudaDeviceSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");
    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU here
    //@@----------------------------------------------------------------
    cudaMemcpy(hostOutput,deviceOutput,size,cudaMemcpyDeviceToHost);
    //@@----------------------------------------------------------------
    wbTime_stop(Copy, "Copying output memory to the CPU");
    wbTime_start(GPU, "Freeing GPU Memory");
    //@@ Free the GPU memory here
    //@@----------------------------------------------------------------
    cudaFree(deviceInput1);
    cudaFree(deviceInput2);
    cudaFree(deviceOutput);
    wbTime_stop(GPU, "Freeing GPU Memory");
    //@@----------------------------------------------------------------
    wbSolution(args, hostOutput, inputLength);
    free(hostInput1);
    free(hostInput2);
    free(hostOutput);
    return 0;
}
|
c85893b38e3552b9d8252213200ae4108e297665.hip | // !!! This is a file automatically generated by hipify!!!
#include "central.h"
#include <hip/hip_runtime_api.h>
#include "sobel.cuh"
// Sobel operator: writes the gradient magnitude of srcImg into dstImg.
// Both buffers are packed row-major with exactly rows*cols elements, i.e. the
// row stride is cols (the host wrapper copies rows*cols bytes).
// Launch with a 2D grid covering at least cols x rows threads; surplus
// threads exit via the bounds checks.
__global__ void sobelKernel (uchar* dstImg, uchar* srcImg, int rows, int cols)
{
    unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;
    // Row stride of the image buffers.
    // BUG FIX: this was previously taken from the grid width
    // (gridDim.x * blockDim.x), which only equals the image width when cols is
    // a multiple of the block width; otherwise rows were addressed with the
    // wrong pitch and reads/writes ran out of bounds.
    unsigned int step = cols;
    float sX = 0.0f, sY = 0.0f;
    // Only pixels whose full 3x3 neighbourhood lies inside the image respond.
    if(in_img(x-1,y-1,rows,cols) && in_img(x+1,y+1,rows,cols)) {
        // Horizontal gradient (Sobel x mask), normalized by 1/8.
        sX = (-srcImg[step * (y - 1) + x - 1] - 2 * srcImg[step * y + x - 1] - srcImg[step * (y + 1) + x - 1]
              + srcImg[step * (y - 1) + x + 1] + 2 * srcImg[step * y + x + 1] + srcImg[step * (y + 1) + x + 1]) * 0.125f;
        // Vertical gradient (Sobel y mask), normalized by 1/8.
        sY = (-srcImg[step * (y - 1) + x - 1] - 2 * srcImg[step * (y - 1) + x] - srcImg[step * (y - 1) + x + 1]
              + srcImg[step * (y + 1) + x - 1] + 2 * srcImg[step * (y + 1) + x] + srcImg[step * (y + 1) + x + 1]) * 0.125f;
    }
    // Store the gradient magnitude (single precision suffices for uchar output).
    if(in_img(x,y,rows,cols))
        dstImg[step * y + x] = sqrtf(sX * sX + sY * sY);
}
// Run the Sobel edge detector on srcImg and write the gradient magnitude
// image into dstImg (both single-channel 8-bit, same dimensions).
void sobel (Mat_<uchar>& dstImg, const Mat_<uchar>& srcImg)
{
    const unsigned int pixelCount = dstImg.rows * dstImg.cols;
    const size_t byteCount = pixelCount * sizeof(uchar);
    uchar *d_src, *d_dst;
    // Device buffers for the input image and the result.
    CHECK_CUDA(hipMalloc((void**) &d_src, byteCount));
    CHECK_CUDA(hipMalloc((void**) &d_dst, byteCount));
    // Upload the source image.
    CHECK_CUDA(hipMemcpy(d_src, srcImg.data, byteCount, hipMemcpyHostToDevice));
    // 16x16 thread tiles, enough blocks to cover the whole image.
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks(round_up(dstImg.cols, threadsPerBlock.x), round_up(dstImg.rows, threadsPerBlock.y));
    hipLaunchKernelGGL(( sobelKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_dst, d_src, srcImg.rows, srcImg.cols);
    // Download the result (this copy implies device synchronization).
    CHECK_CUDA(hipMemcpy(dstImg.data, d_dst, byteCount, hipMemcpyDeviceToHost));
    // Release device resources.
    CHECK_CUDA(hipFree(d_src));
    CHECK_CUDA(hipFree(d_dst));
    CHECK_CUDA(hipDeviceReset());
}
| c85893b38e3552b9d8252213200ae4108e297665.cu | #include "central.h"
#include <cuda_runtime_api.h>
#include "sobel.cuh"
// Sobel operator: writes the gradient magnitude of srcImg into dstImg.
// Both buffers are packed row-major with exactly rows*cols elements, i.e. the
// row stride is cols (the host wrapper copies rows*cols bytes).
// Launch with a 2D grid covering at least cols x rows threads; surplus
// threads exit via the bounds checks.
__global__ void sobelKernel (uchar* dstImg, uchar* srcImg, int rows, int cols)
{
    unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;
    // Row stride of the image buffers.
    // BUG FIX: this was previously taken from the grid width
    // (gridDim.x * blockDim.x), which only equals the image width when cols is
    // a multiple of the block width; otherwise rows were addressed with the
    // wrong pitch and reads/writes ran out of bounds.
    unsigned int step = cols;
    float sX = 0.0f, sY = 0.0f;
    // Only pixels whose full 3x3 neighbourhood lies inside the image respond.
    if(in_img(x-1,y-1,rows,cols) && in_img(x+1,y+1,rows,cols)) {
        // Horizontal gradient (Sobel x mask), normalized by 1/8.
        sX = (-srcImg[step * (y - 1) + x - 1] - 2 * srcImg[step * y + x - 1] - srcImg[step * (y + 1) + x - 1]
              + srcImg[step * (y - 1) + x + 1] + 2 * srcImg[step * y + x + 1] + srcImg[step * (y + 1) + x + 1]) * 0.125f;
        // Vertical gradient (Sobel y mask), normalized by 1/8.
        sY = (-srcImg[step * (y - 1) + x - 1] - 2 * srcImg[step * (y - 1) + x] - srcImg[step * (y - 1) + x + 1]
              + srcImg[step * (y + 1) + x - 1] + 2 * srcImg[step * (y + 1) + x] + srcImg[step * (y + 1) + x + 1]) * 0.125f;
    }
    // Store the gradient magnitude (single precision suffices for uchar output).
    if(in_img(x,y,rows,cols))
        dstImg[step * y + x] = sqrtf(sX * sX + sY * sY);
}
// Run the Sobel edge detector on srcImg and write the gradient magnitude
// image into dstImg (both single-channel 8-bit, same dimensions).
void sobel (Mat_<uchar>& dstImg, const Mat_<uchar>& srcImg)
{
    const unsigned int pixelCount = dstImg.rows * dstImg.cols;
    const size_t byteCount = pixelCount * sizeof(uchar);
    uchar *d_src, *d_dst;
    // Device buffers for the input image and the result.
    CHECK_CUDA(cudaMalloc((void**) &d_src, byteCount));
    CHECK_CUDA(cudaMalloc((void**) &d_dst, byteCount));
    // Upload the source image.
    CHECK_CUDA(cudaMemcpy(d_src, srcImg.data, byteCount, cudaMemcpyHostToDevice));
    // 16x16 thread tiles, enough blocks to cover the whole image.
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks(round_up(dstImg.cols, threadsPerBlock.x), round_up(dstImg.rows, threadsPerBlock.y));
    sobelKernel<<<numBlocks, threadsPerBlock>>>(d_dst, d_src, srcImg.rows, srcImg.cols);
    // Download the result (this copy implies device synchronization).
    CHECK_CUDA(cudaMemcpy(dstImg.data, d_dst, byteCount, cudaMemcpyDeviceToHost));
    // Release device resources.
    CHECK_CUDA(cudaFree(d_src));
    CHECK_CUDA(cudaFree(d_dst));
    CHECK_CUDA(cudaDeviceReset());
}
|
45d59da585e55c20ca98961e4a7aed6df321d600.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: dnlebard
#include "HarmonicAngleForceGPU.cuh"
#include "TextureTools.h"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
// SMALL: a relatively small number, used as a lower clamp for sin(theta)
// so the force expression never divides by (near) zero.
#define SMALL Scalar(0.001)
/*! \file HarmonicAngleForceGPU.cu
    \brief Defines GPU kernel code for calculating the harmonic angle forces. Used by HarmonicAngleForceComputeGPU.
*/
//! Texture bound to the per-type angle parameters (K, t_0) packed as Scalar2
scalar2_tex_t angle_params_tex;
//! Kernel for caculating harmonic angle forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch Pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param d_params Parameters for the angle force
\param box Box dimensions for periodic boundary condition handling
\param alist Angle data to use in calculating the forces
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
*/
extern "C" __global__ void gpu_compute_harmonic_angle_forces_kernel(Scalar4* d_force,
                                                                    Scalar* d_virial,
                                                                    const unsigned int virial_pitch,
                                                                    const unsigned int N,
                                                                    const Scalar4 *d_pos,
                                                                    const Scalar2 *d_params,
                                                                    BoxDim box,
                                                                    const group_storage<3> *alist,
                                                                    const unsigned int *apos_list,
                                                                    const unsigned int pitch,
                                                                    const unsigned int *n_angles_list)
    {
    // start by identifying which particle we are to handle (one thread per particle)
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    // load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
    int n_angles = n_angles_list[idx];
    // read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
    Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet
    Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z);
    Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet
    // initialize the force to 0
    Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
    Scalar fab[3], fcb[3];
    // initialize the virial to 0
    Scalar virial[6];
    for (int i = 0; i < 6; i++)
        virial[i] = Scalar(0.0);
    // loop over all angles this particle participates in
    for (int angle_idx = 0; angle_idx < n_angles; angle_idx++)
        {
        group_storage<3> cur_angle = alist[pitch*angle_idx + idx];
        int cur_angle_x_idx = cur_angle.idx[0];
        int cur_angle_y_idx = cur_angle.idx[1];
        int cur_angle_type = cur_angle.idx[2];
        // position (0=a, 1=b, 2=c) of this particle within the current triplet
        int cur_angle_abc = apos_list[pitch*angle_idx + idx];
        // get the a-particle's position (MEM TRANSFER: 16 bytes)
        Scalar4 x_postype = d_pos[cur_angle_x_idx];
        Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z);
        // get the c-particle's position (MEM TRANSFER: 16 bytes)
        Scalar4 y_postype = d_pos[cur_angle_y_idx];
        Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z);
        // assign a/b/c according to where this thread's particle sits in the triplet
        if (cur_angle_abc == 0)
            {
            a_pos = idx_pos;
            b_pos = x_pos;
            c_pos = y_pos;
            }
        if (cur_angle_abc == 1)
            {
            b_pos = idx_pos;
            a_pos = x_pos;
            c_pos = y_pos;
            }
        if (cur_angle_abc == 2)
            {
            c_pos = idx_pos;
            a_pos = x_pos;
            b_pos = y_pos;
            }
        // calculate dr for a-b,c-b,and a-c
        Scalar3 dab = a_pos - b_pos;
        Scalar3 dcb = c_pos - b_pos;
        Scalar3 dac = a_pos - c_pos;
        // apply periodic boundary conditions
        // NOTE(review): dac is computed and wrapped here but never used below
        dab = box.minImage(dab);
        dcb = box.minImage(dcb);
        dac = box.minImage(dac);
        // get the angle parameters K (spring constant) and t_0 (rest angle) (MEM TRANSFER: 8 bytes)
        Scalar2 params = texFetchScalar2(d_params, angle_params_tex, cur_angle_type);
        Scalar K = params.x;
        Scalar t_0 = params.y;
        // bond lengths |ab| and |cb|
        // NOTE(review): sqrtf is single precision; if Scalar is compiled as double
        // this truncates precision -- confirm this is intended.
        Scalar rsqab = dot(dab, dab);
        Scalar rab = sqrtf(rsqab);
        Scalar rsqcb = dot(dcb, dcb);
        Scalar rcb = sqrtf(rsqcb);
        // cosine of the a-b-c angle, clamped to [-1,1] against round-off
        Scalar c_abbc = dot(dab, dcb);
        c_abbc /= rab*rcb;
        if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0);
        if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0);
        // 1/sin(theta), clamped by SMALL to avoid division by zero for straight angles
        Scalar s_abbc = sqrtf(Scalar(1.0) - c_abbc*c_abbc);
        if (s_abbc < SMALL) s_abbc = SMALL;
        s_abbc = Scalar(1.0)/s_abbc;
        // actually calculate the force: dth is the deviation from the rest angle
        Scalar dth = fast::acos(c_abbc) - t_0;
        Scalar tk = K*dth;
        Scalar a = -Scalar(1.0) * tk * s_abbc;
        Scalar a11 = a*c_abbc/rsqab;
        Scalar a12 = -a / (rab*rcb);
        Scalar a22 = a*c_abbc / rsqcb;
        // force on a (fab) and on c (fcb); b receives minus their sum
        fab[0] = a11*dab.x + a12*dcb.x;
        fab[1] = a11*dab.y + a12*dcb.y;
        fab[2] = a11*dab.z + a12*dcb.z;
        fcb[0] = a22*dcb.x + a12*dab.x;
        fcb[1] = a22*dcb.y + a12*dab.y;
        fcb[2] = a22*dcb.z + a12*dab.z;
        // compute 1/3 of the energy, 1/3 for each atom in the angle
        Scalar angle_eng = tk*dth*Scalar(Scalar(1.0)/Scalar(6.0));
        // upper triangular version of virial tensor (xx, xy, xz, yy, yz, zz)
        Scalar angle_virial[6];
        angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]);
        angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]);
        angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]);
        angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]);
        angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]);
        angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]);
        // accumulate the force contribution matching this particle's role
        if (cur_angle_abc == 0)
            {
            force_idx.x += fab[0];
            force_idx.y += fab[1];
            force_idx.z += fab[2];
            }
        if (cur_angle_abc == 1)
            {
            force_idx.x -= fab[0] + fcb[0];
            force_idx.y -= fab[1] + fcb[1];
            force_idx.z -= fab[2] + fcb[2];
            }
        if (cur_angle_abc == 2)
            {
            force_idx.x += fcb[0];
            force_idx.y += fcb[1];
            force_idx.z += fcb[2];
            }
        force_idx.w += angle_eng;
        for (int i = 0; i < 6; i++)
            virial[i] += angle_virial[i];
        }
    // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
    d_force[idx] = force_idx;
    for (int i = 0; i < 6; i++)
        d_virial[i*virial_pitch+idx] = virial[i];
    }
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial arary
\param N number of particles
\param d_pos device array of particle positions
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param atable List of angles stored on the GPU
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
\param d_params K and t_0 params packed as Scalar2 variables
\param n_angle_types Number of angle types in d_params
\param block_size Block size to use when performing calculations
\returns Any error code resulting from the kernel launch
\note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize()
\a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant
and the y component contains t_0 the equilibrium angle.
*/
// Host-side driver: binds the per-type (K, t_0) parameter texture and launches
// the harmonic angle force kernel over all N particles.
// Returns any error from the texture bind; kernel launch errors are not
// synchronously checked here (release builds deliberately avoid a device sync).
hipError_t gpu_compute_harmonic_angle_forces(Scalar4* d_force,
                                             Scalar* d_virial,
                                             const unsigned int virial_pitch,
                                             const unsigned int N,
                                             const Scalar4 *d_pos,
                                             const BoxDim& box,
                                             const group_storage<3> *atable,
                                             const unsigned int *apos_list,
                                             const unsigned int pitch,
                                             const unsigned int *n_angles_list,
                                             Scalar2 *d_params,
                                             unsigned int n_angle_types,
                                             int block_size)
    {
    assert(d_params);
    // One thread per particle; round the grid up so every particle is covered.
    dim3 grid_dim(N / block_size + 1, 1, 1);
    dim3 block_dim(block_size, 1, 1);
    // Expose the angle parameters through the texture path used by the kernel.
    hipError_t error = hipBindTexture(0, angle_params_tex, d_params, sizeof(Scalar2) * n_angle_types);
    if (error != hipSuccess)
        return error;
    // Launch the force computation.
    hipLaunchKernelGGL(( gpu_compute_harmonic_angle_forces_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, d_params, box,
                                                                  atable, apos_list, pitch, n_angles_list);
    return hipSuccess;
    }
| 45d59da585e55c20ca98961e4a7aed6df321d600.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: dnlebard
#include "HarmonicAngleForceGPU.cuh"
#include "TextureTools.h"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
// SMALL a relatively small number
#define SMALL Scalar(0.001)
/*! \file HarmonicAngleForceGPU.cu
\brief Defines GPU kernel code for calculating the harmonic angle forces. Used by HarmonicAngleForceComputeGPU.
*/
//! Texture for reading angle parameters
scalar2_tex_t angle_params_tex;
//! Kernel for caculating harmonic angle forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch Pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param d_params Parameters for the angle force
\param box Box dimensions for periodic boundary condition handling
\param alist Angle data to use in calculating the forces
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
*/
extern "C" __global__ void gpu_compute_harmonic_angle_forces_kernel(Scalar4* d_force,
                                                                    Scalar* d_virial,
                                                                    const unsigned int virial_pitch,
                                                                    const unsigned int N,
                                                                    const Scalar4 *d_pos,
                                                                    const Scalar2 *d_params,
                                                                    BoxDim box,
                                                                    const group_storage<3> *alist,
                                                                    const unsigned int *apos_list,
                                                                    const unsigned int pitch,
                                                                    const unsigned int *n_angles_list)
    {
    // start by identifying which particle we are to handle (one thread per particle)
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    // load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
    int n_angles = n_angles_list[idx];
    // read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
    Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet
    Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z);
    Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet
    // initialize the force to 0
    Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
    Scalar fab[3], fcb[3];
    // initialize the virial to 0
    Scalar virial[6];
    for (int i = 0; i < 6; i++)
        virial[i] = Scalar(0.0);
    // loop over all angles this particle participates in
    for (int angle_idx = 0; angle_idx < n_angles; angle_idx++)
        {
        group_storage<3> cur_angle = alist[pitch*angle_idx + idx];
        int cur_angle_x_idx = cur_angle.idx[0];
        int cur_angle_y_idx = cur_angle.idx[1];
        int cur_angle_type = cur_angle.idx[2];
        // position (0=a, 1=b, 2=c) of this particle within the current triplet
        int cur_angle_abc = apos_list[pitch*angle_idx + idx];
        // get the a-particle's position (MEM TRANSFER: 16 bytes)
        Scalar4 x_postype = d_pos[cur_angle_x_idx];
        Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z);
        // get the c-particle's position (MEM TRANSFER: 16 bytes)
        Scalar4 y_postype = d_pos[cur_angle_y_idx];
        Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z);
        // assign a/b/c according to where this thread's particle sits in the triplet
        if (cur_angle_abc == 0)
            {
            a_pos = idx_pos;
            b_pos = x_pos;
            c_pos = y_pos;
            }
        if (cur_angle_abc == 1)
            {
            b_pos = idx_pos;
            a_pos = x_pos;
            c_pos = y_pos;
            }
        if (cur_angle_abc == 2)
            {
            c_pos = idx_pos;
            a_pos = x_pos;
            b_pos = y_pos;
            }
        // calculate dr for a-b,c-b,and a-c
        Scalar3 dab = a_pos - b_pos;
        Scalar3 dcb = c_pos - b_pos;
        Scalar3 dac = a_pos - c_pos;
        // apply periodic boundary conditions
        // NOTE(review): dac is computed and wrapped here but never used below
        dab = box.minImage(dab);
        dcb = box.minImage(dcb);
        dac = box.minImage(dac);
        // get the angle parameters K (spring constant) and t_0 (rest angle) (MEM TRANSFER: 8 bytes)
        Scalar2 params = texFetchScalar2(d_params, angle_params_tex, cur_angle_type);
        Scalar K = params.x;
        Scalar t_0 = params.y;
        // bond lengths |ab| and |cb|
        // NOTE(review): sqrtf is single precision; if Scalar is compiled as double
        // this truncates precision -- confirm this is intended.
        Scalar rsqab = dot(dab, dab);
        Scalar rab = sqrtf(rsqab);
        Scalar rsqcb = dot(dcb, dcb);
        Scalar rcb = sqrtf(rsqcb);
        // cosine of the a-b-c angle, clamped to [-1,1] against round-off
        Scalar c_abbc = dot(dab, dcb);
        c_abbc /= rab*rcb;
        if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0);
        if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0);
        // 1/sin(theta), clamped by SMALL to avoid division by zero for straight angles
        Scalar s_abbc = sqrtf(Scalar(1.0) - c_abbc*c_abbc);
        if (s_abbc < SMALL) s_abbc = SMALL;
        s_abbc = Scalar(1.0)/s_abbc;
        // actually calculate the force: dth is the deviation from the rest angle
        Scalar dth = fast::acos(c_abbc) - t_0;
        Scalar tk = K*dth;
        Scalar a = -Scalar(1.0) * tk * s_abbc;
        Scalar a11 = a*c_abbc/rsqab;
        Scalar a12 = -a / (rab*rcb);
        Scalar a22 = a*c_abbc / rsqcb;
        // force on a (fab) and on c (fcb); b receives minus their sum
        fab[0] = a11*dab.x + a12*dcb.x;
        fab[1] = a11*dab.y + a12*dcb.y;
        fab[2] = a11*dab.z + a12*dcb.z;
        fcb[0] = a22*dcb.x + a12*dab.x;
        fcb[1] = a22*dcb.y + a12*dab.y;
        fcb[2] = a22*dcb.z + a12*dab.z;
        // compute 1/3 of the energy, 1/3 for each atom in the angle
        Scalar angle_eng = tk*dth*Scalar(Scalar(1.0)/Scalar(6.0));
        // upper triangular version of virial tensor (xx, xy, xz, yy, yz, zz)
        Scalar angle_virial[6];
        angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]);
        angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]);
        angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]);
        angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]);
        angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]);
        angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]);
        // accumulate the force contribution matching this particle's role
        if (cur_angle_abc == 0)
            {
            force_idx.x += fab[0];
            force_idx.y += fab[1];
            force_idx.z += fab[2];
            }
        if (cur_angle_abc == 1)
            {
            force_idx.x -= fab[0] + fcb[0];
            force_idx.y -= fab[1] + fcb[1];
            force_idx.z -= fab[2] + fcb[2];
            }
        if (cur_angle_abc == 2)
            {
            force_idx.x += fcb[0];
            force_idx.y += fcb[1];
            force_idx.z += fcb[2];
            }
        force_idx.w += angle_eng;
        for (int i = 0; i < 6; i++)
            virial[i] += angle_virial[i];
        }
    // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
    d_force[idx] = force_idx;
    for (int i = 0; i < 6; i++)
        d_virial[i*virial_pitch+idx] = virial[i];
    }
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param atable List of angles stored on the GPU
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
\param d_params K and t_0 params packed as Scalar2 variables
\param n_angle_types Number of angle types in d_params
\param block_size Block size to use when performing calculations
\returns Any error code resulting from the kernel launch
\note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize()
\a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant
and the y component contains t_0 the equilibrium angle.
*/
//! Host-side launcher for gpu_compute_harmonic_angle_forces_kernel.
//! Binds the per-type parameter texture, then launches one thread per particle.
cudaError_t gpu_compute_harmonic_angle_forces(Scalar4* d_force,
                                              Scalar* d_virial,
                                              const unsigned int virial_pitch,
                                              const unsigned int N,
                                              const Scalar4 *d_pos,
                                              const BoxDim& box,
                                              const group_storage<3> *atable,
                                              const unsigned int *apos_list,
                                              const unsigned int pitch,
                                              const unsigned int *n_angles_list,
                                              Scalar2 *d_params,
                                              unsigned int n_angle_types,
                                              int block_size)
    {
    assert(d_params);

    // one thread per particle, rounded up to a whole number of blocks
    dim3 threads(block_size, 1, 1);
    dim3 grid(N / block_size + 1, 1, 1);

    // make the K/t_0 parameters available through the texture cache
    cudaError_t bind_status = cudaBindTexture(0, angle_params_tex, d_params, sizeof(Scalar2) * n_angle_types);
    if (bind_status != cudaSuccess)
        return bind_status;

    gpu_compute_harmonic_angle_forces_kernel<<<grid, threads>>>(d_force, d_virial, virial_pitch, N, d_pos, d_params, box,
                                                                atable, apos_list, pitch, n_angles_list);

    // launch errors are deliberately not checked here; release builds always report success
    return cudaSuccess;
    }
|
ba239470f643a9bb7c0355d493c4ff865513cbe2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
The implementation of this file is based on qkvToContext plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Modifications: scaling is moved from masked softmax to the gemm before that.
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hipcub/hipcub.hpp>
#include <rocblas.h>
#include <hip/hip_fp16.h>
#include <math_constants.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "attention_impl.h"
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Round a up to the next multiple of b (assumes b > 0).
static size_t AlignTo(size_t a, size_t b) {
  const size_t rounded_up_quotient = CeilDiv(a, b);
  return rounded_up_quotient * b;
}
// Bytes needed for one BxNxSxS* attention-score scratch buffer, rounded up to a
// 256-byte boundary so consecutive scratch buffers stay well aligned.
size_t ScratchSize(size_t element_size, int batch_size, int num_heads, int sequence_length, int all_sequence_length) {
  // Widen to size_t before multiplying: the element count can overflow 32-bit int
  // for large batch/head/sequence combinations.
  const size_t len = static_cast<size_t>(batch_size) * num_heads * sequence_length * all_sequence_length;
  const size_t bytes = len * element_size;
  // Pad each buffer to a 256-byte boundary.
  const size_t alignment = 256;
  const size_t bytesAligned = AlignTo(bytes, alignment);
  return bytesAligned;
}
// Total device workspace for the attention op:
//   - one 3xBxSxNxH buffer holding transposed Q, K and V, plus
//   - two aligned BxNxSxS* scratch buffers (attention scores and probabilities).
size_t GetAttentionWorkspaceSize(
    size_t element_size,
    int batch_size,
    int num_heads,
    int head_size,
    int sequence_length,
    int past_sequence_length) {
  // Widen to size_t before multiplying to avoid 32-bit int overflow on large inputs.
  size_t qkv_size = 3 * static_cast<size_t>(batch_size) * sequence_length * num_heads * head_size * element_size;
  return qkv_size + 2 * ScratchSize(element_size, batch_size, num_heads, sequence_length, past_sequence_length + sequence_length);
}
// Numerically stable softmax over one attention-score row of length all_sequence_length.
// One thread block handles one row (row chosen by blockIdx); each thread strides the
// row by TPB, so rows longer than TPB are supported. Only positions in
// [valid_start, valid_end) contribute to the sum; all other positions are written as 0.
template <typename T, unsigned TPB>
__device__ inline void Softmax(const int all_sequence_length,
const int sequence_length,
const int valid_end,
const int valid_start,
const T* input,
T* output) {
using BlockReduce = hipcub::BlockReduce<float, TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
__shared__ float sum_reverse_block;
__shared__ float max_block;
// Per-thread running maximum, seeded with -inf so empty windows stay neutral.
float thread_data_max(-CUDART_INF_F);
// e^x is represented as infinity if x is large enough, like 100.f.
// Infinity divided by Infinity is a NAN. Thus, softmax gets a NAN if one or more item are large enough.
// a math transform as below is leveraged to get a stable softmax:
// e^xi/(e^x1 + ...e^xn) = e^(xi - max) / (e^(x1 - max) + ... + e^(xn - max))
const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length;
// Pass 1: find the maximum over the valid window.
for (int i = threadIdx.x; i < valid_end; i += TPB) {
if (i >= valid_start) {
const int index = offset + i;
if (thread_data_max < float(input[index])) {
thread_data_max = float(input[index]);
}
}
}
const auto max = BlockReduce(tmp_storage).Reduce(thread_data_max, hipcub::Max());
// Store max value
if (threadIdx.x == 0) {
max_block = max;
}
__syncthreads();
// Pass 2: accumulate exp(x - max) over the valid window.
// (Reusing tmp_storage is safe because of the __syncthreads() above.)
float thread_data_sum(0.f);
for (int i = threadIdx.x; i < valid_end; i += TPB) {
if (i >= valid_start) {
const int index = offset + i;
const float val = input[index];
thread_data_sum += expf(val - max_block);
}
}
const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_sum, hipcub::Sum());
if (threadIdx.x == 0) {
sum_reverse_block = 1.f / sum;
}
__syncthreads();
// Pass 3: write normalized probabilities; positions outside the window get 0.
for (int i = threadIdx.x; i < all_sequence_length; i += TPB) {
const int index = offset + i;
const float val = (i >= valid_start && i < valid_end) ? expf(float(input[index]) - max_block) * sum_reverse_block : 0.f;
output[index] = T(val);
}
}
// Softmax for a row that fits entirely in one thread block (assumes TPB >= all_sequence_length,
// so each thread owns at most one element of the row). Positions outside
// [valid_start, end) produce 0. When is_unidirectional is set, the causal limit
// derived from the query position further restricts (or, in the degenerate case,
// replaces) the valid window.
template <typename T, unsigned TPB>
__device__ inline void SoftmaxSmall(const int all_sequence_length,
const int sequence_length,
const int valid_end,
const int valid_start,
const T* input,
T* output,
bool is_unidirectional) {
using BlockReduce = hipcub::BlockReduce<float, TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
__shared__ float sum_reverse_block;
__shared__ float max_block;
// Input dimension is BxNxSxS*; blockIdx.y is batch index b; gridDim.x=N*S; blockIdx.x is index within N*S;
const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length;
const int index = offset + threadIdx.x;
bool is_valid = false; // whether it has attention mask == 1.
// Update end position for unidirectional.
int end = valid_end;
if (is_unidirectional) {
// Causal limit: a query may only attend to keys at or before its own position.
int end_unid = all_sequence_length - sequence_length + (blockIdx.x % sequence_length) + 1;
if (end_unid <= valid_start) {
// In this situation, mask of [0, end_unid) and [valid_start, valid_end) has -10000, and [end_unid, valid_start) and [valid_end, all_seq_len) has -20000.
// So [0, end_unid) will also have value after softmax.
is_valid = threadIdx.x < end_unid;
} else {
end = min(valid_end, end_unid);
}
}
is_valid = is_valid || (threadIdx.x >= valid_start && threadIdx.x < end);
// e^x is represented as infinity if x is large enough, like 100.f.
// Infinity divided by Infinity is a NAN. Thus, softmax gets a NAN if one or more item are large enough.
// a math transform as below is leveraged to get a stable softmax:
// e^xi/(e^x1 + ...e^xn) = e^(xi - max) / (e^(x1 - max) + ... + e^(xn - max))
float thread_data_max = is_valid ? float(input[index]) : float(-CUDART_INF_F);
const auto max = BlockReduce(tmp_storage).Reduce(thread_data_max, hipcub::Max(), end);
// Store max value
if (threadIdx.x == 0) {
max_block = max;
}
__syncthreads();
// exp(x - max) for valid positions; invalid positions contribute 0 to the sum.
float thread_data_exp(0.f);
if (is_valid) {
thread_data_exp = expf(float(input[index]) - max_block);
}
const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_exp, hipcub::Sum(), end);
// Store value of 1.0/sum.
if (threadIdx.x == 0) {
sum_reverse_block = (1.f) / sum;
}
__syncthreads();
// threadIdx.x might be larger than all_sequence_length due to alignment to 32x.
if (threadIdx.x < all_sequence_length) {
output[index] = T(thread_data_exp * sum_reverse_block);
}
}
// Softmax with a per-batch 2D attention mask, for rows that fit in one thread block
// (assumes TPB >= all_sequence_length). The raw score is scaled by `scalar` here
// (scaling was moved out of the preceding GEMM for this path) and masked-out or
// causally-excluded positions receive a -10000 additive penalty before the softmax.
template <typename T, unsigned TPB>
__device__ inline void SoftmaxWithMask2DSmall(const int all_sequence_length,
const int sequence_length,
const int* attention_mask, // 2D attention mask
const T* input,
T* output,
const bool is_unidirectional,
const float scalar) {
using BlockReduce = hipcub::BlockReduce<float, TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
__shared__ float sum_reverse_block;
__shared__ float max_block;
// Input dimension is BxNxSxS*; blockIdx.y is batch index b; gridDim.x=N*S; blockIdx.x is index within N*S;
int index = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length + threadIdx.x;
float thread_data = -CUDART_INF_F;
if (threadIdx.x < all_sequence_length) {
// mask > 0 means "attend"; otherwise add a large negative penalty.
const int& mask = attention_mask[blockIdx.y * all_sequence_length + threadIdx.x];
float mask_value = mask > 0 ? 0.0f : -10000.0f;
if (is_unidirectional) {
int from_index = all_sequence_length - sequence_length + (blockIdx.x % sequence_length); // offset of from token in all sequence length.
if (threadIdx.x > from_index) {
// Future positions get an extra penalty (stacks with the mask penalty).
mask_value += -10000.0f;
}
}
thread_data = float(input[index]) * scalar + mask_value;
}
const float max = BlockReduce(tmp_storage).Reduce(thread_data, hipcub::Max(), all_sequence_length);
// Store max value
if (threadIdx.x == 0) {
max_block = max;
}
__syncthreads();
float thread_data_exp = threadIdx.x < all_sequence_length ? expf(thread_data - max_block) : 0.0f;
const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_exp, hipcub::Sum(), all_sequence_length);
// Store value of 1.0/sum
if (threadIdx.x == 0) {
sum_reverse_block = (1.f) / sum;
}
__syncthreads();
if (threadIdx.x < all_sequence_length) {
output[index] = T(thread_data_exp * sum_reverse_block);
}
}
// Kernel wrapper: unmasked softmax (whole row valid) using the single-block-per-row path.
template <typename T, unsigned TPB>
__global__ void SoftmaxKernelSmall(const int all_sequence_length, const int sequence_length, const T* input, T* output, bool is_unidirectional) {
SoftmaxSmall<T, TPB>(all_sequence_length, sequence_length, all_sequence_length, 0, input, output, is_unidirectional);
}
// Kernel wrapper: unmasked softmax using the strided path (rows longer than TPB).
template <typename T, unsigned TPB>
__global__ void SoftmaxKernel(const int all_sequence_length, const int sequence_length, const T* input, T* output) {
Softmax<T, TPB>(all_sequence_length, sequence_length, all_sequence_length, 0, input, output);
}
// Host dispatcher for the unmasked softmax. Picks the smallest power-of-two block
// size that covers the row so the "small" kernels can assume one element per thread;
// rows longer than 1024 fall back to the strided kernel, which does not support
// unidirectional masking. Grid: (sequence_length * num_heads, batch_size).
template <typename T>
bool ComputeSoftmax(
hipStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads,
const T* input, T* output, bool is_unidirectional) {
const dim3 grid(sequence_length * num_heads, batch_size, 1);
if (all_sequence_length <= 32) {
const int blockSize = 32;
hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (all_sequence_length <= 64) {
const int blockSize = 64;
hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (all_sequence_length <= 128) {
const int blockSize = 128;
hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (all_sequence_length <= 256) {
const int blockSize = 256;
hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (all_sequence_length <= 512) {
const int blockSize = 512;
hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (all_sequence_length <= 1024) {
const int blockSize = 1024;
hipLaunchKernelGGL(( SoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (!is_unidirectional) {
const int blockSize = 1024;
hipLaunchKernelGGL(( SoftmaxKernel<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, input, output);
} else {
ORT_THROW("Attention CUDA operator does not support unidirectional with total sequence length > 1024.");
}
// Report launch-configuration errors without clearing the sticky error state.
return CUDA_CALL(hipPeekAtLastError());
}
// Kernel: softmax with a per-batch [start, end) valid window read from 1D mask arrays
// (mask_start may be null, meaning start at 0). Thread 0 loads and clamps the window
// into shared memory; __syncthreads() broadcasts it to the block before SoftmaxSmall.
template <typename T, unsigned TPB>
__global__ void MaskedSoftmaxKernelSmall(const int all_sequence_length, const int sequence_length, const int* mask_end, const int* mask_start, const T* input, T* output, bool is_unidirectional) {
__shared__ int start_position;
__shared__ int end_position;
if (threadIdx.x == 0) {
const int batch = blockIdx.y;
start_position = mask_start != nullptr ? max(0, mask_start[batch]) : 0;
end_position = min(all_sequence_length, mask_end[batch]);
// Attend to no word has same effect as attend to all words. This is added to get parity with CPU result.
if (start_position >= end_position) {
start_position = 0;
end_position = all_sequence_length;
}
}
__syncthreads();
SoftmaxSmall<T, TPB>(all_sequence_length, sequence_length, end_position, start_position, input, output, is_unidirectional);
}
// Kernel: same per-batch [start, end) window handling as MaskedSoftmaxKernelSmall,
// but delegates to the strided Softmax for rows longer than the block size.
template <typename T, unsigned TPB>
__global__ void MaskedSoftmaxKernel(const int all_sequence_length, const int sequence_length, const int* mask_end, const int* mask_start, const T* input, T* output) {
__shared__ int start_position;
__shared__ int end_position;
if (threadIdx.x == 0) {
const int batch = blockIdx.y;
start_position = mask_start != nullptr ? max(0, mask_start[batch]) : 0;
end_position = min(all_sequence_length, mask_end[batch]);
// Attend to no word has same effect as attend to all words. This is added to get parity with CPU result.
if (start_position >= end_position) {
start_position = 0;
end_position = all_sequence_length;
}
}
__syncthreads();
Softmax<T, TPB>(all_sequence_length, sequence_length, end_position, start_position, input, output);
}
// Kernel wrapper: softmax with a 2D attention mask (single-block-per-row path only).
template <typename T, unsigned TPB>
__global__ void SoftmaxWithMask2DSmallKernel(const int all_sequence_length, const int sequence_length, const int* attention_mask, const T* input, T* output, const bool is_unidirectional, const float scalar) {
SoftmaxWithMask2DSmall<T, TPB>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
}
// Host dispatcher for softmax with 1D mask indices (per-batch end, optional start).
// Same block-size selection strategy as ComputeSoftmax; rows longer than 1024 use the
// strided masked kernel, which does not support unidirectional masking.
template <typename T>
bool ComputeSoftmaxWithMask1D(hipStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads,
const int* mask_index, const int* mask_start, const T* input, T* output, const bool is_unidirectional) {
const dim3 grid(sequence_length * num_heads, batch_size, 1);
if (all_sequence_length <= 32) {
const int blockSize = 32;
hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (all_sequence_length <= 64) {
const int blockSize = 64;
hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (all_sequence_length <= 128) {
const int blockSize = 128;
hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (all_sequence_length <= 256) {
const int blockSize = 256;
hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (all_sequence_length <= 512) {
const int blockSize = 512;
hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (all_sequence_length <= 1024) {
const int blockSize = 1024;
hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (!is_unidirectional) {
const int blockSize = 1024;
hipLaunchKernelGGL(( MaskedSoftmaxKernel<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, mask_index, mask_start, input, output);
} else {
ORT_THROW("Attention CUDA operator does not support unidirectional with total sequence length > 1024.");
}
return CUDA_CALL(hipPeekAtLastError());
}
// Host dispatcher for softmax with a full 2D attention mask. Only the
// single-block-per-row kernels exist for this path, so rows longer than 1024
// are rejected outright.
template <typename T>
bool ComputeSoftmaxWithMask2D(hipStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads,
const int* attention_mask, const T* input, T* output, const bool is_unidirectional, const float scalar) {
const dim3 grid(sequence_length * num_heads, batch_size, 1);
if (all_sequence_length <= 32) {
const int blockSize = 32;
hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else if (all_sequence_length <= 64) {
const int blockSize = 64;
hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else if (all_sequence_length <= 128) {
const int blockSize = 128;
hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else if (all_sequence_length <= 256) {
const int blockSize = 256;
hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else if (all_sequence_length <= 512) {
const int blockSize = 512;
hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else if (all_sequence_length <= 1024) {
const int blockSize = 1024;
hipLaunchKernelGGL(( SoftmaxWithMask2DSmallKernel<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else {
ORT_THROW("Attention CUDA operator does not supported 2D attention mask with total sequence length > 1024.");
}
return CUDA_CALL(hipPeekAtLastError());
}
// Transpose attention context from BxNxSxH to BxSxNxH.
// Launch layout (set by LaunchTransCtx): block = (H, num_heads), grid = (sequence_length, batch_size).
// Each thread copies one element; the i < H guard allows H smaller than blockDim.x.
template <typename T>
__global__ void TransposeCtx(const int H, const T* input, T* output) {
// Input: BxNxSxH
// Output: BxSxNxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int num_heads = blockDim.y;
int sequence_length = gridDim.x;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
const int in_offset = s * H + n * sequence_length * H + b * NHS;
const int out_offset = n * H + s * NH + b * NHS;
const int i = threadIdx.x;
if (i < H) {
output[out_offset + i] = input[in_offset + i];
}
}
// Launch TransposeCtx for float data (BxNxSxH -> BxSxNxH).
// When head_size is even, elements are copied two floats at a time through float2
// for wider, better-coalesced memory transactions.
bool LaunchTransCtx(hipStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const float* input, float* output) {
const dim3 grid(sequence_length, batch_size, 1);
if (0 == (head_size & 1)) {
const int H = head_size / 2;
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
const dim3 block(H, num_heads, 1);
hipLaunchKernelGGL(( TransposeCtx<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( TransposeCtx<float>), dim3(grid), dim3(block), 0, stream, head_size, input, output);
}
return CUDA_CALL(hipPeekAtLastError());
}
// Launch TransposeCtx for half data (BxNxSxH -> BxSxNxH).
// Vectorizes by the widest type the head size allows: four halves via float2,
// two halves via half2, or element-by-element for odd head sizes.
bool LaunchTransCtx(hipStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const half* input, half* output) {
const dim3 grid(sequence_length, batch_size, 1);
if (0 == (head_size % 4)) {
const int H = head_size / 4;
const dim3 block(H, num_heads, 1);
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
hipLaunchKernelGGL(( TransposeCtx<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
const dim3 block(H, num_heads, 1);
const half2* input2 = reinterpret_cast<const half2*>(input);
half2* output2 = reinterpret_cast<half2*>(output);
hipLaunchKernelGGL(( TransposeCtx<half2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else { // this should be an "odd" case. probably not worth catching it in the half2 kernel.
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( TransposeCtx<half>), dim3(grid), dim3(block), 0, stream, head_size, input, output);
}
return CUDA_CALL(hipPeekAtLastError());
}
// Split and transpose packed QKV from BxSx3xNxH to 3xBxNxSxH.
// Launch layout (set by LaunchTransQkv): block = (H, num_heads), grid = (sequence_length, batch_size, 3);
// blockIdx.z selects which of the three matrices (Q, K or V) this block copies.
template <typename T>
__global__ void TransposeQKV(const int H, const T* input, T* output) {
// Input: BxSx3xNxH
// Output: 3xBxNxSxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
const int out_offset = s * H + n * sequence_length * H + b * NHS + m * NHS * batch_size;
const int i = threadIdx.x;
if (i < H) {
output[out_offset + i] = input[in_offset + i];
}
}
// Launch TransposeQKV for float data (BxSx3xNxH -> 3xBxNxSxH).
// Uses float2 vectorized copies when head_size is even.
bool LaunchTransQkv(hipStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const float* input, float* output) {
const dim3 grid(sequence_length, batch_size, 3);
if (0 == (head_size & 1)) {
const int H = head_size / 2;
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
const dim3 block(H, num_heads, 1);
hipLaunchKernelGGL(( TransposeQKV<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( TransposeQKV<float>), dim3(grid), dim3(block), 0, stream, head_size, input, output);
}
return CUDA_CALL(hipPeekAtLastError());
}
// Launch TransposeQKV for half data (BxSx3xNxH -> 3xBxNxSxH).
// Vectorizes by the widest type the head size allows: four halves via float2,
// two halves via half2, or element-by-element for odd head sizes.
bool LaunchTransQkv(hipStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const half* input, half* output) {
const dim3 grid(sequence_length, batch_size, 3);
if (0 == (head_size % 4)) {
const int H = head_size / 4;
const dim3 block(H, num_heads, 1);
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
hipLaunchKernelGGL(( TransposeQKV<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
const dim3 block(H, num_heads, 1);
const half2* input2 = reinterpret_cast<const half2*>(input);
half2* output2 = reinterpret_cast<half2*>(output);
hipLaunchKernelGGL(( TransposeQKV<half2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else { // this should be an "odd" case. probably not worth catching it in the half2 kernel..
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( TransposeQKV<half>), dim3(grid), dim3(block), 0, stream, head_size, input, output);
}
return CUDA_CALL(hipPeekAtLastError());
}
// Concatenate cached past K/V with the new step's K/V along the sequence axis.
// Launch layout (set by LaunchConcatPastToPresent): block = (H, num_heads),
// grid = (all_sequence_length, batch_size, 2); blockIdx.z selects K (0) or V (1).
// Each thread copies one element: positions before past_sequence_length come from
// `past`, the rest from `k_v`.
template <typename T>
__global__ void ConcatPastToPresent(const int sequence_length,
const T* past,
const T* k_v,
T* present) {
const int h = threadIdx.x;
const int n = threadIdx.y;
const int s = blockIdx.x;
const int b = blockIdx.y;
const int is_v = blockIdx.z; // 0 for k, 1 for v
const int all_sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int num_heads = blockDim.y;
const int H = blockDim.x;
// past: 2 x BxNxS'xH (past_k and past_v)
// k_v: 2 x BxNxSxH (k and v)
// present: 2 x BxNxS*xH (present_k and present_v)
const int past_sequence_length = all_sequence_length - sequence_length;
const int present_SH = all_sequence_length * H;
const int present_NSH = num_heads * present_SH;
int out_offset = b * present_NSH + n * present_SH + s * H + h + is_v * (present_NSH * batch_size);
if (s < past_sequence_length) {
// Copy from the past cache.
const int past_SH = past_sequence_length * H;
const int past_NSH = num_heads * past_SH;
const int in_offset = b * past_NSH + n * past_SH + s * H + h + is_v * (past_NSH * batch_size);
present[out_offset] = past[in_offset];
} else if (s < all_sequence_length) {
// Copy from the current step's K/V, shifted back by the past length.
const int SH = sequence_length * H;
const int NSH = num_heads * SH;
const int in_offset = b * NSH + n * SH + (s - past_sequence_length) * H + h + is_v * (NSH * batch_size);
present[out_offset] = k_v[in_offset];
}
}
// Launch ConcatPastToPresent for float data.
// Uses float2 vectorized copies when head_size is even.
bool LaunchConcatPastToPresent(hipStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const float* past,
const float* k_v,
float* present) {
const dim3 grid(all_sequence_length, batch_size, 2);
if (0 == (head_size & 1)) {
const dim3 block(head_size / 2, num_heads, 1);
hipLaunchKernelGGL(( ConcatPastToPresent<float2>), dim3(grid), dim3(block), 0, stream, sequence_length, reinterpret_cast<const float2*>(past), reinterpret_cast<const float2*>(k_v), reinterpret_cast<float2*>(present));
} else {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( ConcatPastToPresent<float>), dim3(grid), dim3(block), 0, stream, sequence_length, past, k_v, present);
}
return CUDA_CALL(hipPeekAtLastError());
}
// Launch ConcatPastToPresent for half data.
// Vectorizes by the widest type the head size allows: four halves via float2,
// two halves via half2, or element-by-element for odd head sizes.
bool LaunchConcatPastToPresent(hipStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const half* past,
const half* k_v,
half* present) {
const dim3 grid(all_sequence_length, batch_size, 2);
if (0 == (head_size % 4)) {
const dim3 block(head_size / 4, num_heads, 1);
hipLaunchKernelGGL(( ConcatPastToPresent<float2>), dim3(grid), dim3(block), 0, stream, sequence_length, reinterpret_cast<const float2*>(past), reinterpret_cast<const float2*>(k_v), reinterpret_cast<float2*>(present));
} else if (0 == (head_size & 1)) {
const dim3 block(head_size / 2, num_heads, 1);
hipLaunchKernelGGL(( ConcatPastToPresent<half2>), dim3(grid), dim3(block), 0, stream, sequence_length, reinterpret_cast<const half2*>(past), reinterpret_cast<const half2*>(k_v), reinterpret_cast<half2*>(present));
} else { // this should be an "odd" case. probably not worth catching it in the half2 kernel.
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( ConcatPastToPresent<half>), dim3(grid), dim3(block), 0, stream, sequence_length, past, k_v, present);
}
return CUDA_CALL(hipPeekAtLastError());
}
// Overload so templated callers can use one name for strided-batched GEMM.
// Float variant: forwards to hipblasSgemmStridedBatched; alpha/beta are taken by
// value and passed by address (assumes host pointer mode — TODO confirm handle setting).
hipblasStatus_t inline CublasGemmStridedBatched(
hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k, const float alpha,
const float* A, int lda, long long int strideA, const float* B, int ldb, long long int strideB,
const float beta, float* C, int ldc, long long int strideC, int batchCount) {
return hipblasSgemmStridedBatched(
handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount);
}
// Half-precision variant of the strided-batched GEMM overload:
// forwards to hipblasHgemmStridedBatched.
hipblasStatus_t inline CublasGemmStridedBatched(
hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k, const half alpha,
const half* A, int lda, long long int strideA, const half* B, int ldb, long long int strideB,
const half beta, half* C, int ldc, long long int strideC, int batchCount) {
return hipblasHgemmStridedBatched(
handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount);
}
// Full attention pipeline on `stream`:
//   1. transpose packed QKV input into scratch3,
//   2. optionally concatenate past K/V into `present` and switch K/V pointers to it,
//   3. GEMM Q*K' (scaled, unless the 2D-mask softmax applies the scale) into scratch1,
//   4. softmax (with whichever mask variant applies) into scratch2,
//   5. GEMM P*V back into scratch3,
//   6. transpose to the BxSxNxH output.
// `workspace` must be at least GetAttentionWorkspaceSize(...) bytes.
// Returns false as soon as any launched step reports failure.
template <typename T>
bool QkvToContext(
const hipDeviceProp_t& prop, hipblasHandle_t& cublas, hipStream_t stream,
const int batch_size, const int sequence_length, const int num_heads, const int head_size, const size_t element_size,
const T* input, T* output, T* workspace,
const int* mask_index, const std::vector<int64_t>* mask_index_dims,
bool is_unidirectional, int past_sequence_length, const T* past, T* present) {
const int all_sequence_length = past_sequence_length + sequence_length;
// Carve three consecutive buffers out of the caller-provided workspace.
const size_t bytes = ScratchSize(element_size, batch_size, num_heads, sequence_length, all_sequence_length);
T* scratch1 = workspace;
T* scratch2 = scratch1 + (bytes / element_size);
T* scratch3 = scratch2 + (bytes / element_size);
// input should be BxSx3xNxH => scratch3: 3xBxNxSxH
if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, input, scratch3)) {
return false;
}
// now scratch3 has Q, K, V: each has size BxNxSxH
const int batches = batch_size * num_heads;
const int size_per_batch = sequence_length * head_size;
const int total_size = batches * size_per_batch;
const T* q = scratch3;
const T* k = q + total_size;
const T* v = k + total_size;
hipblasSetStream(cublas, stream);
CublasMathModeSetter helper(prop, cublas, CUBLAS_TENSOR_OP_MATH);
// Concat past (2xBxNxS'xH) to present (2xBxNxS*xH):
// past_k (BxNxS'xH) + k (BxNxSxH) => present_k (BxNxS*xH)
// past_v (BxNxS'xH) + v (BxNxSxH) => present_v (BxNxS*xH)
const int present_size_per_batch = all_sequence_length * head_size;
if (nullptr != present) {
if (!LaunchConcatPastToPresent(stream, all_sequence_length, sequence_length, batch_size, head_size, num_heads, past, k, present)) {
return false;
}
// update pointers to present_k and present_v.
k = present;
v = present + batches * present_size_per_batch;
}
bool use_2d_attention_mask = (nullptr != mask_index && nullptr != mask_index_dims && mask_index_dims->size() == 2);
// compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS*
// Q: BxNxSxH, K (present_k): BxNxS*xH, Q*K': BxNxSxS*
const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
const int temp_matrix_size = sequence_length * all_sequence_length;
// With a 2D mask the scaling is applied inside the softmax instead of this GEMM.
T alpha = (T)(use_2d_attention_mask ? 1.0f : rsqrt_head_size);
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, all_sequence_length, sequence_length, head_size, alpha, k, head_size, present_size_per_batch,
q, head_size, size_per_batch, 0.f, scratch1, all_sequence_length, temp_matrix_size, batches))) {
return false;
}
// apply softmax and store result P to scratch2: BxNxSxS*
if (use_2d_attention_mask) { // 2d attention mask
if (!ComputeSoftmaxWithMask2D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, scratch1, scratch2, is_unidirectional, rsqrt_head_size)) {
return false;
}
} else if (nullptr != mask_index) { // 1d mask index
ORT_ENFORCE(nullptr != mask_index_dims && mask_index_dims->size() == 1);
// mask_index has 1D shape: either (batch_size) or (2*batch_size). Only the latter one has start positions.
const int* mask_start = (mask_index_dims->at(0) > batch_size) ? mask_index + batch_size : nullptr;
if (!ComputeSoftmaxWithMask1D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, mask_start, scratch1, scratch2, is_unidirectional)) {
return false;
}
} else { // no mask
if (!ComputeSoftmax<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, scratch1, scratch2, is_unidirectional)) {
return false;
}
}
// compute P*V (as V*P), and store in scratch3: BxNxSxH
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, head_size, sequence_length, all_sequence_length, 1.f, v, head_size, present_size_per_batch,
scratch2, all_sequence_length, temp_matrix_size, 0.f, scratch3, head_size, size_per_batch, batches))) {
return false;
}
// scratch3 is BxNxSxH, transpose to output BxSxNxH
return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, scratch3, output);
}
// Public entry point (HIP build): dispatches QkvToContext on the element type
// implied by element_size (2 bytes -> half, anything else -> float).
// All kernels and BLAS calls are queued on the default stream.
bool LaunchAttentionKernel(
    const hipDeviceProp_t& prop,
    const void* input,
    const int* mask_index,
    const std::vector<int64_t>* mask_index_dims,
    void* output,
    const int batch_size,
    const int sequence_length,
    const int num_heads,
    const int head_size,
    void* workspace,
    hipblasHandle_t& cublas,
    const size_t element_size,
    bool is_unidirectional,
    int past_sequence_length,
    const void* past,
    void* present) {
  const hipStream_t stream = nullptr;  // default stream
  const bool use_half = (element_size == 2);
  if (use_half) {
    return QkvToContext(prop, cublas, stream,
                        batch_size, sequence_length, num_heads, head_size, element_size,
                        static_cast<const half*>(input),
                        static_cast<half*>(output),
                        static_cast<half*>(workspace),
                        mask_index, mask_index_dims, is_unidirectional,
                        past_sequence_length,
                        static_cast<const half*>(past),
                        static_cast<half*>(present));
  }
  return QkvToContext(prop, cublas, stream,
                      batch_size, sequence_length, num_heads, head_size, element_size,
                      static_cast<const float*>(input),
                      static_cast<float*>(output),
                      static_cast<float*>(workspace),
                      mask_index, mask_index_dims, is_unidirectional,
                      past_sequence_length,
                      static_cast<const float*>(past),
                      static_cast<float*>(present));
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| ba239470f643a9bb7c0355d493c4ff865513cbe2.cu | /*
The implementation of this file is based on qkvToContext plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Modifications: scaling is moved from masked softmax to the gemm before that.
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cub/cub.cuh>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include <math_constants.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "attention_impl.h"
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Rounds `a` up to the nearest multiple of `b` (assumes b > 0).
static size_t AlignTo(size_t a, size_t b) {
  // Ceiling division, then scale back up to a multiple of b.
  const size_t blocks = (a + b - 1) / b;
  return blocks * b;
}
// Size in bytes of one attention-score scratch buffer (logical shape
// BxNxSxS*), rounded up to a 256-byte boundary so consecutive buffers carved
// out of one workspace allocation stay aligned.
//
// element_size: bytes per element (2 for fp16, 4 for fp32).
size_t ScratchSize(size_t element_size, int batch_size, int num_heads, int sequence_length, int all_sequence_length) {
  // Promote to size_t before multiplying: the element count can exceed
  // INT_MAX for large batch/sequence combinations, and the original int
  // multiplication would silently overflow.
  const size_t len = static_cast<size_t>(batch_size) * num_heads * sequence_length * all_sequence_length;
  const size_t bytes = len * element_size;
  const size_t alignment = 256;
  const size_t bytesAligned = AlignTo(bytes, alignment);
  return bytesAligned;
}
// Total device workspace (bytes) required by the attention kernel:
// one 3xBxSxNxH buffer for the transposed Q/K/V copy plus two BxNxSxS*
// score buffers (see the scratch1/2/3 layout in QkvToContext).
size_t GetAttentionWorkspaceSize(
    size_t element_size,
    int batch_size,
    int num_heads,
    int head_size,
    int sequence_length,
    int past_sequence_length) {
  // Promote to size_t up front so the product cannot overflow int for large
  // shapes (the original computed the whole product in int before the final
  // multiply by element_size).
  size_t qkv_size = 3 * static_cast<size_t>(batch_size) * sequence_length * num_heads * head_size * element_size;
  return qkv_size + 2 * ScratchSize(element_size, batch_size, num_heads, sequence_length, past_sequence_length + sequence_length);
}
// Numerically stable softmax over one attention-score row of length
// all_sequence_length, restricted to indices [valid_start, valid_end);
// positions outside that range get probability 0.
// Launch layout (set by the Compute* hosts): one block per row, with
// blockIdx.y = batch and gridDim.x = num_heads * sequence_length.  Each
// thread strides by TPB, so this variant also handles rows longer than the
// block size (TPB < all_sequence_length).
template <typename T, unsigned TPB>
__device__ inline void Softmax(const int all_sequence_length,
const int sequence_length,
const int valid_end,
const int valid_start,
const T* input,
T* output) {
using BlockReduce = cub::BlockReduce<float, TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
__shared__ float sum_reverse_block;
__shared__ float max_block;
float thread_data_max(-CUDART_INF_F);
// e^x is represented as infinity if x is large enough, like 100.f.
// Infinity divided by Infinity is a NAN. Thus, softmax gets a NAN if one or more item are large enough.
// a math transform as below is leveraged to get a stable softmax:
// e^xi/(e^x1 + ...e^xn) = e^(xi - max) / (e^(x1 - max) + ... + e^(xn - max))
// Row base offset: (batch, head, query position) -> start of its S*-long row.
const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length;
// Pass 1: per-thread running max over the valid span, strided by TPB.
for (int i = threadIdx.x; i < valid_end; i += TPB) {
if (i >= valid_start) {
const int index = offset + i;
if (thread_data_max < float(input[index])) {
thread_data_max = float(input[index]);
}
}
}
const auto max = BlockReduce(tmp_storage).Reduce(thread_data_max, cub::Max());
// Store max value
if (threadIdx.x == 0) {
max_block = max;
}
// This barrier both publishes max_block and makes it safe to reuse
// tmp_storage for the second BlockReduce below.
__syncthreads();
// Pass 2: sum of exp(x - max) over the valid span.
float thread_data_sum(0.f);
for (int i = threadIdx.x; i < valid_end; i += TPB) {
if (i >= valid_start) {
const int index = offset + i;
const float val = input[index];
thread_data_sum += expf(val - max_block);
}
}
const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_sum, cub::Sum());
if (threadIdx.x == 0) {
sum_reverse_block = 1.f / sum;
}
__syncthreads();
// Pass 3: write normalized probabilities; masked-out positions get 0.
for (int i = threadIdx.x; i < all_sequence_length; i += TPB) {
const int index = offset + i;
const float val = (i >= valid_start && i < valid_end) ? expf(float(input[index]) - max_block) * sum_reverse_block : 0.f;
output[index] = T(val);
}
}
// Single-pass softmax for rows that fit within one block (callers pick
// TPB >= all_sequence_length), so each thread owns at most one element and
// the strided loops of Softmax are unnecessary.  Also applies the
// unidirectional (causal) restriction when is_unidirectional is set.
// Masked/causally-excluded positions produce probability 0.
template <typename T, unsigned TPB>
__device__ inline void SoftmaxSmall(const int all_sequence_length,
const int sequence_length,
const int valid_end,
const int valid_start,
const T* input,
T* output,
bool is_unidirectional) {
using BlockReduce = cub::BlockReduce<float, TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
__shared__ float sum_reverse_block;
__shared__ float max_block;
// Input dimension is BxNxSxS*; blockIdx.y is batch index b; gridDim.x=N*S; blockIdx.x is index within N*S;
const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length;
const int index = offset + threadIdx.x;
bool is_valid = false;  // whether it has attention mask == 1.
// Update end position for unidirectional.
int end = valid_end;
if (is_unidirectional) {
// Causal limit: this row may only attend up to its own (absolute) position.
int end_unid = all_sequence_length - sequence_length + (blockIdx.x % sequence_length) + 1;
if (end_unid <= valid_start) {
// In this situation, mask of [0, end_unid) and [valid_start, valid_end) has -10000, and [end_unid, valid_start) and [valid_end, all_seq_len) has -20000.
// So [0, end_unid) will also have value after softmax.
is_valid = threadIdx.x < end_unid;
} else {
end = min(valid_end, end_unid);
}
}
is_valid = is_valid || (threadIdx.x >= valid_start && threadIdx.x < end);
// e^x is represented as infinity if x is large enough, like 100.f.
// Infinity divided by Infinity is a NAN. Thus, softmax gets a NAN if one or more item are large enough.
// a math transform as below is leveraged to get a stable softmax:
// e^xi/(e^x1 + ...e^xn) = e^(xi - max) / (e^(x1 - max) + ... + e^(xn - max))
float thread_data_max = is_valid ? float(input[index]) : float(-CUDART_INF_F);
// The trailing `end` argument tells CUB only the first `end` lanes are valid.
const auto max = BlockReduce(tmp_storage).Reduce(thread_data_max, cub::Max(), end);
// Store max value
if (threadIdx.x == 0) {
max_block = max;
}
// Barrier publishes max_block and allows tmp_storage reuse below.
__syncthreads();
float thread_data_exp(0.f);
if (is_valid) {
thread_data_exp = expf(float(input[index]) - max_block);
}
const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_exp, cub::Sum(), end);
// Store value of 1.0/sum.
if (threadIdx.x == 0) {
sum_reverse_block = (1.f) / sum;
}
__syncthreads();
// threadIdx.x might be larger than all_sequence_length due to alignment to 32x.
if (threadIdx.x < all_sequence_length) {
output[index] = T(thread_data_exp * sum_reverse_block);
}
}
// Softmax for a raw 2D attention mask (shape B x S*): mask value <= 0 maps to
// an additive -10000 penalty, and the causal penalty is added on top when
// is_unidirectional is set.  Because the preceding GEMM used alpha = 1 for
// this path (see QkvToContext), the 1/sqrt(head_size) scaling is applied here
// via `scalar`.  Requires TPB >= all_sequence_length (one element per thread).
template <typename T, unsigned TPB>
__device__ inline void SoftmaxWithMask2DSmall(const int all_sequence_length,
const int sequence_length,
const int* attention_mask,  // 2D attention mask
const T* input,
T* output,
const bool is_unidirectional,
const float scalar) {
using BlockReduce = cub::BlockReduce<float, TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
__shared__ float sum_reverse_block;
__shared__ float max_block;
// Input dimension is BxNxSxS*; blockIdx.y is batch index b; gridDim.x=N*S; blockIdx.x is index within N*S;
int index = (blockIdx.y * gridDim.x + blockIdx.x) * all_sequence_length + threadIdx.x;
float thread_data = -CUDART_INF_F;
if (threadIdx.x < all_sequence_length) {
// Mask is shared by all heads of the same batch item.
const int& mask = attention_mask[blockIdx.y * all_sequence_length + threadIdx.x];
float mask_value = mask > 0 ? 0.0f : -10000.0f;
if (is_unidirectional) {
int from_index = all_sequence_length - sequence_length + (blockIdx.x % sequence_length);  // offset of from token in all sequence length.
if (threadIdx.x > from_index) {
mask_value += -10000.0f;
}
}
thread_data = float(input[index]) * scalar + mask_value;
}
const float max = BlockReduce(tmp_storage).Reduce(thread_data, cub::Max(), all_sequence_length);
// Store max value
if (threadIdx.x == 0) {
max_block = max;
}
// Barrier publishes max_block and allows tmp_storage reuse below.
__syncthreads();
float thread_data_exp = threadIdx.x < all_sequence_length ? expf(thread_data - max_block) : 0.0f;
const auto sum = BlockReduce(tmp_storage).Reduce(thread_data_exp, cub::Sum(), all_sequence_length);
// Store value of 1.0/sum
if (threadIdx.x == 0) {
sum_reverse_block = (1.f) / sum;
}
__syncthreads();
if (threadIdx.x < all_sequence_length) {
output[index] = T(thread_data_exp * sum_reverse_block);
}
}
// Kernel wrapper for the unmasked small-row case: the whole row
// [0, all_sequence_length) is treated as valid.
template <typename T, unsigned TPB>
__global__ void SoftmaxKernelSmall(const int all_sequence_length, const int sequence_length, const T* input, T* output, bool is_unidirectional) {
SoftmaxSmall<T, TPB>(all_sequence_length, sequence_length, all_sequence_length, 0, input, output, is_unidirectional);
}
// Kernel wrapper for unmasked rows longer than the block size.  Note the
// multi-element Softmax has no causal support; the host dispatcher only uses
// this path when is_unidirectional is false.
template <typename T, unsigned TPB>
__global__ void SoftmaxKernel(const int all_sequence_length, const int sequence_length, const T* input, T* output) {
Softmax<T, TPB>(all_sequence_length, sequence_length, all_sequence_length, 0, input, output);
}
// Host dispatcher for unmasked softmax: picks the smallest power-of-two block
// size that covers the row so SoftmaxKernelSmall can run one element per
// thread; rows longer than 1024 fall back to the strided SoftmaxKernel, which
// does not support the unidirectional mask (hence the throw).
// Grid: one block per (batch, head, query position) row.
template <typename T>
bool ComputeSoftmax(
cudaStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads,
const T* input, T* output, bool is_unidirectional) {
const dim3 grid(sequence_length * num_heads, batch_size, 1);
if (all_sequence_length <= 32) {
const int blockSize = 32;
SoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (all_sequence_length <= 64) {
const int blockSize = 64;
SoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (all_sequence_length <= 128) {
const int blockSize = 128;
SoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (all_sequence_length <= 256) {
const int blockSize = 256;
SoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (all_sequence_length <= 512) {
const int blockSize = 512;
SoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (all_sequence_length <= 1024) {
const int blockSize = 1024;
SoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output, is_unidirectional);
} else if (!is_unidirectional) {
const int blockSize = 1024;
SoftmaxKernel<T, blockSize><<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, input, output);
} else {
ORT_THROW("Attention CUDA operator does not support unidirectional with total sequence length > 1024.");
}
// Reports launch-configuration errors without clearing the sticky error state.
return CUDA_CALL(cudaPeekAtLastError());
}
// Small-row softmax with a 1D mask: mask_end gives each batch item's valid
// length; mask_start (optional, present when the mask tensor has 2*batch
// entries) gives per-batch start positions.  Thread 0 resolves the per-batch
// window into shared memory, then all threads run SoftmaxSmall over it.
template <typename T, unsigned TPB>
__global__ void MaskedSoftmaxKernelSmall(const int all_sequence_length, const int sequence_length, const int* mask_end, const int* mask_start, const T* input, T* output, bool is_unidirectional) {
__shared__ int start_position;
__shared__ int end_position;
if (threadIdx.x == 0) {
const int batch = blockIdx.y;
start_position = mask_start != nullptr ? max(0, mask_start[batch]) : 0;
end_position = min(all_sequence_length, mask_end[batch]);
// Attend to no word has same effect as attend to all words. This is added to get parity with CPU result.
if (start_position >= end_position) {
start_position = 0;
end_position = all_sequence_length;
}
}
__syncthreads();
SoftmaxSmall<T, TPB>(all_sequence_length, sequence_length, end_position, start_position, input, output, is_unidirectional);
}
// Strided (row longer than block) variant of MaskedSoftmaxKernelSmall.
// Same per-batch [start, end) window resolution; delegates to the
// multi-element Softmax, which has no unidirectional support.
template <typename T, unsigned TPB>
__global__ void MaskedSoftmaxKernel(const int all_sequence_length, const int sequence_length, const int* mask_end, const int* mask_start, const T* input, T* output) {
__shared__ int start_position;
__shared__ int end_position;
if (threadIdx.x == 0) {
const int batch = blockIdx.y;
start_position = mask_start != nullptr ? max(0, mask_start[batch]) : 0;
end_position = min(all_sequence_length, mask_end[batch]);
// Attend to no word has same effect as attend to all words. This is added to get parity with CPU result.
if (start_position >= end_position) {
start_position = 0;
end_position = all_sequence_length;
}
}
__syncthreads();
Softmax<T, TPB>(all_sequence_length, sequence_length, end_position, start_position, input, output);
}
// Kernel wrapper forwarding to the 2D-mask device softmax (small rows only).
template <typename T, unsigned TPB>
__global__ void SoftmaxWithMask2DSmallKernel(const int all_sequence_length, const int sequence_length, const int* attention_mask, const T* input, T* output, const bool is_unidirectional, const float scalar) {
SoftmaxWithMask2DSmall<T, TPB>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
}
// Host dispatcher for softmax with a 1D mask (per-batch valid lengths and
// optional start positions).  Block-size ladder mirrors ComputeSoftmax;
// rows > 1024 use the strided kernel, which cannot be unidirectional.
template <typename T>
bool ComputeSoftmaxWithMask1D(cudaStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads,
const int* mask_index, const int* mask_start, const T* input, T* output, const bool is_unidirectional) {
const dim3 grid(sequence_length * num_heads, batch_size, 1);
if (all_sequence_length <= 32) {
const int blockSize = 32;
MaskedSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (all_sequence_length <= 64) {
const int blockSize = 64;
MaskedSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (all_sequence_length <= 128) {
const int blockSize = 128;
MaskedSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (all_sequence_length <= 256) {
const int blockSize = 256;
MaskedSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (all_sequence_length <= 512) {
const int blockSize = 512;
MaskedSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (all_sequence_length <= 1024) {
const int blockSize = 1024;
MaskedSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output, is_unidirectional);
} else if (!is_unidirectional) {
const int blockSize = 1024;
MaskedSoftmaxKernel<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, mask_index, mask_start, input, output);
} else {
ORT_THROW("Attention CUDA operator does not support unidirectional with total sequence length > 1024.");
}
return CUDA_CALL(cudaPeekAtLastError());
}
// Host dispatcher for softmax with a raw 2D mask (B x S*).  Only the
// one-element-per-thread kernel exists for this path, so rows longer than
// 1024 are unsupported regardless of directionality.  `scalar` carries the
// 1/sqrt(head_size) scaling deferred from the Q*K' GEMM.
template <typename T>
bool ComputeSoftmaxWithMask2D(cudaStream_t stream, const int all_sequence_length, const int sequence_length, const int batch_size, const int num_heads,
const int* attention_mask, const T* input, T* output, const bool is_unidirectional, const float scalar) {
const dim3 grid(sequence_length * num_heads, batch_size, 1);
if (all_sequence_length <= 32) {
const int blockSize = 32;
SoftmaxWithMask2DSmallKernel<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else if (all_sequence_length <= 64) {
const int blockSize = 64;
SoftmaxWithMask2DSmallKernel<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else if (all_sequence_length <= 128) {
const int blockSize = 128;
SoftmaxWithMask2DSmallKernel<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else if (all_sequence_length <= 256) {
const int blockSize = 256;
SoftmaxWithMask2DSmallKernel<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else if (all_sequence_length <= 512) {
const int blockSize = 512;
SoftmaxWithMask2DSmallKernel<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else if (all_sequence_length <= 1024) {
const int blockSize = 1024;
SoftmaxWithMask2DSmallKernel<T, blockSize>
<<<grid, blockSize, 0, stream>>>(all_sequence_length, sequence_length, attention_mask, input, output, is_unidirectional, scalar);
} else {
ORT_THROW("Attention CUDA operator does not supported 2D attention mask with total sequence length > 1024.");
}
return CUDA_CALL(cudaPeekAtLastError());
}
// Transpose BxNxSxH -> BxSxNxH (move the head axis inside the sequence axis).
// Launch contract (see LaunchTransCtx): grid = (S, B), block = (H', N) where
// H' is H possibly divided for vectorized element types; one thread copies
// one element.
template <typename T>
__global__ void TransposeCtx(const int H, const T* input, T* output) {
// Input: BxNxSxH
// Output: BxSxNxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int num_heads = blockDim.y;
int sequence_length = gridDim.x;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
const int in_offset = s * H + n * sequence_length * H + b * NHS;
const int out_offset = n * H + s * NH + b * NHS;
const int i = threadIdx.x;
if (i < H) {
output[out_offset + i] = input[in_offset + i];
}
}
// float overload of the BxNxSxH -> BxSxNxH transpose launcher.  When the head
// size is even, pairs of floats are copied as float2 to halve the thread count
// and widen loads; otherwise falls back to one float per thread.
bool LaunchTransCtx(cudaStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const float* input, float* output) {
const dim3 grid(sequence_length, batch_size, 1);
if (0 == (head_size & 1)) {
const int H = head_size / 2;
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
const dim3 block(H, num_heads, 1);
TransposeCtx<float2><<<grid, block, 0, stream>>>(H, input2, output2);
} else {
const dim3 block(head_size, num_heads, 1);
TransposeCtx<float><<<grid, block, 0, stream>>>(head_size, input, output);
}
return CUDA_CALL(cudaPeekAtLastError());
}
// half overload of the BxNxSxH -> BxSxNxH transpose launcher.  Vectorization
// ladder: 4 halves as one float2 when head_size % 4 == 0, 2 halves as one
// half2 when even, else one half per thread.
bool LaunchTransCtx(cudaStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const half* input, half* output) {
const dim3 grid(sequence_length, batch_size, 1);
if (0 == (head_size % 4)) {
const int H = head_size / 4;
const dim3 block(H, num_heads, 1);
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
TransposeCtx<float2><<<grid, block, 0, stream>>>(H, input2, output2);
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
const dim3 block(H, num_heads, 1);
const half2* input2 = reinterpret_cast<const half2*>(input);
half2* output2 = reinterpret_cast<half2*>(output);
TransposeCtx<half2><<<grid, block, 0, stream>>>(H, input2, output2);
} else {  // this should be an "odd" case. probably not worth catching it in the half2 kernel.
const dim3 block(head_size, num_heads, 1);
TransposeCtx<half><<<grid, block, 0, stream>>>(head_size, input, output);
}
return CUDA_CALL(cudaPeekAtLastError());
}
// Transpose the fused QKV tensor BxSx3xNxH -> 3xBxNxSxH so Q, K and V become
// three contiguous BxNxSxH tensors.  Launch contract (see LaunchTransQkv):
// grid = (S, B, 3) with blockIdx.z selecting the Q/K/V matrix,
// block = (H', N); one thread copies one element.
template <typename T>
__global__ void TransposeQKV(const int H, const T* input, T* output) {
// Input: BxSx3xNxH
// Output: 3xBxNxSxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z;  // matrix id
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
const int out_offset = s * H + n * sequence_length * H + b * NHS + m * NHS * batch_size;
const int i = threadIdx.x;
if (i < H) {
output[out_offset + i] = input[in_offset + i];
}
}
// float overload of the QKV transpose launcher (BxSx3xNxH -> 3xBxNxSxH).
// Uses float2 vectorized copies when head_size is even.
bool LaunchTransQkv(cudaStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const float* input, float* output) {
const dim3 grid(sequence_length, batch_size, 3);
if (0 == (head_size & 1)) {
const int H = head_size / 2;
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
const dim3 block(H, num_heads, 1);
TransposeQKV<float2><<<grid, block, 0, stream>>>(H, input2, output2);
} else {
const dim3 block(head_size, num_heads, 1);
TransposeQKV<float><<<grid, block, 0, stream>>>(head_size, input, output);
}
return CUDA_CALL(cudaPeekAtLastError());
}
// half overload of the QKV transpose launcher (BxSx3xNxH -> 3xBxNxSxH).
// Vectorization ladder: float2 (4 halves) when head_size % 4 == 0, half2 when
// even, else scalar half.
bool LaunchTransQkv(cudaStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const half* input, half* output) {
const dim3 grid(sequence_length, batch_size, 3);
if (0 == (head_size % 4)) {
const int H = head_size / 4;
const dim3 block(H, num_heads, 1);
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
TransposeQKV<float2><<<grid, block, 0, stream>>>(H, input2, output2);
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
const dim3 block(H, num_heads, 1);
const half2* input2 = reinterpret_cast<const half2*>(input);
half2* output2 = reinterpret_cast<half2*>(output);
TransposeQKV<half2><<<grid, block, 0, stream>>>(H, input2, output2);
} else {  // this should be an "odd" case. probably not worth catching it in the half2 kernel..
const dim3 block(head_size, num_heads, 1);
TransposeQKV<half><<<grid, block, 0, stream>>>(head_size, input, output);
}
return CUDA_CALL(cudaPeekAtLastError());
}
// Concatenate the cached K/V from previous steps with this step's K/V along
// the sequence axis.  Launch contract (see LaunchConcatPastToPresent):
// grid = (S*, B, 2) with blockIdx.z choosing K (0) or V (1), block = (H', N);
// each thread copies one element from either `past` or `k_v` into `present`.
template <typename T>
__global__ void ConcatPastToPresent(const int sequence_length,
const T* past,
const T* k_v,
T* present) {
const int h = threadIdx.x;
const int n = threadIdx.y;
const int s = blockIdx.x;
const int b = blockIdx.y;
const int is_v = blockIdx.z;  // 0 for k, 1 for v
const int all_sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int num_heads = blockDim.y;
const int H = blockDim.x;
// past: 2 x BxNxS'xH (past_k and past_v)
// k_v: 2 x BxNxSxH (k and v)
// present: 2 x BxNxS*xH (present_k and present_v)
const int past_sequence_length = all_sequence_length - sequence_length;
const int present_SH = all_sequence_length * H;
const int present_NSH = num_heads * present_SH;
int out_offset = b * present_NSH + n * present_SH + s * H + h + is_v * (present_NSH * batch_size);
if (s < past_sequence_length) {
// First S' positions come from the past cache.
const int past_SH = past_sequence_length * H;
const int past_NSH = num_heads * past_SH;
const int in_offset = b * past_NSH + n * past_SH + s * H + h + is_v * (past_NSH * batch_size);
present[out_offset] = past[in_offset];
} else if (s < all_sequence_length) {
// Remaining S positions come from this step's K/V.
const int SH = sequence_length * H;
const int NSH = num_heads * SH;
const int in_offset = b * NSH + n * SH + (s - past_sequence_length) * H + h + is_v * (NSH * batch_size);
present[out_offset] = k_v[in_offset];
}
}
// float overload of the past/present K-V concatenation launcher.
// Copies as float2 when head_size is even.
bool LaunchConcatPastToPresent(cudaStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const float* past,
const float* k_v,
float* present) {
const dim3 grid(all_sequence_length, batch_size, 2);
if (0 == (head_size & 1)) {
const dim3 block(head_size / 2, num_heads, 1);
ConcatPastToPresent<float2><<<grid, block, 0, stream>>>(sequence_length, reinterpret_cast<const float2*>(past), reinterpret_cast<const float2*>(k_v), reinterpret_cast<float2*>(present));
} else {
const dim3 block(head_size, num_heads, 1);
ConcatPastToPresent<float><<<grid, block, 0, stream>>>(sequence_length, past, k_v, present);
}
return CUDA_CALL(cudaPeekAtLastError());
}
// half overload of the past/present K-V concatenation launcher.
// Vectorization ladder: float2 (4 halves) when head_size % 4 == 0, half2 when
// even, else scalar half.
bool LaunchConcatPastToPresent(cudaStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const half* past,
const half* k_v,
half* present) {
const dim3 grid(all_sequence_length, batch_size, 2);
if (0 == (head_size % 4)) {
const dim3 block(head_size / 4, num_heads, 1);
ConcatPastToPresent<float2><<<grid, block, 0, stream>>>(sequence_length, reinterpret_cast<const float2*>(past), reinterpret_cast<const float2*>(k_v), reinterpret_cast<float2*>(present));
} else if (0 == (head_size & 1)) {
const dim3 block(head_size / 2, num_heads, 1);
ConcatPastToPresent<half2><<<grid, block, 0, stream>>>(sequence_length, reinterpret_cast<const half2*>(past), reinterpret_cast<const half2*>(k_v), reinterpret_cast<half2*>(present));
} else {  // this should be an "odd" case. probably not worth catching it in the half2 kernel.
const dim3 block(head_size, num_heads, 1);
ConcatPastToPresent<half><<<grid, block, 0, stream>>>(sequence_length, past, k_v, present);
}
return CUDA_CALL(cudaPeekAtLastError());
}
// float overload: uniform wrapper so QkvToContext can call strided-batched
// GEMM generically over T.  Note cuBLAS is column-major; callers pass
// dimensions/transposes with that in mind.
cublasStatus_t inline CublasGemmStridedBatched(
cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k, const float alpha,
const float* A, int lda, long long int strideA, const float* B, int ldb, long long int strideB,
const float beta, float* C, int ldc, long long int strideC, int batchCount) {
return cublasSgemmStridedBatched(
handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount);
}
// half overload of the strided-batched GEMM wrapper (fp16 compute via
// cublasHgemmStridedBatched).
cublasStatus_t inline CublasGemmStridedBatched(
cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k, const half alpha,
const half* A, int lda, long long int strideA, const half* B, int ldb, long long int strideB,
const half beta, half* C, int ldc, long long int strideC, int batchCount) {
return cublasHgemmStridedBatched(
handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount);
}
// Core attention computation:
//   1. transpose input BxSx3xNxH into contiguous Q, K, V (each BxNxSxH)
//   2. optionally append past K/V into `present` and switch K/V pointers to it
//   3. scores = softmax(Q*K' * 1/sqrt(H) + mask)  (scratch1 -> scratch2)
//   4. context = scores * V                       (into scratch3)
//   5. transpose BxNxSxH back to the BxSxNxH output layout
// Workspace layout: [scratch1 | scratch2 | scratch3]; the first two are
// `bytes` each (256-byte aligned score buffers, see ScratchSize), scratch3
// holds the 3xBxNxSxH Q/K/V copy.  Returns false on any launch/BLAS failure.
template <typename T>
bool QkvToContext(
const cudaDeviceProp& prop, cublasHandle_t& cublas, cudaStream_t stream,
const int batch_size, const int sequence_length, const int num_heads, const int head_size, const size_t element_size,
const T* input, T* output, T* workspace,
const int* mask_index, const std::vector<int64_t>* mask_index_dims,
bool is_unidirectional, int past_sequence_length, const T* past, T* present) {
const int all_sequence_length = past_sequence_length + sequence_length;
const size_t bytes = ScratchSize(element_size, batch_size, num_heads, sequence_length, all_sequence_length);
T* scratch1 = workspace;
T* scratch2 = scratch1 + (bytes / element_size);
T* scratch3 = scratch2 + (bytes / element_size);
// input should be BxSx3xNxH => scratch3: 3xBxNxSxH
if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, input, scratch3)) {
return false;
}
// now scratch3 has Q, K, V: each has size BxNxSxH
const int batches = batch_size * num_heads;
const int size_per_batch = sequence_length * head_size;
const int total_size = batches * size_per_batch;
const T* q = scratch3;
const T* k = q + total_size;
const T* v = k + total_size;
cublasSetStream(cublas, stream);
// Allow tensor-core math where the device/type supports it.
CublasMathModeSetter helper(prop, cublas, CUBLAS_TENSOR_OP_MATH);
// Concat past (2xBxNxS'xH) to present (2xBxNxS*xH):
// past_k (BxNxS'xH) + k (BxNxSxH) => present_k (BxNxS*xH)
// past_v (BxNxS'xH) + v (BxNxSxH) => present_v (BxNxS*xH)
const int present_size_per_batch = all_sequence_length * head_size;
if (nullptr != present) {
if (!LaunchConcatPastToPresent(stream, all_sequence_length, sequence_length, batch_size, head_size, num_heads, past, k, present)) {
return false;
}
// update pointers to present_k and present_v.
k = present;
v = present + batches * present_size_per_batch;
}
bool use_2d_attention_mask = (nullptr != mask_index && nullptr != mask_index_dims && mask_index_dims->size() == 2);
// compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS*
// Q: BxNxSxH, K (present_k): BxNxS*xH, Q*K': BxNxSxS*
const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
const int temp_matrix_size = sequence_length * all_sequence_length;
// For the 2D-mask path alpha is 1 here; the 1/sqrt(H) scaling is applied
// inside ComputeSoftmaxWithMask2D instead.
T alpha = (T)(use_2d_attention_mask ? 1.0f : rsqrt_head_size);
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, CUBLAS_OP_T, CUBLAS_OP_N, all_sequence_length, sequence_length, head_size, alpha, k, head_size, present_size_per_batch,
q, head_size, size_per_batch, 0.f, scratch1, all_sequence_length, temp_matrix_size, batches))) {
return false;
}
// apply softmax and store result P to scratch2: BxNxSxS*
if (use_2d_attention_mask) {  // 2d attention mask
if (!ComputeSoftmaxWithMask2D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, scratch1, scratch2, is_unidirectional, rsqrt_head_size)) {
return false;
}
} else if (nullptr != mask_index) {  // 1d mask index
ORT_ENFORCE(nullptr != mask_index_dims && mask_index_dims->size() == 1);
// mask_index has 1D shape: either (batch_size) or (2*batch_size). Only the later one has start postions.
const int* mask_start = (mask_index_dims->at(0) > batch_size) ? mask_index + batch_size : nullptr;
if (!ComputeSoftmaxWithMask1D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, mask_start, scratch1, scratch2, is_unidirectional)) {
return false;
}
} else {  // no mask
if (!ComputeSoftmax<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, scratch1, scratch2, is_unidirectional)) {
return false;
}
}
// compute P*V (as V*P), and store in scratch3: BxNxSxH
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, CUBLAS_OP_N, CUBLAS_OP_N, head_size, sequence_length, all_sequence_length, 1.f, v, head_size, present_size_per_batch,
scratch2, all_sequence_length, temp_matrix_size, 0.f, scratch3, head_size, size_per_batch, batches))) {
return false;
}
// scratch3 is BxNxSxH, transpose to output BxSxNxH
return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, scratch3, output);
}
// Public entry point: dispatches QkvToContext on the element type implied by
// element_size (2 bytes -> half, anything else -> float).
// All kernels and BLAS calls are queued on the default stream.
bool LaunchAttentionKernel(
    const cudaDeviceProp& prop,
    const void* input,
    const int* mask_index,
    const std::vector<int64_t>* mask_index_dims,
    void* output,
    const int batch_size,
    const int sequence_length,
    const int num_heads,
    const int head_size,
    void* workspace,
    cublasHandle_t& cublas,
    const size_t element_size,
    bool is_unidirectional,
    int past_sequence_length,
    const void* past,
    void* present) {
  const cudaStream_t stream = nullptr;  // default stream
  const bool use_half = (element_size == 2);
  if (use_half) {
    return QkvToContext(prop, cublas, stream,
                        batch_size, sequence_length, num_heads, head_size, element_size,
                        static_cast<const half*>(input),
                        static_cast<half*>(output),
                        static_cast<half*>(workspace),
                        mask_index, mask_index_dims, is_unidirectional,
                        past_sequence_length,
                        static_cast<const half*>(past),
                        static_cast<half*>(present));
  }
  return QkvToContext(prop, cublas, stream,
                      batch_size, sequence_length, num_heads, head_size, element_size,
                      static_cast<const float*>(input),
                      static_cast<float*>(output),
                      static_cast<float*>(workspace),
                      mask_index, mask_index_dims, is_unidirectional,
                      past_sequence_length,
                      static_cast<const float*>(past),
                      static_cast<float*>(present));
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
23b85e220dcc27c8417af9592d0951ab81b1ac9c.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/vol2col.cuh>
namespace at {
namespace native {
namespace {
// Validates the arguments of slow 3-D transposed convolution (shared by
// the forward, backward-input, and grad-parameter paths).
// - input must be a non-empty 4D (unbatched) or 5D (batched) tensor; an
//   empty batch dimension (size(0) == 0) is tolerated.
// - stride and dilation must be strictly positive; output_padding must be
//   smaller than either the stride or the dilation in every dimension.
// - weight, when defined, fixes n_input_plane / n_output_plane and the
//   kernel extents; weight_nullable != 0 permits an undefined weight
//   (used by the path that only accumulates grad_bias).
// - grad_output, when defined, must match the computed output shape.
static inline void slow_conv_transpose3d_shape_check(
    const Tensor& input,
    const Tensor& grad_output,
    const Tensor& weight,
    const Tensor& bias,
    int kernel_depth,
    int kernel_width,
    int kernel_height,
    int stride_depth,
    int stride_width,
    int stride_height,
    int padding_depth,
    int padding_width,
    int padding_height,
    int dilation_depth,
    int dilation_width,
    int dilation_height,
    int output_padding_depth,
    int output_padding_width,
    int output_padding_height,
    int weight_nullable) {
  // Allow for empty batch size but not other dimensions
  bool valid_empty = false;
  int ndim = input.dim();
  if (ndim == 4) {
    valid_empty = input.size(0) == 0 && input.size(1) != 0 &&
        input.size(2) != 0 && input.size(3) != 0;
  } else if (ndim == 5) {
    valid_empty = input.size(0) == 0 && input.size(1) != 0 &&
        input.size(2) != 0 && input.size(3) != 0 && input.size(4) != 0;
  }
  TORCH_CHECK(
      (input.numel() != 0 || valid_empty) && (ndim == 4 || ndim == 5),
      "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ",
      input.sizes());
  TORCH_CHECK(
      stride_depth > 0 && stride_width > 0 && stride_height > 0,
      "stride should be greater than zero, but got stride_depth: ",
      stride_depth,
      " stride_height: ",
      stride_height,
      " stride_width: ",
      stride_width);
  TORCH_CHECK(
      dilation_depth > 0 && dilation_width > 0 && dilation_height > 0,
      "dilation should be greater than zero, but got dilation_depth: ",
      dilation_depth,
      ", dilation_height: ",
      dilation_height,
      ", dilation_width: ",
      dilation_width);
  TORCH_CHECK(
      (output_padding_depth < stride_depth ||
       output_padding_depth < dilation_depth) &&
          (output_padding_width < stride_width ||
           output_padding_width < dilation_width) &&
          (output_padding_height < stride_height ||
           output_padding_height < dilation_height),
      "output padding must be smaller than either stride or dilation,",
      " but got output_padding_depth: ",
      output_padding_depth,
      " output_padding_height: ",
      output_padding_height,
      " output_padding_width: ",
      output_padding_width,
      " stride_depth: ",
      stride_depth,
      " stride_height: ",
      stride_height,
      " stride_width: ",
      stride_width,
      " dilation_depth: ",
      dilation_depth,
      " dilation_height: ",
      dilation_height,
      " dilation_width: ",
      dilation_width);
  // number of input & output planes and kernel size is indirectly defined by
  // the weight tensor
  if (weight.defined()) {
    TORCH_CHECK(
        weight.numel() != 0 && weight.dim() == 5,
        "non-empty 5D (n_output_plane x n_input_plane ",
        "x kernel_depth x kernel_height x kernel_width) tensor ",
        "expected for weight, but got: ",
        weight.sizes());
    if (bias.defined()) {
      // bias must be 1D with n_output_plane (== weight.size(1)) elements
      check_dim_size(bias, 1, 0, weight.size(1));
    }
  } else if (!weight_nullable) {
    AT_ERROR("weight tensor is expected to be non-nullable");
  }
  // Dimension indices of (feature, depth, height, width); shifted by one in
  // batched (5D) mode to skip the leading batch dimension.
  int dimf = 0;
  int dimd = 1;
  int dimh = 2;
  int dimw = 3;
  if (ndim == 5) {
    dimf++;
    dimd++;
    dimh++;
    dimw++;
  }
  if (weight.defined()) {
    const int64_t n_input_plane = weight.size(0);
    check_dim_size(input, ndim, dimf, n_input_plane);
  }
  int64_t input_width = input.size(dimw);
  int64_t input_height = input.size(dimh);
  int64_t input_depth = input.size(dimd);
  // Transposed-convolution output size formula (inverse of the
  // convolution output-size formula).
  int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
      (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
  int64_t output_height = (input_height - 1) * stride_height -
      2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
      output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;
  if (output_depth < 1 || output_width < 1 || output_height < 1) {
    AT_ERROR(
        "Given input size per channel: (",
        input_depth,
        " x ",
        input_height,
        " x ",
        input_width,
        "). Calculated output size per channel: (",
        output_depth,
        " x ",
        output_height,
        " x ",
        output_width,
        "). Output size is too small");
  }
  if (grad_output.defined()) {
    if (weight.defined()) {
      const int64_t n_output_plane = weight.size(1);
      check_dim_size(grad_output, ndim, dimf, n_output_plane);
    } else if (bias.defined()) {
      const int64_t n_output_plane = bias.size(0);
      check_dim_size(grad_output, ndim, dimf, n_output_plane);
    }
    check_dim_size(grad_output, ndim, dimd, output_depth);
    check_dim_size(grad_output, ndim, dimh, output_height);
    check_dim_size(grad_output, ndim, dimw, output_width);
  }
}
// Forward pass of slow 3-D transposed convolution ("deconvolution").
// Per batch element: columns = weight^T x input (one GEMM), then col2vol
// scatters the columns into the output volume; the bias is added with a
// rank-1 GEMM against a buffer of ones.
//   finput      -> reused as the "columns" scratch workspace
//   fgrad_input -> reused as the shared all-ones buffer for bias accumulation
// Fixes vs. previous revision: the output_padding size-check message said
// "stride"; is_batch was declared `int`; the batch loop variable narrowed
// int64_t batch_size to int.
void slow_conv_transpose3d_out_cuda_template(
    Tensor& output,
    const Tensor& input_,
    const Tensor& weight_,
    IntArrayRef kernel_size,
    const Tensor& bias,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    Tensor& finput,
    Tensor& fgrad_input) {
  TORCH_CHECK(
      kernel_size.size() == 3,
      "It is expected kernel_size equals to 3, but got size ",
      kernel_size.size());
  TORCH_CHECK(
      dilation.size() == 3,
      "It is expected dilation equals to 3, but got size ",
      dilation.size());
  TORCH_CHECK(
      padding.size() == 3,
      "It is expected padding equals to 3, but got size ",
      padding.size());
  TORCH_CHECK(
      stride.size() == 3,
      "It is expected stride equals to 3, but got size ",
      stride.size());
  // BUGFIX: the message previously (incorrectly) mentioned "stride".
  TORCH_CHECK(
      output_padding.size() == 3,
      "It is expected output_padding equals to 3, but got size ",
      output_padding.size());
  int64_t kernel_depth = kernel_size[0];
  int64_t kernel_height = kernel_size[1];
  int64_t kernel_width = kernel_size[2];
  int64_t dilation_depth = dilation[0];
  int64_t dilation_height = dilation[1];
  int64_t dilation_width = dilation[2];
  int64_t padding_depth = padding[0];
  int64_t padding_height = padding[1];
  int64_t padding_width = padding[2];
  int64_t stride_depth = stride[0];
  int64_t stride_height = stride[1];
  int64_t stride_width = stride[2];
  int64_t output_padding_depth = output_padding[0];
  int64_t output_padding_height = output_padding[1];
  int64_t output_padding_width = output_padding[2];
  // Aliases for the scratch tensors (Tensor is a shared handle, so
  // resizing `columns`/`ones` resizes the caller-provided buffers).
  Tensor columns = finput;
  Tensor ones = fgrad_input;
  int n_input_plane = weight_.size(0);
  int n_output_plane = weight_.size(1);
  TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
      weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4},
      columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6};
  checkAllSameGPU(
      "slow_conv_transpose3d_out_cuda",
      {input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg});
  slow_conv_transpose3d_shape_check(
      input_,
      Tensor(),
      weight_,
      bias,
      kernel_depth,
      kernel_width,
      kernel_height,
      stride_depth,
      stride_width,
      stride_height,
      padding_depth,
      padding_width,
      padding_height,
      dilation_depth,
      dilation_width,
      dilation_height,
      output_padding_depth,
      output_padding_width,
      output_padding_height,
      0);
  TORCH_CHECK(
      !bias.defined() || bias.is_contiguous(),
      "bias tensor has to be contiguous");
  Tensor input = input_.contiguous();
  Tensor weight = weight_.contiguous();
  // Promote an unbatched (4D) input to a batch of one.
  bool is_batch = false;  // was `int`; bool matches the backward template
  if (input.dim() == 4) {
    // Force batch
    is_batch = true;
    input.resize_(
        {1, input.size(0), input.size(1), input.size(2), input.size(3)});
  }
  int64_t input_width = input.size(4);
  int64_t input_height = input.size(3);
  int64_t input_depth = input.size(2);
  // Transposed-convolution output extents.
  int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
      (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
  int64_t output_height = (input_height - 1) * stride_height -
      2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
      output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;
  // Batch size + input planes
  int64_t batch_size = input.size(0);
  // Resize output
  output.resize_(
      {batch_size, n_output_plane, output_depth, output_height, output_width});
  // Resize temporary columns
  columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth,
                   input_depth * input_height * input_width});
  // Define a buffer of ones, for bias accumulation
  // Note: this buffer can be shared with other modules, it only ever gets
  // increased, and always contains ones.
  if (ones.dim() != 3 ||
      ones.size(0) * ones.size(1) * ones.size(2) <
          output_depth * output_height * output_width) {
    // Resize plane and fill with ones...
    ones.resize_({output_depth, output_height, output_width});
    ones.fill_(1);
  }
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] {
        using accscalar_t = at::acc_type<scalar_t, true>;
        // Helpers
        Tensor input_n;
        Tensor output_n;
        // For each elt in batch, do:
        // (int64_t loop var: batch_size is int64_t, avoid narrowing)
        for (int64_t elt = 0; elt < batch_size; elt++) {
          // Matrix mulitply per output:
          input_n = input.select(0, elt);
          output_n = output.select(0, elt);
          // M,N,K are dims of matrix A and B
          // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
          int64_t m =
              weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
          int64_t n = columns.size(1);
          int64_t k = weight.size(0);
          // Do GEMM (note: this is a bit confusing because gemm assumes
          // column-major matrices): columns = weight^T x input_n
          at::cuda::blas::gemm<scalar_t>(
              'n',
              't',
              n,
              m,
              k,
              static_cast<scalar_t>(1),
              input_n.data_ptr<scalar_t>(),
              n,
              weight.data_ptr<scalar_t>(),
              m,
              static_cast<scalar_t>(0),
              columns.data_ptr<scalar_t>(),
              n);
          // Unpack columns back into input (scatter-add into the output
          // volume):
          at::native::col2vol<scalar_t, accscalar_t>(
              at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
              columns.data_ptr<scalar_t>(),
              n_output_plane,
              output_depth,
              output_height,
              output_width,
              input_depth,
              input_height,
              input_width,
              kernel_depth,
              kernel_height,
              kernel_width,
              padding_depth,
              padding_height,
              padding_width,
              stride_depth,
              stride_height,
              stride_width,
              dilation_depth,
              dilation_height,
              dilation_width,
              output_n.data_ptr<scalar_t>());
          // Do Bias after:
          // M,N,K are dims of matrix A and B
          // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
          int64_t m_ = n_output_plane;
          int64_t n_ = output_depth * output_height * output_width;
          int64_t k_ = 1;
          // Rank-1 update: output_n += bias x ones^T
          if (bias.defined()) {
            at::cuda::blas::gemm<scalar_t>(
                't',
                'n',
                n_,
                m_,
                k_,
                static_cast<scalar_t>(1),
                ones.data_ptr<scalar_t>(),
                k_,
                bias.data_ptr<scalar_t>(),
                k_,
                static_cast<scalar_t>(1),
                output_n.data_ptr<scalar_t>(),
                n_);
          }
        }
        // Strip the forced batch dimension again for 4D callers.
        if (is_batch) {
          output.resize_(
              {n_output_plane, output_depth, output_height, output_width});
          input.resize_(
              {n_input_plane, input_depth, input_height, input_width});
        }
      });
}
// Backward-input pass of slow 3-D transposed convolution:
// grad_input = conv3d(grad_output, weight). Implemented as vol2col on
// grad_output followed by one GEMM per batch element.
//   finput      -> reused as the "grad_columns" scratch workspace
//   fgrad_input -> unused here (kept for signature symmetry)
// Fixes vs. previous revision: the output_padding size-check message said
// "stride"; the batch loop variable narrowed int64_t batch_size to int.
void slow_conv_transpose3d_backward_out_cuda_template(
    const Tensor& input_,
    const Tensor& grad_output_,
    Tensor& grad_input,
    const Tensor& weight_,
    const Tensor& finput,
    const Tensor& fgrad_input,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation) {
  TORCH_CHECK(
      kernel_size.size() == 3,
      "It is expected kernel_size equals to 3, but got size ",
      kernel_size.size());
  TORCH_CHECK(
      dilation.size() == 3,
      "It is expected dilation equals to 3, but got size ",
      dilation.size());
  TORCH_CHECK(
      padding.size() == 3,
      "It is expected padding equals to 3, but got size ",
      padding.size());
  TORCH_CHECK(
      stride.size() == 3,
      "It is expected stride equals to 3, but got size ",
      stride.size());
  // BUGFIX: the message previously (incorrectly) mentioned "stride".
  TORCH_CHECK(
      output_padding.size() == 3,
      "It is expected output_padding equals to 3, but got size ",
      output_padding.size());
  // Alias for the scratch tensor (shared handle: resize_ affects caller's
  // buffer).
  Tensor grad_columns = finput;
  int n_input_plane = weight_.size(0);
  int n_output_plane = weight_.size(1);
  int64_t kernel_depth = kernel_size[0];
  int64_t kernel_height = kernel_size[1];
  int64_t kernel_width = kernel_size[2];
  int64_t dilation_depth = dilation[0];
  int64_t dilation_height = dilation[1];
  int64_t dilation_width = dilation[2];
  int64_t padding_depth = padding[0];
  int64_t padding_height = padding[1];
  int64_t padding_width = padding[2];
  int64_t stride_depth = stride[0];
  int64_t stride_height = stride[1];
  int64_t stride_width = stride[2];
  int64_t output_padding_depth = output_padding[0];
  int64_t output_padding_height = output_padding[1];
  int64_t output_padding_width = output_padding[2];
  TensorArg input_arg{input_, "input", 1},
      grad_output_arg{grad_output_, "grad_output", 2},
      weight_arg{weight_, "weight", 3},
      grad_columns_arg{grad_columns, "grad_columns", 4},
      grad_input_arg{grad_input, "grad_input", 5};
  checkAllSameGPU(
      "slow_conv_transpose3d_backward_out_cuda",
      {input_arg,
       grad_output_arg,
       weight_arg,
       grad_columns_arg,
       grad_input_arg});
  slow_conv_transpose3d_shape_check(
      input_,
      grad_output_,
      weight_,
      Tensor(),
      kernel_depth,
      kernel_width,
      kernel_height,
      stride_depth,
      stride_width,
      stride_height,
      padding_depth,
      padding_width,
      padding_height,
      dilation_depth,
      dilation_width,
      dilation_height,
      output_padding_depth,
      output_padding_width,
      output_padding_height,
      0);
  Tensor input = input_.contiguous();
  Tensor grad_output = grad_output_.contiguous();
  Tensor weight = weight_.contiguous();
  // Promote unbatched (4D) tensors to a batch of one.
  bool is_batch = false;
  if (input.dim() == 4) {
    // Force batch
    is_batch = true;
    input.resize_(
        {1, input.size(0), input.size(1), input.size(2), input.size(3)});
    grad_output.resize_({1,
                         grad_output.size(0),
                         grad_output.size(1),
                         grad_output.size(2),
                         grad_output.size(3)});
  }
  int64_t input_width = input.size(4);
  int64_t input_height = input.size(3);
  int64_t input_depth = input.size(2);
  // Transposed-convolution output extents (must match grad_output).
  int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
      (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
  int64_t output_height = (input_height - 1) * stride_height -
      2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
      output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;
  // Batch size + input planes
  int64_t batch_size = input.size(0);
  // Resize output
  grad_input.resize_(
      {batch_size, n_input_plane, input_depth, input_height, input_width});
  // Resize temporary columns
  grad_columns.resize_(
      {n_output_plane * kernel_width * kernel_height * kernel_depth,
       input_depth * input_height * input_width});
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] {
        // Helpers
        Tensor grad_input_n;
        Tensor grad_output_n;
        // For each elt in batch, do:
        // (int64_t loop var: batch_size is int64_t, avoid narrowing)
        for (int64_t elt = 0; elt < batch_size; elt++) {
          // Matrix mulitply per sample:
          grad_input_n = grad_input.select(0, elt);
          grad_output_n = grad_output.select(0, elt);
          // Extract columns (gather from the output volume):
          at::native::vol2col<scalar_t>(
              at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
              grad_output_n.data_ptr<scalar_t>(),
              n_output_plane,
              output_depth,
              output_height,
              output_width,
              input_depth,
              input_height,
              input_width,
              kernel_depth,
              kernel_height,
              kernel_width,
              padding_depth,
              padding_height,
              padding_width,
              stride_depth,
              stride_height,
              stride_width,
              dilation_depth,
              dilation_height,
              dilation_width,
              grad_columns.data_ptr<scalar_t>());
          // M,N,K are dims of matrix A and B
          // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
          int64_t m = weight.size(0);
          int64_t n = grad_columns.size(1);
          int64_t k =
              weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
          // Do GEMM (note: this is a bit confusing because gemm assumes
          // column-major matrices): grad_input_n = weight x grad_columns
          at::cuda::blas::gemm<scalar_t>(
              'n',
              'n',
              n,
              m,
              k,
              static_cast<scalar_t>(1),
              grad_columns.data_ptr<scalar_t>(),
              n,
              weight.data_ptr<scalar_t>(),
              k,
              static_cast<scalar_t>(0),
              grad_input_n.data_ptr<scalar_t>(),
              n);
        }
        // Strip the forced batch dimension again for 4D callers.
        if (is_batch) {
          grad_output.resize_(
              {n_output_plane, output_depth, output_height, output_width});
          input.resize_(
              {n_input_plane, input_depth, input_height, input_width});
          grad_input.resize_(
              {n_input_plane, input_depth, input_height, input_width});
        }
      });
}
// Parameter-gradient pass of slow 3-D transposed convolution. Accumulates
// (scaled by scale_) into grad_weight via vol2col(grad_output) + GEMM, and
// into grad_bias via a GEMV against a buffer of ones. Either grad may be
// undefined, in which case that accumulation is skipped.
//   finput      -> reused as the "columns" scratch workspace
//   fgrad_input -> reused as the shared all-ones buffer
// Fixes vs. previous revision: the output_padding size-check message said
// "stride"; the batch loop variable narrowed int64_t batch_size to int;
// a misleading inline comment on k (it is depth*height*width, not h*w).
void slow_conv_transpose3d_acc_grad_parameters_cuda(
    const Tensor& input_,
    const Tensor& grad_output_,
    Tensor& grad_weight,
    Tensor& grad_bias,
    const Tensor& finput,
    const Tensor& fgrad_input,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    int scale_) {
  TORCH_CHECK(
      kernel_size.size() == 3,
      "It is expected kernel_size equals to 3, but got size ",
      kernel_size.size());
  TORCH_CHECK(
      dilation.size() == 3,
      "It is expected dilation equals to 3, but got size ",
      dilation.size());
  TORCH_CHECK(
      padding.size() == 3,
      "It is expected padding equals to 3, but got size ",
      padding.size());
  TORCH_CHECK(
      stride.size() == 3,
      "It is expected stride equals to 3, but got size ",
      stride.size());
  // BUGFIX: the message previously (incorrectly) mentioned "stride".
  TORCH_CHECK(
      output_padding.size() == 3,
      "It is expected output_padding equals to 3, but got size ",
      output_padding.size());
  int64_t kernel_depth = kernel_size[0];
  int64_t kernel_height = kernel_size[1];
  int64_t kernel_width = kernel_size[2];
  int64_t dilation_depth = dilation[0];
  int64_t dilation_height = dilation[1];
  int64_t dilation_width = dilation[2];
  int64_t padding_depth = padding[0];
  int64_t padding_height = padding[1];
  int64_t padding_width = padding[2];
  int64_t stride_depth = stride[0];
  int64_t stride_height = stride[1];
  int64_t stride_width = stride[2];
  int64_t output_padding_depth = output_padding[0];
  int64_t output_padding_height = output_padding[1];
  int64_t output_padding_width = output_padding[2];
  // Aliases for the scratch tensors (shared handles).
  Tensor columns = finput;
  Tensor ones = fgrad_input;
  TensorArg input_arg{input_, "input", 1},
      grad_output_arg{grad_output_, "grad_output", 2},
      grad_weight_arg{grad_weight, "grad_weight", 3},
      grad_bias_arg{grad_bias, "grad_bias", 4},
      columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6};
  checkAllSameGPU(
      "slow_conv_transpose3d_acc_grad_parameters_cuda",
      {input_arg,
       grad_output_arg,
       grad_weight_arg,
       grad_bias_arg,
       columns_arg,
       ones_arg});
  // weight_nullable = 1: grad_weight may be undefined (grad_bias-only path).
  slow_conv_transpose3d_shape_check(
      input_,
      grad_output_,
      grad_weight,
      grad_bias,
      kernel_depth,
      kernel_width,
      kernel_height,
      stride_depth,
      stride_width,
      stride_height,
      padding_depth,
      padding_width,
      padding_height,
      dilation_depth,
      dilation_width,
      dilation_height,
      output_padding_depth,
      output_padding_width,
      output_padding_height,
      1);
  int n_output_plane;
  if (grad_weight.defined()) {
    n_output_plane = grad_weight.size(1);
  } else if (grad_bias.defined()) {
    n_output_plane = grad_bias.size(0);
  } else {
    // Nothing requested; no work to do.
    return;
  }
  if (grad_weight.defined()) {
    TORCH_CHECK(
        grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
  }
  if (grad_bias.defined()) {
    TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
    TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
  }
  Tensor input = input_.contiguous();
  Tensor grad_output = grad_output_.contiguous();
  // Promote unbatched (4D) tensors to a batch of one.
  bool is_batch = false;
  if (input.dim() == 4) {
    // Force batch
    is_batch = true;
    input.resize_(
        {1, input.size(0), input.size(1), input.size(2), input.size(3)});
    grad_output.resize_({1,
                         grad_output.size(0),
                         grad_output.size(1),
                         grad_output.size(2),
                         grad_output.size(3)});
  }
  int64_t input_width = input.size(4);
  int64_t input_height = input.size(3);
  int64_t input_depth = input.size(2);
  // Transposed-convolution output extents (must match grad_output).
  int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
      (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
  int64_t output_height = (input_height - 1) * stride_height -
      2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
      output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;
  // Batch size + input planes
  int64_t batch_size = input.size(0);
  // Define a buffer of ones, for bias accumulation
  if (ones.dim() != 3 ||
      ones.size(0) * ones.size(1) * ones.size(2) <
          output_depth * output_height * output_width) {
    // Resize plane and fill with ones...
    ones.resize_({output_depth, output_height, output_width});
    ones.fill_(1);
  }
  // Resize temporary columns
  columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth,
                   input_depth * input_height * input_width});
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input.scalar_type(),
      "slow_conv_transpose3d_acc_grad_parameters_cuda",
      [&] {
        // Helpers
        Tensor input_n;
        Tensor grad_output_n;
        scalar_t scale = static_cast<scalar_t>(scale_);
        // For each elt in batch, do:
        // (int64_t loop var: batch_size is int64_t, avoid narrowing)
        for (int64_t elt = 0; elt < batch_size; elt++) {
          // Matrix mulitply per output:
          grad_output_n = grad_output.select(0, elt);
          // Do Weight:
          if (grad_weight.defined()) {
            // Matrix mulitply per output:
            input_n = input.select(0, elt);
            // Extract columns:
            at::native::vol2col<scalar_t>(
                at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                grad_output_n.data_ptr<scalar_t>(),
                n_output_plane,
                output_depth,
                output_height,
                output_width,
                input_depth,
                input_height,
                input_width,
                kernel_depth,
                kernel_height,
                kernel_width,
                padding_depth,
                padding_height,
                padding_width,
                stride_depth,
                stride_height,
                stride_width,
                dilation_depth,
                dilation_height,
                dilation_width,
                columns.data_ptr<scalar_t>());
            // M,N,K are dims of matrix A and B
            // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
            int64_t n = columns.size(0); // n_output_plane * kt * kh * kw
            int64_t m = input_n.size(0); // n_input_plane
            int64_t k = columns.size(1); // input_depth * input_height * input_width
            // Do GEMM (note: this is a bit confusing because gemm assumes
            // column-major matrices): grad_weight += scale * input_n x columns^T
            at::cuda::blas::gemm<scalar_t>(
                't',
                'n',
                n,
                m,
                k,
                scale,
                columns.data_ptr<scalar_t>(),
                k,
                input_n.data_ptr<scalar_t>(),
                k,
                static_cast<scalar_t>(1),
                grad_weight.data_ptr<scalar_t>(),
                n);
          }
          // Do Bias:
          if (grad_bias.defined()) {
            // M,N,K are dims of matrix A and B
            // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
            int64_t m_ = n_output_plane;
            int64_t k_ = output_depth * output_height * output_width;
            // Do GEMV (note: this is a bit confusing because gemv assumes
            // column-major matrices): grad_bias += scale * grad_output_n x ones
            at::cuda::blas::gemv<scalar_t>(
                't',
                k_,
                m_,
                scale,
                grad_output_n.data_ptr<scalar_t>(),
                k_,
                ones.data_ptr<scalar_t>(),
                1,
                static_cast<scalar_t>(1),
                grad_bias.data_ptr<scalar_t>(),
                1);
          }
        }
        // Strip the forced batch dimension again for 4D callers.
        if (is_batch) {
          grad_output.resize_(
              {n_output_plane, output_depth, output_height, output_width});
          input.resize_(
              {input.size(1), input_depth, input_height, input_width});
        }
      });
}
} // namespace
// Out-variant entry point: allocates the two scratch buffers consumed by
// the template implementation (columns workspace and "ones" buffer), then
// writes the result into `output` and returns it.
Tensor& slow_conv_transpose3d_out_cuda(
    Tensor& output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    const Tensor& bias,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation) {
  Tensor columns_buf = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor ones_buf = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  slow_conv_transpose3d_out_cuda_template(
      output, input, weight, kernel_size, bias, stride, padding,
      output_padding, dilation, columns_buf, ones_buf);
  return output;
}
// Functional entry point: allocates a fresh output tensor plus the two
// scratch buffers, runs the template implementation, and returns the
// (resized) output.
Tensor slow_conv_transpose3d_cuda(
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    const Tensor& bias,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation) {
  Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor columns_buf = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor ones_buf = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  slow_conv_transpose3d_out_cuda_template(
      output, input, weight, kernel_size, bias, stride, padding,
      output_padding, dilation, columns_buf, ones_buf);
  return output;
}
// Out-variant backward entry point. Each gradient output is computed only
// when the corresponding tensor is defined: grad_input via the
// backward-input template, grad_weight/grad_bias (zeroed first) via the
// parameter-gradient accumulator with scale 1.
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(
    Tensor& grad_input,
    Tensor& grad_weight,
    Tensor& grad_bias,
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    const Tensor& finput,
    const Tensor& fgrad) {
  if (grad_input.defined()) {
    slow_conv_transpose3d_backward_out_cuda_template(
        input, grad_output, grad_input, weight, finput, fgrad,
        kernel_size, stride, padding, output_padding, dilation);
  }
  // Zero-initialize parameter gradients before accumulation.
  if (grad_weight.defined()) {
    grad_weight.resize_(weight.sizes());
    grad_weight.zero_();
  }
  if (grad_bias.defined()) {
    grad_bias.resize_({weight.size(1)});
    grad_bias.zero_();
  }
  if (grad_weight.defined() || grad_bias.defined()) {
    slow_conv_transpose3d_acc_grad_parameters_cuda(
        input, grad_output, grad_weight, grad_bias, finput, fgrad,
        kernel_size, stride, padding, output_padding, dilation, 1);
  }
  return std::tuple<Tensor&, Tensor&, Tensor&>(
      grad_input, grad_weight, grad_bias);
}
// Functional backward entry point. output_mask selects which of
// {grad_input, grad_weight, grad_bias} to compute; unrequested gradients
// are returned as undefined Tensors.
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda(
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    const Tensor& finput,
    const Tensor& fgrad,
    std::array<bool, 3> output_mask) {
  // An empty placeholder for each requested gradient; an undefined Tensor
  // signals "not requested" to the helpers below.
  Tensor grad_input =
      output_mask[0] ? at::empty({0}, grad_output.options()) : Tensor();
  Tensor grad_weight =
      output_mask[1] ? at::empty({0}, grad_output.options()) : Tensor();
  Tensor grad_bias =
      output_mask[2] ? at::empty({0}, grad_output.options()) : Tensor();
  if (grad_input.defined()) {
    slow_conv_transpose3d_backward_out_cuda_template(
        input, grad_output, grad_input, weight, finput, fgrad,
        kernel_size, stride, padding, output_padding, dilation);
  }
  // Zero-initialize parameter gradients before accumulation.
  if (grad_weight.defined()) {
    grad_weight.resize_(weight.sizes());
    grad_weight.zero_();
  }
  if (grad_bias.defined()) {
    grad_bias.resize_({weight.size(1)});
    grad_bias.zero_();
  }
  if (grad_weight.defined() || grad_bias.defined()) {
    slow_conv_transpose3d_acc_grad_parameters_cuda(
        input, grad_output, grad_weight, grad_bias, finput, fgrad,
        kernel_size, stride, padding, output_padding, dilation, 1);
  }
  return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
| 23b85e220dcc27c8417af9592d0951ab81b1ac9c.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/vol2col.cuh>
namespace at {
namespace native {
namespace {
// Validates the arguments of slow 3-D transposed convolution (shared by
// the forward, backward-input, and grad-parameter paths).
// - input must be a non-empty 4D (unbatched) or 5D (batched) tensor; an
//   empty batch dimension (size(0) == 0) is tolerated.
// - stride and dilation must be strictly positive; output_padding must be
//   smaller than either the stride or the dilation in every dimension.
// - weight, when defined, fixes n_input_plane / n_output_plane and the
//   kernel extents; weight_nullable != 0 permits an undefined weight.
// - grad_output, when defined, must match the computed output shape.
static inline void slow_conv_transpose3d_shape_check(
    const Tensor& input,
    const Tensor& grad_output,
    const Tensor& weight,
    const Tensor& bias,
    int kernel_depth,
    int kernel_width,
    int kernel_height,
    int stride_depth,
    int stride_width,
    int stride_height,
    int padding_depth,
    int padding_width,
    int padding_height,
    int dilation_depth,
    int dilation_width,
    int dilation_height,
    int output_padding_depth,
    int output_padding_width,
    int output_padding_height,
    int weight_nullable) {
  // Allow for empty batch size but not other dimensions
  bool valid_empty = false;
  int ndim = input.dim();
  if (ndim == 4) {
    valid_empty = input.size(0) == 0 && input.size(1) != 0 &&
        input.size(2) != 0 && input.size(3) != 0;
  } else if (ndim == 5) {
    valid_empty = input.size(0) == 0 && input.size(1) != 0 &&
        input.size(2) != 0 && input.size(3) != 0 && input.size(4) != 0;
  }
  TORCH_CHECK(
      (input.numel() != 0 || valid_empty) && (ndim == 4 || ndim == 5),
      "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ",
      input.sizes());
  TORCH_CHECK(
      stride_depth > 0 && stride_width > 0 && stride_height > 0,
      "stride should be greater than zero, but got stride_depth: ",
      stride_depth,
      " stride_height: ",
      stride_height,
      " stride_width: ",
      stride_width);
  TORCH_CHECK(
      dilation_depth > 0 && dilation_width > 0 && dilation_height > 0,
      "dilation should be greater than zero, but got dilation_depth: ",
      dilation_depth,
      ", dilation_height: ",
      dilation_height,
      ", dilation_width: ",
      dilation_width);
  TORCH_CHECK(
      (output_padding_depth < stride_depth ||
       output_padding_depth < dilation_depth) &&
          (output_padding_width < stride_width ||
           output_padding_width < dilation_width) &&
          (output_padding_height < stride_height ||
           output_padding_height < dilation_height),
      "output padding must be smaller than either stride or dilation,",
      " but got output_padding_depth: ",
      output_padding_depth,
      " output_padding_height: ",
      output_padding_height,
      " output_padding_width: ",
      output_padding_width,
      " stride_depth: ",
      stride_depth,
      " stride_height: ",
      stride_height,
      " stride_width: ",
      stride_width,
      " dilation_depth: ",
      dilation_depth,
      " dilation_height: ",
      dilation_height,
      " dilation_width: ",
      dilation_width);
  // number of input & output planes and kernel size is indirectly defined by
  // the weight tensor
  if (weight.defined()) {
    TORCH_CHECK(
        weight.numel() != 0 && weight.dim() == 5,
        "non-empty 5D (n_output_plane x n_input_plane ",
        "x kernel_depth x kernel_height x kernel_width) tensor ",
        "expected for weight, but got: ",
        weight.sizes());
    if (bias.defined()) {
      // bias must be 1D with n_output_plane (== weight.size(1)) elements
      check_dim_size(bias, 1, 0, weight.size(1));
    }
  } else if (!weight_nullable) {
    AT_ERROR("weight tensor is expected to be non-nullable");
  }
  // Dimension indices of (feature, depth, height, width); shifted by one in
  // batched (5D) mode to skip the leading batch dimension.
  int dimf = 0;
  int dimd = 1;
  int dimh = 2;
  int dimw = 3;
  if (ndim == 5) {
    dimf++;
    dimd++;
    dimh++;
    dimw++;
  }
  if (weight.defined()) {
    const int64_t n_input_plane = weight.size(0);
    check_dim_size(input, ndim, dimf, n_input_plane);
  }
  int64_t input_width = input.size(dimw);
  int64_t input_height = input.size(dimh);
  int64_t input_depth = input.size(dimd);
  // Transposed-convolution output size formula (inverse of the
  // convolution output-size formula).
  int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
      (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
  int64_t output_height = (input_height - 1) * stride_height -
      2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
      output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;
  if (output_depth < 1 || output_width < 1 || output_height < 1) {
    AT_ERROR(
        "Given input size per channel: (",
        input_depth,
        " x ",
        input_height,
        " x ",
        input_width,
        "). Calculated output size per channel: (",
        output_depth,
        " x ",
        output_height,
        " x ",
        output_width,
        "). Output size is too small");
  }
  if (grad_output.defined()) {
    if (weight.defined()) {
      const int64_t n_output_plane = weight.size(1);
      check_dim_size(grad_output, ndim, dimf, n_output_plane);
    } else if (bias.defined()) {
      const int64_t n_output_plane = bias.size(0);
      check_dim_size(grad_output, ndim, dimf, n_output_plane);
    }
    check_dim_size(grad_output, ndim, dimd, output_depth);
    check_dim_size(grad_output, ndim, dimh, output_height);
    check_dim_size(grad_output, ndim, dimw, output_width);
  }
}
// Forward pass of slow 3-D transposed convolution ("deconvolution") on CUDA.
//
// Per batch element: columns = weight^T @ input (cuBLAS GEMM), then
// col2vol scatters `columns` into the output volume; bias is added with a
// second GEMM against a buffer of ones. `finput` is reused as the `columns`
// scratch buffer and `fgrad_input` as the ones buffer. A 4-D (unbatched)
// input is temporarily viewed as a batch of one and restored at the end.
//
// output: resized in place to {N, n_output_plane, oD, oH, oW}
// Preconditions checked: all size-3 IntArrayRefs, contiguous bias,
// all tensors on the same GPU.
void slow_conv_transpose3d_out_cuda_template(
    Tensor& output,
    const Tensor& input_,
    const Tensor& weight_,
    IntArrayRef kernel_size,
    const Tensor& bias,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    Tensor& finput,
    Tensor& fgrad_input) {
  TORCH_CHECK(
      kernel_size.size() == 3,
      "It is expected kernel_size equals to 3, but got size ",
      kernel_size.size());

  TORCH_CHECK(
      dilation.size() == 3,
      "It is expected dilation equals to 3, but got size ",
      dilation.size());

  TORCH_CHECK(
      padding.size() == 3,
      "It is expected padding equals to 3, but got size ",
      padding.size());

  TORCH_CHECK(
      stride.size() == 3,
      "It is expected stride equals to 3, but got size ",
      stride.size());

  // Fixed: this message previously said "stride" while checking
  // output_padding.
  TORCH_CHECK(
      output_padding.size() == 3,
      "It is expected output_padding equals to 3, but got size ",
      output_padding.size());

  int64_t kernel_depth = kernel_size[0];
  int64_t kernel_height = kernel_size[1];
  int64_t kernel_width = kernel_size[2];
  int64_t dilation_depth = dilation[0];
  int64_t dilation_height = dilation[1];
  int64_t dilation_width = dilation[2];
  int64_t padding_depth = padding[0];
  int64_t padding_height = padding[1];
  int64_t padding_width = padding[2];
  int64_t stride_depth = stride[0];
  int64_t stride_height = stride[1];
  int64_t stride_width = stride[2];
  int64_t output_padding_depth = output_padding[0];
  int64_t output_padding_height = output_padding[1];
  int64_t output_padding_width = output_padding[2];

  // Alias the caller-provided scratch tensors under their role names.
  Tensor columns = finput;
  Tensor ones = fgrad_input;

  // Transposed conv: weight is {n_input_plane, n_output_plane, kT, kH, kW}.
  int n_input_plane = weight_.size(0);
  int n_output_plane = weight_.size(1);

  TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
      weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4},
      columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6};

  checkAllSameGPU(
      "slow_conv_transpose3d_out_cuda",
      {input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg});

  slow_conv_transpose3d_shape_check(
      input_,
      Tensor(),
      weight_,
      bias,
      kernel_depth,
      kernel_width,
      kernel_height,
      stride_depth,
      stride_width,
      stride_height,
      padding_depth,
      padding_width,
      padding_height,
      dilation_depth,
      dilation_width,
      dilation_height,
      output_padding_depth,
      output_padding_width,
      output_padding_height,
      0);

  TORCH_CHECK(
      !bias.defined() || bias.is_contiguous(),
      "bias tensor has to be contiguous");

  Tensor input = input_.contiguous();
  Tensor weight = weight_.contiguous();

  // Consistency: sibling backward template declares is_batch as bool.
  bool is_batch = false;
  if (input.dim() == 4) {
    // Force batch: view 4-D input as a batch of one.
    is_batch = true;
    input.resize_(
        {1, input.size(0), input.size(1), input.size(2), input.size(3)});
  }

  int64_t input_width = input.size(4);
  int64_t input_height = input.size(3);
  int64_t input_depth = input.size(2);

  // Standard transposed-convolution output size formula.
  int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
      (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
  int64_t output_height = (input_height - 1) * stride_height -
      2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
      output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;

  // Batch size + input planes
  int64_t batch_size = input.size(0);

  // Resize output
  output.resize_(
      {batch_size, n_output_plane, output_depth, output_height, output_width});

  // Resize temporary columns
  columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth,
                   input_depth * input_height * input_width});

  // Define a buffer of ones, for bias accumulation
  // Note: this buffer can be shared with other modules, it only ever gets
  // increased, and always contains ones.
  if (ones.dim() != 3 ||
      ones.size(0) * ones.size(1) * ones.size(2) <
          output_depth * output_height * output_width) {
    // Resize plane and fill with ones...
    ones.resize_({output_depth, output_height, output_width});
    ones.fill_(1);
  }

  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] {
        using accscalar_t = at::acc_type<scalar_t, true>;

        // Helpers
        Tensor input_n;
        Tensor output_n;

        // For each elt in batch, do:
        for (int elt = 0; elt < batch_size; elt++) {
          // Matrix mulitply per output:
          input_n = input.select(0, elt);
          output_n = output.select(0, elt);

          // M,N,K are dims of matrix A and B
          // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
          int64_t m =
              weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
          int64_t n = columns.size(1);
          int64_t k = weight.size(0);

          // Do GEMM (note: this is a bit confusing because gemm assumes
          // column-major matrices): columns = weight^T * input_n
          at::cuda::blas::gemm<scalar_t>(
              'n',
              't',
              n,
              m,
              k,
              static_cast<scalar_t>(1),
              input_n.data_ptr<scalar_t>(),
              n,
              weight.data_ptr<scalar_t>(),
              m,
              static_cast<scalar_t>(0),
              columns.data_ptr<scalar_t>(),
              n);

          // Unpack columns back into input:
          at::native::col2vol<scalar_t, accscalar_t>(
              at::cuda::getCurrentCUDAStream(),
              columns.data_ptr<scalar_t>(),
              n_output_plane,
              output_depth,
              output_height,
              output_width,
              input_depth,
              input_height,
              input_width,
              kernel_depth,
              kernel_height,
              kernel_width,
              padding_depth,
              padding_height,
              padding_width,
              stride_depth,
              stride_height,
              stride_width,
              dilation_depth,
              dilation_height,
              dilation_width,
              output_n.data_ptr<scalar_t>());

          // Do Bias after:
          // M,N,K are dims of matrix A and B
          // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
          int64_t m_ = n_output_plane;
          int64_t n_ = output_depth * output_height * output_width;
          int64_t k_ = 1;

          // Do GEMM (note: this is a bit confusing because gemm assumes
          // column-major matrices): output_n += bias * ones^T
          if (bias.defined()) {
            at::cuda::blas::gemm<scalar_t>(
                't',
                'n',
                n_,
                m_,
                k_,
                static_cast<scalar_t>(1),
                ones.data_ptr<scalar_t>(),
                k_,
                bias.data_ptr<scalar_t>(),
                k_,
                static_cast<scalar_t>(1),
                output_n.data_ptr<scalar_t>(),
                n_);
          }
        }

        // Resize output back to the caller's unbatched shape if needed.
        if (is_batch) {
          output.resize_(
              {n_output_plane, output_depth, output_height, output_width});
          input.resize_(
              {n_input_plane, input_depth, input_height, input_width});
        }
      });
}
// Backward (w.r.t. input) of slow 3-D transposed convolution on CUDA.
//
// Per batch element: vol2col gathers grad_output into the `grad_columns`
// scratch buffer (aliased by `finput`), then a cuBLAS GEMM computes
// grad_input = weight * grad_columns. A 4-D (unbatched) input/grad_output
// pair is temporarily viewed as a batch of one and restored at the end.
//
// grad_input: resized in place to {N, n_input_plane, iD, iH, iW}
void slow_conv_transpose3d_backward_out_cuda_template(
    const Tensor& input_,
    const Tensor& grad_output_,
    Tensor& grad_input,
    const Tensor& weight_,
    const Tensor& finput,
    const Tensor& fgrad_input,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation) {
  TORCH_CHECK(
      kernel_size.size() == 3,
      "It is expected kernel_size equals to 3, but got size ",
      kernel_size.size());

  TORCH_CHECK(
      dilation.size() == 3,
      "It is expected dilation equals to 3, but got size ",
      dilation.size());

  TORCH_CHECK(
      padding.size() == 3,
      "It is expected padding equals to 3, but got size ",
      padding.size());

  TORCH_CHECK(
      stride.size() == 3,
      "It is expected stride equals to 3, but got size ",
      stride.size());

  // Fixed: this message previously said "stride" while checking
  // output_padding.
  TORCH_CHECK(
      output_padding.size() == 3,
      "It is expected output_padding equals to 3, but got size ",
      output_padding.size());

  // Alias the caller-provided scratch tensor under its role name.
  Tensor grad_columns = finput;

  // Transposed conv: weight is {n_input_plane, n_output_plane, kT, kH, kW}.
  int n_input_plane = weight_.size(0);
  int n_output_plane = weight_.size(1);

  int64_t kernel_depth = kernel_size[0];
  int64_t kernel_height = kernel_size[1];
  int64_t kernel_width = kernel_size[2];
  int64_t dilation_depth = dilation[0];
  int64_t dilation_height = dilation[1];
  int64_t dilation_width = dilation[2];
  int64_t padding_depth = padding[0];
  int64_t padding_height = padding[1];
  int64_t padding_width = padding[2];
  int64_t stride_depth = stride[0];
  int64_t stride_height = stride[1];
  int64_t stride_width = stride[2];
  int64_t output_padding_depth = output_padding[0];
  int64_t output_padding_height = output_padding[1];
  int64_t output_padding_width = output_padding[2];

  TensorArg input_arg{input_, "input", 1},
      grad_output_arg{grad_output_, "grad_output", 2},
      weight_arg{weight_, "weight", 3},
      grad_columns_arg{grad_columns, "grad_columns", 4},
      grad_input_arg{grad_input, "grad_input", 5};

  checkAllSameGPU(
      "slow_conv_transpose3d_backward_out_cuda",
      {input_arg,
       grad_output_arg,
       weight_arg,
       grad_columns_arg,
       grad_input_arg});

  slow_conv_transpose3d_shape_check(
      input_,
      grad_output_,
      weight_,
      Tensor(),
      kernel_depth,
      kernel_width,
      kernel_height,
      stride_depth,
      stride_width,
      stride_height,
      padding_depth,
      padding_width,
      padding_height,
      dilation_depth,
      dilation_width,
      dilation_height,
      output_padding_depth,
      output_padding_width,
      output_padding_height,
      0);

  Tensor input = input_.contiguous();
  Tensor grad_output = grad_output_.contiguous();
  Tensor weight = weight_.contiguous();

  bool is_batch = false;
  if (input.dim() == 4) {
    // Force batch: view 4-D tensors as a batch of one.
    is_batch = true;
    input.resize_(
        {1, input.size(0), input.size(1), input.size(2), input.size(3)});
    grad_output.resize_({1,
                         grad_output.size(0),
                         grad_output.size(1),
                         grad_output.size(2),
                         grad_output.size(3)});
  }

  int64_t input_width = input.size(4);
  int64_t input_height = input.size(3);
  int64_t input_depth = input.size(2);

  // Standard transposed-convolution output size formula.
  int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
      (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
  int64_t output_height = (input_height - 1) * stride_height -
      2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
      output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;

  // Batch size + input planes
  int64_t batch_size = input.size(0);

  // Resize output
  grad_input.resize_(
      {batch_size, n_input_plane, input_depth, input_height, input_width});

  // Resize temporary columns
  grad_columns.resize_(
      {n_output_plane * kernel_width * kernel_height * kernel_depth,
       input_depth * input_height * input_width});

  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] {
        // Helpers
        Tensor grad_input_n;
        Tensor grad_output_n;

        // For each elt in batch, do:
        for (int elt = 0; elt < batch_size; elt++) {
          // Matrix mulitply per sample:
          grad_input_n = grad_input.select(0, elt);
          grad_output_n = grad_output.select(0, elt);

          // Extract columns:
          at::native::vol2col<scalar_t>(
              at::cuda::getCurrentCUDAStream(),
              grad_output_n.data_ptr<scalar_t>(),
              n_output_plane,
              output_depth,
              output_height,
              output_width,
              input_depth,
              input_height,
              input_width,
              kernel_depth,
              kernel_height,
              kernel_width,
              padding_depth,
              padding_height,
              padding_width,
              stride_depth,
              stride_height,
              stride_width,
              dilation_depth,
              dilation_height,
              dilation_width,
              grad_columns.data_ptr<scalar_t>());

          // M,N,K are dims of matrix A and B
          // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
          int64_t m = weight.size(0);
          int64_t n = grad_columns.size(1);
          int64_t k =
              weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);

          // Do GEMM (note: this is a bit confusing because gemm assumes
          // column-major matrices): grad_input_n = weight * grad_columns
          at::cuda::blas::gemm<scalar_t>(
              'n',
              'n',
              n,
              m,
              k,
              static_cast<scalar_t>(1),
              grad_columns.data_ptr<scalar_t>(),
              n,
              weight.data_ptr<scalar_t>(),
              k,
              static_cast<scalar_t>(0),
              grad_input_n.data_ptr<scalar_t>(),
              n);
        }

        // Resize output back to the caller's unbatched shape if needed.
        if (is_batch) {
          grad_output.resize_(
              {n_output_plane, output_depth, output_height, output_width});
          input.resize_(
              {n_input_plane, input_depth, input_height, input_width});
          grad_input.resize_(
              {n_input_plane, input_depth, input_height, input_width});
        }
      });
}
// Accumulates weight and/or bias gradients of slow 3-D transposed
// convolution on CUDA.
//
// Per batch element: vol2col gathers grad_output into `columns` (aliased by
// `finput`), then grad_weight += scale * input_n * columns^T via cuBLAS GEMM,
// and grad_bias += scale * grad_output_n * ones via GEMV. `fgrad_input` is
// reused as the ones buffer. Returns early if neither gradient is requested.
// grad_weight/grad_bias are accumulated into (callers zero them first).
void slow_conv_transpose3d_acc_grad_parameters_cuda(
    const Tensor& input_,
    const Tensor& grad_output_,
    Tensor& grad_weight,
    Tensor& grad_bias,
    const Tensor& finput,
    const Tensor& fgrad_input,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    int scale_) {
  TORCH_CHECK(
      kernel_size.size() == 3,
      "It is expected kernel_size equals to 3, but got size ",
      kernel_size.size());

  TORCH_CHECK(
      dilation.size() == 3,
      "It is expected dilation equals to 3, but got size ",
      dilation.size());

  TORCH_CHECK(
      padding.size() == 3,
      "It is expected padding equals to 3, but got size ",
      padding.size());

  TORCH_CHECK(
      stride.size() == 3,
      "It is expected stride equals to 3, but got size ",
      stride.size());

  // Fixed: this message previously said "stride" while checking
  // output_padding.
  TORCH_CHECK(
      output_padding.size() == 3,
      "It is expected output_padding equals to 3, but got size ",
      output_padding.size());

  int64_t kernel_depth = kernel_size[0];
  int64_t kernel_height = kernel_size[1];
  int64_t kernel_width = kernel_size[2];
  int64_t dilation_depth = dilation[0];
  int64_t dilation_height = dilation[1];
  int64_t dilation_width = dilation[2];
  int64_t padding_depth = padding[0];
  int64_t padding_height = padding[1];
  int64_t padding_width = padding[2];
  int64_t stride_depth = stride[0];
  int64_t stride_height = stride[1];
  int64_t stride_width = stride[2];
  int64_t output_padding_depth = output_padding[0];
  int64_t output_padding_height = output_padding[1];
  int64_t output_padding_width = output_padding[2];

  // Alias the caller-provided scratch tensors under their role names.
  Tensor columns = finput;
  Tensor ones = fgrad_input;

  TensorArg input_arg{input_, "input", 1},
      grad_output_arg{grad_output_, "grad_output", 2},
      grad_weight_arg{grad_weight, "grad_weight", 3},
      grad_bias_arg{grad_bias, "grad_bias", 4},
      columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6};

  checkAllSameGPU(
      "slow_conv_transpose3d_acc_grad_parameters_cuda",
      {input_arg,
       grad_output_arg,
       grad_weight_arg,
       grad_bias_arg,
       columns_arg,
       ones_arg});

  slow_conv_transpose3d_shape_check(
      input_,
      grad_output_,
      grad_weight,
      grad_bias,
      kernel_depth,
      kernel_width,
      kernel_height,
      stride_depth,
      stride_width,
      stride_height,
      padding_depth,
      padding_width,
      padding_height,
      dilation_depth,
      dilation_width,
      dilation_height,
      output_padding_depth,
      output_padding_width,
      output_padding_height,
      1);

  // Derive n_output_plane from whichever gradient tensor is defined;
  // nothing to do if neither was requested.
  int n_output_plane;
  if (grad_weight.defined()) {
    n_output_plane = grad_weight.size(1);
  } else if (grad_bias.defined()) {
    n_output_plane = grad_bias.size(0);
  } else {
    return;
  }

  if (grad_weight.defined()) {
    TORCH_CHECK(
        grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
  }
  if (grad_bias.defined()) {
    TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
    TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
  }

  Tensor input = input_.contiguous();
  Tensor grad_output = grad_output_.contiguous();

  bool is_batch = false;
  if (input.dim() == 4) {
    // Force batch: view 4-D tensors as a batch of one.
    is_batch = true;
    input.resize_(
        {1, input.size(0), input.size(1), input.size(2), input.size(3)});
    grad_output.resize_({1,
                         grad_output.size(0),
                         grad_output.size(1),
                         grad_output.size(2),
                         grad_output.size(3)});
  }

  int64_t input_width = input.size(4);
  int64_t input_height = input.size(3);
  int64_t input_depth = input.size(2);

  // Standard transposed-convolution output size formula.
  int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
      (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
  int64_t output_height = (input_height - 1) * stride_height -
      2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
      output_padding_height;
  int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
      (dilation_width * (kernel_width - 1) + 1) + output_padding_width;

  // Batch size + input planes
  int64_t batch_size = input.size(0);

  // Define a buffer of ones, for bias accumulation
  if (ones.dim() != 3 ||
      ones.size(0) * ones.size(1) * ones.size(2) <
          output_depth * output_height * output_width) {
    // Resize plane and fill with ones...
    ones.resize_({output_depth, output_height, output_width});
    ones.fill_(1);
  }

  // Resize temporary columns
  columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth,
                   input_depth * input_height * input_width});

  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input.scalar_type(),
      "slow_conv_transpose3d_acc_grad_parameters_cuda",
      [&] {
        // Helpers
        Tensor input_n;
        Tensor grad_output_n;

        scalar_t scale = static_cast<scalar_t>(scale_);

        // For each elt in batch, do:
        for (int elt = 0; elt < batch_size; elt++) {
          // Matrix mulitply per output:
          grad_output_n = grad_output.select(0, elt);

          // Do Weight:
          if (grad_weight.defined()) {
            // Matrix mulitply per output:
            input_n = input.select(0, elt);

            // Extract columns:
            at::native::vol2col<scalar_t>(
                at::cuda::getCurrentCUDAStream(),
                grad_output_n.data_ptr<scalar_t>(),
                n_output_plane,
                output_depth,
                output_height,
                output_width,
                input_depth,
                input_height,
                input_width,
                kernel_depth,
                kernel_height,
                kernel_width,
                padding_depth,
                padding_height,
                padding_width,
                stride_depth,
                stride_height,
                stride_width,
                dilation_depth,
                dilation_height,
                dilation_width,
                columns.data_ptr<scalar_t>());

            // M,N,K are dims of matrix A and B
            // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
            int64_t n = columns.size(0); // n_output_plane * kt * kh * kw
            int64_t m = input_n.size(0); // n_input_plane
            int64_t k = columns.size(1); // input_height * input_width

            // Do GEMM (note: this is a bit confusing because gemm assumes
            // column-major matrices): grad_weight += scale * input_n *
            // columns^T
            at::cuda::blas::gemm<scalar_t>(
                't',
                'n',
                n,
                m,
                k,
                scale,
                columns.data_ptr<scalar_t>(),
                k,
                input_n.data_ptr<scalar_t>(),
                k,
                static_cast<scalar_t>(1),
                grad_weight.data_ptr<scalar_t>(),
                n);
          }

          // Do Bias:
          if (grad_bias.defined()) {
            // M,N,K are dims of matrix A and B
            // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
            int64_t m_ = n_output_plane;
            int64_t k_ = output_depth * output_height * output_width;

            // Do GEMV (note: this is a bit confusing because gemv assumes
            // column-major matrices): grad_bias += scale * grad_output_n *
            // ones
            at::cuda::blas::gemv<scalar_t>(
                't',
                k_,
                m_,
                scale,
                grad_output_n.data_ptr<scalar_t>(),
                k_,
                ones.data_ptr<scalar_t>(),
                1,
                static_cast<scalar_t>(1),
                grad_bias.data_ptr<scalar_t>(),
                1);
          }
        }

        // Resize back to the caller's unbatched shape if needed.
        if (is_batch) {
          grad_output.resize_(
              {n_output_plane, output_depth, output_height, output_width});
          input.resize_(
              {input.size(1), input_depth, input_height, input_width});
        }
      });
}
} // namespace
// Public out-variant entry point for 3-D transposed convolution on CUDA.
// Allocates the two scratch tensors the template implementation needs
// (a columns buffer and a ones buffer) and forwards everything to it.
// Returns the caller-provided `output`, resized and filled in place.
Tensor& slow_conv_transpose3d_out_cuda(
    Tensor& output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    const Tensor& bias,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation) {
  // Scratch buffers; the template resizes them to whatever it needs.
  Tensor columns_buf = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor ones_buf = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);

  slow_conv_transpose3d_out_cuda_template(
      output, input, weight, kernel_size, bias, stride, padding,
      output_padding, dilation, columns_buf, ones_buf);

  return output;
}
// Public functional entry point for 3-D transposed convolution on CUDA.
// Allocates a fresh output plus the two scratch tensors the template
// implementation needs, then forwards everything to it.
Tensor slow_conv_transpose3d_cuda(
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    const Tensor& bias,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation) {
  // The template resizes `result` and both scratch buffers as needed.
  Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor columns_buf = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor ones_buf = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);

  slow_conv_transpose3d_out_cuda_template(
      result, input, weight, kernel_size, bias, stride, padding,
      output_padding, dilation, columns_buf, ones_buf);

  return result;
}
// Out-variant backward entry point for 3-D transposed convolution on CUDA.
// Computes only the gradients whose output tensors are defined:
//  - grad_input via the backward template,
//  - grad_weight / grad_bias (zero-initialized here, then accumulated into
//    by the parameter-gradient kernel with scale 1).
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(
    Tensor& grad_input,
    Tensor& grad_weight,
    Tensor& grad_bias,
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    const Tensor& finput,
    const Tensor& fgrad) {
  if (grad_input.defined()) {
    slow_conv_transpose3d_backward_out_cuda_template(
        input, grad_output, grad_input, weight, finput, fgrad,
        kernel_size, stride, padding, output_padding, dilation);
  }

  const bool wants_weight = grad_weight.defined();
  const bool wants_bias = grad_bias.defined();

  // The accumulation kernel adds into these, so they must start at zero.
  if (wants_weight) {
    grad_weight.resize_(weight.sizes());
    grad_weight.zero_();
  }
  if (wants_bias) {
    grad_bias.resize_({weight.size(1)});
    grad_bias.zero_();
  }

  if (wants_weight || wants_bias) {
    slow_conv_transpose3d_acc_grad_parameters_cuda(
        input, grad_output, grad_weight, grad_bias, finput, fgrad,
        kernel_size, stride, padding, output_padding, dilation, 1);
  }

  return std::tie(grad_input, grad_weight, grad_bias);
}
// Functional backward entry point for 3-D transposed convolution on CUDA.
// `output_mask` selects which of {grad_input, grad_weight, grad_bias} to
// compute; unselected gradients are returned as undefined tensors.
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda(
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    const Tensor& finput,
    const Tensor& fgrad,
    std::array<bool, 3> output_mask) {
  // Allocate a placeholder for each requested gradient; the kernels below
  // resize them to their final shapes.
  Tensor grad_input =
      output_mask[0] ? at::empty({0}, grad_output.options()) : Tensor();
  Tensor grad_weight =
      output_mask[1] ? at::empty({0}, grad_output.options()) : Tensor();
  Tensor grad_bias =
      output_mask[2] ? at::empty({0}, grad_output.options()) : Tensor();

  if (grad_input.defined()) {
    slow_conv_transpose3d_backward_out_cuda_template(
        input, grad_output, grad_input, weight, finput, fgrad,
        kernel_size, stride, padding, output_padding, dilation);
  }

  // The accumulation kernel adds into these, so they must start at zero.
  if (grad_weight.defined()) {
    grad_weight.resize_(weight.sizes());
    grad_weight.zero_();
  }
  if (grad_bias.defined()) {
    grad_bias.resize_({weight.size(1)});
    grad_bias.zero_();
  }

  if (grad_weight.defined() || grad_bias.defined()) {
    slow_conv_transpose3d_acc_grad_parameters_cuda(
        input, grad_output, grad_weight, grad_bias, finput, fgrad,
        kernel_size, stride, padding, output_padding, dilation, 1);
  }

  return std::make_tuple(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
16b49ebbf338b13cef14090aeea36eafb162ea72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceHistogram utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <limits>
#include <algorithm>
#include <typeinfo>
#include <hipcub/hipcub.hpp>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/device/device_histogram.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
// Dispatch types
// Dispatch types: selects which code path the test harness exercises.
enum Backend
{
    CUB,    // CUB method (host-side dispatch)
    CDP,    // GPU-based (dynamic parallelism) dispatch to CUB method
};

// Test-harness globals, typically set from the command line.
// NOTE(review): exact semantics depend on main()/arg parsing outside this
// view — confirm against the rest of the file.
bool g_verbose_input = false;   // presumably enables printing of input samples
bool g_verbose = false;         // presumably enables verbose test output
int g_timing_iterations = 0;    // iteration count for timed runs
int g_repeat = 0;               // repetition count for the test suite
CachingDeviceAllocator g_allocator(true);  // shared caching device allocator
//---------------------------------------------------------------------
// Dispatch to different DeviceHistogram entrypoints
//---------------------------------------------------------------------
// Adapter that funnels the test driver into the matching DeviceHistogram
// entrypoint. Specialized on (active channels, total channels, backend);
// this primary CUB specialization covers the multi-channel case.
template <int NUM_ACTIVE_CHANNELS, int NUM_CHANNELS, int BACKEND>
struct Dispatch;

template <int NUM_ACTIVE_CHANNELS, int NUM_CHANNELS>
struct Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, CUB>
{
    /**
     * Dispatch to CUB multi histogram-range entrypoint.
     * Repeats the call timing_timing_iterations times (for timed runs) and
     * returns the last error. d_temp_storage_bytes / d_cdp_error are unused
     * here; they exist to match the CDP dispatch signature.
     */
    template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
    //CUB_RUNTIME_FUNCTION __forceinline__
    static hipError_t Range(
        int timing_timing_iterations,
        size_t */*d_temp_storage_bytes*/,
        hipError_t */*d_cdp_error*/,
        void* d_temp_storage,
        size_t& temp_storage_bytes,
        SampleIteratorT d_samples,                      ///< [in] Interleaved multi-channel input samples
        CounterT *(&d_histogram)[NUM_ACTIVE_CHANNELS],  ///< [out] Per-channel histogram arrays; channel i holds num_levels[i] - 1 bins
        int *num_levels,                                ///< [in] Per-channel number of bin boundaries (levels)
        LevelT *(&d_levels)[NUM_ACTIVE_CHANNELS],       ///< [in] Per-channel level arrays; bins are [lower, upper) boundary pairs
        OffsetT num_row_pixels,                         ///< [in] Multi-channel pixels per row in the region of interest
        OffsetT num_rows,                               ///< [in] Rows in the region of interest
        OffsetT row_stride_bytes,                       ///< [in] Bytes between starts of consecutive rows
        hipStream_t stream,
        bool debug_synchronous)
    {
        hipError_t error = hipSuccess;
        for (int i = 0; i < timing_timing_iterations; ++i)
        {
            error = DeviceHistogram::MultiHistogramRange<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
                d_temp_storage,
                temp_storage_bytes,
                d_samples,
                d_histogram,
                num_levels,
                d_levels,
                num_row_pixels,
                num_rows,
                row_stride_bytes,
                stream,
                debug_synchronous);
        }
        return error;
    }

    /**
     * Dispatch to CUB multi histogram-even entrypoint.
     * Same contract as Range(), but bins are evenly spaced between
     * lower_level[i] (inclusive) and upper_level[i] (exclusive).
     */
    template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
    //CUB_RUNTIME_FUNCTION __forceinline__
    static hipError_t Even(
        int timing_timing_iterations,
        size_t */*d_temp_storage_bytes*/,
        hipError_t */*d_cdp_error*/,
        void* d_temp_storage,
        size_t& temp_storage_bytes,
        SampleIteratorT d_samples,                      ///< [in] Interleaved multi-channel input samples
        CounterT *(&d_histogram)[NUM_ACTIVE_CHANNELS],  ///< [out] Per-channel histogram arrays; channel i holds num_levels[i] - 1 bins
        int *num_levels,                                ///< [in] Per-channel number of bin boundaries (levels)
        LevelT *lower_level,                            ///< [in] Per-channel lowest-bin lower bound (inclusive)
        LevelT *upper_level,                            ///< [in] Per-channel highest-bin upper bound (exclusive)
        OffsetT num_row_pixels,                         ///< [in] Multi-channel pixels per row in the region of interest
        OffsetT num_rows,                               ///< [in] Rows in the region of interest
        OffsetT row_stride_bytes,                       ///< [in] Bytes between starts of consecutive rows
        hipStream_t stream,
        bool debug_synchronous)
    {
        // (Removed an unused local typedef of the sample value type.)
        hipError_t error = hipSuccess;
        for (int i = 0; i < timing_timing_iterations; ++i)
        {
            error = DeviceHistogram::MultiHistogramEven<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
                d_temp_storage,
                temp_storage_bytes,
                d_samples,
                d_histogram,
                num_levels,
                lower_level,
                upper_level,
                num_row_pixels,
                num_rows,
                row_stride_bytes,
                stream,
                debug_synchronous);
        }
        return error;
    }
};
// Single-channel specialization: forwards to the scalar HistogramRange /
// HistogramEven entrypoints instead of the MultiHistogram* family.
// NOTE(review): unlike the primary template, Range() here takes
// `LevelT (&d_levels)[1]` (an array of LevelT by reference, passed through
// as d_levels[0]) rather than an array of LevelT pointers — confirm the
// intended LevelT at the call sites.
template <>
struct Dispatch<1, 1, CUB>
{
    /**
     * Dispatch to CUB single histogram-range entrypoint.
     * Repeats the call timing_timing_iterations times (for timed runs) and
     * returns the last error. d_temp_storage_bytes / d_cdp_error are unused
     * here; they exist to match the CDP dispatch signature.
     */
    template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
    //CUB_RUNTIME_FUNCTION __forceinline__
    static hipError_t Range(
        int timing_timing_iterations,
        size_t */*d_temp_storage_bytes*/,
        hipError_t */*d_cdp_error*/,
        void* d_temp_storage,
        size_t& temp_storage_bytes,
        SampleIteratorT d_samples,      ///< [in] Input sequence of data samples
        CounterT* (&d_histogram)[1],    ///< [out] Histogram counter array of num_levels[0] - 1 bins
        int *num_levels,                ///< [in] Number of bin boundaries (levels)
        LevelT (&d_levels)[1],          ///< [in] Bin boundaries; ranges are consecutive [lower, upper) pairs
        OffsetT num_row_pixels,         ///< [in] Pixels per row in the region of interest
        OffsetT num_rows,               ///< [in] Rows in the region of interest
        OffsetT row_stride_bytes,       ///< [in] Bytes between starts of consecutive rows
        hipStream_t stream,
        bool debug_synchronous)
    {
        hipError_t error = hipSuccess;
        for (int i = 0; i < timing_timing_iterations; ++i)
        {
            error = DeviceHistogram::HistogramRange(
                d_temp_storage,
                temp_storage_bytes,
                d_samples,
                d_histogram[0],
                num_levels[0],
                d_levels[0],
                num_row_pixels,
                num_rows,
                row_stride_bytes,
                stream,
                debug_synchronous);
        }
        return error;
    }

    /**
     * Dispatch to CUB single histogram-even entrypoint.
     * Bins are evenly spaced between lower_level[0] (inclusive) and
     * upper_level[0] (exclusive).
     */
    template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
    //CUB_RUNTIME_FUNCTION __forceinline__
    static hipError_t Even(
        int timing_timing_iterations,
        size_t */*d_temp_storage_bytes*/,
        hipError_t */*d_cdp_error*/,
        void* d_temp_storage,
        size_t& temp_storage_bytes,
        SampleIteratorT d_samples,      ///< [in] Input sequence of data samples
        CounterT* (&d_histogram)[1],    ///< [out] Histogram counter array of num_levels[0] - 1 bins
        int *num_levels,                ///< [in] Number of bin boundaries (levels)
        LevelT *lower_level,            ///< [in] Lowest-bin lower bound (inclusive)
        LevelT *upper_level,            ///< [in] Highest-bin upper bound (exclusive)
        OffsetT num_row_pixels,         ///< [in] Pixels per row in the region of interest
        OffsetT num_rows,               ///< [in] Rows in the region of interest
        OffsetT row_stride_bytes,       ///< [in] Bytes between starts of consecutive rows
        hipStream_t stream,
        bool debug_synchronous)
    {
        hipError_t error = hipSuccess;
        for (int i = 0; i < timing_timing_iterations; ++i)
        {
            error = DeviceHistogram::HistogramEven(
                d_temp_storage,
                temp_storage_bytes,
                d_samples,
                d_histogram[0],
                num_levels[0],
                lower_level[0],
                upper_level[0],
                num_row_pixels,
                num_rows,
                row_stride_bytes,
                stream,
                debug_synchronous);
        }
        return error;
    }
};
//---------------------------------------------------------------------
// CUDA nested-parallelism test kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceHistogram
* /
template <int BINS, int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleT, typename SampleIteratorT, typename CounterT, int ALGORITHM>
__global__ void CnpDispatchKernel(
Int2Type<ALGORITHM> algorithm,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
SampleT *d_samples,
SampleIteratorT d_sample_itr,
ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_out_histograms,
int num_samples,
bool debug_synchronous)
{
#ifndef CUB_CDP
*d_cdp_error = hipErrorNotSupported;
#else
*d_cdp_error = Dispatch<BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(algorithm, Int2Type<false>(), timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_samples, d_sample_itr, d_out_histograms.array, num_samples, 0, debug_synchronous);
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
/ **
* Dispatch to CDP kernel
* /
template <int BINS, int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleT, typename SampleIteratorT, typename CounterT, int ALGORITHM>
hipError_t Dispatch(
Int2Type<ALGORITHM> algorithm,
Int2Type<true> use_cdp,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleT *d_samples,
SampleIteratorT d_sample_itr,
CounterT *d_histograms[NUM_ACTIVE_CHANNELS],
int num_samples,
hipStream_t stream,
bool debug_synchronous)
{
// Setup array wrapper for histogram channel output (because we can't pass static arrays as kernel parameters)
ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_histo_wrapper;
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
d_histo_wrapper.array[CHANNEL] = d_histograms[CHANNEL];
// Invoke kernel to invoke device-side dispatch
CnpDispatchKernel<BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, ALGORITHM><<<1,1>>>(algorithm, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_samples, d_sample_itr, d_histo_wrapper, num_samples, debug_synchronous);
// Copy out temp_storage_bytes
CubDebugExit(hipMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, hipMemcpyDeviceToHost));
// Copy out error
hipError_t retval;
CubDebugExit(hipMemcpy(&retval, d_cdp_error, sizeof(hipError_t) * 1, hipMemcpyDeviceToHost));
return retval;
}
*/
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
// Searches for bin given a list of bin-boundary levels
template <typename LevelT>
struct SearchTransform
{
    LevelT *levels;     // Pointer to levels array
    int num_levels;     // Number of levels in array

    // Convert a sample to its bin-id by binary-searching the boundary levels.
    // Returns num_levels when the sample falls below the lowest boundary
    // (out of range); callers additionally reject bin == num_levels - 1.
    template <typename SampleT>
    int operator()(SampleT sample)
    {
        LevelT *upper = std::upper_bound(levels, levels + num_levels, (LevelT) sample);
        int bin = (int) (upper - levels) - 1;
        return (bin < 0) ? num_levels : bin;
    }
};
// Scales samples to evenly-spaced bins
template <typename LevelT>
struct ScaleTransform
{
    int num_levels;   // Number of levels in array
    LevelT max;       // Max sample level (exclusive)
    LevelT min;       // Min sample level (inclusive)
    LevelT scale;     // Bin scaling factor (bin width)

    // Record the binning parameters for subsequent operator() calls.
    void Init(
        int num_levels_,    // Number of levels in array
        LevelT max_,        // Max sample level (exclusive)
        LevelT min_,        // Min sample level (inclusive)
        LevelT scale_)      // Bin scaling factor
    {
        num_levels = num_levels_;
        max        = max_;
        min        = min_;
        scale      = scale_;
    }

    // Convert a sample to its bin-id; returns num_levels when the sample
    // lies outside [min, max).
    template <typename SampleT>
    int operator()(SampleT sample)
    {
        if ((sample < min) || (sample >= max))
            return num_levels;  // out of range
        return (int) ((((LevelT) sample) - min) / scale);
    }
};
// Scales samples to evenly-spaced bins (float specialization: stores the
// reciprocal of the bin width once so binning multiplies instead of divides)
template <>
struct ScaleTransform<float>
{
    int num_levels;   // Number of levels in array
    float max;        // Max sample level (exclusive)
    float min;        // Min sample level (inclusive)
    float scale;      // Reciprocal of the bin width

    // Record the binning parameters for subsequent operator() calls.
    void Init(
        int _num_levels,    // Number of levels in array
        float _max,         // Max sample level (exclusive)
        float _min,         // Min sample level (inclusive)
        float _scale)       // Bin width
    {
        num_levels = _num_levels;
        max        = _max;
        min        = _min;
        scale      = 1.0f / _scale;   // invert once here, multiply per sample
    }

    // Convert a sample to its bin-id; returns num_levels when the sample
    // lies outside [min, max).
    template <typename SampleT>
    int operator()(SampleT sample)
    {
        if ((sample < min) || (sample >= max))
            return num_levels;  // out of range
        return (int) ((((float) sample) - min) * scale);
    }
};
/**
 * Generate a random sample in [0, max_level).  entropy_reduction is
 * forwarded to RandomBits (presumably it biases the bit distribution —
 * see RandomBits for the exact semantics).
 */
template <typename T, typename LevelT>
void Sample(T &datum, LevelT max_level, int entropy_reduction)
{
    const unsigned int bits_max = (unsigned int) -1;   // UINT_MAX
    unsigned int bits;
    RandomBits(bits, entropy_reduction);
    datum = (T) ((float(bits) / bits_max) * max_level);
}
/**
 * Initialize histogram samples: fill h_samples with random values in
 * [0, max_level) for every active channel of every pixel, honoring the
 * row stride (padding samples at the end of each row are left untouched).
 */
template <
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename LevelT,
    typename SampleT,
    typename OffsetT>
void InitializeSamples(
    LevelT max_level,           ///< [in] Max sample level (exclusive)
    int entropy_reduction,      ///< [in] Entropy-reduction factor forwarded to Sample()
    SampleT *h_samples,         ///< [out] Host sample buffer to populate
    OffsetT num_row_pixels,     ///< [in] The number of multi-channel pixels per row in the region of interest
    OffsetT num_rows,           ///< [in] The number of rows in the region of interest
    OffsetT row_stride_bytes)   ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
    // Samples per (possibly padded) row
    size_t samples_per_row = row_stride_bytes / sizeof(SampleT);

    for (OffsetT row = 0; row < num_rows; ++row)
    {
        for (OffsetT pixel = 0; pixel < num_row_pixels; ++pixel)
        {
            for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
            {
                // Flat offset of this channel's sample within the buffer
                OffsetT offset = (row * samples_per_row) + (pixel * NUM_CHANNELS) + channel;

                Sample(h_samples[offset], max_level, entropy_reduction);

                if (g_verbose_input)
                {
                    if (channel > 0) printf(", ");
                    std::cout << CoutCast(h_samples[offset]);
                }
            }
        }
    }
}
/**
 * Initialize histogram solutions: zero the host reference histograms, then
 * bin every sample on the host via the per-channel transform functors to
 * produce the expected counts the device results are compared against.
 */
template <
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename SampleIteratorT,
    typename TransformOp,
    typename OffsetT>
void InitializeBins(
    SampleIteratorT h_samples,                      ///< [in] Host samples (pointer or iterator)
    int num_levels[NUM_ACTIVE_CHANNELS],            ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
    TransformOp transform_op[NUM_ACTIVE_CHANNELS],  ///< [in] Per-channel sample-to-bin functors (return num_levels[i] for out-of-range samples)
    CounterT *h_histogram[NUM_ACTIVE_CHANNELS],     ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
    OffsetT num_row_pixels,                         ///< [in] The number of multi-channel pixels per row in the region of interest
    OffsetT num_rows,                               ///< [in] The number of rows in the region of interest
    OffsetT row_stride_bytes)                       ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
    typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;

    // Zero-initialize the reference bins
    for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
    {
        for (int bin = 0; bin < num_levels[CHANNEL] - 1; ++bin)
        {
            h_histogram[CHANNEL][bin] = 0;
        }
    }

    // Bin every sample
    if (g_verbose_input) printf("Samples: \n");
    for (OffsetT row = 0; row < num_rows; ++row)
    {
        for (OffsetT pixel = 0; pixel < num_row_pixels; ++pixel)
        {
            if (g_verbose_input) printf("[");
            for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
            {
                // Sample offset within the (possibly padded) row
                OffsetT offset = (row * (row_stride_bytes / sizeof(SampleT))) + (pixel * NUM_CHANNELS) + channel;

                // Update sample bin
                int bin = transform_op[channel](h_samples[offset]);
                if (g_verbose_input)
                {
                    // BUGFIX: fflush was previously a dangling statement on the
                    // same line as the single-statement if, so it ran
                    // unconditionally for every sample; scope it with the printf.
                    printf(" (%d)", bin);
                    fflush(stdout);
                }
                if ((bin >= 0) && (bin < num_levels[channel] - 1))
                {
                    // valid bin (out-of-range samples map to num_levels[channel])
                    h_histogram[channel][bin]++;
                }
            }
            if (g_verbose_input) printf("]");
        }
        if (g_verbose_input) printf("\n\n");
    }
}
/**
 * Test histogram-even.
 *
 * Computes per-channel reference histograms on the host (ScaleTransform
 * binning), runs the device dispatch twice (size query, then warmup with
 * "canary" zones bracketing the temp storage to catch out-of-bounds writes),
 * verifies device results against the host reference, then times
 * g_timing_iterations performance runs.  AssertEquals aborts on mismatch.
 */
template <
Backend BACKEND,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT,
typename SampleIteratorT>
void TestEven(
LevelT max_level,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
SampleIteratorT h_samples,
SampleIteratorT d_samples)
{
OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT));
// Describe the test configuration on stdout
printf("\n----------------------------\n");
printf("%s cub::DeviceHistogramEven (%s) %d pixels (%d height, %d width, %d-byte row stride), %d %d-byte %s samples (entropy reduction %d), %s counters, %d/%d channels, max sample ",
(BACKEND == CDP) ? "CDP CUB" : "CUB",
(IsPointer<SampleIteratorT>::VALUE) ? "pointer" : "iterator",
(int) (num_row_pixels * num_rows),
(int) num_rows,
(int) num_row_pixels,
(int) row_stride_bytes,
(int) total_samples,
(int) sizeof(SampleT),
typeid(SampleT).name(),
entropy_reduction,
typeid(CounterT).name(),
NUM_ACTIVE_CHANNELS,
NUM_CHANNELS);
std::cout << CoutCast(max_level) << "\n";
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
std::cout << "\n\tChannel " << channel << ": " << num_levels[channel] - 1 << " bins [" << lower_level[channel] << ", " << upper_level[channel] << ")\n";
fflush(stdout);
// Allocate and initialize host and device data
typedef SampleT Foo; // rename type to quelch gcc warnings (bug?)
CounterT* h_histogram[NUM_ACTIVE_CHANNELS];
ScaleTransform<LevelT> transform_op[NUM_ACTIVE_CHANNELS];
// Per-channel even-binning functor: bin width = (upper - lower) / bins
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int bins = num_levels[channel] - 1;
h_histogram[channel] = new CounterT[bins];
transform_op[channel].Init(
num_levels[channel],
upper_level[channel],
lower_level[channel],
static_cast<LevelT>(((upper_level[channel] - lower_level[channel]) / bins)));
}
// Compute the host-side reference histograms
InitializeBins<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
h_samples, num_levels, transform_op, h_histogram, num_row_pixels, num_rows, row_stride_bytes);
// Allocate and initialize device data
CounterT* d_histogram[NUM_ACTIVE_CHANNELS];
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram[channel], sizeof(CounterT) * (num_levels[channel] - 1)));
CubDebugExit(hipMemset(d_histogram[channel], 0, sizeof(CounterT) * (num_levels[channel] - 1)));
}
// Allocate CDP device arrays (out-params used by the CDP dispatch path)
size_t *d_temp_storage_bytes = NULL;
hipError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1));
// Allocate temporary storage
// Size query: with d_temp_storage == NULL this dispatch only computes
// temp_storage_bytes (CUB's two-phase temp-storage convention)
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Allocate temporary storage with "canary" zones
// (token-filled guard regions before and after the real temp storage so
// out-of-bounds writes by the dispatch are detected below)
int canary_bytes = 256;
char canary_token = 8;
char* canary_zone = new char[canary_bytes];
memset(canary_zone, canary_token, canary_bytes);
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes + (canary_bytes * 2)));
CubDebugExit(hipMemset(d_temp_storage, canary_token, temp_storage_bytes + (canary_bytes * 2)));
// Run warmup/correctness iteration
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
1, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Check canary zones (both guard regions must still hold the token)
int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose);
AssertEquals(0, error);
error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose);
AssertEquals(0, error);
// Flush any stdout/stderr
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Check for correctness (and display results, if specified)
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose);
// NOTE(review): the newline is carried inside the "PASS\n" argument, so
// FAIL lines intentionally(?) do not end the line — confirm this is wanted
printf("\tChannel %d %s", channel, channel_error ? "FAIL" : "PASS\n");
// Accumulate per-channel failures; asserted once at the end
error |= channel_error;
}
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, false);
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(total_samples) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(SampleT);
printf("\t%.3f avg ms, %.3f billion samples/s, %.3f billion bins/s, %.3f billion pixels/s, %.3f logical GB/s",
avg_millis,
giga_rate,
giga_rate * NUM_ACTIVE_CHANNELS / NUM_CHANNELS,
giga_rate / NUM_CHANNELS,
giga_bandwidth);
}
printf("\n\n");
// Cleanup host histograms and device allocations
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
if (h_histogram[channel])
delete[] h_histogram[channel];
if (d_histogram[channel])
CubDebugExit(g_allocator.DeviceFree(d_histogram[channel]));
}
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, error);
}
/**
 * Test histogram-even (native pointer input): generate random host samples,
 * mirror them on the device, and run the pointer-based TestEven.
 */
template <
    Backend BACKEND,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename SampleT,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void TestEvenNative(
    LevelT max_level,
    int entropy_reduction,
    int num_levels[NUM_ACTIVE_CHANNELS],     ///< [in] The number of boundaries (levels) per active channel (bins = num_levels[i] - 1)
    LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
    LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
    OffsetT num_row_pixels,                  ///< [in] The number of multi-channel pixels per row in the region of interest
    OffsetT num_rows,                        ///< [in] The number of rows in the region of interest
    OffsetT row_stride_bytes)                ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
    OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT));

    // Host-side sample buffer
    typedef SampleT Foo;  // rename type to quelch gcc warnings (bug?)
    SampleT *h_samples = new Foo[total_samples];
    InitializeSamples<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
        max_level, entropy_reduction, h_samples, num_row_pixels, num_rows, row_stride_bytes);

    // Device-side copy of the samples
    SampleT *d_samples = NULL;
    CubDebugExit(g_allocator.DeviceAllocate((void**) &d_samples, sizeof(SampleT) * total_samples));
    CubDebugExit(hipMemcpy(d_samples, h_samples, sizeof(SampleT) * total_samples, hipMemcpyHostToDevice));

    // Run the pointer-based test
    TestEven<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
        max_level, entropy_reduction, num_levels, lower_level, upper_level,
        num_row_pixels, num_rows, row_stride_bytes,
        h_samples, d_samples);

    // Cleanup
    delete[] h_samples;
    if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples));
}
/**
 * Test histogram-even (constant-iterator input): every "sample" is the same
 * constant value (channel 0's lower bound), fed through a
 * ConstantInputIterator on both the host and device sides.
 */
template <
    Backend BACKEND,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename SampleT,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void TestEvenIterator(
    LevelT max_level,
    int entropy_reduction,
    int num_levels[NUM_ACTIVE_CHANNELS],     ///< [in] The number of boundaries (levels) per active channel (bins = num_levels[i] - 1)
    LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
    LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
    OffsetT num_row_pixels,                  ///< [in] The number of multi-channel pixels per row in the region of interest
    OffsetT num_rows,                        ///< [in] The number of rows in the region of interest
    OffsetT row_stride_bytes)                ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
    // Same iterator serves as both "host" and "device" sample source
    ConstantInputIterator<SampleT> sample_itr((SampleT) lower_level[0]);

    TestEven<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
        max_level, entropy_reduction, num_levels, lower_level, upper_level,
        num_row_pixels, num_rows, row_stride_bytes,
        sample_itr, sample_itr);
}
/**
 * Test histogram-range.
 *
 * Generates random host samples, computes per-channel reference histograms on
 * the host (SearchTransform binning over the caller-supplied boundary
 * levels), runs the device dispatch (size query, then warmup with "canary"
 * zones bracketing the temp storage to catch out-of-bounds writes), verifies
 * device results against the host reference, then times g_timing_iterations
 * performance runs.  AssertEquals aborts on mismatch.
 */
template <
Backend BACKEND,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestRange(
LevelT max_level,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT* levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of bin-boundary levels, one per active channel; levels[i] holds num_levels[i] monotonically increasing boundaries.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT));
// Describe the test configuration on stdout
printf("\n----------------------------\n");
printf("%s cub::DeviceHistogramRange %d pixels (%d height, %d width, %d-byte row stride), %d %d-byte %s samples (entropy reduction %d), %s counters, %d/%d channels, max sample ",
(BACKEND == CDP) ? "CDP CUB" : "CUB",
(int) (num_row_pixels * num_rows),
(int) num_rows,
(int) num_row_pixels,
(int) row_stride_bytes,
(int) total_samples,
(int) sizeof(SampleT),
typeid(SampleT).name(),
entropy_reduction,
typeid(CounterT).name(),
NUM_ACTIVE_CHANNELS,
NUM_CHANNELS);
std::cout << CoutCast(max_level) << "\n";
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
printf("Channel %d: %d bins [", channel, num_levels[channel] - 1);
std::cout << levels[channel][0];
for (int level = 1; level < num_levels[channel]; ++level)
std::cout << ", " << levels[channel][level];
printf("]\n");
}
fflush(stdout);
// Allocate and initialize host and device data
typedef SampleT Foo; // rename type to quelch gcc warnings (bug?)
SampleT* h_samples = new Foo[total_samples];
CounterT* h_histogram[NUM_ACTIVE_CHANNELS];
SearchTransform<LevelT> transform_op[NUM_ACTIVE_CHANNELS];
// Per-channel binary-search binning functor over the boundary levels
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
transform_op[channel].levels = levels[channel];
transform_op[channel].num_levels = num_levels[channel];
int bins = num_levels[channel] - 1;
h_histogram[channel] = new CounterT[bins];
}
// Generate samples and compute the host-side reference histograms
InitializeSamples<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
max_level, entropy_reduction, h_samples, num_row_pixels, num_rows, row_stride_bytes);
InitializeBins<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
h_samples, num_levels, transform_op, h_histogram, num_row_pixels, num_rows, row_stride_bytes);
// Allocate and initialize device data
SampleT* d_samples = NULL;
LevelT* d_levels[NUM_ACTIVE_CHANNELS];
CounterT* d_histogram[NUM_ACTIVE_CHANNELS];
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_samples, sizeof(SampleT) * total_samples));
CubDebugExit(hipMemcpy(d_samples, h_samples, sizeof(SampleT) * total_samples, hipMemcpyHostToDevice));
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_levels[channel], sizeof(LevelT) * num_levels[channel]));
CubDebugExit(hipMemcpy(d_levels[channel], levels[channel], sizeof(LevelT) * num_levels[channel], hipMemcpyHostToDevice));
int bins = num_levels[channel] - 1;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram[channel], sizeof(CounterT) * bins));
CubDebugExit(hipMemset(d_histogram[channel], 0, sizeof(CounterT) * bins));
}
// Allocate CDP device arrays (out-params used by the CDP dispatch path)
size_t *d_temp_storage_bytes = NULL;
hipError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1));
// Allocate temporary storage
// Size query: with d_temp_storage == NULL this dispatch only computes
// temp_storage_bytes (CUB's two-phase temp-storage convention)
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes,
d_samples,
d_histogram,
num_levels, d_levels,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Allocate temporary storage with "canary" zones
// (token-filled guard regions before and after the real temp storage so
// out-of-bounds writes by the dispatch are detected below)
int canary_bytes = 256;
char canary_token = 9;
char* canary_zone = new char[canary_bytes];
memset(canary_zone, canary_token, canary_bytes);
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes + (canary_bytes * 2)));
CubDebugExit(hipMemset(d_temp_storage, canary_token, temp_storage_bytes + (canary_bytes * 2)));
// Run warmup/correctness iteration
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
1, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples,
d_histogram,
num_levels, d_levels,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Check canary zones (both guard regions must still hold the token)
int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose);
AssertEquals(0, error);
error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose);
AssertEquals(0, error);
// Flush any stdout/stderr
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Check for correctness (and display results, if specified)
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose);
// NOTE(review): the newline is carried inside the "PASS\n" argument, so
// FAIL lines intentionally(?) do not end the line — confirm this is wanted
printf("\tChannel %d %s", channel, channel_error ? "FAIL" : "PASS\n");
// Accumulate per-channel failures; asserted once at the end
error |= channel_error;
}
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples,
d_histogram,
num_levels, d_levels,
num_row_pixels, num_rows, row_stride_bytes,
0, false);
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(total_samples) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(SampleT);
printf("\t%.3f avg ms, %.3f billion samples/s, %.3f billion bins/s, %.3f billion pixels/s, %.3f logical GB/s",
avg_millis,
giga_rate,
giga_rate * NUM_ACTIVE_CHANNELS / NUM_CHANNELS,
giga_rate / NUM_CHANNELS,
giga_bandwidth);
}
printf("\n\n");
// Cleanup
if (h_samples) delete[] h_samples;
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
if (h_histogram[channel])
delete[] h_histogram[channel];
if (d_histogram[channel])
CubDebugExit(g_allocator.DeviceFree(d_histogram[channel]));
if (d_levels[channel])
CubDebugExit(g_allocator.DeviceFree(d_levels[channel]));
}
if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, error);
}
/**
 * Test histogram-even: derive centered, evenly-spaced lower/upper bounds for
 * each channel, then exercise both pointer-based and iterator-based sample
 * inputs.
 */
template <
    Backend BACKEND,
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void TestEven(
    OffsetT num_row_pixels,
    OffsetT num_rows,
    OffsetT row_stride_bytes,
    int entropy_reduction,
    int num_levels[NUM_ACTIVE_CHANNELS],
    LevelT max_level,
    int max_num_levels)
{
    // Smallest level increment, derived from the largest channel level count
    const int max_bins = max_num_levels - 1;
    const LevelT min_level_increment = static_cast<LevelT>(max_level / max_bins);

    // Center each channel's bin range within [0, max_level]
    LevelT lower_level[NUM_ACTIVE_CHANNELS];
    LevelT upper_level[NUM_ACTIVE_CHANNELS];
    for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
    {
        int num_bins = num_levels[channel] - 1;
        lower_level[channel] = static_cast<LevelT>((max_level - (num_bins * min_level_increment)) / 2);
        upper_level[channel] = static_cast<LevelT>((max_level + (num_bins * min_level_increment)) / 2);
    }

    // Test pointer-based samples
    TestEvenNative<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
        max_level, entropy_reduction, num_levels, lower_level, upper_level, num_row_pixels, num_rows, row_stride_bytes);

    // Test iterator-based samples (CUB-only)
    TestEvenIterator<CUB, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
        max_level, entropy_reduction, num_levels, lower_level, upper_level, num_row_pixels, num_rows, row_stride_bytes);
}
/**
 * Test histogram-range: build a centered, evenly-spaced boundary-level array
 * for each channel, run the range test, then free the level arrays.
 */
template <
    Backend BACKEND,
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void TestRange(
    OffsetT num_row_pixels,
    OffsetT num_rows,
    OffsetT row_stride_bytes,
    int entropy_reduction,
    int num_levels[NUM_ACTIVE_CHANNELS],
    LevelT max_level,
    int max_num_levels)
{
    // Smallest level increment, derived from the largest channel level count
    const int max_bins = max_num_levels - 1;
    const LevelT min_level_increment = max_level / max_bins;

    // Per-channel boundary arrays, centered within [0, max_level]
    LevelT *levels[NUM_ACTIVE_CHANNELS];
    for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
    {
        levels[channel] = new LevelT[num_levels[channel]];

        int num_bins = num_levels[channel] - 1;
        LevelT lower_level = (max_level - (num_bins * min_level_increment)) / 2;
        for (int level = 0; level < num_levels[channel]; ++level)
            levels[channel][level] = lower_level + (level * min_level_increment);
    }

    TestRange<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
        max_level, entropy_reduction, num_levels, levels, num_row_pixels, num_rows, row_stride_bytes);

    for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
        delete[] levels[channel];
}
/**
 * Test different entrypoints: runs both the HistogramEven and HistogramRange
 * paths (CUB backend) over the same problem configuration.
 */
template <
typename SampleT,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename LevelT,
typename OffsetT>
void Test(
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS],
LevelT max_level,
int max_num_levels)
{
// Evenly-spaced bins (HistogramEven)
TestEven<CUB, SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
// Caller-supplied boundary levels (HistogramRange)
TestRange<CUB, SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
}
/**
 * Test different number of levels: give every channel a distinct level count
 * (channel 0 gets the maximum; each subsequent channel roughly halves the
 * previous count).
 *
 * (A variant where all channels share max_num_levels was previously present
 * but commented out as unnecessary testing.)
 */
template <
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void Test(
    OffsetT num_row_pixels,
    OffsetT num_rows,
    OffsetT row_stride_bytes,
    int entropy_reduction,
    LevelT max_level,
    int max_num_levels)
{
    int num_levels[NUM_ACTIVE_CHANNELS];

    // All different levels: halve the level count from channel to channel
    num_levels[0] = max_num_levels;
    for (int channel = 1; channel < NUM_ACTIVE_CHANNELS; ++channel)
        num_levels[channel] = (num_levels[channel - 1] / 2) + 1;

    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
}
/**
 * Test different entropy-levels: repeats the test suite for several
 * entropy-reduction settings (0, -1, 5 — see RandomBits for their semantics).
 */
template <
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void Test(
    OffsetT num_row_pixels,
    OffsetT num_rows,
    OffsetT row_stride_bytes,
    LevelT max_level,
    int max_num_levels)
{
    const int entropy_reductions[] = {0, -1, 5};
    for (int i = 0; i < int(sizeof(entropy_reductions) / sizeof(entropy_reductions[0])); ++i)
    {
        Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
            num_row_pixels, num_rows, row_stride_bytes, entropy_reductions[i], max_level, max_num_levels);
    }
}
/**
 * Test different row strides: exercises both an exactly-fitting row stride
 * and one with 13 samples of trailing padding per row.
 */
template <
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void Test(
    OffsetT num_row_pixels,
    OffsetT num_rows,
    LevelT max_level,
    int max_num_levels)
{
    // Tight stride: exactly one row of multi-channel samples
    OffsetT row_stride_bytes = num_row_pixels * NUM_CHANNELS * sizeof(SampleT);

    // No padding
    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        num_row_pixels, num_rows, row_stride_bytes, max_level, max_num_levels);

    // 13 samples of padding at the end of each row
    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        num_row_pixels, num_rows, row_stride_bytes + (13 * sizeof(SampleT)), max_level, max_num_levels);
}
/**
 * Test different problem sizes: degenerate (zero-row/zero-column) images, a
 * 1080p image, a sweep of aspect ratios, and four random single-row lengths.
 * (Call order is preserved deliberately: RandomBits draws from shared RNG
 * state, so reordering would change the generated samples.)
 */
template <
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void Test(
    LevelT max_level,
    int max_num_levels)
{
    // 0 row/col images
    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        OffsetT(1920), OffsetT(0), max_level, max_num_levels);
    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        OffsetT(0), OffsetT(0), max_level, max_num_levels);

    // 1080p image
    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        OffsetT(1920), OffsetT(1080), max_level, max_num_levels);

    // Varied aspect ratios, capped near 1M total pixels
    for (OffsetT rows = 1; rows < 1000000; rows *= 1000)
    {
        for (OffsetT cols = 1; cols < (1000000 / rows); cols *= 1000)
        {
            Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
                cols, rows, max_level, max_num_levels);
        }
    }

    // Four random single-row problem sizes in [1, 10,000,000]
    const unsigned int max_int = (unsigned int) -1;
    for (int i = 0; i < 4; ++i)
    {
        unsigned int num_items;
        RandomBits(num_items);
        num_items = (unsigned int) ((double(num_items) * double(10000000)) / double(max_int));
        num_items = CUB_MAX(1, num_items);
        Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
            OffsetT(num_items), 1, max_level, max_num_levels);
    }
}
/**
 * Test different channel interleavings (valid specialization): runs the full
 * test suite for each supported NUM_CHANNELS / NUM_ACTIVE_CHANNELS pairing.
 */
template <
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestChannels(
LevelT max_level,
int max_num_levels,
Int2Type<true> /*is_valid_tag*/)
{
// 1 channel, 1 active: plain single-channel histogram
Test<SampleT, 1, 1, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
// 4 channels, 3 active (e.g. RGBA pixels histogrammed over RGB only)
Test<SampleT, 4, 3, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
// 3 channels, all active
Test<SampleT, 3, 3, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
// 4 channels, all active
Test<SampleT, 4, 4, CounterT, LevelT, OffsetT>(max_level, max_num_levels);
}
/**
* Test different channel interleavings (invalid specialiation)
*/
// No-op overload selected by tag dispatch when the configuration should be
// skipped for this build (e.g., main() passes
// Int2Type<(sizeof(size_t) != sizeof(int))> so the 64-bit offset
// down-conversion test only runs where size_t is wider than int).
template <
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT>
void TestChannels(
LevelT /*max_level*/,
int /*max_num_levels*/,
Int2Type<false> /*is_valid_tag*/)
{}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
 * Main
 *
 * Parses command-line options, initializes the device, then runs one of
 * three suites selected at compile time: CUB_TEST_MINIMAL (two smoke tests),
 * CUB_TEST_BENCHMARK (a fixed matrix of HistogramEven/HistogramRange
 * configurations), or the default thorough sweep via TestChannels.
 */
int main(int argc, char** argv)
{
int num_row_pixels = -1; // --n: pixels per row (-1 => default to 1920*1080 below)
int entropy_reduction = 0; // --entropy: entropy-reduction factor for sample generation
int num_rows = 1; // --rows: number of image rows
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
g_verbose_input = args.CheckCmdLineFlag("v2");
args.GetCmdLineArgument("n", num_row_pixels);
// Row stride defaults to an unpadded row; --stride can widen it for padding tests
int row_stride_pixels = num_row_pixels;
args.GetCmdLineArgument("rows", num_rows);
args.GetCmdLineArgument("stride", row_stride_pixels);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("repeat", g_repeat);
args.GetCmdLineArgument("entropy", entropy_reduction);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<pixels per row>] "
"[--rows=<number of rows>] "
"[--stride=<row stride in pixels>] "
"[--i=<timing iterations>] "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--entropy=<entropy-reduction factor (default 0)>]"
"[--v] "
"[--cdp]"
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version
int ptx_version = 0;
CubDebugExit(PtxVersion(ptx_version));
// Default problem size: one full-HD frame's worth of pixels
if (num_row_pixels < 0)
{
num_row_pixels = 1920 * 1080;
row_stride_pixels = num_row_pixels;
}
#if defined(CUB_TEST_MINIMAL)
// Compile/run quick tests
{
// HistogramEven: unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramRange: signed char 256 bins
typedef signed char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestRange<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
#elif defined(CUB_TEST_BENCHMARK)
// Compile/run quick tests
{
// HistogramEven: unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: 4/4 multichannel Unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[4] = {257, 257, 257, 257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestEven<CUB, SampleT, 4, 4, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: 3/4 multichannel Unsigned char 256 bins
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[3] = {257, 257, 257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestEven<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: short [0,1024] 256 bins
typedef unsigned short SampleT;
typedef unsigned short LevelT;
LevelT max_level = 1024;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: float [0,1.0] 256 bins
typedef float SampleT;
typedef float LevelT;
LevelT max_level = 1.0;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: 3/4 multichannel float [0,1.0] 256 bins
typedef float SampleT;
typedef float LevelT;
LevelT max_level = 1.0;
int num_levels[3] = {257, 257, 257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestEven<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramRange: signed char 256 bins
typedef signed char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[1] = {257};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestRange<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramRange: 3/4 channel, unsigned char, varied bins (256, 128, 64)
typedef unsigned char SampleT;
typedef int LevelT;
LevelT max_level = 256;
int num_levels[3] = {257, 129, 65};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;
TestRange<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
{
// HistogramEven: double [0,1.0] 64 bins
typedef double SampleT;
typedef double LevelT;
LevelT max_level = 1.0;
int num_levels[1] = {65};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
{
// HistogramEven: short [0,1024] 512 bins
typedef unsigned short SampleT;
typedef unsigned short LevelT;
LevelT max_level = 1024;
int num_levels[1] = {513};
int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;
TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
}
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
TestChannels <unsigned char, int, int, int>(256, 256 + 1, Int2Type<true>());
TestChannels <signed char, int, int, int>(256, 256 + 1, Int2Type<true>());
TestChannels <unsigned short, int, int, int>(128, 128 + 1, Int2Type<true>());
TestChannels <unsigned short, int, int, int>(8192, 8192 + 1, Int2Type<true>());
TestChannels <float, int, float, int>(1.0, 256 + 1, Int2Type<true>());
// Test down-conversion of size_t offsets to int
TestChannels <unsigned char, int, int, long long>(256, 256 + 1, Int2Type<(sizeof(size_t) != sizeof(int))>());
}
#endif
return 0;
}
| 16b49ebbf338b13cef14090aeea36eafb162ea72.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceHistogram utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <limits>
#include <algorithm>
#include <typeinfo>
#include <cub/util_allocator.cuh>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/device/device_histogram.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
// Dispatch types
enum Backend
{
CUB, // CUB method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
// Command-line-driven globals (set in main())
bool g_verbose_input = false; // --v2: print each generated sample and its computed bin
bool g_verbose = false; // --v: verbose output when comparing device results
int g_timing_iterations = 0; // --i: number of timed iterations per test configuration
int g_repeat = 0; // --repeat: repetitions of the entire thorough test suite
CachingDeviceAllocator g_allocator(true); // caching allocator used for all device buffers
//---------------------------------------------------------------------
// Dispatch to different DeviceHistogram entrypoints
//---------------------------------------------------------------------
// Primary template; specialized below on (channel configuration, backend)
template <int NUM_ACTIVE_CHANNELS, int NUM_CHANNELS, int BACKEND>
struct Dispatch;
// Host-side dispatch specialization that forwards to the multi-channel CUB
// DeviceHistogram entrypoints (MultiHistogramRange / MultiHistogramEven).
// The d_temp_storage_bytes / d_cdp_error parameters are unused here; they
// exist so CUB and CDP specializations share a calling convention.
template <int NUM_ACTIVE_CHANNELS, int NUM_CHANNELS>
struct Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, CUB>
{
/**
* Dispatch to CUB multi histogram-range entrypoint
*
* Invokes the entrypoint timing_timing_iterations times (callers pass 1 for
* correctness runs, g_timing_iterations for timed runs); returns the error
* code of the last invocation.
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t Range(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT *(&d_histogram)[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT *(&d_levels)[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
// Repeat the call for timing; the last invocation's status is returned
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::MultiHistogramRange<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram,
num_levels,
d_levels,
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
/**
* Dispatch to CUB multi histogram-even entrypoint
*
* Same repetition/return convention as Range() above.
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t Even(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT *(&d_histogram)[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT *lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT *upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream,
bool debug_synchronous)
{
// NOTE(review): SampleT appears unused in this method body
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::MultiHistogramEven<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram,
num_levels,
lower_level,
upper_level,
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
};
// Host-side dispatch specialization for the single-channel case; forwards to
// the scalar CUB entrypoints (HistogramRange / HistogramEven), unwrapping the
// one-element histogram/level arrays.
template <>
struct Dispatch<1, 1, CUB>
{
/**
* Dispatch to CUB single histogram-range entrypoint
*
* Invokes the entrypoint timing_timing_iterations times (1 for correctness
* runs, g_timing_iterations for timed runs); returns the last error code.
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t Range(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT* (&d_histogram)[1], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
// NOTE(review): unlike the multi-channel specialization (LevelT *(&)[N]),
// d_levels is declared here as an array of LevelT values rather than of
// LevelT pointers -- verify against the TestRange caller's argument type.
LevelT (&d_levels)[1], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::HistogramRange(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram[0],
num_levels[0],
d_levels[0],
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
/**
* Dispatch to CUB single histogram-even entrypoint
*
* Same repetition/return convention as Range() above.
*/
template <typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT>
//CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t Even(
int timing_timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
CounterT* (&d_histogram)[1], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int *num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT *lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT *upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceHistogram::HistogramEven(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram[0],
num_levels[0],
lower_level[0],
upper_level[0],
num_row_pixels,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
return error;
}
};
//---------------------------------------------------------------------
// CUDA nested-parallelism test kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceHistogram
* /
template <int BINS, int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleT, typename SampleIteratorT, typename CounterT, int ALGORITHM>
__global__ void CnpDispatchKernel(
Int2Type<ALGORITHM> algorithm,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
SampleT *d_samples,
SampleIteratorT d_sample_itr,
ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_out_histograms,
int num_samples,
bool debug_synchronous)
{
#ifndef CUB_CDP
*d_cdp_error = cudaErrorNotSupported;
#else
*d_cdp_error = Dispatch<BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(algorithm, Int2Type<false>(), timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_samples, d_sample_itr, d_out_histograms.array, num_samples, 0, debug_synchronous);
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
/ **
* Dispatch to CDP kernel
* /
template <int BINS, int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleT, typename SampleIteratorT, typename CounterT, int ALGORITHM>
cudaError_t Dispatch(
Int2Type<ALGORITHM> algorithm,
Int2Type<true> use_cdp,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
SampleT *d_samples,
SampleIteratorT d_sample_itr,
CounterT *d_histograms[NUM_ACTIVE_CHANNELS],
int num_samples,
cudaStream_t stream,
bool debug_synchronous)
{
// Setup array wrapper for histogram channel output (because we can't pass static arrays as kernel parameters)
ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_histo_wrapper;
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
d_histo_wrapper.array[CHANNEL] = d_histograms[CHANNEL];
// Invoke kernel to invoke device-side dispatch
CnpDispatchKernel<BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, ALGORITHM><<<1,1>>>(algorithm, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_samples, d_sample_itr, d_histo_wrapper, num_samples, debug_synchronous);
// Copy out temp_storage_bytes
CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost));
// Copy out error
cudaError_t retval;
CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost));
return retval;
}
*/
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
// Converts a sample to a bin-id by binary-searching a sorted array of
// bin-boundary levels. A sample below the lowest boundary yields num_levels
// (the out-of-range sentinel); samples at or above the highest boundary
// land in the last slot (num_levels - 1), which callers also treat as
// out of range.
template <typename LevelT>
struct SearchTransform
{
    LevelT *levels;         // Pointer to (sorted) levels array
    int     num_levels;     // Number of levels in array

    // Map a sample to its bin index
    template <typename SampleT>
    int operator()(SampleT sample)
    {
        LevelT  key   = (LevelT) sample;
        LevelT *upper = std::upper_bound(levels, levels + num_levels, key);
        int     bin   = (int) (upper - levels) - 1;
        return (bin < 0) ? num_levels : bin;
    }
};
// Converts a sample to a bin-id assuming evenly-spaced bins: bins of width
// `scale` covering [min, max). Samples outside [min, max) map to num_levels
// (the out-of-range sentinel).
template <typename LevelT>
struct ScaleTransform
{
    int    num_levels;  // Number of levels in array
    LevelT max;         // Max sample level (exclusive)
    LevelT min;         // Min sample level (inclusive)
    LevelT scale;       // Bin width

    // Record the binning parameters
    void Init(
        int    num_levels_,  // Number of levels in array
        LevelT max_,         // Max sample level (exclusive)
        LevelT min_,         // Min sample level (inclusive)
        LevelT scale_)       // Bin scaling factor
    {
        num_levels = num_levels_;
        max        = max_;
        min        = min_;
        scale      = scale_;
    }

    // Map a sample to its bin index (num_levels when out of range)
    template <typename SampleT>
    int operator()(SampleT sample)
    {
        // Compare the raw sample so the usual arithmetic conversions apply
        // before any cast to LevelT
        if ((sample < min) || (sample >= max))
        {
            return num_levels;
        }
        LevelT offset = ((LevelT) sample) - min;
        return (int) (offset / scale);
    }
};
// Float specialization of ScaleTransform: stores the reciprocal of the bin
// width at Init() time so each sample costs a multiply instead of a divide.
template <>
struct ScaleTransform<float>
{
    int   num_levels;  // Number of levels in array
    float max;         // Max sample level (exclusive)
    float min;         // Min sample level (inclusive)
    float scale;       // Reciprocal of the bin width

    // Record the binning parameters (scale_ is the bin width; its reciprocal
    // is what gets stored)
    void Init(
        int   num_levels_,  // Number of levels in array
        float max_,         // Max sample level (exclusive)
        float min_,         // Min sample level (inclusive)
        float scale_)       // Bin scaling factor
    {
        num_levels = num_levels_;
        max        = max_;
        min        = min_;
        scale      = 1.0f / scale_;
    }

    // Map a sample to its bin index (num_levels when out of range)
    template <typename SampleT>
    int operator()(SampleT sample)
    {
        if ((sample < min) || (sample >= max))
        {
            return num_levels;
        }
        float offset = ((float) sample) - min;
        return (int) (offset * scale);
    }
};
/**
 * Generate sample
 *
 * Fills `datum` with a pseudo-random value: RandomBits (test_util.h) fills
 * `bits`, which is normalized against the full unsigned-int range and then
 * scaled by max_level and cast to the sample type (so the result lies in
 * [0, max_level], with max_level itself only reachable when bits == UINT_MAX).
 * `entropy_reduction` is forwarded to RandomBits -- its exact effect on the
 * sample distribution is defined in test_util.h; confirm there.
 */
template <typename T, typename LevelT>
void Sample(T &datum, LevelT max_level, int entropy_reduction)
{
// Largest unsigned int; used to normalize the random draw to [0, 1]
unsigned int max = (unsigned int) -1;
unsigned int bits;
RandomBits(bits, entropy_reduction);
float fraction = (float(bits) / max);
datum = (T) (fraction * max_level);
}
/**
 * Initialize histogram samples
 *
 * Fills the interleaved host sample array with random values for every
 * pixel's active channels. Inactive channels (indices
 * NUM_ACTIVE_CHANNELS..NUM_CHANNELS-1) and any row-stride padding beyond
 * num_row_pixels are left untouched. With g_verbose_input set, each sample
 * is echoed to stdout.
 */
template <
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename LevelT,
typename SampleT,
typename OffsetT>
void InitializeSamples(
LevelT max_level,
int entropy_reduction,
SampleT *h_samples,
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
// Initialize samples
for (OffsetT row = 0; row < num_rows; ++row)
{
for (OffsetT pixel = 0; pixel < num_row_pixels; ++pixel)
{
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
// Sample offset: rows are row_stride_bytes apart; channels are interleaved per pixel
OffsetT offset = (row * (row_stride_bytes / sizeof(SampleT))) + (pixel * NUM_CHANNELS) + channel;
// Init sample value
Sample(h_samples[offset], max_level, entropy_reduction);
if (g_verbose_input)
{
if (channel > 0) printf(", ");
std::cout << CoutCast(h_samples[offset]);
}
}
}
}
}
/**
 * Initialize histogram solutions
 *
 * Computes the reference (host-side) histogram for each active channel by
 * running every sample through that channel's sample-to-bin transform
 * functor and counting the in-range hits.
 *
 * @param h_samples        Host sample sequence (channels interleaved per pixel)
 * @param num_levels       Boundaries (levels) per active channel; channel i
 *                         has num_levels[i] - 1 bins
 * @param transform_op     Per-channel functor mapping a sample to a bin id
 *                         (ids outside [0, num_levels[i]-1) are ignored)
 * @param h_histogram      Output bin counters, one array per active channel
 * @param num_row_pixels   Number of multi-channel pixels per row
 * @param num_rows         Number of rows in the region of interest
 * @param row_stride_bytes Bytes between starts of consecutive rows
 */
template <
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename CounterT,
typename SampleIteratorT,
typename TransformOp,
typename OffsetT>
void InitializeBins(
SampleIteratorT h_samples,
int num_levels[NUM_ACTIVE_CHANNELS],
TransformOp transform_op[NUM_ACTIVE_CHANNELS],
CounterT *h_histogram[NUM_ACTIVE_CHANNELS],
OffsetT num_row_pixels,
OffsetT num_rows,
OffsetT row_stride_bytes)
{
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
// Zero the reference bin counters
for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL)
{
for (int bin = 0; bin < num_levels[CHANNEL] - 1; ++bin)
{
h_histogram[CHANNEL][bin] = 0;
}
}
// Bin every sample of every active channel
if (g_verbose_input) printf("Samples: \n");
for (OffsetT row = 0; row < num_rows; ++row)
{
for (OffsetT pixel = 0; pixel < num_row_pixels; ++pixel)
{
if (g_verbose_input) printf("[");
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
// Sample offset: rows are row_stride_bytes apart; channels interleaved per pixel
OffsetT offset = (row * (row_stride_bytes / sizeof(SampleT))) + (pixel * NUM_CHANNELS) + channel;
// Update sample bin
int bin = transform_op[channel](h_samples[offset]);
// BUGFIX: fflush previously sat outside the conditional (misleading
// indentation), flushing stdout once per sample per channel even in
// non-verbose runs; flush only when actually printing
if (g_verbose_input)
{
printf(" (%d)", bin);
fflush(stdout);
}
if ((bin >= 0) && (bin < num_levels[channel] - 1))
{
// valid bin
h_histogram[channel][bin]++;
}
}
if (g_verbose_input) printf("]");
}
if (g_verbose_input) printf("\n\n");
}
}
/**
 * Test histogram-even
 *
 * End-to-end test of the even-binned histogram path: computes a host
 * reference with ScaleTransform, sizes and allocates device temp storage
 * surrounded by "canary" guard zones, runs a correctness iteration, checks
 * the canaries and each channel's histogram against the reference, then
 * runs g_timing_iterations timed iterations and reports throughput.
 * Asserts (aborts) on any mismatch.
 */
template <
Backend BACKEND,
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleT,
typename CounterT,
typename LevelT,
typename OffsetT,
typename SampleIteratorT>
void TestEven(
LevelT max_level,
int entropy_reduction,
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
SampleIteratorT h_samples,
SampleIteratorT d_samples)
{
OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT));
printf("\n----------------------------\n");
printf("%s cub::DeviceHistogramEven (%s) %d pixels (%d height, %d width, %d-byte row stride), %d %d-byte %s samples (entropy reduction %d), %s counters, %d/%d channels, max sample ",
(BACKEND == CDP) ? "CDP CUB" : "CUB",
(IsPointer<SampleIteratorT>::VALUE) ? "pointer" : "iterator",
(int) (num_row_pixels * num_rows),
(int) num_rows,
(int) num_row_pixels,
(int) row_stride_bytes,
(int) total_samples,
(int) sizeof(SampleT),
typeid(SampleT).name(),
entropy_reduction,
typeid(CounterT).name(),
NUM_ACTIVE_CHANNELS,
NUM_CHANNELS);
std::cout << CoutCast(max_level) << "\n";
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
std::cout << "\n\tChannel " << channel << ": " << num_levels[channel] - 1 << " bins [" << lower_level[channel] << ", " << upper_level[channel] << ")\n";
fflush(stdout);
// Allocate and initialize host and device data
typedef SampleT Foo; // rename type to quelch gcc warnings (bug?)
CounterT* h_histogram[NUM_ACTIVE_CHANNELS];
ScaleTransform<LevelT> transform_op[NUM_ACTIVE_CHANNELS];
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int bins = num_levels[channel] - 1;
h_histogram[channel] = new CounterT[bins];
transform_op[channel].Init(
num_levels[channel],
upper_level[channel],
lower_level[channel],
static_cast<LevelT>(((upper_level[channel] - lower_level[channel]) / bins)));
}
// Compute the host-side reference solution
InitializeBins<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
h_samples, num_levels, transform_op, h_histogram, num_row_pixels, num_rows, row_stride_bytes);
// Allocate and initialize device data
CounterT* d_histogram[NUM_ACTIVE_CHANNELS];
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram[channel], sizeof(CounterT) * (num_levels[channel] - 1)));
CubDebugExit(cudaMemset(d_histogram[channel], 0, sizeof(CounterT) * (num_levels[channel] - 1)));
}
// Allocate CDP device arrays
size_t *d_temp_storage_bytes = NULL;
cudaError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1));
// Size temporary storage (d_temp_storage == NULL is the CUB sizing query)
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Allocate temporary storage with "canary" zones on either side so
// out-of-bounds writes by the algorithm can be detected
int canary_bytes = 256;
char canary_token = 8;
char* canary_zone = new char[canary_bytes];
memset(canary_zone, canary_token, canary_bytes);
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes + (canary_bytes * 2)));
CubDebugExit(cudaMemset(d_temp_storage, canary_token, temp_storage_bytes + (canary_bytes * 2)));
// Run warmup/correctness iteration
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
1, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, true);
// Check canary zones
int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose);
AssertEquals(0, error);
error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose);
AssertEquals(0, error);
// Flush any stdout/stderr
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
fflush(stdout);
fflush(stderr);
// Check for correctness (and display results, if specified)
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose);
// NOTE(review): "FAIL" deliberately left without a trailing newline to
// preserve the historical output format
printf("\tChannel %d %s", channel, channel_error ? "FAIL" : "PASS\n");
error |= channel_error;
}
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Even(
g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, row_stride_bytes,
0, false);
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(total_samples) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(SampleT);
printf("\t%.3f avg ms, %.3f billion samples/s, %.3f billion bins/s, %.3f billion pixels/s, %.3f logical GB/s",
avg_millis,
giga_rate,
giga_rate * NUM_ACTIVE_CHANNELS / NUM_CHANNELS,
giga_rate / NUM_CHANNELS,
giga_bandwidth);
}
printf("\n\n");
// Cleanup
for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
{
if (h_histogram[channel])
delete[] h_histogram[channel];
if (d_histogram[channel])
CubDebugExit(g_allocator.DeviceFree(d_histogram[channel]));
}
// BUGFIX: canary_zone was previously leaked (allocated with new[] but
// never freed); this test runs thousands of times in the thorough suite
delete[] canary_zone;
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, error);
}
/**
 * Test histogram-even with pointer-based (native) sample input.
 * Generates host samples, mirrors them on the device, and forwards both
 * buffers to the core TestEven driver.
 */
template <
    Backend BACKEND,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename SampleT,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void TestEvenNative(
    LevelT max_level,                           ///< [in] Maximum sample level
    int entropy_reduction,                      ///< [in] Entropy-reduction factor for sample generation
    int num_levels[NUM_ACTIVE_CHANNELS],        ///< [in] Number of level boundaries per active channel (bins = num_levels[i] - 1)
    LevelT lower_level[NUM_ACTIVE_CHANNELS],    ///< [in] Inclusive lower bound of the lowest bin per channel
    LevelT upper_level[NUM_ACTIVE_CHANNELS],    ///< [in] Exclusive upper bound of the highest bin per channel
    OffsetT num_row_pixels,                     ///< [in] Multi-channel pixels per row in the region of interest
    OffsetT num_rows,                           ///< [in] Number of rows in the region of interest
    OffsetT row_stride_bytes)                   ///< [in] Bytes between starts of consecutive rows
{
    OffsetT sample_count = num_rows * (row_stride_bytes / sizeof(SampleT));

    // Host-side sample buffer (local typedef works around a gcc warning)
    typedef SampleT Foo;
    SampleT *h_data = new Foo[sample_count];
    InitializeSamples<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
        max_level, entropy_reduction, h_data, num_row_pixels, num_rows, row_stride_bytes);

    // Device-side mirror of the host samples
    SampleT *d_data = NULL;
    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_data, sizeof(SampleT) * sample_count));
    CubDebugExit(cudaMemcpy(d_data, h_data, sizeof(SampleT) * sample_count, cudaMemcpyHostToDevice));

    // Run the core even-histogram test against both buffers
    TestEven<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
        max_level, entropy_reduction, num_levels, lower_level, upper_level,
        num_row_pixels, num_rows, row_stride_bytes,
        h_data, d_data);

    // Cleanup
    if (h_data) delete[] h_data;
    if (d_data) CubDebugExit(g_allocator.DeviceFree(d_data));
}
/**
 * Test histogram-even with iterator-based sample input: a
 * ConstantInputIterator yielding the lowest bin boundary of channel 0,
 * exercising the non-pointer sample path of DeviceHistogram.
 */
template <
    Backend BACKEND,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename SampleT,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void TestEvenIterator(
    LevelT max_level,                           ///< [in] Maximum sample level
    int entropy_reduction,                      ///< [in] Entropy-reduction factor (unused by the constant iterator itself)
    int num_levels[NUM_ACTIVE_CHANNELS],        ///< [in] Number of level boundaries per active channel (bins = num_levels[i] - 1)
    LevelT lower_level[NUM_ACTIVE_CHANNELS],    ///< [in] Inclusive lower bound of the lowest bin per channel
    LevelT upper_level[NUM_ACTIVE_CHANNELS],    ///< [in] Exclusive upper bound of the highest bin per channel
    OffsetT num_row_pixels,                     ///< [in] Multi-channel pixels per row in the region of interest
    OffsetT num_rows,                           ///< [in] Number of rows in the region of interest
    OffsetT row_stride_bytes)                   ///< [in] Bytes between starts of consecutive rows
{
    // Every sample is the constant lower boundary of channel 0
    SampleT fill_value = (SampleT) lower_level[0];
    ConstantInputIterator<SampleT> constant_itr(fill_value);

    // Same iterator serves as both the "host" and "device" sample source
    TestEven<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
        max_level, entropy_reduction, num_levels, lower_level, upper_level,
        num_row_pixels, num_rows, row_stride_bytes,
        constant_itr, constant_itr);
}
/**
 * Test histogram-range.
 *
 * Builds host/device sample and level buffers, computes a host reference
 * histogram, runs DeviceHistogram's range entrypoint via Dispatch (with
 * "canary" zones bracketing the temporary storage to detect out-of-bounds
 * writes), validates the device results, and times g_timing_iterations runs.
 *
 * FIX: the canary_zone host buffer was previously leaked; it is now freed
 * in the cleanup section.
 */
template <
    Backend BACKEND,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename SampleT,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void TestRange(
    LevelT max_level,
    int entropy_reduction,
    int num_levels[NUM_ACTIVE_CHANNELS],    ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
    LevelT* levels[NUM_ACTIVE_CHANNELS],    ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
    OffsetT num_row_pixels,                 ///< [in] The number of multi-channel pixels per row in the region of interest
    OffsetT num_rows,                       ///< [in] The number of rows in the region of interest
    OffsetT row_stride_bytes)               ///< [in] The number of bytes between starts of consecutive rows in the region of interest
{
    OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT));

    // Describe the test configuration
    printf("\n----------------------------\n");
    printf("%s cub::DeviceHistogramRange %d pixels (%d height, %d width, %d-byte row stride), %d %d-byte %s samples (entropy reduction %d), %s counters, %d/%d channels, max sample ",
        (BACKEND == CDP) ? "CDP CUB" : "CUB",
        (int) (num_row_pixels * num_rows),
        (int) num_rows,
        (int) num_row_pixels,
        (int) row_stride_bytes,
        (int) total_samples,
        (int) sizeof(SampleT),
        typeid(SampleT).name(),
        entropy_reduction,
        typeid(CounterT).name(),
        NUM_ACTIVE_CHANNELS,
        NUM_CHANNELS);
    std::cout << CoutCast(max_level) << "\n";
    for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
    {
        printf("Channel %d: %d bins [", channel, num_levels[channel] - 1);
        std::cout << levels[channel][0];
        for (int level = 1; level < num_levels[channel]; ++level)
            std::cout << ", " << levels[channel][level];
        printf("]\n");
    }
    fflush(stdout);

    // Allocate and initialize host and device data
    typedef SampleT Foo;        // rename type to quelch gcc warnings (bug?)
    SampleT* h_samples = new Foo[total_samples];
    CounterT* h_histogram[NUM_ACTIVE_CHANNELS];
    SearchTransform<LevelT> transform_op[NUM_ACTIVE_CHANNELS];
    for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
    {
        transform_op[channel].levels = levels[channel];
        transform_op[channel].num_levels = num_levels[channel];
        int bins = num_levels[channel] - 1;
        h_histogram[channel] = new CounterT[bins];
    }
    InitializeSamples<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
        max_level, entropy_reduction, h_samples, num_row_pixels, num_rows, row_stride_bytes);
    // Host-side reference histogram for the correctness check below
    InitializeBins<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
        h_samples, num_levels, transform_op, h_histogram, num_row_pixels, num_rows, row_stride_bytes);

    // Allocate and initialize device data
    SampleT* d_samples = NULL;
    LevelT* d_levels[NUM_ACTIVE_CHANNELS];
    CounterT* d_histogram[NUM_ACTIVE_CHANNELS];
    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_samples, sizeof(SampleT) * total_samples));
    CubDebugExit(cudaMemcpy(d_samples, h_samples, sizeof(SampleT) * total_samples, cudaMemcpyHostToDevice));
    for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
    {
        CubDebugExit(g_allocator.DeviceAllocate((void**)&d_levels[channel], sizeof(LevelT) * num_levels[channel]));
        CubDebugExit(cudaMemcpy(d_levels[channel], levels[channel], sizeof(LevelT) * num_levels[channel], cudaMemcpyHostToDevice));
        int bins = num_levels[channel] - 1;
        CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram[channel], sizeof(CounterT) * bins));
        CubDebugExit(cudaMemset(d_histogram[channel], 0, sizeof(CounterT) * bins));
    }

    // Allocate CDP device arrays
    size_t *d_temp_storage_bytes = NULL;
    cudaError_t *d_cdp_error = NULL;
    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1));

    // Query temporary storage requirements (d_temp_storage == NULL)
    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
        1, d_temp_storage_bytes, d_cdp_error,
        d_temp_storage, temp_storage_bytes,
        d_samples,
        d_histogram,
        num_levels, d_levels,
        num_row_pixels, num_rows, row_stride_bytes,
        0, true);

    // Allocate temporary storage with "canary" zones bracketing it
    int canary_bytes = 256;
    char canary_token = 9;
    char* canary_zone = new char[canary_bytes];
    memset(canary_zone, canary_token, canary_bytes);
    CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes + (canary_bytes * 2)));
    CubDebugExit(cudaMemset(d_temp_storage, canary_token, temp_storage_bytes + (canary_bytes * 2)));

    // Run warmup/correctness iteration
    Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
        1, d_temp_storage_bytes, d_cdp_error,
        ((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
        d_samples,
        d_histogram,
        num_levels, d_levels,
        num_row_pixels, num_rows, row_stride_bytes,
        0, true);

    // Check canary zones: any mismatch means the dispatch wrote outside
    // its declared temp_storage_bytes
    int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose);
    AssertEquals(0, error);
    error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose);
    AssertEquals(0, error);

    // Flush any stdout/stderr
    CubDebugExit(cudaPeekAtLastError());
    CubDebugExit(cudaDeviceSynchronize());
    fflush(stdout);
    fflush(stderr);

    // Check for correctness (and display results, if specified)
    for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
    {
        int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose);
        printf("\tChannel %d %s", channel, channel_error ? "FAIL" : "PASS\n");
        error |= channel_error;
    }

    // Performance
    GpuTimer gpu_timer;
    gpu_timer.Start();
    Dispatch<NUM_ACTIVE_CHANNELS, NUM_CHANNELS, BACKEND>::Range(
        g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
        ((char *) d_temp_storage) + canary_bytes, temp_storage_bytes,
        d_samples,
        d_histogram,
        num_levels, d_levels,
        num_row_pixels, num_rows, row_stride_bytes,
        0, false);
    gpu_timer.Stop();
    float elapsed_millis = gpu_timer.ElapsedMillis();

    // Display performance
    if (g_timing_iterations > 0)
    {
        float avg_millis = elapsed_millis / g_timing_iterations;
        float giga_rate = float(total_samples) / avg_millis / 1000.0f / 1000.0f;
        float giga_bandwidth = giga_rate * sizeof(SampleT);
        printf("\t%.3f avg ms, %.3f billion samples/s, %.3f billion bins/s, %.3f billion pixels/s, %.3f logical GB/s",
            avg_millis,
            giga_rate,
            giga_rate * NUM_ACTIVE_CHANNELS / NUM_CHANNELS,
            giga_rate / NUM_CHANNELS,
            giga_bandwidth);
    }
    printf("\n\n");

    // Cleanup
    delete[] canary_zone;   // FIX: previously leaked on every invocation
    if (h_samples) delete[] h_samples;
    for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel)
    {
        if (h_histogram[channel])
            delete[] h_histogram[channel];
        if (d_histogram[channel])
            CubDebugExit(g_allocator.DeviceFree(d_histogram[channel]));
        if (d_levels[channel])
            CubDebugExit(g_allocator.DeviceFree(d_levels[channel]));
    }
    if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples));
    if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
    if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
    if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));

    // Correctness asserts
    AssertEquals(0, error);
}
/**
 * Test histogram-even: derive per-channel [lower, upper) level bounds
 * centered on max_level / 2, then exercise both the native-pointer and
 * iterator sample-input paths.
 */
template <
    Backend BACKEND,
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void TestEven(
    OffsetT num_row_pixels,
    OffsetT num_rows,
    OffsetT row_stride_bytes,
    int entropy_reduction,
    int num_levels[NUM_ACTIVE_CHANNELS],
    LevelT max_level,
    int max_num_levels)
{
    LevelT lower_level[NUM_ACTIVE_CHANNELS];
    LevelT upper_level[NUM_ACTIVE_CHANNELS];

    // Smallest level increment, implied by the channel with the most bins
    int max_bins = max_num_levels - 1;
    LevelT level_step = static_cast<LevelT>(max_level / max_bins);

    // Center each channel's bin range around max_level / 2
    for (int c = 0; c < NUM_ACTIVE_CHANNELS; ++c)
    {
        int bins = num_levels[c] - 1;
        lower_level[c] = static_cast<LevelT>((max_level - (bins * level_step)) / 2);
        upper_level[c] = static_cast<LevelT>((max_level + (bins * level_step)) / 2);
    }

    // Pointer-based samples (requested backend)
    TestEvenNative<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
        max_level, entropy_reduction, num_levels, lower_level, upper_level, num_row_pixels, num_rows, row_stride_bytes);

    // Iterator-based samples (supported by the CUB backend only)
    TestEvenIterator<CUB, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
        max_level, entropy_reduction, num_levels, lower_level, upper_level, num_row_pixels, num_rows, row_stride_bytes);
}
/**
 * Test histogram-range: build evenly spaced level boundaries for each
 * channel (centered on max_level / 2) and forward to the core TestRange.
 */
template <
    Backend BACKEND,
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void TestRange(
    OffsetT num_row_pixels,
    OffsetT num_rows,
    OffsetT row_stride_bytes,
    int entropy_reduction,
    int num_levels[NUM_ACTIVE_CHANNELS],
    LevelT max_level,
    int max_num_levels)
{
    // Smallest level increment, implied by the channel with the most bins
    int max_bins = max_num_levels - 1;
    LevelT level_step = max_level / max_bins;

    // Per-channel boundary arrays: num_levels[c] evenly spaced levels
    LevelT* levels[NUM_ACTIVE_CHANNELS];
    for (int c = 0; c < NUM_ACTIVE_CHANNELS; ++c)
    {
        levels[c] = new LevelT[num_levels[c]];

        int bins = num_levels[c] - 1;
        LevelT base = (max_level - (bins * level_step)) / 2;
        for (int level = 0; level < num_levels[c]; ++level)
            levels[c][level] = base + (level * level_step);
    }

    TestRange<BACKEND, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleT, CounterT, LevelT, OffsetT>(
        max_level, entropy_reduction, num_levels, levels, num_row_pixels, num_rows, row_stride_bytes);

    for (int c = 0; c < NUM_ACTIVE_CHANNELS; ++c)
        delete[] levels[c];
}
/**
 * Test different entrypoints: run both the even-binned and range-binned
 * histogram drivers over the same problem configuration (CUB backend).
 */
template <
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void Test(
    OffsetT num_row_pixels,
    OffsetT num_rows,
    OffsetT row_stride_bytes,
    int entropy_reduction,
    int num_levels[NUM_ACTIVE_CHANNELS],
    LevelT max_level,
    int max_num_levels)
{
    // Even-binned histogram
    TestEven<CUB, SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        num_row_pixels, num_rows, row_stride_bytes,
        entropy_reduction, num_levels, max_level, max_num_levels);

    // Range-binned histogram
    TestRange<CUB, SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        num_row_pixels, num_rows, row_stride_bytes,
        entropy_reduction, num_levels, max_level, max_num_levels);
}
/**
 * Test different numbers of levels: channel 0 gets max_num_levels
 * boundaries, and each subsequent channel gets roughly half as many bins
 * as the previous one.
 *
 * (An "all channels use max_num_levels" variant existed here but was
 * removed as unnecessary testing.)
 */
template <
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void Test(
    OffsetT num_row_pixels,
    OffsetT num_rows,
    OffsetT row_stride_bytes,
    int entropy_reduction,
    LevelT max_level,
    int max_num_levels)
{
    int num_levels[NUM_ACTIVE_CHANNELS];

    // All different level counts: halve the bin count per channel
    num_levels[0] = max_num_levels;
    for (int c = 1; c < NUM_ACTIVE_CHANNELS; ++c)
        num_levels[c] = (num_levels[c - 1] / 2) + 1;

    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels);
}
/**
 * Test different entropy levels: uniform (0), maximum-entropy sentinel (-1),
 * and heavily reduced (5), in that order.
 */
template <
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void Test(
    OffsetT num_row_pixels,
    OffsetT num_rows,
    OffsetT row_stride_bytes,
    LevelT max_level,
    int max_num_levels)
{
    // Same order as before: 0, then -1, then 5
    int entropy_settings[] = {0, -1, 5};
    for (int i = 0; i < 3; ++i)
    {
        Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
            num_row_pixels, num_rows, row_stride_bytes, entropy_settings[i], max_level, max_num_levels);
    }
}
/**
 * Test different row strides: an exact (unpadded) stride, then the same
 * stride padded by 13 extra samples per row.
 */
template <
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void Test(
    OffsetT num_row_pixels,
    OffsetT num_rows,
    LevelT max_level,
    int max_num_levels)
{
    // Tight stride: exactly one row of multi-channel samples
    OffsetT tight_stride_bytes = num_row_pixels * NUM_CHANNELS * sizeof(SampleT);

    // No padding
    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        num_row_pixels, num_rows, tight_stride_bytes, max_level, max_num_levels);

    // 13 samples of padding between rows
    OffsetT padded_stride_bytes = tight_stride_bytes + (13 * sizeof(SampleT));
    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        num_row_pixels, num_rows, padded_stride_bytes, max_level, max_num_levels);
}
/**
 * Test different problem sizes: degenerate (zero-row / zero-pixel) images,
 * a 1080p frame, a sweep of aspect ratios, and four random linear sizes.
 */
template <
    typename SampleT,
    int NUM_CHANNELS,
    int NUM_ACTIVE_CHANNELS,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void Test(
    LevelT max_level,
    int max_num_levels)
{
    // 0 row/col images
    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        OffsetT(1920), OffsetT(0), max_level, max_num_levels);
    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        OffsetT(0), OffsetT(0), max_level, max_num_levels);

    // 1080 image
    Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
        OffsetT(1920), OffsetT(1080), max_level, max_num_levels);

    // Sample different aspect ratios, keeping rows * cols under one million
    for (OffsetT height = 1; height < 1000000; height *= 1000)
    {
        for (OffsetT width = 1; width < (1000000 / height); width *= 1000)
        {
            Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
                width, height, max_level, max_num_levels);
        }
    }

    // Four randomly chosen linear problem sizes in [1, 10,000,000]
    unsigned int max_int = (unsigned int) -1;
    for (int trial = 0; trial < 4; ++trial)
    {
        unsigned int num_items;
        RandomBits(num_items);
        num_items = (unsigned int) ((double(num_items) * double(10000000)) / double(max_int));
        num_items = CUB_MAX(1, num_items);
        Test<SampleT, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, CounterT, LevelT, OffsetT>(
            OffsetT(num_items), 1, max_level, max_num_levels);
    }
}
/**
 * Test different channel interleavings (valid specialization):
 * 1/1, 4/3 (e.g. RGBA with alpha ignored), 3/3, and 4/4 channel layouts.
 */
template <
    typename SampleT,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void TestChannels(
    LevelT max_level,
    int max_num_levels,
    Int2Type<true> /*is_valid_tag*/)
{
    Test<SampleT, 1, 1, CounterT, LevelT, OffsetT>(max_level, max_num_levels);   // single channel
    Test<SampleT, 4, 3, CounterT, LevelT, OffsetT>(max_level, max_num_levels);   // 3 active of 4 interleaved
    Test<SampleT, 3, 3, CounterT, LevelT, OffsetT>(max_level, max_num_levels);   // 3 of 3
    Test<SampleT, 4, 4, CounterT, LevelT, OffsetT>(max_level, max_num_levels);   // 4 of 4
}
/**
 * Test different channel interleavings (invalid specialization):
 * intentionally a no-op, selected via Int2Type<false> when the
 * configuration is not applicable.
 */
template <
    typename SampleT,
    typename CounterT,
    typename LevelT,
    typename OffsetT>
void TestChannels(
    LevelT /*max_level*/,
    int /*max_num_levels*/,
    Int2Type<false> /*is_valid_tag*/)
{}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
 * Main: parse command-line options, initialize the device, and run the
 * histogram test suite selected at compile time (CUB_TEST_MINIMAL,
 * CUB_TEST_BENCHMARK, or the thorough default sweep).
 */
int main(int argc, char** argv)
{
    int num_row_pixels = -1;        // -1 = unspecified; defaulted to a 1080p frame below
    int entropy_reduction = 0;
    int num_rows = 1;

    // Initialize command line
    CommandLineArgs args(argc, argv);
    g_verbose = args.CheckCmdLineFlag("v");
    g_verbose_input = args.CheckCmdLineFlag("v2");
    args.GetCmdLineArgument("n", num_row_pixels);
    int row_stride_pixels = num_row_pixels;     // default stride: no row padding
    args.GetCmdLineArgument("rows", num_rows);
    args.GetCmdLineArgument("stride", row_stride_pixels);
    args.GetCmdLineArgument("i", g_timing_iterations);
    args.GetCmdLineArgument("repeat", g_repeat);
    args.GetCmdLineArgument("entropy", entropy_reduction);

    // Print usage
    if (args.CheckCmdLineFlag("help"))
    {
        printf("%s "
            "[--n=<pixels per row>] "
            "[--rows=<number of rows>] "
            "[--stride=<row stride in pixels>] "
            "[--i=<timing iterations>] "
            "[--device=<device-id>] "
            "[--repeat=<repetitions of entire test suite>]"
            "[--entropy=<entropy-reduction factor (default 0)>]"
            "[--v] "
            "[--cdp]"
            "\n", argv[0]);
        exit(0);
    }

    // Initialize device
    CubDebugExit(args.DeviceInit());

    // Get ptx version
    int ptx_version = 0;
    CubDebugExit(PtxVersion(ptx_version));

    // Default problem size: one 1080p frame, unpadded stride
    if (num_row_pixels < 0)
    {
        num_row_pixels = 1920 * 1080;
        row_stride_pixels = num_row_pixels;
    }

#if defined(CUB_TEST_MINIMAL)

    // Compile/run quick tests
    {
        // HistogramEven: unsigned char 256 bins
        typedef unsigned char SampleT;
        typedef int LevelT;

        LevelT max_level = 256;
        int num_levels[1] = {257};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;

        TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

    {
        // HistogramRange: signed char 256 bins
        typedef signed char SampleT;
        typedef int LevelT;

        LevelT max_level = 256;
        int num_levels[1] = {257};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;

        TestRange<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

#elif defined(CUB_TEST_BENCHMARK)

    // Compile/run quick tests
    {
        // HistogramEven: unsigned char 256 bins
        typedef unsigned char SampleT;
        typedef int LevelT;

        LevelT max_level = 256;
        int num_levels[1] = {257};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;

        TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

    {
        // HistogramEven: 4/4 multichannel Unsigned char 256 bins
        typedef unsigned char SampleT;
        typedef int LevelT;

        LevelT max_level = 256;
        int num_levels[4] = {257, 257, 257, 257};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;

        TestEven<CUB, SampleT, 4, 4, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

    {
        // HistogramEven: 3/4 multichannel Unsigned char 256 bins
        typedef unsigned char SampleT;
        typedef int LevelT;

        LevelT max_level = 256;
        int num_levels[3] = {257, 257, 257};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;

        TestEven<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

    {
        // HistogramEven: short [0,1024] 256 bins
        typedef unsigned short SampleT;
        typedef unsigned short LevelT;

        LevelT max_level = 1024;
        int num_levels[1] = {257};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;

        TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

    {
        // HistogramEven: float [0,1.0] 256 bins
        typedef float SampleT;
        typedef float LevelT;

        LevelT max_level = 1.0;
        int num_levels[1] = {257};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;

        TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

    {
        // HistogramEven: 3/4 multichannel float [0,1.0] 256 bins
        typedef float SampleT;
        typedef float LevelT;

        LevelT max_level = 1.0;
        int num_levels[3] = {257, 257, 257};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;

        TestEven<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

    {
        // HistogramRange: signed char 256 bins
        typedef signed char SampleT;
        typedef int LevelT;

        LevelT max_level = 256;
        int num_levels[1] = {257};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;

        TestRange<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

    {
        // HistogramRange: 3/4 channel, unsigned char, varied bins (256, 128, 64)
        typedef unsigned char SampleT;
        typedef int LevelT;

        LevelT max_level = 256;
        int num_levels[3] = {257, 129, 65};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4;

        TestRange<CUB, SampleT, 4, 3, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

    if (ptx_version > 120)  // Don't check doubles on PTX120 or below because they're down-converted
    {
        // HistogramEven: double [0,1.0] 64 bins
        typedef double SampleT;
        typedef double LevelT;

        LevelT max_level = 1.0;
        int num_levels[1] = {65};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;

        TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

    {
        // HistogramEven: short [0,1024] 512 bins
        typedef unsigned short SampleT;
        typedef unsigned short LevelT;

        LevelT max_level = 1024;
        int num_levels[1] = {513};
        int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1;

        TestEven<CUB, SampleT, 1, 1, int, LevelT, int>(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]);
    }

#else

    // Compile/run thorough tests: sweep sample types and level counts,
    // repeating the whole suite g_repeat + 1 times
    for (int i = 0; i <= g_repeat; ++i)
    {
        TestChannels <unsigned char, int, int, int>(256, 256 + 1, Int2Type<true>());
        TestChannels <signed char, int, int, int>(256, 256 + 1, Int2Type<true>());
        TestChannels <unsigned short, int, int, int>(128, 128 + 1, Int2Type<true>());
        TestChannels <unsigned short, int, int, int>(8192, 8192 + 1, Int2Type<true>());
        TestChannels <float, int, float, int>(1.0, 256 + 1, Int2Type<true>());

        // Test down-conversion of size_t offsets to int
        TestChannels <unsigned char, int, int, long long>(256, 256 + 1, Int2Type<(sizeof(size_t) != sizeof(int))>());
    }

#endif

    return 0;
}
|
bb3ef3257cead07a6f7eb04932817860e260d9b5.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <thrust/execution_policy.h>
#include "cupy_common.h"
#include "cupy_thrust.h"
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
#include <hip/hip_fp16.h>
#endif
using namespace thrust;
#if CUPY_USE_HIP
// NOTE(review): self-referential typedef left behind by hipify (the CUDA
// original presumably aliased cudaStream_t); legal but redundant — confirm
// against upstream and consider removing.
typedef hipStream_t hipStream_t;
namespace cuda {
    using thrust::hip::par;
}
#endif // #if CUPY_USE_HIP

// Allocation hooks implemented on the CuPy side; `void *` is an opaque
// handle to CuPy's memory pool.
extern "C" char *cupy_malloc(void *, ptrdiff_t);
extern "C" void cupy_free(void *, char *);
/*
 * Minimal Thrust-compatible allocator that routes temporary-storage
 * requests through CuPy's memory pool via the cupy_malloc / cupy_free
 * C hooks declared above.
 */
class cupy_allocator {
public:
    typedef char value_type;

    cupy_allocator(void* memory) : memory(memory) {}

    // Called by Thrust for scratch space; delegates to CuPy's pool.
    char *allocate(std::ptrdiff_t num_bytes) {
        return cupy_malloc(memory, num_bytes);
    }

    // Size argument is unused: CuPy's pool tracks sizes internally.
    void deallocate(char *ptr, size_t n) {
        cupy_free(memory, ptr);
    }

private:
    void* memory;   // opaque CuPy memory-pool handle
};
/*
* ------------------------------------ Minimum boilerplate to support complex numbers -------------------------------------
* We need a specialized operator< here in order to match the NumPy behavior:
* "The sort order for complex numbers is lexicographic. If both the real and imaginary parts are non-nan then the order is
* determined by the real parts except when they are equal, in which case the order is determined by the imaginary parts.
*
* In numpy versions >= 1.4.0 nan values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
* where R is a non-nan real value. Complex values with the same nan placements are sorted according to the non-nan part if
* it exists. Non-nan values are sorted as before."
* Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.html
*/
/**
 * NumPy-compatible lexicographic "less than" for complex values with NaN
 * values ordered last: [R + Rj, R + nanj, nan + Rj, nan + nanj].
 * T must expose real/imaginary parts as members .x and .y
 * (e.g. hipComplex / hipDoubleComplex).
 */
template <typename T>
__host__ __device__ __forceinline__ bool _cmp_less(const T& lhs, const T& rhs) {
    // NOTE: *Re / *Im here mean "the real/imaginary part IS NaN"
    bool lhsRe = isnan(lhs.x);
    bool lhsIm = isnan(lhs.y);
    bool rhsRe = isnan(rhs.x);
    bool rhsIm = isnan(rhs.y);

    // neither side has nan: ordinary lexicographic (real, then imaginary)
    if (!lhsRe && !lhsIm && !rhsRe && !rhsIm) {
        return (lhs.x < rhs.x || ((lhs.x == rhs.x) && (lhs.y < rhs.y)));
    }

    // one side has nan, and the other does not: the nan-free side sorts first
    if (!lhsRe && !lhsIm && (rhsRe || rhsIm)) {
        return true;
    }
    if ((lhsRe || lhsIm) && !rhsRe && !rhsIm) {
        return false;
    }

    // pick 2 from 3 possibilities (R + nanj, nan + Rj, nan + nanj):
    // order by NaN placement — real NaN sorts after imaginary NaN
    if (lhsRe && !rhsRe) {
        return false;
    }
    if (!lhsRe && rhsRe) {
        return true;
    }
    if (lhsIm && !rhsIm) {
        return false;
    }
    if (!lhsIm && rhsIm) {
        return true;
    }

    // pick 1 from 3 and compare the numerical values (nan+nan*I compares to itself as false):
    // same NaN placement on both sides, so compare the non-NaN component
    return (((lhsIm && rhsIm) && (lhs.x < rhs.x)) || ((lhsRe && rhsRe) && (lhs.y < rhs.y)));
}
/*
* Unfortunately we need explicit (instead of templated) definitions here, because the template specializations would
* go through some wild routes in Thrust that passing by reference to device functions is not working...
*/
/// NaN-aware, NumPy-style ordering for single-precision complex (see _cmp_less).
__host__ __device__ __forceinline__ bool operator<(const hipComplex& lhs, const hipComplex& rhs) {
    return _cmp_less(lhs, rhs);
}
/// NaN-aware, NumPy-style ordering for double-precision complex (see _cmp_less).
__host__ __device__ __forceinline__ bool operator<(const hipDoubleComplex& lhs, const hipDoubleComplex& rhs) {
    return _cmp_less(lhs, rhs);
}
/* ------------------------------------ end of boilerplate ------------------------------------ */
/*
* --------------------------------- Minimum boilerplate to support half precision floats ----------------------------------
* half_isnan is copied from cupy/cuda/cupy_cub.cu, and the specialization of less<__half> is also borrowed from there.
* TODO(leofang): is it possible to refactor the code and avoid repetition?
*/
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
/// Returns true when the half-precision value is NaN.
/// Device code uses the __hisnan intrinsic; host code converts to float first.
__host__ __device__ __forceinline__ bool half_isnan(const __half& x) {
#ifdef __CUDA_ARCH__
    return __hisnan(x);
#else
    // TODO(leofang): do we really need this branch?
    return isnan(__half2float(x));
#endif
}
// specialize thrust::less for __half: orders NaN values last (mirroring the
// complex comparator above) so sorts of half arrays match NumPy semantics
template <>
struct less<__half> {
    __host__ __device__ __forceinline__ bool operator() (const __half& lhs, const __half& rhs) const {
        // NaN on the left never sorts before anything; NaN on the right
        // sorts after any non-NaN value
        if (half_isnan(lhs)) {
            return false;
        } else if (half_isnan(rhs)) {
            return true;
        } else {
#ifdef __CUDA_ARCH__
            return lhs < rhs;
#else
            // TODO(leofang): do we really need this branch?
            return __half2float(lhs) < __half2float(rhs);
#endif
        }
    }
};
#endif // include cupy_fp16.h
/* ------------------------------------ end of boilerplate ------------------------------------ */
/*
 * sort
 */

/**
 * Stable in-place sort of a device array along its last axis.
 *
 * For 1-D input the data is sorted directly. For N-D input, a row index
 * (flat index / last-axis extent) is generated into `keys_start` for every
 * element, and (key, value) pairs are sorted together so that elements never
 * cross row boundaries — i.e. each last-axis segment is sorted independently.
 *
 * data_start: device pointer to the T elements to sort (contiguous).
 * keys_start: device scratch buffer of `size` size_t keys (unused when ndim == 1).
 * shape:      array shape; product of extents gives the element count.
 * stream:     HIP/CUDA stream handle (cast from size_t).
 * memory:     opaque CuPy pool handle for Thrust's temporary allocations.
 */
template <typename T>
void cupy::thrust::_sort(void *data_start, size_t *keys_start,
                         const std::vector<ptrdiff_t>& shape, size_t stream,
                         void* memory) {
    size_t ndim = shape.size();
    ptrdiff_t size;
    device_ptr<T> dp_data_first, dp_data_last;
    device_ptr<size_t> dp_keys_first, dp_keys_last;
    hipStream_t stream_ = (hipStream_t)stream;
    cupy_allocator alloc(memory);

    // Compute the total size of the array.
    size = shape[0];
    for (size_t i = 1; i < ndim; ++i) {
        size *= shape[i];
    }

    dp_data_first = device_pointer_cast(static_cast<T*>(data_start));
    dp_data_last = device_pointer_cast(static_cast<T*>(data_start) + size);

    if (ndim == 1) {
        // Single axis: sort the values directly
        stable_sort(cuda::par(alloc).on(stream_), dp_data_first, dp_data_last, less<T>());
    } else {
        // Generate key indices: key[i] = i / shape[ndim-1], i.e. the row number
        dp_keys_first = device_pointer_cast(keys_start);
        dp_keys_last = device_pointer_cast(keys_start + size);

        transform(cuda::par(alloc).on(stream_),
                  make_counting_iterator<size_t>(0),
                  make_counting_iterator<size_t>(size),
                  make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
                  dp_keys_first,
                  divides<size_t>());

        // Sort (row, value) pairs; tuple less<> compares row first, so values
        // only reorder within their own row
        stable_sort(
            cuda::par(alloc).on(stream_),
            make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
            make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
            less< tuple<size_t, T> >());
    }
}
// Explicit instantiations for every dtype CuPy sorts through Thrust.
template void cupy::thrust::_sort<cpy_byte>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_ubyte>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_short>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_ushort>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_int>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_uint>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_long>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_ulong>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_float>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_double>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_complex64>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_complex128>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_bool>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);

// __half sorting compiles only when the toolchain and target support half
// precision (note: for the 9.x series the minor-version test is == 2, i.e.
// only CUDA 9.2 qualifies); otherwise this function is a silent no-op.
void cupy::thrust::_sort_fp16(void *data_start, size_t *keys_start,
                              const std::vector<ptrdiff_t>& shape, size_t stream,
                              void* memory) {
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
    cupy::thrust::_sort<__half>(data_start, keys_start, shape, stream, memory);
#endif
}
/*
* lexsort
*/
// Comparator over indices: orders index i before j when the backing data
// satisfies data[i] < data[j] under the (possibly specialized) less<T>.
template <typename T>
class elem_less {
public:
    elem_less(const T *data):_data(data) {}
    __device__ __forceinline__ bool operator()(size_t i, size_t j) const {
        less<T> comp;
        return comp(_data[i], _data[j]);
    }
private:
    const T *_data;  // borrowed device pointer; not owned
};
// Writes into idx_start the permutation that lexicographically sorts n
// records described by k keys (each key is a contiguous run of n values
// starting at keys_start + i * n).  Keys are applied in order i = 0..k-1
// with a stable sort each time, so key k-1 ends up as the most
// significant sort criterion.
template <typename T>
void cupy::thrust::_lexsort(size_t *idx_start, void *keys_start, size_t k,
                            size_t n, size_t stream, void *memory) {
    /* idx_start is the beginning of the output array where the indexes that
       would sort the data will be placed. The original contents of idx_start
       will be destroyed. */
    device_ptr<size_t> dp_first = device_pointer_cast(idx_start);
    device_ptr<size_t> dp_last = device_pointer_cast(idx_start + n);
    hipStream_t stream_ = (hipStream_t)stream;
    cupy_allocator alloc(memory);

    // Start from the identity permutation 0..n-1.
    sequence(cuda::par(alloc).on(stream_), dp_first, dp_last);
    for (size_t i = 0; i < k; ++i) {
        T *key_start = static_cast<T*>(keys_start) + i * n;
        stable_sort(
            cuda::par(alloc).on(stream_),
            dp_first,
            dp_last,
            elem_less<T>(key_start)
        );
    }
}
// Explicit instantiations for every dtype CuPy lexsorts through Thrust.
template void cupy::thrust::_lexsort<cpy_byte>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_ubyte>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_short>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_ushort>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_int>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_uint>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_long>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_ulong>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_float>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_double>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_complex64>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_complex128>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_bool>(
    size_t *, void *, size_t, size_t, size_t, void *);

// __half lexsort: compiled only when half precision is supported by the
// toolchain/architecture (see guard); otherwise a silent no-op.
void cupy::thrust::_lexsort_fp16(size_t *idx_start, void *keys_start, size_t k,
                                 size_t n, size_t stream, void *memory) {
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
    cupy::thrust::_lexsort<__half>(idx_start, keys_start, k, n, stream, memory);
#endif
}
/*
* argsort
*/
// Fills idx_start with the indices that sort `data_start` along the last
// axis of an array with the given `shape`.  The indices written are
// positions within the last axis (i % shape[ndim-1]), and a stable
// sort-by-key carries them alongside the data.  For ndim > 1 a segment id
// (i / shape[ndim-1]) in `keys_start` keeps each row sorted independently.
template <typename T>
void cupy::thrust::_argsort(size_t *idx_start, void *data_start,
                            void *keys_start,
                            const std::vector<ptrdiff_t>& shape,
                            size_t stream, void *memory) {
    /* idx_start is the beginning of the output array where the indexes that
       would sort the data will be placed. The original contents of idx_start
       will be destroyed. */
    size_t ndim = shape.size();
    ptrdiff_t size;
    hipStream_t stream_ = (hipStream_t)stream;
    cupy_allocator alloc(memory);

    device_ptr<size_t> dp_idx_first, dp_idx_last;
    device_ptr<T> dp_data_first, dp_data_last;
    device_ptr<size_t> dp_keys_first, dp_keys_last;

    // Compute the total size of the data array.
    size = shape[0];
    for (size_t i = 1; i < ndim; ++i) {
        size *= shape[i];
    }

    // Cast device pointers of data.
    dp_data_first = device_pointer_cast(static_cast<T*>(data_start));
    dp_data_last = device_pointer_cast(static_cast<T*>(data_start) + size);

    // Generate an index sequence (position of each element in its row).
    dp_idx_first = device_pointer_cast(static_cast<size_t*>(idx_start));
    dp_idx_last = device_pointer_cast(static_cast<size_t*>(idx_start) + size);
    transform(cuda::par(alloc).on(stream_),
              make_counting_iterator<size_t>(0),
              make_counting_iterator<size_t>(size),
              make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
              dp_idx_first,
              modulus<size_t>());

    if (ndim == 1) {
        // Sort the index sequence by data.
        stable_sort_by_key(cuda::par(alloc).on(stream_),
                           dp_data_first,
                           dp_data_last,
                           dp_idx_first);
    } else {
        // Generate key indices (segment id of each element).
        dp_keys_first = device_pointer_cast(static_cast<size_t*>(keys_start));
        dp_keys_last = device_pointer_cast(static_cast<size_t*>(keys_start) + size);
        transform(cuda::par(alloc).on(stream_),
                  make_counting_iterator<size_t>(0),
                  make_counting_iterator<size_t>(size),
                  make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
                  dp_keys_first,
                  divides<size_t>());
        stable_sort_by_key(
            cuda::par(alloc).on(stream_),
            make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
            make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
            dp_idx_first);
    }
}
// Explicit instantiations for every dtype CuPy argsorts through Thrust.
template void cupy::thrust::_argsort<cpy_byte>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_ubyte>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_short>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_ushort>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_int>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_uint>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_long>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_ulong>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_float>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_double>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_complex64>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_complex128>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_bool>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);

// __half argsort: compiled only when half precision is supported by the
// toolchain/architecture (see guard); otherwise a silent no-op.
void cupy::thrust::_argsort_fp16(size_t *idx_start, void *data_start,
                                 void *keys_start,
                                 const std::vector<ptrdiff_t>& shape,
                                 size_t stream, void *memory) {
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
    cupy::thrust::_argsort<__half>(idx_start, data_start, keys_start, shape, stream, memory);
#endif
}
| bb3ef3257cead07a6f7eb04932817860e260d9b5.cu | #include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <thrust/execution_policy.h>
#include "cupy_common.h"
#include "cupy_thrust.h"
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
#include <cuda_fp16.h>
#endif
using namespace thrust;
#if CUPY_USE_HIP
typedef hipStream_t cudaStream_t;
namespace cuda {
using thrust::hip::par;
}
#endif // #if CUPY_USE_HIP
extern "C" char *cupy_malloc(void *, ptrdiff_t);
extern "C" void cupy_free(void *, char *);
// Minimal Thrust-compatible allocator that routes temporary-buffer
// requests back to CuPy's memory pool through the opaque `memory` handle
// and the extern "C" cupy_malloc/cupy_free hooks.
class cupy_allocator {
private:
    void* memory;  // opaque pool handle owned by the caller

public:
    typedef char value_type;

    cupy_allocator(void* memory) : memory(memory) {}

    char *allocate(std::ptrdiff_t num_bytes) {
        return cupy_malloc(memory, num_bytes);
    }

    // `n` is unused: the pool behind cupy_free tracks sizes itself.
    void deallocate(char *ptr, size_t n) {
        cupy_free(memory, ptr);
    }
};
/*
* ------------------------------------ Minimum boilerplate to support complex numbers -------------------------------------
* We need a specialized operator< here in order to match the NumPy behavior:
* "The sort order for complex numbers is lexicographic. If both the real and imaginary parts are non-nan then the order is
* determined by the real parts except when they are equal, in which case the order is determined by the imaginary parts.
*
* In numpy versions >= 1.4.0 nan values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
* where R is a non-nan real value. Complex values with the same nan placements are sorted according to the non-nan part if
* it exists. Non-nan values are sorted as before."
* Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.html
*/
// NaN-aware lexicographic "less than" for complex values (T exposes .x
// real / .y imaginary).  Implements NumPy's extended sort order:
//   R + Rj  <  R + nanj  <  nan + Rj  <  nan + nanj
// with values of the same NaN placement ordered by their non-NaN part
// (nan + nan*I compares false against itself).
template <typename T>
__host__ __device__ __forceinline__ bool _cmp_less(const T& lhs, const T& rhs) {
    const bool l_re_nan = isnan(lhs.x);
    const bool l_im_nan = isnan(lhs.y);
    const bool r_re_nan = isnan(rhs.x);
    const bool r_im_nan = isnan(rhs.y);
    const bool l_has_nan = l_re_nan || l_im_nan;
    const bool r_has_nan = r_re_nan || r_im_nan;

    // Case 1: neither operand contains NaN -> plain lexicographic order.
    if (!l_has_nan && !r_has_nan) {
        if (lhs.x != rhs.x) {
            return lhs.x < rhs.x;
        }
        return lhs.y < rhs.y;
    }

    // Case 2: exactly one side contains NaN -> the NaN-free side sorts first.
    if (!l_has_nan) {
        return true;   // rhs carries the NaN
    }
    if (!r_has_nan) {
        return false;  // lhs carries the NaN
    }

    // Case 3: both contain NaN -> order by NaN placement first.
    if (l_re_nan != r_re_nan) {
        return r_re_nan;   // the side with a non-NaN real part comes first
    }
    if (l_im_nan != r_im_nan) {
        return r_im_nan;   // the side with a non-NaN imaginary part comes first
    }

    // Same placement: compare the surviving component.  If both components
    // are NaN on both sides, the float compare below yields false.
    if (l_im_nan) {
        return lhs.x < rhs.x;
    }
    return lhs.y < rhs.y;
}
/*
* Unfortunately we need explicit (instead of templated) definitions here, because the template specializations would
* go through some wild routes in Thrust that passing by reference to device functions is not working...
*/
// NaN-aware lexicographic ordering for single-precision complex numbers.
__host__ __device__ __forceinline__ bool operator<(const cuComplex& lhs, const cuComplex& rhs) {
    return _cmp_less(lhs, rhs);
}

// NaN-aware lexicographic ordering for double-precision complex numbers.
__host__ __device__ __forceinline__ bool operator<(const cuDoubleComplex& lhs, const cuDoubleComplex& rhs) {
    return _cmp_less(lhs, rhs);
}
/* ------------------------------------ end of boilerplate ------------------------------------ */
/*
* --------------------------------- Minimum boilerplate to support half precision floats ----------------------------------
* half_isnan is copied from cupy/cuda/cupy_cub.cu, and the specialization of less<__half> is also borrowed from there.
* TODO(leofang): is it possible to refactor the code and avoid repetition?
*/
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// Returns true when the half-precision value x is NaN.
// On the device the native __hisnan intrinsic is used; on the host the
// value is widened to float first (half_isnan must be callable from both).
__host__ __device__ __forceinline__ bool half_isnan(const __half& x) {
#ifdef __CUDA_ARCH__
    return __hisnan(x);
#else
    // TODO(leofang): do we really need this branch?
    return isnan(__half2float(x));
#endif
}
// specialize thrust::less for __half
// Specialization of thrust::less for __half so that NaNs sort to the end,
// matching NumPy's extended sort order for real values: [R, nan].
template <>
struct less<__half> {
    __host__ __device__ __forceinline__ bool operator() (const __half& lhs, const __half& rhs) const {
        // A NaN lhs is never "less"; any non-NaN lhs is less than a NaN rhs.
        if (half_isnan(lhs)) {
            return false;
        } else if (half_isnan(rhs)) {
            return true;
        } else {
#ifdef __CUDA_ARCH__
            return lhs < rhs;
#else
            // TODO(leofang): do we really need this branch?
            return __half2float(lhs) < __half2float(rhs);
#endif
        }
    }
};
#endif // include cupy_fp16.h
/* ------------------------------------ end of boilerplate ------------------------------------ */
/*
* sort
*/
// Sorts `data_start` in place along the last axis of an array with the
// given `shape`, running thrust::stable_sort on `stream` and drawing
// temporary buffers from the CuPy pool handle `memory`.
//
// For ndim == 1 the data is sorted directly.  For ndim > 1 each segment of
// length shape[ndim-1] must be sorted independently: a per-element segment
// id (i / shape[ndim-1]) is written into `keys_start`, and (key, value)
// pairs are stable-sorted lexicographically so elements never cross
// segment boundaries.
template <typename T>
void cupy::thrust::_sort(void *data_start, size_t *keys_start,
                         const std::vector<ptrdiff_t>& shape, size_t stream,
                         void* memory) {
    size_t ndim = shape.size();
    ptrdiff_t size;
    device_ptr<T> dp_data_first, dp_data_last;
    device_ptr<size_t> dp_keys_first, dp_keys_last;
    cudaStream_t stream_ = (cudaStream_t)stream;
    cupy_allocator alloc(memory);

    // Compute the total size of the array.
    size = shape[0];
    for (size_t i = 1; i < ndim; ++i) {
        size *= shape[i];
    }

    dp_data_first = device_pointer_cast(static_cast<T*>(data_start));
    dp_data_last = device_pointer_cast(static_cast<T*>(data_start) + size);
    if (ndim == 1) {
        stable_sort(cuda::par(alloc).on(stream_), dp_data_first, dp_data_last, less<T>());
    } else {
        // Generate key indices.
        dp_keys_first = device_pointer_cast(keys_start);
        dp_keys_last = device_pointer_cast(keys_start + size);
        transform(cuda::par(alloc).on(stream_),
                  make_counting_iterator<size_t>(0),
                  make_counting_iterator<size_t>(size),
                  make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
                  dp_keys_first,
                  divides<size_t>());
        stable_sort(
            cuda::par(alloc).on(stream_),
            make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
            make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
            less< tuple<size_t, T> >());
    }
}
// Explicit instantiations for every dtype CuPy sorts through Thrust.
template void cupy::thrust::_sort<cpy_byte>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_ubyte>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_short>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_ushort>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_int>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_uint>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_long>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_ulong>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_float>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_double>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_complex64>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_complex128>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);
template void cupy::thrust::_sort<cpy_bool>(
    void *, size_t *, const std::vector<ptrdiff_t>& shape, size_t, void *);

// __half sorting compiles only when the toolchain and target support half
// precision (note: for the 9.x series the minor-version test is == 2, i.e.
// only CUDA 9.2 qualifies); otherwise this function is a silent no-op.
void cupy::thrust::_sort_fp16(void *data_start, size_t *keys_start,
                              const std::vector<ptrdiff_t>& shape, size_t stream,
                              void* memory) {
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
    cupy::thrust::_sort<__half>(data_start, keys_start, shape, stream, memory);
#endif
}
/*
* lexsort
*/
template <typename T>
class elem_less {
public:
elem_less(const T *data):_data(data) {}
__device__ __forceinline__ bool operator()(size_t i, size_t j) const {
less<T> comp;
return comp(_data[i], _data[j]);
}
private:
const T *_data;
};
// Writes into idx_start the permutation that lexicographically sorts n
// records described by k keys (each key is a contiguous run of n values
// starting at keys_start + i * n).  Keys are applied in order i = 0..k-1
// with a stable sort each time, so key k-1 ends up as the most
// significant sort criterion.
template <typename T>
void cupy::thrust::_lexsort(size_t *idx_start, void *keys_start, size_t k,
                            size_t n, size_t stream, void *memory) {
    /* idx_start is the beginning of the output array where the indexes that
       would sort the data will be placed. The original contents of idx_start
       will be destroyed. */
    device_ptr<size_t> dp_first = device_pointer_cast(idx_start);
    device_ptr<size_t> dp_last = device_pointer_cast(idx_start + n);
    cudaStream_t stream_ = (cudaStream_t)stream;
    cupy_allocator alloc(memory);

    // Start from the identity permutation 0..n-1.
    sequence(cuda::par(alloc).on(stream_), dp_first, dp_last);
    for (size_t i = 0; i < k; ++i) {
        T *key_start = static_cast<T*>(keys_start) + i * n;
        stable_sort(
            cuda::par(alloc).on(stream_),
            dp_first,
            dp_last,
            elem_less<T>(key_start)
        );
    }
}
// Explicit instantiations for every dtype CuPy lexsorts through Thrust.
template void cupy::thrust::_lexsort<cpy_byte>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_ubyte>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_short>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_ushort>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_int>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_uint>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_long>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_ulong>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_float>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_double>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_complex64>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_complex128>(
    size_t *, void *, size_t, size_t, size_t, void *);
template void cupy::thrust::_lexsort<cpy_bool>(
    size_t *, void *, size_t, size_t, size_t, void *);

// __half lexsort: compiled only when half precision is supported by the
// toolchain/architecture (see guard); otherwise a silent no-op.
void cupy::thrust::_lexsort_fp16(size_t *idx_start, void *keys_start, size_t k,
                                 size_t n, size_t stream, void *memory) {
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
    cupy::thrust::_lexsort<__half>(idx_start, keys_start, k, n, stream, memory);
#endif
}
/*
* argsort
*/
// Fills idx_start with the indices that sort `data_start` along the last
// axis of an array with the given `shape`.  The indices written are
// positions within the last axis (i % shape[ndim-1]), and a stable
// sort-by-key carries them alongside the data.  For ndim > 1 a segment id
// (i / shape[ndim-1]) in `keys_start` keeps each row sorted independently.
template <typename T>
void cupy::thrust::_argsort(size_t *idx_start, void *data_start,
                            void *keys_start,
                            const std::vector<ptrdiff_t>& shape,
                            size_t stream, void *memory) {
    /* idx_start is the beginning of the output array where the indexes that
       would sort the data will be placed. The original contents of idx_start
       will be destroyed. */
    size_t ndim = shape.size();
    ptrdiff_t size;
    cudaStream_t stream_ = (cudaStream_t)stream;
    cupy_allocator alloc(memory);

    device_ptr<size_t> dp_idx_first, dp_idx_last;
    device_ptr<T> dp_data_first, dp_data_last;
    device_ptr<size_t> dp_keys_first, dp_keys_last;

    // Compute the total size of the data array.
    size = shape[0];
    for (size_t i = 1; i < ndim; ++i) {
        size *= shape[i];
    }

    // Cast device pointers of data.
    dp_data_first = device_pointer_cast(static_cast<T*>(data_start));
    dp_data_last = device_pointer_cast(static_cast<T*>(data_start) + size);

    // Generate an index sequence (position of each element in its row).
    dp_idx_first = device_pointer_cast(static_cast<size_t*>(idx_start));
    dp_idx_last = device_pointer_cast(static_cast<size_t*>(idx_start) + size);
    transform(cuda::par(alloc).on(stream_),
              make_counting_iterator<size_t>(0),
              make_counting_iterator<size_t>(size),
              make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
              dp_idx_first,
              modulus<size_t>());

    if (ndim == 1) {
        // Sort the index sequence by data.
        stable_sort_by_key(cuda::par(alloc).on(stream_),
                           dp_data_first,
                           dp_data_last,
                           dp_idx_first);
    } else {
        // Generate key indices (segment id of each element).
        dp_keys_first = device_pointer_cast(static_cast<size_t*>(keys_start));
        dp_keys_last = device_pointer_cast(static_cast<size_t*>(keys_start) + size);
        transform(cuda::par(alloc).on(stream_),
                  make_counting_iterator<size_t>(0),
                  make_counting_iterator<size_t>(size),
                  make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
                  dp_keys_first,
                  divides<size_t>());
        stable_sort_by_key(
            cuda::par(alloc).on(stream_),
            make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
            make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
            dp_idx_first);
    }
}
// Explicit instantiations for every dtype CuPy argsorts through Thrust.
template void cupy::thrust::_argsort<cpy_byte>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_ubyte>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_short>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_ushort>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_int>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_uint>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_long>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_ulong>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_float>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_double>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_complex64>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_complex128>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);
template void cupy::thrust::_argsort<cpy_bool>(
    size_t *, void *, void *, const std::vector<ptrdiff_t>& shape, size_t,
    void *);

// __half argsort: compiled only when half precision is supported by the
// toolchain/architecture (see guard); otherwise a silent no-op.
void cupy::thrust::_argsort_fp16(size_t *idx_start, void *data_start,
                                 void *keys_start,
                                 const std::vector<ptrdiff_t>& shape,
                                 size_t stream, void *memory) {
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
    cupy::thrust::_argsort<__half>(idx_start, data_start, keys_start, shape, stream, memory);
#endif
}
|
14af328a09b8a3485c7e4c2c08790e6810872006.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/*
* This is a simple test program to measure the memcopy bandwidth of the GPU.
* It can measure device to device copy bandwidth, host to device copy bandwidth
* for pageable and pinned memory, and device to host copy bandwidth for pageable
* and pinned memory.
*
* Usage:
* ./bandwidthTest [option]...
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
#include <hip/hip_runtime.h>
// defines, project
#define MEMCOPY_ITERATIONS 10
#define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M
#define DEFAULT_INCREMENT (1 << 22) //4 M
#define CACHE_CLEAR_SIZE (1 << 24) //16 M
//shmoo mode defines
#define SHMOO_MEMSIZE_MAX (1 << 26) //64 M
#define SHMOO_MEMSIZE_START (1 << 10) //1 KB
#define SHMOO_INCREMENT_1KB (1 << 10) //1 KB
#define SHMOO_INCREMENT_2KB (1 << 11) //2 KB
#define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB
#define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_INCREMENT_1MB (1 << 20) //1 MB
#define SHMOO_INCREMENT_2MB (1 << 21) //2 MB
#define SHMOO_INCREMENT_4MB (1 << 22) //4 MB
#define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB
#define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB
#define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_LIMIT_1MB (1 << 20) //1 MB
#define SHMOO_LIMIT_16MB (1 << 24) //16 MB
#define SHMOO_LIMIT_32MB (1 << 25) //32 MB
//enums, project
enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE };
enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE };
enum printMode { USER_READABLE, CSV };
enum memoryMode { PINNED, PAGEABLE };
// if true, use CPU based timing for everything
static bool bDontUseGPUTiming;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(const int argc, const char **argv);
void testBandwidth( unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testDeviceToDeviceTransfer(unsigned int memSize);
void printResultsReadable(unsigned int *memSizes, float *bandwidths, unsigned int count);
void printResultsCSV(unsigned int *memSizes, float *bandwidths, unsigned int count);
void printHelp(void);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
    // Parse arguments and run the selected bandwidth tests; cutilExit
    // handles the standard CUTIL exit/prompt behaviour.
    runTest(argc, (const char**)argv);
    cutilExit(argc, argv);
}
///////////////////////////////////////////////////////////////////////////////
//Parse args, run the appropriate tests
///////////////////////////////////////////////////////////////////////////////
// Parses the command line and runs the selected bandwidth tests.
// Recognized options: --help, --csv, --memory=<pageable|pinned>,
// --device=<n|all>, --mode=<quick|range|shmoo>, --htod, --dtoh, --dtod,
// --wc (only when CUDART_VERSION >= 2020), --cputiming, and in range mode
// the mandatory --start/--end/--increment sizes.
// NOTE(review): `device` and `modeStr` appear to be allocated by
// cutGetCmdLineArgumentstr but are never cutFree'd (only memModeStr is),
// and the early-return paths free nothing -- confirm against cutil's
// ownership rules.
void runTest(const int argc, const char **argv)
{
    int start = DEFAULT_SIZE;
    int end = DEFAULT_SIZE;
    int startDevice = 0;
    int endDevice = 0;
    int increment = DEFAULT_INCREMENT;
    testMode mode = QUICK_MODE;
    bool htod = false;
    bool dtoh = false;
    bool dtod = false;
    bool wc = false;
    char *modeStr;  // only assigned/read when --mode is present
    char *device = NULL;
    printMode printmode = USER_READABLE;
    char *memModeStr = NULL;
    memoryMode memMode = PAGEABLE;

    //process command line args
    if(cutCheckCmdLineFlag( argc, argv, "help"))
    {
        printHelp();
        return;
    }

    if(cutCheckCmdLineFlag( argc, argv, "csv"))
    {
        printmode = CSV;
    }

    if( cutGetCmdLineArgumentstr(argc, argv, "memory", &memModeStr) )
    {
        if( strcmp(memModeStr, "pageable") == 0 )
        {
            memMode = PAGEABLE;
        }
        else if( strcmp(memModeStr, "pinned") == 0)
        {
            memMode = PINNED;
        }
        else
        {
            printf("Invalid memory mode - valid modes are pageable or pinned\n");
            printf("See --help for more information\n");
            return;
        }
    }
    else
    {
        //default - pageable memory
        memMode = PAGEABLE;
    }

    if( cutGetCmdLineArgumentstr(argc, argv, "device", &device) )
    {
        int deviceCount;
        hipGetDeviceCount(&deviceCount);
        if( deviceCount == 0 )
        {
            printf("!!!!!No devices found!!!!!\n");
            return;
        }
        if( strcmp (device, "all") == 0 )
        {
            printf ("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n");
            startDevice = 0;
            endDevice = deviceCount-1;
        }
        else
        {
            // Single device requested; fall back to device 0 when the
            // index is out of range.
            startDevice = endDevice = atoi(device);
            if( startDevice >= deviceCount || startDevice < 0)
            {
                printf("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0);
                startDevice = endDevice = 0;
            }
        }
    }
    printf("Running on......\n");
    for( int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
    {
        hipDeviceProp_t deviceProp;
        hipGetDeviceProperties(&deviceProp, currentDevice);
        printf (" device %d:%s\n", currentDevice,deviceProp.name);
    }

    if( cutGetCmdLineArgumentstr(argc, argv, "mode", &modeStr) )
    {
        //figure out the mode
        if( strcmp(modeStr, "quick") == 0 )
        {
            mode = QUICK_MODE;
        }
        else if( strcmp(modeStr, "shmoo") == 0 )
        {
            mode = SHMOO_MODE;
        }
        else if( strcmp(modeStr, "range") == 0 )
        {
            mode = RANGE_MODE;
        }
        else
        {
            printf("Invalid mode - valid modes are quick, range, or shmoo\n");
            printf("See --help for more information\n");
            return;
        }
    }
    else
    {
        //default mode - quick
        mode = QUICK_MODE;
    }

    if(cutCheckCmdLineFlag( argc, argv, "htod"))
        htod = true;
    if(cutCheckCmdLineFlag( argc, argv, "dtoh"))
        dtoh = true;
    if(cutCheckCmdLineFlag( argc, argv, "dtod"))
        dtod = true;
#if CUDART_VERSION >= 2020
    // Write-combined pinned memory only exists in CUDA runtime >= 2.2.
    if(cutCheckCmdLineFlag( argc, argv, "wc"))
        wc = true;
#endif
    if(cutCheckCmdLineFlag( argc, argv, "cputiming"))
        bDontUseGPUTiming = true;

    if( !htod && !dtoh && !dtod )
    {
        //default: All
        htod = true;
        dtoh = true;
        dtod = true;
    }

    // Range mode requires all three of start/end/increment, each positive,
    // with start <= end.
    if( RANGE_MODE == mode )
    {
        if( cutGetCmdLineArgumenti( argc, argv, "start", &start) )
        {
            if( start <= 0 )
            {
                printf("Illegal argument - start must be greater than zero\n");
                return;
            }
        }
        else
        {
            printf("Must specify a starting size in range mode\n");
            printf("See --help for more information\n");
            return;
        }
        if( cutGetCmdLineArgumenti( argc, argv, "end", &end) )
        {
            if( end <= 0 )
            {
                printf("Illegal argument - end must be greater than zero\n");
                return;
            }
            if( start > end )
            {
                printf("Illegal argument - start is greater than end\n");
                return;
            }
        }
        else
        {
            printf("Must specify an end size in range mode.\n");
            printf("See --help for more information\n");
            return;
        }
        if( cutGetCmdLineArgumenti( argc, argv, "increment", &increment) )
        {
            if( increment <= 0 )
            {
                printf("Illegal argument - increment must be greater than zero\n");
                return;
            }
        }
        else
        {
            printf("Must specify an increment in user mode\n");
            printf("See --help for more information\n");
            return;
        }
    }

    if( htod )
    {
        testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
                      mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
    }
    if( dtoh )
    {
        testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
                      mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc);
    }
    if( dtod )
    {
        testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
                      mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
    }

    printf("&&&& Test PASSED\n");

    cutFree( memModeStr);
    return;
}
///////////////////////////////////////////////////////////////////////////////
// Run a bandwidth test
///////////////////////////////////////////////////////////////////////////////
// Dispatches to the worker matching the requested test mode; unknown
// modes just print a diagnostic and return.
void
testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
              testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
    if( QUICK_MODE == mode )
    {
        printf("Quick Mode\n");
        testBandwidthQuick( DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc );
    }
    else if( RANGE_MODE == mode )
    {
        printf("Range Mode\n");
        testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc);
    }
    else if( SHMOO_MODE == mode )
    {
        printf("Shmoo Mode\n");
        testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc);
    }
    else
    {
        printf("Invalid testing mode\n");
    }
}
//////////////////////////////////////////////////////////////////////
// Run a quick mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
    // Quick mode is a degenerate range test: start == end == size, so exactly
    // one transfer size is measured (the increment value is never stepped).
    testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc);
}
///////////////////////////////////////////////////////////////////////
// Run a range mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
                   memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
    // Measures bandwidth for every size in [start, end] stepped by 'increment'
    // on every device in [startDevice, endDevice], then prints the results.
    //count the number of copies we're going to run (both endpoints inclusive)
    unsigned int count = 1 + ((end - start) / increment);
    unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
    float *bandwidths = ( float * ) malloc( count * sizeof(float) );
    // Fail gracefully on host OOM instead of dereferencing NULL below.
    if (NULL == memSizes || NULL == bandwidths)
    {
        printf("Unable to allocate host memory for result arrays\n");
        free(memSizes);
        free(bandwidths);
        return;
    }
    //print information for use
    switch(kind)
    {
    case DEVICE_TO_HOST: printf("Device to Host Bandwidth for ");
        break;
    case HOST_TO_DEVICE: printf("Host to Device Bandwidth for ");
        break;
    case DEVICE_TO_DEVICE: printf("Device to Device Bandwidth\n");
        break;
    }
    if( DEVICE_TO_DEVICE != kind )
    {
        switch(memMode)
        {
        case PAGEABLE: printf("Pageable memory\n");
            break;
        case PINNED: printf("Pinned memory\n");
            if (wc) printf("Write-Combined memory enabled\n");
            break;
        }
    }
    // Zero the accumulators before summing bandwidth across devices.
    // (unsigned index: 'count' is unsigned, and the copy loop below uses one)
    for (unsigned int i = 0; i < count; i++)
        bandwidths[i] = 0.0f;
    // Use the device(s) asked for by the user
    for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
    {
        hipSetDevice(currentDevice);
        //run each of the copies
        for(unsigned int i = 0; i < count; i++)
        {
            memSizes[i] = start + i * increment;
            // Accumulate (+=) rather than overwrite so that multi-device runs
            // report cumulative bandwidth, consistent with testBandwidthShmoo
            // and with the zero-initialization above. Single-device results
            // are unchanged (0 + x == x).
            switch(kind)
            {
            case DEVICE_TO_HOST: bandwidths[i] += testDeviceToHostTransfer( memSizes[i], memMode, wc );
                break;
            case HOST_TO_DEVICE: bandwidths[i] += testHostToDeviceTransfer( memSizes[i], memMode, wc );
                break;
            case DEVICE_TO_DEVICE: bandwidths[i] += testDeviceToDeviceTransfer( memSizes[i] );
                break;
            }
            printf(".");
        }
        hipDeviceReset();
    } // Complete the bandwidth computation on all the devices
    printf("\n");
    //print results
    if(printmode == CSV)
    {
        printResultsCSV(memSizes, bandwidths, count);
    }
    else
    {
        printResultsReadable(memSizes, bandwidths, count);
    }
    //clean up
    free(memSizes);
    free(bandwidths);
}
//////////////////////////////////////////////////////////////////////////////
// Intense shmoo mode - covers a large range of values with varying increments
//////////////////////////////////////////////////////////////////////////////
void
testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
    // Sweeps transfer sizes from 1KB up past SHMOO_MEMSIZE_MAX (64MB) with a
    // step size that grows with the transfer size (1KB steps below 20KB, up
    // to 4MB steps above 32MB). Bandwidths are accumulated (+=) across the
    // [startDevice, endDevice] range, so multi-device runs report cumulative
    // bandwidth.
    //print info for user
    switch(kind)
    {
    case DEVICE_TO_HOST: printf("Device to Host Bandwidth for ");
        break;
    case HOST_TO_DEVICE: printf("Host to Device Bandwidth for ");
        break;
    case DEVICE_TO_DEVICE: printf("Device to Device Bandwidth\n");
        break;
    }
    if( DEVICE_TO_DEVICE != kind )
    {   switch(memMode)
        {
        case PAGEABLE: printf("Pageable memory\n");
            break;
        case PINNED: printf("Pinned memory\n");
            if (wc) printf("Write-Combined memory enabled\n");
            break;
        }
    }
    //count the number of copies to make
    // NOTE(review): the while-loop below takes 10 steps in the 100KB segment
    // whereas the integer division in this formula yields 9; the leading
    // "1 +" happens to compensate, so 'count' matches the actual number of
    // iterations. Re-verify this invariant if the step schedule changes.
    unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB)
        + ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB)
        + ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB)
        + ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB)
        + ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB)
        + ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB)
        + ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB);
    // NOTE(review): malloc results are unchecked; host OOM would crash in the
    // initialization loop below.
    unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
    float *bandwidths = ( float * ) malloc( count * sizeof(float) );
    // Before calculating the cumulative bandwidth, zero the accumulators
    for (int i = 0; i < count; i++)
        bandwidths[i] = 0.0f;
    // Use the device asked by the user
    for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
    {
        hipSetDevice(currentDevice);
        //Run the shmoo
        int iteration = 0;
        unsigned int memSize = 0;
        while( memSize <= SHMOO_MEMSIZE_MAX )
        {
            // Advance memSize by an increment chosen from its current band;
            // note the size is bumped BEFORE it is recorded and measured, so
            // the first sample is 1KB and the last slightly exceeds the MAX.
            if( memSize < SHMOO_LIMIT_20KB )
            {
                memSize += SHMOO_INCREMENT_1KB;
            }
            else if( memSize < SHMOO_LIMIT_50KB )
            {
                memSize += SHMOO_INCREMENT_2KB;
            }else if( memSize < SHMOO_LIMIT_100KB )
            {
                memSize += SHMOO_INCREMENT_10KB;
            }else if( memSize < SHMOO_LIMIT_1MB )
            {
                memSize += SHMOO_INCREMENT_100KB;
            }else if( memSize < SHMOO_LIMIT_16MB )
            {
                memSize += SHMOO_INCREMENT_1MB;
            }else if( memSize < SHMOO_LIMIT_32MB )
            {
                memSize += SHMOO_INCREMENT_2MB;
            }else
            {
                memSize += SHMOO_INCREMENT_4MB;
            }
            memSizes[iteration] = memSize;
            switch(kind)
            {
            case DEVICE_TO_HOST: bandwidths[iteration] += testDeviceToHostTransfer( memSizes[iteration], memMode, wc );
                break;
            case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer( memSizes[iteration], memMode, wc );
                break;
            case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer( memSizes[iteration] );
                break;
            }
            iteration++;
            printf(".");
        }
    } // Complete the bandwidth computation on all the devices
    printf("\n");
    //print results
    if( CSV == printmode)
    {
        printResultsCSV(memSizes, bandwidths, count);
    }
    else
    {
        printResultsReadable(memSizes, bandwidths, count);
    }
    //clean up
    free(memSizes);
    free(bandwidths);
}
///////////////////////////////////////////////////////////////////////////////
// test the bandwidth of a device to host memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
    // Measures device-to-host copy bandwidth (MB/s, MB = 2^20 bytes) for one
    // transfer size, averaged over MEMCOPY_ITERATIONS back-to-back copies.
    // memMode selects pinned vs pageable host buffers; wc requests
    // write-combined pinned memory (CUDART >= 2020 path only).
    unsigned int timer = 0;
    float elapsedTimeInMs = 0.0f;
    float bandwidthInMBs = 0.0f;
    unsigned char *h_idata = NULL;
    unsigned char *h_odata = NULL;
    hipEvent_t start, stop;
    cutilCheckError( cutCreateTimer( &timer ) );
    cutilSafeCall ( hipEventCreate( &start ) );
    cutilSafeCall ( hipEventCreate( &stop ) );
    //allocate host memory
    if( PINNED == memMode )
    {
        //pinned memory mode - use special function to get OS-pinned memory
#if CUDART_VERSION >= 2020
        cutilSafeCall( hipHostMalloc( (void**)&h_idata, memSize, (wc) ? hipHostMallocWriteCombined : 0 ) );
        cutilSafeCall( hipHostMalloc( (void**)&h_odata, memSize, (wc) ? hipHostMallocWriteCombined : 0 ) );
#else
        cutilSafeCall( hipHostMalloc( (void**)&h_idata, memSize ) );
        cutilSafeCall( hipHostMalloc( (void**)&h_odata, memSize ) );
#endif
    }
    else
    {
        //pageable memory mode - use malloc
        // NOTE(review): malloc results are unchecked here; an OOM would crash
        // in the initialization loop below.
        h_idata = (unsigned char *)malloc( memSize );
        h_odata = (unsigned char *)malloc( memSize );
    }
    //initialize the memory with a repeating 0..255 byte pattern
    for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
    {
        h_idata[i] = (unsigned char) (i & 0xff);
    }
    // allocate device memory
    unsigned char* d_idata;
    cutilSafeCall( hipMalloc( (void**) &d_idata, memSize));
    //initialize the device memory
    cutilSafeCall( hipMemcpy( d_idata, h_idata, memSize,
                              hipMemcpyHostToDevice) );
    //copy data from GPU to Host
    // Both a CPU timer and GPU events bracket the copy loop; which reading is
    // used is decided after the loop (see below).
    cutilCheckError( cutStartTimer( timer));
    cutilSafeCall( hipEventRecord( start, 0 ) );
    if( PINNED == memMode )
    {
        // Pinned buffers allow async copies; all iterations are queued on
        // stream 0 and only complete at the synchronize below.
        for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
        {
            cutilSafeCall( hipMemcpyAsync( h_odata, d_idata, memSize,
                                           hipMemcpyDeviceToHost, 0) );
        }
    }
    else
    {
        for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
        {
            cutilSafeCall( hipMemcpy( h_odata, d_idata, memSize,
                                      hipMemcpyDeviceToHost) );
        }
    }
    cutilSafeCall( hipEventRecord( stop, 0 ) );
    // make sure GPU has finished copying
    cutilSafeCall( hipDeviceSynchronize() );
    //get the the total elapsed time in ms
    cutilCheckError( cutStopTimer( timer));
    cutilSafeCall( hipEventElapsedTime( &elapsedTimeInMs, start, stop ) );
    // GPU event timing is trusted only for pinned mode; for pageable copies
    // (or when --cputiming was given) the CPU timer reading is used instead.
    if( PINNED != memMode || bDontUseGPUTiming )
    {
        elapsedTimeInMs = cutGetTimerValue( timer);
    }
    //calculate bandwidth in MB/s (1e3f converts ms to s; 1<<20 bytes per MB)
    bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
        (elapsedTimeInMs * (float)(1 << 20));
    //clean up memory
    cutilSafeCall( hipEventDestroy(stop) );
    cutilSafeCall( hipEventDestroy(start) );
    cutilCheckError( cutDeleteTimer( timer));
    if( PINNED == memMode )
    {
        cutilSafeCall( hipHostFree(h_idata) );
        cutilSafeCall( hipHostFree(h_odata) );
    }
    else
    {
        free(h_idata);
        free(h_odata);
    }
    cutilSafeCall(hipFree(d_idata));
    return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a host to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
    // Measures host-to-device copy bandwidth (MB/s, MB = 2^20 bytes) for one
    // transfer size, averaged over MEMCOPY_ITERATIONS copies. memMode selects
    // pinned vs pageable host buffers; wc requests write-combined pinned
    // memory (CUDART >= 2020 path only).
    unsigned int timer = 0;
    float elapsedTimeInMs = 0.0f;
    float bandwidthInMBs = 0.0f;
    hipEvent_t start, stop;
    cutilCheckError( cutCreateTimer( &timer ) );
    cutilSafeCall( hipEventCreate( &start ) );
    cutilSafeCall( hipEventCreate( &stop ) );
    //allocate host memory
    unsigned char *h_odata = NULL;
    if( PINNED == memMode )
    {
#if CUDART_VERSION >= 2020
        //pinned memory mode - use special function to get OS-pinned memory
        cutilSafeCall( hipHostMalloc( (void**)&h_odata, memSize, (wc) ? hipHostMallocWriteCombined : 0 ) );
#else
        //pinned memory mode - use special function to get OS-pinned memory
        cutilSafeCall( hipHostMalloc( (void**)&h_odata, memSize ) );
#endif
    }
    else
    {
        //pageable memory mode - use malloc
        // NOTE(review): malloc result is unchecked; OOM would crash in the
        // initialization loop below.
        h_odata = (unsigned char *)malloc( memSize );
    }
    // NOTE(review): these two buffers are allocated and initialized but never
    // otherwise referenced in this function (only freed at the end) —
    // presumably leftover from an older CPU-cache-flushing scheme. Confirm
    // before removing.
    unsigned char *h_cacheClear1 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
    unsigned char *h_cacheClear2 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
    //initialize the memory with a repeating 0..255 byte pattern
    for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
    {
        h_odata[i] = (unsigned char) (i & 0xff);
    }
    for(unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++)
    {
        h_cacheClear1[i] = (unsigned char) (i & 0xff);
        h_cacheClear2[i] = (unsigned char) (0xff - (i & 0xff));
    }
    //allocate device memory
    unsigned char* d_idata;
    cutilSafeCall( hipMalloc( (void**) &d_idata, memSize));
    // Both a CPU timer and GPU events bracket the copy loop; the reading used
    // is chosen after the loop (see below).
    cutilCheckError( cutStartTimer( timer));
    cutilSafeCall( hipEventRecord( start, 0 ) );
    //copy host memory to device memory
    if( PINNED == memMode )
    {
        // Pinned buffers allow async copies; all iterations are queued on
        // stream 0 and only complete at the synchronize below.
        for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
        {
            cutilSafeCall( hipMemcpyAsync( d_idata, h_odata, memSize,
                                           hipMemcpyHostToDevice, 0) );
        }
    }
    else {
        for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
        {
            cutilSafeCall( hipMemcpy( d_idata, h_odata, memSize,
                                      hipMemcpyHostToDevice) );
        }
    }
    cutilSafeCall( hipEventRecord( stop, 0 ) );
    cutilSafeCall( hipDeviceSynchronize() );
    //total elapsed time in ms
    cutilCheckError( cutStopTimer( timer));
    cutilSafeCall( hipEventElapsedTime( &elapsedTimeInMs, start, stop ) );
    // GPU event timing is trusted only for pinned mode; for pageable copies
    // (or when --cputiming was given) the CPU timer reading is used instead.
    if ( PINNED != memMode || bDontUseGPUTiming )
    {
        elapsedTimeInMs = cutGetTimerValue( timer);
    }
    // NOTE(review): this reset is redundant (the timer is deleted below) and
    // the device-to-host variant does not do it — harmless inconsistency.
    cutilCheckError( cutResetTimer( timer));
    //calculate bandwidth in MB/s (1e3f converts ms to s; 1<<20 bytes per MB)
    bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
        (elapsedTimeInMs * (float)(1 << 20));
    //clean up memory
    cutilSafeCall( hipEventDestroy(stop) );
    cutilSafeCall( hipEventDestroy(start) );
    cutilCheckError( cutDeleteTimer( timer));
    if( PINNED == memMode )
    {
        cutilSafeCall( hipHostFree(h_odata) );
    }
    else
    {
        free(h_odata);
    }
    free(h_cacheClear1);
    free(h_cacheClear2);
    cutilSafeCall(hipFree(d_idata));
    return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a device to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToDeviceTransfer(unsigned int memSize)
{
    // Measures device-to-device copy bandwidth (MB/s, MB = 2^20 bytes) for
    // one transfer size, averaged over MEMCOPY_ITERATIONS copies.
    unsigned int timer = 0;
    float elapsedTimeInMs = 0.0f;
    float bandwidthInMBs = 0.0f;
    hipEvent_t start, stop;
    cutilCheckError( cutCreateTimer( &timer ) );
    cutilSafeCall( hipEventCreate( &start ) );
    cutilSafeCall( hipEventCreate( &stop ) );
    //allocate host memory
    // NOTE(review): malloc result is unchecked; OOM would crash in the
    // initialization loop below.
    unsigned char *h_idata = (unsigned char *)malloc( memSize );
    //initialize the host memory with a repeating 0..255 byte pattern
    for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
    {
        h_idata[i] = (unsigned char) (i & 0xff);
    }
    //allocate device memory
    unsigned char *d_idata;
    cutilSafeCall( hipMalloc( (void**) &d_idata, memSize));
    unsigned char *d_odata;
    cutilSafeCall( hipMalloc( (void**) &d_odata, memSize));
    //initialize memory
    cutilSafeCall( hipMemcpy( d_idata, h_idata, memSize,
                              hipMemcpyHostToDevice) );
    //run the memcopy
    cutilCheckError( cutStartTimer( timer));
    cutilSafeCall( hipEventRecord( start, 0 ) );
    for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
    {
        cutilSafeCall( hipMemcpy( d_odata, d_idata, memSize,
                                  hipMemcpyDeviceToDevice) );
    }
    cutilSafeCall( hipEventRecord( stop, 0 ) );
    //Since device to device memory copies are non-blocking,
    //hipDeviceSynchronize() is required in order to get
    //proper timing.
    cutilSafeCall( hipDeviceSynchronize() );
    //get the the total elapsed time in ms
    cutilCheckError( cutStopTimer( timer));
    cutilSafeCall( hipEventElapsedTime( &elapsedTimeInMs, start, stop ) );
    // When --cputiming was given, prefer the CPU timer over GPU events.
    if ( bDontUseGPUTiming )
    {
        elapsedTimeInMs = cutGetTimerValue( timer);
    }
    //calculate bandwidth in MB/s
    // Factor 2.0f: each device-to-device copy both reads and writes memSize
    // bytes of device memory, so the memory traffic is twice the copy size.
    bandwidthInMBs = 2.0f * (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
        (elapsedTimeInMs * (float)(1 << 20));
    //clean up memory
    cutilCheckError( cutDeleteTimer( timer));
    free(h_idata);
    cutilSafeCall(hipEventDestroy(stop));
    cutilSafeCall(hipEventDestroy(start));
    cutilSafeCall(hipFree(d_idata));
    cutilSafeCall(hipFree(d_odata));
    return bandwidthInMBs;
}
/////////////////////////////////////////////////////////
//print results in an easily read format
////////////////////////////////////////////////////////
void printResultsReadable(unsigned int *memSizes, float *bandwidths, unsigned int count)
{
    // Emit a header followed by one "size <tabs> bandwidth" row per sample,
    // then flush so the table appears even if the process aborts later.
    printf("Transfer Size (Bytes)\tBandwidth(MB/s)\n");
    for (unsigned int idx = 0; idx < count; ++idx)
    {
        printf("%9u\t\t%.1f\n", memSizes[idx], bandwidths[idx]);
    }
    printf("\n");
    fflush(stdout);
}
///////////////////////////////////////////////////////////////////////////
//print results in CSV format
///////////////////////////////////////////////////////////////////////////
void printResultsCSV(unsigned int *memSizes, float *bandwidths, unsigned int count)
{
    // Two CSV rows: first all transfer sizes, then all bandwidths, each
    // preceded by a label cell. Flush so output survives a later abort.
    printf("Transfer size (Bytes),");
    for (unsigned int col = 0; col < count; ++col)
    {
        printf("%u,", memSizes[col]);
    }
    printf("\n");
    printf("Bandwidth (MB/s),");
    for (unsigned int col = 0; col < count; ++col)
    {
        printf("%.1f,", bandwidths[col]);
    }
    printf("\n\n");
    fflush(stdout);
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
// Prints the usage banner and the full command-line option reference.
// Keep this in sync with the flags parsed in runTest().
void printHelp(void)
{
    printf("Usage: bandwidthTest [OPTION]...\n");
    printf("Test the bandwidth for device to host, host to device, and device to device transfers\n");
    printf("\n");
    printf("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n");
    printf("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n");
    printf("\n");
    printf("Options:\n");
    printf("--help\tDisplay this help menu\n");
    printf("--csv\tPrint results as a CSV\n");
    printf("--device=[deviceno]\tSpecify the device device to be used\n");
    printf(" all - compute cumulative bandwidth on all the devices\n");
    printf(" 0,1,2,...,n - Specify any particular device to be used\n");
    printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n");
    printf(" pageable - pageable memory\n");
    printf(" pinned - non-pageable system memory\n");
    printf("--mode=[MODE]\tSpecify the mode to use\n");
    printf(" quick - performs a quick measurement\n");
    printf(" range - measures a user-specified range of values\n");
    printf(" shmoo - performs an intense shmoo of a large range of values\n");
    printf("--htod\tMeasure host to device transfers\n");
    printf("--dtoh\tMeasure device to host transfers\n");
    printf("--dtod\tMeasure device to device transfers\n");
    // --wc only exists on runtimes where write-combined pinned allocation
    // is available (same guard as the allocation sites above).
#if CUDART_VERSION >= 2020
    printf("--wc\tAllocate pinned memory as write-combined\n");
#endif
    printf("--cputiming\tForce CPU-based timing always\n");
    printf("Range mode options\n");
    printf("--start=[SIZE]\tStarting transfer size in bytes\n");
    printf("--end=[SIZE]\tEnding transfer size in bytes\n");
    printf("--increment=[SIZE]\tIncrement size in bytes\n");
}
| 14af328a09b8a3485c7e4c2c08790e6810872006.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/*
* This is a simple test program to measure the memcopy bandwidth of the GPU.
* It can measure device to device copy bandwidth, host to device copy bandwidth
* for pageable and pinned memory, and device to host copy bandwidth for pageable
* and pinned memory.
*
* Usage:
* ./bandwidthTest [option]...
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil_inline.h>
#include <cuda.h>
// defines, project
#define MEMCOPY_ITERATIONS 10
#define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M
#define DEFAULT_INCREMENT (1 << 22) //4 M
#define CACHE_CLEAR_SIZE (1 << 24) //16 M
//shmoo mode defines
#define SHMOO_MEMSIZE_MAX (1 << 26) //64 M
#define SHMOO_MEMSIZE_START (1 << 10) //1 KB
#define SHMOO_INCREMENT_1KB (1 << 10) //1 KB
#define SHMOO_INCREMENT_2KB (1 << 11) //2 KB
#define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB
#define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_INCREMENT_1MB (1 << 20) //1 MB
#define SHMOO_INCREMENT_2MB (1 << 21) //2 MB
#define SHMOO_INCREMENT_4MB (1 << 22) //4 MB
#define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB
#define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB
#define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_LIMIT_1MB (1 << 20) //1 MB
#define SHMOO_LIMIT_16MB (1 << 24) //16 MB
#define SHMOO_LIMIT_32MB (1 << 25) //32 MB
//enums, project
enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE };
enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE };
enum printMode { USER_READABLE, CSV };
enum memoryMode { PINNED, PAGEABLE };
// if true, use CPU based timing for everything
static bool bDontUseGPUTiming;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(const int argc, const char **argv);
void testBandwidth( unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testDeviceToDeviceTransfer(unsigned int memSize);
void printResultsReadable(unsigned int *memSizes, float *bandwidths, unsigned int count);
void printResultsCSV(unsigned int *memSizes, float *bandwidths, unsigned int count);
void printHelp(void);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
    // Parse arguments and run the selected bandwidth tests, then let cutil
    // handle the exit (e.g. --noprompt handling).
    runTest(argc, (const char**)argv);
    cutilExit(argc, argv);
}
///////////////////////////////////////////////////////////////////////////////
//Parse args, run the appropriate tests
///////////////////////////////////////////////////////////////////////////////
// Parses all command-line options, validates them, and dispatches the
// requested bandwidth tests. Any invalid argument prints a message and
// returns early (the program then exits via main).
void runTest(const int argc, const char **argv)
{
    int start = DEFAULT_SIZE;
    int end = DEFAULT_SIZE;
    int startDevice = 0;
    int endDevice = 0;
    int increment = DEFAULT_INCREMENT;
    testMode mode = QUICK_MODE;
    bool htod = false;
    bool dtoh = false;
    bool dtod = false;
    bool wc = false;
    char *modeStr;
    char *device = NULL;
    printMode printmode = USER_READABLE;
    char *memModeStr = NULL;
    memoryMode memMode = PAGEABLE;
    //process command line args
    if(cutCheckCmdLineFlag( argc, argv, "help"))
    {
        printHelp();
        return;
    }
    if(cutCheckCmdLineFlag( argc, argv, "csv"))
    {
        printmode = CSV;
    }
    // --memory=pageable|pinned (default pageable)
    if( cutGetCmdLineArgumentstr(argc, argv, "memory", &memModeStr) )
    {
        if( strcmp(memModeStr, "pageable") == 0 )
        {
            memMode = PAGEABLE;
        }
        else if( strcmp(memModeStr, "pinned") == 0)
        {
            memMode = PINNED;
        }
        else
        {
            printf("Invalid memory mode - valid modes are pageable or pinned\n");
            printf("See --help for more information\n");
            return;
        }
    }
    else
    {
        //default - pageable memory
        memMode = PAGEABLE;
    }
    // --device=all|<n>: selects [startDevice, endDevice]; "all" makes the
    // later test loops accumulate cumulative bandwidth over every device.
    if( cutGetCmdLineArgumentstr(argc, argv, "device", &device) )
    {
        int deviceCount;
        cudaGetDeviceCount(&deviceCount);
        if( deviceCount == 0 )
        {
            printf("!!!!!No devices found!!!!!\n");
            return;
        }
        if( strcmp (device, "all") == 0 )
        {
            printf ("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n");
            startDevice = 0;
            endDevice = deviceCount-1;
        }
        else
        {
            startDevice = endDevice = atoi(device);
            if( startDevice >= deviceCount || startDevice < 0)
            {
                printf("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0);
                startDevice = endDevice = 0;
            }
        }
    }
    printf("Running on......\n");
    for( int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
    {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, currentDevice);
        printf (" device %d:%s\n", currentDevice,deviceProp.name);
    }
    // --mode=quick|shmoo|range (default quick)
    if( cutGetCmdLineArgumentstr(argc, argv, "mode", &modeStr) )
    {
        //figure out the mode
        if( strcmp(modeStr, "quick") == 0 )
        {
            mode = QUICK_MODE;
        }
        else if( strcmp(modeStr, "shmoo") == 0 )
        {
            mode = SHMOO_MODE;
        }
        else if( strcmp(modeStr, "range") == 0 )
        {
            mode = RANGE_MODE;
        }
        else
        {
            printf("Invalid mode - valid modes are quick, range, or shmoo\n");
            printf("See --help for more information\n");
            return;
        }
    }
    else
    {
        //default mode - quick
        mode = QUICK_MODE;
    }
    if(cutCheckCmdLineFlag( argc, argv, "htod"))
        htod = true;
    if(cutCheckCmdLineFlag( argc, argv, "dtoh"))
        dtoh = true;
    if(cutCheckCmdLineFlag( argc, argv, "dtod"))
        dtod = true;
#if CUDART_VERSION >= 2020
    if(cutCheckCmdLineFlag( argc, argv, "wc"))
        wc = true;
#endif
    if(cutCheckCmdLineFlag( argc, argv, "cputiming"))
        bDontUseGPUTiming = true;
    // No direction given: measure all three.
    if( !htod && !dtoh && !dtod )
    {
        //default: All
        htod = true;
        dtoh = true;
        dtod = true;
    }
    // Range mode requires explicit, positive --start/--end/--increment with
    // start <= end.
    if( RANGE_MODE == mode )
    {
        if( cutGetCmdLineArgumenti( argc, argv, "start", &start) )
        {
            if( start <= 0 )
            {
                printf("Illegal argument - start must be greater than zero\n");
                return;
            }
        }
        else
        {
            printf("Must specify a starting size in range mode\n");
            printf("See --help for more information\n");
            return;
        }
        if( cutGetCmdLineArgumenti( argc, argv, "end", &end) )
        {
            if( end <= 0 )
            {
                printf("Illegal argument - end must be greater than zero\n");
                return;
            }
            if( start > end )
            {
                printf("Illegal argument - start is greater than end\n");
                return;
            }
        }
        else
        {
            printf("Must specify an end size in range mode.\n");
            printf("See --help for more information\n");
            return;
        }
        if( cutGetCmdLineArgumenti( argc, argv, "increment", &increment) )
        {
            if( increment <= 0 )
            {
                printf("Illegal argument - increment must be greater than zero\n");
                return;
            }
        }
        else
        {
            printf("Must specify an increment in user mode\n");
            printf("See --help for more information\n");
            return;
        }
    }
    // Run each requested direction with the chosen mode and devices.
    if( htod )
    {
        testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
                      mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
    }
    if( dtoh )
    {
        testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
                      mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc);
    }
    if( dtod )
    {
        testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
                      mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
    }
    printf("&&&& Test PASSED\n");
    // NOTE(review): only memModeStr is released; the strings returned for
    // "device" and "mode" are not freed here — confirm cutil ownership rules.
    cutFree( memModeStr);
    return;
}
///////////////////////////////////////////////////////////////////////////////
// Run a bandwidth test
///////////////////////////////////////////////////////////////////////////////
void
testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
              testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
    // Dispatch to the measurement routine matching the requested test mode.
    // start/end/increment are only meaningful for RANGE_MODE; the other modes
    // use their own built-in size schedules.
    if (QUICK_MODE == mode)
    {
        printf("Quick Mode\n");
        testBandwidthQuick( DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc );
    }
    else if (RANGE_MODE == mode)
    {
        printf("Range Mode\n");
        testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc);
    }
    else if (SHMOO_MODE == mode)
    {
        printf("Shmoo Mode\n");
        testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc);
    }
    else
    {
        printf("Invalid testing mode\n");
    }
}
//////////////////////////////////////////////////////////////////////
// Run a quick mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
    // Quick mode is a degenerate range test: start == end == size, so exactly
    // one transfer size is measured (the increment value is never stepped).
    testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc);
}
///////////////////////////////////////////////////////////////////////
// Run a range mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
                   memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
    // Measures bandwidth for every size in [start, end] stepped by 'increment'
    // on every device in [startDevice, endDevice], then prints the results.
    //count the number of copies we're going to run (both endpoints inclusive)
    unsigned int count = 1 + ((end - start) / increment);
    unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
    float *bandwidths = ( float * ) malloc( count * sizeof(float) );
    // Fail gracefully on host OOM instead of dereferencing NULL below.
    if (NULL == memSizes || NULL == bandwidths)
    {
        printf("Unable to allocate host memory for result arrays\n");
        free(memSizes);
        free(bandwidths);
        return;
    }
    //print information for use
    switch(kind)
    {
    case DEVICE_TO_HOST: printf("Device to Host Bandwidth for ");
        break;
    case HOST_TO_DEVICE: printf("Host to Device Bandwidth for ");
        break;
    case DEVICE_TO_DEVICE: printf("Device to Device Bandwidth\n");
        break;
    }
    if( DEVICE_TO_DEVICE != kind )
    {
        switch(memMode)
        {
        case PAGEABLE: printf("Pageable memory\n");
            break;
        case PINNED: printf("Pinned memory\n");
            if (wc) printf("Write-Combined memory enabled\n");
            break;
        }
    }
    // Zero the accumulators before summing bandwidth across devices.
    // (unsigned index: 'count' is unsigned, and the copy loop below uses one)
    for (unsigned int i = 0; i < count; i++)
        bandwidths[i] = 0.0f;
    // Use the device(s) asked for by the user
    for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
    {
        cudaSetDevice(currentDevice);
        //run each of the copies
        for(unsigned int i = 0; i < count; i++)
        {
            memSizes[i] = start + i * increment;
            // Accumulate (+=) rather than overwrite so that multi-device runs
            // report cumulative bandwidth, consistent with testBandwidthShmoo
            // and with the zero-initialization above. Single-device results
            // are unchanged (0 + x == x).
            switch(kind)
            {
            case DEVICE_TO_HOST: bandwidths[i] += testDeviceToHostTransfer( memSizes[i], memMode, wc );
                break;
            case HOST_TO_DEVICE: bandwidths[i] += testHostToDeviceTransfer( memSizes[i], memMode, wc );
                break;
            case DEVICE_TO_DEVICE: bandwidths[i] += testDeviceToDeviceTransfer( memSizes[i] );
                break;
            }
            printf(".");
        }
        cudaThreadExit();
    } // Complete the bandwidth computation on all the devices
    printf("\n");
    //print results
    if(printmode == CSV)
    {
        printResultsCSV(memSizes, bandwidths, count);
    }
    else
    {
        printResultsReadable(memSizes, bandwidths, count);
    }
    //clean up
    free(memSizes);
    free(bandwidths);
}
//////////////////////////////////////////////////////////////////////////////
// Intense shmoo mode - covers a large range of values with varying increments
//////////////////////////////////////////////////////////////////////////////
void
testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
    // Sweeps transfer sizes from 1KB up past SHMOO_MEMSIZE_MAX (64MB) with a
    // step size that grows with the transfer size (1KB steps below 20KB, up
    // to 4MB steps above 32MB). Bandwidths are accumulated (+=) across the
    // [startDevice, endDevice] range, so multi-device runs report cumulative
    // bandwidth.
    //print info for user
    switch(kind)
    {
    case DEVICE_TO_HOST: printf("Device to Host Bandwidth for ");
        break;
    case HOST_TO_DEVICE: printf("Host to Device Bandwidth for ");
        break;
    case DEVICE_TO_DEVICE: printf("Device to Device Bandwidth\n");
        break;
    }
    if( DEVICE_TO_DEVICE != kind )
    {   switch(memMode)
        {
        case PAGEABLE: printf("Pageable memory\n");
            break;
        case PINNED: printf("Pinned memory\n");
            if (wc) printf("Write-Combined memory enabled\n");
            break;
        }
    }
    //count the number of copies to make
    // NOTE(review): the while-loop below takes 10 steps in the 100KB segment
    // whereas the integer division in this formula yields 9; the leading
    // "1 +" happens to compensate, so 'count' matches the actual number of
    // iterations. Re-verify this invariant if the step schedule changes.
    unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB)
        + ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB)
        + ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB)
        + ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB)
        + ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB)
        + ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB)
        + ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB);
    // NOTE(review): malloc results are unchecked; host OOM would crash in the
    // initialization loop below.
    unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
    float *bandwidths = ( float * ) malloc( count * sizeof(float) );
    // Before calculating the cumulative bandwidth, zero the accumulators
    for (int i = 0; i < count; i++)
        bandwidths[i] = 0.0f;
    // Use the device asked by the user
    for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
    {
        cudaSetDevice(currentDevice);
        //Run the shmoo
        int iteration = 0;
        unsigned int memSize = 0;
        while( memSize <= SHMOO_MEMSIZE_MAX )
        {
            // Advance memSize by an increment chosen from its current band;
            // note the size is bumped BEFORE it is recorded and measured, so
            // the first sample is 1KB and the last slightly exceeds the MAX.
            if( memSize < SHMOO_LIMIT_20KB )
            {
                memSize += SHMOO_INCREMENT_1KB;
            }
            else if( memSize < SHMOO_LIMIT_50KB )
            {
                memSize += SHMOO_INCREMENT_2KB;
            }else if( memSize < SHMOO_LIMIT_100KB )
            {
                memSize += SHMOO_INCREMENT_10KB;
            }else if( memSize < SHMOO_LIMIT_1MB )
            {
                memSize += SHMOO_INCREMENT_100KB;
            }else if( memSize < SHMOO_LIMIT_16MB )
            {
                memSize += SHMOO_INCREMENT_1MB;
            }else if( memSize < SHMOO_LIMIT_32MB )
            {
                memSize += SHMOO_INCREMENT_2MB;
            }else
            {
                memSize += SHMOO_INCREMENT_4MB;
            }
            memSizes[iteration] = memSize;
            switch(kind)
            {
            case DEVICE_TO_HOST: bandwidths[iteration] += testDeviceToHostTransfer( memSizes[iteration], memMode, wc );
                break;
            case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer( memSizes[iteration], memMode, wc );
                break;
            case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer( memSizes[iteration] );
                break;
            }
            iteration++;
            printf(".");
        }
    } // Complete the bandwidth computation on all the devices
    printf("\n");
    //print results
    if( CSV == printmode)
    {
        printResultsCSV(memSizes, bandwidths, count);
    }
    else
    {
        printResultsReadable(memSizes, bandwidths, count);
    }
    //clean up
    free(memSizes);
    free(bandwidths);
}
///////////////////////////////////////////////////////////////////////////////
// test the bandwidth of a device to host memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
    // Measures device-to-host copy bandwidth (MB/s, MB = 2^20 bytes) for one
    // transfer size, averaged over MEMCOPY_ITERATIONS back-to-back copies.
    // memMode selects pinned vs pageable host buffers; wc requests
    // write-combined pinned memory (CUDART >= 2020 path only).
    unsigned int timer = 0;
    float elapsedTimeInMs = 0.0f;
    float bandwidthInMBs = 0.0f;
    unsigned char *h_idata = NULL;
    unsigned char *h_odata = NULL;
    cudaEvent_t start, stop;
    cutilCheckError( cutCreateTimer( &timer ) );
    cutilSafeCall ( cudaEventCreate( &start ) );
    cutilSafeCall ( cudaEventCreate( &stop ) );
    //allocate host memory
    if( PINNED == memMode )
    {
        //pinned memory mode - use special function to get OS-pinned memory
#if CUDART_VERSION >= 2020
        cutilSafeCall( cudaHostAlloc( (void**)&h_idata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
        cutilSafeCall( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
#else
        cutilSafeCall( cudaMallocHost( (void**)&h_idata, memSize ) );
        cutilSafeCall( cudaMallocHost( (void**)&h_odata, memSize ) );
#endif
    }
    else
    {
        //pageable memory mode - use malloc
        // NOTE(review): malloc results are unchecked here; an OOM would crash
        // in the initialization loop below.
        h_idata = (unsigned char *)malloc( memSize );
        h_odata = (unsigned char *)malloc( memSize );
    }
    //initialize the memory with a repeating 0..255 byte pattern
    for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
    {
        h_idata[i] = (unsigned char) (i & 0xff);
    }
    // allocate device memory
    unsigned char* d_idata;
    cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize));
    //initialize the device memory
    cutilSafeCall( cudaMemcpy( d_idata, h_idata, memSize,
                               cudaMemcpyHostToDevice) );
    //copy data from GPU to Host
    // Both a CPU timer and GPU events bracket the copy loop; which reading is
    // used is decided after the loop (see below).
    cutilCheckError( cutStartTimer( timer));
    cutilSafeCall( cudaEventRecord( start, 0 ) );
    if( PINNED == memMode )
    {
        // Pinned buffers allow async copies; all iterations are queued on
        // stream 0 and only complete at the synchronize below.
        for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
        {
            cutilSafeCall( cudaMemcpyAsync( h_odata, d_idata, memSize,
                                            cudaMemcpyDeviceToHost, 0) );
        }
    }
    else
    {
        for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
        {
            cutilSafeCall( cudaMemcpy( h_odata, d_idata, memSize,
                                       cudaMemcpyDeviceToHost) );
        }
    }
    cutilSafeCall( cudaEventRecord( stop, 0 ) );
    // make sure GPU has finished copying
    // (cudaThreadSynchronize is the legacy name for cudaDeviceSynchronize)
    cutilSafeCall( cudaThreadSynchronize() );
    //get the the total elapsed time in ms
    cutilCheckError( cutStopTimer( timer));
    cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
    // GPU event timing is trusted only for pinned mode; for pageable copies
    // (or when --cputiming was given) the CPU timer reading is used instead.
    if( PINNED != memMode || bDontUseGPUTiming )
    {
        elapsedTimeInMs = cutGetTimerValue( timer);
    }
    //calculate bandwidth in MB/s (1e3f converts ms to s; 1<<20 bytes per MB)
    bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
        (elapsedTimeInMs * (float)(1 << 20));
    //clean up memory
    cutilSafeCall( cudaEventDestroy(stop) );
    cutilSafeCall( cudaEventDestroy(start) );
    cutilCheckError( cutDeleteTimer( timer));
    if( PINNED == memMode )
    {
        cutilSafeCall( cudaFreeHost(h_idata) );
        cutilSafeCall( cudaFreeHost(h_odata) );
    }
    else
    {
        free(h_idata);
        free(h_odata);
    }
    cutilSafeCall(cudaFree(d_idata));
    return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a host to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
// Measures host-to-device copy bandwidth in MB/s for one transfer size.
// Mirrors testDeviceToHostTransfer: pinned memory uses async copies timed with
// GPU events; pageable memory uses blocking copies timed with the CPU timer.
float
testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
unsigned int timer = 0;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
cudaEvent_t start, stop;
cutilCheckError( cutCreateTimer( &timer ) );
cutilSafeCall( cudaEventCreate( &start ) );
cutilSafeCall( cudaEventCreate( &stop ) );
//allocate host memory
unsigned char *h_odata = NULL;
if( PINNED == memMode )
{
#if CUDART_VERSION >= 2020
//pinned memory mode - use special function to get OS-pinned memory
cutilSafeCall( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
#else
//pinned memory mode - use special function to get OS-pinned memory
cutilSafeCall( cudaMallocHost( (void**)&h_odata, memSize ) );
#endif
}
else
{
//pageable memory mode - use malloc
h_odata = (unsigned char *)malloc( memSize );
}
// scratch buffers presumably intended to flush the CPU cache between runs;
// they are written below but never touched in the timed loop.
// NOTE(review): confirm intent before removing them.
unsigned char *h_cacheClear1 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
unsigned char *h_cacheClear2 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
//initialize the memory with a repeating 0..255 byte pattern
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_odata[i] = (unsigned char) (i & 0xff);
}
for(unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++)
{
h_cacheClear1[i] = (unsigned char) (i & 0xff);
h_cacheClear2[i] = (unsigned char) (0xff - (i & 0xff));
}
//allocate device memory
unsigned char* d_idata;
cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize));
cutilCheckError( cutStartTimer( timer));
cutilSafeCall( cudaEventRecord( start, 0 ) );
//copy host memory to device memory, MEMCOPY_ITERATIONS times, timed as a batch
if( PINNED == memMode )
{
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
cutilSafeCall( cudaMemcpyAsync( d_idata, h_odata, memSize,
cudaMemcpyHostToDevice, 0) );
}
}
else {
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
cutilSafeCall( cudaMemcpy( d_idata, h_odata, memSize,
cudaMemcpyHostToDevice) );
}
}
cutilSafeCall( cudaEventRecord( stop, 0 ) );
// wait for all queued copies before reading either timer
cutilSafeCall( cudaThreadSynchronize() );
//total elapsed time in ms
cutilCheckError( cutStopTimer( timer));
cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if ( PINNED != memMode || bDontUseGPUTiming )
{
// CPU wall-clock is authoritative for pageable (synchronous) copies
elapsedTimeInMs = cutGetTimerValue( timer);
}
cutilCheckError( cutResetTimer( timer));
//calculate bandwidth in MB/s (1 MB = 2^20 bytes)
bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
cutilSafeCall( cudaEventDestroy(stop) );
cutilSafeCall( cudaEventDestroy(start) );
cutilCheckError( cutDeleteTimer( timer));
if( PINNED == memMode )
{
cutilSafeCall( cudaFreeHost(h_odata) );
}
else
{
free(h_odata);
}
free(h_cacheClear1);
free(h_cacheClear2);
cutilSafeCall(cudaFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a device to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
// Measures device-to-device copy bandwidth in MB/s for one transfer size.
// The reported figure counts both the read and the write of each byte,
// hence the factor 2 in the bandwidth formula below.
float
testDeviceToDeviceTransfer(unsigned int memSize)
{
unsigned int timer = 0;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
cudaEvent_t start, stop;
cutilCheckError( cutCreateTimer( &timer ) );
cutilSafeCall( cudaEventCreate( &start ) );
cutilSafeCall( cudaEventCreate( &stop ) );
//allocate host memory
unsigned char *h_idata = (unsigned char *)malloc( memSize );
//initialize the host memory with a repeating 0..255 byte pattern
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char) (i & 0xff);
}
//allocate device memory (separate source and destination buffers)
unsigned char *d_idata;
cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize));
unsigned char *d_odata;
cutilSafeCall( cudaMalloc( (void**) &d_odata, memSize));
//initialize memory
cutilSafeCall( cudaMemcpy( d_idata, h_idata, memSize,
cudaMemcpyHostToDevice) );
//run the memcopy MEMCOPY_ITERATIONS times, timed as one batch
cutilCheckError( cutStartTimer( timer));
cutilSafeCall( cudaEventRecord( start, 0 ) );
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
cutilSafeCall( cudaMemcpy( d_odata, d_idata, memSize,
cudaMemcpyDeviceToDevice) );
}
cutilSafeCall( cudaEventRecord( stop, 0 ) );
//Since device to device memory copies are non-blocking,
//cudaThreadSynchronize() is required in order to get
//proper timing.
cutilSafeCall( cudaThreadSynchronize() );
//get the total elapsed time in ms
cutilCheckError( cutStopTimer( timer));
cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if ( bDontUseGPUTiming )
{
elapsedTimeInMs = cutGetTimerValue( timer);
}
//calculate bandwidth in MB/s; x2 because each copied byte is both read and
//written on the device
bandwidthInMBs = 2.0f * (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
cutilCheckError( cutDeleteTimer( timer));
free(h_idata);
cutilSafeCall(cudaEventDestroy(stop));
cutilSafeCall(cudaEventDestroy(start));
cutilSafeCall(cudaFree(d_idata));
cutilSafeCall(cudaFree(d_odata));
return bandwidthInMBs;
}
/////////////////////////////////////////////////////////
//print results in an easily read format
////////////////////////////////////////////////////////
// Prints one (transfer size, bandwidth) row per measurement as a
// human-readable, tab-separated table, followed by a blank line.
void printResultsReadable(unsigned int *memSizes, float *bandwidths, unsigned int count)
{
    printf("Transfer Size (Bytes)\tBandwidth(MB/s)\n");
    for (unsigned int row = 0; row != count; ++row)
        printf("%9u\t\t%.1f\n", memSizes[row], bandwidths[row]);
    printf("\n");
    fflush(stdout);
}
///////////////////////////////////////////////////////////////////////////
//print results in CSV format
///////////////////////////////////////////////////////////////////////////
// Prints the measurements as two CSV rows: one with the transfer sizes and
// one with the corresponding bandwidths (trailing comma preserved).
void printResultsCSV(unsigned int *memSizes, float *bandwidths, unsigned int count)
{
    printf("Transfer size (Bytes),");
    for (unsigned int col = 0; col != count; ++col)
        printf("%u,", memSizes[col]);
    printf("\n");
    printf("Bandwidth (MB/s),");
    for (unsigned int col = 0; col != count; ++col)
        printf("%.1f,", bandwidths[col]);
    printf("\n\n");
    fflush(stdout);
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
// Prints the command-line usage/help text for the bandwidth test.
// The --wc option is only advertised when the runtime supports write-combined
// pinned allocations (CUDART >= 2.2).
void printHelp(void)
{
printf("Usage: bandwidthTest [OPTION]...\n");
printf("Test the bandwidth for device to host, host to device, and device to device transfers\n");
printf("\n");
printf("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n");
printf("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n");
printf("\n");
printf("Options:\n");
printf("--help\tDisplay this help menu\n");
printf("--csv\tPrint results as a CSV\n");
// FIX: removed duplicated word ("the device device") from the help string
printf("--device=[deviceno]\tSpecify the device to be used\n");
printf(" all - compute cumulative bandwidth on all the devices\n");
printf(" 0,1,2,...,n - Specify any particular device to be used\n");
printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n");
printf(" pageable - pageable memory\n");
printf(" pinned - non-pageable system memory\n");
printf("--mode=[MODE]\tSpecify the mode to use\n");
printf(" quick - performs a quick measurement\n");
printf(" range - measures a user-specified range of values\n");
printf(" shmoo - performs an intense shmoo of a large range of values\n");
printf("--htod\tMeasure host to device transfers\n");
printf("--dtoh\tMeasure device to host transfers\n");
printf("--dtod\tMeasure device to device transfers\n");
#if CUDART_VERSION >= 2020
printf("--wc\tAllocate pinned memory as write-combined\n");
#endif
printf("--cputiming\tForce CPU-based timing always\n");
printf("Range mode options\n");
printf("--start=[SIZE]\tStarting transfer size in bytes\n");
printf("--end=[SIZE]\tEnding transfer size in bytes\n");
printf("--increment=[SIZE]\tIncrement size in bytes\n");
}
|
ba46f83e3f9c66b0e23f07217f1604152ced2c0a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kstd_wfc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
double *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
double *z = NULL;
hipMalloc(&z, XSIZE*YSIZE);
double *items = NULL;
hipMalloc(&items, XSIZE*YSIZE);
double winding = 1;
double *phi = NULL;
hipMalloc(&phi, XSIZE*YSIZE);
double2 *wfc = NULL;
hipMalloc(&wfc, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kstd_wfc), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,items,winding,phi,wfc);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kstd_wfc), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,items,winding,phi,wfc);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kstd_wfc), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,items,winding,phi,wfc);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ba46f83e3f9c66b0e23f07217f1604152ced2c0a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kstd_wfc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Autotuning driver for the kstd_wfc kernel: for each requested matrix size
// and each of the 20 candidate block shapes, launches the kernel once after
// context init, 10 more times as warm-up, then times 1000 back-to-back
// launches and prints "[microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
// argv[1] : number of entries of matrices_ to sweep.
// NOTE: only kernel *launch* time is measured (no sync inside the timed loop),
// matching the original benchmark's behavior.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // BUG FIX: cudaMalloc takes a size in BYTES. The original passed
            // XSIZE*YSIZE (an element count) for double/double2 buffers, so a
            // kernel touching the full grid would access past the allocation.
            size_t n = (size_t)XSIZE * YSIZE;
            double *x = NULL;
            cudaMalloc(&x, n * sizeof(double));
            double *y = NULL;
            cudaMalloc(&y, n * sizeof(double));
            double *z = NULL;
            cudaMalloc(&z, n * sizeof(double));
            double *items = NULL;
            cudaMalloc(&items, n * sizeof(double));
            double winding = 1;
            double *phi = NULL;
            cudaMalloc(&phi, n * sizeof(double));
            double2 *wfc = NULL;
            cudaMalloc(&wfc, n * sizeof(double2));
            // round launch extents up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // no-op that forces context creation before timing
            kstd_wfc<<<gridBlock,threadBlock>>>(x,y,z,items,winding,phi,wfc);
            cudaDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                kstd_wfc<<<gridBlock,threadBlock>>>(x,y,z,items,winding,phi,wfc);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                kstd_wfc<<<gridBlock,threadBlock>>>(x,y,z,items,winding,phi,wfc);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: release the buffers after each configuration; the
            // original leaked every allocation across the whole sweep.
            cudaFree(x); cudaFree(y); cudaFree(z);
            cudaFree(items); cudaFree(phi); cudaFree(wfc);
        }
    }
}
26105b2500ab168c01bd6ba4c31f4d842264b779.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RMSprop parameter update for a batch of parameter blocks.
// Launch layout (from the indexing below): blockIdx.x selects the update slot,
// blockIdx.y and threadIdx.x tile the block's `dimension` entries; each thread
// handles `numberIterations` consecutive entries.
// parameterIndices[u] == -1 or counts[u] == 0 marks an inactive slot (skipped).
__global__ void rmspropKernel (
int numberIterations, // consecutive entries processed per thread
int* parameterIndices, // per-update parameter block index, -1 = skip
int* counts, // number of gradient contributions per update slot
int dimension, // entries per parameter block
float* parameters,
float* gradient,
float learningRate,
float decay, // moving-average decay rate
float oneMinusDecay, // precomputed 1 - decay
float epsilon, // numerical stabilizer inside the sqrt
float* accumulation) { // running mean of squared gradients (persistent state)
int updateIndex = blockIdx.x;
int parameterIndex = parameterIndices[updateIndex];
int count = counts[updateIndex];
if(parameterIndex != -1 && count > 0) {
// NOTE(review): 1.0 is a double literal, so this divide is done in double
// and narrowed; 1.0f would keep it in float — confirm before changing.
float scalingFactor = 1.0 / (float)count;
int startEntryIndex = (blockIdx.y * blockDim.x + threadIdx.x) * numberIterations;
int firstParameterEntryIndex = parameterIndex * dimension;
int startParameterEntryIndex = firstParameterEntryIndex + startEntryIndex;
int startGradientEntryIndex = updateIndex * dimension + startEntryIndex;
// clamp this thread's range to the end of the parameter block
int exclusiveEndParameterEntryIndex = min(startParameterEntryIndex + numberIterations, firstParameterEntryIndex + dimension);
int parameterEntryIndex = startParameterEntryIndex;
int gradientEntryIndex = startGradientEntryIndex;
while(parameterEntryIndex < exclusiveEndParameterEntryIndex) {
// average the accumulated gradient, update the squared-gradient moving
// average, then apply the adaptive step
float scaledDerivative = scalingFactor * gradient[gradientEntryIndex];
float updatedAccumulation = decay * accumulation[parameterEntryIndex] + oneMinusDecay * (scaledDerivative * scaledDerivative);
accumulation[parameterEntryIndex] = updatedAccumulation;
float adaptiveLearningRate = learningRate / sqrtf(updatedAccumulation + epsilon);
float update = -adaptiveLearningRate * scaledDerivative;
parameters[parameterEntryIndex] += update;
parameterEntryIndex++;
gradientEntryIndex++;
}
}
} | 26105b2500ab168c01bd6ba4c31f4d842264b779.cu | __global__ void rmspropKernel (
int numberIterations,
int* parameterIndices,
int* counts,
int dimension,
float* parameters,
float* gradient,
float learningRate,
float decay,
float oneMinusDecay,
float epsilon,
float* accumulation) {
int updateIndex = blockIdx.x;
int parameterIndex = parameterIndices[updateIndex];
int count = counts[updateIndex];
if(parameterIndex != -1 && count > 0) {
float scalingFactor = 1.0 / (float)count;
int startEntryIndex = (blockIdx.y * blockDim.x + threadIdx.x) * numberIterations;
int firstParameterEntryIndex = parameterIndex * dimension;
int startParameterEntryIndex = firstParameterEntryIndex + startEntryIndex;
int startGradientEntryIndex = updateIndex * dimension + startEntryIndex;
int exclusiveEndParameterEntryIndex = min(startParameterEntryIndex + numberIterations, firstParameterEntryIndex + dimension);
int parameterEntryIndex = startParameterEntryIndex;
int gradientEntryIndex = startGradientEntryIndex;
while(parameterEntryIndex < exclusiveEndParameterEntryIndex) {
float scaledDerivative = scalingFactor * gradient[gradientEntryIndex];
float updatedAccumulation = decay * accumulation[parameterEntryIndex] + oneMinusDecay * (scaledDerivative * scaledDerivative);
accumulation[parameterEntryIndex] = updatedAccumulation;
float adaptiveLearningRate = learningRate / sqrtf(updatedAccumulation + epsilon);
float update = -adaptiveLearningRate * scaledDerivative;
parameters[parameterEntryIndex] += update;
parameterEntryIndex++;
gradientEntryIndex++;
}
}
} |
681c575ad76b24025b0b8daf4f61b711f8b5d727.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MatrixMult.h"
#include "MatrixMult.cu.h"
#define WIDTH_A 1024//1024 //1024//2048
#define HEIGHT_A 1024//2048//2048//2048
#define WIDTH_B 1024//1536//4096//2048
#define TILE 64
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
// Benchmark driver: multiplies a HEIGHT_A x WIDTH_A matrix by a
// WIDTH_A x WIDTH_B matrix on the CPU (reference) and on the GPU with a naive
// and a tiled kernel, timing each and validating the GPU results against the
// sequential reference.
int main() {
    // set seed for rand()
    srand(2006);
    // 1. allocate host memory for the two matrices
    unsigned int size_A = WIDTH_A * HEIGHT_A;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);
    unsigned int size_B = WIDTH_B * WIDTH_A;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*) malloc(mem_size_B);
    // 2. initialize host memory
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);
    // 3. allocate device memory
    float* d_A;
    float* d_B;
    hipMalloc((void**) &d_A, mem_size_A);
    hipMalloc((void**) &d_B, mem_size_B);
    // 4. copy host memory to device
    hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
    // 5. allocate host memory for the result C (GPU result + CPU reference)
    unsigned int size_C = HEIGHT_A * WIDTH_B;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float* h_C = (float*) malloc(mem_size_C);
    float* seq_C = (float*) malloc(mem_size_C);
    // 6. allocate device memory for the result
    float* d_C;
    hipMalloc((void**) &d_C, mem_size_C);
    // 7. compute sequential matrix multiplication (reference + timing)
    {
        unsigned long int elapsed;
        struct timeval t_start, t_end, t_diff;
        gettimeofday(&t_start, NULL);
        matMult<float>(h_A, h_B, seq_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        gettimeofday(&t_end, NULL);
        timeval_subtract(&t_diff, &t_end, &t_start);
        elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
        printf("Sequential Naive version runs in: %lu microsecs\n", elapsed);
    }
    // 8. perform the calculation
    // setup execution parameters
    // NOTE(review): block(TILE, TILE) with TILE=64 requests 4096 threads per
    // block, which exceeds the usual 1024-thread limit — confirm the launch
    // actually succeeds (no error check follows the kernel calls).
    int dimy = HEIGHT_A;
    int dimx = WIDTH_B;
    dim3 block(TILE, TILE, 1);
    dim3 grid (dimx, dimy, 1);
    // execute the naive kernel, timed
    {
        unsigned long int elapsed;
        struct timeval t_start, t_end, t_diff;
        gettimeofday(&t_start, NULL);
        hipLaunchKernelGGL(( matMultKer<float>) , dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        //matMultTiledKer<float,TILE> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        //matMultCacheKer<float,TILE> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        hipDeviceSynchronize();
        gettimeofday(&t_end, NULL);
        timeval_subtract(&t_diff, &t_end, &t_start);
        elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
        printf("GPU version runs in: %lu microsecs\n", elapsed);
        float microsecPerMatrixMul = elapsed;
        double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
        double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
        printf( "Performance of Naive Parallel = %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
    }
    hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
    validate<float>(seq_C, h_C, size_C);
    // execute the tiled kernel, timed
    {
        unsigned long int elapsed;
        struct timeval t_start, t_end, t_diff;
        gettimeofday(&t_start, NULL);
        hipLaunchKernelGGL(( matMultTiledKer<float,TILE>) , dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        hipDeviceSynchronize();
        gettimeofday(&t_end, NULL);
        timeval_subtract(&t_diff, &t_end, &t_start);
        elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
        printf("GPU version runs in: %lu microsecs\n", elapsed);
        float microsecPerMatrixMul = elapsed;
        double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
        double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
        printf( "Performance of Tiled Parallel = %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
    }
    // 11. copy result from device to host
    hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
    // 12. validate
    validate<float>(seq_C, h_C, size_C);
    // 13. clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(seq_C); // BUG FIX: seq_C was allocated but never freed
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
}
| 681c575ad76b24025b0b8daf4f61b711f8b5d727.cu | #include "MatrixMult.h"
#include "MatrixMult.cu.h"
#define WIDTH_A 1024//1024 //1024//2048
#define HEIGHT_A 1024//2048//2048//2048
#define WIDTH_B 1024//1536//4096//2048
#define TILE 64
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
// Benchmark driver (CUDA twin of the HIP version): multiplies a
// HEIGHT_A x WIDTH_A matrix by a WIDTH_A x WIDTH_B matrix on the CPU
// (reference) and on the GPU with a naive and a tiled kernel, timing each and
// validating the GPU results against the sequential reference.
int main() {
    // set seed for rand()
    srand(2006);
    // 1. allocate host memory for the two matrices
    unsigned int size_A = WIDTH_A * HEIGHT_A;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);
    unsigned int size_B = WIDTH_B * WIDTH_A;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*) malloc(mem_size_B);
    // 2. initialize host memory
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);
    // 3. allocate device memory
    float* d_A;
    float* d_B;
    cudaMalloc((void**) &d_A, mem_size_A);
    cudaMalloc((void**) &d_B, mem_size_B);
    // 4. copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
    // 5. allocate host memory for the result C (GPU result + CPU reference)
    unsigned int size_C = HEIGHT_A * WIDTH_B;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float* h_C = (float*) malloc(mem_size_C);
    float* seq_C = (float*) malloc(mem_size_C);
    // 6. allocate device memory for the result
    float* d_C;
    cudaMalloc((void**) &d_C, mem_size_C);
    // 7. compute sequential matrix multiplication (reference + timing)
    {
        unsigned long int elapsed;
        struct timeval t_start, t_end, t_diff;
        gettimeofday(&t_start, NULL);
        matMult<float>(h_A, h_B, seq_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        gettimeofday(&t_end, NULL);
        timeval_subtract(&t_diff, &t_end, &t_start);
        elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
        printf("Sequential Naive version runs in: %lu microsecs\n", elapsed);
    }
    // 8. perform the calculation
    // setup execution parameters
    // NOTE(review): block(TILE, TILE) with TILE=64 requests 4096 threads per
    // block, which exceeds the usual 1024-thread limit — confirm the launch
    // actually succeeds (no error check follows the kernel calls).
    int dimy = HEIGHT_A;
    int dimx = WIDTH_B;
    dim3 block(TILE, TILE, 1);
    dim3 grid (dimx, dimy, 1);
    // execute the naive kernel, timed
    {
        unsigned long int elapsed;
        struct timeval t_start, t_end, t_diff;
        gettimeofday(&t_start, NULL);
        matMultKer<float> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        //matMultTiledKer<float,TILE> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        //matMultCacheKer<float,TILE> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        cudaDeviceSynchronize(); // FIX: cudaThreadSynchronize is deprecated
        gettimeofday(&t_end, NULL);
        timeval_subtract(&t_diff, &t_end, &t_start);
        elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
        printf("GPU version runs in: %lu microsecs\n", elapsed);
        float microsecPerMatrixMul = elapsed;
        double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
        double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
        printf( "Performance of Naive Parallel = %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
    }
    cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    validate<float>(seq_C, h_C, size_C);
    // execute the tiled kernel, timed
    {
        unsigned long int elapsed;
        struct timeval t_start, t_end, t_diff;
        gettimeofday(&t_start, NULL);
        matMultTiledKer<float,TILE> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B);
        cudaDeviceSynchronize(); // FIX: cudaThreadSynchronize is deprecated
        gettimeofday(&t_end, NULL);
        timeval_subtract(&t_diff, &t_end, &t_start);
        elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
        printf("GPU version runs in: %lu microsecs\n", elapsed);
        float microsecPerMatrixMul = elapsed;
        double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A;
        double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f));
        printf( "Performance of Tiled Parallel = %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y);
    }
    // 11. copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    // 12. validate
    validate<float>(seq_C, h_C, size_C);
    // 13. clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(seq_C); // BUG FIX: seq_C was allocated but never freed
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
|
b27f301624c7098121200df3ba2416185001cfc8.hip | // !!! This is a file automatically generated by hipify!!!
#include "DMOTri.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/count.h>
#include <thrust/sequence.h>
#include "CudaUtil.h"
#include "CudaAtomic.h"
#include "DMOCommon.h"
#include "SurfaceConfig.h"
#include "io/FileWriter.h"
#include "Serializer.h"
#include "DMOTriImplAtomic.h"
#include "DMOTriImplCub.h"
//#include "DMOTriFlatImplCub.h"
//using namespace SurfLS;
//using namespace Surf1D;
#ifdef USECUB
using namespace DMOImplCub;
#else
using namespace DMOImplAtomic;
#endif
namespace DMO {
// counts the number of elements with quality between 0 - 0.1 and so on.
// Builds a histogram of element qualities: bin i counts triangles whose
// quality lies in [i/n_cols, (i+1)/n_cols); q_min[0] additionally tracks the
// global minimum quality. Launched 1D, grid-stride loop over all triangles.
__global__ static void k_qualityHistogram(MeshTriDevice* mesh, ArrayView<int> q_vec, ArrayView<float> q_min, int n_cols, QualityCriterium q_crit) {
    const int stride = blockDim.x * gridDim.x;
    for (int t = threadIdx.x + blockIdx.x * blockDim.x; t < mesh->nTriangles; t += stride) {
        const Triangle& tri = mesh->triangles[t];
        const Vec3f corner[3] = { mesh->vertexPoints[tri.v0], mesh->vertexPoints[tri.v1], mesh->vertexPoints[tri.v2] };
        const float quality = qualityTri(corner, q_crit);
        myAtomicMin(&q_min[0], quality);
        // clamp below 1 so a perfect element still lands in the last bin
        const float clamped = fminf(0.9999f, quality);
        atomicAdd(&q_vec[size_t(clamped * n_cols)], 1);
    }
}
// finds the minimum element quality
// Reduces the global minimum element quality into q_min[0] via atomic min.
// Launched 1D, grid-stride loop over all triangles.
__global__ static void k_findMinimumQuality(MeshTriDevice* mesh, ArrayView<float> q_min, QualityCriterium q_crit) {
    const int stride = blockDim.x * gridDim.x;
    for (int t = threadIdx.x + blockIdx.x * blockDim.x; t < mesh->nTriangles; t += stride) {
        const Triangle& tri = mesh->triangles[t];
        const Vec3f corner[3] = { mesh->vertexPoints[tri.v0], mesh->vertexPoints[tri.v1], mesh->vertexPoints[tri.v2] };
        myAtomicMin(&q_min[0], qualityTri(corner, q_crit));
    }
}
// checks for non decreasing quality of every element
// Recomputes every element quality into currentQualities and verifies that no
// element dropped below the previous minimum; on violation it sets failure[0],
// logs the offending triangle, and traps via assert. Grid-stride, 1D launch.
__global__ static void k_updateCurrentQualities(MeshTriDevice* mesh, ArrayView<float> currentQualities, float lastMinQuality, ArrayView<bool> failure, QualityCriterium q_crit) {
    const int stride = blockDim.x * gridDim.x;
    for (int t = threadIdx.x + blockIdx.x * blockDim.x; t < mesh->nTriangles; t += stride) {
        const Triangle& tri = mesh->triangles[t];
        const Vec3f corner[3] = { mesh->vertexPoints[tri.v0], mesh->vertexPoints[tri.v1], mesh->vertexPoints[tri.v2] };
        const float q = qualityTri(corner, q_crit);
        if (q < lastMinQuality) {
            failure[0] = true;
            printf("minimum quality decreased! tri %i vertices %i %i %i q %f\n", t, tri.v0, tri.v1, tri.v2, q);
            assert(0);
        }
        currentQualities[t] = q;
    }
}
// Writes the quality of every triangle into outBuffer (one float per element,
// indexed by triangle id). Grid-stride, 1D launch.
__global__ static void k_getElementQualities(MeshTriDevice* mesh, float* outBuffer, QualityCriterium q_crit) {
    const int stride = blockDim.x * gridDim.x;
    for (int t = threadIdx.x + blockIdx.x * blockDim.x; t < mesh->nTriangles; t += stride) {
        const Triangle& tri = mesh->triangles[t];
        const Vec3f corner[3] = { mesh->vertexPoints[tri.v0], mesh->vertexPoints[tri.v1], mesh->vertexPoints[tri.v2] };
        outBuffer[t] = qualityTri(corner, q_crit);
    }
}
// ######################################################################## //
// ### DMOTriClass ######################################################## //
// ######################################################################## //
// Constructs a DMO optimizer for a triangle surface mesh. Shared optimizer
// settings (quality criterium, grid scale, iteration count) are forwarded to
// DMOBaseClass; all mesh-dependent GPU state is then built by init().
DMOTriClass::DMOTriClass(DMOMeshTri& dmo_mesh_, QualityCriterium qualityCriterium_, const float gridScale_, int n_iter_)
: DMOBaseClass(qualityCriterium_, gridScale_, n_iter_)
, dmo_mesh(dmo_mesh_)
//, qualityCriterium(qualityCriterium_)
//, gridScale(gridScale_)
//, n_iter(n_iter_)
{
init();
}
// One-time setup of all per-mesh GPU state: snapshots the initial vertex
// positions, fits the 1D/2D local surface approximations, initializes the
// nearest-neighbor map (identity) and the feature-vertex flags, and records
// the starting minimum quality used for the monotonicity check.
void DMOTriClass::init() {
vertexPointsInit = device_vector<Vec3f>(dmo_mesh.vertexPoints, dmo_mesh.vertexPoints + dmo_mesh.nVerticesSurf); // copy of initial vertex positions
localSurfacesInit1d = device_vector<localSurface1d>(dmo_mesh.nVerticesSurf); // initial local surfaces1d
computeLocalSurfaces1d(dmo_mesh, localSurfacesInit1d);
localSurfacesInit = device_vector<localSurface>(dmo_mesh.nVerticesSurf); // initial local surfaces
computeLocalSurfaces(dmo_mesh, localSurfacesInit);
// feature-vertex surfaces are only built when the mesh actually has features
if (dmo_mesh.nVerticesFeature > 0)
initLocalSurfacesFeature(dmo_mesh, surfacesRowPtr, localSurfacesFeatureInit, table);
//writeSurfaces(localSurfacesInit, "res/surfaces/surfaces2d2.binary");
// nearest neighbor of every vertex starts as the vertex itself
nearestNeighbors = device_vector<int>(dmo_mesh.nVerticesSurf);
thrust::sequence(nearestNeighbors.begin(), nearestNeighbors.end(), 0);
optimizeFeatureVertex = device_vector<bool>(dmo_mesh.nVerticesSurf, true);
calcOptFeatureVec(dmo_mesh, optimizeFeatureVertex);
// baseline for the per-iteration quality monotonicity assertion
lastMinQuality = findMinimumQuality();
cout << "Total points " << dmo_mesh.nVerticesSurf << endl;
cout << "Free Surf points " << dmo_mesh.nVerticesSurf - dmo_mesh.nVerticesFeature << endl;
cout << "Feature Surf points " << dmo_mesh.nVerticesFeature << endl;
cout << "Colors Free " << dmo_mesh.nColorsFree << endl;
cout << "Colors Feature " << dmo_mesh.nColorsFeature << endl;
currentQualities = device_vector<float>(dmo_mesh.nTriangles, -FLT_MAX);
// log "<iteration>,<min quality>" for iteration 0
cout << "0," << lastMinQuality << endl;
}
// Runs one full DMO pass: optimizes each color set of free surface vertices
// with the hierarchical 2D kernel, then each color set of feature vertices
// with the 1D kernel, refreshes nearest neighbors and normals, and checks
// that the minimum element quality did not decrease.
void DMOTriClass::doIteration() {
// dynamic shared memory per block: (maxNumHalfedges + 1) positions of
// 3 floats each, plus one int per halfedge
int dynMemSize2D = (dmo_mesh.maxNumHalfedges + 1) * 3 * sizeof(float) + dmo_mesh.maxNumHalfedges * sizeof(int);
int dynMemSize1D = (dmo_mesh.maxNumHalfedges + 1) * 3 * sizeof(float) + dmo_mesh.maxNumHalfedges * sizeof(int);
// vertices of the same color are independent -> one launch per color,
// one block per vertex in that color range
for (int cid = 0; cid < dmo_mesh.nColorsFree; ++cid) {
k_optimizeHierarchical2D<DMO_NQ* DMO_NQ/2, USE_SURF_OF_NN> << <dmo_mesh.colorOffsetsFree[cid + 1] - dmo_mesh.colorOffsetsFree[cid], DMO_NQ* DMO_NQ/2, dynMemSize2D >> >
(dmo_mesh.colorOffsetsFree[cid], dmo_mesh.colorOffsetsFree[cid + 1], dmo_mesh.d_mesh, affineFactor, qualityCriterium, gridScale,
raw_pointer_cast(vertexPointsInit.data()), raw_pointer_cast(localSurfacesInit.data()), raw_pointer_cast(nearestNeighbors.data()),
raw(table), raw(surfacesRowPtr), raw(localSurfacesFeatureInit), dmo_mesh.maxNumHalfedges + 1);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
}
for (int cid = 0; cid < dmo_mesh.nColorsFeature; ++cid) {
k_optimizeHierarchical1D<DMO_NQ> << <dmo_mesh.colorOffsetsFeature[cid + 1] - dmo_mesh.colorOffsetsFeature[cid], DMO_NQ, dynMemSize1D >> >
(dmo_mesh.colorOffsetsFeature[cid], dmo_mesh.colorOffsetsFeature[cid + 1], dmo_mesh.d_mesh, affineFactor, qualityCriterium, gridScale,
raw_pointer_cast(vertexPointsInit.data()), raw_pointer_cast(localSurfacesInit1d.data()), raw_pointer_cast(nearestNeighbors.data()),
raw_pointer_cast(optimizeFeatureVertex.data()), dmo_mesh.maxNumHalfedges + 1, dmo_mesh.maxNumHalfedges);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
}
updateNearestNeighbor(dmo_mesh, vertexPointsInit, nearestNeighbors);
dmo_mesh.updateNormals();
++curr_it;
updateCurrentQualities();
float newQuality = findMinimumQuality();
//cout << "new quality: " << newQuality << endl;
// log "<iteration>,<min quality>" and enforce monotone quality improvement
cout << curr_it << "," << newQuality << endl;
assert(newQuality >= lastMinQuality);
lastMinQuality = newQuality;
}
// Samples an nu x nv grid of points from the estimated local surface around
// vertex `vid` and writes nu*nv Vec3f into outSurfacePoints.
// NOTE(review): outSurfacePoints is handed directly to a kernel, so it is
// presumably a device (or managed) buffer — confirm at the call site.
void DMOTriClass::getEstimateLocalSurfacePoints(int vid, int nu, int nv, void* outSurfacePoints) {
// dynamic shared memory: one int per halfedge of the one-ring
int dynMemSize = dmo_mesh.maxNumHalfedges * sizeof(int);
k_fillEstimateLocalSurfacePoints<USE_SURF_OF_NN> << <1, nu* nv, dynMemSize>> > (vid, nu, nv, dmo_mesh.d_mesh, affineFactor, gridScale,
raw_pointer_cast(vertexPointsInit.data()), raw_pointer_cast(localSurfacesInit.data()), raw_pointer_cast(nearestNeighbors.data()), (Vec3f*)outSurfacePoints,
raw(table), raw(surfacesRowPtr), raw(localSurfacesFeatureInit));
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
}
void DMOTriClass::getLocalSurfacePoints(int vid, int nu, int nv, void* outSurfacePoints, int featureSid) {
k_fillLocalSurfacePoints << <1, nu* nv >> > (vid, featureSid, nu, nv, dmo_mesh.d_mesh, affineFactor, gridScale,
raw(localSurfacesInit), raw(nearestNeighbors), (Vec3f*)outSurfacePoints, raw(localSurfacesFeatureInit), raw(surfacesRowPtr));
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
}
void DMOTriClass::displayQualityGPU(int n_cols) {
device_vector<int> q_vec(n_cols, 0);
device_vector<float> q_min(1, FLT_MAX);
const int BLOCK_SIZE = 128;
k_qualityHistogram << <getBlockCount(dmo_mesh.nTriangles, BLOCK_SIZE), BLOCK_SIZE >> > (dmo_mesh.d_mesh, q_vec, q_min, n_cols, qualityCriterium);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
host_vector<int> q_vecHost(q_vec);
host_vector<float> q_minHost(q_min);
printFormattedQuality(q_vecHost, q_minHost);
}
void DMOTriClass::getQualityHistogram(std::vector<int>& vec, int n_cols) {
device_vector<int> q_vec(n_cols, 0);
device_vector<float> q_min(1, FLT_MAX);
const int BLOCK_SIZE = 128;
k_qualityHistogram << <getBlockCount(dmo_mesh.nTriangles, BLOCK_SIZE), BLOCK_SIZE >> > (dmo_mesh.d_mesh, q_vec, q_min, n_cols, qualityCriterium);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
host_vector<int> q_vecHost(q_vec);
thrust::copy(q_vec.begin(), q_vec.end(), vec.begin());
}
float DMOTriClass::findMinimumQuality() const {
device_vector<float> q_min(1, FLT_MAX);
const int BLOCK_SIZE = 128;
k_findMinimumQuality << <getBlockCount(dmo_mesh.nTriangles, BLOCK_SIZE), BLOCK_SIZE >> > (dmo_mesh.d_mesh, q_min, qualityCriterium);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
return q_min[0];
}
void DMOTriClass::updateCurrentQualities() {
const int BLOCK_SIZE = 128;
device_vector<bool> failure(1, false);
k_updateCurrentQualities << <getBlockCount(dmo_mesh.nTriangles, BLOCK_SIZE), BLOCK_SIZE >> > (dmo_mesh.d_mesh, currentQualities, lastMinQuality, failure, qualityCriterium);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
if (failure[0]) {
writeOFF("res/fail_debug", dmo_mesh);
throw 1;
}
}
void DMOTriClass::getElementQualities(void* outFloatBuffer) {
const int BLOCK_SIZE = 128;
k_getElementQualities << <getBlockCount(dmo_mesh.nTriangles, BLOCK_SIZE), BLOCK_SIZE >> > (dmo_mesh.d_mesh, (float*)outFloatBuffer, qualityCriterium);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
}
}
| b27f301624c7098121200df3ba2416185001cfc8.cu | #include "DMOTri.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/count.h>
#include <thrust/sequence.h>
#include "CudaUtil.h"
#include "CudaAtomic.h"
#include "DMOCommon.h"
#include "SurfaceConfig.h"
#include "io/FileWriter.h"
#include "Serializer.h"
#include "DMOTriImplAtomic.h"
#include "DMOTriImplCub.h"
//#include "DMOTriFlatImplCub.h"
//using namespace SurfLS;
//using namespace Surf1D;
#ifdef USECUB
using namespace DMOImplCub;
#else
using namespace DMOImplAtomic;
#endif
namespace DMO {
// counts the number of elements with quality between 0 - 0.1 and so on.
__global__ static void k_qualityHistogram(MeshTriDevice* mesh, ArrayView<int> q_vec, ArrayView<float> q_min, int n_cols, QualityCriterium q_crit) {
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < mesh->nTriangles; idx += blockDim.x * gridDim.x) {
const Triangle& tri = mesh->triangles[idx];
const Vec3f points[3] = { mesh->vertexPoints[tri.v0], mesh->vertexPoints[tri.v1], mesh->vertexPoints[tri.v2] };
float q = qualityTri(points, q_crit);
myAtomicMin(&q_min[0], q);
q = fminf(0.9999f, q);
size_t index = size_t(q * n_cols);
atomicAdd(&q_vec[index], 1);
}
}
// finds the minimum element quality
__global__ static void k_findMinimumQuality(MeshTriDevice* mesh, ArrayView<float> q_min, QualityCriterium q_crit) {
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < mesh->nTriangles; idx += blockDim.x * gridDim.x) {
const Triangle& tri = mesh->triangles[idx];
const Vec3f points[3] = { mesh->vertexPoints[tri.v0], mesh->vertexPoints[tri.v1], mesh->vertexPoints[tri.v2] };
float q = qualityTri(points, q_crit);
myAtomicMin(&q_min[0], q);
}
}
// checks for non decreasing quality of every element
__global__ static void k_updateCurrentQualities(MeshTriDevice* mesh, ArrayView<float> currentQualities, float lastMinQuality, ArrayView<bool> failure, QualityCriterium q_crit) {
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < mesh->nTriangles; idx += blockDim.x * gridDim.x) {
const Triangle& tri = mesh->triangles[idx];
const Vec3f points[3] = { mesh->vertexPoints[tri.v0], mesh->vertexPoints[tri.v1], mesh->vertexPoints[tri.v2] };
float q = qualityTri(points, q_crit);
if (q < lastMinQuality) {
failure[0] = true;
printf("minimum quality decreased! tri %i vertices %i %i %i q %f\n", idx, tri.v0, tri.v1, tri.v2, q);
assert(0);
}
currentQualities[idx] = q;
}
}
__global__ static void k_getElementQualities(MeshTriDevice* mesh, float* outBuffer, QualityCriterium q_crit) {
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < mesh->nTriangles; idx += blockDim.x * gridDim.x) {
const Triangle& tri = mesh->triangles[idx];
const Vec3f points[3] = { mesh->vertexPoints[tri.v0], mesh->vertexPoints[tri.v1], mesh->vertexPoints[tri.v2] };
outBuffer[idx] = qualityTri(points, q_crit);
}
}
// ######################################################################## //
// ### DMOTriClass ######################################################## //
// ######################################################################## //
DMOTriClass::DMOTriClass(DMOMeshTri& dmo_mesh_, QualityCriterium qualityCriterium_, const float gridScale_, int n_iter_)
: DMOBaseClass(qualityCriterium_, gridScale_, n_iter_)
, dmo_mesh(dmo_mesh_)
//, qualityCriterium(qualityCriterium_)
//, gridScale(gridScale_)
//, n_iter(n_iter_)
{
init();
}
void DMOTriClass::init() {
vertexPointsInit = device_vector<Vec3f>(dmo_mesh.vertexPoints, dmo_mesh.vertexPoints + dmo_mesh.nVerticesSurf); // copy of initial vertex positions
localSurfacesInit1d = device_vector<localSurface1d>(dmo_mesh.nVerticesSurf); // initial local surfaces1d
computeLocalSurfaces1d(dmo_mesh, localSurfacesInit1d);
localSurfacesInit = device_vector<localSurface>(dmo_mesh.nVerticesSurf); // initial local surfaces
computeLocalSurfaces(dmo_mesh, localSurfacesInit);
// new
if (dmo_mesh.nVerticesFeature > 0)
initLocalSurfacesFeature(dmo_mesh, surfacesRowPtr, localSurfacesFeatureInit, table);
//writeSurfaces(localSurfacesInit, "res/surfaces/surfaces2d2.binary");
nearestNeighbors = device_vector<int>(dmo_mesh.nVerticesSurf);
thrust::sequence(nearestNeighbors.begin(), nearestNeighbors.end(), 0);
optimizeFeatureVertex = device_vector<bool>(dmo_mesh.nVerticesSurf, true);
calcOptFeatureVec(dmo_mesh, optimizeFeatureVertex);
lastMinQuality = findMinimumQuality();
cout << "Total points " << dmo_mesh.nVerticesSurf << endl;
cout << "Free Surf points " << dmo_mesh.nVerticesSurf - dmo_mesh.nVerticesFeature << endl;
cout << "Feature Surf points " << dmo_mesh.nVerticesFeature << endl;
cout << "Colors Free " << dmo_mesh.nColorsFree << endl;
cout << "Colors Feature " << dmo_mesh.nColorsFeature << endl;
currentQualities = device_vector<float>(dmo_mesh.nTriangles, -FLT_MAX);
cout << "0," << lastMinQuality << endl;
}
void DMOTriClass::doIteration() {
int dynMemSize2D = (dmo_mesh.maxNumHalfedges + 1) * 3 * sizeof(float) + dmo_mesh.maxNumHalfedges * sizeof(int);
int dynMemSize1D = (dmo_mesh.maxNumHalfedges + 1) * 3 * sizeof(float) + dmo_mesh.maxNumHalfedges * sizeof(int);
for (int cid = 0; cid < dmo_mesh.nColorsFree; ++cid) {
k_optimizeHierarchical2D<DMO_NQ* DMO_NQ/2, USE_SURF_OF_NN> << <dmo_mesh.colorOffsetsFree[cid + 1] - dmo_mesh.colorOffsetsFree[cid], DMO_NQ* DMO_NQ/2, dynMemSize2D >> >
(dmo_mesh.colorOffsetsFree[cid], dmo_mesh.colorOffsetsFree[cid + 1], dmo_mesh.d_mesh, affineFactor, qualityCriterium, gridScale,
raw_pointer_cast(vertexPointsInit.data()), raw_pointer_cast(localSurfacesInit.data()), raw_pointer_cast(nearestNeighbors.data()),
raw(table), raw(surfacesRowPtr), raw(localSurfacesFeatureInit), dmo_mesh.maxNumHalfedges + 1);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
for (int cid = 0; cid < dmo_mesh.nColorsFeature; ++cid) {
k_optimizeHierarchical1D<DMO_NQ> << <dmo_mesh.colorOffsetsFeature[cid + 1] - dmo_mesh.colorOffsetsFeature[cid], DMO_NQ, dynMemSize1D >> >
(dmo_mesh.colorOffsetsFeature[cid], dmo_mesh.colorOffsetsFeature[cid + 1], dmo_mesh.d_mesh, affineFactor, qualityCriterium, gridScale,
raw_pointer_cast(vertexPointsInit.data()), raw_pointer_cast(localSurfacesInit1d.data()), raw_pointer_cast(nearestNeighbors.data()),
raw_pointer_cast(optimizeFeatureVertex.data()), dmo_mesh.maxNumHalfedges + 1, dmo_mesh.maxNumHalfedges);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
updateNearestNeighbor(dmo_mesh, vertexPointsInit, nearestNeighbors);
dmo_mesh.updateNormals();
++curr_it;
updateCurrentQualities();
float newQuality = findMinimumQuality();
//cout << "new quality: " << newQuality << endl;
cout << curr_it << "," << newQuality << endl;
assert(newQuality >= lastMinQuality);
lastMinQuality = newQuality;
}
void DMOTriClass::getEstimateLocalSurfacePoints(int vid, int nu, int nv, void* outSurfacePoints) {
int dynMemSize = dmo_mesh.maxNumHalfedges * sizeof(int);
k_fillEstimateLocalSurfacePoints<USE_SURF_OF_NN> << <1, nu* nv, dynMemSize>> > (vid, nu, nv, dmo_mesh.d_mesh, affineFactor, gridScale,
raw_pointer_cast(vertexPointsInit.data()), raw_pointer_cast(localSurfacesInit.data()), raw_pointer_cast(nearestNeighbors.data()), (Vec3f*)outSurfacePoints,
raw(table), raw(surfacesRowPtr), raw(localSurfacesFeatureInit));
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
void DMOTriClass::getLocalSurfacePoints(int vid, int nu, int nv, void* outSurfacePoints, int featureSid) {
k_fillLocalSurfacePoints << <1, nu* nv >> > (vid, featureSid, nu, nv, dmo_mesh.d_mesh, affineFactor, gridScale,
raw(localSurfacesInit), raw(nearestNeighbors), (Vec3f*)outSurfacePoints, raw(localSurfacesFeatureInit), raw(surfacesRowPtr));
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
void DMOTriClass::displayQualityGPU(int n_cols) {
device_vector<int> q_vec(n_cols, 0);
device_vector<float> q_min(1, FLT_MAX);
const int BLOCK_SIZE = 128;
k_qualityHistogram << <getBlockCount(dmo_mesh.nTriangles, BLOCK_SIZE), BLOCK_SIZE >> > (dmo_mesh.d_mesh, q_vec, q_min, n_cols, qualityCriterium);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
host_vector<int> q_vecHost(q_vec);
host_vector<float> q_minHost(q_min);
printFormattedQuality(q_vecHost, q_minHost);
}
void DMOTriClass::getQualityHistogram(std::vector<int>& vec, int n_cols) {
device_vector<int> q_vec(n_cols, 0);
device_vector<float> q_min(1, FLT_MAX);
const int BLOCK_SIZE = 128;
k_qualityHistogram << <getBlockCount(dmo_mesh.nTriangles, BLOCK_SIZE), BLOCK_SIZE >> > (dmo_mesh.d_mesh, q_vec, q_min, n_cols, qualityCriterium);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
host_vector<int> q_vecHost(q_vec);
thrust::copy(q_vec.begin(), q_vec.end(), vec.begin());
}
float DMOTriClass::findMinimumQuality() const {
device_vector<float> q_min(1, FLT_MAX);
const int BLOCK_SIZE = 128;
k_findMinimumQuality << <getBlockCount(dmo_mesh.nTriangles, BLOCK_SIZE), BLOCK_SIZE >> > (dmo_mesh.d_mesh, q_min, qualityCriterium);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
return q_min[0];
}
void DMOTriClass::updateCurrentQualities() {
const int BLOCK_SIZE = 128;
device_vector<bool> failure(1, false);
k_updateCurrentQualities << <getBlockCount(dmo_mesh.nTriangles, BLOCK_SIZE), BLOCK_SIZE >> > (dmo_mesh.d_mesh, currentQualities, lastMinQuality, failure, qualityCriterium);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
if (failure[0]) {
writeOFF("res/fail_debug", dmo_mesh);
throw 1;
}
}
void DMOTriClass::getElementQualities(void* outFloatBuffer) {
const int BLOCK_SIZE = 128;
k_getElementQualities << <getBlockCount(dmo_mesh.nTriangles, BLOCK_SIZE), BLOCK_SIZE >> > (dmo_mesh.d_mesh, (float*)outFloatBuffer, qualityCriterium);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
}
|
0cf3654aecd93c1bea817aa9acecc7c2e09a6a3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <math.h>
#include "../debug.h"
/* definitions of threadblock size in X and Y directions */
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
/* definition of matrix linear dimension */
#define SIZE 4096
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* CUDA kernel for shared memory matrix transpose */
__global__ void smem_cuda_transpose( const int m,
double const * const a,
double * const c )
{
/* declare a shared memory array */
__shared__ double smemArray[THREADS_PER_BLOCK_X][THREADS_PER_BLOCK_Y+1];
/* determine my row and column indices for the error checking code */
const int myRow = blockDim.x * blockIdx.x + threadIdx.x;
const int myCol = blockDim.y * blockIdx.y + threadIdx.y;
/* determine my row tile and column tile index */
const int tileX = blockDim.x * blockIdx.x;
const int tileY = blockDim.y * blockIdx.y;
if( myRow < m && myCol < m )
{
/* read to the shared mem array */
/* HINT: threadIdx.x should appear somewhere in the first argument to */
/* your INDX calculation for both a[] and c[]. This will ensure proper */
/* coalescing. */
smemArray[threadIdx.x][threadIdx.y] =
a[INDX( tileX + threadIdx.x, tileY + threadIdx.y, m )];
} /* end if */
/* synchronize */
__syncthreads();
if( myRow < m && myCol < m )
{
/* write the result */
c[INDX( tileY + threadIdx.x, tileX + threadIdx.y, m )] =
smemArray[threadIdx.y][threadIdx.x];
} /* end if */
return;
} /* end smem_cuda_transpose */
void host_transpose( const int m, double const * const a, double * const c )
{
/*
* naive matrix transpose goes here.
*/
for( int j = 0; j < m; j++ )
{
for( int i = 0; i < m; i++ )
{
c[INDX(i,j,m)] = a[INDX(j,i,m)];
} /* end for i */
} /* end for j */
} /* end host_dgemm */
int main( int argc, char *argv[] )
{
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
/* declaring pointers for array */
double *h_a, *h_c;
double *d_a, *d_c;
size_t numbytes = (size_t) size * (size_t) size * sizeof( double );
/* allocating host memory */
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc h_a\n");
return 911;
}
h_c = (double *) malloc( numbytes );
if( h_c == NULL )
{
fprintf(stderr,"Error in host malloc h_c\n");
return 911;
}
/* allocating device memory */
checkCUDA( hipMalloc( (void**) &d_a, numbytes ) );
checkCUDA( hipMalloc( (void**) &d_c, numbytes ) );
/* set result matrices to zero */
memset( h_c, 0, numbytes );
checkCUDA( hipMemset( d_c, 0, numbytes ) );
fprintf( stdout, "Total memory required per matrix is %lf MB\n",
(double) numbytes / 1000000.0 );
/* initialize input matrix with random value */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
} /* end for */
/* copy input matrix from host to device */
checkCUDA( hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice ) );
/* create and start timer */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* call naive cpu transpose function */
host_transpose( size, h_a, h_c );
/* stop CPU timer */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print CPU timing information */
fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GB/s\n",
8.0 * 2.0 * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* setup threadblock size and grid sizes */
dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 );
dim3 blocks( ( size / THREADS_PER_BLOCK_X ) + 1,
( size / THREADS_PER_BLOCK_Y ) + 1, 1 );
/* start timers */
checkCUDA( hipEventRecord( start, 0 ) );
/* call smem GPU transpose kernel */
hipLaunchKernelGGL(( smem_cuda_transpose), dim3(blocks), dim3(threads) , 0, 0, size, d_a, d_c );
checkKERNEL()
/* stop the timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print GPU timing information */
fprintf(stdout, "Total time GPU is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GB/s\n",
8.0 * 2.0 * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy data from device to host */
checkCUDA( hipMemset( d_a, 0, numbytes ) );
checkCUDA( hipMemcpy( h_a, d_c, numbytes, hipMemcpyDeviceToHost ) );
/* compare GPU to CPU for correctness */
int success = 1;
for( int j = 0; j < size; j++ )
{
for( int i = 0; i < size; i++ )
{
if( h_c[INDX(i,j,size)] != h_a[INDX(i,j,size)] )
{
printf("Error in element %d,%d\n", i,j );
printf("Host %f, device %f\n",h_c[INDX(i,j,size)],
h_a[INDX(i,j,size)]);
success = 0;
break;
}
} /* end for i */
} /* end for j */
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
/* free the memory */
free( h_a );
free( h_c );
checkCUDA( hipFree( d_a ) );
checkCUDA( hipFree( d_c ) );
checkCUDA( hipDeviceReset() );
return 0;
}
| 0cf3654aecd93c1bea817aa9acecc7c2e09a6a3b.cu | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <math.h>
#include "../debug.h"
/* definitions of threadblock size in X and Y directions */
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
/* definition of matrix linear dimension */
#define SIZE 4096
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* CUDA kernel for shared memory matrix transpose */
__global__ void smem_cuda_transpose( const int m,
double const * const a,
double * const c )
{
/* declare a shared memory array */
__shared__ double smemArray[THREADS_PER_BLOCK_X][THREADS_PER_BLOCK_Y+1];
/* determine my row and column indices for the error checking code */
const int myRow = blockDim.x * blockIdx.x + threadIdx.x;
const int myCol = blockDim.y * blockIdx.y + threadIdx.y;
/* determine my row tile and column tile index */
const int tileX = blockDim.x * blockIdx.x;
const int tileY = blockDim.y * blockIdx.y;
if( myRow < m && myCol < m )
{
/* read to the shared mem array */
/* HINT: threadIdx.x should appear somewhere in the first argument to */
/* your INDX calculation for both a[] and c[]. This will ensure proper */
/* coalescing. */
smemArray[threadIdx.x][threadIdx.y] =
a[INDX( tileX + threadIdx.x, tileY + threadIdx.y, m )];
} /* end if */
/* synchronize */
__syncthreads();
if( myRow < m && myCol < m )
{
/* write the result */
c[INDX( tileY + threadIdx.x, tileX + threadIdx.y, m )] =
smemArray[threadIdx.y][threadIdx.x];
} /* end if */
return;
} /* end smem_cuda_transpose */
void host_transpose( const int m, double const * const a, double * const c )
{
/*
* naive matrix transpose goes here.
*/
for( int j = 0; j < m; j++ )
{
for( int i = 0; i < m; i++ )
{
c[INDX(i,j,m)] = a[INDX(j,i,m)];
} /* end for i */
} /* end for j */
} /* end host_dgemm */
int main( int argc, char *argv[] )
{
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
/* declaring pointers for array */
double *h_a, *h_c;
double *d_a, *d_c;
size_t numbytes = (size_t) size * (size_t) size * sizeof( double );
/* allocating host memory */
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc h_a\n");
return 911;
}
h_c = (double *) malloc( numbytes );
if( h_c == NULL )
{
fprintf(stderr,"Error in host malloc h_c\n");
return 911;
}
/* allocating device memory */
checkCUDA( cudaMalloc( (void**) &d_a, numbytes ) );
checkCUDA( cudaMalloc( (void**) &d_c, numbytes ) );
/* set result matrices to zero */
memset( h_c, 0, numbytes );
checkCUDA( cudaMemset( d_c, 0, numbytes ) );
fprintf( stdout, "Total memory required per matrix is %lf MB\n",
(double) numbytes / 1000000.0 );
/* initialize input matrix with random value */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
} /* end for */
/* copy input matrix from host to device */
checkCUDA( cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice ) );
/* create and start timer */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* call naive cpu transpose function */
host_transpose( size, h_a, h_c );
/* stop CPU timer */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print CPU timing information */
fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GB/s\n",
8.0 * 2.0 * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* setup threadblock size and grid sizes */
dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 );
dim3 blocks( ( size / THREADS_PER_BLOCK_X ) + 1,
( size / THREADS_PER_BLOCK_Y ) + 1, 1 );
/* start timers */
checkCUDA( cudaEventRecord( start, 0 ) );
/* call smem GPU transpose kernel */
smem_cuda_transpose<<< blocks, threads >>>( size, d_a, d_c );
checkKERNEL()
/* stop the timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print GPU timing information */
fprintf(stdout, "Total time GPU is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GB/s\n",
8.0 * 2.0 * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy data from device to host */
checkCUDA( cudaMemset( d_a, 0, numbytes ) );
checkCUDA( cudaMemcpy( h_a, d_c, numbytes, cudaMemcpyDeviceToHost ) );
/* compare GPU to CPU for correctness */
int success = 1;
for( int j = 0; j < size; j++ )
{
for( int i = 0; i < size; i++ )
{
if( h_c[INDX(i,j,size)] != h_a[INDX(i,j,size)] )
{
printf("Error in element %d,%d\n", i,j );
printf("Host %f, device %f\n",h_c[INDX(i,j,size)],
h_a[INDX(i,j,size)]);
success = 0;
break;
}
} /* end for i */
} /* end for j */
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
/* free the memory */
free( h_a );
free( h_c );
checkCUDA( cudaFree( d_a ) );
checkCUDA( cudaFree( d_c ) );
checkCUDA( cudaDeviceReset() );
return 0;
}
|
0695a212a57136e8a28c0bbb2c10e57b8bdb3b58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@author Mark Gates
@generated from zlaset.cu normal z -> d, Fri Jul 18 17:34:12 2014
*/
#include "common_magma.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
*/
__global__
void dlaset_full(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || offdiag == diag));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to dlaset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
*/
__global__
void dlaset_lower(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to dlaset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
*/
__global__
void dlaset_upper(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
/**
Purpose
-------
DLASET_STREAM initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
This is the same as DLASET, but adds stream argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of the matrix dA is set.
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag DOUBLE_PRECISION
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag DOUBLE_PRECISION
The scalar DIAG. (In LAPACK this is called BETA.)
@param[in]
dA DOUBLE_PRECISION array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
stream magma_queue_t
Stream to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C"
void magmablas_dlaset_stream(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
double offdiag, double diag,
double *dA, magma_int_t ldda,
magma_queue_t stream)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( (m-1)/BLK_X + 1, (n-1)/BLK_Y + 1 );
if (uplo == MagmaLower)
hipLaunchKernelGGL(( dlaset_lower), dim3(grid), dim3(threads), 0, stream , m, n, offdiag, diag, dA, ldda);
else if (uplo == MagmaUpper)
hipLaunchKernelGGL(( dlaset_upper), dim3(grid), dim3(threads), 0, stream , m, n, offdiag, diag, dA, ldda);
else
hipLaunchKernelGGL(( dlaset_full) , dim3(grid), dim3(threads), 0, stream , m, n, offdiag, diag, dA, ldda);
}
/**
@see magmablas_dlaset_stream
@ingroup magma_daux2
********************************************************************/
extern "C"
void magmablas_dlaset(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
double offdiag, double diag,
double *dA, magma_int_t ldda )
{
magmablas_dlaset_stream( uplo, m, n, offdiag, diag, dA, ldda, magma_stream );
}
#define LASET_BAND_NB 64
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
@author Raffaele Solca
@author Mark Gates
*/
__global__
void dlaset_band_upper(
int m, int n,
double offdiag, double diag,
double *A, int lda)
{
int k = blockDim.x;
int ibx = blockIdx.x * LASET_BAND_NB;
int ind = ibx + threadIdx.x - k + 1;
A += ind + ibx*lda;
double value = offdiag;
if (threadIdx.x == k-1)
value = diag;
#pragma unroll
for (int j=0; j < LASET_BAND_NB; j++) {
if (ibx + j < n && ind + j >= 0 && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
@author Raffaele Solca
@author Mark Gates
*/
__global__
void dlaset_band_lower(
int m, int n,
double offdiag, double diag,
double *A, int lda)
{
//int k = blockDim.x;
int ibx = blockIdx.x * LASET_BAND_NB;
int ind = ibx + threadIdx.x;
A += ind + ibx*lda;
double value = offdiag;
if (threadIdx.x == 0)
value = diag;
#pragma unroll
for (int j=0; j < LASET_BAND_NB; j++) {
if (ibx + j < n && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/**
Purpose
-------
DLASET_BAND_STREAM initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
This is the same as DLASET_BAND, but adds stream argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag DOUBLE_PRECISION
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag DOUBLE_PRECISION
All the main diagonal elements are set to DIAG.
@param[in]
dA DOUBLE_PRECISION array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
A(i,i) = BETA , 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
stream magma_queue_t
Stream to execute DLASET in.
@author Raffaele Solca
@author Mark Gates
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlaset_band_stream(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
double offdiag, double diag,
double *dA, magma_int_t ldda, magma_queue_t stream)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 || k > 1024 )
info = -4;
else if ( ldda < max(1,m) )
info = -6;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (uplo == MagmaUpper) {
dim3 threads( min(k,n) );
dim3 grid( (min(m+k-1,n) - 1)/LASET_BAND_NB + 1 );
hipLaunchKernelGGL(( dlaset_band_upper), dim3(grid), dim3(threads), 0, stream , m, n, offdiag, diag, dA, ldda);
}
else if (uplo == MagmaLower) {
dim3 threads( min(k,m) );
dim3 grid( (min(m,n) - 1)/LASET_BAND_NB + 1 );
hipLaunchKernelGGL(( dlaset_band_lower), dim3(grid), dim3(threads), 0, stream , m, n, offdiag, diag, dA, ldda);
}
}
/**
@see magmablas_dlaset_band_stream
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlaset_band(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
double offdiag, double diag,
double *dA, magma_int_t ldda)
{
magmablas_dlaset_band_stream(uplo, m, n, k, offdiag, diag, dA, ldda, magma_stream);
}
| 0695a212a57136e8a28c0bbb2c10e57b8bdb3b58.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@author Mark Gates
@generated from zlaset.cu normal z -> d, Fri Jul 18 17:34:12 2014
*/
#include "common_magma.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
*/
__global__
void dlaset_full(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || offdiag == diag));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to dlaset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
*/
__global__
void dlaset_lower(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to dlaset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
*/
__global__
void dlaset_upper(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
/**
Purpose
-------
DLASET_STREAM initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
This is the same as DLASET, but adds stream argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of the matrix dA is set.
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag DOUBLE_PRECISION
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag DOUBLE_PRECISION
The scalar DIAG. (In LAPACK this is called BETA.)
@param[in]
dA DOUBLE_PRECISION array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
stream magma_queue_t
Stream to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C"
void magmablas_dlaset_stream(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
double offdiag, double diag,
double *dA, magma_int_t ldda,
magma_queue_t stream)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( (m-1)/BLK_X + 1, (n-1)/BLK_Y + 1 );
if (uplo == MagmaLower)
dlaset_lower<<< grid, threads, 0, stream >>> (m, n, offdiag, diag, dA, ldda);
else if (uplo == MagmaUpper)
dlaset_upper<<< grid, threads, 0, stream >>> (m, n, offdiag, diag, dA, ldda);
else
dlaset_full <<< grid, threads, 0, stream >>> (m, n, offdiag, diag, dA, ldda);
}
/**
@see magmablas_dlaset_stream
@ingroup magma_daux2
********************************************************************/
extern "C"
void magmablas_dlaset(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
double offdiag, double diag,
double *dA, magma_int_t ldda )
{
magmablas_dlaset_stream( uplo, m, n, offdiag, diag, dA, ldda, magma_stream );
}
#define LASET_BAND_NB 64
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
@author Raffaele Solca
@author Mark Gates
*/
__global__
void dlaset_band_upper(
int m, int n,
double offdiag, double diag,
double *A, int lda)
{
int k = blockDim.x;
int ibx = blockIdx.x * LASET_BAND_NB;
int ind = ibx + threadIdx.x - k + 1;
A += ind + ibx*lda;
double value = offdiag;
if (threadIdx.x == k-1)
value = diag;
#pragma unroll
for (int j=0; j < LASET_BAND_NB; j++) {
if (ibx + j < n && ind + j >= 0 && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
@author Raffaele Solca
@author Mark Gates
*/
__global__
void dlaset_band_lower(
int m, int n,
double offdiag, double diag,
double *A, int lda)
{
//int k = blockDim.x;
int ibx = blockIdx.x * LASET_BAND_NB;
int ind = ibx + threadIdx.x;
A += ind + ibx*lda;
double value = offdiag;
if (threadIdx.x == 0)
value = diag;
#pragma unroll
for (int j=0; j < LASET_BAND_NB; j++) {
if (ibx + j < n && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/**
Purpose
-------
DLASET_BAND_STREAM initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
This is the same as DLASET_BAND, but adds stream argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag DOUBLE_PRECISION
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag DOUBLE_PRECISION
All the main diagonal elements are set to DIAG.
@param[in]
dA DOUBLE_PRECISION array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
A(i,i) = BETA , 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
stream magma_queue_t
Stream to execute DLASET in.
@author Raffaele Solca
@author Mark Gates
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlaset_band_stream(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
double offdiag, double diag,
double *dA, magma_int_t ldda, magma_queue_t stream)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 || k > 1024 )
info = -4;
else if ( ldda < max(1,m) )
info = -6;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (uplo == MagmaUpper) {
dim3 threads( min(k,n) );
dim3 grid( (min(m+k-1,n) - 1)/LASET_BAND_NB + 1 );
dlaset_band_upper<<< grid, threads, 0, stream >>> (m, n, offdiag, diag, dA, ldda);
}
else if (uplo == MagmaLower) {
dim3 threads( min(k,m) );
dim3 grid( (min(m,n) - 1)/LASET_BAND_NB + 1 );
dlaset_band_lower<<< grid, threads, 0, stream >>> (m, n, offdiag, diag, dA, ldda);
}
}
/**
@see magmablas_dlaset_band_stream
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlaset_band(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
double offdiag, double diag,
double *dA, magma_int_t ldda)
{
magmablas_dlaset_band_stream(uplo, m, n, k, offdiag, diag, dA, ldda, magma_stream);
}
|
c9eb2e2289900777f373164de350525f0681050c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// HELPER FUNCTIONS
// Print an array of floats in [,,] format
void printFloatArray(float *arr, int len){
printf("[");
for (int i = 0; i < len -1; ++i) {
printf("%.2f, ", arr[i]);
}
printf("%.2f]", arr[len-1]);
printf("\n");
}
// Print an array of unsigned ints in [,,] format
void printUnsignedArray(unsigned *arr, int len){
printf("[");
for (int i = 0; i < len -1; ++i) {
printf("%u, ", arr[i]);
}
printf("%u]", arr[len-1]);
printf("\n");
}
// Print an array of ints in [,,] format
void printArray(int *arr, int len){
printf("[");
for (int i = 0; i < len -1; ++i) {
printf("%d, ", arr[i]);
}
printf("%d]", arr[len-1]);
printf("\n");
}
// A simple helper function to compute the difference between to points in time
double time_diff(struct timeval x , struct timeval y){
double x_ms , y_ms , diff;
x_ms = (double)x.tv_sec*1000000 + (double)x.tv_usec;
y_ms = (double)y.tv_sec*1000000 + (double)y.tv_usec;
diff = (double)y_ms - (double)x_ms;
return diff;
}
// A simple helper function to reset two arrays with random values
void resetTestData(float *floatArr, int lenFloats, int *intArr, int lenInts){
for (int i = 0; i < lenFloats; i++){
floatArr[i] = (float)rand()/(float)(RAND_MAX/1.0);
}
for (int i = 0; i < lenInts; i++){
intArr[i] = (int)rand()/(float)(RAND_MAX/10);
}
}
// returns the tile indicies corresponding to the floats and ints
void getFeaturesNorm(float **prototypes, int numPrototypes, float *floats, int lenFloats, int *ints, int lenInts, int numCoordinates, float threshold, int *features) {
for (int i = 0; i < numPrototypes; i++) {
float *prototype = prototypes[i];
float distance = 0.0;
float diff = 0.0;
// Compute using norm
for (int j = 0; j < lenFloats; j++) {
diff = floats[j] - prototype[j];
distance += diff*diff;
}
for (int j = 0; j < lenInts; j++){
diff = (float) ints[j] - prototype[j+lenFloats];
distance += diff*diff;
}
if (sqrt(distance) < threshold){
features[i] = 1;
}
}
}
// threadIdx.x = coord
// blockIdx.x = prototype
__global__ void calcFeatures(float *d_prototypes, float *d_floats, int lenFloats, int *d_ints, int lenInts, float *d_activationRadii, int *d_features){
float val = 0.0;
if (threadIdx.x < lenFloats){
float distance = fabsf(d_floats[threadIdx.x] - d_prototypes[blockIdx.x * (lenFloats + lenInts) + threadIdx.x]);
val = distance <= d_activationRadii[threadIdx.x] ? 1 - distance/d_activationRadii[threadIdx.x] : 0;
} else {
float distance = fabsf(((float) d_ints[threadIdx.x - lenFloats]) - d_prototypes[blockIdx.x * (lenFloats + lenInts) + threadIdx.x]);
val = distance <= d_activationRadii[threadIdx.x] ? 1 - distance/d_activationRadii[threadIdx.x] : 0;
}
atomicAnd(&d_features[blockIdx.x], val > 0 ? 1 : 0);
}
// TODO finish this
void parallel_getFeaturesActivationRadii(int numPrototypes, int numCoordinates, float *d_prototypes, float *h_floatArr, float *d_floats, int lenFloats, int *h_intArr, int *d_ints, int lenInts, float *d_activationRadii, int *d_features, int *h_features){
hipMemset(d_features, 0xF, numPrototypes*sizeof(int));
hipMemcpy(d_floats, h_floatArr, lenFloats*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_ints, h_intArr, lenInts * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( calcFeatures), dim3(numPrototypes), dim3(numCoordinates), 0, 0, d_prototypes, d_floats, lenFloats, d_ints, lenInts, d_activationRadii, d_features);
hipMemcpy(h_features, d_features, numPrototypes * sizeof(float), hipMemcpyDeviceToHost);
}
// returns the tile indicies corresponding to the floats and ints
void getFeaturesActivationRadii(int numPrototypes, int numCoordinates, float *prototypes,float *floats, int lenFloats, int *ints, int lenInts, float *activationRadii, int *features) {
for (int i = 0; i < numPrototypes; i++) {
float minValue = INFINITY;
float distance;
float val;
// Do floats
for (int j = 0; j < lenFloats; j++) {
distance = fabs(floats[j] - prototypes[i*lenFloats + j]);
val = distance <= activationRadii[j] ? 1 - distance/activationRadii[j] : 0;
minValue = minValue < val ? minValue : val;
}
// Do ints
for (int j = 0; j < lenInts; j++) {
distance = fabs((float)ints[j] - prototypes[i*lenFloats + j]);
val = distance <= activationRadii[j + lenFloats] ? 1 - distance/activationRadii[j + lenFloats] : 0;
minValue = minValue < val ? minValue : val;
}
// if close enough, activate feature
features[i] = minValue > 0 ? 1 : 0;
}
}
int main(int argc, char ** argv) {
// Use random other than 1
// srand ( time(NULL) );
// not testing ints so set it to length 0
int h_intArr[0] = {};
int lenInts = 0;
int * d_ints;
hipMalloc((void **) &d_ints, lenInts*sizeof(int));
int maxPrototypes = 2048;
int maxFloats = 1024;
int numTrials = 5000;
int incrementBy = 10;
struct timeval beforeA, afterA, beforeB, afterB;
double sumA = 0.0, avgTimeA = 0.0, minTimeA = INFINITY, maxTimeA = 0.0, sumB = 0.0, avgTimeB = 0.0, minTimeB = INFINITY, maxTimeB = 0.0;
int maxTimeTrialA = 0, maxTimeTrialB = 0, minTimeTrialA = 0, minTimeTrialB;
for (int numPrototypes = 200; numPrototypes < maxPrototypes; numPrototypes*=2){
int features[numPrototypes];
int testFeatures[numPrototypes];
int *d_features;
hipMalloc((void **) &d_features, numPrototypes*sizeof(int));
for (int lenFloats = 2; lenFloats < maxFloats; lenFloats+=incrementBy){
int numCoordinates = lenFloats + lenInts;
float h_prototypes[numPrototypes*numCoordinates];
// initialize random prototypes
resetTestData(h_prototypes, numPrototypes*lenFloats, h_intArr, lenInts);
float *d_prototypes;
hipMalloc((void **) &d_prototypes, numPrototypes*numCoordinates*sizeof(float));
hipMemcpy(d_prototypes, h_prototypes, numPrototypes * numCoordinates * sizeof(float), hipMemcpyHostToDevice);
// populate the activation radii array, although .2 could be passed in, there could be different radii for different dimensions
float h_activationRadii[lenFloats];
for (int i = 0; i < lenFloats; i++){
h_activationRadii[i] = .2;
}
float *d_activationRadii;
hipMalloc((void **) &d_activationRadii, lenFloats * sizeof(float));
hipMemcpy(d_activationRadii, h_activationRadii, lenFloats * sizeof(float), hipMemcpyHostToDevice);
float h_floatArr[lenFloats];
float *d_floats;
hipMalloc((void **) &d_floats, lenFloats * sizeof(float));
for (int trial = 0; trial < numTrials; trial++){
// reset float array
resetTestData(h_floatArr, lenFloats, h_intArr, lenInts);
// time the Parallel tiles
gettimeofday(&beforeA , NULL);
parallel_getFeaturesActivationRadii(numPrototypes, numCoordinates, d_prototypes, h_floatArr, d_floats, lenFloats, h_intArr, d_ints, lenInts, d_activationRadii, d_features, testFeatures);
gettimeofday(&afterA , NULL);
// time the Serial tiles
gettimeofday(&beforeB, NULL);
getFeaturesActivationRadii(numPrototypes, numCoordinates, h_prototypes, h_floatArr, lenFloats, h_intArr, lenInts, h_activationRadii, features);
gettimeofday(&afterB, NULL);
// confirm correct calculation
int Errors = 0;
for (int j = 0; j < numPrototypes; j++){
if (features[j] != testFeatures[j]){
printf("Error: Incorrect Arrays\nCorrect Array: ");
printArray(features, numPrototypes);
printf("\nComputed Array: ");
printArray(testFeatures, numPrototypes);
Errors = 1;
break;
}
}
if (Errors){
// if there is an error (differing arrays), free the memory and print debug info
hipFree(d_floats);
hipFree(d_prototypes);
hipFree(d_activationRadii);
hipFree(d_ints);
hipFree(d_features);
printf("Error: numPrototypes %d, lenFloats %d, trial %d\n", numPrototypes, lenFloats, trial);
return 1;
}
// compute time comparison
double timeTakenA = time_diff(beforeA , afterA);
sumA += timeTakenA;
if (timeTakenA < minTimeA){
minTimeA = timeTakenA;
minTimeTrialA = trial;
}
if (timeTakenA > maxTimeA){
maxTimeA = timeTakenA;
maxTimeTrialA = trial;
}
//compute time comparison
double timeTakenB = time_diff(beforeB , afterB);
sumB += timeTakenB;
if (timeTakenB < minTimeB){
minTimeB = timeTakenB;
minTimeTrialB = trial;
}
if (timeTakenB > maxTimeB){
maxTimeB = timeTakenB;
maxTimeTrialB = trial;
}
} // trialsloop
hipFree(d_floats);
hipFree(d_prototypes);
hipFree(d_activationRadii);
// compute the average time for each scenario
avgTimeA= sumA/numTrials;
avgTimeB = sumB/numTrials;
if (avgTimeA < avgTimeB){
printf("numPrototypes: %d\t numCoordinates: %d\n", numPrototypes, numCoordinates);
printf("\tParallel\n\t\tMin Time: %.0lf us | Min Trial: %d | Max Time: %.0lf us | Max Trial: %d | Avg time : %.0lf us\n\tSerial\n\t\tMin Time: %.0lf us | Min Trial: %d | Max Time: %.0lf us | Max Trial: %d | Avg time : %.0lf us\n\n", minTimeA, minTimeTrialA, maxTimeA, maxTimeTrialA, avgTimeA, minTimeB, minTimeTrialB, maxTimeB, maxTimeTrialB, avgTimeB);
printf("---------------------------------------------------------\n");
break;
}
} // float loop
hipFree(features);
hipFree(testFeatures);
hipFree(d_features);
} // prototype loop
return 0;
} | c9eb2e2289900777f373164de350525f0681050c.cu | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// HELPER FUNCTIONS
// Print an array of floats in [,,] format
void printFloatArray(float *arr, int len){
printf("[");
for (int i = 0; i < len -1; ++i) {
printf("%.2f, ", arr[i]);
}
printf("%.2f]", arr[len-1]);
printf("\n");
}
// Print an array of unsigned ints in [,,] format
void printUnsignedArray(unsigned *arr, int len){
printf("[");
for (int i = 0; i < len -1; ++i) {
printf("%u, ", arr[i]);
}
printf("%u]", arr[len-1]);
printf("\n");
}
// Print an array of ints in [,,] format
void printArray(int *arr, int len){
printf("[");
for (int i = 0; i < len -1; ++i) {
printf("%d, ", arr[i]);
}
printf("%d]", arr[len-1]);
printf("\n");
}
// A simple helper function to compute the difference between to points in time
double time_diff(struct timeval x , struct timeval y){
double x_ms , y_ms , diff;
x_ms = (double)x.tv_sec*1000000 + (double)x.tv_usec;
y_ms = (double)y.tv_sec*1000000 + (double)y.tv_usec;
diff = (double)y_ms - (double)x_ms;
return diff;
}
// A simple helper function to reset two arrays with random values
void resetTestData(float *floatArr, int lenFloats, int *intArr, int lenInts){
for (int i = 0; i < lenFloats; i++){
floatArr[i] = (float)rand()/(float)(RAND_MAX/1.0);
}
for (int i = 0; i < lenInts; i++){
intArr[i] = (int)rand()/(float)(RAND_MAX/10);
}
}
// returns the tile indicies corresponding to the floats and ints
void getFeaturesNorm(float **prototypes, int numPrototypes, float *floats, int lenFloats, int *ints, int lenInts, int numCoordinates, float threshold, int *features) {
for (int i = 0; i < numPrototypes; i++) {
float *prototype = prototypes[i];
float distance = 0.0;
float diff = 0.0;
// Compute using norm
for (int j = 0; j < lenFloats; j++) {
diff = floats[j] - prototype[j];
distance += diff*diff;
}
for (int j = 0; j < lenInts; j++){
diff = (float) ints[j] - prototype[j+lenFloats];
distance += diff*diff;
}
if (sqrt(distance) < threshold){
features[i] = 1;
}
}
}
// threadIdx.x = coord
// blockIdx.x = prototype
__global__ void calcFeatures(float *d_prototypes, float *d_floats, int lenFloats, int *d_ints, int lenInts, float *d_activationRadii, int *d_features){
float val = 0.0;
if (threadIdx.x < lenFloats){
float distance = fabsf(d_floats[threadIdx.x] - d_prototypes[blockIdx.x * (lenFloats + lenInts) + threadIdx.x]);
val = distance <= d_activationRadii[threadIdx.x] ? 1 - distance/d_activationRadii[threadIdx.x] : 0;
} else {
float distance = fabsf(((float) d_ints[threadIdx.x - lenFloats]) - d_prototypes[blockIdx.x * (lenFloats + lenInts) + threadIdx.x]);
val = distance <= d_activationRadii[threadIdx.x] ? 1 - distance/d_activationRadii[threadIdx.x] : 0;
}
atomicAnd(&d_features[blockIdx.x], val > 0 ? 1 : 0);
}
// TODO finish this
void parallel_getFeaturesActivationRadii(int numPrototypes, int numCoordinates, float *d_prototypes, float *h_floatArr, float *d_floats, int lenFloats, int *h_intArr, int *d_ints, int lenInts, float *d_activationRadii, int *d_features, int *h_features){
cudaMemset(d_features, 0xF, numPrototypes*sizeof(int));
cudaMemcpy(d_floats, h_floatArr, lenFloats*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_ints, h_intArr, lenInts * sizeof(int), cudaMemcpyHostToDevice);
calcFeatures<<<numPrototypes, numCoordinates>>>(d_prototypes, d_floats, lenFloats, d_ints, lenInts, d_activationRadii, d_features);
cudaMemcpy(h_features, d_features, numPrototypes * sizeof(float), cudaMemcpyDeviceToHost);
}
// returns the tile indicies corresponding to the floats and ints
void getFeaturesActivationRadii(int numPrototypes, int numCoordinates, float *prototypes,float *floats, int lenFloats, int *ints, int lenInts, float *activationRadii, int *features) {
for (int i = 0; i < numPrototypes; i++) {
float minValue = INFINITY;
float distance;
float val;
// Do floats
for (int j = 0; j < lenFloats; j++) {
distance = fabs(floats[j] - prototypes[i*lenFloats + j]);
val = distance <= activationRadii[j] ? 1 - distance/activationRadii[j] : 0;
minValue = minValue < val ? minValue : val;
}
// Do ints
for (int j = 0; j < lenInts; j++) {
distance = fabs((float)ints[j] - prototypes[i*lenFloats + j]);
val = distance <= activationRadii[j + lenFloats] ? 1 - distance/activationRadii[j + lenFloats] : 0;
minValue = minValue < val ? minValue : val;
}
// if close enough, activate feature
features[i] = minValue > 0 ? 1 : 0;
}
}
int main(int argc, char ** argv) {
// Use random other than 1
// srand ( time(NULL) );
// not testing ints so set it to length 0
int h_intArr[0] = {};
int lenInts = 0;
int * d_ints;
cudaMalloc((void **) &d_ints, lenInts*sizeof(int));
int maxPrototypes = 2048;
int maxFloats = 1024;
int numTrials = 5000;
int incrementBy = 10;
struct timeval beforeA, afterA, beforeB, afterB;
double sumA = 0.0, avgTimeA = 0.0, minTimeA = INFINITY, maxTimeA = 0.0, sumB = 0.0, avgTimeB = 0.0, minTimeB = INFINITY, maxTimeB = 0.0;
int maxTimeTrialA = 0, maxTimeTrialB = 0, minTimeTrialA = 0, minTimeTrialB;
for (int numPrototypes = 200; numPrototypes < maxPrototypes; numPrototypes*=2){
int features[numPrototypes];
int testFeatures[numPrototypes];
int *d_features;
cudaMalloc((void **) &d_features, numPrototypes*sizeof(int));
for (int lenFloats = 2; lenFloats < maxFloats; lenFloats+=incrementBy){
int numCoordinates = lenFloats + lenInts;
float h_prototypes[numPrototypes*numCoordinates];
// initialize random prototypes
resetTestData(h_prototypes, numPrototypes*lenFloats, h_intArr, lenInts);
float *d_prototypes;
cudaMalloc((void **) &d_prototypes, numPrototypes*numCoordinates*sizeof(float));
cudaMemcpy(d_prototypes, h_prototypes, numPrototypes * numCoordinates * sizeof(float), cudaMemcpyHostToDevice);
// populate the activation radii array, although .2 could be passed in, there could be different radii for different dimensions
float h_activationRadii[lenFloats];
for (int i = 0; i < lenFloats; i++){
h_activationRadii[i] = .2;
}
float *d_activationRadii;
cudaMalloc((void **) &d_activationRadii, lenFloats * sizeof(float));
cudaMemcpy(d_activationRadii, h_activationRadii, lenFloats * sizeof(float), cudaMemcpyHostToDevice);
float h_floatArr[lenFloats];
float *d_floats;
cudaMalloc((void **) &d_floats, lenFloats * sizeof(float));
for (int trial = 0; trial < numTrials; trial++){
// reset float array
resetTestData(h_floatArr, lenFloats, h_intArr, lenInts);
// time the Parallel tiles
gettimeofday(&beforeA , NULL);
parallel_getFeaturesActivationRadii(numPrototypes, numCoordinates, d_prototypes, h_floatArr, d_floats, lenFloats, h_intArr, d_ints, lenInts, d_activationRadii, d_features, testFeatures);
gettimeofday(&afterA , NULL);
// time the Serial tiles
gettimeofday(&beforeB, NULL);
getFeaturesActivationRadii(numPrototypes, numCoordinates, h_prototypes, h_floatArr, lenFloats, h_intArr, lenInts, h_activationRadii, features);
gettimeofday(&afterB, NULL);
// confirm correct calculation
int Errors = 0;
for (int j = 0; j < numPrototypes; j++){
if (features[j] != testFeatures[j]){
printf("Error: Incorrect Arrays\nCorrect Array: ");
printArray(features, numPrototypes);
printf("\nComputed Array: ");
printArray(testFeatures, numPrototypes);
Errors = 1;
break;
}
}
if (Errors){
// if there is an error (differing arrays), free the memory and print debug info
cudaFree(d_floats);
cudaFree(d_prototypes);
cudaFree(d_activationRadii);
cudaFree(d_ints);
cudaFree(d_features);
printf("Error: numPrototypes %d, lenFloats %d, trial %d\n", numPrototypes, lenFloats, trial);
return 1;
}
// compute time comparison
double timeTakenA = time_diff(beforeA , afterA);
sumA += timeTakenA;
if (timeTakenA < minTimeA){
minTimeA = timeTakenA;
minTimeTrialA = trial;
}
if (timeTakenA > maxTimeA){
maxTimeA = timeTakenA;
maxTimeTrialA = trial;
}
//compute time comparison
double timeTakenB = time_diff(beforeB , afterB);
sumB += timeTakenB;
if (timeTakenB < minTimeB){
minTimeB = timeTakenB;
minTimeTrialB = trial;
}
if (timeTakenB > maxTimeB){
maxTimeB = timeTakenB;
maxTimeTrialB = trial;
}
} // trialsloop
cudaFree(d_floats);
cudaFree(d_prototypes);
cudaFree(d_activationRadii);
// compute the average time for each scenario
avgTimeA= sumA/numTrials;
avgTimeB = sumB/numTrials;
if (avgTimeA < avgTimeB){
printf("numPrototypes: %d\t numCoordinates: %d\n", numPrototypes, numCoordinates);
printf("\tParallel\n\t\tMin Time: %.0lf us | Min Trial: %d | Max Time: %.0lf us | Max Trial: %d | Avg time : %.0lf us\n\tSerial\n\t\tMin Time: %.0lf us | Min Trial: %d | Max Time: %.0lf us | Max Trial: %d | Avg time : %.0lf us\n\n", minTimeA, minTimeTrialA, maxTimeA, maxTimeTrialA, avgTimeA, minTimeB, minTimeTrialB, maxTimeB, maxTimeTrialB, avgTimeB);
printf("---------------------------------------------------------\n");
break;
}
} // float loop
cudaFree(features);
cudaFree(testFeatures);
cudaFree(d_features);
} // prototype loop
return 0;
} |
064bd1a2d9f9a42ddc46ecd0e6fc06a8176cb407.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define ITER 4
#define BANK_OFFSET1(n) (n) + (((n) >> 5))
#define BANK_OFFSET(n) (n) + (((n) >> 5))
#define NUM_BLOCKS(length, dim) nextPow2(length) / (2 * dim)
#define ELEM 4
#define TOTAL_THREADS 512
#define TWO_PWR(n) (1 << (n))
extern float toBW(int bytes, float sec);
__global__ void add_kernel(int* device_result, int* device_blocksum)
{
int temp1;
int thid = threadIdx.x;
int N = blockDim.x;
int offset = blockIdx.x * 4 * blockDim.x;
temp1 = device_blocksum[blockIdx.x];
device_result[offset + thid] = device_result[offset + thid] + temp1;
device_result[offset + thid + N] = device_result[offset + thid + N] + temp1;
device_result[offset + thid + 2 * N] = device_result[offset + thid + 2 * N] + temp1;
device_result[offset + thid + 3 * N] = device_result[offset + thid + 3 * N] + temp1;
} | 064bd1a2d9f9a42ddc46ecd0e6fc06a8176cb407.cu | #include "includes.h"
#define ITER 4
#define BANK_OFFSET1(n) (n) + (((n) >> 5))
#define BANK_OFFSET(n) (n) + (((n) >> 5))
#define NUM_BLOCKS(length, dim) nextPow2(length) / (2 * dim)
#define ELEM 4
#define TOTAL_THREADS 512
#define TWO_PWR(n) (1 << (n))
extern float toBW(int bytes, float sec);
__global__ void add_kernel(int* device_result, int* device_blocksum)
{
int temp1;
int thid = threadIdx.x;
int N = blockDim.x;
int offset = blockIdx.x * 4 * blockDim.x;
temp1 = device_blocksum[blockIdx.x];
device_result[offset + thid] = device_result[offset + thid] + temp1;
device_result[offset + thid + N] = device_result[offset + thid + N] + temp1;
device_result[offset + thid + 2 * N] = device_result[offset + thid + 2 * N] + temp1;
device_result[offset + thid + 3 * N] = device_result[offset + thid + 3 * N] + temp1;
} |
b7ed1dcf8b4595f8a4527058c3b98d84e9dfae10.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/core/cuda/common.hpp>
#include "stereo.hpp"
#include "util_opencv.hpp"
#include "costs/absolute_diff.hpp"
#include "costs/census.hpp"
#include "costs/dual.hpp"
#include "dsi.hpp"
#include "wta.hpp"
#include "cost_aggregation.hpp"
#include "aggregations/standard_sgm.hpp"
#ifdef __GNUG__
#include <chrono>
#include <iostream>
static std::chrono::time_point<std::chrono::system_clock> start;
static void timer_set() {
start = std::chrono::high_resolution_clock::now();
}
static void timer_print(const std::string &msg, const bool reset=true) {
auto stop = std::chrono::high_resolution_clock::now();
char buf[24];
snprintf(buf, sizeof(buf), "%5i ms ",
(int) std::chrono::duration_cast<std::chrono::milliseconds>(stop-start).count());
std::cout << buf << msg << "\n" << std::flush;
if (reset) { timer_set(); }
}
#else
static void timer_set() {}
static void timer_print(const std::string &msg, const bool reset=true) {}
#endif
using cv::Mat;
using cv::Size;
using ftl::stereo::aggregations::StandardSGM;
struct StereoADCensusSgm::Impl {
DisparitySpaceImage<unsigned short> dsi;
AbsDiffBT ad_cost;
CensusMatchingCost census_cost;
DualCosts<AbsDiffBT,CensusMatchingCost> cost;
Array2D<unsigned short> cost_min;
Array2D<unsigned short> cost_min_paths;
Array2D<unsigned short> uncertainty;
Array2D<float> confidence;
Array2D<float> disparity_r;
Array2D<uchar> l;
Array2D<uchar> r;
PathAggregator<StandardSGM<typename DualCosts<AbsDiffBT,CensusMatchingCost>::DataType>> aggr;
WinnerTakesAll<DSImage16U,float> wta;
Impl(int width, int height, int min_disp, int max_disp) :
dsi(width, height, min_disp, max_disp),
ad_cost(width, height, min_disp, max_disp),
census_cost(width, height, min_disp, max_disp),
cost(width, height, min_disp, max_disp, ad_cost, census_cost),
cost_min(width, height),
cost_min_paths(width, height),
uncertainty(width, height),
confidence(width, height),
disparity_r(width, height), l(width, height), r(width, height) {}
};
StereoADCensusSgm::StereoADCensusSgm() : impl_(nullptr) {
impl_ = new Impl(0, 0, 0, 0);
}
void StereoADCensusSgm::compute(cv::InputArray l, cv::InputArray r, cv::OutputArray disparity) {
hipSetDevice(0);
if (l.rows() != impl_->dsi.height() || r.cols() != impl_->dsi.width()) {
delete impl_; impl_ = nullptr;
impl_ = new Impl(l.cols(), l.rows(), params.d_min, params.d_max);
}
impl_->dsi.clear();
impl_->uncertainty.toMat().setTo(0);
mat2gray(l, impl_->l);
mat2gray(r, impl_->r);
timer_set();
// CT
impl_->census_cost.set(impl_->l, impl_->r);
impl_->ad_cost.set(impl_->l, impl_->r);
impl_->cost.set();
cudaSafeCall(hipDeviceSynchronize());
if (params.debug) { timer_print("census transform"); }
// cost aggregation
StandardSGM<DualCosts<AbsDiffBT,CensusMatchingCost>::DataType> func = {impl_->cost.data(), impl_->cost_min_paths.data(), params.P1, params.P2};
auto &out = impl_->aggr(func, params.paths);
cudaSafeCall(hipDeviceSynchronize());
if (params.debug) { timer_print("Aggregation"); }
impl_->wta(out, 0);
cudaSafeCall(hipDeviceSynchronize());
if (params.debug) { timer_print("WTA"); }
if (disparity.isGpuMat()) {
impl_->wta.disparity.toGpuMat(disparity.getGpuMatRef());
}
else {
cv::Mat &disparity_ = disparity.getMatRef();
impl_->wta.disparity.toMat(disparity_);
cv::medianBlur(disparity_, disparity_, 3);
}
// confidence estimate
// Drory, A., Haubold, C., Avidan, S., & Hamprecht, F. A. (2014).
// Semi-global matching: A principled derivation in terms of
// message passing. Lecture Notes in Computer Science (Including Subseries
// Lecture Notes in Artificial Intelligence and Lecture Notes in
// Bioinformatics). https://doi.org/10.1007/978-3-319-11752-2_4
//cv::Mat uncertainty;
//uncertainty = impl_->cost_min.toMat() - impl_->cost_min_paths.toMat();
// confidence threshold
// TODO: estimate confidence from uncertainty and plot ROC curve.
//disparity.setTo(0.0f, uncertainty > params.uniqueness);
}
StereoADCensusSgm::~StereoADCensusSgm() {
if (impl_) {
delete impl_;
impl_ = nullptr;
}
}
| b7ed1dcf8b4595f8a4527058c3b98d84e9dfae10.cu | #include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/core/cuda/common.hpp>
#include "stereo.hpp"
#include "util_opencv.hpp"
#include "costs/absolute_diff.hpp"
#include "costs/census.hpp"
#include "costs/dual.hpp"
#include "dsi.hpp"
#include "wta.hpp"
#include "cost_aggregation.hpp"
#include "aggregations/standard_sgm.hpp"
#ifdef __GNUG__
#include <chrono>
#include <iostream>
static std::chrono::time_point<std::chrono::system_clock> start;
static void timer_set() {
start = std::chrono::high_resolution_clock::now();
}
static void timer_print(const std::string &msg, const bool reset=true) {
auto stop = std::chrono::high_resolution_clock::now();
char buf[24];
snprintf(buf, sizeof(buf), "%5i ms ",
(int) std::chrono::duration_cast<std::chrono::milliseconds>(stop-start).count());
std::cout << buf << msg << "\n" << std::flush;
if (reset) { timer_set(); }
}
#else
static void timer_set() {}
static void timer_print(const std::string &msg, const bool reset=true) {}
#endif
using cv::Mat;
using cv::Size;
using ftl::stereo::aggregations::StandardSGM;
struct StereoADCensusSgm::Impl {
DisparitySpaceImage<unsigned short> dsi;
AbsDiffBT ad_cost;
CensusMatchingCost census_cost;
DualCosts<AbsDiffBT,CensusMatchingCost> cost;
Array2D<unsigned short> cost_min;
Array2D<unsigned short> cost_min_paths;
Array2D<unsigned short> uncertainty;
Array2D<float> confidence;
Array2D<float> disparity_r;
Array2D<uchar> l;
Array2D<uchar> r;
PathAggregator<StandardSGM<typename DualCosts<AbsDiffBT,CensusMatchingCost>::DataType>> aggr;
WinnerTakesAll<DSImage16U,float> wta;
Impl(int width, int height, int min_disp, int max_disp) :
dsi(width, height, min_disp, max_disp),
ad_cost(width, height, min_disp, max_disp),
census_cost(width, height, min_disp, max_disp),
cost(width, height, min_disp, max_disp, ad_cost, census_cost),
cost_min(width, height),
cost_min_paths(width, height),
uncertainty(width, height),
confidence(width, height),
disparity_r(width, height), l(width, height), r(width, height) {}
};
StereoADCensusSgm::StereoADCensusSgm() : impl_(nullptr) {
impl_ = new Impl(0, 0, 0, 0);
}
void StereoADCensusSgm::compute(cv::InputArray l, cv::InputArray r, cv::OutputArray disparity) {
cudaSetDevice(0);
if (l.rows() != impl_->dsi.height() || r.cols() != impl_->dsi.width()) {
delete impl_; impl_ = nullptr;
impl_ = new Impl(l.cols(), l.rows(), params.d_min, params.d_max);
}
impl_->dsi.clear();
impl_->uncertainty.toMat().setTo(0);
mat2gray(l, impl_->l);
mat2gray(r, impl_->r);
timer_set();
// CT
impl_->census_cost.set(impl_->l, impl_->r);
impl_->ad_cost.set(impl_->l, impl_->r);
impl_->cost.set();
cudaSafeCall(cudaDeviceSynchronize());
if (params.debug) { timer_print("census transform"); }
// cost aggregation
StandardSGM<DualCosts<AbsDiffBT,CensusMatchingCost>::DataType> func = {impl_->cost.data(), impl_->cost_min_paths.data(), params.P1, params.P2};
auto &out = impl_->aggr(func, params.paths);
cudaSafeCall(cudaDeviceSynchronize());
if (params.debug) { timer_print("Aggregation"); }
impl_->wta(out, 0);
cudaSafeCall(cudaDeviceSynchronize());
if (params.debug) { timer_print("WTA"); }
if (disparity.isGpuMat()) {
impl_->wta.disparity.toGpuMat(disparity.getGpuMatRef());
}
else {
cv::Mat &disparity_ = disparity.getMatRef();
impl_->wta.disparity.toMat(disparity_);
cv::medianBlur(disparity_, disparity_, 3);
}
// confidence estimate
// Drory, A., Haubold, C., Avidan, S., & Hamprecht, F. A. (2014).
// Semi-global matching: A principled derivation in terms of
// message passing. Lecture Notes in Computer Science (Including Subseries
// Lecture Notes in Artificial Intelligence and Lecture Notes in
// Bioinformatics). https://doi.org/10.1007/978-3-319-11752-2_4
//cv::Mat uncertainty;
//uncertainty = impl_->cost_min.toMat() - impl_->cost_min_paths.toMat();
// confidence threshold
// TODO: estimate confidence from uncertainty and plot ROC curve.
//disparity.setTo(0.0f, uncertainty > params.uniqueness);
}
StereoADCensusSgm::~StereoADCensusSgm() {
if (impl_) {
delete impl_;
impl_ = nullptr;
}
}
|
4cc9c671ae0c66c4abee1c34e3b0972e8c02755a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by milinda on 8/10/18.
//
#include "rhs_cuda.cuh"
namespace cuda
{
void computeRHS(double **unzipVarsRHS, const double **uZipVars,const ot::Block* dendroBlockList,unsigned int numBlocks,const cuda::BSSNComputeParams* bssnPars,dim3 blockDim,const Point & pt_min, const Point & pt_max,unsigned int numStreams,unsigned int device)
{
cuda::profile::t_overall.start();
if(numStreams==0)
{
std::cout<<"[Error]: "<<__func__<<" numStreams "<<numStreams<<" should at least be 1 (synchronous transfer) "<<std::endl;
return ;
}
// initialize the input data
unsigned int offset,bflag;
unsigned int sz[3];
double dx,dy,dz;
double ptmin[3];//={-1.0,-1.0,-1.0};
double ptmax[3];
double hx[3];
unsigned int c_in=0;
unsigned int c_ex=0;
for(unsigned int blk=0;blk<numBlocks;blk++)
{
if(dendroBlockList[blk].getBlkNodeFlag()==0)
c_in++;
else
c_ex++;
}
const unsigned int NUM_GPU_DENDRO_BLOCKS=c_in;
const unsigned int NUM_CPU_DENDRO_BLOCKS=c_ex;
const unsigned int BSSN_NUM_VARS=24;
const unsigned int BSSN_CONSTRAINT_NUM_VARS=6;
const unsigned int UNZIP_DOF_SZ=dendroBlockList[numBlocks-1].getOffset()+ dendroBlockList[numBlocks-1].getAlignedBlockSz();
//get GPU information.
// assumes the if there are multiple gpus per node all have the same specification.
hipSetDevice(device);
cuda::__CUDA_DEVICE_PROPERTIES=getGPUDeviceInfo(device);
// device properties for the host
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,device);
const double GPU_BLOCK_SHARED_MEM_UTIL=0.8;
ot::Block* blkListCPU=new ot::Block[NUM_CPU_DENDRO_BLOCKS];
ot::Block* blkListGPU=new ot::Block[NUM_GPU_DENDRO_BLOCKS];
cuda::_Block* cudaBlkList=new cuda::_Block[NUM_GPU_DENDRO_BLOCKS];
c_in=0;
c_ex=0;
for(unsigned int blk=0;blk<numBlocks;blk++)
{
bflag=dendroBlockList[blk].getBlkNodeFlag();
if(bflag==0)
{
offset=dendroBlockList[blk].getOffset();
sz[0]=dendroBlockList[blk].getAllocationSzX();
sz[1]=dendroBlockList[blk].getAllocationSzY();
sz[2]=dendroBlockList[blk].getAllocationSzZ();
dx = dendroBlockList[blk].computeDx(pt_min,pt_max);
dy = dendroBlockList[blk].computeDy(pt_min,pt_max);
dz = dendroBlockList[blk].computeDz(pt_min,pt_max);
hx[0]=dx;
hx[1]=dy;
hx[2]=dz;
ptmin[0]=GRIDX_TO_X(dendroBlockList[blk].getBlockNode().minX())-3*dx;
ptmin[1]=GRIDY_TO_Y(dendroBlockList[blk].getBlockNode().minY())-3*dy;
ptmin[2]=GRIDZ_TO_Z(dendroBlockList[blk].getBlockNode().minZ())-3*dz;
ptmax[0]=GRIDX_TO_X(dendroBlockList[blk].getBlockNode().maxX())+3*dx;
ptmax[1]=GRIDY_TO_Y(dendroBlockList[blk].getBlockNode().maxY())+3*dy;
ptmax[2]=GRIDZ_TO_Z(dendroBlockList[blk].getBlockNode().maxZ())+3*dz;
cudaBlkList[c_in]=cuda::_Block((const double *)ptmin,(const double *)ptmax,offset,bflag,(const unsigned int*)sz, (const double *)hx);
blkListGPU[c_in]=dendroBlockList[blk];
c_in++;
}else {
blkListCPU[c_ex]=dendroBlockList[blk];
c_ex++;
}
}
unsigned int maxBlkSz=0;
for(unsigned int blk=0;blk<NUM_GPU_DENDRO_BLOCKS;blk++)
{
if(maxBlkSz<cudaBlkList[blk].getAlignedBlockSz())
maxBlkSz=cudaBlkList[blk].getAlignedBlockSz();
}
const unsigned int derivSz=(maxBlkSz);
cuda::__DENDRO_BLK_MAX_SZ=cuda::copyValueToDevice(&derivSz);
const unsigned int numSM=deviceProp.multiProcessorCount;
//std::cout<<"deriv alloc begin"<<std::endl;
cuda::profile::t_cudaMalloc_derivs.start();
cuda::MemoryDerivs derivWorkSpace;
derivWorkSpace.allocateDerivMemory(maxBlkSz,numSM,numStreams);
CUDA_CHECK_ERROR();
cuda::__BSSN_DERIV_WORKSPACE=cuda::copyValueToDevice(&derivWorkSpace);
CUDA_CHECK_ERROR();
cuda::profile::t_cudaMalloc_derivs.stop();
if(numStreams==1)
{// sync case
// computes 1D grid block
dim3 gridDim;
unsigned int *blockMap=NULL;
cuda::computeDendroBlockToGPUMap(blkListGPU,NUM_GPU_DENDRO_BLOCKS,blockMap,gridDim);
cuda::profile::t_H2D_Comm.start();
const unsigned int NUM_GPU_BLOCKS=((gridDim.x)*(gridDim.y)*(gridDim.z));
//send blocks to the gpu
cuda::__DENDRO_BLOCK_LIST=cuda::copyArrayToDevice(cudaBlkList,NUM_GPU_DENDRO_BLOCKS);
cuda::__DENDRO_NUM_BLOCKS=cuda::copyValueToDevice(&NUM_GPU_DENDRO_BLOCKS);
cuda::__NUM_GPU_BLOCKS=cuda::copyValueToDevice(&NUM_GPU_BLOCKS);
cuda::__GPU_BLOCK_MAP=cuda::copyArrayToDevice(blockMap,2*NUM_GPU_BLOCKS);
cuda::__BSSN_NUM_VARS=cuda::copyValueToDevice(&BSSN_NUM_VARS);
cuda::__BSSN_CONSTRAINT_NUM_VARS=cuda::copyValueToDevice(&BSSN_CONSTRAINT_NUM_VARS);
cuda::__GPU_BLOCK_SHARED_MEM_UTIL=cuda::copyValueToDevice(&GPU_BLOCK_SHARED_MEM_UTIL);
//allocate memory for unzip vectors
cuda::__UNZIP_INPUT=cuda::alloc2DCudaArray<double>(uZipVars,BSSN_NUM_VARS,UNZIP_DOF_SZ);
cuda::__UNZIP_OUTPUT=cuda::alloc2DCudaArray<double>(BSSN_NUM_VARS,UNZIP_DOF_SZ);
cuda::__BSSN_COMPUTE_PARMS=cuda::copyValueToDevice(&(*bssnPars));
cuda::profile::t_H2D_Comm.stop();
cuda::profile::t_rhs_total.start();
cuda::profile::t_rhs_gpu.start();
hipLaunchKernelGGL(( cuda::__computeBSSNRHS), dim3(gridDim),dim3(blockDim), 0, 0, cuda::__UNZIP_OUTPUT,(const double**)cuda::__UNZIP_INPUT,cuda::__BSSN_DERIV_WORKSPACE,cuda::__DENDRO_BLOCK_LIST,cuda::__GPU_BLOCK_MAP,cuda::__BSSN_COMPUTE_PARMS,cuda::__CUDA_DEVICE_PROPERTIES,0);
hipDeviceSynchronize();
CUDA_CHECK_ERROR();
cuda::profile::t_rhs_gpu.stop();
cuda::profile::t_D2H_Comm.start();
cuda::copy2DArrayToHost(unzipVarsRHS,(const double**)cuda::__UNZIP_OUTPUT,BSSN_NUM_VARS,UNZIP_DOF_SZ);
cuda::profile::t_D2H_Comm.stop();
cuda::profile::t_rhs_cpu.start();
for(unsigned int blk=0;blk<NUM_CPU_DENDRO_BLOCKS;blk++)
{
offset=blkListCPU[blk].getOffset();
sz[0]=blkListCPU[blk].getAllocationSzX();
sz[1]=blkListCPU[blk].getAllocationSzY();
sz[2]=blkListCPU[blk].getAllocationSzZ();
bflag=blkListCPU[blk].getBlkNodeFlag();
dx=blkListCPU[blk].computeDx(pt_min,pt_max);
dy=blkListCPU[blk].computeDy(pt_min,pt_max);
dz=blkListCPU[blk].computeDz(pt_min,pt_max);
ptmin[0]=GRIDX_TO_X(blkListCPU[blk].getBlockNode().minX())-3*dx;
ptmin[1]=GRIDY_TO_Y(blkListCPU[blk].getBlockNode().minY())-3*dy;
ptmin[2]=GRIDZ_TO_Z(blkListCPU[blk].getBlockNode().minZ())-3*dz;
ptmax[0]=GRIDX_TO_X(blkListCPU[blk].getBlockNode().maxX())+3*dx;
ptmax[1]=GRIDY_TO_Y(blkListCPU[blk].getBlockNode().maxY())+3*dy;
ptmax[2]=GRIDZ_TO_Z(blkListCPU[blk].getBlockNode().maxZ())+3*dz;
bssnrhs(unzipVarsRHS, (const double **)uZipVars, offset, ptmin, ptmax, sz, bflag);
}
cuda::profile::t_rhs_cpu.stop();
cuda::profile::t_rhs_total.stop();
delete [] blockMap;
}else {
hipStream_t streams[numStreams];
// send counts and offset for asynctransfer, all the counts and offsets are based on the how we transfer the data blocks to the gpu.
unsigned int sendBlockCount[numStreams];
unsigned int sendBlockOffset[numStreams];
unsigned int sendUnzipCount[numStreams];
unsigned int sendUnzipOffset[numStreams];
unsigned int gridBlockCount[numStreams];
unsigned int gridBlockOffset[numStreams];
for(unsigned int i=0;i<numStreams;i++)
{
hipStreamCreate(&streams[i]);
CUDA_CHECK_ERROR();
}
for(unsigned int i=0;i<numStreams;i++)
{
sendBlockCount[i]=(((i+1)*NUM_GPU_DENDRO_BLOCKS)/numStreams)-(((i)*NUM_GPU_DENDRO_BLOCKS)/numStreams);
sendUnzipCount[i]=0;
for(unsigned int blk=(((i)*NUM_GPU_DENDRO_BLOCKS)/numStreams);blk<(((i+1)*NUM_GPU_DENDRO_BLOCKS)/numStreams);blk++)
sendUnzipCount[i]+=blkListGPU[blk].getAlignedBlockSz();
}
sendBlockOffset[0]=0;
sendUnzipOffset[0]=0;
for(unsigned int i=1;i<numStreams;i++)
{
sendBlockOffset[i]=sendBlockOffset[i-1]+sendBlockCount[i-1];
sendUnzipOffset[i]=sendUnzipOffset[i-1]+sendUnzipCount[i-1];
}
unsigned int ** blockMap=new unsigned int*[numStreams];
dim3 gridDim[numStreams];
for(unsigned int i=0;i<numStreams;i++)
{
cuda::computeDendroBlockToGPUMap(blkListGPU+sendBlockOffset[i],sendBlockCount[i],blockMap[i],gridDim[i]);
gridBlockCount[i]=(gridDim[i].x*gridDim[i].y*gridDim[i].z);
// modify for map for the global blk list index.
for(unsigned int gblk=0;gblk<gridBlockCount[i];gblk++)
{
blockMap[i][2*gblk]+=sendBlockOffset[i];
blockMap[i][2*gblk+1]+=sendBlockOffset[i];
}
}
gridBlockOffset[0]=0;
for(unsigned int i=1;i<numStreams;i++)
gridBlockOffset[i]=gridBlockOffset[i-1]+gridBlockCount[i-1];
const unsigned int NUM_GPU_BLOCKS=gridBlockOffset[numStreams-1]+gridBlockCount[numStreams-1];
double ** unZipInputHost=NULL;
double ** unZipOutputHost=NULL;
cuda::profile::t_H2D_Comm.start();
cuda::__DENDRO_BLOCK_LIST=cuda::alloc1DCudaArray<_Block>(NUM_GPU_DENDRO_BLOCKS);
cuda::__GPU_BLOCK_MAP=cuda::alloc1DCudaArray<unsigned int>(2*NUM_GPU_BLOCKS);
cuda::__UNZIP_INPUT=cuda::alloc2DCudaArray<double>(unZipInputHost,BSSN_NUM_VARS,UNZIP_DOF_SZ);
cuda::__UNZIP_OUTPUT=cuda::alloc2DCudaArray<double>(unZipOutputHost,BSSN_NUM_VARS,UNZIP_DOF_SZ);
// all the blocking small sends
cuda::__DENDRO_NUM_BLOCKS=cuda::copyValueToDevice(&NUM_GPU_DENDRO_BLOCKS);
cuda::__NUM_GPU_BLOCKS=cuda::copyValueToDevice(&NUM_GPU_BLOCKS);
cuda::__BSSN_NUM_VARS=cuda::copyValueToDevice(&BSSN_NUM_VARS);
cuda::__BSSN_CONSTRAINT_NUM_VARS=cuda::copyValueToDevice(&BSSN_CONSTRAINT_NUM_VARS);
cuda::__BSSN_COMPUTE_PARMS=cuda::copyValueToDevice(&(*bssnPars));
cuda::profile::t_H2D_Comm.stop();
cuda::profile::t_rhs_total.start();
for(unsigned int i=0;i<numStreams;i++)
{
for (unsigned int var = 0; var < BSSN_NUM_VARS; var++)
cuda::copyArrayToDeviceAsync(uZipVars[var] + sendUnzipOffset[i],unZipInputHost[var] + sendUnzipOffset[i], sendUnzipCount[i], streams[i]);
cuda::copyArrayToDeviceAsync(cudaBlkList + sendBlockOffset[i], cuda::__DENDRO_BLOCK_LIST + sendBlockOffset[i], sendBlockCount[i], streams[i]);
cuda::copyArrayToDeviceAsync(blockMap[i], cuda::__GPU_BLOCK_MAP + 2*gridBlockOffset[i], 2*gridBlockCount[i],streams[i]);
}
for(unsigned int i=0;i<numStreams;i++)
{
hipLaunchKernelGGL(( cuda::__computeBSSNRHS), dim3(gridDim[i]), dim3(blockDim), 0, streams[i] , cuda::__UNZIP_OUTPUT,(const double**)cuda::__UNZIP_INPUT,cuda::__BSSN_DERIV_WORKSPACE,cuda::__DENDRO_BLOCK_LIST,cuda::__GPU_BLOCK_MAP + 2*gridBlockOffset[i],cuda::__BSSN_COMPUTE_PARMS,cuda::__CUDA_DEVICE_PROPERTIES,i);
CUDA_CHECK_ERROR();
}
for(unsigned int i=0;i<numStreams;i++)
{
//D2H
for (unsigned int var = 0; var < BSSN_NUM_VARS; var++)
cuda::copyArrayToHostAsync(unzipVarsRHS[var]+sendUnzipOffset[i],unZipOutputHost[var]+sendUnzipOffset[i],sendUnzipCount[i],streams[i]);
}
cuda::profile::t_rhs_cpu.start();
// process cpu blocks before sync
double ** unZipRHS_CPU=new double*[BSSN_NUM_VARS];
for(unsigned int var=0;var<BSSN_NUM_VARS;var++)
unZipRHS_CPU[var]=new double[UNZIP_DOF_SZ];
for(unsigned int blk=0;blk<NUM_CPU_DENDRO_BLOCKS;blk++)
{
offset=blkListCPU[blk].getOffset();
sz[0]=blkListCPU[blk].getAllocationSzX();
sz[1]=blkListCPU[blk].getAllocationSzY();
sz[2]=blkListCPU[blk].getAllocationSzZ();
bflag=blkListCPU[blk].getBlkNodeFlag();
dx=blkListCPU[blk].computeDx(pt_min,pt_max);
dy=blkListCPU[blk].computeDy(pt_min,pt_max);
dz=blkListCPU[blk].computeDz(pt_min,pt_max);
ptmin[0]=GRIDX_TO_X(blkListCPU[blk].getBlockNode().minX())-3*dx;
ptmin[1]=GRIDY_TO_Y(blkListCPU[blk].getBlockNode().minY())-3*dy;
ptmin[2]=GRIDZ_TO_Z(blkListCPU[blk].getBlockNode().minZ())-3*dz;
ptmax[0]=GRIDX_TO_X(blkListCPU[blk].getBlockNode().maxX())+3*dx;
ptmax[1]=GRIDY_TO_Y(blkListCPU[blk].getBlockNode().maxY())+3*dy;
ptmax[2]=GRIDZ_TO_Z(blkListCPU[blk].getBlockNode().maxZ())+3*dz;
bssnrhs(unZipRHS_CPU, (const double **)uZipVars, offset, ptmin, ptmax, sz, bflag);
}
cuda::profile::t_rhs_cpu.stop();
hipDeviceSynchronize();
CUDA_CHECK_ERROR();
// merge cpu and gpu results.
for(unsigned int blk=0;blk<NUM_CPU_DENDRO_BLOCKS;blk++)
{
offset = blkListCPU[blk].getOffset();
sz[0] = blkListCPU[blk].getAllocationSzX();
sz[1] = blkListCPU[blk].getAllocationSzY();
sz[2] = blkListCPU[blk].getAllocationSzZ();
for(unsigned int var=0;var<BSSN_NUM_VARS;var++)
for(unsigned int node=0;node<(sz[0]*sz[1]*sz[2]);node++)
unzipVarsRHS[var][offset+node]=unZipRHS_CPU[var][offset+node];
}
cuda::profile::t_rhs_total.stop();
for(unsigned int var=0;var<BSSN_NUM_VARS;var++)
delete [] unZipRHS_CPU[var];
delete [] unZipRHS_CPU;
for(unsigned int i=0;i<numStreams;i++)
delete [] blockMap[i];
delete [] blockMap;
}
cuda::profile::t_cudaMalloc_derivs.start();
derivWorkSpace.deallocateDerivMemory();
CUDA_CHECK_ERROR();
cuda::profile::t_cudaMalloc_derivs.stop();
hipFree(cuda::__CUDA_DEVICE_PROPERTIES);
hipFree(cuda::__DENDRO_BLOCK_LIST);
hipFree(cuda::__DENDRO_NUM_BLOCKS);
hipFree(cuda::__NUM_GPU_BLOCKS);
hipFree(cuda::__GPU_BLOCK_MAP);
hipFree(cuda::__BSSN_NUM_VARS);
hipFree(cuda::__BSSN_CONSTRAINT_NUM_VARS);
hipFree(cuda::__GPU_BLOCK_SHARED_MEM_UTIL);
hipFree(cuda::__DENDRO_BLK_MAX_SZ);
hipFree(cuda::__BSSN_DERIV_WORKSPACE);
hipFree(cuda::__BSSN_COMPUTE_PARMS);
cuda::dealloc2DCudaArray(cuda::__UNZIP_INPUT,BSSN_NUM_VARS);
cuda::dealloc2DCudaArray(cuda::__UNZIP_OUTPUT,BSSN_NUM_VARS);
delete [] blkListGPU;
delete [] blkListCPU;
delete [] cudaBlkList;
cuda::profile::t_overall.stop();
}
}
| 4cc9c671ae0c66c4abee1c34e3b0972e8c02755a.cu | //
// Created by milinda on 8/10/18.
//
#include "rhs_cuda.cuh"
namespace cuda
{
void computeRHS(double **unzipVarsRHS, const double **uZipVars,const ot::Block* dendroBlockList,unsigned int numBlocks,const cuda::BSSNComputeParams* bssnPars,dim3 blockDim,const Point & pt_min, const Point & pt_max,unsigned int numStreams,unsigned int device)
{
cuda::profile::t_overall.start();
if(numStreams==0)
{
std::cout<<"[Error]: "<<__func__<<" numStreams "<<numStreams<<" should at least be 1 (synchronous transfer) "<<std::endl;
return ;
}
// initialize the input data
unsigned int offset,bflag;
unsigned int sz[3];
double dx,dy,dz;
double ptmin[3];//={-1.0,-1.0,-1.0};
double ptmax[3];
double hx[3];
unsigned int c_in=0;
unsigned int c_ex=0;
for(unsigned int blk=0;blk<numBlocks;blk++)
{
if(dendroBlockList[blk].getBlkNodeFlag()==0)
c_in++;
else
c_ex++;
}
const unsigned int NUM_GPU_DENDRO_BLOCKS=c_in;
const unsigned int NUM_CPU_DENDRO_BLOCKS=c_ex;
const unsigned int BSSN_NUM_VARS=24;
const unsigned int BSSN_CONSTRAINT_NUM_VARS=6;
const unsigned int UNZIP_DOF_SZ=dendroBlockList[numBlocks-1].getOffset()+ dendroBlockList[numBlocks-1].getAlignedBlockSz();
//get GPU information.
// assumes the if there are multiple gpus per node all have the same specification.
cudaSetDevice(device);
cuda::__CUDA_DEVICE_PROPERTIES=getGPUDeviceInfo(device);
// device properties for the host
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,device);
const double GPU_BLOCK_SHARED_MEM_UTIL=0.8;
ot::Block* blkListCPU=new ot::Block[NUM_CPU_DENDRO_BLOCKS];
ot::Block* blkListGPU=new ot::Block[NUM_GPU_DENDRO_BLOCKS];
cuda::_Block* cudaBlkList=new cuda::_Block[NUM_GPU_DENDRO_BLOCKS];
c_in=0;
c_ex=0;
for(unsigned int blk=0;blk<numBlocks;blk++)
{
bflag=dendroBlockList[blk].getBlkNodeFlag();
if(bflag==0)
{
offset=dendroBlockList[blk].getOffset();
sz[0]=dendroBlockList[blk].getAllocationSzX();
sz[1]=dendroBlockList[blk].getAllocationSzY();
sz[2]=dendroBlockList[blk].getAllocationSzZ();
dx = dendroBlockList[blk].computeDx(pt_min,pt_max);
dy = dendroBlockList[blk].computeDy(pt_min,pt_max);
dz = dendroBlockList[blk].computeDz(pt_min,pt_max);
hx[0]=dx;
hx[1]=dy;
hx[2]=dz;
ptmin[0]=GRIDX_TO_X(dendroBlockList[blk].getBlockNode().minX())-3*dx;
ptmin[1]=GRIDY_TO_Y(dendroBlockList[blk].getBlockNode().minY())-3*dy;
ptmin[2]=GRIDZ_TO_Z(dendroBlockList[blk].getBlockNode().minZ())-3*dz;
ptmax[0]=GRIDX_TO_X(dendroBlockList[blk].getBlockNode().maxX())+3*dx;
ptmax[1]=GRIDY_TO_Y(dendroBlockList[blk].getBlockNode().maxY())+3*dy;
ptmax[2]=GRIDZ_TO_Z(dendroBlockList[blk].getBlockNode().maxZ())+3*dz;
cudaBlkList[c_in]=cuda::_Block((const double *)ptmin,(const double *)ptmax,offset,bflag,(const unsigned int*)sz, (const double *)hx);
blkListGPU[c_in]=dendroBlockList[blk];
c_in++;
}else {
blkListCPU[c_ex]=dendroBlockList[blk];
c_ex++;
}
}
unsigned int maxBlkSz=0;
for(unsigned int blk=0;blk<NUM_GPU_DENDRO_BLOCKS;blk++)
{
if(maxBlkSz<cudaBlkList[blk].getAlignedBlockSz())
maxBlkSz=cudaBlkList[blk].getAlignedBlockSz();
}
const unsigned int derivSz=(maxBlkSz);
cuda::__DENDRO_BLK_MAX_SZ=cuda::copyValueToDevice(&derivSz);
const unsigned int numSM=deviceProp.multiProcessorCount;
//std::cout<<"deriv alloc begin"<<std::endl;
cuda::profile::t_cudaMalloc_derivs.start();
cuda::MemoryDerivs derivWorkSpace;
derivWorkSpace.allocateDerivMemory(maxBlkSz,numSM,numStreams);
CUDA_CHECK_ERROR();
cuda::__BSSN_DERIV_WORKSPACE=cuda::copyValueToDevice(&derivWorkSpace);
CUDA_CHECK_ERROR();
cuda::profile::t_cudaMalloc_derivs.stop();
if(numStreams==1)
{// sync case
// computes 1D grid block
dim3 gridDim;
unsigned int *blockMap=NULL;
cuda::computeDendroBlockToGPUMap(blkListGPU,NUM_GPU_DENDRO_BLOCKS,blockMap,gridDim);
cuda::profile::t_H2D_Comm.start();
const unsigned int NUM_GPU_BLOCKS=((gridDim.x)*(gridDim.y)*(gridDim.z));
//send blocks to the gpu
cuda::__DENDRO_BLOCK_LIST=cuda::copyArrayToDevice(cudaBlkList,NUM_GPU_DENDRO_BLOCKS);
cuda::__DENDRO_NUM_BLOCKS=cuda::copyValueToDevice(&NUM_GPU_DENDRO_BLOCKS);
cuda::__NUM_GPU_BLOCKS=cuda::copyValueToDevice(&NUM_GPU_BLOCKS);
cuda::__GPU_BLOCK_MAP=cuda::copyArrayToDevice(blockMap,2*NUM_GPU_BLOCKS);
cuda::__BSSN_NUM_VARS=cuda::copyValueToDevice(&BSSN_NUM_VARS);
cuda::__BSSN_CONSTRAINT_NUM_VARS=cuda::copyValueToDevice(&BSSN_CONSTRAINT_NUM_VARS);
cuda::__GPU_BLOCK_SHARED_MEM_UTIL=cuda::copyValueToDevice(&GPU_BLOCK_SHARED_MEM_UTIL);
//allocate memory for unzip vectors
cuda::__UNZIP_INPUT=cuda::alloc2DCudaArray<double>(uZipVars,BSSN_NUM_VARS,UNZIP_DOF_SZ);
cuda::__UNZIP_OUTPUT=cuda::alloc2DCudaArray<double>(BSSN_NUM_VARS,UNZIP_DOF_SZ);
cuda::__BSSN_COMPUTE_PARMS=cuda::copyValueToDevice(&(*bssnPars));
cuda::profile::t_H2D_Comm.stop();
cuda::profile::t_rhs_total.start();
cuda::profile::t_rhs_gpu.start();
cuda::__computeBSSNRHS<<<gridDim,blockDim>>>(cuda::__UNZIP_OUTPUT,(const double**)cuda::__UNZIP_INPUT,cuda::__BSSN_DERIV_WORKSPACE,cuda::__DENDRO_BLOCK_LIST,cuda::__GPU_BLOCK_MAP,cuda::__BSSN_COMPUTE_PARMS,cuda::__CUDA_DEVICE_PROPERTIES,0);
cudaDeviceSynchronize();
CUDA_CHECK_ERROR();
cuda::profile::t_rhs_gpu.stop();
cuda::profile::t_D2H_Comm.start();
cuda::copy2DArrayToHost(unzipVarsRHS,(const double**)cuda::__UNZIP_OUTPUT,BSSN_NUM_VARS,UNZIP_DOF_SZ);
cuda::profile::t_D2H_Comm.stop();
cuda::profile::t_rhs_cpu.start();
for(unsigned int blk=0;blk<NUM_CPU_DENDRO_BLOCKS;blk++)
{
offset=blkListCPU[blk].getOffset();
sz[0]=blkListCPU[blk].getAllocationSzX();
sz[1]=blkListCPU[blk].getAllocationSzY();
sz[2]=blkListCPU[blk].getAllocationSzZ();
bflag=blkListCPU[blk].getBlkNodeFlag();
dx=blkListCPU[blk].computeDx(pt_min,pt_max);
dy=blkListCPU[blk].computeDy(pt_min,pt_max);
dz=blkListCPU[blk].computeDz(pt_min,pt_max);
ptmin[0]=GRIDX_TO_X(blkListCPU[blk].getBlockNode().minX())-3*dx;
ptmin[1]=GRIDY_TO_Y(blkListCPU[blk].getBlockNode().minY())-3*dy;
ptmin[2]=GRIDZ_TO_Z(blkListCPU[blk].getBlockNode().minZ())-3*dz;
ptmax[0]=GRIDX_TO_X(blkListCPU[blk].getBlockNode().maxX())+3*dx;
ptmax[1]=GRIDY_TO_Y(blkListCPU[blk].getBlockNode().maxY())+3*dy;
ptmax[2]=GRIDZ_TO_Z(blkListCPU[blk].getBlockNode().maxZ())+3*dz;
bssnrhs(unzipVarsRHS, (const double **)uZipVars, offset, ptmin, ptmax, sz, bflag);
}
cuda::profile::t_rhs_cpu.stop();
cuda::profile::t_rhs_total.stop();
delete [] blockMap;
}else {
cudaStream_t streams[numStreams];
// send counts and offset for asynctransfer, all the counts and offsets are based on the how we transfer the data blocks to the gpu.
unsigned int sendBlockCount[numStreams];
unsigned int sendBlockOffset[numStreams];
unsigned int sendUnzipCount[numStreams];
unsigned int sendUnzipOffset[numStreams];
unsigned int gridBlockCount[numStreams];
unsigned int gridBlockOffset[numStreams];
for(unsigned int i=0;i<numStreams;i++)
{
cudaStreamCreate(&streams[i]);
CUDA_CHECK_ERROR();
}
for(unsigned int i=0;i<numStreams;i++)
{
sendBlockCount[i]=(((i+1)*NUM_GPU_DENDRO_BLOCKS)/numStreams)-(((i)*NUM_GPU_DENDRO_BLOCKS)/numStreams);
sendUnzipCount[i]=0;
for(unsigned int blk=(((i)*NUM_GPU_DENDRO_BLOCKS)/numStreams);blk<(((i+1)*NUM_GPU_DENDRO_BLOCKS)/numStreams);blk++)
sendUnzipCount[i]+=blkListGPU[blk].getAlignedBlockSz();
}
sendBlockOffset[0]=0;
sendUnzipOffset[0]=0;
for(unsigned int i=1;i<numStreams;i++)
{
sendBlockOffset[i]=sendBlockOffset[i-1]+sendBlockCount[i-1];
sendUnzipOffset[i]=sendUnzipOffset[i-1]+sendUnzipCount[i-1];
}
unsigned int ** blockMap=new unsigned int*[numStreams];
dim3 gridDim[numStreams];
for(unsigned int i=0;i<numStreams;i++)
{
cuda::computeDendroBlockToGPUMap(blkListGPU+sendBlockOffset[i],sendBlockCount[i],blockMap[i],gridDim[i]);
gridBlockCount[i]=(gridDim[i].x*gridDim[i].y*gridDim[i].z);
// modify for map for the global blk list index.
for(unsigned int gblk=0;gblk<gridBlockCount[i];gblk++)
{
blockMap[i][2*gblk]+=sendBlockOffset[i];
blockMap[i][2*gblk+1]+=sendBlockOffset[i];
}
}
gridBlockOffset[0]=0;
for(unsigned int i=1;i<numStreams;i++)
gridBlockOffset[i]=gridBlockOffset[i-1]+gridBlockCount[i-1];
const unsigned int NUM_GPU_BLOCKS=gridBlockOffset[numStreams-1]+gridBlockCount[numStreams-1];
double ** unZipInputHost=NULL;
double ** unZipOutputHost=NULL;
cuda::profile::t_H2D_Comm.start();
cuda::__DENDRO_BLOCK_LIST=cuda::alloc1DCudaArray<_Block>(NUM_GPU_DENDRO_BLOCKS);
cuda::__GPU_BLOCK_MAP=cuda::alloc1DCudaArray<unsigned int>(2*NUM_GPU_BLOCKS);
cuda::__UNZIP_INPUT=cuda::alloc2DCudaArray<double>(unZipInputHost,BSSN_NUM_VARS,UNZIP_DOF_SZ);
cuda::__UNZIP_OUTPUT=cuda::alloc2DCudaArray<double>(unZipOutputHost,BSSN_NUM_VARS,UNZIP_DOF_SZ);
// all the blocking small sends
cuda::__DENDRO_NUM_BLOCKS=cuda::copyValueToDevice(&NUM_GPU_DENDRO_BLOCKS);
cuda::__NUM_GPU_BLOCKS=cuda::copyValueToDevice(&NUM_GPU_BLOCKS);
cuda::__BSSN_NUM_VARS=cuda::copyValueToDevice(&BSSN_NUM_VARS);
cuda::__BSSN_CONSTRAINT_NUM_VARS=cuda::copyValueToDevice(&BSSN_CONSTRAINT_NUM_VARS);
cuda::__BSSN_COMPUTE_PARMS=cuda::copyValueToDevice(&(*bssnPars));
cuda::profile::t_H2D_Comm.stop();
cuda::profile::t_rhs_total.start();
for(unsigned int i=0;i<numStreams;i++)
{
for (unsigned int var = 0; var < BSSN_NUM_VARS; var++)
cuda::copyArrayToDeviceAsync(uZipVars[var] + sendUnzipOffset[i],unZipInputHost[var] + sendUnzipOffset[i], sendUnzipCount[i], streams[i]);
cuda::copyArrayToDeviceAsync(cudaBlkList + sendBlockOffset[i], cuda::__DENDRO_BLOCK_LIST + sendBlockOffset[i], sendBlockCount[i], streams[i]);
cuda::copyArrayToDeviceAsync(blockMap[i], cuda::__GPU_BLOCK_MAP + 2*gridBlockOffset[i], 2*gridBlockCount[i],streams[i]);
}
for(unsigned int i=0;i<numStreams;i++)
{
cuda::__computeBSSNRHS<<< gridDim[i], blockDim, 0, streams[i] >>>(cuda::__UNZIP_OUTPUT,(const double**)cuda::__UNZIP_INPUT,cuda::__BSSN_DERIV_WORKSPACE,cuda::__DENDRO_BLOCK_LIST,cuda::__GPU_BLOCK_MAP + 2*gridBlockOffset[i],cuda::__BSSN_COMPUTE_PARMS,cuda::__CUDA_DEVICE_PROPERTIES,i);
CUDA_CHECK_ERROR();
}
for(unsigned int i=0;i<numStreams;i++)
{
//D2H
for (unsigned int var = 0; var < BSSN_NUM_VARS; var++)
cuda::copyArrayToHostAsync(unzipVarsRHS[var]+sendUnzipOffset[i],unZipOutputHost[var]+sendUnzipOffset[i],sendUnzipCount[i],streams[i]);
}
cuda::profile::t_rhs_cpu.start();
// process cpu blocks before sync
double ** unZipRHS_CPU=new double*[BSSN_NUM_VARS];
for(unsigned int var=0;var<BSSN_NUM_VARS;var++)
unZipRHS_CPU[var]=new double[UNZIP_DOF_SZ];
for(unsigned int blk=0;blk<NUM_CPU_DENDRO_BLOCKS;blk++)
{
offset=blkListCPU[blk].getOffset();
sz[0]=blkListCPU[blk].getAllocationSzX();
sz[1]=blkListCPU[blk].getAllocationSzY();
sz[2]=blkListCPU[blk].getAllocationSzZ();
bflag=blkListCPU[blk].getBlkNodeFlag();
dx=blkListCPU[blk].computeDx(pt_min,pt_max);
dy=blkListCPU[blk].computeDy(pt_min,pt_max);
dz=blkListCPU[blk].computeDz(pt_min,pt_max);
ptmin[0]=GRIDX_TO_X(blkListCPU[blk].getBlockNode().minX())-3*dx;
ptmin[1]=GRIDY_TO_Y(blkListCPU[blk].getBlockNode().minY())-3*dy;
ptmin[2]=GRIDZ_TO_Z(blkListCPU[blk].getBlockNode().minZ())-3*dz;
ptmax[0]=GRIDX_TO_X(blkListCPU[blk].getBlockNode().maxX())+3*dx;
ptmax[1]=GRIDY_TO_Y(blkListCPU[blk].getBlockNode().maxY())+3*dy;
ptmax[2]=GRIDZ_TO_Z(blkListCPU[blk].getBlockNode().maxZ())+3*dz;
bssnrhs(unZipRHS_CPU, (const double **)uZipVars, offset, ptmin, ptmax, sz, bflag);
}
cuda::profile::t_rhs_cpu.stop();
cudaDeviceSynchronize();
CUDA_CHECK_ERROR();
// merge cpu and gpu results.
for(unsigned int blk=0;blk<NUM_CPU_DENDRO_BLOCKS;blk++)
{
offset = blkListCPU[blk].getOffset();
sz[0] = blkListCPU[blk].getAllocationSzX();
sz[1] = blkListCPU[blk].getAllocationSzY();
sz[2] = blkListCPU[blk].getAllocationSzZ();
for(unsigned int var=0;var<BSSN_NUM_VARS;var++)
for(unsigned int node=0;node<(sz[0]*sz[1]*sz[2]);node++)
unzipVarsRHS[var][offset+node]=unZipRHS_CPU[var][offset+node];
}
cuda::profile::t_rhs_total.stop();
for(unsigned int var=0;var<BSSN_NUM_VARS;var++)
delete [] unZipRHS_CPU[var];
delete [] unZipRHS_CPU;
for(unsigned int i=0;i<numStreams;i++)
delete [] blockMap[i];
delete [] blockMap;
}
cuda::profile::t_cudaMalloc_derivs.start();
derivWorkSpace.deallocateDerivMemory();
CUDA_CHECK_ERROR();
cuda::profile::t_cudaMalloc_derivs.stop();
cudaFree(cuda::__CUDA_DEVICE_PROPERTIES);
cudaFree(cuda::__DENDRO_BLOCK_LIST);
cudaFree(cuda::__DENDRO_NUM_BLOCKS);
cudaFree(cuda::__NUM_GPU_BLOCKS);
cudaFree(cuda::__GPU_BLOCK_MAP);
cudaFree(cuda::__BSSN_NUM_VARS);
cudaFree(cuda::__BSSN_CONSTRAINT_NUM_VARS);
cudaFree(cuda::__GPU_BLOCK_SHARED_MEM_UTIL);
cudaFree(cuda::__DENDRO_BLK_MAX_SZ);
cudaFree(cuda::__BSSN_DERIV_WORKSPACE);
cudaFree(cuda::__BSSN_COMPUTE_PARMS);
cuda::dealloc2DCudaArray(cuda::__UNZIP_INPUT,BSSN_NUM_VARS);
cuda::dealloc2DCudaArray(cuda::__UNZIP_OUTPUT,BSSN_NUM_VARS);
delete [] blkListGPU;
delete [] blkListCPU;
delete [] cudaBlkList;
cuda::profile::t_overall.stop();
}
}
|
0a3287a688cf834e5f763787ab99acd70ffc2dda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
namespace at {
namespace native {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Copies the upper (triu) or lower (tril) triangular part of `self_info`
// into `result_info`, zero-filling everything on the other side of the
// k-th diagonal.  One thread handles one of the N elements; leading batch
// dimensions (everything before the trailing two matrix dims) are handled
// by decomposing the flat index against each dimension's size.
template <typename scalar_t, typename IndexType, bool upper>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__
void triu_tril_kernel(
cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
const int64_t k, const int64_t N) {
int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
// The grid may overshoot N; excess threads exit immediately.
if (linear_idx >= N) {
return;
}
auto dims = self_info.dims;
IndexType self_offset = 0, result_offset = 0;
// Compute column index and corresponding offset
IndexType col = linear_idx % self_info.sizes[dims - 1];
linear_idx /= self_info.sizes[dims - 1];
self_offset += self_info.strides[dims - 1] * col;
result_offset += result_info.strides[dims - 1] * col;
// Compute row index and corresponding offset
IndexType row = linear_idx % self_info.sizes[dims - 2];
linear_idx /= self_info.sizes[dims - 2];
self_offset += self_info.strides[dims - 2] * row;
result_offset += result_info.strides[dims - 2] * row;
// Compute remaining offsets
IndexType running_index;
#pragma unroll
for (IndexType i = dims - 3; i >= 0; --i) {
running_index = linear_idx % self_info.sizes[i];
linear_idx /= self_info.sizes[i];
self_offset += running_index * self_info.strides[i];
result_offset += running_index * result_info.strides[i];
}
// Element (row, col) is kept when it lies on the requested side of the
// k-th diagonal; otherwise the output slot is zeroed.
bool mask = upper ? (col - row >= k) : (col - row <= k);
result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}
// Launches triu_tril_kernel over every element of `self`, dispatching on
// dtype and choosing 32-bit indexing when both tensors permit it (cheaper
// index arithmetic on device).  `name` labels the dtype dispatch for error
// messages.  Callers guarantee `result` already has `self`'s sizes and that
// numel() > 0.  Returns `result`.
template <bool upper>
Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) {
int64_t N = self.numel();
dim3 dim_block = cuda::getApplyBlock();
// Ceil-division so a partial final block covers the tail elements.
dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, self.scalar_type(), name, [&]{
if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int32_t, upper>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result_info, self_info, k, N);
} else {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int64_t, upper>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result_info, self_info, k, N);
}
});
// Surface asynchronous launch errors (bad config etc.) immediately.
AT_CUDA_CHECK(hipGetLastError());
return result;
}
// In-place tril: keeps the lower-triangular part of `self` (diagonal
// offset k), zeroing the rest, by aliasing `self` as its own output.
Tensor& tril_cuda_(Tensor &self, int64_t k) {
return tril_cuda_out(self, self, k);
}
// Out-of-place tril: writes the lower-triangular part (diagonal offset k)
// of `self` into `result`, zeroing everything above that diagonal.
Tensor& tril_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  // Shape the destination like the source before doing any work.
  if (self.sizes() != result.sizes()) {
    result.resize_as_(self);
  }
  // Empty tensors need no kernel launch.
  if (self.numel() != 0) {
    triu_tril_cuda_template<false>(result, self, k, "tril");
  }
  return result;
}
// In-place triu: keeps the upper-triangular part of `self` (diagonal
// offset k), zeroing the rest, by aliasing `self` as its own output.
Tensor& triu_cuda_(Tensor &self, int64_t k) {
return triu_cuda_out(self, self, k);
}
// Out-of-place triu: writes the upper-triangular part (diagonal offset k)
// of `self` into `result`, zeroing everything below that diagonal.
Tensor& triu_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  // Shape the destination like the source before doing any work.
  if (self.sizes() != result.sizes()) {
    result.resize_as_(self);
  }
  // Empty tensors need no kernel launch.
  if (self.numel() != 0) {
    triu_tril_cuda_template<true>(result, self, k, "triu");
  }
  return result;
}
// Gathers the k-th diagonal of matrix `b` into vector `a`.
// `start` locates the first diagonal element inside `b`, `strideSum` (row
// stride + column stride) steps along the diagonal, and `strideA` strides
// the destination vector.  Grid-stride loop: any launch configuration
// covers all `size` elements.
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void copy_from_diagonal_kernel(
    scalar_t* a,
    scalar_t* b,
    std::ptrdiff_t start,
    std::ptrdiff_t size,
    std::ptrdiff_t strideSum,
    std::ptrdiff_t strideA) {
  const std::ptrdiff_t step = gridDim.x * blockDim.x;
  std::ptrdiff_t i = blockIdx.x * blockDim.x + threadIdx.x;
  while (i < size) {
    a[strideA * i] = b[start + strideSum * i];
    i += step;
  }
}
// Scatters vector `b` onto the k-th diagonal of matrix `a`.
// `start` locates the first diagonal slot inside `a`, `strideSum` (row
// stride + column stride) steps along the diagonal, and `strideB` strides
// the source vector.  Grid-stride loop: any launch configuration covers
// all `size` elements.
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void copy_to_diagonal_kernel(
    scalar_t* a,
    scalar_t* b,
    std::ptrdiff_t start,
    std::ptrdiff_t size,
    std::ptrdiff_t strideSum,
    std::ptrdiff_t strideB) {
  const std::ptrdiff_t step = gridDim.x * blockDim.x;
  std::ptrdiff_t i = blockIdx.x * blockDim.x + threadIdx.x;
  while (i < size) {
    a[start + strideSum * i] = b[strideB * i];
    i += step;
  }
}
// Core of at::diag on HIP:
//  - 2-D `self`: extract the `dimension`-th diagonal into 1-D `result`.
//  - 1-D `self`: embed it as the `dimension`-th diagonal of a zeroed
//    square 2-D `result`.
// `result` is resized here and returned by reference.
template <typename scalar_t>
Tensor& apply_diag(Tensor& result, const Tensor& self, int64_t dimension) {
TORCH_CHECK(
self.dim() == 1 || self.dim() == 2, "matrix or a vector expected");
TensorArg result_arg{result, "result", 1};
TensorArg self_arg{self, "self", 2};
checkAllSameGPU("diag", {result_arg, self_arg});
checkSameType("diag", result_arg, self_arg);
int nDimension = self.dim();
if (nDimension == 2) {
auto self_stride_0 = self.stride(0);
auto self_stride_1 = self.stride(1);
// Length of the requested diagonal (may be <= 0 when `dimension` points
// outside the matrix).
// NOTE(review): sz is a 32-bit int while tensor sizes are int64_t --
// very large matrices would truncate here; confirm callers guard this.
int sz;
if (dimension > 0) {
sz = ::min(self.size(0), self.size(1) - dimension);
} else {
sz = ::min(self.size(0) + dimension, self.size(1));
}
result.resize_({sz});
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride = result.stride(0);
const dim3 threads(::min(
int(sz),
int(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock)));
// Cap the grid at 1024 blocks; the kernel's grid-stride loop covers
// any remainder.
const dim3 grid(
::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x))));
// Offset of the diagonal's first element: positive `dimension` moves
// along columns, negative along rows.
auto start =
(dimension >= 0 ? dimension * self_stride_1
: -dimension * self_stride_0);
// Kernel Launch
hipLaunchKernelGGL(( copy_from_diagonal_kernel<scalar_t>)
, dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
sz,
self_stride_0 + self_stride_1,
result_stride);
}
} else {
auto n_elems = self.numel();
// Output is (sz, sz): the vector on the requested diagonal, zeros
// elsewhere (from zero_()).
auto sz = (dimension > 0) ? n_elems + dimension : n_elems - dimension;
auto self_stride = self.stride(0);
result.resize_({sz, sz});
result.zero_();
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride_0 = result.stride(0);
auto result_stride_1 = result.stride(1);
const dim3 threads(::min(
int(sz), at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock));
const dim3 grid(
::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * result_stride_1
: -dimension * result_stride_0);
// Kernel Launch
hipLaunchKernelGGL(( copy_to_diagonal_kernel<scalar_t>)
, dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
n_elems,
result_stride_0 + result_stride_1,
self_stride);
}
}
return result;
}
// Public entry for at::diag on this backend: dispatches on dtype (all
// standard types plus Half) and delegates to apply_diag.
Tensor& diag_cuda_out(Tensor& result, const Tensor& self, int64_t dimension) {
AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, self.scalar_type(), "diag_cuda", [&] {
apply_diag<scalar_t>(result, self, dimension);
});
return result;
}
// Trace of a 2-D matrix: extract the main diagonal via at::diag, then
// reduce it with sum().
Tensor trace_cuda(const Tensor& self) {
  TORCH_CHECK(self.dim() == 2, "expected a matrix");
  // Diagonal offset 0 selects the main diagonal.
  return at::diag(self, 0).sum();
}
} // namespace native
} // namespace at
| 0a3287a688cf834e5f763787ab99acd70ffc2dda.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
namespace at {
namespace native {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Writes the upper (triu) or lower (tril) triangular part of `self_info`
// into `result_info`, zero-filling the rest.  One thread per element of
// the N-element output; batch dimensions ahead of the trailing two matrix
// dims are decoded by repeated modulo/divide on the flat index.
template <typename scalar_t, typename IndexType, bool upper>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__
void triu_tril_kernel(
cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
const int64_t k, const int64_t N) {
int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
// Tail guard: the grid may be larger than N.
if (linear_idx >= N) {
return;
}
auto dims = self_info.dims;
IndexType self_offset = 0, result_offset = 0;
// Compute column index and corresponding offset
IndexType col = linear_idx % self_info.sizes[dims - 1];
linear_idx /= self_info.sizes[dims - 1];
self_offset += self_info.strides[dims - 1] * col;
result_offset += result_info.strides[dims - 1] * col;
// Compute row index and corresponding offset
IndexType row = linear_idx % self_info.sizes[dims - 2];
linear_idx /= self_info.sizes[dims - 2];
self_offset += self_info.strides[dims - 2] * row;
result_offset += result_info.strides[dims - 2] * row;
// Compute remaining offsets
IndexType running_index;
#pragma unroll
for (IndexType i = dims - 3; i >= 0; --i) {
running_index = linear_idx % self_info.sizes[i];
linear_idx /= self_info.sizes[i];
self_offset += running_index * self_info.strides[i];
result_offset += running_index * result_info.strides[i];
}
// Keep (row, col) when it lies on the requested side of the k-th
// diagonal; otherwise write zero.
bool mask = upper ? (col - row >= k) : (col - row <= k);
result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}
// Dispatches on dtype and launches triu_tril_kernel over all of `self`,
// preferring 32-bit indexing when both tensors allow it (faster device
// index math).  `name` labels the dispatch for error messages.  Callers
// ensure `result` already matches `self`'s sizes and numel() > 0.
template <bool upper>
Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) {
int64_t N = self.numel();
dim3 dim_block = cuda::getApplyBlock();
// Ceil-division: a final partial block covers the tail.
dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, self.scalar_type(), name, [&]{
if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
triu_tril_kernel<scalar_t, int32_t, upper>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
result_info, self_info, k, N);
} else {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
triu_tril_kernel<scalar_t, int64_t, upper>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
result_info, self_info, k, N);
}
});
// Catch launch-configuration errors right after the async launch.
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
// In-place tril: aliases `self` as both input and output of tril_cuda_out.
Tensor& tril_cuda_(Tensor &self, int64_t k) {
return tril_cuda_out(self, self, k);
}
// Out-of-place tril: lower-triangular part of `self` (diagonal offset k)
// into `result`; entries above that diagonal become zero.
Tensor& tril_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  // Match the destination's shape to the source first.
  if (self.sizes() != result.sizes()) {
    result.resize_as_(self);
  }
  // Skip the launch entirely for empty tensors.
  if (self.numel() != 0) {
    triu_tril_cuda_template<false>(result, self, k, "tril");
  }
  return result;
}
// In-place triu: aliases `self` as both input and output of triu_cuda_out.
Tensor& triu_cuda_(Tensor &self, int64_t k) {
return triu_cuda_out(self, self, k);
}
// Out-of-place triu: upper-triangular part of `self` (diagonal offset k)
// into `result`; entries below that diagonal become zero.
Tensor& triu_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  // Match the destination's shape to the source first.
  if (self.sizes() != result.sizes()) {
    result.resize_as_(self);
  }
  // Skip the launch entirely for empty tensors.
  if (self.numel() != 0) {
    triu_tril_cuda_template<true>(result, self, k, "triu");
  }
  return result;
}
// Reads the k-th diagonal of matrix `b` into vector `a`.
// `start` is the offset of the first diagonal element in `b`; `strideSum`
// (row stride + column stride) advances along the diagonal; `strideA`
// strides the output vector.  Grid-stride loop covers all `size` elements
// under any launch configuration.
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void copy_from_diagonal_kernel(
    scalar_t* a,
    scalar_t* b,
    std::ptrdiff_t start,
    std::ptrdiff_t size,
    std::ptrdiff_t strideSum,
    std::ptrdiff_t strideA) {
  const std::ptrdiff_t step = gridDim.x * blockDim.x;
  std::ptrdiff_t i = blockIdx.x * blockDim.x + threadIdx.x;
  while (i < size) {
    a[strideA * i] = b[start + strideSum * i];
    i += step;
  }
}
// Writes vector `b` onto the k-th diagonal of matrix `a`.
// `start` is the offset of the first diagonal slot in `a`; `strideSum`
// (row stride + column stride) advances along the diagonal; `strideB`
// strides the input vector.  Grid-stride loop covers all `size` elements
// under any launch configuration.
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void copy_to_diagonal_kernel(
    scalar_t* a,
    scalar_t* b,
    std::ptrdiff_t start,
    std::ptrdiff_t size,
    std::ptrdiff_t strideSum,
    std::ptrdiff_t strideB) {
  const std::ptrdiff_t step = gridDim.x * blockDim.x;
  std::ptrdiff_t i = blockIdx.x * blockDim.x + threadIdx.x;
  while (i < size) {
    a[start + strideSum * i] = b[strideB * i];
    i += step;
  }
}
// Core of at::diag on CUDA:
//  - 2-D `self`: extract the `dimension`-th diagonal into 1-D `result`.
//  - 1-D `self`: embed it as the `dimension`-th diagonal of a zeroed
//    square 2-D `result`.
// `result` is resized here and returned by reference.
template <typename scalar_t>
Tensor& apply_diag(Tensor& result, const Tensor& self, int64_t dimension) {
TORCH_CHECK(
self.dim() == 1 || self.dim() == 2, "matrix or a vector expected");
TensorArg result_arg{result, "result", 1};
TensorArg self_arg{self, "self", 2};
checkAllSameGPU("diag", {result_arg, self_arg});
checkSameType("diag", result_arg, self_arg);
int nDimension = self.dim();
if (nDimension == 2) {
auto self_stride_0 = self.stride(0);
auto self_stride_1 = self.stride(1);
// Length of the requested diagonal (may be <= 0 if it falls outside
// the matrix).
// NOTE(review): sz is a 32-bit int while tensor sizes are int64_t --
// very large matrices would truncate here; confirm callers guard this.
int sz;
if (dimension > 0) {
sz = std::min(self.size(0), self.size(1) - dimension);
} else {
sz = std::min(self.size(0) + dimension, self.size(1));
}
result.resize_({sz});
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride = result.stride(0);
const dim3 threads(std::min(
int(sz),
int(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock)));
// Grid capped at 1024 blocks; the kernel's grid-stride loop covers
// any remainder.
const dim3 grid(
std::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x))));
// Offset of the diagonal's first element: positive `dimension` moves
// along columns, negative along rows.
auto start =
(dimension >= 0 ? dimension * self_stride_1
: -dimension * self_stride_0);
// Kernel Launch
copy_from_diagonal_kernel<scalar_t>
<<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
sz,
self_stride_0 + self_stride_1,
result_stride);
}
} else {
auto n_elems = self.numel();
// Output is (sz, sz): the vector goes on the requested diagonal,
// everything else stays zero from zero_().
auto sz = (dimension > 0) ? n_elems + dimension : n_elems - dimension;
auto self_stride = self.stride(0);
result.resize_({sz, sz});
result.zero_();
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride_0 = result.stride(0);
auto result_stride_1 = result.stride(1);
const dim3 threads(std::min(
int(sz), at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock));
const dim3 grid(
std::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * result_stride_1
: -dimension * result_stride_0);
// Kernel Launch
copy_to_diagonal_kernel<scalar_t>
<<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
n_elems,
result_stride_0 + result_stride_1,
self_stride);
}
}
return result;
}
// Public entry for at::diag on CUDA: dispatches on dtype (all standard
// types plus Half) and delegates to apply_diag.
Tensor& diag_cuda_out(Tensor& result, const Tensor& self, int64_t dimension) {
AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, self.scalar_type(), "diag_cuda", [&] {
apply_diag<scalar_t>(result, self, dimension);
});
return result;
}
// Trace of a 2-D matrix: pull out the main diagonal with at::diag and
// reduce it with sum().
Tensor trace_cuda(const Tensor& self) {
  TORCH_CHECK(self.dim() == 2, "expected a matrix");
  // Offset 0 selects the main diagonal.
  return at::diag(self, 0).sum();
}
} // namespace native
} // namespace at
|
8e5f115076207a6fe953e2f8d6637122d4ccf320.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "star3d1r-32x32-5-128_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
// AN5D-generated kernel: 3-D star stencil of radius 1 (star3d1r), fusing
// __side0Len = 5 time steps per sweep.  Each thread streams along the c1
// axis through a five-stage register pipeline (__reg_1_* .. __reg_4_*)
// while exchanging c2/c3 neighbours through the double-buffered shared
// array __b_sb.  `A` holds two time planes selected by (__c0 % 2).
// NOTE(review): machine-generated code -- regenerate rather than hand-edit.
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Problem extents and padding on each axis (halo of 1 on c1/c2/c3).
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
// 5 fused time steps shrink the usable c2/c3 tile by 2*halo*5 = 10:
// 32 (threads incl. overlap) - 10 = 22 stored points per side.
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 22;
const AN5D_TYPE __side3Len = 22;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
// Map the flat thread id onto the overlapped c2/c3 tile, and the 1-D
// grid index onto (c1 tile, c2 tile, c3 tile).
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
// Register pipeline: __reg_0_* hold freshly loaded planes; __reg_k_*
// hold a plane after k fused stencil applications.
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
// Double-buffered c2/c3 plane shared by the block (one float per thread
// per buffer).
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// Predicates: which threads may load (incl. halo), update, and -- after
// each stage's shrinkage -- write intermediate/final values.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5) && __local_c3 >= (__halo3 * 5) && __local_c3 < __side3LenOl - (__halo3 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// Pipeline warm-up.  The first c1-tile starts at the physical boundary:
// plane 0 (kept in __reg_4_0) seeds every stage.
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(1, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
// Interior c1-tiles: warm up the pipeline from the overlap (halo) planes.
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
// Advance the shared double buffer before the streaming phase.
__b_sb = __b_sb_double + __blockSize * 1;
// Last c1-tile: stream until near the domain edge, then drain the
// pipeline with one of the epilogue variants below.
if (__c1Id == __side1Num - 1)
{
for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
// Epilogue: drain the remaining 0/1/2 planes depending on how the tile
// length lines up with the 3-plane unrolling.
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0);
}
}
// Full interior tile: steady-state streaming, three planes per iteration.
else
{
for (__h = 11; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
}
}
/*
 * kernel0_4: one sweep of a 3D 7-point ("star", radius-1) stencil with 4
 * time steps fused in registers (__side0Len == 4).  Auto-generated code;
 * the AN5D_TYPE macro suggests the AN5D stencil generator produced it --
 * TODO confirm against the build pipeline.
 *
 * A holds two time copies of the dimsize^3 grid selected by (c0 % 2): the
 * kernel reads plane (c0 % 2) and writes plane ((c0 + 1) % 2) (see __LOAD
 * and __DEST below).
 *
 * Layout assumed by the indexing: a 1-D grid where blockIdx.x encodes the
 * (c1, c2, c3) tile id.  Each block covers one c2 x c3 tile padded by
 * __OlLen = __side0Len halo cells per side and streams along c1.  Per fused
 * time step t, each thread keeps a 3-deep register window __reg_t_{0,1,2}
 * over consecutive c1 planes; in-plane (c2/c3) neighbors are exchanged
 * through the double-buffered shared tile __b_sb_double, with __DB_SWITCH
 * flipping the active half.
 */
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
/* Problem extents: interior points only (one boundary cell per side). */
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
/* Tile geometry: 4 fused steps => each side is padded ("Ol" = overlapped
 * length) by 4 halo cells so intermediate steps stay valid. */
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
/* Flatten the thread index onto the padded c2 x c3 tile; decode the
 * block's tile coordinates from blockIdx.x. */
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
/* Register pipeline: level 0 = raw loads; levels 1..3 = results after
 * fused steps 1..3 (__CALC1..__CALC3); step 4 is applied by __STORE. */
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
/* Double-buffered shared tile holding the current c1 plane for in-plane
 * neighbor access (uninitialized until __CALCSETUP writes it). */
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
/* Validity predicates: loads may include the halo ring; __writeValidN
 * shrinks the valid region by N halos (one per fused step); the final
 * store uses the innermost region (__writeValid4). */
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* __LOAD:    guarded read of c1 plane h from the (c0 % 2) time copy.
 * __CALCEXPR: the 7-point star (center + c1 neighbors from registers,
 *            c2/c3 neighbors from shared memory).
 * __CALCSETUP/__CALCn: publish the center plane to shared memory (with a
 *            buffer flip + barrier), then apply one fused step; threads
 *            outside that step's valid region just forward the center.
 * __STORE:   apply the final step and write into the ((c0+1) % 2) copy.
 * NOTE: __CALCSETUP's __syncthreads() is reached by all threads in the
 * block (the divergence is inside __CALCEXPR's guard, after the barrier). */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* Pipeline warm-up.  The first c1 tile (__c1Id == 0) reuses the boundary
 * plane (__reg_3_0) as the lower halo for every pipeline stage; interior
 * tiles simply stream loads until the pipeline is full. */
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_2, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
/* Re-anchor the shared-memory double buffer to a known half so the
 * steady-state loop starts with consistent parity across both branches. */
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
/* Last c1 tile: steady-state streaming (3-stage unrolled rotation of the
 * register window), then an epilogue that drains the pipeline against the
 * upper c1 boundary; which epilogue runs depends on the remainder. */
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
/* Epilogue: reuse the last loaded boundary plane as the upper halo for the
 * remaining pipeline stages (mirrors the warm-up's boundary handling). */
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1);
}
}
else
{
/* Interior c1 tile: stream the full overlapped range; the tail exits as
 * soon as __h reaches the padded tile length.  NOTE(review): the early
 * returns are taken uniformly by the whole block (__h is uniform), so no
 * thread misses a later __syncthreads(). */
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
}
/*
 * kernel0_3: same generated 3D 7-point star stencil as kernel0_4, but with
 * 3 time steps fused in registers (__side0Len == 3) and a correspondingly
 * larger c2/c3 tile (26 x 26 interior, 3-cell halo per side).  Reads the
 * (c0 % 2) time copy of A and writes ((c0 + 1) % 2).
 * See kernel0_4 for the full description of the register pipeline and the
 * double-buffered shared-memory tile; here the pipeline is one stage
 * shorter (__CALC1, __CALC2, then __STORE applies the 3rd step).
 */
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
/* Problem extents: interior points only (one boundary cell per side). */
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
/* Tile geometry: 3 fused steps => 3 halo cells of padding per side. */
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
/* Flatten the thread index onto the padded c2 x c3 tile; decode the
 * block's tile coordinates from blockIdx.x. */
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
/* Register pipeline: level 0 = raw loads; levels 1..2 = after fused steps
 * 1..2; the 3rd step is applied by __STORE. */
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
/* Double-buffered shared tile for in-plane (c2/c3) neighbor exchange. */
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
/* Validity predicates: loads include the halo; each fused step shrinks the
 * writable region by one halo; the store uses the innermost region. */
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* Macro kit identical in structure to kernel0_4's (see that kernel for
 * details); only the number of __CALCn stages differs. */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* Pipeline warm-up: first c1 tile reuses the boundary plane (__reg_2_0) as
 * the lower halo; interior tiles stream loads until the pipeline fills. */
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_1, __reg_2_2, __reg_2_0);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
/* Extra flip so both warm-up branches leave the buffer in the same parity. */
__DB_SWITCH(); __syncthreads();
}
/* Re-anchor the double buffer before the steady-state loop. */
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
/* Last c1 tile: 3-stage unrolled streaming, then drain the pipeline
 * against the upper c1 boundary (remainder-dependent epilogue). */
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2);
}
}
else
{
/* Interior c1 tile: stream the full padded range with uniform early
 * returns at the tail (__h is uniform across the block). */
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
}
}
/*
 * kernel0_2: same generated 3D 7-point star stencil as kernel0_4, with 2
 * time steps fused in registers (__side0Len == 2) and a 28 x 28 interior
 * c2/c3 tile (2-cell halo per side).  Reads time copy (c0 % 2) of A and
 * writes ((c0 + 1) % 2).  Pipeline: __CALC1 applies step 1, __STORE
 * applies step 2.  See kernel0_4 for the shared-memory/register scheme.
 */
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
/* Problem extents: interior points only (one boundary cell per side). */
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
/* Tile geometry: 2 fused steps => 2 halo cells of padding per side. */
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
/* Flatten the thread index onto the padded c2 x c3 tile; decode the
 * block's tile coordinates from blockIdx.x. */
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
/* Register pipeline: level 0 = raw loads; level 1 = after step 1; the 2nd
 * step is applied by __STORE. */
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
/* Double-buffered shared tile for in-plane (c2/c3) neighbor exchange. */
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
/* Validity predicates: loads include the halo; each fused step shrinks
 * the writable region by one halo; the store uses the innermost region. */
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* Macro kit identical in structure to kernel0_4's (see that kernel);
 * only one intermediate __CALC stage here. */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* Pipeline warm-up: first c1 tile reuses the boundary plane (__reg_1_0)
 * as the lower halo; interior tiles stream until the pipeline fills. */
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(1, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
/* Extra flip so both warm-up branches leave the buffer in the same parity. */
__DB_SWITCH(); __syncthreads();
}
/* Re-anchor the double buffer before the steady-state loop. */
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
/* Last c1 tile: 3-stage unrolled streaming, then drain the pipeline
 * against the upper c1 boundary (remainder-dependent epilogue). */
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0);
}
}
else
{
/* Interior c1 tile: stream the full padded range with uniform early
 * returns at the tail (__h is uniform across the block). */
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
}
/*
 * kernel0_1: single-time-step (__side0Len == 1) variant of the generated
 * 3D 7-point star stencil; 30 x 30 interior c2/c3 tile with a 1-cell halo.
 * Reads time copy (c0 % 2) of A and writes ((c0 + 1) % 2).  No
 * intermediate __CALC stages: __STORE applies the stencil directly from
 * the 3-plane register window.  See kernel0_4 for the overall scheme.
 */
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
/* Problem extents: interior points only (one boundary cell per side). */
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
/* Tile geometry: 1 fused step => 1 halo cell of padding per side. */
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
/* Flatten the thread index onto the padded c2 x c3 tile; decode the
 * block's tile coordinates from blockIdx.x. */
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
/* 3-plane register window over consecutive c1 planes. */
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
/* Double-buffered shared tile for in-plane (c2/c3) neighbor exchange. */
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
/* Validity predicates: loads include the halo ring; the single update
 * shrinks the writable region by one halo. */
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* Macro kit identical in structure to kernel0_4's (see that kernel);
 * __STORE applies the whole stencil here. */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* Warm-up: with a single fused step there is no register pipeline to
 * prime, so both branches of the generator's template are identical. */
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
/* Re-anchor the double buffer before the steady-state loop. */
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
/* Last c1 tile: 3-stage unrolled streaming along c1, then a
 * remainder-dependent epilogue for the final 0-2 planes. */
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1);
}
}
else
{
/* Interior c1 tile: stream the full padded range with uniform early
 * returns at the tail (__h is uniform across the block). */
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
}
}
| 8e5f115076207a6fe953e2f8d6637122d4ccf320.cu | #include "star3d1r-32x32-5-128_kernel.hu"
// Uniform accessor for the shared-memory stencil tile.  The stencil macros
// pass an index that is already offset for the (i2, i3) neighbor being read;
// this helper exists only so those macros have a single call site to go
// through when dereferencing the tile buffer.
__device__ float __sbref_wrap(float *sb, size_t index)
{
    return *(sb + index);
}
// AN5D-generated star3d1r stencil kernel: advances a radius-1, 7-point 3-D
// star stencil by __side0Len = 5 time steps in a single pass.
// A holds two time copies of the (dimsize^3) field, selected by (c0 % 2).
// Launch layout (presumably produced by the AN5D host driver — confirm against
// the launcher in the .hu header): a 1-D grid whose blockIdx.x linearizes
// (c1-tile, c2-tile, c3-tile); the 2-D thread block covers one overlapped
// (__side2LenOl x __side3LenOl) c2/c3 slab, and each thread streams along c1
// through a rotating 3-register pipeline per in-flight time level.
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Logical extents (interior sizes) and pads per dimension; c0 = time,
// c1..c3 = space.  halo = 1 in every spatial dimension (stencil radius).
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
// Tile shape: 5 time steps per pass over a 128 x 22 x 22 spatial tile.
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 22;
const AN5D_TYPE __side3Len = 22;
// Overlap (ghost-zone) widths consumed by temporal blocking: halo * time
// depth on each side; *Ol extents are tile + both overlaps.
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
// Number of tiles along each spatial dimension (ceil division).
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
// Flatten the 2-D thread block, then decode blockIdx.x into the tile's
// c1 index and this thread's global c2/c3 coordinates (overlap included).
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
// Rotating register pipeline: one (a, b, c) triple of c1-planes per time
// level 0..4; slot indices rotate instead of copying values.
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
// Double-buffered shared tile holding the current c1-plane so neighboring
// threads can read each other's center value (__DB_SWITCH flips halves).
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// Validity predicates: loads allow one extra halo ring; each successive
// time level shrinks the writable region by one halo (__writeValidK);
// only the final (5th) level stores to global memory.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5) && __local_c3 >= (__halo3 * 5) && __local_c3 < __side3LenOl - (__halo3 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// __LOAD: read plane h of the (c0 % 2) time copy into a register.
// __DEST: store target in the other time copy.
// __CALCEXPR: the 7-point weighted star-stencil update; in-plane neighbors
// come from shared memory (__SBREF), out-of-plane from registers (__REGREF).
// __CALCSETUP flips the shared double buffer, publishes this thread's
// center value, and barriers before any neighbor reads.
// __CALCk applies the update when writeValid-k holds, else passes the
// center value through (keeps the pipeline consistent in the shrink zone).
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// Prologue for the first c1 tile: plane 0 is the physical boundary, so the
// same register (__reg_4_0) is reused as the boundary value for every
// pipeline level while planes 1..10 prime the 5-deep pipeline; the first
// fully-updated plane is stored at h = 1.
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(1, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
// Prologue for interior c1 tiles: planes 0..10 come from the previous
// tile's overlap region, the pipeline fills level by level, and the first
// store happens only once all 5 levels are primed (h = 5).
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
// Re-align the shared double buffer to the half the steady-state loop
// expects after the prologue's odd number of __DB_SWITCH flips.
__b_sb = __b_sb_double + __blockSize * 1;
// Steady state + drain for the LAST c1 tile (handles the ragged tail in c1).
if (__c1Id == __side1Num - 1)
{
// Main pipeline: each iteration advances three c1 planes, one per
// register slot, storing the plane that exits the 5th time level.
for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
// Drain: how the pipeline empties depends on how many planes remain
// before the far boundary; the last loaded plane doubles as the
// boundary value for every remaining level (mirrors the prologue).
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0);
}
}
// Steady state for interior c1 tiles: fixed trip count over the overlapped
// extent; the early-out checks drain up to two partial pipeline advances.
else
{
for (__h = 11; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
}
}
// AN5D-generated star3d1r stencil kernel: same 7-point radius-1 star update
// as kernel0_5, but with __side0Len = 4 time steps fused per pass over a
// 128 x 24 x 24 spatial tile, so only four pipeline levels (__CALC1..3 plus
// the final __STORE) are in flight.  A holds two time copies of the field,
// selected by (c0 % 2); grid/block mapping matches kernel0_5.
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Logical extents and pads per dimension; c0 = time, c1..c3 = space;
// halo = 1 (stencil radius) in each spatial dimension.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
// Tile shape: 4 time steps per pass over a 128 x 24 x 24 spatial tile.
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
// Ghost-zone widths consumed by temporal blocking and overlapped extents.
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
// Decode flat thread id and linearized blockIdx.x into tile coordinates.
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
// Rotating register pipeline: one (a, b, c) plane triple per time level 0..3.
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
// Double-buffered shared tile for in-plane neighbor exchange.
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
// Validity predicates: loads allow one halo ring; each time level shrinks
// the writable region by one halo; only level 4 stores to global memory.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// Macros are identical in shape to kernel0_5's: __LOAD reads a c1 plane
// from the (c0 % 2) copy, __CALCEXPR is the weighted 7-point star update
// (in-plane neighbors via shared memory, out-of-plane via registers),
// __CALCSETUP flips the double buffer + publishes + barriers, __CALCk
// passes the center through outside its shrink region, __STORE writes the
// fully-updated plane into the (c0 + 1) % 2 copy.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// Prologue for the first c1 tile: __reg_3_0 holds the plane-0 boundary
// value reused by every pipeline level while planes 1..8 prime the 4-deep
// pipeline; first fully-updated plane is stored at h = 1.
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_2, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
// Prologue for interior c1 tiles: planes come from the previous tile's
// overlap; the first store happens only after all 4 levels are primed.
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
// Reset the shared double buffer to the half the steady-state loop expects
// (note: offset 0 here, unlike kernel0_5 — buffer parity differs with the
// pipeline depth).
__b_sb = __b_sb_double + __blockSize * 0;
// Steady state + drain for the LAST c1 tile (ragged tail in c1).
if (__c1Id == __side1Num - 1)
{
// Main pipeline: three c1 planes per iteration, one per register slot;
// the plane exiting the 4th time level is stored.
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
// Drain: empties the pipeline against the far boundary; the last loaded
// plane doubles as the boundary value for every remaining level.
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1);
}
}
// Steady state for interior c1 tiles: fixed trip count over the overlapped
// extent; early-out checks drain up to two partial pipeline advances.
else
{
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
}
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_1, __reg_2_2, __reg_2_0);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
}
}
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(1, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
}
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
}
}
|
892bf2f16459ca308ce0acfea84160f4bb364cb9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
int main(int argc, char* argv[])
{
int dev = 0;
hipSetDevice(dev);
unsigned int isize = 1 << 22;
unsigned int nbytes = isize * sizeof(float);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("%s starting at ", argv[0]);
printf("device %d: %s memory size %d nbyte %5.2f MB\n", dev, deviceProp.name, isize, nbytes / (1024.0f * 1024.0f));
float *h_a;
hipHostMalloc((float**)&h_a, nbytes);
float *d_a;
hipMalloc((float**)&d_a, nbytes);
for (unsigned int i = 0; i< isize; ++i)
{
h_a[i] = 0.5f;
}
hipMemcpy(d_a, h_a, nbytes, hipMemcpyHostToDevice);
hipMemcpy(h_a, d_a, nbytes, hipMemcpyDeviceToHost);
hipFree(d_a);
hipHostFree(h_a);
hipDeviceReset();
system("Pause");
return 0;
}
| 892bf2f16459ca308ce0acfea84160f4bb364cb9.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
int main(int argc, char* argv[])
{
int dev = 0;
cudaSetDevice(dev);
unsigned int isize = 1 << 22;
unsigned int nbytes = isize * sizeof(float);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("%s starting at ", argv[0]);
printf("device %d: %s memory size %d nbyte %5.2f MB\n", dev, deviceProp.name, isize, nbytes / (1024.0f * 1024.0f));
float *h_a;
cudaMallocHost((float**)&h_a, nbytes);
float *d_a;
cudaMalloc((float**)&d_a, nbytes);
for (unsigned int i = 0; i< isize; ++i)
{
h_a[i] = 0.5f;
}
cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice);
cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFreeHost(h_a);
cudaDeviceReset();
system("Pause");
return 0;
}
|
01d7ae23feb8f8cc286b16359ac1b53b8ef27c8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
template <unsigned int blockSize>
__device__ void warpReduce(volatile int* sdata, int tid) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
__global__ void reduce5(int *g_idata, int *g_odata) {
extern __shared__ int sdata[];
// perform first level of reduction, reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = g_idata[i] + g_idata[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
if (tid < 32) warpReduce<blockSize>(sdata, tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
} | 01d7ae23feb8f8cc286b16359ac1b53b8ef27c8e.cu | #include "includes.h"
template <unsigned int blockSize>
__device__ void warpReduce(volatile int* sdata, int tid) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
__global__ void reduce5(int *g_idata, int *g_odata) {
extern __shared__ int sdata[];
// perform first level of reduction, reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = g_idata[i] + g_idata[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
if (tid < 32) warpReduce<blockSize>(sdata, tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
} |
b483852f383010d605bf709d827e0aaf83355f4e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Yuri Gorokhov
* lab 1 - Global vs Shared memory speeds
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "../include/cuda_util.h"
#define ITERATIONS 10000000
__global__ void register_mem_kernel();
__global__ void shared_mem_kernel();
int main() {
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
// Register Memory
hipEventRecord(start,0);
hipLaunchKernelGGL(( register_mem_kernel), dim3(1), dim3(1), 0, 0, );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Register memory: %f\n", elapsedTime);
hipEventCreate(&start);
hipEventCreate(&stop);
// Shared Memory
hipEventRecord(start,0);
hipLaunchKernelGGL(( shared_mem_kernel), dim3(1), dim3(1), 0, 0, );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Shared memory: %f\n", elapsedTime);
return 0;
}
__global__ void register_mem_kernel() {
int location;
for(int i = 0; i < ITERATIONS; i++) {
int tmp = location >> 1;
location = tmp;
}
}
__global__ void shared_mem_kernel() {
__shared__ int location;
for(int i = 0; i < ITERATIONS; i++) {
int tmp = location >> 1;
location = tmp;
}
}
| b483852f383010d605bf709d827e0aaf83355f4e.cu | /**
* Yuri Gorokhov
* lab 1 - Global vs Shared memory speeds
*/
#include <stdio.h>
#include <cuda.h>
#include "../include/cuda_util.h"
#define ITERATIONS 10000000
__global__ void register_mem_kernel();
__global__ void shared_mem_kernel();
int main() {
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Register Memory
cudaEventRecord(start,0);
register_mem_kernel<<<1, 1>>>();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Register memory: %f\n", elapsedTime);
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Shared Memory
cudaEventRecord(start,0);
shared_mem_kernel<<<1, 1>>>();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Shared memory: %f\n", elapsedTime);
return 0;
}
__global__ void register_mem_kernel() {
int location;
for(int i = 0; i < ITERATIONS; i++) {
int tmp = location >> 1;
location = tmp;
}
}
__global__ void shared_mem_kernel() {
__shared__ int location;
for(int i = 0; i < ITERATIONS; i++) {
int tmp = location >> 1;
location = tmp;
}
}
|
468b3fa2e2db0f0efb48a2f14660365358faf2ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__
void deviceKernel(int *a, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < N; i += stride)
{
a[i] = 1;
}
}
void hostFunction(int *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = 1;
}
}
int main()
{
int N = 2<<24;
size_t size = N * sizeof(int);
int *a;
hipMallocManaged(&a, size);
h(a, N);
hipFree(a);
}
| 468b3fa2e2db0f0efb48a2f14660365358faf2ad.cu | __global__
void deviceKernel(int *a, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < N; i += stride)
{
a[i] = 1;
}
}
void hostFunction(int *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = 1;
}
}
int main()
{
int N = 2<<24;
size_t size = N * sizeof(int);
int *a;
cudaMallocManaged(&a, size);
h(a, N);
cudaFree(a);
}
|
44912a0f6d552ab0aa3087ea0701f9f1f3a54e97.hip | // !!! This is a file automatically generated by hipify!!!
//This takes command line arguements for:
// number of runs
// matrix side length
// Utilities and system includes
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixMul.h"
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
template <int BLOCK_SIZE> __global__ void
matrixMul( float* C, float* A, float* B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
////////////////////////////////////////////////////////////////////////////////
// END OF KERNEL
////////////////////////////////////////////////////////////////////////////////
struct timeval tp;
double getTime_sec();
void runTest(int argc, char** argv);
void randomInit(float*, int);
void printDiff(float*, float*, int, int, int, float);
bool check(float*, float*, int, float);
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
int cuda_device;
hipDeviceProp_t deviceProp;
hipGetDevice(&cuda_device);
hipGetDeviceProperties(&deviceProp, cuda_device);
// use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
// set seed for rand()
srand(2006);
//Get command line arguements
int nIter = 30;
int size = 640;
if( argc>2 ){
nIter = atoi(argv[1]);
size = atoi(argv[2]);
}
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = size;
uiHA = size;
uiWB = size;
uiHB = size;
uiWC = size;
uiHC = size;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// initialize host memory
randomInit(h_A, size_A);
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
// allocate device memory
float* d_A, *d_C;
hipMalloc((void**) &d_A, mem_size_A);
hipMalloc((void**) &d_C, mem_size_C);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
// setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(uiWC / threads.x, uiHC / threads.y);
//Print information about test
printf("Calculating: C = A x A, %d times on CPU\n", nIter);
printf("Matrix size is : %d x %d\n\n", uiWA, uiHA);
float* reference = (float*)malloc(mem_size_C);
double start_time = getTime_sec();
for (int j = 0; j < nIter; j++) {
computeGold(reference, h_A, h_A, uiHA, uiWA, uiWB);
}
// check if kernel execution generated and error
hipError_t cuda_error = hipDeviceSynchronize();
double finish_time = getTime_sec();
if(cuda_error==hipSuccess){
//printf( " Running the concurrentKernels was a success\n");
}else{
printf("CUDA Error: %s\n", hipGetErrorString(cuda_error));
return 1;
}
// calculate timing stuff
double total_sec = finish_time-start_time;
double dSeconds = total_sec/((double)nIter);
double dNumOps = 2.0 * (double)uiWA * (double)uiHA * (double)uiWB;
double gflops = 1.0e-9 * dNumOps/dSeconds;
printf("Time Informarion:\n");
printf(" Total Time: %.6f sec\n", total_sec);
printf(" Gflops: %.4f G Ops/sec\n\n\n", gflops);
// copy result from device to host
//hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
// compute reference solution
//
//computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB);
// check result (matrixMul)
// printf("Comparing CUDA matrixMul & Host results\n");
// bool resCUDA = check(reference, h_C, size_C, 1.0e-6f); //not sure if I can use this
// if (resCUDA != true)
//{
// printDiff(reference, h_C, uiWC, uiHC, 100, 1.0e-3f);
// }
// printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "OK" : "FAIL");
// clean up memory
free(h_A);
free(h_C);
free(reference);
hipFree(d_A);
hipFree(d_C);
hipDeviceReset();
}
double getTime_sec() {
gettimeofday(&tp, NULL);
return static_cast<double>(tp.tv_sec)
+ static_cast<double>(tp.tv_usec) / 1E6;
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
// CPU reference matrix multiply: C = A * B.
// A is hA x wA, B is wA x wB, C is hA x wB, all row-major.
// Accumulates each dot product in double (same as the original) before
// narrowing to float, so the result is a trustworthy gold reference.
void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int row = 0; row < hA; ++row) {
        const float* aRow = A + (size_t)row * wA;
        float* cRow = C + (size_t)row * wB;
        for (unsigned int col = 0; col < wB; ++col) {
            double acc = 0.0;
            for (unsigned int k = 0; k < wA; ++k) {
                acc += (double)aRow[k] * (double)B[(size_t)k * wB + col];
            }
            cRow[col] = (float)acc;
        }
    }
}
// Element-wise comparison of two float buffers of length `size`.
// Returns true when every |data1[k] - data2[k]| is within fListTol
// (strictly-greater differences fail); an empty buffer passes.
bool check(float *data1, float *data2, int size, float fListTol)
{
    for (int idx = 0; idx < size; ++idx) {
        float delta = fabs(data1[idx] - data2[idx]);
        if (delta > fListTol) {
            return false;
        }
    }
    return true;
}
// Counts elements of the width x height buffers whose absolute difference
// exceeds fListTol and prints the total.  iListLength bounded the verbose
// per-element listing, which is commented out in this build; the parameter
// is retained so the signature stays unchanged for callers.
void printDiff(float *data1, float *data2, int width, int height, int iListLength, float fListTol)
{
    (void)iListLength;  // listing disabled; kept for interface compatibility
    int error_count = 0;
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            int idx = row * width + col;
            if (fabs(data1[idx] - data2[idx]) > fListTol) {
                ++error_count;
            }
        }
    }
    printf(" \n Total Errors = %d with tolerance of %.6f\n\n", error_count, fListTol);
}
| 44912a0f6d552ab0aa3087ea0701f9f1f3a54e97.cu |
//This takes command line arguements for:
// number of runs
// matrix side length
// Utilities and system includes
#include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include "matrixMul.h"
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
// Tiled matrix multiplication on the device: C = A * B.
// wA is A's width and wB is B's width (all matrices row-major).
//
// Launch contract (the kernel performs no bounds checks):
//   - blockDim must be (BLOCK_SIZE, BLOCK_SIZE);
//   - wA and wB must be multiples of BLOCK_SIZE and the grid must cover C
//     exactly, so every shared-memory tile load/store below stays in range.
// Shared memory: 2 * BLOCK_SIZE * BLOCK_SIZE floats (static).
template <int BLOCK_SIZE> __global__ void
matrixMul( float* C, float* A, float* B, int wA, int wB)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;

    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * BLOCK_SIZE * by;

    // Index of the last sub-matrix of A processed by the block
    int aEnd = aBegin + wA - 1;

    // Step size used to iterate through the sub-matrices of A
    int aStep = BLOCK_SIZE;

    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;

    // Step size used to iterate through the sub-matrices of B
    // (one full tile-row of B per iteration)
    int bStep = BLOCK_SIZE * wB;

    // Csub is used to store the element of the block sub-matrix
    // that is computed by the thread
    float Csub = 0;

    // Loop over all the sub-matrices of A and B
    // required to compute the block sub-matrix
    for (int a = aBegin, b = bBegin;
         a <= aEnd;
         a += aStep, b += bStep) {

        // Declaration of the shared memory array As used to
        // store the sub-matrix of A
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];

        // Declaration of the shared memory array Bs used to
        // store the sub-matrix of B
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

        // Load the matrices from device memory
        // to shared memory; each thread loads
        // one element of each matrix
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];

        // Synchronize to make sure the matrices are loaded
        __syncthreads();

        // Multiply the two matrices together;
        // each thread computes one element
        // of the block sub-matrix
#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
            Csub += As[ty][k] * Bs[k][tx];

        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }

    // Write the block sub-matrix to device memory;
    // each thread writes one element
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}
////////////////////////////////////////////////////////////////////////////////
// END OF KERNEL
////////////////////////////////////////////////////////////////////////////////
// File-scope timeval scratch used by the gettimeofday-based timing helper.
struct timeval tp;

// Forward declarations for the host-side helpers defined later in this file.
double getTime_sec();
void runTest(int argc, char** argv);
void randomInit(float*, int);
void printDiff(float*, float*, int, int, int, float);
bool check(float*, float*, int, float);

// C linkage so the gold reference implementation can be shared with
// C translation units / separately-compiled gold files.
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program main.
//
// Usage: <exe> <nIter> <size>
// Times nIter CPU reference multiplications C = A x A of a size x size
// matrix and reports the average Gflop/s.  Device buffers are allocated
// and the input matrix is copied to the GPU, but no kernel is launched in
// this CPU-timing build (the GPU comparison path is disabled).
int main(int argc, char** argv)
{
    int cuda_device = 0;
    cudaDeviceProp deviceProp;
    cudaGetDevice(&cuda_device);
    cudaGetDeviceProperties(&deviceProp, cuda_device);
    // use a larger block size for Fermi and above
    int block_size = (deviceProp.major < 2) ? 16 : 32;

    // set seed for rand() so input data is reproducible across runs
    srand(2006);

    // Command line arguments: iteration count and matrix side length.
    int nIter = 30;
    int size = 640;
    if (argc > 2) {
        nIter = atoi(argv[1]);
        size = atoi(argv[2]);
        // Reject non-positive values: nIter == 0 would divide by zero in the
        // Gflops computation and a non-positive size gives empty matrices.
        if (nIter <= 0 || size <= 0) {
            fprintf(stderr, "nIter and size must be positive integers\n");
            return 1;
        }
    }

    unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
    uiWA = size;
    uiHA = size;
    uiWB = size;
    uiHB = size;
    uiWC = size;
    uiHC = size;
    (void)uiHB;  // kept for symmetry with the generated dimension set

    // allocate host memory for matrix A and result C, checking each malloc
    unsigned int size_A = uiWA * uiHA;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*)malloc(mem_size_A);
    unsigned int size_C = uiWC * uiHC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float* h_C = (float*)malloc(mem_size_C);
    if (h_A == NULL || h_C == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        return 1;
    }

    // initialize host memory
    randomInit(h_A, size_A);

    // allocate device memory and copy the input matrix, checking every call
    float* d_A = NULL;
    float* d_C = NULL;
    cudaError_t err = cudaMalloc((void**)&d_A, mem_size_A);
    if (err == cudaSuccess) err = cudaMalloc((void**)&d_C, mem_size_C);
    if (err == cudaSuccess) err = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        printf("CUDA Error: %s\n", cudaGetErrorString(err));
        return 1;
    }

    // setup execution parameters (unused in this build: no kernel is launched)
    dim3 threads(block_size, block_size);
    dim3 grid(uiWC / threads.x, uiHC / threads.y);
    (void)grid;

    // Print information about test
    printf("Calculating: C = A x A, %d times on CPU\n", nIter);
    // %u: the dimensions are unsigned int (the original %d was mismatched)
    printf("Matrix size is : %u x %u\n\n", uiWA, uiHA);

    float* reference = (float*)malloc(mem_size_C);
    if (reference == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        return 1;
    }

    // Timed loop: CPU gold reference, nIter repetitions of C = A x A.
    double start_time = getTime_sec();
    for (int j = 0; j < nIter; j++) {
        computeGold(reference, h_A, h_A, uiHA, uiWA, uiWB);
    }
    // Surface any error from the earlier asynchronous CUDA work.
    cudaError_t cuda_error = cudaDeviceSynchronize();
    double finish_time = getTime_sec();
    if (cuda_error != cudaSuccess) {
        printf("CUDA Error: %s\n", cudaGetErrorString(cuda_error));
        return 1;
    }

    // calculate timing stuff: average seconds per multiply and Gflop/s
    // (2 * W * H * W floating-point operations per multiply)
    double total_sec = finish_time - start_time;
    double dSeconds = total_sec / ((double)nIter);
    double dNumOps = 2.0 * (double)uiWA * (double)uiHA * (double)uiWB;
    double gflops = 1.0e-9 * dNumOps / dSeconds;
    printf("Time Informarion:\n");
    printf(" Total Time: %.6f sec\n", total_sec);
    printf(" Gflops: %.4f G Ops/sec\n\n\n", gflops);

    // clean up memory
    free(h_A);
    free(h_C);
    free(reference);
    cudaFree(d_A);
    cudaFree(d_C);
    cudaDeviceReset();
    return 0;
}
// Returns the current wall-clock time in seconds (microsecond resolution,
// via gettimeofday).  Uses a function-local timeval instead of the
// file-scope global `tp`, so concurrent or re-entrant callers cannot race
// on shared scratch state; the global remains declared for compatibility.
double getTime_sec() {
    struct timeval t;
    gettimeofday(&t, NULL);
    return static_cast<double>(t.tv_sec)
            + static_cast<double>(t.tv_usec) / 1E6;
}
// Allocates a matrix with random float entries.
// Fills the first `size` entries of `data` with pseudo-random floats in
// [0, 1], drawn from rand() (seed with srand() beforehand for determinism).
void randomInit(float* data, int size)
{
    int i = 0;
    while (i < size) {
        data[i] = rand() / (float)RAND_MAX;
        ++i;
    }
}
// CPU reference matrix multiply: C = A * B.
// A is hA x wA, B is wA x wB, C is hA x wB, all row-major.
// Accumulates each dot product in double (same as the original) before
// narrowing to float, so the result is a trustworthy gold reference.
void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int row = 0; row < hA; ++row) {
        const float* aRow = A + (size_t)row * wA;
        float* cRow = C + (size_t)row * wB;
        for (unsigned int col = 0; col < wB; ++col) {
            double acc = 0.0;
            for (unsigned int k = 0; k < wA; ++k) {
                acc += (double)aRow[k] * (double)B[(size_t)k * wB + col];
            }
            cRow[col] = (float)acc;
        }
    }
}
// Element-wise comparison of two float buffers of length `size`.
// Returns true when every |data1[k] - data2[k]| is within fListTol
// (strictly-greater differences fail); an empty buffer passes.
bool check(float *data1, float *data2, int size, float fListTol)
{
    for (int idx = 0; idx < size; ++idx) {
        float delta = fabs(data1[idx] - data2[idx]);
        if (delta > fListTol) {
            return false;
        }
    }
    return true;
}
// Counts elements of the width x height buffers whose absolute difference
// exceeds fListTol and prints the total.  iListLength bounded the verbose
// per-element listing, which is commented out in this build; the parameter
// is retained so the signature stays unchanged for callers.
void printDiff(float *data1, float *data2, int width, int height, int iListLength, float fListTol)
{
    (void)iListLength;  // listing disabled; kept for interface compatibility
    int error_count = 0;
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            int idx = row * width + col;
            if (fabs(data1[idx] - data2[idx]) > fListTol) {
                ++error_count;
            }
        }
    }
    printf(" \n Total Errors = %d with tolerance of %.6f\n\n", error_count, fListTol);
}
|
37e42f66bc9d54ce0b3d7096bbf2b081f4c428aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_generate_chunk_kernel;
int xdim0_generate_chunk_kernel_h = -1;
__constant__ int ydim0_generate_chunk_kernel;
int ydim0_generate_chunk_kernel_h = -1;
__constant__ int xdim1_generate_chunk_kernel;
int xdim1_generate_chunk_kernel_h = -1;
__constant__ int ydim1_generate_chunk_kernel;
int ydim1_generate_chunk_kernel_h = -1;
__constant__ int xdim2_generate_chunk_kernel;
int xdim2_generate_chunk_kernel_h = -1;
__constant__ int ydim2_generate_chunk_kernel;
int ydim2_generate_chunk_kernel_h = -1;
__constant__ int xdim3_generate_chunk_kernel;
int xdim3_generate_chunk_kernel_h = -1;
__constant__ int ydim3_generate_chunk_kernel;
int ydim3_generate_chunk_kernel_h = -1;
__constant__ int xdim4_generate_chunk_kernel;
int xdim4_generate_chunk_kernel_h = -1;
__constant__ int ydim4_generate_chunk_kernel;
int ydim4_generate_chunk_kernel_h = -1;
__constant__ int xdim5_generate_chunk_kernel;
int xdim5_generate_chunk_kernel_h = -1;
__constant__ int ydim5_generate_chunk_kernel;
int ydim5_generate_chunk_kernel_h = -1;
__constant__ int xdim6_generate_chunk_kernel;
int xdim6_generate_chunk_kernel_h = -1;
__constant__ int ydim6_generate_chunk_kernel;
int ydim6_generate_chunk_kernel_h = -1;
__constant__ int xdim7_generate_chunk_kernel;
int xdim7_generate_chunk_kernel_h = -1;
__constant__ int ydim7_generate_chunk_kernel;
int ydim7_generate_chunk_kernel_h = -1;
__constant__ int xdim8_generate_chunk_kernel;
int xdim8_generate_chunk_kernel_h = -1;
__constant__ int ydim8_generate_chunk_kernel;
int ydim8_generate_chunk_kernel_h = -1;
__constant__ int xdim9_generate_chunk_kernel;
int xdim9_generate_chunk_kernel_h = -1;
__constant__ int ydim9_generate_chunk_kernel;
int ydim9_generate_chunk_kernel_h = -1;
__constant__ int xdim10_generate_chunk_kernel;
int xdim10_generate_chunk_kernel_h = -1;
__constant__ int ydim10_generate_chunk_kernel;
int ydim10_generate_chunk_kernel_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_generate_chunk_kernel*(y)+xdim0_generate_chunk_kernel*ydim0_generate_chunk_kernel*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_generate_chunk_kernel*(y)+xdim1_generate_chunk_kernel*ydim1_generate_chunk_kernel*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_generate_chunk_kernel*(y)+xdim2_generate_chunk_kernel*ydim2_generate_chunk_kernel*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_generate_chunk_kernel*(y)+xdim3_generate_chunk_kernel*ydim3_generate_chunk_kernel*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_generate_chunk_kernel*(y)+xdim4_generate_chunk_kernel*ydim4_generate_chunk_kernel*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_generate_chunk_kernel*(y)+xdim5_generate_chunk_kernel*ydim5_generate_chunk_kernel*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_generate_chunk_kernel*(y)+xdim6_generate_chunk_kernel*ydim6_generate_chunk_kernel*(z))
#define OPS_ACC7(x,y,z) (x+xdim7_generate_chunk_kernel*(y)+xdim7_generate_chunk_kernel*ydim7_generate_chunk_kernel*(z))
#define OPS_ACC8(x,y,z) (x+xdim8_generate_chunk_kernel*(y)+xdim8_generate_chunk_kernel*ydim8_generate_chunk_kernel*(z))
#define OPS_ACC9(x,y,z) (x+xdim9_generate_chunk_kernel*(y)+xdim9_generate_chunk_kernel*ydim9_generate_chunk_kernel*(z))
#define OPS_ACC10(x,y,z) (x+xdim10_generate_chunk_kernel*(y)+xdim10_generate_chunk_kernel*ydim10_generate_chunk_kernel*(z))
//user function
// Initialise one mesh cell of the chunk: apply the background state
// (states[0]) unconditionally, then overwrite energy/density/velocity for
// every later state whose geometry (cuboid, sphere or point) covers this
// cell.  `states`, `number_of_states` and the g_* geometry tags are
// presumably __constant__ globals set up by the OPS code generator --
// declared elsewhere; TODO confirm against the enclosing application.
// vertex*/cell* are per-axis coordinate arrays; the OPS_ACCn macros map a
// relative (x,y,z) stencil offset to a flat index via per-dataset pitches.
__device__
void generate_chunk_kernel( const double *vertexx,
  const double *vertexy, const double *vertexz,
  double *energy0, double *density0,
  double *xvel0, double *yvel0, double *zvel0,
  const double *cellx, const double *celly, const double *cellz) {

  double radius, x_cent, y_cent, z_cent;

  // Background state applied first; later states may overwrite it.
  energy0[OPS_ACC3(0,0,0)]= states[0].energy;
  density0[OPS_ACC4(0,0,0)]= states[0].density;
  xvel0[OPS_ACC5(0,0,0)]=states[0].xvel;
  yvel0[OPS_ACC6(0,0,0)]=states[0].yvel;
  zvel0[OPS_ACC7(0,0,0)]=states[0].zvel;

  for(int i = 1; i<number_of_states; i++) {

    // Geometry reference point is the state's minimum corner.
    x_cent=states[i].xmin;
    y_cent=states[i].ymin;
    z_cent=states[i].zmin;

    if (states[i].geometry == g_cube) {
      // Cell lies inside the cuboid when its vertex interval overlaps the
      // box on all three axes (next vertex >= min, this vertex < max).
      if(vertexx[OPS_ACC0(1,0,0)] >= states[i].xmin && vertexx[OPS_ACC0(0,0,0)] < states[i].xmax) {
        if(vertexy[OPS_ACC1(0,1,0)] >= states[i].ymin && vertexy[OPS_ACC1(0,0,0)] < states[i].ymax) {
          if(vertexz[OPS_ACC2(0,0,1)] >= states[i].zmin && vertexz[OPS_ACC2(0,0,0)] < states[i].zmax) {
            energy0[OPS_ACC3(0,0,0)] = states[i].energy;
            density0[OPS_ACC4(0,0,0)] = states[i].density;
            // Velocities are written to all 8 vertices of this cell
            // (offsets 0/1 on each axis).
            for (int ix=0;ix<2;ix++){
              for (int iy=0;iy<2;iy++){
                for (int iz=0;iz<2;iz++){
                  xvel0[OPS_ACC5(ix,iy,iz)] = states[i].xvel;
                  yvel0[OPS_ACC6(ix,iy,iz)] = states[i].yvel;
                  zvel0[OPS_ACC7(ix,iy,iz)] = states[i].zvel;
                }
              }
            }
          }
        }
      }
    }
    else if(states[i].geometry == g_sphe) {
      // Euclidean distance from the cell centre to the reference point.
      radius = sqrt ((cellx[OPS_ACC8(0,0,0)] - x_cent) * (cellx[OPS_ACC8(0,0,0)] - x_cent) +
               (celly[OPS_ACC9(0,0,0)] - y_cent) * (celly[OPS_ACC9(0,0,0)] - y_cent) +
               (cellz[OPS_ACC10(0,0,0)] - z_cent) * (cellz[OPS_ACC10(0,0,0)] - z_cent));
      if(radius <= states[i].radius) {
        energy0[OPS_ACC3(0,0,0)] = states[i].energy;
        density0[OPS_ACC4(0,0,0)] = states[i].density;
        // Same 8-vertex velocity update as the cuboid case.
        for (int ix=0;ix<2;ix++){
          for (int iy=0;iy<2;iy++){
            for (int iz=0;iz<2;iz++){
              xvel0[OPS_ACC5(ix,iy,iz)] = states[i].xvel;
              yvel0[OPS_ACC6(ix,iy,iz)] = states[i].yvel;
              zvel0[OPS_ACC7(ix,iy,iz)] = states[i].zvel;
            }
          }
        }
      }
    }
    else if(states[i].geometry == g_point) {
      // Exact floating-point match of the cell's vertex to the point.
      if(vertexx[OPS_ACC0(0,0,0)] == x_cent && vertexy[OPS_ACC1(0,0,0)] == y_cent && vertexz[OPS_ACC2(0,0,0)] == z_cent) {
        energy0[OPS_ACC3(0,0,0)] = states[i].energy;
        density0[OPS_ACC4(0,0,0)] = states[i].density;
        for (int ix=0;ix<2;ix++){
          for (int iy=0;iy<2;iy++){
            for (int iz=0;iz<2;iz++){
              xvel0[OPS_ACC5(ix,iy,iz)] = states[i].xvel;
              yvel0[OPS_ACC6(ix,iy,iz)] = states[i].yvel;
              zvel0[OPS_ACC7(ix,iy,iz)] = states[i].zvel;
            }
          }
        }
      }
    }
  }
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
// Kernel wrapper: one thread per (x, y, z) iteration point.  Each argument
// pointer is advanced to this thread's element using the per-dataset pitches
// held in __constant__ memory, then the user function is invoked.  Threads
// outside the size0 x size1 x size2 box do nothing.
__global__ void ops_generate_chunk_kernel(
const double* __restrict arg0,
const double* __restrict arg1,
const double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
const double* __restrict arg8,
const double* __restrict arg9,
const double* __restrict arg10,
int size0,
int size1,
int size2 ){

  const int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  const int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  const int idx_z = blockDim.z * blockIdx.z + threadIdx.z;

  if (idx_x >= size0 || idx_y >= size1 || idx_z >= size2) return;

  // Coordinate arrays vary along a single axis each:
  // arg0/arg8 along x, arg1/arg9 along y, arg2/arg10 along z.
  const double* p0  = arg0  + idx_x;
  const double* p8  = arg8  + idx_x;
  const double* p1  = arg1  + idx_y * xdim1_generate_chunk_kernel;
  const double* p9  = arg9  + idx_y * xdim9_generate_chunk_kernel;
  const double* p2  = arg2  + idx_z * xdim2_generate_chunk_kernel  * ydim2_generate_chunk_kernel;
  const double* p10 = arg10 + idx_z * xdim10_generate_chunk_kernel * ydim10_generate_chunk_kernel;

  // Full 3-D field datasets: offset along all three axes.
  double* p3 = arg3 + idx_x + idx_y * xdim3_generate_chunk_kernel + idx_z * xdim3_generate_chunk_kernel * ydim3_generate_chunk_kernel;
  double* p4 = arg4 + idx_x + idx_y * xdim4_generate_chunk_kernel + idx_z * xdim4_generate_chunk_kernel * ydim4_generate_chunk_kernel;
  double* p5 = arg5 + idx_x + idx_y * xdim5_generate_chunk_kernel + idx_z * xdim5_generate_chunk_kernel * ydim5_generate_chunk_kernel;
  double* p6 = arg6 + idx_x + idx_y * xdim6_generate_chunk_kernel + idx_z * xdim6_generate_chunk_kernel * ydim6_generate_chunk_kernel;
  double* p7 = arg7 + idx_x + idx_y * xdim7_generate_chunk_kernel + idx_z * xdim7_generate_chunk_kernel * ydim7_generate_chunk_kernel;

  generate_chunk_kernel(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10);
}
// host stub function
// Host stub generated by ops.py for generate_chunk_kernel (OPS loop 140).
// Computes the locally owned iteration range, refreshes the per-dataset
// pitches in __constant__ memory when they change, builds the base device
// pointer for each of the 11 arguments, launches the kernel over the 3-D
// range, and records timing / transfer statistics.
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
 ops_arg arg9, ops_arg arg10) {

  ops_arg args[11] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10};

  ops_timing_realloc(140,"generate_chunk_kernel");
  OPS_kernels[140].count++;

  //compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  // Ranks that own no part of this block have nothing to do.
  if (!sb->owned) return;
  // Clip the global iteration range to this rank's decomposition, keeping
  // out-of-block halo iterations only on the physical-boundary ranks.
  for ( int n=0; n<3; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
#else //OPS_MPI
  // Single-process build: iterate the requested range directly.
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
#endif //OPS_MPI

  // Extent of the local iteration space per dimension (clamped at zero).
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);

  // Per-argument dataset pitches: x pitch in elements (times dat->dim for
  // multi-component datasets) and y extent, consumed by the OPS_ACC macros.
  int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0]*args[2].dat->dim;
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0]*args[3].dat->dim;
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0]*args[4].dat->dim;
  int ydim4 = args[4].dat->size[1];
  int xdim5 = args[5].dat->size[0]*args[5].dat->dim;
  int ydim5 = args[5].dat->size[1];
  int xdim6 = args[6].dat->size[0]*args[6].dat->dim;
  int ydim6 = args[6].dat->size[1];
  int xdim7 = args[7].dat->size[0]*args[7].dat->dim;
  int ydim7 = args[7].dat->size[1];
  int xdim8 = args[8].dat->size[0]*args[8].dat->dim;
  int ydim8 = args[8].dat->size[1];
  int xdim9 = args[9].dat->size[0]*args[9].dat->dim;
  int ydim9 = args[9].dat->size[1];
  int xdim10 = args[10].dat->size[0]*args[10].dat->dim;
  int ydim10 = args[10].dat->size[1];

  //Timing
  double t1,t2,c1,c2;
  ops_timers_core(&c2,&t2);

  // Upload the pitches to __constant__ memory only when any of them changed
  // since the previous launch (symbol copies are comparatively expensive).
  if (xdim0 != xdim0_generate_chunk_kernel_h || ydim0 != ydim0_generate_chunk_kernel_h || xdim1 != xdim1_generate_chunk_kernel_h || ydim1 != ydim1_generate_chunk_kernel_h || xdim2 != xdim2_generate_chunk_kernel_h || ydim2 != ydim2_generate_chunk_kernel_h || xdim3 != xdim3_generate_chunk_kernel_h || ydim3 != ydim3_generate_chunk_kernel_h || xdim4 != xdim4_generate_chunk_kernel_h || ydim4 != ydim4_generate_chunk_kernel_h || xdim5 != xdim5_generate_chunk_kernel_h || ydim5 != ydim5_generate_chunk_kernel_h || xdim6 != xdim6_generate_chunk_kernel_h || ydim6 != ydim6_generate_chunk_kernel_h || xdim7 != xdim7_generate_chunk_kernel_h || ydim7 != ydim7_generate_chunk_kernel_h || xdim8 != xdim8_generate_chunk_kernel_h || ydim8 != ydim8_generate_chunk_kernel_h || xdim9 != xdim9_generate_chunk_kernel_h || ydim9 != ydim9_generate_chunk_kernel_h || xdim10 != xdim10_generate_chunk_kernel_h || ydim10 != ydim10_generate_chunk_kernel_h) {
    hipMemcpyToSymbol( xdim0_generate_chunk_kernel, &xdim0, sizeof(int) );
    xdim0_generate_chunk_kernel_h = xdim0;
    hipMemcpyToSymbol( ydim0_generate_chunk_kernel, &ydim0, sizeof(int) );
    ydim0_generate_chunk_kernel_h = ydim0;
    hipMemcpyToSymbol( xdim1_generate_chunk_kernel, &xdim1, sizeof(int) );
    xdim1_generate_chunk_kernel_h = xdim1;
    hipMemcpyToSymbol( ydim1_generate_chunk_kernel, &ydim1, sizeof(int) );
    ydim1_generate_chunk_kernel_h = ydim1;
    hipMemcpyToSymbol( xdim2_generate_chunk_kernel, &xdim2, sizeof(int) );
    xdim2_generate_chunk_kernel_h = xdim2;
    hipMemcpyToSymbol( ydim2_generate_chunk_kernel, &ydim2, sizeof(int) );
    ydim2_generate_chunk_kernel_h = ydim2;
    hipMemcpyToSymbol( xdim3_generate_chunk_kernel, &xdim3, sizeof(int) );
    xdim3_generate_chunk_kernel_h = xdim3;
    hipMemcpyToSymbol( ydim3_generate_chunk_kernel, &ydim3, sizeof(int) );
    ydim3_generate_chunk_kernel_h = ydim3;
    hipMemcpyToSymbol( xdim4_generate_chunk_kernel, &xdim4, sizeof(int) );
    xdim4_generate_chunk_kernel_h = xdim4;
    hipMemcpyToSymbol( ydim4_generate_chunk_kernel, &ydim4, sizeof(int) );
    ydim4_generate_chunk_kernel_h = ydim4;
    hipMemcpyToSymbol( xdim5_generate_chunk_kernel, &xdim5, sizeof(int) );
    xdim5_generate_chunk_kernel_h = xdim5;
    hipMemcpyToSymbol( ydim5_generate_chunk_kernel, &ydim5, sizeof(int) );
    ydim5_generate_chunk_kernel_h = ydim5;
    hipMemcpyToSymbol( xdim6_generate_chunk_kernel, &xdim6, sizeof(int) );
    xdim6_generate_chunk_kernel_h = xdim6;
    hipMemcpyToSymbol( ydim6_generate_chunk_kernel, &ydim6, sizeof(int) );
    ydim6_generate_chunk_kernel_h = ydim6;
    hipMemcpyToSymbol( xdim7_generate_chunk_kernel, &xdim7, sizeof(int) );
    xdim7_generate_chunk_kernel_h = xdim7;
    hipMemcpyToSymbol( ydim7_generate_chunk_kernel, &ydim7, sizeof(int) );
    ydim7_generate_chunk_kernel_h = ydim7;
    hipMemcpyToSymbol( xdim8_generate_chunk_kernel, &xdim8, sizeof(int) );
    xdim8_generate_chunk_kernel_h = xdim8;
    hipMemcpyToSymbol( ydim8_generate_chunk_kernel, &ydim8, sizeof(int) );
    ydim8_generate_chunk_kernel_h = ydim8;
    hipMemcpyToSymbol( xdim9_generate_chunk_kernel, &xdim9, sizeof(int) );
    xdim9_generate_chunk_kernel_h = xdim9;
    hipMemcpyToSymbol( ydim9_generate_chunk_kernel, &ydim9, sizeof(int) );
    ydim9_generate_chunk_kernel_h = ydim9;
    hipMemcpyToSymbol( xdim10_generate_chunk_kernel, &xdim10, sizeof(int) );
    xdim10_generate_chunk_kernel_h = xdim10;
    hipMemcpyToSymbol( ydim10_generate_chunk_kernel, &ydim10, sizeof(int) );
    ydim10_generate_chunk_kernel_h = ydim10;
  }

  // One thread per iteration point; z is mapped one plane per grid slice.
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);

  // Bytes per element of each dataset (used in base-offset arithmetic).
  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  int dat2 = args[2].dat->elem_size;
  int dat3 = args[3].dat->elem_size;
  int dat4 = args[4].dat->elem_size;
  int dat5 = args[5].dat->elem_size;
  int dat6 = args[6].dat->elem_size;
  int dat7 = args[7].dat->elem_size;
  int dat8 = args[8].dat->elem_size;
  int dat9 = args[9].dat->elem_size;
  int dat10 = args[10].dat->elem_size;

  char *p_a[11];

  //set up initial pointers
  // For each argument: fold the dataset's depth offsets into d_m, then
  // advance the device pointer (in bytes) to element
  // (start[0], start[1], start[2]) relative to the dataset's base.
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
  int base0 = dat0 * 1 *
  (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
  base0 = base0+ dat0 *
  args[0].dat->size[0] *
  (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
  base0 = base0+ dat0 *
  args[0].dat->size[0] *
  args[0].dat->size[1] *
  (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
  p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
  int base1 = dat1 * 1 *
  (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
  base1 = base1+ dat1 *
  args[1].dat->size[0] *
  (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
  base1 = base1+ dat1 *
  args[1].dat->size[0] *
  args[1].dat->size[1] *
  (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
  p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif //OPS_MPI
  int base2 = dat2 * 1 *
  (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
  base2 = base2+ dat2 *
  args[2].dat->size[0] *
  (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
  base2 = base2+ dat2 *
  args[2].dat->size[0] *
  args[2].dat->size[1] *
  (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]);
  p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
#endif //OPS_MPI
  int base3 = dat3 * 1 *
  (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
  base3 = base3+ dat3 *
  args[3].dat->size[0] *
  (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
  base3 = base3+ dat3 *
  args[3].dat->size[0] *
  args[3].dat->size[1] *
  (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]);
  p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
#endif //OPS_MPI
  int base4 = dat4 * 1 *
  (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
  base4 = base4+ dat4 *
  args[4].dat->size[0] *
  (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
  base4 = base4+ dat4 *
  args[4].dat->size[0] *
  args[4].dat->size[1] *
  (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]);
  p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d];
#endif //OPS_MPI
  int base5 = dat5 * 1 *
  (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]);
  base5 = base5+ dat5 *
  args[5].dat->size[0] *
  (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]);
  base5 = base5+ dat5 *
  args[5].dat->size[0] *
  args[5].dat->size[1] *
  (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]);
  p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d];
#endif //OPS_MPI
  int base6 = dat6 * 1 *
  (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]);
  base6 = base6+ dat6 *
  args[6].dat->size[0] *
  (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]);
  base6 = base6+ dat6 *
  args[6].dat->size[0] *
  args[6].dat->size[1] *
  (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]);
  p_a[6] = (char *)args[6].data_d + base6;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d];
#else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d];
#endif //OPS_MPI
  int base7 = dat7 * 1 *
  (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]);
  base7 = base7+ dat7 *
  args[7].dat->size[0] *
  (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]);
  base7 = base7+ dat7 *
  args[7].dat->size[0] *
  args[7].dat->size[1] *
  (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]);
  p_a[7] = (char *)args[7].data_d + base7;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d] + OPS_sub_dat_list[args[8].dat->index]->d_im[d];
#else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d];
#endif //OPS_MPI
  int base8 = dat8 * 1 *
  (start[0] * args[8].stencil->stride[0] - args[8].dat->base[0] - d_m[0]);
  base8 = base8+ dat8 *
  args[8].dat->size[0] *
  (start[1] * args[8].stencil->stride[1] - args[8].dat->base[1] - d_m[1]);
  base8 = base8+ dat8 *
  args[8].dat->size[0] *
  args[8].dat->size[1] *
  (start[2] * args[8].stencil->stride[2] - args[8].dat->base[2] - d_m[2]);
  p_a[8] = (char *)args[8].data_d + base8;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d] + OPS_sub_dat_list[args[9].dat->index]->d_im[d];
#else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d];
#endif //OPS_MPI
  int base9 = dat9 * 1 *
  (start[0] * args[9].stencil->stride[0] - args[9].dat->base[0] - d_m[0]);
  base9 = base9+ dat9 *
  args[9].dat->size[0] *
  (start[1] * args[9].stencil->stride[1] - args[9].dat->base[1] - d_m[1]);
  base9 = base9+ dat9 *
  args[9].dat->size[0] *
  args[9].dat->size[1] *
  (start[2] * args[9].stencil->stride[2] - args[9].dat->base[2] - d_m[2]);
  p_a[9] = (char *)args[9].data_d + base9;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d] + OPS_sub_dat_list[args[10].dat->index]->d_im[d];
#else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d];
#endif //OPS_MPI
  int base10 = dat10 * 1 *
  (start[0] * args[10].stencil->stride[0] - args[10].dat->base[0] - d_m[0]);
  base10 = base10+ dat10 *
  args[10].dat->size[0] *
  (start[1] * args[10].stencil->stride[1] - args[10].dat->base[1] - d_m[1]);
  base10 = base10+ dat10 *
  args[10].dat->size[0] *
  args[10].dat->size[1] *
  (start[2] * args[10].stencil->stride[2] - args[10].dat->base[2] - d_m[2]);
  p_a[10] = (char *)args[10].data_d + base10;

  // Ensure device copies are current and halos exchanged before launch.
  ops_H_D_exchanges_device(args, 11);
  ops_halo_exchanges(args,11,range);

  ops_timers_core(&c1,&t1);
  OPS_kernels[140].mpi_time += t1-t2;

  //call kernel wrapper function, passing in pointers to data
  hipLaunchKernelGGL(( ops_generate_chunk_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
  (double *)p_a[2], (double *)p_a[3],
  (double *)p_a[4], (double *)p_a[5],
  (double *)p_a[6], (double *)p_a[7],
  (double *)p_a[8], (double *)p_a[9],
  (double *)p_a[10],x_size, y_size, z_size);

  // In diagnostic mode, synchronize so launch/execution errors surface here.
  if (OPS_diags>1) {
    cutilSafeCall(hipDeviceSynchronize());
  }
  ops_timers_core(&c2,&t2);
  OPS_kernels[140].time += t2-t1;

  // Mark device copies dirty and flag the halo regions of the written
  // datasets (args 3..7) as needing exchange.
  ops_set_dirtybit_device(args, 11);
  ops_set_halo_dirtybit3(&args[3],range);
  ops_set_halo_dirtybit3(&args[4],range);
  ops_set_halo_dirtybit3(&args[5],range);
  ops_set_halo_dirtybit3(&args[6],range);
  ops_set_halo_dirtybit3(&args[7],range);

  //Update kernel record
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg0);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg1);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg2);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg3);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg4);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg5);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg6);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg7);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg8);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg9);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg10);
}
| 37e42f66bc9d54ce0b3d7096bbf2b081f4c428aa.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_generate_chunk_kernel;
int xdim0_generate_chunk_kernel_h = -1;
__constant__ int ydim0_generate_chunk_kernel;
int ydim0_generate_chunk_kernel_h = -1;
__constant__ int xdim1_generate_chunk_kernel;
int xdim1_generate_chunk_kernel_h = -1;
__constant__ int ydim1_generate_chunk_kernel;
int ydim1_generate_chunk_kernel_h = -1;
__constant__ int xdim2_generate_chunk_kernel;
int xdim2_generate_chunk_kernel_h = -1;
__constant__ int ydim2_generate_chunk_kernel;
int ydim2_generate_chunk_kernel_h = -1;
__constant__ int xdim3_generate_chunk_kernel;
int xdim3_generate_chunk_kernel_h = -1;
__constant__ int ydim3_generate_chunk_kernel;
int ydim3_generate_chunk_kernel_h = -1;
__constant__ int xdim4_generate_chunk_kernel;
int xdim4_generate_chunk_kernel_h = -1;
__constant__ int ydim4_generate_chunk_kernel;
int ydim4_generate_chunk_kernel_h = -1;
__constant__ int xdim5_generate_chunk_kernel;
int xdim5_generate_chunk_kernel_h = -1;
__constant__ int ydim5_generate_chunk_kernel;
int ydim5_generate_chunk_kernel_h = -1;
__constant__ int xdim6_generate_chunk_kernel;
int xdim6_generate_chunk_kernel_h = -1;
__constant__ int ydim6_generate_chunk_kernel;
int ydim6_generate_chunk_kernel_h = -1;
__constant__ int xdim7_generate_chunk_kernel;
int xdim7_generate_chunk_kernel_h = -1;
__constant__ int ydim7_generate_chunk_kernel;
int ydim7_generate_chunk_kernel_h = -1;
__constant__ int xdim8_generate_chunk_kernel;
int xdim8_generate_chunk_kernel_h = -1;
__constant__ int ydim8_generate_chunk_kernel;
int ydim8_generate_chunk_kernel_h = -1;
__constant__ int xdim9_generate_chunk_kernel;
int xdim9_generate_chunk_kernel_h = -1;
__constant__ int ydim9_generate_chunk_kernel;
int ydim9_generate_chunk_kernel_h = -1;
__constant__ int xdim10_generate_chunk_kernel;
int xdim10_generate_chunk_kernel_h = -1;
__constant__ int ydim10_generate_chunk_kernel;
int ydim10_generate_chunk_kernel_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_generate_chunk_kernel*(y)+xdim0_generate_chunk_kernel*ydim0_generate_chunk_kernel*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_generate_chunk_kernel*(y)+xdim1_generate_chunk_kernel*ydim1_generate_chunk_kernel*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_generate_chunk_kernel*(y)+xdim2_generate_chunk_kernel*ydim2_generate_chunk_kernel*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_generate_chunk_kernel*(y)+xdim3_generate_chunk_kernel*ydim3_generate_chunk_kernel*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_generate_chunk_kernel*(y)+xdim4_generate_chunk_kernel*ydim4_generate_chunk_kernel*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_generate_chunk_kernel*(y)+xdim5_generate_chunk_kernel*ydim5_generate_chunk_kernel*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_generate_chunk_kernel*(y)+xdim6_generate_chunk_kernel*ydim6_generate_chunk_kernel*(z))
#define OPS_ACC7(x,y,z) (x+xdim7_generate_chunk_kernel*(y)+xdim7_generate_chunk_kernel*ydim7_generate_chunk_kernel*(z))
#define OPS_ACC8(x,y,z) (x+xdim8_generate_chunk_kernel*(y)+xdim8_generate_chunk_kernel*ydim8_generate_chunk_kernel*(z))
#define OPS_ACC9(x,y,z) (x+xdim9_generate_chunk_kernel*(y)+xdim9_generate_chunk_kernel*ydim9_generate_chunk_kernel*(z))
#define OPS_ACC10(x,y,z) (x+xdim10_generate_chunk_kernel*(y)+xdim10_generate_chunk_kernel*ydim10_generate_chunk_kernel*(z))
//user function
// Initialises one grid point of a CloverLeaf chunk from the `states` table
// (declared elsewhere in this file, outside this chunk — presumably in
// __constant__ memory; confirm against the full file).  State 0 supplies the
// background energy/density/velocity for every point; each later state then
// overwrites points that fall inside its geometry (cuboid, sphere or single
// point).  All array accesses go through the OPS_ACCn macros defined above,
// which flatten an (x,y,z) stencil offset onto each dat's 3-D layout.
__device__
void generate_chunk_kernel( const double *vertexx,
  const double *vertexy, const double *vertexz,
  double *energy0, double *density0,
  double *xvel0, double *yvel0, double *zvel0,
  const double *cellx, const double *celly, const double *cellz) {
  double radius, x_cent, y_cent, z_cent;
  // Background (state 0) values applied unconditionally first.
  energy0[OPS_ACC3(0,0,0)]= states[0].energy;
  density0[OPS_ACC4(0,0,0)]= states[0].density;
  xvel0[OPS_ACC5(0,0,0)]=states[0].xvel;
  yvel0[OPS_ACC6(0,0,0)]=states[0].yvel;
  zvel0[OPS_ACC7(0,0,0)]=states[0].zvel;
  // Later states override the background where their geometry covers this point.
  for(int i = 1; i<number_of_states; i++) {
    // For sphere/point geometries the centre is stored in the (xmin,ymin,zmin) fields.
    x_cent=states[i].xmin;
    y_cent=states[i].ymin;
    z_cent=states[i].zmin;
    if (states[i].geometry == g_cube) {
      // Cell lies in the cuboid when its vertex interval straddles the state bounds
      // in all three directions.
      if(vertexx[OPS_ACC0(1,0,0)] >= states[i].xmin && vertexx[OPS_ACC0(0,0,0)] < states[i].xmax) {
        if(vertexy[OPS_ACC1(0,1,0)] >= states[i].ymin && vertexy[OPS_ACC1(0,0,0)] < states[i].ymax) {
          if(vertexz[OPS_ACC2(0,0,1)] >= states[i].zmin && vertexz[OPS_ACC2(0,0,0)] < states[i].zmax) {
            energy0[OPS_ACC3(0,0,0)] = states[i].energy;
            density0[OPS_ACC4(0,0,0)] = states[i].density;
            // Velocities live on vertices: set all 8 vertices of this cell.
            for (int ix=0;ix<2;ix++){
              for (int iy=0;iy<2;iy++){
                for (int iz=0;iz<2;iz++){
                  xvel0[OPS_ACC5(ix,iy,iz)] = states[i].xvel;
                  yvel0[OPS_ACC6(ix,iy,iz)] = states[i].yvel;
                  zvel0[OPS_ACC7(ix,iy,iz)] = states[i].zvel;
                }
              }
            }
          }
        }
      }
    }
    else if(states[i].geometry == g_sphe) {
      // Distance from the cell centre to the sphere centre.
      radius = sqrt ((cellx[OPS_ACC8(0,0,0)] - x_cent) * (cellx[OPS_ACC8(0,0,0)] - x_cent) +
        (celly[OPS_ACC9(0,0,0)] - y_cent) * (celly[OPS_ACC9(0,0,0)] - y_cent) +
        (cellz[OPS_ACC10(0,0,0)] - z_cent) * (cellz[OPS_ACC10(0,0,0)] - z_cent));
      if(radius <= states[i].radius) {
        energy0[OPS_ACC3(0,0,0)] = states[i].energy;
        density0[OPS_ACC4(0,0,0)] = states[i].density;
        for (int ix=0;ix<2;ix++){
          for (int iy=0;iy<2;iy++){
            for (int iz=0;iz<2;iz++){
              xvel0[OPS_ACC5(ix,iy,iz)] = states[i].xvel;
              yvel0[OPS_ACC6(ix,iy,iz)] = states[i].yvel;
              zvel0[OPS_ACC7(ix,iy,iz)] = states[i].zvel;
            }
          }
        }
      }
    }
    else if(states[i].geometry == g_point) {
      // Exact floating-point equality with the stored point coordinates —
      // intentional in CloverLeaf since the point is placed on grid vertices.
      if(vertexx[OPS_ACC0(0,0,0)] == x_cent && vertexy[OPS_ACC1(0,0,0)] == y_cent && vertexz[OPS_ACC2(0,0,0)] == z_cent) {
        energy0[OPS_ACC3(0,0,0)] = states[i].energy;
        density0[OPS_ACC4(0,0,0)] = states[i].density;
        for (int ix=0;ix<2;ix++){
          for (int iy=0;iy<2;iy++){
            for (int iz=0;iz<2;iz++){
              xvel0[OPS_ACC5(ix,iy,iz)] = states[i].xvel;
              yvel0[OPS_ACC6(ix,iy,iz)] = states[i].yvel;
              zvel0[OPS_ACC7(ix,iy,iz)] = states[i].zvel;
            }
          }
        }
      }
    }
  }
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
// CUDA wrapper: one thread per point of the (size0 x size1 x size2) iteration
// space; launched with a 2-D thread block and z handled by the grid (see host
// stub).  Each argN pointer is advanced to this thread's element before the
// bounds check; the 0/1 multipliers encode which dimensions each dat actually
// spans (e.g. arg0/vertexx varies only in x, arg3..7 are full 3-D fields).
// The xdimN/ydimN strides come from __constant__ memory set by the host stub.
__global__ void ops_generate_chunk_kernel(
const double* __restrict arg0,
const double* __restrict arg1,
const double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
const double* __restrict arg8,
const double* __restrict arg9,
const double* __restrict arg10,
int size0,
int size1,
int size2 ){
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // Pure pointer arithmetic — safe even for out-of-range threads, which are
  // filtered by the guard below before any dereference.
  arg0 += idx_x * 1 + idx_y * 0 * xdim0_generate_chunk_kernel + idx_z * 0 * xdim0_generate_chunk_kernel * ydim0_generate_chunk_kernel;
  arg1 += idx_x * 0 + idx_y * 1 * xdim1_generate_chunk_kernel + idx_z * 0 * xdim1_generate_chunk_kernel * ydim1_generate_chunk_kernel;
  arg2 += idx_x * 0 + idx_y * 0 * xdim2_generate_chunk_kernel + idx_z * 1 * xdim2_generate_chunk_kernel * ydim2_generate_chunk_kernel;
  arg3 += idx_x * 1 + idx_y * 1 * xdim3_generate_chunk_kernel + idx_z * 1 * xdim3_generate_chunk_kernel * ydim3_generate_chunk_kernel;
  arg4 += idx_x * 1 + idx_y * 1 * xdim4_generate_chunk_kernel + idx_z * 1 * xdim4_generate_chunk_kernel * ydim4_generate_chunk_kernel;
  arg5 += idx_x * 1 + idx_y * 1 * xdim5_generate_chunk_kernel + idx_z * 1 * xdim5_generate_chunk_kernel * ydim5_generate_chunk_kernel;
  arg6 += idx_x * 1 + idx_y * 1 * xdim6_generate_chunk_kernel + idx_z * 1 * xdim6_generate_chunk_kernel * ydim6_generate_chunk_kernel;
  arg7 += idx_x * 1 + idx_y * 1 * xdim7_generate_chunk_kernel + idx_z * 1 * xdim7_generate_chunk_kernel * ydim7_generate_chunk_kernel;
  arg8 += idx_x * 1 + idx_y * 0 * xdim8_generate_chunk_kernel + idx_z * 0 * xdim8_generate_chunk_kernel * ydim8_generate_chunk_kernel;
  arg9 += idx_x * 0 + idx_y * 1 * xdim9_generate_chunk_kernel + idx_z * 0 * xdim9_generate_chunk_kernel * ydim9_generate_chunk_kernel;
  arg10 += idx_x * 0 + idx_y * 0 * xdim10_generate_chunk_kernel + idx_z * 1 * xdim10_generate_chunk_kernel * ydim10_generate_chunk_kernel;
  // Guard against the padded tail of the launch grid.
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    generate_chunk_kernel(arg0, arg1, arg2, arg3,
      arg4, arg5, arg6, arg7, arg8,
      arg9, arg10);
  }
}
// host stub function
// Auto-generated (ops.py) host wrapper: computes the locally-owned iteration
// range, refreshes the __constant__ stride symbols when any dat layout
// changed, computes the base device pointer of each of the 11 arguments,
// performs halo/host-device exchanges, launches ops_generate_chunk_kernel and
// records timing/transfer statistics under kernel id 140.
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
 ops_arg arg9, ops_arg arg10) {
  ops_arg args[11] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10};
  ops_timing_realloc(140,"generate_chunk_kernel");
  OPS_kernels[140].count++;
  //compute locally allocated range for the sub-block
  // Under MPI, clamp the global `range` to the part owned by this rank and
  // translate it into local indices; edge ranks keep any out-of-bounds
  // extension so boundary loops still cover the physical halo.
  int start[3];
  int end[3];
  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<3; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else //OPS_MPI
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif //OPS_MPI
  // Extent of the local iteration space (may be 0 in any direction).
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);
  // Per-argument row/plane strides in elements.
  int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0]*args[2].dat->dim;
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0]*args[3].dat->dim;
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0]*args[4].dat->dim;
  int ydim4 = args[4].dat->size[1];
  int xdim5 = args[5].dat->size[0]*args[5].dat->dim;
  int ydim5 = args[5].dat->size[1];
  int xdim6 = args[6].dat->size[0]*args[6].dat->dim;
  int ydim6 = args[6].dat->size[1];
  int xdim7 = args[7].dat->size[0]*args[7].dat->dim;
  int ydim7 = args[7].dat->size[1];
  int xdim8 = args[8].dat->size[0]*args[8].dat->dim;
  int ydim8 = args[8].dat->size[1];
  int xdim9 = args[9].dat->size[0]*args[9].dat->dim;
  int ydim9 = args[9].dat->size[1];
  int xdim10 = args[10].dat->size[0]*args[10].dat->dim;
  int ydim10 = args[10].dat->size[1];
  //Timing
  double t1,t2,c1,c2;
  ops_timers_core(&c2,&t2);
  // Re-upload the __constant__ stride symbols only when any dat layout
  // changed since the previous launch (the *_h shadows cache the last values).
  if (xdim0 != xdim0_generate_chunk_kernel_h || ydim0 != ydim0_generate_chunk_kernel_h || xdim1 != xdim1_generate_chunk_kernel_h || ydim1 != ydim1_generate_chunk_kernel_h || xdim2 != xdim2_generate_chunk_kernel_h || ydim2 != ydim2_generate_chunk_kernel_h || xdim3 != xdim3_generate_chunk_kernel_h || ydim3 != ydim3_generate_chunk_kernel_h || xdim4 != xdim4_generate_chunk_kernel_h || ydim4 != ydim4_generate_chunk_kernel_h || xdim5 != xdim5_generate_chunk_kernel_h || ydim5 != ydim5_generate_chunk_kernel_h || xdim6 != xdim6_generate_chunk_kernel_h || ydim6 != ydim6_generate_chunk_kernel_h || xdim7 != xdim7_generate_chunk_kernel_h || ydim7 != ydim7_generate_chunk_kernel_h || xdim8 != xdim8_generate_chunk_kernel_h || ydim8 != ydim8_generate_chunk_kernel_h || xdim9 != xdim9_generate_chunk_kernel_h || ydim9 != ydim9_generate_chunk_kernel_h || xdim10 != xdim10_generate_chunk_kernel_h || ydim10 != ydim10_generate_chunk_kernel_h) {
    cudaMemcpyToSymbol( xdim0_generate_chunk_kernel, &xdim0, sizeof(int) );
    xdim0_generate_chunk_kernel_h = xdim0;
    cudaMemcpyToSymbol( ydim0_generate_chunk_kernel, &ydim0, sizeof(int) );
    ydim0_generate_chunk_kernel_h = ydim0;
    cudaMemcpyToSymbol( xdim1_generate_chunk_kernel, &xdim1, sizeof(int) );
    xdim1_generate_chunk_kernel_h = xdim1;
    cudaMemcpyToSymbol( ydim1_generate_chunk_kernel, &ydim1, sizeof(int) );
    ydim1_generate_chunk_kernel_h = ydim1;
    cudaMemcpyToSymbol( xdim2_generate_chunk_kernel, &xdim2, sizeof(int) );
    xdim2_generate_chunk_kernel_h = xdim2;
    cudaMemcpyToSymbol( ydim2_generate_chunk_kernel, &ydim2, sizeof(int) );
    ydim2_generate_chunk_kernel_h = ydim2;
    cudaMemcpyToSymbol( xdim3_generate_chunk_kernel, &xdim3, sizeof(int) );
    xdim3_generate_chunk_kernel_h = xdim3;
    cudaMemcpyToSymbol( ydim3_generate_chunk_kernel, &ydim3, sizeof(int) );
    ydim3_generate_chunk_kernel_h = ydim3;
    cudaMemcpyToSymbol( xdim4_generate_chunk_kernel, &xdim4, sizeof(int) );
    xdim4_generate_chunk_kernel_h = xdim4;
    cudaMemcpyToSymbol( ydim4_generate_chunk_kernel, &ydim4, sizeof(int) );
    ydim4_generate_chunk_kernel_h = ydim4;
    cudaMemcpyToSymbol( xdim5_generate_chunk_kernel, &xdim5, sizeof(int) );
    xdim5_generate_chunk_kernel_h = xdim5;
    cudaMemcpyToSymbol( ydim5_generate_chunk_kernel, &ydim5, sizeof(int) );
    ydim5_generate_chunk_kernel_h = ydim5;
    cudaMemcpyToSymbol( xdim6_generate_chunk_kernel, &xdim6, sizeof(int) );
    xdim6_generate_chunk_kernel_h = xdim6;
    cudaMemcpyToSymbol( ydim6_generate_chunk_kernel, &ydim6, sizeof(int) );
    ydim6_generate_chunk_kernel_h = ydim6;
    cudaMemcpyToSymbol( xdim7_generate_chunk_kernel, &xdim7, sizeof(int) );
    xdim7_generate_chunk_kernel_h = xdim7;
    cudaMemcpyToSymbol( ydim7_generate_chunk_kernel, &ydim7, sizeof(int) );
    ydim7_generate_chunk_kernel_h = ydim7;
    cudaMemcpyToSymbol( xdim8_generate_chunk_kernel, &xdim8, sizeof(int) );
    xdim8_generate_chunk_kernel_h = xdim8;
    cudaMemcpyToSymbol( ydim8_generate_chunk_kernel, &ydim8, sizeof(int) );
    ydim8_generate_chunk_kernel_h = ydim8;
    cudaMemcpyToSymbol( xdim9_generate_chunk_kernel, &xdim9, sizeof(int) );
    xdim9_generate_chunk_kernel_h = xdim9;
    cudaMemcpyToSymbol( ydim9_generate_chunk_kernel, &ydim9, sizeof(int) );
    ydim9_generate_chunk_kernel_h = ydim9;
    cudaMemcpyToSymbol( xdim10_generate_chunk_kernel, &xdim10, sizeof(int) );
    xdim10_generate_chunk_kernel_h = xdim10;
    cudaMemcpyToSymbol( ydim10_generate_chunk_kernel, &ydim10, sizeof(int) );
    ydim10_generate_chunk_kernel_h = ydim10;
  }
  // 2-D thread blocks; z is covered entirely by the grid dimension.
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
  // Element sizes in bytes for base-offset arithmetic below.
  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  int dat2 = args[2].dat->elem_size;
  int dat3 = args[3].dat->elem_size;
  int dat4 = args[4].dat->elem_size;
  int dat5 = args[5].dat->elem_size;
  int dat6 = args[6].dat->elem_size;
  int dat7 = args[7].dat->elem_size;
  int dat8 = args[8].dat->elem_size;
  int dat9 = args[9].dat->elem_size;
  int dat10 = args[10].dat->elem_size;
  char *p_a[11];
  //set up initial pointers
  // For each argument: byte offset of (start[0],start[1],start[2]) within the
  // dat, accounting for the dat's base offset and (under MPI) the local
  // intra-rank displacement d_im.
  int d_m[OPS_MAX_DIM];
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
  #endif //OPS_MPI
  int base0 = dat0 * 1 *
  (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
  base0 = base0+ dat0 *
  args[0].dat->size[0] *
  (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
  base0 = base0+ dat0 *
  args[0].dat->size[0] *
  args[0].dat->size[1] *
  (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
  p_a[0] = (char *)args[0].data_d + base0;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
  #endif //OPS_MPI
  int base1 = dat1 * 1 *
  (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
  base1 = base1+ dat1 *
  args[1].dat->size[0] *
  (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
  base1 = base1+ dat1 *
  args[1].dat->size[0] *
  args[1].dat->size[1] *
  (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
  p_a[1] = (char *)args[1].data_d + base1;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
  #endif //OPS_MPI
  int base2 = dat2 * 1 *
  (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
  base2 = base2+ dat2 *
  args[2].dat->size[0] *
  (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
  base2 = base2+ dat2 *
  args[2].dat->size[0] *
  args[2].dat->size[1] *
  (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]);
  p_a[2] = (char *)args[2].data_d + base2;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
  #endif //OPS_MPI
  int base3 = dat3 * 1 *
  (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
  base3 = base3+ dat3 *
  args[3].dat->size[0] *
  (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
  base3 = base3+ dat3 *
  args[3].dat->size[0] *
  args[3].dat->size[1] *
  (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]);
  p_a[3] = (char *)args[3].data_d + base3;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
  #endif //OPS_MPI
  int base4 = dat4 * 1 *
  (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
  base4 = base4+ dat4 *
  args[4].dat->size[0] *
  (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
  base4 = base4+ dat4 *
  args[4].dat->size[0] *
  args[4].dat->size[1] *
  (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]);
  p_a[4] = (char *)args[4].data_d + base4;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d];
  #endif //OPS_MPI
  int base5 = dat5 * 1 *
  (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]);
  base5 = base5+ dat5 *
  args[5].dat->size[0] *
  (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]);
  base5 = base5+ dat5 *
  args[5].dat->size[0] *
  args[5].dat->size[1] *
  (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]);
  p_a[5] = (char *)args[5].data_d + base5;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d];
  #endif //OPS_MPI
  int base6 = dat6 * 1 *
  (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]);
  base6 = base6+ dat6 *
  args[6].dat->size[0] *
  (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]);
  base6 = base6+ dat6 *
  args[6].dat->size[0] *
  args[6].dat->size[1] *
  (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]);
  p_a[6] = (char *)args[6].data_d + base6;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d];
  #endif //OPS_MPI
  int base7 = dat7 * 1 *
  (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]);
  base7 = base7+ dat7 *
  args[7].dat->size[0] *
  (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]);
  base7 = base7+ dat7 *
  args[7].dat->size[0] *
  args[7].dat->size[1] *
  (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]);
  p_a[7] = (char *)args[7].data_d + base7;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d] + OPS_sub_dat_list[args[8].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d];
  #endif //OPS_MPI
  int base8 = dat8 * 1 *
  (start[0] * args[8].stencil->stride[0] - args[8].dat->base[0] - d_m[0]);
  base8 = base8+ dat8 *
  args[8].dat->size[0] *
  (start[1] * args[8].stencil->stride[1] - args[8].dat->base[1] - d_m[1]);
  base8 = base8+ dat8 *
  args[8].dat->size[0] *
  args[8].dat->size[1] *
  (start[2] * args[8].stencil->stride[2] - args[8].dat->base[2] - d_m[2]);
  p_a[8] = (char *)args[8].data_d + base8;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d] + OPS_sub_dat_list[args[9].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d];
  #endif //OPS_MPI
  int base9 = dat9 * 1 *
  (start[0] * args[9].stencil->stride[0] - args[9].dat->base[0] - d_m[0]);
  base9 = base9+ dat9 *
  args[9].dat->size[0] *
  (start[1] * args[9].stencil->stride[1] - args[9].dat->base[1] - d_m[1]);
  base9 = base9+ dat9 *
  args[9].dat->size[0] *
  args[9].dat->size[1] *
  (start[2] * args[9].stencil->stride[2] - args[9].dat->base[2] - d_m[2]);
  p_a[9] = (char *)args[9].data_d + base9;
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d] + OPS_sub_dat_list[args[10].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d];
  #endif //OPS_MPI
  int base10 = dat10 * 1 *
  (start[0] * args[10].stencil->stride[0] - args[10].dat->base[0] - d_m[0]);
  base10 = base10+ dat10 *
  args[10].dat->size[0] *
  (start[1] * args[10].stencil->stride[1] - args[10].dat->base[1] - d_m[1]);
  base10 = base10+ dat10 *
  args[10].dat->size[0] *
  args[10].dat->size[1] *
  (start[2] * args[10].stencil->stride[2] - args[10].dat->base[2] - d_m[2]);
  p_a[10] = (char *)args[10].data_d + base10;
  // Make sure device copies and halos are current before launching.
  ops_H_D_exchanges_device(args, 11);
  ops_halo_exchanges(args,11,range);
  ops_timers_core(&c1,&t1);
  OPS_kernels[140].mpi_time += t1-t2;
  //call kernel wrapper function, passing in pointers to data
  ops_generate_chunk_kernel<<<grid, tblock >>> (  (double *)p_a[0], (double *)p_a[1],
           (double *)p_a[2], (double *)p_a[3],
           (double *)p_a[4], (double *)p_a[5],
           (double *)p_a[6], (double *)p_a[7],
           (double *)p_a[8], (double *)p_a[9],
           (double *)p_a[10],x_size, y_size, z_size);
  // Synchronize only when diagnostics are on, so launch errors surface here.
  if (OPS_diags>1) {
    cutilSafeCall(cudaDeviceSynchronize());
  }
  ops_timers_core(&c2,&t2);
  OPS_kernels[140].time += t2-t1;
  // Mark device copies dirty; args 3..7 are the written (output) dats.
  ops_set_dirtybit_device(args, 11);
  ops_set_halo_dirtybit3(&args[3],range);
  ops_set_halo_dirtybit3(&args[4],range);
  ops_set_halo_dirtybit3(&args[5],range);
  ops_set_halo_dirtybit3(&args[6],range);
  ops_set_halo_dirtybit3(&args[7],range);
  //Update kernel record
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg0);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg1);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg2);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg3);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg4);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg5);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg6);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg7);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg8);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg9);
  OPS_kernels[140].transfer += ops_compute_transfer(dim, range, &arg10);
}
|
9a15a0d8aefa47ba818b26f96af2dcda10271e96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "radix_sort.h"
// Block size used for CUDA kernel launch
#define BLOCK_SIZE 128
namespace Sort {
// Map each element in idata to 0/1 contrary to its d-th bit:
// writes 1 when bit d of idata[i] is clear, 0 when it is set.
__global__ void kern_map_bit_to_bool(int n, int d, int *rbools, const int *idata) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        int bit = (idata[tid] >> d) & 1;
        rbools[tid] = bit ? 0 : 1;
    }
}
// Generate the indices of split result for elements with true keys:
// a true-key element at position i lands at i - scan[i] + falses,
// i.e. after all `falses` false-key elements, keeping relative order.
__global__ void kern_gen_true_key_index(int n, int falses, int *odata, const int *scan) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        odata[tid] = falses + (tid - scan[tid]);
    }
}
// Generate the indices of split result for all elements: elements whose
// reversed-bit key is set take their exclusive-scan slot (front half),
// everything else takes the precomputed true-key address from t.
__global__ void kern_gen_index(int n, int *odata, const int *rbools, const int *scan, const int *t) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        if (rbools[tid]) {
            odata[tid] = scan[tid];
        } else {
            odata[tid] = t[tid];
        }
    }
}
// Scatter based on index array: odata[addr[i]] = idata[i] for each i < n.
__global__ void kern_scatter(int n, int *odata, const int *addr, const int *idata) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        int dest = addr[tid];
        odata[dest] = idata[tid];
    }
}
/*
 * Performs split on idata at turn d, storing the result into odata.
 * Output array with false keys before true keys (stable within each group).
 *
 * @param n      The number of elements in idata.
 * @param odata  Device array receiving the split result.
 * @param idata  Device array of elements to split.
 * @param rbools Device array: 1 where bit d of idata[i] is 0 (reversed key).
 */
void split(int n, int *odata, const int *idata, const int *rbools) {
    // Scratch device buffers: exclusive scan of rbools, the true-key target
    // addresses, and the final scatter addresses for every element.
    int *dev_scan_buffer;
    int *dev_true_buffer;
    int *dev_index_buffer;
    hipMalloc((void **)&dev_scan_buffer, n * sizeof(int));
    hipMalloc((void **)&dev_true_buffer, n * sizeof(int));
    hipMalloc((void **)&dev_index_buffer, n * sizeof(int));
    checkCUDAError("hipMalloc failed!");
    // Exclusive scan on reversed bool array: each false-key element's slot.
    StreamCompaction::Efficient_Upgraded::scan(n, dev_scan_buffer, rbools, false);
    // total_falses = last rbool + last exclusive-scan value (standard trick
    // to recover the count from an exclusive scan).
    // BUGFIX: this pair was previously allocated with `new int[2]` and then
    // released with free() — a mismatched allocator pair (undefined
    // behavior). A stack array removes the heap round trip entirely.
    int last_elements[2];
    hipMemcpy(last_elements, rbools + n - 1, sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(last_elements + 1, dev_scan_buffer + n - 1, sizeof(int), hipMemcpyDeviceToHost);
    checkCUDAError("hipMemcpy back failed!");
    int total_falses = last_elements[0] + last_elements[1];
    // Generate index array for writing true keys
    dim3 fullBlocksPerGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
    kern_gen_true_key_index << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, total_falses, dev_true_buffer, dev_scan_buffer);
    checkCUDAError("kern_gen_true_key_index failed!");
    // Generate index array for writing all keys
    kern_gen_index << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, dev_index_buffer, rbools, dev_scan_buffer, dev_true_buffer);
    checkCUDAError("kern_gen_index failed!");
    // Scatter to output
    kern_scatter << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, odata, dev_index_buffer, idata);
    checkCUDAError("kern_scatter failed!");
    // Cleanup
    hipFree(dev_scan_buffer);
    hipFree(dev_true_buffer);
    hipFree(dev_index_buffer);
    checkCUDAError("hipFree failed!");
}
/*
 * Performs radix sort on idata, storing the result into odata.
 * Sort data from smaller to larger: an LSD radix sort that applies one
 * stable split per bit, least significant bit first.
 *
 * @param n The number of elements in idata.
 * @param num_bits The maximum number of bits.
 * @param idata The array of elements to sort (host memory).
 * @param odata The result array (host memory).
 */
void radix_sort(int n, int num_bits, int *odata, const int *idata) {
    // Device buffers: working array, per-pass split output, and the
    // reversed-bit key array consumed by split().
    int *dev_array;
    int *dev_res;
    int *dev_bool_buffer;
    hipMalloc((void **)&dev_array, n * sizeof(int));
    hipMalloc((void **)&dev_res, n * sizeof(int));
    hipMalloc((void **)&dev_bool_buffer, n * sizeof(int));
    checkCUDAError("hipMalloc failed!");
    // Copy data to GPU
    hipMemcpy(dev_array, idata, sizeof(int) * n, hipMemcpyHostToDevice);
    checkCUDAError("hipMemcpy failed!");
    dim3 fullBlocksPerGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
    // Split for num_bits times
    for (int k = 0; k < num_bits; k++) {
        // Generate bool array for bit k
        kern_map_bit_to_bool << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, k, dev_bool_buffer, dev_array);
        checkCUDAError("kern_map_bit_to_bool failed!");
        split(n, dev_res, dev_array, dev_bool_buffer);
        // Ping-pong the buffers
        hipMemcpy(dev_array, dev_res, sizeof(int) * n, hipMemcpyDeviceToDevice);
        checkCUDAError("ping-pong failed!");
    }
    // Copy data back. BUGFIX: read from dev_array, which always holds the
    // result of the latest pass — the previous code read dev_res, which is
    // uninitialized device memory when num_bits == 0.
    hipMemcpy(odata, dev_array, sizeof(int) * n, hipMemcpyDeviceToHost);
    checkCUDAError("hipMemcpy back failed!");
    // Cleanup
    hipFree(dev_array);
    hipFree(dev_res);
    hipFree(dev_bool_buffer);
    checkCUDAError("hipFree failed!");
}
} | 9a15a0d8aefa47ba818b26f96af2dcda10271e96.cu | #include "radix_sort.h"
// Block size used for CUDA kernel launch
#define BLOCK_SIZE 128
namespace Sort {
// Map each element in idata to 0/1 contrary to its d-th bit:
// writes 1 when bit d of idata[i] is clear, 0 when it is set.
__global__ void kern_map_bit_to_bool(int n, int d, int *rbools, const int *idata) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        int bit = (idata[tid] >> d) & 1;
        rbools[tid] = bit ? 0 : 1;
    }
}
// Generate the indices of split result for elements with true keys:
// a true-key element at position i lands at i - scan[i] + falses,
// i.e. after all `falses` false-key elements, keeping relative order.
__global__ void kern_gen_true_key_index(int n, int falses, int *odata, const int *scan) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        odata[tid] = falses + (tid - scan[tid]);
    }
}
// Generate the indices of split result for all elements: elements whose
// reversed-bit key is set take their exclusive-scan slot (front half),
// everything else takes the precomputed true-key address from t.
__global__ void kern_gen_index(int n, int *odata, const int *rbools, const int *scan, const int *t) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        if (rbools[tid]) {
            odata[tid] = scan[tid];
        } else {
            odata[tid] = t[tid];
        }
    }
}
// Scatter based on index array: odata[addr[i]] = idata[i] for each i < n.
__global__ void kern_scatter(int n, int *odata, const int *addr, const int *idata) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        int dest = addr[tid];
        odata[dest] = idata[tid];
    }
}
/*
 * Performs split on idata at turn d, storing the result into odata.
 * Output array with false keys before true keys (stable within each group).
 *
 * @param n      The number of elements in idata.
 * @param odata  Device array receiving the split result.
 * @param idata  Device array of elements to split.
 * @param rbools Device array: 1 where bit d of idata[i] is 0 (reversed key).
 */
void split(int n, int *odata, const int *idata, const int *rbools) {
    // Scratch device buffers: exclusive scan of rbools, the true-key target
    // addresses, and the final scatter addresses for every element.
    int *dev_scan_buffer;
    int *dev_true_buffer;
    int *dev_index_buffer;
    cudaMalloc((void **)&dev_scan_buffer, n * sizeof(int));
    cudaMalloc((void **)&dev_true_buffer, n * sizeof(int));
    cudaMalloc((void **)&dev_index_buffer, n * sizeof(int));
    checkCUDAError("cudaMalloc failed!");
    // Exclusive scan on reversed bool array: each false-key element's slot.
    StreamCompaction::Efficient_Upgraded::scan(n, dev_scan_buffer, rbools, false);
    // total_falses = last rbool + last exclusive-scan value (standard trick
    // to recover the count from an exclusive scan).
    // BUGFIX: this pair was previously allocated with `new int[2]` and then
    // released with free() — a mismatched allocator pair (undefined
    // behavior). A stack array removes the heap round trip entirely.
    int last_elements[2];
    cudaMemcpy(last_elements, rbools + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(last_elements + 1, dev_scan_buffer + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy back failed!");
    int total_falses = last_elements[0] + last_elements[1];
    // Generate index array for writing true keys
    dim3 fullBlocksPerGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
    kern_gen_true_key_index << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, total_falses, dev_true_buffer, dev_scan_buffer);
    checkCUDAError("kern_gen_true_key_index failed!");
    // Generate index array for writing all keys
    kern_gen_index << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, dev_index_buffer, rbools, dev_scan_buffer, dev_true_buffer);
    checkCUDAError("kern_gen_index failed!");
    // Scatter to output
    kern_scatter << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, odata, dev_index_buffer, idata);
    checkCUDAError("kern_scatter failed!");
    // Cleanup
    cudaFree(dev_scan_buffer);
    cudaFree(dev_true_buffer);
    cudaFree(dev_index_buffer);
    checkCUDAError("cudaFree failed!");
}
/*
 * Performs radix sort on idata, storing the result into odata.
 * Sort data from smaller to larger: an LSD radix sort that applies one
 * stable split per bit, least significant bit first.
 *
 * @param n The number of elements in idata.
 * @param num_bits The maximum number of bits.
 * @param idata The array of elements to sort (host memory).
 * @param odata The result array (host memory).
 */
void radix_sort(int n, int num_bits, int *odata, const int *idata) {
    // Device buffers: working array, per-pass split output, and the
    // reversed-bit key array consumed by split().
    int *dev_array;
    int *dev_res;
    int *dev_bool_buffer;
    cudaMalloc((void **)&dev_array, n * sizeof(int));
    cudaMalloc((void **)&dev_res, n * sizeof(int));
    cudaMalloc((void **)&dev_bool_buffer, n * sizeof(int));
    checkCUDAError("cudaMalloc failed!");
    // Copy data to GPU
    cudaMemcpy(dev_array, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
    checkCUDAError("cudaMemcpy failed!");
    dim3 fullBlocksPerGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
    // Split for num_bits times
    for (int k = 0; k < num_bits; k++) {
        // Generate bool array for bit k
        kern_map_bit_to_bool << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, k, dev_bool_buffer, dev_array);
        checkCUDAError("kern_map_bit_to_bool failed!");
        split(n, dev_res, dev_array, dev_bool_buffer);
        // Ping-pong the buffers
        cudaMemcpy(dev_array, dev_res, sizeof(int) * n, cudaMemcpyDeviceToDevice);
        checkCUDAError("ping-pong failed!");
    }
    // Copy data back. BUGFIX: read from dev_array, which always holds the
    // result of the latest pass — the previous code read dev_res, which is
    // uninitialized device memory when num_bits == 0.
    cudaMemcpy(odata, dev_array, sizeof(int) * n, cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy back failed!");
    // Cleanup
    cudaFree(dev_array);
    cudaFree(dev_res);
    cudaFree(dev_bool_buffer);
    checkCUDAError("cudaFree failed!");
}
} |
2caa5fa50f6f718a83a570d8b77ae6c13acc83c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <functional>
#include <utility>
#include <vector>
#include <cfloat>
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/argmax_layer.hpp"
namespace caffe {
/**
 * @brief Finds and stores the indexes of the highest values along the first
 *        axis (channels) for one image of the batch.
 * @param n [in]: Number of threads to spawn (width * height)
 * @param num [in]: The index of image to process from batch
 * @param channels [in]: The number of channels
 * @param width [in]: The width of each image
 * @param height [in]: The height of each image
 * @param src [in]: A pointer to the GPU source (input) data, N x C x H x W
 * @param dst [out]: A pointer to the GPU destination (output) data, one
 *                   H x W plane of channel indices per image
 */
template <typename Dtype>
__global__ void kernel_channel_max_element(const int n, const int num, const int channels,
                                           const int width, const int height,
                                           const Dtype* src, Dtype* dst) {
  // Number of elements in one channel plane.
  const int channelSize = height * width;
  // First element of image `num` in the source blob.
  const Dtype* p = src + (num * channels) * channelSize;
  // One thread per spatial position; scan the channel axis for the arg-max.
  CUDA_KERNEL_LOOP(index, n) {
    int maxIndex = 0;
    Dtype maxval = -FLT_MAX;
    for (int c = 0; c < channels; c++) {
      Dtype val = *(p + index + c * channelSize);
      if (val > maxval) {
        maxIndex = c;
        maxval = val;
      }
    }
    // Fix: the output blob stores one H*W plane per image, so image `num`
    // begins at num * channelSize. The original wrote to num * channels +
    // index, which misplaces (and overlaps) results for every image after
    // the first whenever channels != height * width.
    dst[num * channelSize + index] = maxIndex;
  }
}
template <typename Dtype>
void ArgMaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // The GPU path only supports arg-max along axis 1 (channels) with
  // top_k == 1 and index (not value) output; anything else falls back to
  // the CPU implementation.
  if (!has_axis_ || top_k_ != 1 || out_max_val_ || axis_ != 1) {
    return Forward_cpu(bottom, top);
  }
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int channels = bottom[0]->channels();
  int NUM_ = bottom[0]->num();
  int height = bottom[0]->height();
  int width = bottom[0]->width();
  // Launch one kernel per image in the batch; each spawns one thread per
  // spatial position (width * height). The unused `count` local from the
  // original has been removed.
  for (int num = 0; num < NUM_; num++) {
    hipLaunchKernelGGL(( kernel_channel_max_element), dim3(CAFFE_GET_BLOCKS(width * height)),
        dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, width * height, num, channels, width, height,
        bottom_data, top_data);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ArgMaxLayer);
} // namespace caffe
| 2caa5fa50f6f718a83a570d8b77ae6c13acc83c2.cu | #include <algorithm>
#include <functional>
#include <utility>
#include <vector>
#include <cfloat>
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/argmax_layer.hpp"
namespace caffe {
/**
 * @brief Finds and stores the indexes of the highest values along the first
 *        axis (channels) for one image of the batch.
 * @param n [in]: Number of threads to spawn (width * height)
 * @param num [in]: The index of image to process from batch
 * @param channels [in]: The number of channels
 * @param width [in]: The width of each image
 * @param height [in]: The height of each image
 * @param src [in]: A pointer to the GPU source (input) data, N x C x H x W
 * @param dst [out]: A pointer to the GPU destination (output) data, one
 *                   H x W plane of channel indices per image
 */
template <typename Dtype>
__global__ void kernel_channel_max_element(const int n, const int num, const int channels,
                                           const int width, const int height,
                                           const Dtype* src, Dtype* dst) {
  // Number of elements in one channel plane.
  const int channelSize = height * width;
  // First element of image `num` in the source blob.
  const Dtype* p = src + (num * channels) * channelSize;
  // One thread per spatial position; scan the channel axis for the arg-max.
  CUDA_KERNEL_LOOP(index, n) {
    int maxIndex = 0;
    Dtype maxval = -FLT_MAX;
    for (int c = 0; c < channels; c++) {
      Dtype val = *(p + index + c * channelSize);
      if (val > maxval) {
        maxIndex = c;
        maxval = val;
      }
    }
    // Fix: the output blob stores one H*W plane per image, so image `num`
    // begins at num * channelSize. The original wrote to num * channels +
    // index, which misplaces (and overlaps) results for every image after
    // the first whenever channels != height * width.
    dst[num * channelSize + index] = maxIndex;
  }
}
template <typename Dtype>
void ArgMaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // The GPU path only supports arg-max along axis 1 (channels) with
  // top_k == 1 and index (not value) output; anything else falls back to
  // the CPU implementation.
  if (!has_axis_ || top_k_ != 1 || out_max_val_ || axis_ != 1) {
    return Forward_cpu(bottom, top);
  }
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int channels = bottom[0]->channels();
  int NUM_ = bottom[0]->num();
  int height = bottom[0]->height();
  int width = bottom[0]->width();
  // Launch one kernel per image in the batch; each spawns one thread per
  // spatial position (width * height). The unused `count` local from the
  // original has been removed.
  for (int num = 0; num < NUM_; num++) {
    kernel_channel_max_element<<<CAFFE_GET_BLOCKS(width * height),
        CAFFE_CUDA_NUM_THREADS>>>(width * height, num, channels, width, height,
        bottom_data, top_data);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ArgMaxLayer);
} // namespace caffe
|
71117649e4416788df29bbdcc94c85136aad387f.hip | // !!! This is a file automatically generated by hipify!!!
#include "./common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* Display the dimensionality of a thread block and grid from the host and
* device.
*/
// Debug kernel: every launched thread prints its own thread/block coordinates
// and the launch dimensions. Device printf output is serialized and its
// ordering across threads is unspecified — debugging use only.
__global__ void checkIndex(void)
{
printf("threadIdx:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z);
printf("blockIdx:(%d, %d, %d)\n", blockIdx.x, blockIdx.y, blockIdx.z);
printf("blockDim:(%d, %d, %d)\n", blockDim.x, blockDim.y, blockDim.z);
printf("gridDim:(%d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv)
{
// define total data element
int nElem = 6;
// define grid and block structure
dim3 block(3);
dim3 grid((nElem + block.x - 1) / block.x);
// check grid and block dimension from host side
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
// check grid and block dimension from device side
checkIndex << <grid, block >> > ();
// reset device before you leave
CHECK(hipDeviceReset());
return(0);
} | 71117649e4416788df29bbdcc94c85136aad387f.cu | #include "./common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* Display the dimensionality of a thread block and grid from the host and
* device.
*/
// Debug kernel: every launched thread prints its own thread/block coordinates
// and the launch dimensions. Device printf output is serialized and its
// ordering across threads is unspecified — debugging use only.
__global__ void checkIndex(void)
{
printf("threadIdx:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z);
printf("blockIdx:(%d, %d, %d)\n", blockIdx.x, blockIdx.y, blockIdx.z);
printf("blockDim:(%d, %d, %d)\n", blockDim.x, blockDim.y, blockDim.z);
printf("gridDim:(%d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv)
{
// define total data element
int nElem = 6;
// define grid and block structure
dim3 block(3);
dim3 grid((nElem + block.x - 1) / block.x);
// check grid and block dimension from host side
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
// check grid and block dimension from device side
checkIndex << <grid, block >> > ();
// reset device before you leave
CHECK(cudaDeviceReset());
return(0);
} |
e6f245a049f22bf1f9455f38caca3feed15f3335.hip | // !!! This is a file automatically generated by hipify!!!
#include <cutil_inline.h>
//texture<float, 2, hipReadModeElementType> texref;
//////////////////////
//my area filter
//////////////////////
// 2x2 box (area) filter over a 2D texture: averages the four texels
// surrounding (x, y) with equal 0.25 weight. T is the texel type, R the
// return/accumulation type.
// NOTE(review): relies on the legacy texture<> reference API (removed in
// CUDA 12) — confirm the target toolchain still supports it.
template<class T, class R> // return type, texture type
__device__
R tex2D4N(const texture<T, 2, hipReadModeNormalizedFloat> tex_ref, float x, float y)
{
R r;
r = 0.25f * (tex2D(tex_ref, x-0.5f, y-0.5f)+tex2D(tex_ref, x-0.5f, y+0.5f)+
tex2D(tex_ref, x+0.5f, y-0.5f)+tex2D(tex_ref, x+0.5f, y+0.5f));
return r;
}
// 4x4 box (area) filter over a 2D texture: averages the sixteen texels
// surrounding (x, y) with equal 1/16 weight. T is the texel type, R the
// return/accumulation type.
// NOTE(review): relies on the legacy texture<> reference API (removed in
// CUDA 12) — confirm the target toolchain still supports it.
template<class T, class R> // return type, texture type
__device__
R tex2D16N(const texture<T, 2, hipReadModeNormalizedFloat> tex_ref, float x, float y)
{
R r;
r = 0.0625f * (tex2D(tex_ref, x-1.5f, y-1.5f) + tex2D(tex_ref, x-0.5f, y-1.5f) + tex2D(tex_ref, x+0.5f, y-1.5f) + tex2D(tex_ref, x+1.5f, y-1.5f)+
tex2D(tex_ref, x-1.5f, y-0.5f) + tex2D(tex_ref, x-0.5f, y-0.5f) + tex2D(tex_ref, x+0.5f, y-0.5f) + tex2D(tex_ref, x+1.5f, y-0.5f)+
tex2D(tex_ref, x-1.5f, y+0.5f) + tex2D(tex_ref, x-0.5f, y+0.5f) + tex2D(tex_ref, x+0.5f, y+0.5f) + tex2D(tex_ref, x+1.5f, y+0.5f)+
tex2D(tex_ref, x-1.5f, y+1.5f) + tex2D(tex_ref, x-0.5f, y+1.5f) + tex2D(tex_ref, x+0.5f, y+1.5f) + tex2D(tex_ref, x+1.5f, y+1.5f));
return r;
}
/*__device__
float tex2D4N(const texture<unsigned char, 2, hipReadModeElementType> tex_ref, float x, float y)
{
float r;
r = 0.25f * (tex2D(tex_ref, x-0.5f, y-0.5f)+tex2D(tex_ref, x-0.5f, y+0.5f)+
tex2D(tex_ref, x+0.5f, y-0.5f)+tex2D(tex_ref, x+0.5f, y+0.5f));
return r;
}
__device__
float tex2D16N(const texture<unsigned char, 2, hipReadModeElementType> tex_ref, float x, float y)
{
float r;
r = 0.0625f * (tex2D(tex_ref, x-1.5f, y-1.5f) + tex2D(tex_ref, x-0.5f, y-1.5f) + tex2D(tex_ref, x+0.5f, y-1.5f) + tex2D(tex_ref, x+1.5f, y-1.5f)+
tex2D(tex_ref, x-1.5f, y-0.5f) + tex2D(tex_ref, x-0.5f, y-0.5f) + tex2D(tex_ref, x+0.5f, y-0.5f) + tex2D(tex_ref, x+1.5f, y-0.5f)+
tex2D(tex_ref, x-1.5f, y+0.5f) + tex2D(tex_ref, x-0.5f, y+0.5f) + tex2D(tex_ref, x+0.5f, y+0.5f) + tex2D(tex_ref, x+1.5f, y+0.5f)+
tex2D(tex_ref, x-1.5f, y+1.5f) + tex2D(tex_ref, x-0.5f, y+1.5f) + tex2D(tex_ref, x+0.5f, y+1.5f) + tex2D(tex_ref, x+1.5f, y+1.5f));
return r;
}*/
| e6f245a049f22bf1f9455f38caca3feed15f3335.cu | #include <cutil_inline.h>
//texture<float, 2, cudaReadModeElementType> texref;
//////////////////////
//my area filter
//////////////////////
// 2x2 box (area) filter over a 2D texture: averages the four texels
// surrounding (x, y) with equal 0.25 weight. T is the texel type, R the
// return/accumulation type.
// NOTE(review): relies on the legacy texture<> reference API (removed in
// CUDA 12) — confirm the target toolchain still supports it.
template<class T, class R> // return type, texture type
__device__
R tex2D4N(const texture<T, 2, cudaReadModeNormalizedFloat> tex_ref, float x, float y)
{
R r;
r = 0.25f * (tex2D(tex_ref, x-0.5f, y-0.5f)+tex2D(tex_ref, x-0.5f, y+0.5f)+
tex2D(tex_ref, x+0.5f, y-0.5f)+tex2D(tex_ref, x+0.5f, y+0.5f));
return r;
}
// 4x4 box (area) filter over a 2D texture: averages the sixteen texels
// surrounding (x, y) with equal 1/16 weight. T is the texel type, R the
// return/accumulation type.
// NOTE(review): relies on the legacy texture<> reference API (removed in
// CUDA 12) — confirm the target toolchain still supports it.
template<class T, class R> // return type, texture type
__device__
R tex2D16N(const texture<T, 2, cudaReadModeNormalizedFloat> tex_ref, float x, float y)
{
R r;
r = 0.0625f * (tex2D(tex_ref, x-1.5f, y-1.5f) + tex2D(tex_ref, x-0.5f, y-1.5f) + tex2D(tex_ref, x+0.5f, y-1.5f) + tex2D(tex_ref, x+1.5f, y-1.5f)+
tex2D(tex_ref, x-1.5f, y-0.5f) + tex2D(tex_ref, x-0.5f, y-0.5f) + tex2D(tex_ref, x+0.5f, y-0.5f) + tex2D(tex_ref, x+1.5f, y-0.5f)+
tex2D(tex_ref, x-1.5f, y+0.5f) + tex2D(tex_ref, x-0.5f, y+0.5f) + tex2D(tex_ref, x+0.5f, y+0.5f) + tex2D(tex_ref, x+1.5f, y+0.5f)+
tex2D(tex_ref, x-1.5f, y+1.5f) + tex2D(tex_ref, x-0.5f, y+1.5f) + tex2D(tex_ref, x+0.5f, y+1.5f) + tex2D(tex_ref, x+1.5f, y+1.5f));
return r;
}
/*__device__
float tex2D4N(const texture<unsigned char, 2, cudaReadModeElementType> tex_ref, float x, float y)
{
float r;
r = 0.25f * (tex2D(tex_ref, x-0.5f, y-0.5f)+tex2D(tex_ref, x-0.5f, y+0.5f)+
tex2D(tex_ref, x+0.5f, y-0.5f)+tex2D(tex_ref, x+0.5f, y+0.5f));
return r;
}
__device__
float tex2D16N(const texture<unsigned char, 2, cudaReadModeElementType> tex_ref, float x, float y)
{
float r;
r = 0.0625f * (tex2D(tex_ref, x-1.5f, y-1.5f) + tex2D(tex_ref, x-0.5f, y-1.5f) + tex2D(tex_ref, x+0.5f, y-1.5f) + tex2D(tex_ref, x+1.5f, y-1.5f)+
tex2D(tex_ref, x-1.5f, y-0.5f) + tex2D(tex_ref, x-0.5f, y-0.5f) + tex2D(tex_ref, x+0.5f, y-0.5f) + tex2D(tex_ref, x+1.5f, y-0.5f)+
tex2D(tex_ref, x-1.5f, y+0.5f) + tex2D(tex_ref, x-0.5f, y+0.5f) + tex2D(tex_ref, x+0.5f, y+0.5f) + tex2D(tex_ref, x+1.5f, y+0.5f)+
tex2D(tex_ref, x-1.5f, y+1.5f) + tex2D(tex_ref, x-0.5f, y+1.5f) + tex2D(tex_ref, x+0.5f, y+1.5f) + tex2D(tex_ref, x+1.5f, y+1.5f));
return r;
}*/
|
792a8096f38abc32b960aa2f3a25fa80179f76bd.hip | // !!! This is a file automatically generated by hipify!!!
/*
MIT License
Copyright (c) 2019 Michael Ksel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "kernel/predict.h"
#include "cuda_utils.h"
#include "common.h"
#include <thrust/random.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// Particle-filter prediction step: applies the motion model to every particle,
// decays its weight by the survival probability, and re-seeds particles that
// leave the grid at a random cell with random velocity.
__global__ void predictKernel(Particle* particle_array, int grid_size, float p_S, const glm::mat4x4 transition_matrix,
                              const glm::vec4 process_noise, int particle_count)
{
    // Grid-stride loop over all particles.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
    {
        // Linear motion update plus additive process noise.
        particle_array[i].state = transition_matrix * particle_array[i].state + process_noise;
        // Scale the weight by the survival probability.
        particle_array[i].weight = p_S * particle_array[i].weight;
        float x = particle_array[i].state[0];
        float y = particle_array[i].state[1];
        // Particle left the grid: re-initialize it at a random cell.
        if ((x > grid_size - 1 || x < 0) || (y > grid_size - 1 || y < 0))
        {
            unsigned int seed = hash(i);
            thrust::default_random_engine rng(seed);
            // Fix: thrust::uniform_int_distribution is inclusive on BOTH
            // bounds, so the original upper bound of grid_size * grid_size
            // could produce index == grid_size^2 and thus y == grid_size,
            // one row outside the grid it was meant to stay in.
            thrust::uniform_int_distribution<int> dist_idx(0, grid_size * grid_size - 1);
            thrust::normal_distribution<float> dist_vel(0.0f, 12.0f);
            const int index = dist_idx(rng);
            x = index % grid_size;
            y = index / grid_size;
            particle_array[i].state = glm::vec4(x, y, dist_vel(rng), dist_vel(rng));
        }
        // Clamp to a valid cell and store the flattened cell index.
        int pos_x = clamp(static_cast<int>(x), 0, grid_size - 1);
        int pos_y = clamp(static_cast<int>(y), 0, grid_size - 1);
        particle_array[i].grid_cell_idx = pos_x + grid_size * pos_y;
    }
}
| 792a8096f38abc32b960aa2f3a25fa80179f76bd.cu | /*
MIT License
Copyright (c) 2019 Michael Kösel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "kernel/predict.h"
#include "cuda_utils.h"
#include "common.h"
#include <thrust/random.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Particle-filter prediction step: applies the motion model to every particle,
// decays its weight by the survival probability, and re-seeds particles that
// leave the grid at a random cell with random velocity.
__global__ void predictKernel(Particle* particle_array, int grid_size, float p_S, const glm::mat4x4 transition_matrix,
                              const glm::vec4 process_noise, int particle_count)
{
    // Grid-stride loop over all particles.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
    {
        // Linear motion update plus additive process noise.
        particle_array[i].state = transition_matrix * particle_array[i].state + process_noise;
        // Scale the weight by the survival probability.
        particle_array[i].weight = p_S * particle_array[i].weight;
        float x = particle_array[i].state[0];
        float y = particle_array[i].state[1];
        // Particle left the grid: re-initialize it at a random cell.
        if ((x > grid_size - 1 || x < 0) || (y > grid_size - 1 || y < 0))
        {
            unsigned int seed = hash(i);
            thrust::default_random_engine rng(seed);
            // Fix: thrust::uniform_int_distribution is inclusive on BOTH
            // bounds, so the original upper bound of grid_size * grid_size
            // could produce index == grid_size^2 and thus y == grid_size,
            // one row outside the grid it was meant to stay in.
            thrust::uniform_int_distribution<int> dist_idx(0, grid_size * grid_size - 1);
            thrust::normal_distribution<float> dist_vel(0.0f, 12.0f);
            const int index = dist_idx(rng);
            x = index % grid_size;
            y = index / grid_size;
            particle_array[i].state = glm::vec4(x, y, dist_vel(rng), dist_vel(rng));
        }
        // Clamp to a valid cell and store the flattened cell index.
        int pos_x = clamp(static_cast<int>(x), 0, grid_size - 1);
        int pos_y = clamp(static_cast<int>(y), 0, grid_size - 1);
        particle_array[i].grid_cell_idx = pos_x + grid_size * pos_y;
    }
}
|
a98e7f41ecbd34a3465c4f4ac4594ef50449cd7b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_fp16.h>
#include <limits>
#include <iostream>
// Prints the representable range of the __fp16 half-precision type.
// NOTE(review): std::numeric_limits is not specialized for __fp16 by the
// standard library; unless the toolchain provides a specialization,
// lowest()/min()/max() fall back to the primary template and print 0 —
// confirm on the target compiler.
int main() {
using float16_t = __fp16;
std::cout << "float16\t"
<< float(std::numeric_limits<float16_t>::lowest()) << '\t'
<< float(std::numeric_limits<float16_t>::min()) << '\t'
<< float(std::numeric_limits<float16_t>::max()) << std::endl;
return 0;
}
| a98e7f41ecbd34a3465c4f4ac4594ef50449cd7b.cu | #include <cuda_fp16.h>
#include <limits>
#include <iostream>
// Prints the representable range of the __fp16 half-precision type.
// NOTE(review): std::numeric_limits is not specialized for __fp16 by the
// standard library; unless the toolchain provides a specialization,
// lowest()/min()/max() fall back to the primary template and print 0 —
// confirm on the target compiler.
int main() {
using float16_t = __fp16;
std::cout << "float16\t"
<< float(std::numeric_limits<float16_t>::lowest()) << '\t'
<< float(std::numeric_limits<float16_t>::min()) << '\t'
<< float(std::numeric_limits<float16_t>::max()) << std::endl;
return 0;
}
|
ee4734109bbfa70b1ef21ae80632a3f1d0d90a55.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2019 by Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*
* The expressions like wsum == 0 ? esum : esum / wsum is used to handle empty dataset.
*/
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <dmlc/registry.h>
#include <cmath>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::hip::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag the this file, used by force static link later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
// Accumulates per-row metric contributions (policy's EvalRow) over a dataset
// into a (residue_sum, weights_sum) pair, either on CPU (OpenMP) or on the
// GPU (thrust) depending on the configured device.
template <typename EvalRow>
class ElementWiseMetricsReduction {
 public:
  explicit ElementWiseMetricsReduction(EvalRow policy) : policy_(std::move(policy)) {}
  // CPU path: OpenMP parallel reduction over all rows. A missing weight
  // vector is treated as weight 1.0 for every row.
  PackedReduceResult CpuReduceMetrics(
      const HostDeviceVector<bst_float>& weights,
      const HostDeviceVector<bst_float>& labels,
      const HostDeviceVector<bst_float>& preds) const {
    size_t ndata = labels.Size();
    const auto& h_labels = labels.HostVector();
    const auto& h_weights = weights.HostVector();
    const auto& h_preds = preds.HostVector();
    bst_float residue_sum = 0;
    bst_float weights_sum = 0;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
    for (omp_ulong i = 0; i < ndata; ++i) {
      const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f;
      residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt;
      weights_sum += wt;
    }
    PackedReduceResult res { residue_sum, weights_sum };
    return res;
  }
#if defined(XGBOOST_USE_CUDA)
  // GPU path: single transform_reduce over [0, n_data), using a cached
  // device allocator for thrust's temporary storage.
  PackedReduceResult DeviceReduceMetrics(
      const HostDeviceVector<bst_float>& weights,
      const HostDeviceVector<bst_float>& labels,
      const HostDeviceVector<bst_float>& preds) {
    size_t n_data = preds.Size();
    thrust::counting_iterator<size_t> begin(0);
    thrust::counting_iterator<size_t> end = begin + n_data;
    auto s_label = labels.DeviceSpan();
    auto s_preds = preds.DeviceSpan();
    auto s_weights = weights.DeviceSpan();
    bool const is_null_weight = weights.Size() == 0;
    // Copy the policy by value so the device lambda does not capture `this`.
    auto d_policy = policy_;
    dh::XGBCachingDeviceAllocator<char> alloc;
    PackedReduceResult result = thrust::transform_reduce(
        thrust::hip::par(alloc),
        begin, end,
        [=] XGBOOST_DEVICE(size_t idx) {
          bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
          bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]);
          residue *= weight;
          return PackedReduceResult{ residue, weight };
        },
        PackedReduceResult(),
        thrust::plus<PackedReduceResult>());
    return result;
  }
#endif  // XGBOOST_USE_CUDA
  // Dispatches to the CPU (device < 0) or the given GPU; on the GPU path the
  // input vectors are migrated to the selected device first.
  PackedReduceResult Reduce(
      const GenericParameter &tparam,
      int device,
      const HostDeviceVector<bst_float>& weights,
      const HostDeviceVector<bst_float>& labels,
      const HostDeviceVector<bst_float>& preds) {
    PackedReduceResult result;
    if (device < 0) {
      result = CpuReduceMetrics(weights, labels, preds);
    }
#if defined(XGBOOST_USE_CUDA)
    else {  // NOLINT
      device_ = device;
      preds.SetDevice(device_);
      labels.SetDevice(device_);
      weights.SetDevice(device_);
      dh::safe_cuda(hipSetDevice(device_));
      result = DeviceReduceMetrics(weights, labels, preds);
    }
#endif  // defined(XGBOOST_USE_CUDA)
    return result;
  }
 private:
  EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
  int device_{-1};
#endif  // defined(XGBOOST_USE_CUDA)
};
// Root-mean-square-error policy: per-row squared residual, finalized by a
// (weighted) mean followed by a square root.
struct EvalRowRMSE {
  char const *Name() const {
    return "rmse";
  }
  // Squared residual for a single row.
  XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
    const bst_float residual = label - pred;
    return residual * residual;
  }
  // Final value: sqrt of the weighted mean; wsum == 0 guards empty datasets.
  static bst_float GetFinal(bst_float esum, bst_float wsum) {
    if (wsum == 0) {
      return std::sqrt(esum);
    }
    return std::sqrt(esum / wsum);
  }
};
// Root-mean-square-log-error: squared difference of log1p-transformed values.
struct EvalRowRMSLE {
char const* Name() const {
return "rmsle";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = std::log1p(label) - std::log1p(pred);
return diff * diff;
}
// sqrt of the weighted mean; wsum == 0 guards empty datasets.
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
// Mean absolute error.
struct EvalRowMAE {
const char *Name() const {
return "mae";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs(label - pred);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
// Mean absolute percentage error.
// NOTE(review): divides by label — a zero label yields inf/NaN for that row;
// confirm callers guarantee non-zero labels for this metric.
struct EvalRowMAPE {
const char *Name() const {
return "mape";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs((label - pred) / label);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
// Binary cross-entropy; predictions are clipped to [eps, 1 - eps] via the
// branches below to avoid log(0).
struct EvalRowLogLoss {
const char *Name() const {
return "logloss";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
const bst_float pneg = 1.0f - py;
if (py < eps) {
return -y * ::log(eps) - (1.0f - y) * ::log(1.0f - eps);
} else if (pneg < eps) {
return -y * ::log(1.0f - eps) - (1.0f - y) * ::log(eps);
} else {
return -y * ::log(py) - (1.0f - y) * ::log(pneg);
}
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
// Mean pseudo-Huber error: sqrt(1 + diff^2) - 1, a smooth approximation of
// the absolute error.
struct EvalRowMPHE {
char const *Name() const {
return "mphe";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return std::sqrt( 1 + diff * diff) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
// Binary classification error at a configurable threshold ("error" or
// "error@t"). A row counts as wrong when the prediction crosses the threshold
// but the label disagrees.
struct EvalError {
explicit EvalError(const char* param) {
if (param != nullptr) {
// Parse the "@t" suffix as the decision threshold.
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
const char *Name() const {
// NOTE(review): function-local static buffer — the returned pointer is
// shared across instances and not thread-safe; confirm Name() is only
// called from a single thread.
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
XGBOOST_DEVICE bst_float EvalRow(
bst_float label, bst_float pred) const {
// assume label is in [0,1]
return pred > threshold_ ? 1.0f - label : label;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
bst_float threshold_;
bool has_param_;
};
// Negative log-likelihood of the Poisson distribution; predictions are
// floored at eps to avoid log(0).
struct EvalPoissonNegLogLik {
const char *Name() const {
return "poisson-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - ::log(py) * y;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
// Residual deviance for gamma regression.
struct EvalGammaDeviance {
const char *Name() const {
return "gamma-deviance";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float epsilon = 1.0e-9;
bst_float tmp = label / (pred + epsilon);
return tmp - ::log(tmp) - 1;
}
// NOTE(review): returns 2 * sum without dividing by wsum — weights do not
// enter the final value here, unlike the other metrics; confirm intended.
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return 2 * esum;
}
};
// Negative log-likelihood for gamma regression with fixed dispersion psi = 1.
struct EvalGammaNLogLik {
static const char *Name() {
return "gamma-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
bst_float psi = 1.0;
bst_float theta = -1. / py;
bst_float a = psi;
bst_float b = -::log(-theta);
bst_float c = 1. / psi * ::log(y/psi) - ::log(y) - common::LogGamma(1. / psi);
return -((y * theta - b) / a + c);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
// Negative log-likelihood for Tweedie regression; the variance power rho is
// mandatory and parsed from the "@rho" suffix, constrained to [1, 2).
struct EvalTweedieNLogLik {
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
const char *Name() const {
// NOTE(review): function-local static buffer — not thread-safe; see
// EvalError::Name above.
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
bst_float a = y * ::exp((1 - rho_) * ::log(p)) / (1 - rho_);
bst_float b = ::exp((2 - rho_) * ::log(p)) / (2 - rho_);
return -a + b;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
protected:
bst_float rho_;
};
/*!
 * \brief base class of element-wise evaluation
 * \tparam Policy the per-row policy (EvalRow / GetFinal pair) to apply
 */
template<typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() = default;
explicit EvalEWiseBase(char const* policy_param) :
policy_{policy_param}, reducer_{policy_} {}
// Evaluates the metric over the whole dataset: reduces per-row values on
// CPU or GPU, optionally aggregates across workers via rabit, then applies
// the policy's final transform.
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
int device = tparam_->gpu_id;
auto result =
reducer_.Reduce(*tparam_, device, info.weights_, info.labels_, preds);
// dat[0] = residue sum, dat[1] = weight sum; summed across workers when
// running distributed.
double dat[2] { result.Residue(), result.Weights() };
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseMetricsReduction<Policy> reducer_{policy_};
};
// Register each metric with xgboost's global metric factory. The lambda's
// `param` is the optional "@value" suffix of the metric name (e.g.
// "error@0.7", "tweedie-nloglik@1.5"); most metrics ignore it.
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Rooted mean square error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(RMSLE, "rmsle")
.describe("Rooted mean square log error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSLE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae")
.describe("Mean absolute error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); });
XGBOOST_REGISTER_METRIC(MAPE, "mape")
.describe("Mean absolute percentage error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAPE>(); });
XGBOOST_REGISTER_METRIC(MPHE, "mphe")
.describe("Mean Pseudo Huber error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMPHE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
// EvalError and EvalTweedieNLogLik consume `param` (threshold / rho).
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
} // namespace metric
} // namespace xgboost
| ee4734109bbfa70b1ef21ae80632a3f1d0d90a55.cu | /*!
* Copyright 2015-2019 by Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*
* The expressions like wsum == 0 ? esum : esum / wsum is used to handle empty dataset.
*/
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <dmlc/registry.h>
#include <cmath>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::cuda::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag the this file, used by force static link later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
// Accumulates per-row metric contributions (policy's EvalRow) over a dataset
// into a (residue_sum, weights_sum) pair, either on CPU (OpenMP) or on the
// GPU (thrust) depending on the configured device.
template <typename EvalRow>
class ElementWiseMetricsReduction {
 public:
  explicit ElementWiseMetricsReduction(EvalRow policy) : policy_(std::move(policy)) {}
  // CPU path: OpenMP parallel reduction over all rows. A missing weight
  // vector is treated as weight 1.0 for every row.
  PackedReduceResult CpuReduceMetrics(
      const HostDeviceVector<bst_float>& weights,
      const HostDeviceVector<bst_float>& labels,
      const HostDeviceVector<bst_float>& preds) const {
    size_t ndata = labels.Size();
    const auto& h_labels = labels.HostVector();
    const auto& h_weights = weights.HostVector();
    const auto& h_preds = preds.HostVector();
    bst_float residue_sum = 0;
    bst_float weights_sum = 0;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
    for (omp_ulong i = 0; i < ndata; ++i) {
      const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f;
      residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt;
      weights_sum += wt;
    }
    PackedReduceResult res { residue_sum, weights_sum };
    return res;
  }
#if defined(XGBOOST_USE_CUDA)
  // GPU path: single transform_reduce over [0, n_data), using a cached
  // device allocator for thrust's temporary storage.
  PackedReduceResult DeviceReduceMetrics(
      const HostDeviceVector<bst_float>& weights,
      const HostDeviceVector<bst_float>& labels,
      const HostDeviceVector<bst_float>& preds) {
    size_t n_data = preds.Size();
    thrust::counting_iterator<size_t> begin(0);
    thrust::counting_iterator<size_t> end = begin + n_data;
    auto s_label = labels.DeviceSpan();
    auto s_preds = preds.DeviceSpan();
    auto s_weights = weights.DeviceSpan();
    bool const is_null_weight = weights.Size() == 0;
    // Copy the policy by value so the device lambda does not capture `this`.
    auto d_policy = policy_;
    dh::XGBCachingDeviceAllocator<char> alloc;
    PackedReduceResult result = thrust::transform_reduce(
        thrust::cuda::par(alloc),
        begin, end,
        [=] XGBOOST_DEVICE(size_t idx) {
          bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
          bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]);
          residue *= weight;
          return PackedReduceResult{ residue, weight };
        },
        PackedReduceResult(),
        thrust::plus<PackedReduceResult>());
    return result;
  }
#endif  // XGBOOST_USE_CUDA
  // Dispatches to the CPU (device < 0) or the given GPU; on the GPU path the
  // input vectors are migrated to the selected device first.
  PackedReduceResult Reduce(
      const GenericParameter &tparam,
      int device,
      const HostDeviceVector<bst_float>& weights,
      const HostDeviceVector<bst_float>& labels,
      const HostDeviceVector<bst_float>& preds) {
    PackedReduceResult result;
    if (device < 0) {
      result = CpuReduceMetrics(weights, labels, preds);
    }
#if defined(XGBOOST_USE_CUDA)
    else {  // NOLINT
      device_ = device;
      preds.SetDevice(device_);
      labels.SetDevice(device_);
      weights.SetDevice(device_);
      dh::safe_cuda(cudaSetDevice(device_));
      result = DeviceReduceMetrics(weights, labels, preds);
    }
#endif  // defined(XGBOOST_USE_CUDA)
    return result;
  }
 private:
  EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
  int device_{-1};
#endif  // defined(XGBOOST_USE_CUDA)
};
// Root-mean-square-error policy: per-row squared residual, finalized by a
// (weighted) mean followed by a square root.
struct EvalRowRMSE {
  char const *Name() const {
    return "rmse";
  }
  // Squared residual for a single row.
  XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
    const bst_float residual = label - pred;
    return residual * residual;
  }
  // Final value: sqrt of the weighted mean; wsum == 0 guards empty datasets.
  static bst_float GetFinal(bst_float esum, bst_float wsum) {
    if (wsum == 0) {
      return std::sqrt(esum);
    }
    return std::sqrt(esum / wsum);
  }
};
struct EvalRowRMSLE {
  /*! \brief metric name reported to the user */
  char const* Name() const {
    return "rmsle";
  }
  /*! \brief squared log1p-error contribution of one (label, prediction) pair */
  XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
    const bst_float err = std::log1p(label) - std::log1p(pred);
    return err * err;
  }
  /*! \brief final metric: root of the (weighted) mean squared log error */
  static bst_float GetFinal(bst_float esum, bst_float wsum) {
    if (wsum == 0) {
      return std::sqrt(esum);
    }
    return std::sqrt(esum / wsum);
  }
};
struct EvalRowMAE {
  /*! \brief metric name reported to the user */
  const char *Name() const {
    return "mae";
  }
  /*! \brief absolute error contribution of one (label, prediction) pair */
  XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
    return std::abs(label - pred);
  }
  /*! \brief final metric: (weighted) mean of the per-row errors */
  static bst_float GetFinal(bst_float esum, bst_float wsum) {
    if (wsum == 0) {
      return esum;
    }
    return esum / wsum;
  }
};
struct EvalRowMAPE {
// Metric name reported to the user.
const char *Name() const {
return "mape";
}
// Absolute percentage error of one row.
// NOTE(review): a zero label divides by zero here, propagating inf/nan into
// the sum -- confirm callers guarantee non-zero labels.
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs((label - pred) / label);
}
// Final metric: (weighted) mean of the per-row errors.
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowLogLoss {
// Metric name reported to the user.
const char *Name() const {
return "logloss";
}
// Negative log-likelihood of binary outcome y under predicted probability
// py.  The branches effectively clamp py to [eps, 1 - eps] so std::log
// never receives 0.
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
const bst_float pneg = 1.0f - py;
if (py < eps) {
return -y * std::log(eps) - (1.0f - y) * std::log(1.0f - eps);
} else if (pneg < eps) {
return -y * std::log(1.0f - eps) - (1.0f - y) * std::log(eps);
} else {
return -y * std::log(py) - (1.0f - y) * std::log(pneg);
}
}
// Final metric: (weighted) mean of the per-row losses.
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMPHE {
  /*! \brief metric name reported to the user */
  char const *Name() const {
    return "mphe";
  }
  /*! \brief pseudo-Huber error contribution of one (label, prediction) pair */
  XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
    const bst_float err = label - pred;
    return std::sqrt(1 + err * err) - 1;
  }
  /*! \brief final metric: (weighted) mean of the per-row errors */
  static bst_float GetFinal(bst_float esum, bst_float wsum) {
    if (wsum == 0) {
      return esum;
    }
    return esum / wsum;
  }
};
struct EvalError {
// Parses the optional classification threshold from the metric parameter
// string ("error@t"); defaults to 0.5 when no parameter is given.
explicit EvalError(const char* param) {
if (param != nullptr) {
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
// Metric name, with an "@threshold" suffix for non-default thresholds.
// NOTE(review): the function-local static buffer makes this call
// non-reentrant -- confirm Name() is never called concurrently.
const char *Name() const {
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
// Error contribution of one row: the label mass on the wrong side of the
// threshold.
XGBOOST_DEVICE bst_float EvalRow(
bst_float label, bst_float pred) const {
// assume label is in [0,1]
return pred > threshold_ ? 1.0f - label : label;
}
// Final metric: (weighted) mean of the per-row errors.
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
bst_float threshold_;   // classification cut-off applied to predictions
bool has_param_;        // whether a threshold was supplied explicitly
};
struct EvalPoissonNegLogLik {
// Metric name reported to the user.
const char *Name() const {
return "poisson-nloglik";
}
// Negative log-likelihood of count y under a Poisson with mean py;
// py is clamped below by eps so std::log never receives 0.
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - std::log(py) * y;
}
// Final metric: (weighted) mean of the per-row losses.
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalGammaDeviance {
// Metric name reported to the user.
const char *Name() const {
return "gamma-deviance";
}
// Per-row gamma deviance term; epsilon guards against a zero prediction in
// the ratio.
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float epsilon = 1.0e-9;
bst_float tmp = label / (pred + epsilon);
return tmp - std::log(tmp) - 1;
}
// Final metric: twice the summed contributions.  NOTE(review): wsum is
// ignored here, so the result is not normalized by instance weights --
// confirm that is intentional.
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return 2 * esum;
}
};
struct EvalGammaNLogLik {
// Metric name reported to the user.
static const char *Name() {
return "gamma-nloglik";
}
// Negative log-likelihood of y under a gamma model with mean py, written in
// exponential-family form with fixed dispersion psi = 1.
// NOTE(review): the `1.` / `-1.` literals promote parts of this expression
// to double inside a device function -- confirm that is intended.
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
bst_float psi = 1.0;
bst_float theta = -1. / py;
bst_float a = psi;
bst_float b = -std::log(-theta);
bst_float c = 1. / psi * std::log(y/psi) - std::log(y) - common::LogGamma(1. / psi);
return -((y * theta - b) / a + c);
}
// Final metric: (weighted) mean of the per-row losses.
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalTweedieNLogLik {
// Parses the mandatory variance power rho from "tweedie-nloglik@rho" and
// validates that it lies in [1, 2).
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
// Metric name including the rho suffix.  NOTE(review): the function-local
// static buffer makes this call non-reentrant -- confirm Name() is never
// called concurrently.
const char *Name() const {
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
// Per-row negative log-likelihood term of the Tweedie deviance with
// variance power rho_ (constant-in-p terms omitted).
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
bst_float a = y * std::exp((1 - rho_) * std::log(p)) / (1 - rho_);
bst_float b = std::exp((2 - rho_) * std::log(p)) / (2 - rho_);
return -a + b;
}
// Final metric: (weighted) mean of the per-row losses.
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
protected:
bst_float rho_;   // Tweedie variance power, in [1, 2)
};
/*!
* \brief base class of element-wise evaluation
* \tparam Policy the policy class providing EvalRow() and GetFinal()
*/
template<typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() = default;
// Forwards the metric parameter string to the policy (used by metrics such
// as "error@t" and "tweedie-nloglik@rho").
explicit EvalEWiseBase(char const* policy_param) :
policy_{policy_param}, reducer_{policy_} {}
// Evaluates the metric over all rows, reducing on CPU or GPU depending on
// tparam_->gpu_id, then (when distributed) all-reducing the partial sums
// across workers before Policy::GetFinal turns them into the final value.
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
int device = tparam_->gpu_id;
auto result =
reducer_.Reduce(*tparam_, device, info.weights_, info.labels_, preds);
// dat[0] = residue sum, dat[1] = weight sum; summed across workers.
double dat[2] { result.Residue(), result.Weights() };
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;   // per-row evaluation policy
ElementWiseMetricsReduction<Policy> reducer_{policy_};   // CPU/GPU reducer
};
// Registration of the element-wise metrics with the global metric factory.
// The factory passes the text after '@' in the metric name as `param`
// (used by "error@t" and "tweedie-nloglik@rho"; ignored by the others).
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Rooted mean square error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(RMSLE, "rmsle")
.describe("Rooted mean square log error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSLE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae")
.describe("Mean absolute error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); });
XGBOOST_REGISTER_METRIC(MAPE, "mape")
.describe("Mean absolute percentage error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAPE>(); });
XGBOOST_REGISTER_METRIC(MPHE, "mphe")
.describe("Mean Pseudo Huber error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMPHE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
} // namespace metric
} // namespace xgboost
|
f599c4603cc85203163c504b850b9253328f526b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <vector_functions.h>
#include <nori/scene.h>
#include <nori/camera.h>
#include <hip/device_functions.h>
#include <nori/independent.h>
#include <nori/perspective.h>
#include <nori/integrator.h>
#include <nori/pathMisIntegrator.h>
#include <nori/bvh.h>
#include <chrono>
#include <unistd.h>
class Perspective;
NORI_NAMESPACE_BEGIN
//__shared__ Sampler samplerS[1024];
#define MEDIAN(a,b,c) ((a-b)*(b-c) > -1 ? b : ((a-b)*(a-c) < 1 ? a : c))
// Path-tracing kernel.  Threads cover the image in a 2D grid-stride pattern;
// each pixel accumulates `iterations` full paths from the integrator, the
// averaged result is written to the display surface (linear) and to `image`
// (sRGB).  `step` offsets the per-pixel sampler seed between kernel runs.
__global__ void render_gpu(hipSurfaceObject_t cuda_data, int width, int height, nori::Scene* scene,Color3f* image,size_t step)
{
Sampler s;// = samplerS[threadIdx.x * blockDim.y + threadIdx.y]
int iterations = scene->getSampler()->m_sampleCount;
// Grid-stride loops over pixel columns (x) and rows (y).
for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += blockDim.x * gridDim.x) {
for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += blockDim.y * gridDim.y) {
// if (y <= 100) break;
//if (x != 5 || y != 500) asm("exit;");
// Column-major linear index into `image` (x * height + y).
int gIn = x*height+y;
Independent::prepare(s, Point2i(x+width*step, y+height*step));
nori::Point2f p((float(x)), (float(y)));
Ray3f r;
const nori::Camera *c = scene->getCamera();
CallCamera(c, sampleRay, r, p, CallSampler(s, next2D));
/**todo we could use constant memory for the origin of the ray**/
// Remember the camera ray so each new path can restart from it.
Point3f rO = r.o;
Vector3f rD = r.d;
Integrator *i = scene->getIntegrator();
//costly but needed, check whether the camera ray hits something, if not this thread won't be useful
//if(!scene->rayIntersect(r))
// asm("exit;");
Color3f color(0);;
int in = 0;
Ray3f nr = r;
Color3f t = 1;   // reset to 1 per path; a negative first component flags a restart below
float lastBsdfPdf = 1;
bool lastBsdfDelta = true;
// Accumulate `iterations` complete paths for this pixel.
while(in<iterations){
color += CallIntegrator(i, Li, scene, s, nr,t,lastBsdfDelta,lastBsdfPdf);
//color += float(x+y)/(800.0f*600);
//in++;;
// A negative t(0) appears to signal that the integrator finished the
// current path: rebuild the camera ray and start the next sample.
if(t(0)<0){
nr.o = rO;
nr.d = rD;
nr.update();
t = 1;
lastBsdfDelta = true;
in++;
}
}
color /= iterations;
/*if (step) {
color = (color - image[gIn]) / (step + 1) + image[gIn];
}*/
// Color3f colorO = color.toSRGB();
// NOTE(review): for y == 0 this writes row `height`, relying on the clamp
// boundary mode; confirm the intended vertical flip is `height - 1 - y`.
surf2Dwrite(make_float4(color(0), color(1), color(2), 1.0f),
cuda_data, (int) sizeof(float4) * x, height - y,
hipBoundaryModeClamp);
//image[gIn].x() = color.x();
image[gIn] = color.toSRGB();
}
}
}
/**
* \brief Launches one render pass and, once the last pass has completed,
* writes the raw and filtered images to disk.
*
* @param resource surface object that receives the displayed image
* @param w image width in pixels
* @param h image height in pixels
* @param scene scene object on GPU memory
* @param filename output name; its extension is replaced by ".png"
*/
static Color3f* image = nullptr;   // per-pixel result buffer on the GPU
//static nori::Independent* gpuSampler = nullptr;
static size_t step = 0;            // number of render passes issued so far
static std::chrono::milliseconds startTime;   // wall-clock start of the first pass
void render_scene(hipSurfaceObject_t resource, int w, int h,nori::Scene *scene, std::string filename)
{
int blockSize;
int gridSize;
// Ask the runtime for an occupancy-friendly 1D block size, then reshape it
// into a roughly square 2D block for tiled pixel coverage.
hipOccupancyMaxPotentialBlockSize(&gridSize,&blockSize,render_gpu,0,w*h);
//we want to render 2D blocks not lines
int blockW = sqrt(blockSize);
int blockH = blockSize/blockW;
dim3 block(blockW,blockH);
int gridSizeW = (w + blockW - 1) / blockW;
int gridSizeH = (h + blockH - 1) / blockH;
dim3 grid(gridSizeW,gridSizeH);
//At the moment we do not use the given Sampler but simply independent
// Lazily allocate the GPU image buffer on the first call.
// NOTE(review): `image` is never freed in this translation unit.
if(!image) {
startTime = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch());
hipMalloc((void **) &image, w * h * sizeof(Color3f));
//hipMemset(image, 0, w * h * sizeof(Color3f));
}
//number of kernel runs
const int sCount = 1;
//int block = 64;
//int grid = (w*h + block - 1) / block;
if(step<sCount) {
nori::render_gpu << < grid,block >> > (resource, w, h, scene, image, step);
std::cout<<hipGetErrorString(hipGetLastError())<<std::endl;
std::cout<<step<<std::endl;
step++;
}else if(sCount == step){
// Poll the default stream; once the pass is done, report the timing and
// write "<name>.png" plus a filtered "<name>_filtered.png".
if(hipSuccess==hipStreamQuery(0)){
hipDeviceSynchronize();
auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()) - startTime;
std::cout << "finished: Took" << float(diff.count())/1000 << " seconds!"<< std::endl ;
step++;
//load image from GPU
size_t lastindex = filename.find_last_of(".");
std::string name = filename.substr(0, lastindex);
cv::Mat convertedImage(cv::Size(h,w),CV_8UC3);
cv::Mat mImage(cv::Size(h,w),CV_32FC3);
hipMemcpy(mImage.data,image,sizeof(Color3f)*w*h,hipMemcpyDeviceToHost);
cv::cvtColor(mImage,mImage,CV_RGB2BGR);
mImage.convertTo(convertedImage,CV_8UC3,255);
cv::transpose(convertedImage,convertedImage);
cv::imwrite(name + ".png",convertedImage);
filter_scene(resource,w,h,scene,image);
hipMemcpy(mImage.data,image,sizeof(Color3f)*w*h,hipMemcpyDeviceToHost);
cv::cvtColor(mImage,mImage,CV_RGB2BGR);
mImage.convertTo(convertedImage,CV_8UC3,255);
cv::transpose(convertedImage,convertedImage);
cv::imwrite(name + "_filtered.png",convertedImage);
step++;
//exit(0);
}else{
// Pass still running: back off before the caller polls again.
sleep(1);
}
}
}
NORI_NAMESPACE_END
| f599c4603cc85203163c504b850b9253328f526b.cu | #include <cuda_runtime_api.h>
#include <vector_functions.h>
#include <nori/scene.h>
#include <nori/camera.h>
#include <device_functions.h>
#include <nori/independent.h>
#include <nori/perspective.h>
#include <nori/integrator.h>
#include <nori/pathMisIntegrator.h>
#include <nori/bvh.h>
#include <chrono>
#include <unistd.h>
class Perspective;
NORI_NAMESPACE_BEGIN
//__shared__ Sampler samplerS[1024];
#define MEDIAN(a,b,c) ((a-b)*(b-c) > -1 ? b : ((a-b)*(a-c) < 1 ? a : c))
// Path-tracing kernel.  Threads cover the image in a 2D grid-stride pattern;
// each pixel accumulates `iterations` full paths from the integrator, the
// averaged result is written to the display surface (linear) and to `image`
// (sRGB).  `step` offsets the per-pixel sampler seed between kernel runs.
__global__ void render_gpu(cudaSurfaceObject_t cuda_data, int width, int height, nori::Scene* scene,Color3f* image,size_t step)
{
Sampler s;// = samplerS[threadIdx.x * blockDim.y + threadIdx.y]
int iterations = scene->getSampler()->m_sampleCount;
// Grid-stride loops over pixel columns (x) and rows (y).
for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += blockDim.x * gridDim.x) {
for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += blockDim.y * gridDim.y) {
// if (y <= 100) break;
//if (x != 5 || y != 500) asm("exit;");
// Column-major linear index into `image` (x * height + y).
int gIn = x*height+y;
Independent::prepare(s, Point2i(x+width*step, y+height*step));
nori::Point2f p((float(x)), (float(y)));
Ray3f r;
const nori::Camera *c = scene->getCamera();
CallCamera(c, sampleRay, r, p, CallSampler(s, next2D));
/**todo we could use constant memory for the origin of the ray**/
// Remember the camera ray so each new path can restart from it.
Point3f rO = r.o;
Vector3f rD = r.d;
Integrator *i = scene->getIntegrator();
//costly but needed, check whether the camera ray hits something, if not this thread won't be useful
//if(!scene->rayIntersect(r))
// asm("exit;");
Color3f color(0);;
int in = 0;
Ray3f nr = r;
Color3f t = 1;   // reset to 1 per path; a negative first component flags a restart below
float lastBsdfPdf = 1;
bool lastBsdfDelta = true;
// Accumulate `iterations` complete paths for this pixel.
while(in<iterations){
color += CallIntegrator(i, Li, scene, s, nr,t,lastBsdfDelta,lastBsdfPdf);
//color += float(x+y)/(800.0f*600);
//in++;;
// A negative t(0) appears to signal that the integrator finished the
// current path: rebuild the camera ray and start the next sample.
if(t(0)<0){
nr.o = rO;
nr.d = rD;
nr.update();
t = 1;
lastBsdfDelta = true;
in++;
}
}
color /= iterations;
/*if (step) {
color = (color - image[gIn]) / (step + 1) + image[gIn];
}*/
// Color3f colorO = color.toSRGB();
// NOTE(review): for y == 0 this writes row `height`, relying on the clamp
// boundary mode; confirm the intended vertical flip is `height - 1 - y`.
surf2Dwrite(make_float4(color(0), color(1), color(2), 1.0f),
cuda_data, (int) sizeof(float4) * x, height - y,
cudaBoundaryModeClamp);
//image[gIn].x() = color.x();
image[gIn] = color.toSRGB();
}
}
}
/**
* \brief Launches one render pass and, once the last pass has completed,
* writes the raw and filtered images to disk.
*
* @param resource surface object that receives the displayed image
* @param w image width in pixels
* @param h image height in pixels
* @param scene scene object on GPU memory
* @param filename output name; its extension is replaced by ".png"
*/
static Color3f* image = nullptr;   // per-pixel result buffer on the GPU
//static nori::Independent* gpuSampler = nullptr;
static size_t step = 0;            // number of render passes issued so far
static std::chrono::milliseconds startTime;   // wall-clock start of the first pass
void render_scene(cudaSurfaceObject_t resource, int w, int h,nori::Scene *scene, std::string filename)
{
int blockSize;
int gridSize;
// Ask the runtime for an occupancy-friendly 1D block size, then reshape it
// into a roughly square 2D block for tiled pixel coverage.
cudaOccupancyMaxPotentialBlockSize(&gridSize,&blockSize,render_gpu,0,w*h);
//we want to render 2D blocks not lines
int blockW = sqrt(blockSize);
int blockH = blockSize/blockW;
dim3 block(blockW,blockH);
int gridSizeW = (w + blockW - 1) / blockW;
int gridSizeH = (h + blockH - 1) / blockH;
dim3 grid(gridSizeW,gridSizeH);
//At the moment we do not use the given Sampler but simply independent
// Lazily allocate the GPU image buffer on the first call.
// NOTE(review): `image` is never freed in this translation unit.
if(!image) {
startTime = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch());
cudaMalloc((void **) &image, w * h * sizeof(Color3f));
//cudaMemset(image, 0, w * h * sizeof(Color3f));
}
//number of kernel runs
const int sCount = 1;
//int block = 64;
//int grid = (w*h + block - 1) / block;
if(step<sCount) {
nori::render_gpu << < grid,block >> > (resource, w, h, scene, image, step);
std::cout<<cudaGetErrorString(cudaGetLastError())<<std::endl;
std::cout<<step<<std::endl;
step++;
}else if(sCount == step){
// Poll the default stream; once the pass is done, report the timing and
// write "<name>.png" plus a filtered "<name>_filtered.png".
if(cudaSuccess==cudaStreamQuery(0)){
cudaDeviceSynchronize();
auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()) - startTime;
std::cout << "finished: Took" << float(diff.count())/1000 << " seconds!"<< std::endl ;
step++;
//load image from GPU
size_t lastindex = filename.find_last_of(".");
std::string name = filename.substr(0, lastindex);
cv::Mat convertedImage(cv::Size(h,w),CV_8UC3);
cv::Mat mImage(cv::Size(h,w),CV_32FC3);
cudaMemcpy(mImage.data,image,sizeof(Color3f)*w*h,cudaMemcpyDeviceToHost);
cv::cvtColor(mImage,mImage,CV_RGB2BGR);
mImage.convertTo(convertedImage,CV_8UC3,255);
cv::transpose(convertedImage,convertedImage);
cv::imwrite(name + ".png",convertedImage);
filter_scene(resource,w,h,scene,image);
cudaMemcpy(mImage.data,image,sizeof(Color3f)*w*h,cudaMemcpyDeviceToHost);
cv::cvtColor(mImage,mImage,CV_RGB2BGR);
mImage.convertTo(convertedImage,CV_8UC3,255);
cv::transpose(convertedImage,convertedImage);
cv::imwrite(name + "_filtered.png",convertedImage);
step++;
//exit(0);
}else{
// Pass still running: back off before the caller polls again.
sleep(1);
}
}
}
NORI_NAMESPACE_END
|
c58d667db6c226ee7d7a19475fc9641dec67563d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************//**
* \file intermediateVelocity.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief Functions that call kernels to solve for the intermediate velocity
*/
#include <solvers/NavierStokes/NavierStokesSolver.h>
#include <solvers/NavierStokes/NavierStokes/kernels/intermediateVelocity.h>
#include <solvers/NavierStokes/NavierStokes/kernels/LHS1.h>
#include <solvers/NavierStokes/NavierStokes/kernels/N.h>
#include <solvers/NavierStokes/NavierStokes/kernels/L.h>
// Assembles the right-hand side of the intermediate-velocity system from the
// convection term N, the Laplacian L and the boundary contributions bc1.
void NavierStokesSolver::generateRHS1()
{
logger.startTimer("Intermediate Velocity Setup");
//set correct grid and block size
const int blocksize = 256;
dim3 dimGridUV( int( ((nx-1)*ny+nx*(ny-1)-0.5)/blocksize ) + 1, 1);
dim3 dimBlockUV(blocksize, 1);
// NOTE(review): the U-only and V-only launch configurations below are never
// used in this function (only the combined UV grid is) -- candidates for
// removal.
dim3 dimGridU( int( ((nx-1)*ny-0.5)/blocksize ) +1, 1);
dim3 dimBlockU(blocksize, 1);
dim3 dimGridV( int( (nx*(ny-1)-0.5)/blocksize ) +1, 1);
dim3 dimBlockV(blocksize, 1);
//boundary update goes here
// Keep the previous convection term for the time scheme, rebuild N, L and
// bc1, then assemble the full RHS in one kernel over all velocity unknowns.
Nold = N;
generateN();
generateL();
generateBC1();
hipLaunchKernelGGL(( kernels::generateRHS), dim3(dimGridUV),dim3(dimBlockUV), 0, 0, rhs1_r, L_r, Nold_r, N_r, u_r, bc1_r, dt, nx, ny);
logger.stopTimer("Intermediate Velocity Setup");
}
/**
* \brief Fills the convection matrix N (stored as an array).
*
* Uses the member fields nx, ny (pressure-node counts), dx, dy (cell
* widths/heights) and the velocity array u; interior and boundary entries
* are filled by separate kernels for each velocity component.
*/
void NavierStokesSolver::generateN()
{
const int blocksize = 256;
dim3 gridU( int( ((nx-1)*ny-0.5)/blocksize ) +1, 1);
dim3 blockU(blocksize, 1);
dim3 gridV( int( (nx*(ny-1)-0.5)/blocksize ) +1, 1);
dim3 blockV(blocksize, 1);
// Interior then boundary entries for the x-component...
hipLaunchKernelGGL(( kernels::Nmidx), dim3(gridU), dim3(blockU), 0, 0, N_r,u_r,dx_r,dy_r,nx,ny);
hipLaunchKernelGGL(( kernels::Nbcx), dim3(gridU), dim3(blockU), 0, 0, N_r, u_r, dx_r, dy_r, ym_r, yp_r, xm_r, xp_r, nx, ny);
// ...and for the y-component.
hipLaunchKernelGGL(( kernels::Nmidy), dim3(gridV), dim3(blockV), 0, 0, N_r,u_r,dx_r,dy_r,nx,ny);
hipLaunchKernelGGL(( kernels::Nbcy), dim3(gridV), dim3(blockV), 0, 0, N_r, u_r, dx_r, dy_r, ym_r, yp_r, xm_r, xp_r, nx, ny);
}
/**
* \brief Fills the Laplacian matrix L (stored as an array).
*
* Uses the member fields nx, ny (pressure-node counts), nu (viscosity,
* effectively the inverse Reynolds number), dx, dy (cell widths/heights)
* and the velocity array u; interior and boundary entries are filled by
* separate kernels for each velocity component.
*/
void NavierStokesSolver::generateL()
{
const int blocksize = 256;
dim3 gridU( int( ((nx-1)*ny-0.5)/blocksize ) +1, 1);
dim3 blockU(blocksize, 1);
dim3 gridV( int( (nx*(ny-1)-0.5)/blocksize ) +1, 1);
dim3 blockV(blocksize, 1);
// Interior then boundary entries for the x-component...
hipLaunchKernelGGL(( kernels::Lmidx), dim3(gridU), dim3(blockU), 0, 0, L_r,u_r,dx_r,dy_r,nx,ny,nu);
hipLaunchKernelGGL(( kernels::Lbcx), dim3(gridU), dim3(blockU), 0, 0, L_r, u_r, dx_r, dy_r, ym_r, yp_r, xm_r, xp_r, nx, ny, nu);
// ...and for the y-component.
hipLaunchKernelGGL(( kernels::Lmidy), dim3(gridV), dim3(blockV), 0, 0, L_r,u_r,dx_r,dy_r,nx,ny,nu);
hipLaunchKernelGGL(( kernels::Lbcy), dim3(gridV), dim3(blockV), 0, 0, L_r, u_r, dx_r, dy_r, ym_r, yp_r, xm_r, xp_r, nx, ny, nu);
}
// Assembles the implicit (left-hand side) matrix of the intermediate-velocity
// system into the LHS1 row/col/val arrays: interior rows first, then boundary
// rows, for each velocity component.
void NavierStokesSolver::generateLHS1()
{
const int blocksize = 256;
dim3 gridU( int( ((nx-1)*ny-0.5)/blocksize ) +1, 1);
dim3 blockU(blocksize, 1);
dim3 gridV( int( (nx*(ny-1)-0.5)/blocksize ) +1, 1);
dim3 blockV(blocksize, 1);
hipLaunchKernelGGL(( kernels::LHS_mid_X_nobody), dim3(gridU),dim3(blockU), 0, 0, LHS1_row_r, LHS1_col_r, LHS1_val_r, dx_r, dy_r, dt, nu, nx, ny);
hipLaunchKernelGGL(( kernels::LHS_mid_Y_nobody), dim3(gridV),dim3(blockV), 0, 0, LHS1_row_r, LHS1_col_r, LHS1_val_r, dx_r, dy_r, dt, nu, nx, ny);
hipLaunchKernelGGL(( kernels::LHS_BC_X), dim3(gridU),dim3(blockU), 0, 0, LHS1_row_r, LHS1_col_r, LHS1_val_r, dx_r, dy_r, dt, nu, nx, ny);
hipLaunchKernelGGL(( kernels::LHS_BC_Y), dim3(gridV),dim3(blockV), 0, 0, LHS1_row_r, LHS1_col_r, LHS1_val_r, dx_r, dy_r, dt, nu, nx, ny);
}
// Computes the boundary-condition contribution bc1 to the right-hand side,
// one kernel per velocity component.
void NavierStokesSolver::generateBC1()
{
const int blocksize = 256;
dim3 gridU( int( ((nx-1)*ny-0.5)/blocksize ) +1, 1);
dim3 blockU(blocksize, 1);
dim3 gridV( int( (nx*(ny-1)-0.5)/blocksize ) +1, 1);
dim3 blockV(blocksize, 1);
hipLaunchKernelGGL(( kernels::bc1X), dim3(gridU), dim3(blockU), 0, 0, u_r, bc1_r, ym_r, yp_r, xm_r, xp_r, dx_r, dy_r, nu, dt, nx, ny);
hipLaunchKernelGGL(( kernels::bc1Y), dim3(gridV), dim3(blockV), 0, 0, u_r, bc1_r, ym_r, yp_r, xm_r, xp_r, dx_r, dy_r, nu, dt, nx, ny);
}
| c58d667db6c226ee7d7a19475fc9641dec67563d.cu | /***************************************************************************//**
* \file intermediateVelocity.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief Functions that call kernels to solve for the intermediate velocity
*/
#include <solvers/NavierStokes/NavierStokesSolver.h>
#include <solvers/NavierStokes/NavierStokes/kernels/intermediateVelocity.h>
#include <solvers/NavierStokes/NavierStokes/kernels/LHS1.h>
#include <solvers/NavierStokes/NavierStokes/kernels/N.h>
#include <solvers/NavierStokes/NavierStokes/kernels/L.h>
// Assembles the right-hand side of the intermediate-velocity system from the
// convection term N, the Laplacian L and the boundary contributions bc1.
void NavierStokesSolver::generateRHS1()
{
logger.startTimer("Intermediate Velocity Setup");
//set correct grid and block size
const int blocksize = 256;
dim3 dimGridUV( int( ((nx-1)*ny+nx*(ny-1)-0.5)/blocksize ) + 1, 1);
dim3 dimBlockUV(blocksize, 1);
// NOTE(review): the U-only and V-only launch configurations below are never
// used in this function (only the combined UV grid is) -- candidates for
// removal.
dim3 dimGridU( int( ((nx-1)*ny-0.5)/blocksize ) +1, 1);
dim3 dimBlockU(blocksize, 1);
dim3 dimGridV( int( (nx*(ny-1)-0.5)/blocksize ) +1, 1);
dim3 dimBlockV(blocksize, 1);
//boundary update goes here
// Keep the previous convection term for the time scheme, rebuild N, L and
// bc1, then assemble the full RHS in one kernel over all velocity unknowns.
Nold = N;
generateN();
generateL();
generateBC1();
kernels::generateRHS<<<dimGridUV,dimBlockUV>>>(rhs1_r, L_r, Nold_r, N_r, u_r, bc1_r, dt, nx, ny);
logger.stopTimer("Intermediate Velocity Setup");
}
/**
* \brief Fills the convection matrix N (stored as an array).
*
* Uses the member fields nx, ny (pressure-node counts), dx, dy (cell
* widths/heights) and the velocity array u; interior and boundary entries
* are filled by separate kernels for each velocity component.
*/
void NavierStokesSolver::generateN()
{
const int blocksize = 256;
dim3 gridU( int( ((nx-1)*ny-0.5)/blocksize ) +1, 1);
dim3 blockU(blocksize, 1);
dim3 gridV( int( (nx*(ny-1)-0.5)/blocksize ) +1, 1);
dim3 blockV(blocksize, 1);
// Interior then boundary entries for the x-component...
kernels::Nmidx<<<gridU, blockU>>>(N_r,u_r,dx_r,dy_r,nx,ny);
kernels::Nbcx<<<gridU, blockU>>>(N_r, u_r, dx_r, dy_r, ym_r, yp_r, xm_r, xp_r, nx, ny);
// ...and for the y-component.
kernels::Nmidy<<<gridV, blockV>>>(N_r,u_r,dx_r,dy_r,nx,ny);
kernels::Nbcy<<<gridV, blockV>>>(N_r, u_r, dx_r, dy_r, ym_r, yp_r, xm_r, xp_r, nx, ny);
}
/**
* \brief Fills the Laplacian matrix L (stored as an array).
*
* Uses the member fields nx, ny (pressure-node counts), nu (viscosity,
* effectively the inverse Reynolds number), dx, dy (cell widths/heights)
* and the velocity array u; interior and boundary entries are filled by
* separate kernels for each velocity component.
*/
void NavierStokesSolver::generateL()
{
const int blocksize = 256;
dim3 gridU( int( ((nx-1)*ny-0.5)/blocksize ) +1, 1);
dim3 blockU(blocksize, 1);
dim3 gridV( int( (nx*(ny-1)-0.5)/blocksize ) +1, 1);
dim3 blockV(blocksize, 1);
// Interior then boundary entries for the x-component...
kernels::Lmidx<<<gridU, blockU>>>(L_r,u_r,dx_r,dy_r,nx,ny,nu);
kernels::Lbcx<<<gridU, blockU>>>(L_r, u_r, dx_r, dy_r, ym_r, yp_r, xm_r, xp_r, nx, ny, nu);
// ...and for the y-component.
kernels::Lmidy<<<gridV, blockV>>>(L_r,u_r,dx_r,dy_r,nx,ny,nu);
kernels::Lbcy<<<gridV, blockV>>>(L_r, u_r, dx_r, dy_r, ym_r, yp_r, xm_r, xp_r, nx, ny, nu);
}
// Assembles the implicit (left-hand side) matrix of the intermediate-velocity
// system into the LHS1 row/col/val arrays.
void NavierStokesSolver::generateLHS1()
{
	// 1D launch grids: one over the u-velocity unknowns, one over the
	// v-velocity unknowns, both with 256 threads per block.
	const int threadsPerBlock = 256;
	dim3 blockDimU(threadsPerBlock, 1);
	dim3 gridDimU( int( ((nx-1)*ny-0.5)/threadsPerBlock ) + 1, 1);
	dim3 blockDimV(threadsPerBlock, 1);
	dim3 gridDimV( int( (nx*(ny-1)-0.5)/threadsPerBlock ) + 1, 1);
	// Interior coefficients first, then the boundary rows, for each component.
	kernels::LHS_mid_X_nobody<<<gridDimU, blockDimU>>>(LHS1_row_r, LHS1_col_r, LHS1_val_r, dx_r, dy_r, dt, nu, nx, ny);
	kernels::LHS_mid_Y_nobody<<<gridDimV, blockDimV>>>(LHS1_row_r, LHS1_col_r, LHS1_val_r, dx_r, dy_r, dt, nu, nx, ny);
	kernels::LHS_BC_X<<<gridDimU, blockDimU>>>(LHS1_row_r, LHS1_col_r, LHS1_val_r, dx_r, dy_r, dt, nu, nx, ny);
	kernels::LHS_BC_Y<<<gridDimV, blockDimV>>>(LHS1_row_r, LHS1_col_r, LHS1_val_r, dx_r, dy_r, dt, nu, nx, ny);
}
// Computes the boundary-condition contribution bc1 to the right-hand side,
// one kernel per velocity component.
void NavierStokesSolver::generateBC1()
{
	// 1D launch grids over the u- and v-velocity unknowns, 256 threads/block.
	const int threadsPerBlock = 256;
	dim3 blockDimU(threadsPerBlock, 1);
	dim3 gridDimU( int( ((nx-1)*ny-0.5)/threadsPerBlock ) + 1, 1);
	dim3 blockDimV(threadsPerBlock, 1);
	dim3 gridDimV( int( (nx*(ny-1)-0.5)/threadsPerBlock ) + 1, 1);
	kernels::bc1X<<<gridDimU, blockDimU>>>(u_r, bc1_r, ym_r, yp_r, xm_r, xp_r, dx_r, dy_r, nu, dt, nx, ny);
	kernels::bc1Y<<<gridDimV, blockDimV>>>(u_r, bc1_r, ym_r, yp_r, xm_r, xp_r, dx_r, dy_r, nu, dt, nx, ny);
}
|
de1caf3bacd2793b400a11899af996f2d1385523.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <MemoryHandleInternal.h>
#include <MathEngineCommon.h>
#include <CudaDevice.h>
#include <Kernels/CudaBlasKernels.h>
namespace NeoML {
// Writes the given vector into every row of the matrixHeight x matrixWidth
// matrix at resultHandle (see SetVectorToMatrixRowsKernel).
void CCudaMathEngine::SetVectorToMatrixRows(const CFloatHandle& resultHandle,
	int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle)
{
	// Every handle used on this engine must have been allocated by it.
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 2D launch grid covering the matrixHeight x matrixWidth output.
	dim3 gridDim2D;
	dim3 blockDim2D;
	getCudaTaskGrid2D( gridDim2D, blockDim2D, matrixHeight, matrixWidth );

	hipLaunchKernelGGL( ( SetVectorToMatrixRowsKernel ), dim3( gridDim2D ), dim3( blockDim2D ), 0, 0,
		GetRaw( resultHandle ), matrixHeight, matrixWidth, GetRaw( vectorHandle ) );
}
// Adds one vector element into a selected element of each matrix row
// (presumably matrix[i][indices[i]] += vector[i] -- see
// AddVectorToMatrixElementsKernel); grid is sized by `height`.
void CCudaMathEngine::AddVectorToMatrixElements(const CFloatHandle& matrix, int height, int width,
const CConstIntHandle& indices, const CConstFloatHandle& vector)
{
// All handles must belong to this math engine.
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( indices.GetMathEngine() == this );
ASSERT_EXPR( vector.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// 1D grid over the rows, combined per AddVectorToMatrixElementsCombine.
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, height, AddVectorToMatrixElementsCombine);
hipLaunchKernelGGL(( AddVectorToMatrixElementsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrix),
height, width, GetRaw(indices), GetRaw(vector));
}
// Adds vector elements into matrix positions addressed by parallel row/column
// index arrays (presumably matrix[rowIndices[i]][columnIndices[i]] +=
// vector[i] -- see AddVectorToMatrixElementsKernel); grid is sized by
// `vectorSize`.
void CCudaMathEngine::AddVectorToMatrixElements(const CFloatHandle& matrixHandle, int height, int width,
const CConstIntHandle& rowIndicesHandle, const CConstIntHandle& columnIndicesHandle,
const CConstFloatHandle& vectorHandle, int vectorSize)
{
// All handles must belong to this math engine.
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( rowIndicesHandle.GetMathEngine() == this );
ASSERT_EXPR( columnIndicesHandle.GetMathEngine() == this );
ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// 1D grid over the vector, combined per AddVectorToMatrixElementsMulCombine.
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, vectorSize, AddVectorToMatrixElementsMulCombine);
hipLaunchKernelGGL(( AddVectorToMatrixElementsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrixHandle), height, width,
GetRaw(rowIndicesHandle), GetRaw(columnIndicesHandle), GetRaw(vectorHandle), vectorSize);
}
// Assigns the values: matrix[rowIndices[i], columnIndices[i]] = vector[i].
void CCudaMathEngine::SetVectorToMatrixElements(
const CFloatHandle& matrixHandle, int height, int width,
const CConstIntHandle& rowIndicesHandle, const CConstIntHandle& columnIndicesHandle,
const CConstFloatHandle& vectorHandle, int vectorSize )
{
// All handles must belong to this math engine.
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( rowIndicesHandle.GetMathEngine() == this );
ASSERT_EXPR( columnIndicesHandle.GetMathEngine() == this );
ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// 1D grid over the vector, combined per SetVectorToMatrixElementsMulCombine.
int blockCount;
int threadCount;
getCudaTaskGrid(
blockCount, threadCount, vectorSize, SetVectorToMatrixElementsMulCombine );
hipLaunchKernelGGL(( SetVectorToMatrixElementsKernel), dim3(blockCount), dim3(threadCount), 0, 0,
GetRaw( matrixHandle ), height, width,
GetRaw( rowIndicesHandle ), GetRaw( columnIndicesHandle ),
GetRaw( vectorHandle ), vectorSize );
}
// Combines vector values into per-row matrix elements selected by `indices`
// with a log-sum-exp update (see EltwiseLogSumExpVectorToMatrixElementsKernel);
// grid is sized by `height`.
void CCudaMathEngine::EltwiseLogSumExpVectorToMatrixElements(const CFloatHandle& matrix, int height, int width,
const CConstIntHandle& indices, const CConstFloatHandle& vector)
{
// All handles must belong to this math engine.
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( indices.GetMathEngine() == this );
ASSERT_EXPR( vector.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// 1D grid over the rows (no combine factor here).
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, height);
hipLaunchKernelGGL(( EltwiseLogSumExpVectorToMatrixElementsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrix),
height, width, GetRaw(indices), GetRaw(vector));
}
// Combines vector values into matrix positions addressed by parallel
// row/column index arrays with a log-sum-exp update (see
// EltwiseLogSumExpVectorToMatrixElementsKernel).  Unlike the overload above,
// this one launches a 2D grid over the whole height x width matrix.
void CCudaMathEngine::EltwiseLogSumExpVectorToMatrixElements(const CFloatHandle& matrix,
int height, int width,
const CConstIntHandle& rowIndices, const CConstIntHandle& columnIndices,
const CConstFloatHandle& vector, int vectorSize)
{
// All handles must belong to this math engine.
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( rowIndices.GetMathEngine() == this );
ASSERT_EXPR( columnIndices.GetMathEngine() == this );
ASSERT_EXPR( vector.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, height, width);
hipLaunchKernelGGL(( EltwiseLogSumExpVectorToMatrixElementsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrix), height, width,
GetRaw(rowIndices), GetRaw(columnIndices), GetRaw(vector), vectorSize);
}
// Accumulates one selected element per matrix row into the result vector
// (presumably result[i] += matrix[i][indices[i]] -- see
// AddMatrixElementsToVectorKernel).  The result must hold at least `height`
// elements, enforced by the assert below.
void CCudaMathEngine::AddMatrixElementsToVector(const CConstFloatHandle& matrix, int height, int width,
const CConstIntHandle& indices, const CFloatHandle& result, int vectorSize)
{
// All handles must belong to this math engine.
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
ASSERT_EXPR( indices.GetMathEngine() == this );
ASSERT_EXPR(vectorSize >= height);
SetCudaDevice( device->DeviceNumber );
// 1D grid over the rows, combined per AddMatrixElementsToVectorCombine.
int blockCount;
int threadCount;
getCudaTaskGrid(blockCount, threadCount, height, AddMatrixElementsToVectorCombine);
hipLaunchKernelGGL(( AddMatrixElementsToVectorKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(matrix),
height, width, GetRaw(indices), GetRaw(result));
}
// Adds matrix elements addressed by (rowIndices[i], columnIndices[i]) pairs into the result vector
void CCudaMathEngine::AddMatrixElementsToVector(const CConstFloatHandle& matrix, int height, int width,
	const CConstIntHandle& rowIndices, const CConstIntHandle& columnIndices,
	const CFloatHandle& result, int vectorSize)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( rowIndices.GetMathEngine() == this );
	ASSERT_EXPR( columnIndices.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// One task per result element, several elements combined per thread
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, vectorSize, AddMatrixElementsToVectorMulCombine );

	hipLaunchKernelGGL(( AddMatrixElementsToVectorKernel), dim3(blocks), dim3(threads), 0, 0,
		GetRaw( matrix ), height, width, GetRaw( rowIndices ), GetRaw( columnIndices ),
		GetRaw( result ), vectorSize );
}
// Adds one indexed element from each row of the source matrix into the result matrix
void CCudaMathEngine::AddMatrixElementsToMatrix(const CConstFloatHandle& matrix, int height, int width,
	const CFloatHandle& result, const CConstIntHandle& indices)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	ASSERT_EXPR( indices.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// One task per row, several rows combined per thread
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, height, AddMatrixElementsToMatrixCombine );

	hipLaunchKernelGGL(( AddMatrixElementsToMatrixKernel), dim3(blocks), dim3(threads), 0, 0,
		GetRaw( matrix ), height, width, GetRaw( result ), GetRaw( indices ) );
}
void CCudaMathEngine::AddVectorToMatrixRows(int batchSize,
const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle, int matrixHeight,
int matrixWidth, const CConstFloatHandle& vectorHandle)
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
int widthNorm = (matrixWidth + BatchAddVectorToMatrixRowsCombine - 1) /
BatchAddVectorToMatrixRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, batchSize * matrixHeight, widthNorm);
hipLaunchKernelGGL(( AddVectorToMatrixRowsKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize,
GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight,
matrixWidth, GetRaw(vectorHandle), widthNorm);
}
void CCudaMathEngine::AddVectorToMatrixColumns( const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle,
int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle )
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D( blockCount, threadCount, matrixHeight, matrixWidth );
hipLaunchKernelGGL(( AddVectorToMatrixColumnsKernel), dim3(blockCount), dim3(threadCount), 0, 0,
GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight, matrixWidth, GetRaw(vectorHandle) );
}
void CCudaMathEngine::AddVectorToMatrixColumns( const CConstIntHandle& matrixHandle, const CIntHandle& resultHandle,
int matrixHeight, int matrixWidth, const CConstIntHandle& vectorHandle )
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D( blockCount, threadCount, matrixHeight, matrixWidth );
hipLaunchKernelGGL(( AddVectorToMatrixColumnsKernel), dim3(blockCount), dim3(threadCount), 0, 0,
GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight, matrixWidth, GetRaw(vectorHandle) );
}
void CCudaMathEngine::SubVectorFromMatrixColumns(const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle,
int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle)
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, matrixHeight, matrixWidth);
hipLaunchKernelGGL(( SubVectorFromMatrixColumnsKernel), dim3(blockCount), dim3(threadCount), 0, 0,
GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight, matrixWidth, GetRaw(vectorHandle));
}
void CCudaMathEngine::SumMatrixRowsAdd(
int batchSize, const CFloatHandle& resultHandle, const CConstFloatHandle& matrixHandle,
int matrixHeight, int matrixWidth )
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const int height = ( matrixHeight + SumMatrixRowsAddCombineCount - 1 ) / SumMatrixRowsAddCombineCount;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3D( blockCount, threadCount, batchSize, height, matrixWidth );
hipLaunchKernelGGL(( SumMatrixRowsAddKernel), dim3(blockCount), dim3(threadCount), 0, 0,
batchSize, GetRaw(resultHandle), GetRaw(matrixHandle), matrixHeight, matrixWidth );
}
// Sums the columns of the matrix into resultHandle (delegates to the shared helper, non-negated)
void CCudaMathEngine::SumMatrixColumns(const CFloatHandle& resultHandle, const CConstFloatHandle& matrixHandle,
	int matrixHeight, int matrixWidth)
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );

	const bool negate = false;
	sumMatrixColumnsKernelFunc( resultHandle, GetRaw( matrixHandle ), matrixHeight, matrixWidth, negate );
}
void CCudaMathEngine::MatrixLogSumExpByRows(const CConstFloatHandle& matrix, int height, int width,
const CFloatHandle& result, int resultSize)
{
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
ASSERT_EXPR(resultSize >= height);
SetCudaDevice( device->DeviceNumber );
int widthNorm = (width + MatrixLogSumExpByRowsCombine - 1) / MatrixLogSumExpByRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm);
blockCount.x = 1;
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
hipLaunchKernelGGL(( MatrixLogSumExpByRowsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(matrix), height, width,
GetRaw(result), widthNorm);
}
void CCudaMathEngine::MatrixSoftmaxByRows(const CConstFloatHandle& matrix, int height, int width,
const CFloatHandle& result)
{
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
int widthNorm = (width + MatrixSoftmaxByRowsCombine - 1) / MatrixSoftmaxByRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm);
blockCount.x = 1;
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
hipLaunchKernelGGL(( MatrixSoftmaxByRowsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(matrix), height, width,
GetRaw(result), widthNorm);
}
void CCudaMathEngine::MatrixSoftmaxDiffOpByRows(const CConstFloatHandle& first, const CConstFloatHandle& second,
int height, int width, const CFloatHandle& result)
{
ASSERT_EXPR( first.GetMathEngine() == this );
ASSERT_EXPR( second.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
int widthNorm = (width + MatrixSoftmaxDiffOpByRowsCombine - 1) / MatrixSoftmaxDiffOpByRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm);
blockCount.x = 1;
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
hipLaunchKernelGGL(( MatrixSoftmaxDiffOpByRowsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(first), GetRaw(second),
height, width, GetRaw(result), widthNorm);
}
void CCudaMathEngine::MatrixLogSumExpByColumns(const CConstFloatHandle& matrix, int height, int width,
const CFloatHandle& result, int resultSize)
{
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
ASSERT_EXPR(resultSize >= width);
SetCudaDevice( device->DeviceNumber );
int heightNorm = (height + MatrixLogSumExpByColumnsCombine - 1) / MatrixLogSumExpByColumnsCombine;
heightNorm = alignXSizeForWarp(heightNorm);
dim3 blockCount;
dim3 threadCount;
// Rows over the X instead of Y axis, so we could reduce by X
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, width, heightNorm);
blockCount.x = 1;
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
hipLaunchKernelGGL(( MatrixLogSumExpByColumnsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0,
GetRaw(matrix), height, width, GetRaw(result), heightNorm);
}
void CCudaMathEngine::MatrixSoftmaxByColumns(const CConstFloatHandle& matrix, int height, int width,
const CFloatHandle& result)
{
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
int heightNorm = (height + MatrixSoftmaxByColumnsCombine - 1) / MatrixSoftmaxByColumnsCombine;
heightNorm = alignXSizeForWarp(heightNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, width, heightNorm);
blockCount.x = 1;
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
hipLaunchKernelGGL(( MatrixSoftmaxByColumnsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(matrix), height, width,
GetRaw(result), heightNorm);
}
void CCudaMathEngine::MatrixSoftmaxDiffOpByColumns(const CConstFloatHandle& first, const CConstFloatHandle& second,
int height, int width, const CFloatHandle& result)
{
ASSERT_EXPR( first.GetMathEngine() == this );
ASSERT_EXPR( second.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
int heightNorm = (height + MatrixSoftmaxDiffOpByColumnsCombine - 1) / MatrixSoftmaxDiffOpByColumnsCombine;
heightNorm = alignXSizeForWarp(heightNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, width, heightNorm);
blockCount.x = 1;
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
hipLaunchKernelGGL(( MatrixSoftmaxDiffOpByColumnsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0,
GetRaw(first), GetRaw(second), height, width, GetRaw(result), heightNorm);
}
void CCudaMathEngine::FindMaxValueInRows(const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth,
const CFloatHandle& resultHandle, const CIntHandle& columnIndices, int vectorSize)
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR(vectorSize >= matrixHeight);
SetCudaDevice( device->DeviceNumber );
int widthNorm = (matrixWidth + FindMaxValueInRowsCombine - 1) / FindMaxValueInRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, matrixHeight, widthNorm);
blockCount.x = 1;
const int sharedSize = threadCount.y * threadCount.x * sizeof(CValueWithIndex);
hipLaunchKernelGGL(( FindMaxValueWithIndicesInRowsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0,
GetRaw(matrixHandle), matrixHeight, matrixWidth, GetRaw(resultHandle), GetRaw(columnIndices), widthNorm);
}
void CCudaMathEngine::FindMaxValueInRows(const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth,
const CFloatHandle& resultHandle, int vectorSize)
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR(vectorSize >= matrixHeight);
SetCudaDevice( device->DeviceNumber );
int widthNorm = (matrixWidth + FindMaxValueInRowsCombine - 1) / FindMaxValueInRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, matrixHeight, widthNorm);
blockCount.x = 1;
const int sharedSize = threadCount.y * threadCount.x * sizeof(float);
hipLaunchKernelGGL(( FindMaxValueInRowsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw(matrixHandle), matrixHeight,
matrixWidth, GetRaw(resultHandle), widthNorm);
}
void CCudaMathEngine::FindMaxValueInColumns( int batchSize, const CConstFloatHandle& matrixHandle, int matrixHeight,
int matrixWidth, const CFloatHandle& resultHandle, const CIntHandle& rowIndices, int vectorSize )
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR( rowIndices.GetMathEngine() == this );
ASSERT_EXPR( vectorSize >= batchSize * matrixWidth );
SetCudaDevice( device->DeviceNumber );
int heightNorm = ( matrixHeight + FindMaxValueInColumnsCombine - 1 ) / FindMaxValueInColumnsCombine;
heightNorm = alignXSizeForWarp( heightNorm );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3D( blockCount, threadCount, batchSize, matrixWidth, heightNorm );
blockCount.x = 1;
const int sharedSize = threadCount.z * threadCount.y * threadCount.x * sizeof( CValueWithIndex );
hipLaunchKernelGGL(( FindMaxValueInColumnsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, batchSize,
GetRaw( matrixHandle ), matrixHeight, matrixWidth, GetRaw( resultHandle ), GetRaw( rowIndices ),
heightNorm );
}
void CCudaMathEngine::FindMinValueInColumns( const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth,
const CFloatHandle& resultHandle, const CIntHandle& columnIndices )
{
SetCudaDevice( device->DeviceNumber );
// Initialize using the first row data
VectorCopy( resultHandle, matrixHandle, matrixWidth );
VectorFill( columnIndices, 0, matrixWidth );
int blockCount;
int threadCount;
getCudaTaskGrid( blockCount, threadCount, matrixWidth );
hipLaunchKernelGGL(( FindMinValueInColumnsKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw( matrixHandle ), matrixHeight,
matrixWidth, GetRaw( resultHandle ), GetRaw( columnIndices ) );
}
// Multi-channel lookup-and-copy for float input indices (delegates to the shared template helper)
void CCudaMathEngine::VectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CConstFloatHandle& inputHandle,
	const CConstFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
	const CFloatHandle& outputHandle, int outputChannelsCount)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( outputHandle.GetMathEngine() == this );

	vectorMultichannelLookupAndCopy( batchSize, channelCount, inputHandle,
		lookupHandles, lookupDimensions, lookupCount, outputHandle, outputChannelsCount );
}
// Multi-channel lookup-and-copy for integer input indices (delegates to the shared template helper)
void CCudaMathEngine::VectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CConstIntHandle& inputHandle,
	const CConstFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
	const CFloatHandle& outputHandle, int outputChannelsCount)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( outputHandle.GetMathEngine() == this );

	vectorMultichannelLookupAndCopy( batchSize, channelCount, inputHandle,
		lookupHandles, lookupDimensions, lookupCount, outputHandle, outputChannelsCount );
}
// Multi-channel lookup-and-add-to-table for float input indices (delegates to the shared template helper)
void CCudaMathEngine::VectorMultichannelLookupAndAddToTable(int batchSize, int channelCount, const CConstFloatHandle& inputHandle,
	const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
	const CConstFloatHandle& multHandle,
	const CConstFloatHandle& matrixHandle, int outputChannelsCount)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( multHandle.GetMathEngine() == this );
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );

	vectorMultichannelLookupAndAddToTable( batchSize, channelCount, inputHandle,
		lookupHandles, lookupDimensions, lookupCount, multHandle, matrixHandle, outputChannelsCount );
}
// Multi-channel lookup-and-add-to-table for integer input indices (delegates to the shared template helper)
void CCudaMathEngine::VectorMultichannelLookupAndAddToTable(int batchSize, int channelCount, const CConstIntHandle& inputHandle,
	const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
	const CConstFloatHandle& multHandle,
	const CConstFloatHandle& matrixHandle, int outputChannelsCount)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( multHandle.GetMathEngine() == this );
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );

	vectorMultichannelLookupAndAddToTable( batchSize, channelCount, inputHandle,
		lookupHandles, lookupDimensions, lookupCount, multHandle, matrixHandle, outputChannelsCount );
}
// Expands bit sets into float 0/1 vectors of outputVectorSize elements each
void CCudaMathEngine::BitSetBinarization(int batchSize, int bitSetSize,
	const CConstIntHandle& inputHandle, int outputVectorSize, const CFloatHandle& resultHandle)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// One task per output element across the whole batch
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, batchSize * outputVectorSize );

	hipLaunchKernelGGL(( BitSetBinarizationKernel), dim3(blocks), dim3(threads), 0, 0,
		batchSize, bitSetSize, GetRaw( inputHandle ), outputVectorSize, GetRaw( resultHandle ) );
}
// Multiplies each lookup-table matrix in the batch by the corresponding lookup-table vector,
// producing batchSize * matrix.Height() values in resultHandle.
void CCudaMathEngine::MultiplyLookupMatrixByLookupVector(int batchSize, const CLookupMatrix& matrix,
const CLookupVector& vector, const CFloatHandle& resultHandle, int resultSize)
{
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// The matrix row length must match the vector length
ASSERT_EXPR(matrix.Width() == vector.VectorSize());
ASSERT_EXPR(resultSize >= batchSize * matrix.Height());
// Several elements along the matrix width are combined per thread,
// then the X size is aligned so the kernel can reduce warp-wise
int widthNorm = (matrix.Width() + MultiplyLookupMatrixByLookupVectorCombine - 1) /
MultiplyLookupMatrixByLookupVectorCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, batchSize * matrix.Height(), widthNorm);
if(blockCount.x > 0) {
// Several GPUs may take part in adding up one row, need atomic operations
// Set resultHandle to zeros
// NOTE(review): blockCount.x is always >= 1, so this zero-fill runs unconditionally;
// the sibling sumMatrixColumnsKernelFunc uses `blockCount.x > 1` for the same purpose —
// confirm whether `> 1` was intended here or the kernel always accumulates atomically.
VectorFill(resultHandle, 0, batchSize * matrix.Height());
}
// One float of shared memory per thread for the in-block reduction
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
hipLaunchKernelGGL(( MultiplyLookupMatrixByLookupVectorKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, batchSize,
GetRaw(matrix.Table), matrix.Dims.VectorCount, matrix.Dims.VectorSize,
GetRaw(matrix.Rows), matrix.RowCount, GetRaw(vector.Table), vector.Dims.VectorCount,
GetRaw(vector.Vector), GetRaw(resultHandle), resultSize, widthNorm);
}
// Multiplies the transposed lookup matrix by the vector, overwriting the result (isAdd == false)
void CCudaMathEngine::MultiplyTransposedLookupMatrixByVector(int batchSize, const CLookupMatrix& matrix,
	const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize)
{
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );

	const bool accumulate = false;
	multiplyVectorByLookupMatrixImpl( batchSize, matrix, vectorHandle, resultHandle, resultSize, accumulate );
}
// Multiplies the transposed lookup matrix by the vector, adding into the result (isAdd == true)
void CCudaMathEngine::MultiplyTransposedLookupMatrixByVectorAndAdd(int batchSize, const CLookupMatrix& matrix,
	const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize)
{
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );

	const bool accumulate = true;
	multiplyVectorByLookupMatrixImpl( batchSize, matrix, vectorHandle, resultHandle, resultSize, accumulate );
}
void CCudaMathEngine::MultiplyVectorByTransposedLookupVectorAndAddToTable(int batchSize,
const CFloatHandle& table, int vectorCount, int vectorSize, const CConstIntHandle& indexHandle,
const CConstFloatHandle& firstHandle, int firstSize, const CLookupVector& second)
{
ASSERT_EXPR( table.GetMathEngine() == this );
ASSERT_EXPR( indexHandle.GetMathEngine() == this );
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR(vectorSize == second.VectorSize());
SetCudaDevice( device->DeviceNumber );
int vectorSizeNorm = (vectorSize + MultiplyVectorByTransposedLookupVectorAndAddToTableCombine - 1) /
MultiplyVectorByTransposedLookupVectorAndAddToTableCombine;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, batchSize * firstSize, vectorSizeNorm);
hipLaunchKernelGGL(( MultiplyVectorByTransposedLookupVectorAndAddToTableKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize,
GetRaw(table), vectorCount, vectorSize, GetRaw(indexHandle),
GetRaw(firstHandle), firstSize, GetRaw(second.Table), GetRaw(second.Vector), vectorSizeNorm);
}
void CCudaMathEngine::MultiplyDiagMatrixByMatrix(const CConstFloatHandle& firstHandle, int firstSize,
const CConstFloatHandle& secondHandle, int secondWidth,
const CFloatHandle& resultHandle, int)
{
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR( secondHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, firstSize, secondWidth);
hipLaunchKernelGGL(( MultiplyDiagMatrixByMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0,
GetRaw(firstHandle), firstSize, GetRaw(secondHandle), secondWidth, GetRaw(resultHandle));
}
void CCudaMathEngine::Multiply1DiagMatrixByMatrix(int batchSize, const CConstFloatHandle& firstHandle, int firstSize,
const CConstFloatHandle& secondHandle, int secondWidth,
const CFloatHandle& resultHandle, int)
{
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR( secondHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
dim3 blockCount;
dim3 threadCount;
int batchNorm = (batchSize + Multiply1DiagMatrixByMatrixCombine - 1) /
Multiply1DiagMatrixByMatrixCombine;
getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, batchNorm, firstSize * secondWidth);
hipLaunchKernelGGL(( Multiply1DiagMatrixByMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0,
batchSize, GetRaw(firstHandle), firstSize, GetRaw(secondHandle), secondWidth, GetRaw(resultHandle), batchNorm);
}
void CCudaMathEngine::MultiplyMatrixByDiagMatrix(const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth,
const CConstFloatHandle& secondHandle, const CFloatHandle& resultHandle, int)
{
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR( secondHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, firstHeight, firstWidth);
hipLaunchKernelGGL(( MultiplyMatrixByDiagMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0,
GetRaw(firstHandle), firstHeight, firstWidth, GetRaw(secondHandle), GetRaw(resultHandle));
}
// Transposes a batch of float matrices (delegates to the shared template implementation)
void CCudaMathEngine::TransposeMatrix(int batchSize, const CConstFloatHandle& firstHandle,
	int height, int medium, int width, int channels, const CFloatHandle& resultHandle, int resultBufferSize)
{
	ASSERT_EXPR( firstHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );

	transposeMatrixImpl( batchSize, firstHandle, height, medium, width, channels, resultHandle, resultBufferSize );
}
// Transposes a batch of integer matrices (delegates to the shared template implementation)
void CCudaMathEngine::TransposeMatrix(int batchSize, const CConstIntHandle& firstHandle,
	int height, int medium, int width, int channels, const CIntHandle& resultHandle, int resultBufferSize)
{
	ASSERT_EXPR( firstHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );

	transposeMatrixImpl( batchSize, firstHandle, height, medium, width, channels, resultHandle, resultBufferSize );
}
void CCudaMathEngine::MultiplyDiagMatrixByMatrixAndAdd( int batchSize, const CConstFloatHandle& firstHandle,
int firstSize, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle )
{
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR( secondHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
int batchSizeNorm = ( batchSize + MultiplyDiagMatrixByMatrixAndSumCombine - 1 )
/ MultiplyDiagMatrixByMatrixAndSumCombine;
batchSizeNorm = alignXSizeForWarp( batchSizeNorm );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid3DMinZYX( 1, 1, 512, blockCount, threadCount, firstSize, secondWidth, batchSizeNorm );
int sharedSize = threadCount.x * threadCount.y * threadCount.z * sizeof( float );
hipLaunchKernelGGL(( MultiplyDiagMatrixByMatrixAndSumKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, batchSize,
GetRaw( firstHandle ), firstSize, GetRaw( secondHandle ), secondWidth, GetRaw( resultHandle ),
batchSizeNorm );
}
void CCudaMathEngine::RowMultiplyMatrixByMatrix( const CConstFloatHandle& firstHandle,
const CConstFloatHandle& secondHandle, int height, int width, const CFloatHandle& resultHandle )
{
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR( secondHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
VectorFill( resultHandle, 0, height );
int widthNorm = ( width + RowMultiplyMatrixByMatrixCombine - 1 ) / RowMultiplyMatrixByMatrixCombine;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, height, widthNorm );
const int sharedSize = threadCount.y * threadCount.x * sizeof( float );
hipLaunchKernelGGL(( RowMultiplyMatrixByMatrixKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, GetRaw( firstHandle ),
GetRaw( secondHandle ), height, width, GetRaw( resultHandle ), widthNorm );
}
// Spreads float matrix rows into the positions given by indexHandle (delegates to the template helper)
void CCudaMathEngine::MatrixSpreadRows(const CConstFloatHandle& sourceHandle, int height, int width,
	const CFloatHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle,
	const CConstFloatHandle& fillValue)
{
	ASSERT_EXPR( indexHandle.GetMathEngine() == this );
	ASSERT_EXPR( sourceHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	// fillValue is optional: a null handle means fill with zeros
	ASSERT_EXPR( fillValue.IsNull() || fillValue.GetMathEngine() == this );

	matrixSpreadRowsImpl( GetRaw( sourceHandle ), height, width,
		resultHandle, resultHeight, GetRaw( indexHandle ), fillValue );
}
void CCudaMathEngine::MatrixSpreadRowsAdd(const CConstFloatHandle& sourceHandle, int height, int width,
const CFloatHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle)
{
ASSERT_EXPR( indexHandle.GetMathEngine() == this );
ASSERT_EXPR( sourceHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
int widthNorm = (width + MatrixSpreadRowsCombine - 1) / MatrixSpreadRowsCombine;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, height, widthNorm);
hipLaunchKernelGGL(( MatrixSpreadRowsAddKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw(sourceHandle), height, width,
GetRaw(resultHandle), GetRaw(indexHandle), widthNorm);
}
// Spreads integer matrix rows into the positions given by indexHandle (delegates to the template helper)
void CCudaMathEngine::MatrixSpreadRows(const CConstIntHandle& sourceHandle, int height, int width,
	const CIntHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle,
	const CConstIntHandle& fillValue)
{
	ASSERT_EXPR( indexHandle.GetMathEngine() == this );
	ASSERT_EXPR( sourceHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	// fillValue is optional: a null handle means fill with zeros
	ASSERT_EXPR( fillValue.IsNull() || fillValue.GetMathEngine() == this );

	matrixSpreadRowsImpl( GetRaw( sourceHandle ), height, width,
		resultHandle, resultHeight, GetRaw( indexHandle ), fillValue );
}
void CCudaMathEngine::LookupAndSum( const CConstIntHandle& indicesHandle, int batchSize, int indexCount,
const CConstFloatHandle& tableHandle, int vectorSize, const CFloatHandle& result )
{
ASSERT_EXPR( indicesHandle.GetMathEngine() == this );
ASSERT_EXPR( tableHandle.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
dim3 blockCount, threadCount;
getCudaTaskGrid2D( blockCount, threadCount, batchSize, vectorSize );
hipLaunchKernelGGL(( LookupAndSumKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw( indicesHandle ), batchSize, indexCount,
GetRaw( tableHandle ), vectorSize, GetRaw( result ) );
}
void CCudaMathEngine::LookupAndAddToTable( const CConstIntHandle& indicesHandle, int batchSize, int indexCount,
const CConstFloatHandle& additionsHandle, int vectorSize, const CFloatHandle& tableHandle, int vectorCount )
{
ASSERT_EXPR( indicesHandle.GetMathEngine() == this );
ASSERT_EXPR( tableHandle.GetMathEngine() == this );
ASSERT_EXPR( additionsHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
VectorFill( tableHandle, 0.f, vectorSize * vectorCount );
dim3 blockCount, threadCount;
getCudaTaskGrid3D( blockCount, threadCount, batchSize, indexCount, vectorSize );
hipLaunchKernelGGL(( LookupAndAddToTableKernel), dim3(blockCount), dim3(threadCount), 0, 0, GetRaw( indicesHandle ), batchSize, indexCount,
GetRaw( additionsHandle ), vectorSize, GetRaw( tableHandle ) );
}
// One-hot encodes float enum values into vectors of enumSize elements each
void CCudaMathEngine::EnumBinarization(int batchSize,
	const CConstFloatHandle& inputHandle, int enumSize, const CFloatHandle& resultHandle)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// One task per output element across the whole batch, several combined per thread
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, batchSize * enumSize, EnumBinarizationCombine );

	hipLaunchKernelGGL(( EnumBinarizationKernel), dim3(blocks), dim3(threads), 0, 0,
		batchSize, GetRaw( inputHandle ), enumSize, GetRaw( resultHandle ) );
}
// One-hot encodes integer enum values into vectors of enumSize elements each
void CCudaMathEngine::EnumBinarization(int batchSize,
	const CConstIntHandle& inputHandle, int enumSize, const CFloatHandle& resultHandle)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// One task per output element across the whole batch, several combined per thread
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, batchSize * enumSize, EnumBinarizationCombine );

	hipLaunchKernelGGL(( EnumBinarizationKernel), dim3(blocks), dim3(threads), 0, 0,
		batchSize, GetRaw( inputHandle ), enumSize, GetRaw( resultHandle ) );
}
// Shared implementation for both TransposeMatrix overloads: transposes a batch of
// height x medium x width x channels blobs element-by-element
template<class T>
void CCudaMathEngine::transposeMatrixImpl(int batchSize, const CTypedMemoryHandle<const T>& firstHandle,
	int height, int medium, int width, int channels, const CTypedMemoryHandle<T>& resultHandle, int resultBufferSize)
{
	// The result buffer must fit the whole transposed batch
	const int totalSize = batchSize * height * medium * width * channels;
	ASSERT_EXPR(resultBufferSize >= totalSize);
	SetCudaDevice( device->DeviceNumber );

	// One task per element, several elements combined per thread
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, totalSize, TransposeMatrixCombine );

	hipLaunchKernelGGL(( TransposeMatrixKernel), dim3(blocks), dim3(threads), 0, 0,
		batchSize, GetRaw(firstHandle), height, medium, width, channels, GetRaw(resultHandle), totalSize);
}
// Shared implementation for column sums: writes the (optionally negated) sum of each
// matrix row's elements into resultHandle (one value per row).
// `matrix` is a raw device pointer; `isNeg` selects subtraction instead of addition.
void CCudaMathEngine::sumMatrixColumnsKernelFunc(const CFloatHandle& resultHandle, const float* matrix,
int matrixHeight, int matrixWidth, bool isNeg)
{
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Several elements along the width are combined per thread
int widthNorm = (matrixWidth + SumMatrixColumnsCombine - 1) / SumMatrixColumnsCombine;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, matrixHeight, widthNorm);
// Cap the number of X blocks so the total number of atomically-updated partial sums
// stays within SumMatrixColumnsMaxAtomic
int maxAtomicPerX = SumMatrixColumnsMaxAtomic / blockCount.y;
if(maxAtomicPerX <= 0) {
maxAtomicPerX = 1;
}
if((int)blockCount.x > maxAtomicPerX) {
blockCount.x = maxAtomicPerX;
}
// Recompute how many elements each thread must process after the block cap
int totalThreadXCount = threadCount.x * blockCount.x;
int combine = (matrixWidth + totalThreadXCount - 1) / totalThreadXCount;
if( blockCount.x > 1 ) {
// More than one block contributes to each row sum, so the kernel accumulates
// into the result and it must start from zeros
VectorFill(resultHandle, 0, matrixHeight);
}
// One float of shared memory per thread for the in-block reduction
const int sharedSize = threadCount.y * threadCount.x * sizeof(float);
hipLaunchKernelGGL(( SumMatrixColumnsKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0,
GetRaw(resultHandle), matrix, matrixHeight, matrixWidth, isNeg, widthNorm, combine);
}
// Shared implementation for MultiplyTransposedLookupMatrixByVector[AndAdd]:
// multiplies the transposed lookup matrix by the vector; `isAdd` selects whether the
// kernel accumulates into the existing result or the result is zeroed first.
void CCudaMathEngine::multiplyVectorByLookupMatrixImpl(int batchSize, const CLookupMatrix& matrix,
const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize, bool isAdd)
{
ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
// One result value per (batch, column) pair of the transposed product
ASSERT_EXPR(resultSize >= batchSize * matrix.Width());
SetCudaDevice( device->DeviceNumber );
// Several elements along the matrix height are combined per thread,
// then the X size is aligned for warp-wise reduction
int heightNorm = (matrix.Height() + MultiplyTransposedLookupMatrixByVectorCombine - 1) /
MultiplyTransposedLookupMatrixByVectorCombine;
heightNorm = alignXSizeForWarp(heightNorm);
// X coordinate is Height to allow for warp reduction
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, batchSize * matrix.Width(), heightNorm);
if(blockCount.x > 0 && !isAdd) {
// Several GPUs may take part in adding up one column, need atomic operations
// Set resultHandle to zeros
// NOTE(review): blockCount.x is always >= 1, so for !isAdd the zero-fill runs
// unconditionally; sibling code (sumMatrixColumnsKernelFunc) tests `> 1` —
// confirm whether that was the intent here.
VectorFill(resultHandle, 0, batchSize * matrix.Width());
}
// One float of shared memory per thread for the in-block reduction
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
hipLaunchKernelGGL(( MultiplyTransposedLookupMatrixByVectorKernel), dim3(blockCount), dim3(threadCount), sharedSize, 0, batchSize,
GetRaw(matrix.Table), matrix.Dims.VectorCount, matrix.Dims.VectorSize, GetRaw(matrix.Rows), matrix.RowCount,
GetRaw(vectorHandle), GetRaw(resultHandle), isAdd, heightNorm);
}
// Scatters rows of `source` into `result` at row positions given by `index`.
// Rows of `result` that receive no source row keep the background value:
// zero when fillValue is null, otherwise the value referenced by fillValue.
template<class T>
void CCudaMathEngine::matrixSpreadRowsImpl(const T* source, int height, int width,
CTypedMemoryHandle<T> result, int resultHeight, const int* index, const CTypedMemoryHandle<const T>& fillValue)
{
SetCudaDevice( device->DeviceNumber );
// Pre-fill the whole result so unreferenced rows hold the background value
if(fillValue.IsNull()) {
VectorFill( result, 0, resultHeight * width);
} else {
VectorFill( result, resultHeight * width, fillValue);
}
// Each thread handles up to MatrixSpreadRowsCombine elements along the row
int widthNorm = (width + MatrixSpreadRowsCombine - 1) / MatrixSpreadRowsCombine;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, height, widthNorm);
hipLaunchKernelGGL(( MatrixSpreadRowsKernel<T>), dim3(blockCount), dim3(threadCount), 0, 0, source, height, width,
GetRaw( result ), index, widthNorm);
}
// For each of the first lookupCount input channels, replaces the channel value with
// the corresponding row of its lookup table (one kernel launch per table); the
// remaining channels, if any, are copied to the output unchanged.
// Template over the input element type (float/int channel indices).
template<class T>
void CCudaMathEngine::vectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CTypedMemoryHandle<const T>& inputHandle,
const CConstFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
const CFloatHandle& outputHandle, int outputChannelsCount)
{
SetCudaDevice( device->DeviceNumber );
// Each thread processes up to BatchVectorLookupAndCopyCombineBatch batch elements
int batchNorm = (batchSize + BatchVectorLookupAndCopyCombineBatch - 1) / BatchVectorLookupAndCopyCombineBatch;
// Running offset of the current lookup's output block inside each output vector
int outputChannel = 0;
for(int j = 0; j < lookupCount; ++j) {
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, batchNorm, lookupDimensions[j].VectorSize);
hipLaunchKernelGGL(( VectorChannelLookupAndCopyKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, GetRaw(inputHandle) + j, channelCount,
GetRaw(lookupHandles[j]), lookupDimensions[j].VectorSize, GetRaw(outputHandle) + outputChannel, outputChannelsCount, batchNorm);
outputChannel += lookupDimensions[j].VectorSize;
}
// Channels without a lookup table are passed through verbatim
if(lookupCount < channelCount) {
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, batchNorm, channelCount - lookupCount);
hipLaunchKernelGGL(( BatchVectorChannelCopyKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, GetRaw(inputHandle) + lookupCount, channelCount, channelCount - lookupCount,
GetRaw(outputHandle) + outputChannel, outputChannelsCount, batchNorm);
}
}
// Backward counterpart of vectorMultichannelLookupAndCopy: for each of the first
// lookupCount channels, accumulates mult * matrix values into the lookup table rows
// selected by the input channel values. One kernel launch per lookup table.
template<class T>
void CCudaMathEngine::vectorMultichannelLookupAndAddToTable(int batchSize, int channelCount, const CTypedMemoryHandle<const T>& inputHandle,
const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
const CConstFloatHandle& multHandle, const CConstFloatHandle& matrixHandle, int outputChannelsCount)
{
SetCudaDevice( device->DeviceNumber );
// Each thread processes up to BatchVectorLookupAndAddToTableCombine batch elements
int batchNorm = (batchSize + BatchVectorLookupAndAddToTableCombine - 1) / BatchVectorLookupAndAddToTableCombine;
// Scalar multiplier is read from the device once on the host side
float mult = multHandle.GetValue();
// Running offset of the current lookup's slice inside each matrix row
int outputChannel = 0;
for (int j = 0; j < lookupCount; ++j) {
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, batchNorm, lookupDimensions[j].VectorSize);
hipLaunchKernelGGL(( VectorChannelLookupAndAddToTableKernel), dim3(blockCount), dim3(threadCount), 0, 0, batchSize, GetRaw(inputHandle) + j, channelCount,
GetRaw(lookupHandles[j]), lookupDimensions[j].VectorSize, mult, GetRaw(matrixHandle) + outputChannel, outputChannelsCount, batchNorm);
outputChannel += lookupDimensions[j].VectorSize;
}
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
| de1caf3bacd2793b400a11899af996f2d1385523.cu | /* Copyright © 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <MemoryHandleInternal.h>
#include <MathEngineCommon.h>
#include <CudaDevice.h>
#include <Kernels/CudaBlasKernels.h>
namespace NeoML {
// Writes the given vector into every row of the result matrix
// (per the kernel name: result row r gets a copy of vectorHandle).
void CCudaMathEngine::SetVectorToMatrixRows(const CFloatHandle& resultHandle,
	int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle)
{
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// One 2D grid over the matrixHeight x matrixWidth element space
	dim3 blocks;
	dim3 threads;
	getCudaTaskGrid2D( blocks, threads, matrixHeight, matrixWidth );
	SetVectorToMatrixRowsKernel<<<blocks, threads>>>( GetRaw( resultHandle ),
		matrixHeight, matrixWidth, GetRaw( vectorHandle ) );
}
// Adds vector values to matrix elements selected by `indices`
// (one index per matrix row; see AddVectorToMatrixElementsKernel).
void CCudaMathEngine::AddVectorToMatrixElements(const CFloatHandle& matrix, int height, int width,
	const CConstIntHandle& indices, const CConstFloatHandle& vector)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( indices.GetMathEngine() == this );
	ASSERT_EXPR( vector.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 1D grid over the rows; each thread combines AddVectorToMatrixElementsCombine of them
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, height, AddVectorToMatrixElementsCombine );
	AddVectorToMatrixElementsKernel<<<blocks, threads>>>( GetRaw( matrix ),
		height, width, GetRaw( indices ), GetRaw( vector ) );
}
// Adds vector[i] to matrix[rowIndices[i]][columnIndices[i]] for i in [0, vectorSize).
void CCudaMathEngine::AddVectorToMatrixElements(const CFloatHandle& matrixHandle, int height, int width,
	const CConstIntHandle& rowIndicesHandle, const CConstIntHandle& columnIndicesHandle,
	const CConstFloatHandle& vectorHandle, int vectorSize)
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( rowIndicesHandle.GetMathEngine() == this );
	ASSERT_EXPR( columnIndicesHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 1D grid over the vector elements, combined per thread
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, vectorSize, AddVectorToMatrixElementsMulCombine );
	AddVectorToMatrixElementsKernel<<<blocks, threads>>>( GetRaw( matrixHandle ), height, width,
		GetRaw( rowIndicesHandle ), GetRaw( columnIndicesHandle ), GetRaw( vectorHandle ), vectorSize );
}
// Assigns the values: matrix[rowIndices[i], columnIndices[i]] = vector[i]
// for i in [0, vectorSize).
void CCudaMathEngine::SetVectorToMatrixElements(
	const CFloatHandle& matrixHandle, int height, int width,
	const CConstIntHandle& rowIndicesHandle, const CConstIntHandle& columnIndicesHandle,
	const CConstFloatHandle& vectorHandle, int vectorSize )
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( rowIndicesHandle.GetMathEngine() == this );
	ASSERT_EXPR( columnIndicesHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 1D grid over the vector elements, combined per thread
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, vectorSize, SetVectorToMatrixElementsMulCombine );
	SetVectorToMatrixElementsKernel<<<blocks, threads>>>(
		GetRaw( matrixHandle ), height, width,
		GetRaw( rowIndicesHandle ), GetRaw( columnIndicesHandle ),
		GetRaw( vectorHandle ), vectorSize );
}
// Element-wise log-sum-exp of vector values into matrix elements selected by
// `indices` (one index per matrix row; see the kernel of the same name).
void CCudaMathEngine::EltwiseLogSumExpVectorToMatrixElements(const CFloatHandle& matrix, int height, int width,
	const CConstIntHandle& indices, const CConstFloatHandle& vector)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( indices.GetMathEngine() == this );
	ASSERT_EXPR( vector.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 1D grid: one task per matrix row
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, height );
	EltwiseLogSumExpVectorToMatrixElementsKernel<<<blocks, threads>>>( GetRaw( matrix ),
		height, width, GetRaw( indices ), GetRaw( vector ) );
}
// Element-wise log-sum-exp of vector[i] into matrix[rowIndices[i]][columnIndices[i]].
void CCudaMathEngine::EltwiseLogSumExpVectorToMatrixElements(const CFloatHandle& matrix,
	int height, int width,
	const CConstIntHandle& rowIndices, const CConstIntHandle& columnIndices,
	const CConstFloatHandle& vector, int vectorSize)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( rowIndices.GetMathEngine() == this );
	ASSERT_EXPR( columnIndices.GetMathEngine() == this );
	ASSERT_EXPR( vector.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 2D grid over the height x width element space
	dim3 blocks;
	dim3 threads;
	getCudaTaskGrid2D( blocks, threads, height, width );
	EltwiseLogSumExpVectorToMatrixElementsKernel<<<blocks, threads>>>( GetRaw( matrix ), height, width,
		GetRaw( rowIndices ), GetRaw( columnIndices ), GetRaw( vector ), vectorSize );
}
// Adds matrix elements selected by `indices` (one index per row) into `result`.
// result must hold at least `height` elements (asserted via vectorSize).
void CCudaMathEngine::AddMatrixElementsToVector(const CConstFloatHandle& matrix, int height, int width,
	const CConstIntHandle& indices, const CFloatHandle& result, int vectorSize)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	ASSERT_EXPR( indices.GetMathEngine() == this );
	ASSERT_EXPR(vectorSize >= height);
	SetCudaDevice( device->DeviceNumber );

	// 1D grid over the rows, combined per thread
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, height, AddMatrixElementsToVectorCombine );
	AddMatrixElementsToVectorKernel<<<blocks, threads>>>( GetRaw( matrix ),
		height, width, GetRaw( indices ), GetRaw( result ) );
}
// Adds matrix[rowIndices[i]][columnIndices[i]] into result[i] for i in [0, vectorSize).
void CCudaMathEngine::AddMatrixElementsToVector(const CConstFloatHandle& matrix, int height, int width,
	const CConstIntHandle& rowIndices, const CConstIntHandle& columnIndices,
	const CFloatHandle& result, int vectorSize)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( rowIndices.GetMathEngine() == this );
	ASSERT_EXPR( columnIndices.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 1D grid over the vector elements, combined per thread
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, vectorSize, AddMatrixElementsToVectorMulCombine );
	AddMatrixElementsToVectorKernel<<<blocks, threads>>>( GetRaw( matrix ),
		height, width, GetRaw( rowIndices ), GetRaw( columnIndices ), GetRaw( result ), vectorSize );
}
// Adds matrix elements selected by `indices` (one per row) into the result matrix
// (see AddMatrixElementsToMatrixKernel for the exact element mapping).
void CCudaMathEngine::AddMatrixElementsToMatrix(const CConstFloatHandle& matrix, int height, int width,
	const CFloatHandle& result, const CConstIntHandle& indices)
{
	ASSERT_EXPR( matrix.GetMathEngine() == this );
	ASSERT_EXPR( result.GetMathEngine() == this );
	ASSERT_EXPR( indices.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 1D grid over the rows, combined per thread
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, height, AddMatrixElementsToMatrixCombine );
	AddMatrixElementsToMatrixKernel<<<blocks, threads>>>( GetRaw( matrix ),
		height, width, GetRaw( result ), GetRaw( indices ) );
}
// Adds a vector to every row of each matrix in the batch, writing to resultHandle.
void CCudaMathEngine::AddVectorToMatrixRows(int batchSize,
const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle, int matrixHeight,
int matrixWidth, const CConstFloatHandle& vectorHandle)
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Width combined per thread, then aligned to warp size for the kernel's access pattern
int widthNorm = (matrixWidth + BatchAddVectorToMatrixRowsCombine - 1) /
BatchAddVectorToMatrixRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
// Y axis spans all rows of all batch matrices, X spans the normalized width
getCudaTaskGrid2D(blockCount, threadCount, batchSize * matrixHeight, widthNorm);
AddVectorToMatrixRowsKernel<<<blockCount, threadCount>>>(batchSize,
GetRaw(matrixHandle), GetRaw(resultHandle), matrixHeight,
matrixWidth, GetRaw(vectorHandle), widthNorm);
}
// Adds a column vector to every column of the matrix, writing to resultHandle
// (float variant; see the int overload below).
void CCudaMathEngine::AddVectorToMatrixColumns( const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle,
	int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle )
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 2D grid over the matrixHeight x matrixWidth element space
	dim3 blocks;
	dim3 threads;
	getCudaTaskGrid2D( blocks, threads, matrixHeight, matrixWidth );
	AddVectorToMatrixColumnsKernel<<<blocks, threads>>>( GetRaw( matrixHandle ),
		GetRaw( resultHandle ), matrixHeight, matrixWidth, GetRaw( vectorHandle ) );
}
// Integer variant of AddVectorToMatrixColumns: adds a column vector to every
// column of the matrix, writing to resultHandle.
void CCudaMathEngine::AddVectorToMatrixColumns( const CConstIntHandle& matrixHandle, const CIntHandle& resultHandle,
	int matrixHeight, int matrixWidth, const CConstIntHandle& vectorHandle )
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 2D grid over the matrixHeight x matrixWidth element space
	dim3 blocks;
	dim3 threads;
	getCudaTaskGrid2D( blocks, threads, matrixHeight, matrixWidth );
	AddVectorToMatrixColumnsKernel<<<blocks, threads>>>( GetRaw( matrixHandle ),
		GetRaw( resultHandle ), matrixHeight, matrixWidth, GetRaw( vectorHandle ) );
}
// Subtracts a column vector from every column of the matrix, writing to resultHandle.
void CCudaMathEngine::SubVectorFromMatrixColumns(const CConstFloatHandle& matrixHandle, const CFloatHandle& resultHandle,
	int matrixHeight, int matrixWidth, const CConstFloatHandle& vectorHandle)
{
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 2D grid over the matrixHeight x matrixWidth element space
	dim3 blocks;
	dim3 threads;
	getCudaTaskGrid2D( blocks, threads, matrixHeight, matrixWidth );
	SubVectorFromMatrixColumnsKernel<<<blocks, threads>>>( GetRaw( matrixHandle ),
		GetRaw( resultHandle ), matrixHeight, matrixWidth, GetRaw( vectorHandle ) );
}
// Adds the row sums of each matrix in the batch into resultHandle
// (accumulating — the "Add" variant does not clear the result first).
void CCudaMathEngine::SumMatrixRowsAdd(
int batchSize, const CFloatHandle& resultHandle, const CConstFloatHandle& matrixHandle,
int matrixHeight, int matrixWidth )
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Each thread combines up to SumMatrixRowsAddCombineCount rows
const int height = ( matrixHeight + SumMatrixRowsAddCombineCount - 1 ) / SumMatrixRowsAddCombineCount;
dim3 blockCount;
dim3 threadCount;
// 3D grid: batch x (combined) height x width
getCudaTaskGrid3D( blockCount, threadCount, batchSize, height, matrixWidth );
SumMatrixRowsAddKernel<<<blockCount, threadCount>>>
( batchSize, GetRaw(resultHandle), GetRaw(matrixHandle), matrixHeight, matrixWidth );
}
// Sums the columns of each matrix row into resultHandle.
// Thin wrapper over sumMatrixColumnsKernelFunc with isNeg == false.
void CCudaMathEngine::SumMatrixColumns(const CFloatHandle& resultHandle, const CConstFloatHandle& matrixHandle,
int matrixHeight, int matrixWidth)
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
sumMatrixColumnsKernelFunc(resultHandle, GetRaw(matrixHandle), matrixHeight, matrixWidth, false);
}
// Computes log-sum-exp over each matrix row into result (one value per row).
void CCudaMathEngine::MatrixLogSumExpByRows(const CConstFloatHandle& matrix, int height, int width,
const CFloatHandle& result, int resultSize)
{
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
ASSERT_EXPR(resultSize >= height);
SetCudaDevice( device->DeviceNumber );
// Width combined per thread and aligned to warp size: the reduction runs along X
int widthNorm = (width + MatrixLogSumExpByRowsCombine - 1) / MatrixLogSumExpByRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm);
// Force a single block along X so each row is reduced entirely within one block
blockCount.x = 1;
// Dynamic shared memory: one float per thread for the in-block reduction
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
MatrixLogSumExpByRowsKernel<<<blockCount, threadCount, sharedSize>>>(GetRaw(matrix), height, width,
GetRaw(result), widthNorm);
}
// Computes softmax independently over each matrix row into result.
void CCudaMathEngine::MatrixSoftmaxByRows(const CConstFloatHandle& matrix, int height, int width,
const CFloatHandle& result)
{
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Width combined per thread and aligned to warp size: the reduction runs along X
int widthNorm = (width + MatrixSoftmaxByRowsCombine - 1) / MatrixSoftmaxByRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm);
// Force a single block along X so each row is reduced entirely within one block
blockCount.x = 1;
// Dynamic shared memory: one float per thread for the in-block reduction
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
MatrixSoftmaxByRowsKernel<<<blockCount, threadCount, sharedSize>>>(GetRaw(matrix), height, width,
GetRaw(result), widthNorm);
}
// Applies the softmax backward (diff) operation row-wise:
// combines `first` (softmax output) with `second` (incoming gradient) into result.
void CCudaMathEngine::MatrixSoftmaxDiffOpByRows(const CConstFloatHandle& first, const CConstFloatHandle& second,
int height, int width, const CFloatHandle& result)
{
ASSERT_EXPR( first.GetMathEngine() == this );
ASSERT_EXPR( second.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Width combined per thread and aligned to warp size: the reduction runs along X
int widthNorm = (width + MatrixSoftmaxDiffOpByRowsCombine - 1) / MatrixSoftmaxDiffOpByRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, height, widthNorm);
// Force a single block along X so each row is reduced entirely within one block
blockCount.x = 1;
// Dynamic shared memory: one float per thread for the in-block reduction
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
MatrixSoftmaxDiffOpByRowsKernel<<<blockCount, threadCount, sharedSize>>>(GetRaw(first), GetRaw(second),
height, width, GetRaw(result), widthNorm);
}
// Computes log-sum-exp over each matrix column into result (one value per column).
void CCudaMathEngine::MatrixLogSumExpByColumns(const CConstFloatHandle& matrix, int height, int width,
const CFloatHandle& result, int resultSize)
{
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
ASSERT_EXPR(resultSize >= width);
SetCudaDevice( device->DeviceNumber );
// Height combined per thread and aligned to warp size: the reduction runs along X
int heightNorm = (height + MatrixLogSumExpByColumnsCombine - 1) / MatrixLogSumExpByColumnsCombine;
heightNorm = alignXSizeForWarp(heightNorm);
dim3 blockCount;
dim3 threadCount;
// Rows over the X instead of Y axis, so we could reduce by X
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, width, heightNorm);
// Force a single block along X so each column is reduced entirely within one block
blockCount.x = 1;
// Dynamic shared memory: one float per thread for the in-block reduction
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
MatrixLogSumExpByColumnsKernel<<<blockCount, threadCount, sharedSize>>>(
GetRaw(matrix), height, width, GetRaw(result), heightNorm);
}
// Computes softmax independently over each matrix column into result.
void CCudaMathEngine::MatrixSoftmaxByColumns(const CConstFloatHandle& matrix, int height, int width,
const CFloatHandle& result)
{
ASSERT_EXPR( matrix.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Height combined per thread and aligned to warp size: the reduction runs along X
int heightNorm = (height + MatrixSoftmaxByColumnsCombine - 1) / MatrixSoftmaxByColumnsCombine;
heightNorm = alignXSizeForWarp(heightNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, width, heightNorm);
// Force a single block along X so each column is reduced entirely within one block
blockCount.x = 1;
// Dynamic shared memory: one float per thread for the in-block reduction
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
MatrixSoftmaxByColumnsKernel<<<blockCount, threadCount, sharedSize>>>(GetRaw(matrix), height, width,
GetRaw(result), heightNorm);
}
// Applies the softmax backward (diff) operation column-wise:
// combines `first` (softmax output) with `second` (incoming gradient) into result.
void CCudaMathEngine::MatrixSoftmaxDiffOpByColumns(const CConstFloatHandle& first, const CConstFloatHandle& second,
int height, int width, const CFloatHandle& result)
{
ASSERT_EXPR( first.GetMathEngine() == this );
ASSERT_EXPR( second.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Height combined per thread and aligned to warp size: the reduction runs along X
int heightNorm = (height + MatrixSoftmaxDiffOpByColumnsCombine - 1) / MatrixSoftmaxDiffOpByColumnsCombine;
heightNorm = alignXSizeForWarp(heightNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, width, heightNorm);
// Force a single block along X so each column is reduced entirely within one block
blockCount.x = 1;
// Dynamic shared memory: one float per thread for the in-block reduction
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
MatrixSoftmaxDiffOpByColumnsKernel<<<blockCount, threadCount, sharedSize>>>(
GetRaw(first), GetRaw(second), height, width, GetRaw(result), heightNorm);
}
// Finds the maximum value in each row and its column index;
// writes values to resultHandle and indices to columnIndices (one entry per row).
void CCudaMathEngine::FindMaxValueInRows(const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth,
const CFloatHandle& resultHandle, const CIntHandle& columnIndices, int vectorSize)
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR(vectorSize >= matrixHeight);
SetCudaDevice( device->DeviceNumber );
// Width combined per thread and aligned to warp size: the reduction runs along X
int widthNorm = (matrixWidth + FindMaxValueInRowsCombine - 1) / FindMaxValueInRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, matrixHeight, widthNorm);
// Force a single block along X so each row is reduced entirely within one block
blockCount.x = 1;
// Dynamic shared memory: one (value, index) pair per thread for the reduction
const int sharedSize = threadCount.y * threadCount.x * sizeof(CValueWithIndex);
FindMaxValueWithIndicesInRowsKernel<<<blockCount, threadCount, sharedSize>>>(
GetRaw(matrixHandle), matrixHeight, matrixWidth, GetRaw(resultHandle), GetRaw(columnIndices), widthNorm);
}
// Finds the maximum value in each row (values only, no indices);
// writes one float per row into resultHandle.
void CCudaMathEngine::FindMaxValueInRows(const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth,
const CFloatHandle& resultHandle, int vectorSize)
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR(vectorSize >= matrixHeight);
SetCudaDevice( device->DeviceNumber );
// Width combined per thread and aligned to warp size: the reduction runs along X
int widthNorm = (matrixWidth + FindMaxValueInRowsCombine - 1) / FindMaxValueInRowsCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, matrixHeight, widthNorm);
// Force a single block along X so each row is reduced entirely within one block
blockCount.x = 1;
// Dynamic shared memory: one float per thread for the reduction
const int sharedSize = threadCount.y * threadCount.x * sizeof(float);
FindMaxValueInRowsKernel<<<blockCount, threadCount, sharedSize>>>(GetRaw(matrixHandle), matrixHeight,
matrixWidth, GetRaw(resultHandle), widthNorm);
}
// For every matrix in the batch, finds the maximum value of each column and the
// row index where it occurs; results are batchSize * matrixWidth values/indices.
void CCudaMathEngine::FindMaxValueInColumns( int batchSize, const CConstFloatHandle& matrixHandle, int matrixHeight,
int matrixWidth, const CFloatHandle& resultHandle, const CIntHandle& rowIndices, int vectorSize )
{
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR( rowIndices.GetMathEngine() == this );
ASSERT_EXPR( vectorSize >= batchSize * matrixWidth );
SetCudaDevice( device->DeviceNumber );
// Height combined per thread and aligned to warp size: the reduction runs along X
int heightNorm = ( matrixHeight + FindMaxValueInColumnsCombine - 1 ) / FindMaxValueInColumnsCombine;
heightNorm = alignXSizeForWarp( heightNorm );
dim3 blockCount;
dim3 threadCount;
// 3D grid: batch x width x (combined) height
getCudaTaskGrid3D( blockCount, threadCount, batchSize, matrixWidth, heightNorm );
// Force a single block along X so each column is reduced entirely within one block
blockCount.x = 1;
// Dynamic shared memory: one (value, index) pair per thread for the reduction
const int sharedSize = threadCount.z * threadCount.y * threadCount.x * sizeof( CValueWithIndex );
FindMaxValueInColumnsKernel<<<blockCount, threadCount, sharedSize>>>( batchSize,
GetRaw( matrixHandle ), matrixHeight, matrixWidth, GetRaw( resultHandle ), GetRaw( rowIndices ),
heightNorm );
}
// Finds the minimum value in each matrix column and the row index where it occurs;
// writes matrixWidth values to resultHandle and matrixWidth indices to columnIndices.
void CCudaMathEngine::FindMinValueInColumns( const CConstFloatHandle& matrixHandle, int matrixHeight, int matrixWidth,
	const CFloatHandle& resultHandle, const CIntHandle& columnIndices )
{
	// Validate handle ownership, consistent with every other public method in this file
	// (was missing here — a foreign-engine handle would otherwise go undetected)
	ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	ASSERT_EXPR( columnIndices.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );
	// Initialize using the first row data
	VectorCopy( resultHandle, matrixHandle, matrixWidth );
	VectorFill( columnIndices, 0, matrixWidth );
	// 1D grid: one task per column
	int blockCount;
	int threadCount;
	getCudaTaskGrid( blockCount, threadCount, matrixWidth );
	FindMinValueInColumnsKernel<<<blockCount, threadCount>>>( GetRaw( matrixHandle ), matrixHeight,
		matrixWidth, GetRaw( resultHandle ), GetRaw( columnIndices ) );
}
// Public float-indexed entry point: validates handles and delegates to the
// templated vectorMultichannelLookupAndCopy implementation.
void CCudaMathEngine::VectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CConstFloatHandle& inputHandle,
const CConstFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
const CFloatHandle& outputHandle, int outputChannelsCount)
{
ASSERT_EXPR( inputHandle.GetMathEngine() == this );
ASSERT_EXPR( outputHandle.GetMathEngine() == this );
vectorMultichannelLookupAndCopy(batchSize, channelCount, inputHandle,
lookupHandles, lookupDimensions, lookupCount, outputHandle, outputChannelsCount);
}
// Public int-indexed entry point: validates handles and delegates to the
// templated vectorMultichannelLookupAndCopy implementation.
void CCudaMathEngine::VectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CConstIntHandle& inputHandle,
const CConstFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
const CFloatHandle& outputHandle, int outputChannelsCount)
{
ASSERT_EXPR( inputHandle.GetMathEngine() == this );
ASSERT_EXPR( outputHandle.GetMathEngine() == this );
vectorMultichannelLookupAndCopy(batchSize, channelCount, inputHandle,
lookupHandles, lookupDimensions, lookupCount, outputHandle, outputChannelsCount);
}
// Public float-indexed entry point: validates handles and delegates to the
// templated vectorMultichannelLookupAndAddToTable implementation.
void CCudaMathEngine::VectorMultichannelLookupAndAddToTable(int batchSize, int channelCount, const CConstFloatHandle& inputHandle,
const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
const CConstFloatHandle& multHandle,
const CConstFloatHandle& matrixHandle, int outputChannelsCount)
{
ASSERT_EXPR( inputHandle.GetMathEngine() == this );
ASSERT_EXPR( multHandle.GetMathEngine() == this );
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
vectorMultichannelLookupAndAddToTable(batchSize, channelCount, inputHandle,
lookupHandles, lookupDimensions, lookupCount, multHandle, matrixHandle, outputChannelsCount);
}
// Public int-indexed entry point: validates handles and delegates to the
// templated vectorMultichannelLookupAndAddToTable implementation.
void CCudaMathEngine::VectorMultichannelLookupAndAddToTable(int batchSize, int channelCount, const CConstIntHandle& inputHandle,
const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
const CConstFloatHandle& multHandle,
const CConstFloatHandle& matrixHandle, int outputChannelsCount)
{
ASSERT_EXPR( inputHandle.GetMathEngine() == this );
ASSERT_EXPR( multHandle.GetMathEngine() == this );
ASSERT_EXPR( matrixHandle.GetMathEngine() == this );
vectorMultichannelLookupAndAddToTable(batchSize, channelCount, inputHandle,
lookupHandles, lookupDimensions, lookupCount, multHandle, matrixHandle, outputChannelsCount);
}
// Converts a batch of integer bit sets into float vectors: each of the batchSize
// bit sets (bitSetSize ints each) produces outputVectorSize floats in resultHandle.
void CCudaMathEngine::BitSetBinarization(int batchSize, int bitSetSize,
	const CConstIntHandle& inputHandle, int outputVectorSize, const CFloatHandle& resultHandle)
{
	ASSERT_EXPR( inputHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 1D grid over every output element of the whole batch
	int blocks = 0;
	int threads = 0;
	getCudaTaskGrid( blocks, threads, batchSize * outputVectorSize );
	BitSetBinarizationKernel<<<blocks, threads>>>( batchSize, bitSetSize,
		GetRaw( inputHandle ), outputVectorSize, GetRaw( resultHandle ) );
}
// Multiplies a lookup matrix by a lookup vector for each batch element:
// result holds batchSize * matrix.Height() values.
void CCudaMathEngine::MultiplyLookupMatrixByLookupVector(int batchSize, const CLookupMatrix& matrix,
const CLookupVector& vector, const CFloatHandle& resultHandle, int resultSize)
{
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
ASSERT_EXPR(matrix.Width() == vector.VectorSize());
ASSERT_EXPR(resultSize >= batchSize * matrix.Height());
// Width combined per thread and aligned to warp size: the reduction runs along X
int widthNorm = (matrix.Width() + MultiplyLookupMatrixByLookupVectorCombine - 1) /
MultiplyLookupMatrixByLookupVectorCombine;
widthNorm = alignXSizeForWarp(widthNorm);
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, batchSize * matrix.Height(), widthNorm);
// NOTE(review): blockCount.x is always >= 1, so this zero fill always runs.
// The sibling code in sumMatrixColumnsKernelFunc tests `> 1`; if the kernel
// accumulates atomically the unconditional fill is required — confirm against
// MultiplyLookupMatrixByLookupVectorKernel before tightening the condition.
if(blockCount.x > 0) {
// Several GPUs may take part in adding up one row, need atomic operations
// Set resultHandle to zeros
VectorFill(resultHandle, 0, batchSize * matrix.Height());
}
// Dynamic shared memory: one float per thread for the in-block reduction
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
MultiplyLookupMatrixByLookupVectorKernel<<<blockCount, threadCount, sharedSize>>>(batchSize,
GetRaw(matrix.Table), matrix.Dims.VectorCount, matrix.Dims.VectorSize,
GetRaw(matrix.Rows), matrix.RowCount, GetRaw(vector.Table), vector.Dims.VectorCount,
GetRaw(vector.Vector), GetRaw(resultHandle), resultSize, widthNorm);
}
// Multiplies the transposed lookup matrix by a vector, overwriting the result
// (delegates to the shared implementation with isAdd == false).
void CCudaMathEngine::MultiplyTransposedLookupMatrixByVector(int batchSize, const CLookupMatrix& matrix,
const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize)
{
ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
multiplyVectorByLookupMatrixImpl(batchSize, matrix, vectorHandle, resultHandle, resultSize, false);
}
// Multiplies the transposed lookup matrix by a vector and adds the product
// to the result (delegates to the shared implementation with isAdd == true).
void CCudaMathEngine::MultiplyTransposedLookupMatrixByVectorAndAdd(int batchSize, const CLookupMatrix& matrix,
const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize)
{
ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
multiplyVectorByLookupMatrixImpl(batchSize, matrix, vectorHandle, resultHandle, resultSize, true);
}
// Computes the outer product of `first` with the transposed lookup vector `second`
// and accumulates it into the rows of `table` selected by indexHandle.
void CCudaMathEngine::MultiplyVectorByTransposedLookupVectorAndAddToTable(int batchSize,
const CFloatHandle& table, int vectorCount, int vectorSize, const CConstIntHandle& indexHandle,
const CConstFloatHandle& firstHandle, int firstSize, const CLookupVector& second)
{
ASSERT_EXPR( table.GetMathEngine() == this );
ASSERT_EXPR( indexHandle.GetMathEngine() == this );
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR(vectorSize == second.VectorSize());
SetCudaDevice( device->DeviceNumber );
// Each thread combines up to ...Combine elements along the lookup vector
int vectorSizeNorm = (vectorSize + MultiplyVectorByTransposedLookupVectorAndAddToTableCombine - 1) /
MultiplyVectorByTransposedLookupVectorAndAddToTableCombine;
dim3 blockCount;
dim3 threadCount;
// Y axis spans all elements of `first` across the batch, X the normalized vector size
getCudaTaskGrid2D(blockCount, threadCount, batchSize * firstSize, vectorSizeNorm);
MultiplyVectorByTransposedLookupVectorAndAddToTableKernel<<<blockCount, threadCount>>>(batchSize,
GetRaw(table), vectorCount, vectorSize, GetRaw(indexHandle),
GetRaw(firstHandle), firstSize, GetRaw(second.Table), GetRaw(second.Vector), vectorSizeNorm);
}
// Multiplies a diagonal matrix (given by its firstSize diagonal values) by a
// dense firstSize x secondWidth matrix, writing to resultHandle.
void CCudaMathEngine::MultiplyDiagMatrixByMatrix(const CConstFloatHandle& firstHandle, int firstSize,
	const CConstFloatHandle& secondHandle, int secondWidth,
	const CFloatHandle& resultHandle, int)
{
	ASSERT_EXPR( firstHandle.GetMathEngine() == this );
	ASSERT_EXPR( secondHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 2D grid over the firstSize x secondWidth result space
	dim3 blocks;
	dim3 threads;
	getCudaTaskGrid2D( blocks, threads, firstSize, secondWidth );
	MultiplyDiagMatrixByMatrixKernel<<<blocks, threads>>>( GetRaw( firstHandle ), firstSize,
		GetRaw( secondHandle ), secondWidth, GetRaw( resultHandle ) );
}
// Multiplies one diagonal matrix (firstSize diagonal values) by each of the
// batchSize dense matrices, writing batchSize results.
void CCudaMathEngine::Multiply1DiagMatrixByMatrix(int batchSize, const CConstFloatHandle& firstHandle, int firstSize,
const CConstFloatHandle& secondHandle, int secondWidth,
const CFloatHandle& resultHandle, int)
{
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR( secondHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
dim3 blockCount;
dim3 threadCount;
// Each thread combines up to Multiply1DiagMatrixByMatrixCombine batch elements
int batchNorm = (batchSize + Multiply1DiagMatrixByMatrixCombine - 1) /
Multiply1DiagMatrixByMatrixCombine;
// Y axis spans the (combined) batch, X spans all elements of one result matrix
getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, batchNorm, firstSize * secondWidth);
Multiply1DiagMatrixByMatrixKernel<<<blockCount, threadCount>>>
(batchSize, GetRaw(firstHandle), firstSize, GetRaw(secondHandle), secondWidth, GetRaw(resultHandle), batchNorm);
}
// Multiplies a dense firstHeight x firstWidth matrix by a diagonal matrix
// (given by its firstWidth diagonal values), writing to resultHandle.
void CCudaMathEngine::MultiplyMatrixByDiagMatrix(const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth,
	const CConstFloatHandle& secondHandle, const CFloatHandle& resultHandle, int)
{
	ASSERT_EXPR( firstHandle.GetMathEngine() == this );
	ASSERT_EXPR( secondHandle.GetMathEngine() == this );
	ASSERT_EXPR( resultHandle.GetMathEngine() == this );
	SetCudaDevice( device->DeviceNumber );

	// 2D grid over the firstHeight x firstWidth result space
	dim3 blocks;
	dim3 threads;
	getCudaTaskGrid2D( blocks, threads, firstHeight, firstWidth );
	MultiplyMatrixByDiagMatrixKernel<<<blocks, threads>>>( GetRaw( firstHandle ),
		firstHeight, firstWidth, GetRaw( secondHandle ), GetRaw( resultHandle ) );
}
// Transposes each matrix in the batch (float variant);
// delegates to the templated transposeMatrixImpl.
void CCudaMathEngine::TransposeMatrix(int batchSize, const CConstFloatHandle& firstHandle,
int height, int medium, int width, int channels, const CFloatHandle& resultHandle, int resultBufferSize)
{
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
transposeMatrixImpl(batchSize, firstHandle, height, medium, width, channels, resultHandle, resultBufferSize);
}
// Transposes each matrix in the batch (int variant);
// delegates to the templated transposeMatrixImpl.
void CCudaMathEngine::TransposeMatrix(int batchSize, const CConstIntHandle& firstHandle,
int height, int medium, int width, int channels, const CIntHandle& resultHandle, int resultBufferSize)
{
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
transposeMatrixImpl(batchSize, firstHandle, height, medium, width, channels, resultHandle, resultBufferSize);
}
// Multiplies a batch of diagonal matrices by matrices and accumulates into resultHandle
// (the kernel name says "AndSum" -- reduction over the batch dimension).
void CCudaMathEngine::MultiplyDiagMatrixByMatrixAndAdd( int batchSize, const CConstFloatHandle& firstHandle,
int firstSize, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle )
{
// All handles must belong to this math engine
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR( secondHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Normalize the batch by the combine factor (ceiling division) ...
int batchSizeNorm = ( batchSize + MultiplyDiagMatrixByMatrixAndSumCombine - 1 )
/ MultiplyDiagMatrixByMatrixAndSumCombine;
// ... then round up to a whole number of warps so the X dimension is warp-aligned
batchSizeNorm = alignXSizeForWarp( batchSizeNorm );
dim3 blockCount;
dim3 threadCount;
// 3D grid: Z = firstSize, Y = secondWidth, X = normalized batch (the reduction axis)
getCudaTaskGrid3DMinZYX( 1, 1, 512, blockCount, threadCount, firstSize, secondWidth, batchSizeNorm );
// One float of shared memory per thread for the in-block reduction
int sharedSize = threadCount.x * threadCount.y * threadCount.z * sizeof( float );
MultiplyDiagMatrixByMatrixAndSumKernel<<<blockCount, threadCount, sharedSize>>>( batchSize,
GetRaw( firstHandle ), firstSize, GetRaw( secondHandle ), secondWidth, GetRaw( resultHandle ),
batchSizeNorm );
}
// Row-wise product of two (height x width) matrices reduced along the row:
// the result is a vector of `height` values. The result is zero-filled first
// because the kernel accumulates partial sums into it.
void CCudaMathEngine::RowMultiplyMatrixByMatrix( const CConstFloatHandle& firstHandle,
const CConstFloatHandle& secondHandle, int height, int width, const CFloatHandle& resultHandle )
{
// All handles must belong to this math engine
ASSERT_EXPR( firstHandle.GetMathEngine() == this );
ASSERT_EXPR( secondHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Zero the output; the kernel adds into it
VectorFill( resultHandle, 0, height );
// Each thread processes up to RowMultiplyMatrixByMatrixCombine columns
int widthNorm = ( width + RowMultiplyMatrixByMatrixCombine - 1 ) / RowMultiplyMatrixByMatrixCombine;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, height, widthNorm );
// One float of shared memory per thread for the in-block reduction
const int sharedSize = threadCount.y * threadCount.x * sizeof( float );
RowMultiplyMatrixByMatrixKernel<<<blockCount, threadCount, sharedSize>>>( GetRaw( firstHandle ),
GetRaw( secondHandle ), height, width, GetRaw( resultHandle ), widthNorm );
}
// Spreads the rows of a (height x width) float matrix into a taller result matrix
// at the row positions given by indexHandle; rows not written are set to fillValue
// (or zero when fillValue is null). Delegates to the shared template implementation.
void CCudaMathEngine::MatrixSpreadRows(const CConstFloatHandle& sourceHandle, int height, int width,
const CFloatHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle,
const CConstFloatHandle& fillValue)
{
// All handles must belong to this math engine; fillValue may be null (meaning "fill with 0")
ASSERT_EXPR( indexHandle.GetMathEngine() == this );
ASSERT_EXPR( sourceHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR( fillValue.IsNull() || fillValue.GetMathEngine() == this );
matrixSpreadRowsImpl(GetRaw(sourceHandle), height, width,
resultHandle, resultHeight, GetRaw(indexHandle), fillValue);
}
// Like MatrixSpreadRows but accumulates source rows into the existing result
// (note: no zero-fill here, unlike matrixSpreadRowsImpl -- the "Add" kernel is
// expected to add into resultHandle's current contents).
void CCudaMathEngine::MatrixSpreadRowsAdd(const CConstFloatHandle& sourceHandle, int height, int width,
const CFloatHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle)
{
// All handles must belong to this math engine
ASSERT_EXPR( indexHandle.GetMathEngine() == this );
ASSERT_EXPR( sourceHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Each thread covers up to MatrixSpreadRowsCombine columns
int widthNorm = (width + MatrixSpreadRowsCombine - 1) / MatrixSpreadRowsCombine;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, height, widthNorm);
MatrixSpreadRowsAddKernel<<<blockCount, threadCount>>>(GetRaw(sourceHandle), height, width,
GetRaw(resultHandle), GetRaw(indexHandle), widthNorm);
}
// Integer-element overload of MatrixSpreadRows; same semantics as the float
// version, delegating to the shared template implementation.
void CCudaMathEngine::MatrixSpreadRows(const CConstIntHandle& sourceHandle, int height, int width,
const CIntHandle& resultHandle, int resultHeight, const CConstIntHandle& indexHandle,
const CConstIntHandle& fillValue)
{
// All handles must belong to this math engine; fillValue may be null (meaning "fill with 0")
ASSERT_EXPR( indexHandle.GetMathEngine() == this );
ASSERT_EXPR( sourceHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR( fillValue.IsNull() || fillValue.GetMathEngine() == this );
matrixSpreadRowsImpl(GetRaw(sourceHandle), height, width,
resultHandle, resultHeight, GetRaw(indexHandle), fillValue);
}
// Launches LookupAndSumKernel over a (batchSize x vectorSize) 2D grid:
// for each batch element, the kernel combines `indexCount` table rows
// (vectorSize floats each) selected by the given indices into one output row.
void CCudaMathEngine::LookupAndSum( const CConstIntHandle& indicesHandle, int batchSize, int indexCount,
const CConstFloatHandle& tableHandle, int vectorSize, const CFloatHandle& result )
{
// Every handle must have been created by this engine
ASSERT_EXPR( indicesHandle.GetMathEngine() == this );
ASSERT_EXPR( tableHandle.GetMathEngine() == this );
ASSERT_EXPR( result.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );

// One thread per output element: Y = batch, X = position inside the vector
dim3 blocks;
dim3 threads;
getCudaTaskGrid2D( blocks, threads, batchSize, vectorSize );
LookupAndSumKernel<<<blocks, threads>>>( GetRaw( indicesHandle ), batchSize, indexCount,
GetRaw( tableHandle ), vectorSize, GetRaw( result ) );
}
// Scatters addition vectors back into a lookup table at the positions given by
// the indices. The whole table (vectorCount rows of vectorSize floats) is
// zero-filled first, so the result contains only the accumulated additions.
void CCudaMathEngine::LookupAndAddToTable( const CConstIntHandle& indicesHandle, int batchSize, int indexCount,
const CConstFloatHandle& additionsHandle, int vectorSize, const CFloatHandle& tableHandle, int vectorCount )
{
// All handles must belong to this math engine
ASSERT_EXPR( indicesHandle.GetMathEngine() == this );
ASSERT_EXPR( tableHandle.GetMathEngine() == this );
ASSERT_EXPR( additionsHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Reset the table; the kernel accumulates into it
VectorFill( tableHandle, 0.f, vectorSize * vectorCount );
dim3 blockCount, threadCount;
// 3D grid: X = batch, Y = index within a batch element, Z = position inside a vector
getCudaTaskGrid3D( blockCount, threadCount, batchSize, indexCount, vectorSize );
LookupAndAddToTableKernel<<<blockCount, threadCount>>>( GetRaw( indicesHandle ), batchSize, indexCount,
GetRaw( additionsHandle ), vectorSize, GetRaw( tableHandle ) );
}
// One-hot ("enum binarization") conversion, float-input overload: launches
// EnumBinarizationKernel over the batchSize * enumSize output elements.
void CCudaMathEngine::EnumBinarization(int batchSize,
const CConstFloatHandle& inputHandle, int enumSize, const CFloatHandle& resultHandle)
{
// Both handles must have been created by this engine
ASSERT_EXPR( inputHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );

// 1D grid over all output elements, EnumBinarizationCombine elements per thread
int blocks = 0;
int threads = 0;
getCudaTaskGrid(blocks, threads, batchSize * enumSize, EnumBinarizationCombine);
EnumBinarizationKernel<<<blocks, threads>>>(batchSize,
GetRaw(inputHandle), enumSize, GetRaw(resultHandle));
}
// One-hot ("enum binarization") conversion, int-input overload: launches
// EnumBinarizationKernel over the batchSize * enumSize output elements.
void CCudaMathEngine::EnumBinarization(int batchSize,
const CConstIntHandle& inputHandle, int enumSize, const CFloatHandle& resultHandle)
{
// Both handles must have been created by this engine
ASSERT_EXPR( inputHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );

// 1D grid over all output elements, EnumBinarizationCombine elements per thread
int blocks = 0;
int threads = 0;
getCudaTaskGrid(blocks, threads, batchSize * enumSize, EnumBinarizationCombine);
EnumBinarizationKernel<<<blocks, threads>>>(batchSize,
GetRaw(inputHandle), enumSize, GetRaw(resultHandle));
}
// Shared implementation behind both TransposeMatrix overloads: transposes a batch
// of matrices element-wise via a 1D grid over all batchSize*height*medium*width*channels
// elements. Asserts the result buffer is large enough before launching.
template<class T>
void CCudaMathEngine::transposeMatrixImpl(int batchSize, const CTypedMemoryHandle<const T>& firstHandle,
int height, int medium, int width, int channels, const CTypedMemoryHandle<T>& resultHandle, int resultBufferSize)
{
int size = batchSize * height * medium * width * channels;
ASSERT_EXPR(resultBufferSize >= size);
SetCudaDevice( device->DeviceNumber );
int blockCount;
int threadCount;
// Each thread covers up to TransposeMatrixCombine elements
getCudaTaskGrid(blockCount, threadCount, size, TransposeMatrixCombine);
TransposeMatrixKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(firstHandle),
height, medium, width, channels, GetRaw(resultHandle), size);
}
// Sums (or, with isNeg, subtracts) matrix columns into a per-row result vector.
// Caps the number of X blocks so that at most SumMatrixColumnsMaxAtomic blocks
// write each output row concurrently, then recomputes the per-thread combine count.
void CCudaMathEngine::sumMatrixColumnsKernelFunc(const CFloatHandle& resultHandle, const float* matrix,
int matrixHeight, int matrixWidth, bool isNeg)
{
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
// Each thread initially covers up to SumMatrixColumnsCombine columns
int widthNorm = (matrixWidth + SumMatrixColumnsCombine - 1) / SumMatrixColumnsCombine;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 256, blockCount, threadCount, matrixHeight, widthNorm);
// Limit concurrent writers per row: total atomic budget divided over Y blocks
int maxAtomicPerX = SumMatrixColumnsMaxAtomic / blockCount.y;
if(maxAtomicPerX <= 0) {
maxAtomicPerX = 1;
}
if((int)blockCount.x > maxAtomicPerX) {
blockCount.x = maxAtomicPerX;
}
// After shrinking the grid, recompute how many columns each thread must combine
int totalThreadXCount = threadCount.x * blockCount.x;
int combine = (matrixWidth + totalThreadXCount - 1) / totalThreadXCount;
if( blockCount.x > 1 ) {
// Several X blocks accumulate into the same row, so the result must start at zero
VectorFill(resultHandle, 0, matrixHeight);
}
// One float of shared memory per thread for the in-block reduction
const int sharedSize = threadCount.y * threadCount.x * sizeof(float);
SumMatrixColumnsKernel<<<blockCount, threadCount, sharedSize>>>
(GetRaw(resultHandle), matrix, matrixHeight, matrixWidth, isNeg, widthNorm, combine);
}
// Multiplies a vector by a transposed lookup (embedding) matrix, batched;
// optionally accumulating into the result (isAdd).
void CCudaMathEngine::multiplyVectorByLookupMatrixImpl(int batchSize, const CLookupMatrix& matrix,
const CConstFloatHandle& vectorHandle, const CFloatHandle& resultHandle, int resultSize, bool isAdd)
{
ASSERT_EXPR( vectorHandle.GetMathEngine() == this );
ASSERT_EXPR( resultHandle.GetMathEngine() == this );
ASSERT_EXPR(resultSize >= batchSize * matrix.Width());
SetCudaDevice( device->DeviceNumber );
// Normalize the height by the combine factor, then align to whole warps
// so the kernel can do a warp-level reduction along X
int heightNorm = (matrix.Height() + MultiplyTransposedLookupMatrixByVectorCombine - 1) /
MultiplyTransposedLookupMatrixByVectorCombine;
heightNorm = alignXSizeForWarp(heightNorm);
// X coordinate is Height to allow for warp reduction
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 1024, blockCount, threadCount, batchSize * matrix.Width(), heightNorm);
// NOTE(review): blockCount.x > 0 is always true, so this zero-fills whenever !isAdd.
// If the intent was "only when several X blocks reduce one column" (cf. the
// `blockCount.x > 1` test in sumMatrixColumnsKernelFunc), this should be `> 1` -- confirm.
if(blockCount.x > 0 && !isAdd) {
// Several GPUs may take part in adding up one column, need atomic operations
// Set resultHandle to zeros
VectorFill(resultHandle, 0, batchSize * matrix.Width());
}
// One float of shared memory per thread for the in-block reduction
const int sharedSize = threadCount.x * threadCount.y * sizeof(float);
MultiplyTransposedLookupMatrixByVectorKernel<<<blockCount, threadCount, sharedSize>>>(batchSize,
GetRaw(matrix.Table), matrix.Dims.VectorCount, matrix.Dims.VectorSize, GetRaw(matrix.Rows), matrix.RowCount,
GetRaw(vectorHandle), GetRaw(resultHandle), isAdd, heightNorm);
}
// Shared implementation behind both MatrixSpreadRows overloads: pre-fills the
// result (with zero, or with *fillValue when the handle is non-null), then
// copies each source row to the result row selected by `index`.
template<class T>
void CCudaMathEngine::matrixSpreadRowsImpl(const T* source, int height, int width,
CTypedMemoryHandle<T> result, int resultHeight, const int* index, const CTypedMemoryHandle<const T>& fillValue)
{
SetCudaDevice( device->DeviceNumber );
if(fillValue.IsNull()) {
VectorFill( result, 0, resultHeight * width);
} else {
// NOTE(review): different VectorFill overload -- (result, size, valueHandle);
// argument order intentionally differs from the scalar-0 call above.
VectorFill( result, resultHeight * width, fillValue);
}
// Each thread covers up to MatrixSpreadRowsCombine columns
int widthNorm = (width + MatrixSpreadRowsCombine - 1) / MatrixSpreadRowsCombine;
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, height, widthNorm);
MatrixSpreadRowsKernel<T><<<blockCount, threadCount>>>(source, height, width,
GetRaw( result ), index, widthNorm);
}
// Multichannel embedding lookup: for the first lookupCount input channels, replaces
// each value with the corresponding lookup-table row (one kernel launch per table);
// any remaining input channels are copied through unchanged.
template<class T>
void CCudaMathEngine::vectorMultichannelLookupAndCopy(int batchSize, int channelCount, const CTypedMemoryHandle<const T>& inputHandle,
const CConstFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
const CFloatHandle& outputHandle, int outputChannelsCount)
{
SetCudaDevice( device->DeviceNumber );
// Each thread covers up to BatchVectorLookupAndCopyCombineBatch batch elements
int batchNorm = (batchSize + BatchVectorLookupAndCopyCombineBatch - 1) / BatchVectorLookupAndCopyCombineBatch;
int outputChannel = 0; // running offset into the output channels
for(int j = 0; j < lookupCount; ++j) {
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, batchNorm, lookupDimensions[j].VectorSize);
// Channel j of the input selects a row of table j; the row is written at outputChannel
VectorChannelLookupAndCopyKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(inputHandle) + j, channelCount,
GetRaw(lookupHandles[j]), lookupDimensions[j].VectorSize, GetRaw(outputHandle) + outputChannel, outputChannelsCount, batchNorm);
outputChannel += lookupDimensions[j].VectorSize;
}
if(lookupCount < channelCount) {
// Pass through the channels that have no lookup table
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, batchNorm, channelCount - lookupCount);
BatchVectorChannelCopyKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(inputHandle) + lookupCount, channelCount, channelCount - lookupCount,
GetRaw(outputHandle) + outputChannel, outputChannelsCount, batchNorm);
}
}
// Multichannel backward pass of the lookup: for each table, accumulates rows of the
// matrix (scaled by *multHandle) back into the table entries selected by the input indices.
template<class T>
void CCudaMathEngine::vectorMultichannelLookupAndAddToTable(int batchSize, int channelCount, const CTypedMemoryHandle<const T>& inputHandle,
const CFloatHandle* lookupHandles, const CLookupDimension* lookupDimensions, int lookupCount,
const CConstFloatHandle& multHandle, const CConstFloatHandle& matrixHandle, int outputChannelsCount)
{
SetCudaDevice( device->DeviceNumber );
// Each thread covers up to BatchVectorLookupAndAddToTableCombine batch elements
int batchNorm = (batchSize + BatchVectorLookupAndAddToTableCombine - 1) / BatchVectorLookupAndAddToTableCombine;
// NOTE(review): GetValue() presumably reads the scalar from device memory
// (a synchronizing transfer) -- confirm against the handle implementation.
float mult = multHandle.GetValue();
int outputChannel = 0; // running offset into the matrix columns
for (int j = 0; j < lookupCount; ++j) {
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, batchNorm, lookupDimensions[j].VectorSize);
VectorChannelLookupAndAddToTableKernel<<<blockCount, threadCount>>>(batchSize, GetRaw(inputHandle) + j, channelCount,
GetRaw(lookupHandles[j]), lookupDimensions[j].VectorSize, mult, GetRaw(matrixHandle) + outputChannel, outputChannelsCount, batchNorm);
outputChannel += lookupDimensions[j].VectorSize;
}
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
|
66cb7314d839d75074474348f84133878635f196.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/magma_zmatrixtools_gpu.cu, normal z -> d, Mon Jun 25 18:24:26 2018
*/
#include "magmasparse_internal.h"
#define PRECISION_d
#define SWAP(a, b) { tmp = a; a = b; b = tmp; }
// Device kernel: zero-fills the double array dval, one thread per entry.
__global__ void
magma_dvalinit_kernel(
    const magma_int_t num_el,
    magmaDouble_ptr dval)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= num_el)
        return;                 // tail guard: last block may be partially filled
    dval[idx] = MAGMA_D_MAKE(0.0, 0.0);
}
/**
Purpose
-------
Initializes a device array with zero.
Arguments
---------
@param[in]
num_el magma_int_t
size of array
@param[in,out]
dval magmaDouble_ptr
array to initialize
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
// Host wrapper: zero-initializes a device array of num_el doubles on the
// given queue's stream. Always returns MAGMA_SUCCESS.
extern "C" magma_int_t
magma_dvalinit_gpu(
    magma_int_t num_el,
    magmaDouble_ptr dval,
    magma_queue_t queue)
{
    const int threads = 128;                       // 1D launch, 128 threads per block
    dim3 block(threads, 1, 1);
    dim3 grid(magma_ceildiv(num_el, threads), 1, 1);
    hipLaunchKernelGGL(( magma_dvalinit_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                        num_el, dval);
    return MAGMA_SUCCESS;
}
// Device kernel: zero-fills the index array dind, one thread per entry.
__global__ void
magma_dindexinit_kernel(
    const magma_int_t num_el,
    magmaIndex_ptr dind)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= num_el)
        return;                 // tail guard: last block may be partially filled
    dind[idx] = 0;
}
/**
Purpose
-------
Initializes a device array with zero.
Arguments
---------
@param[in]
num_el magma_int_t
size of array
@param[in,out]
dind magmaIndex_ptr
array to initialize
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
// Host wrapper: zero-initializes a device index array of num_el entries on the
// given queue's stream. Always returns MAGMA_SUCCESS.
extern "C" magma_int_t
magma_dindexinit_gpu(
    magma_int_t num_el,
    magmaIndex_ptr dind,
    magma_queue_t queue)
{
    const int threads = 128;                       // 1D launch, 128 threads per block
    dim3 block(threads, 1, 1);
    dim3 grid(magma_ceildiv(num_el, threads), 1, 1);
    hipLaunchKernelGGL(( magma_dindexinit_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                        num_el, dind);
    return MAGMA_SUCCESS;
}
// Counts, for each row, the number of entries in the union of the sparsity
// patterns of CSR matrices A and B: a merge of the two (sorted) column-index
// lists. A column index of -1 marks a dummy entry and is skipped without
// being counted. The per-row counts are written to `inserted`.
__global__ void
magma_dmatrixcup_count(
const magma_int_t num_rows,
const magma_index_t* A_row,
const magma_index_t* A_col,
const magma_index_t* B_row,
const magma_index_t* B_col,
magma_index_t* inserted)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
int add = 0;
int a = A_row[row];
int b = B_row[row];
int enda = A_row[ row+1 ];
int endb = B_row[ row+1 ];
int acol;
int bcol;
// Merge step: advance through both rows while each still has entries
if (a<enda && b<endb) {
do{
acol = A_col[ a ];
bcol = B_col[ b ];
if(acol == -1) { // stop in case acol = -1
a++;
}
else if(bcol == -1) { // stop in case bcol = -1
b++;
}
else if(acol == bcol) {
// shared column counts once
add++;
a++;
b++;
}
else if(acol<bcol) {
add++;
a++;
}
else {
add++;
b++;
}
}while(a<enda && b<endb);
}
// now the rest - if existing: whichever row still has entries left
if(a<enda) {
do{
add++;
a++;
}while(a<enda);
}
if(b<endb) {
do{
add++;
b++;
}while(b<endb);
}
inserted[ row ] = add;
}
}
// Fills the union matrix U = A cup B using the row pointers already stored in
// U_row (computed from the counts of magma_dmatrixcup_count). Where both A and
// B have an entry at the same column, the value of A wins. Column index -1
// marks a dummy entry and is skipped. Also writes the COO-style row index of
// each entry into U_rowidx.
__global__ void
magma_dmatrixcup_fill(
const magma_int_t num_rows,
const magma_index_t* A_row,
const magma_index_t* A_col,
const double* A_val,
const magma_index_t* B_row,
const magma_index_t* B_col,
const double* B_val,
magma_index_t* U_row,
magma_index_t* U_rowidx,
magma_index_t* U_col,
double* U_val)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
int add = 0;
int offset = U_row[row]; // start of this row in the output arrays
int a = A_row[row];
int b = B_row[row];
int enda = A_row[ row+1 ];
int endb = B_row[ row+1 ];
int acol;
int bcol;
// Merge step: advance through both rows while each still has entries
if (a<enda && b<endb) {
do{
acol = A_col[ a ];
bcol = B_col[ b ];
if(acol == -1) { // stop in case acol = -1
a++;
}
else if(bcol == -1) { // stop in case bcol = -1
b++;
}
else if(acol == bcol) {
// both matrices have this column: A's value takes precedence
U_col[ offset + add ] = acol;
U_rowidx[ offset + add ] = row;
U_val[ offset + add ] = A_val[ a ];
add++;
a++;
b++;
}
else if(acol<bcol) {
U_col[ offset + add ] = acol;
U_rowidx[ offset + add ] = row;
U_val[ offset + add ] = A_val[ a ];
add++;
a++;
}
else {
U_col[ offset + add ] = bcol;
U_rowidx[ offset + add ] = row;
U_val[ offset + add ] = B_val[ b ];
add++;
b++;
}
}while(a<enda && b<endb);
}
// now the rest - if existing: copy the tail of whichever row remains
if(a<enda) {
do{
acol = A_col[ a ];
U_col[ offset + add ] = acol;
U_rowidx[ offset + add ] = row;
U_val[ offset + add ] = A_val[ a ];
add++;
a++;
}while(a<enda);
}
if(b<endb) {
do{
bcol = B_col[ b ];
U_col[ offset + add ] = bcol;
U_rowidx[ offset + add ] = row;
U_val[ offset + add ] = B_val[ b ];
add++;
b++;
}while(b<endb);
}
}
}
/***************************************************************************//**
Purpose
-------
Generates a matrix U = A \cup B. If both matrices have a nonzero value
in the same location, the value of A is used.
This is the GPU version of the operation.
Arguments
---------
@param[in]
A magma_d_matrix
Input matrix 1.
@param[in]
B magma_d_matrix
Input matrix 2.
@param[out]
U magma_d_matrix*
U = A \cup B. If both matrices have a nonzero value
in the same location, the value of A is used.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
*******************************************************************************/
extern "C" magma_int_t
magma_dmatrix_cup_gpu(
magma_d_matrix A,
magma_d_matrix B,
magma_d_matrix *U,
magma_queue_t queue)
{
magma_int_t info = 0;
// Both inputs must have the same row count
assert(A.num_rows == B.num_rows);
magma_int_t num_rows = A.num_rows;
U->num_rows = num_rows;
U->num_cols = A.num_cols;
U->storage_type = Magma_CSR;
U->memory_location = Magma_DEV;
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid11 = magma_ceildiv(num_rows, blocksize1 );
int dimgrid12 = 1;
int dimgrid13 = 1;
dim3 grid1(dimgrid11, dimgrid12, dimgrid13 );
dim3 block1(blocksize1, blocksize2, 1 );
magmaIndex_ptr inserted = NULL;
// CHECK jumps to cleanup on failure.
// NOTE(review): on an early failure, U members allocated so far are not
// released here -- presumably the caller frees U; confirm.
CHECK(magma_index_malloc(&U->drow, num_rows+1));
CHECK(magma_index_malloc(&inserted, num_rows));
CHECK(magma_dindexinit_gpu(num_rows, inserted, queue));
// Pass 1: count union entries per row
hipLaunchKernelGGL(( magma_dmatrixcup_count), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
num_rows, A.drow, A.dcol, B.drow, B.dcol, inserted);
// Prefix-sum the counts into U->drow and obtain the total nnz
CHECK(magma_dget_row_ptr(num_rows, &U->nnz, inserted, U->drow, queue));
CHECK(magma_dmalloc(&U->dval, U->nnz));
CHECK(magma_index_malloc(&U->drowidx, U->nnz));
CHECK(magma_index_malloc(&U->dcol, U->nnz));
// Pass 2: fill columns/values (A wins on shared positions)
hipLaunchKernelGGL(( magma_dmatrixcup_fill), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
num_rows, A.drow, A.dcol, A.dval, B.drow, B.dcol, B.dval,
U->drow, U->drowidx, U->dcol, U->dval);
cleanup:
magma_free(inserted);
return info;
}
/***************************************************************************//**
    Purpose
    -------
    Sorts the column indices within every row of the CSR matrix A into
    increasing order, reordering the stored values to match. The matrix is
    modified in place.
    This is the GPU version of the operation.
    Arguments
    ---------
    @param[in,out]
    A           magma_d_matrix*
                matrix to sort, in CSR format, located on the GPU.
    @param[in]
    queue       magma_queue_t
                Queue to execute in.
    @ingroup magmasparse_daux
*******************************************************************************/
// Sorts each row of the CSR matrix A by column index via hipsparseXcsrsort and
// gathers the values into matching order; A is modified in place.
// Fix: the sort work buffer (pBuffer) and the permutation vector (P) were
// allocated with hipMalloc but never released; they are now NULL-initialized
// and freed in cleanup, so early exits stay safe.
extern "C" magma_int_t
magma_dcsr_sort_gpu(
    magma_d_matrix *A,
    magma_queue_t queue)
{
    magma_int_t info = 0;
    hipsparseHandle_t handle = NULL;
    hipsparseMatDescr_t descrA = NULL;
    magmaDouble_ptr tmp = NULL, csrVal_sorted = NULL; // tmp is required by the SWAP macro
    char *pBuffer = NULL;     // hipsparse sort work buffer
    int *P = NULL;            // permutation produced by the sort
    size_t pBufferSizeInBytes;
    CHECK_CUSPARSE( hipsparseCreate( &handle ));
    CHECK_CUSPARSE( hipsparseSetStream( handle, queue->cuda_stream() ));
    CHECK_CUSPARSE( hipsparseCreateMatDescr( &descrA ));
    CHECK_CUSPARSE( hipsparseSetMatType( descrA,
                                         HIPSPARSE_MATRIX_TYPE_GENERAL ));
    CHECK_CUSPARSE( hipsparseSetMatDiagType( descrA,
                                             HIPSPARSE_DIAG_TYPE_NON_UNIT ));
    CHECK_CUSPARSE( hipsparseSetMatIndexBase( descrA,
                                              HIPSPARSE_INDEX_BASE_ZERO ));
    CHECK(magma_dmalloc(&csrVal_sorted, A->nnz));
    // step 1: allocate buffer
    hipsparseXcsrsort_bufferSizeExt(handle, A->num_rows, A->num_cols,
                                    A->nnz, A->drow, A->dcol, &pBufferSizeInBytes);
    hipMalloc( &pBuffer, sizeof(char)* pBufferSizeInBytes);
    // step 2: setup permutation vector P to identity
    hipMalloc( (void**)&P, sizeof(int)*A->nnz);
    hipsparseCreateIdentityPermutation(handle, A->nnz, P);
    // step 3: sort CSR format (drow/dcol in place, permutation recorded in P)
    hipsparseXcsrsort(handle, A->num_rows, A->num_cols, A->nnz,
                      descrA, A->drow, A->dcol, P, pBuffer);
    // step 4: gather the values into sorted order, then swap into A
    hipsparseDgthr(handle, A->nnz, A->dval, csrVal_sorted, P,
                   HIPSPARSE_INDEX_BASE_ZERO);
    SWAP(A->dval, csrVal_sorted);
cleanup:
    hipsparseDestroyMatDescr( descrA );
    hipsparseDestroy( handle );
    hipFree( pBuffer );           // fix: was leaked
    hipFree( P );                 // fix: was leaked
    magma_free(csrVal_sorted);    // frees the old dval after the swap
    return info;
} | 66cb7314d839d75074474348f84133878635f196.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/magma_zmatrixtools_gpu.cu, normal z -> d, Mon Jun 25 18:24:26 2018
*/
#include "magmasparse_internal.h"
#define PRECISION_d
#define SWAP(a, b) { tmp = a; a = b; b = tmp; }
// Device kernel: zero-fills the double array dval, one thread per entry.
__global__ void
magma_dvalinit_kernel(
    const magma_int_t num_el,
    magmaDouble_ptr dval)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= num_el)
        return;                 // tail guard: last block may be partially filled
    dval[idx] = MAGMA_D_MAKE(0.0, 0.0);
}
/**
Purpose
-------
Initializes a device array with zero.
Arguments
---------
@param[in]
num_el magma_int_t
size of array
@param[in,out]
dval magmaDouble_ptr
array to initialize
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
// Host wrapper: zero-initializes a device array of num_el doubles on the
// given queue's stream. Always returns MAGMA_SUCCESS.
extern "C" magma_int_t
magma_dvalinit_gpu(
    magma_int_t num_el,
    magmaDouble_ptr dval,
    magma_queue_t queue)
{
    const int threads = 128;                       // 1D launch, 128 threads per block
    dim3 block(threads, 1, 1);
    dim3 grid(magma_ceildiv(num_el, threads), 1, 1);
    magma_dvalinit_kernel<<< grid, block, 0, queue->cuda_stream() >>>
        (num_el, dval);
    return MAGMA_SUCCESS;
}
// Device kernel: zero-fills the index array dind, one thread per entry.
__global__ void
magma_dindexinit_kernel(
    const magma_int_t num_el,
    magmaIndex_ptr dind)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= num_el)
        return;                 // tail guard: last block may be partially filled
    dind[idx] = 0;
}
/**
Purpose
-------
Initializes a device array with zero.
Arguments
---------
@param[in]
num_el magma_int_t
size of array
@param[in,out]
dind magmaIndex_ptr
array to initialize
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
// Host wrapper: zero-initializes a device index array of num_el entries on the
// given queue's stream. Always returns MAGMA_SUCCESS.
extern "C" magma_int_t
magma_dindexinit_gpu(
    magma_int_t num_el,
    magmaIndex_ptr dind,
    magma_queue_t queue)
{
    const int threads = 128;                       // 1D launch, 128 threads per block
    dim3 block(threads, 1, 1);
    dim3 grid(magma_ceildiv(num_el, threads), 1, 1);
    magma_dindexinit_kernel<<< grid, block, 0, queue->cuda_stream() >>>
        (num_el, dind);
    return MAGMA_SUCCESS;
}
// Counts, for each row, the number of entries in the union of the sparsity
// patterns of CSR matrices A and B: a merge of the two (sorted) column-index
// lists. A column index of -1 marks a dummy entry and is skipped without
// being counted. The per-row counts are written to `inserted`.
__global__ void
magma_dmatrixcup_count(
const magma_int_t num_rows,
const magma_index_t* A_row,
const magma_index_t* A_col,
const magma_index_t* B_row,
const magma_index_t* B_col,
magma_index_t* inserted)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
int add = 0;
int a = A_row[row];
int b = B_row[row];
int enda = A_row[ row+1 ];
int endb = B_row[ row+1 ];
int acol;
int bcol;
// Merge step: advance through both rows while each still has entries
if (a<enda && b<endb) {
do{
acol = A_col[ a ];
bcol = B_col[ b ];
if(acol == -1) { // stop in case acol = -1
a++;
}
else if(bcol == -1) { // stop in case bcol = -1
b++;
}
else if(acol == bcol) {
// shared column counts once
add++;
a++;
b++;
}
else if(acol<bcol) {
add++;
a++;
}
else {
add++;
b++;
}
}while(a<enda && b<endb);
}
// now the rest - if existing: whichever row still has entries left
if(a<enda) {
do{
add++;
a++;
}while(a<enda);
}
if(b<endb) {
do{
add++;
b++;
}while(b<endb);
}
inserted[ row ] = add;
}
}
// Fills the union matrix U = A cup B using the row pointers already stored in
// U_row (computed from the counts of magma_dmatrixcup_count). Where both A and
// B have an entry at the same column, the value of A wins. Column index -1
// marks a dummy entry and is skipped. Also writes the COO-style row index of
// each entry into U_rowidx.
__global__ void
magma_dmatrixcup_fill(
const magma_int_t num_rows,
const magma_index_t* A_row,
const magma_index_t* A_col,
const double* A_val,
const magma_index_t* B_row,
const magma_index_t* B_col,
const double* B_val,
magma_index_t* U_row,
magma_index_t* U_rowidx,
magma_index_t* U_col,
double* U_val)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
int add = 0;
int offset = U_row[row]; // start of this row in the output arrays
int a = A_row[row];
int b = B_row[row];
int enda = A_row[ row+1 ];
int endb = B_row[ row+1 ];
int acol;
int bcol;
// Merge step: advance through both rows while each still has entries
if (a<enda && b<endb) {
do{
acol = A_col[ a ];
bcol = B_col[ b ];
if(acol == -1) { // stop in case acol = -1
a++;
}
else if(bcol == -1) { // stop in case bcol = -1
b++;
}
else if(acol == bcol) {
// both matrices have this column: A's value takes precedence
U_col[ offset + add ] = acol;
U_rowidx[ offset + add ] = row;
U_val[ offset + add ] = A_val[ a ];
add++;
a++;
b++;
}
else if(acol<bcol) {
U_col[ offset + add ] = acol;
U_rowidx[ offset + add ] = row;
U_val[ offset + add ] = A_val[ a ];
add++;
a++;
}
else {
U_col[ offset + add ] = bcol;
U_rowidx[ offset + add ] = row;
U_val[ offset + add ] = B_val[ b ];
add++;
b++;
}
}while(a<enda && b<endb);
}
// now the rest - if existing: copy the tail of whichever row remains
if(a<enda) {
do{
acol = A_col[ a ];
U_col[ offset + add ] = acol;
U_rowidx[ offset + add ] = row;
U_val[ offset + add ] = A_val[ a ];
add++;
a++;
}while(a<enda);
}
if(b<endb) {
do{
bcol = B_col[ b ];
U_col[ offset + add ] = bcol;
U_rowidx[ offset + add ] = row;
U_val[ offset + add ] = B_val[ b ];
add++;
b++;
}while(b<endb);
}
}
}
/***************************************************************************//**
Purpose
-------
Generates a matrix U = A \cup B. If both matrices have a nonzero value
in the same location, the value of A is used.
This is the GPU version of the operation.
Arguments
---------
@param[in]
A magma_d_matrix
Input matrix 1.
@param[in]
B magma_d_matrix
Input matrix 2.
@param[out]
U magma_d_matrix*
U = A \cup B. If both matrices have a nonzero value
in the same location, the value of A is used.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
*******************************************************************************/
extern "C" magma_int_t
magma_dmatrix_cup_gpu(
    magma_d_matrix A,
    magma_d_matrix B,
    magma_d_matrix *U,
    magma_queue_t queue)
{
    magma_int_t info = 0;
    // Both inputs must have the same row count
    assert(A.num_rows == B.num_rows);
    magma_int_t num_rows = A.num_rows;
    U->num_rows = num_rows;
    U->num_cols = A.num_cols;
    U->storage_type = Magma_CSR;
    U->memory_location = Magma_DEV;
    int blocksize1 = 128;
    int blocksize2 = 1;
    int dimgrid11 = magma_ceildiv(num_rows, blocksize1 );
    int dimgrid12 = 1;
    int dimgrid13 = 1;
    dim3 grid1(dimgrid11, dimgrid12, dimgrid13 );
    dim3 block1(blocksize1, blocksize2, 1 );
    magmaIndex_ptr inserted = NULL;
    // CHECK jumps to cleanup on failure.
    // NOTE(review): on an early failure, U members allocated so far are not
    // released here -- presumably the caller frees U; confirm.
    CHECK(magma_index_malloc(&U->drow, num_rows+1));
    CHECK(magma_index_malloc(&inserted, num_rows));
    CHECK(magma_dindexinit_gpu(num_rows, inserted, queue));
    // Pass 1: count union entries per row
    magma_dmatrixcup_count<<<grid1, block1, 0, queue->cuda_stream()>>>
        (num_rows, A.drow, A.dcol, B.drow, B.dcol, inserted);
    // Prefix-sum the counts into U->drow and obtain the total nnz
    CHECK(magma_dget_row_ptr(num_rows, &U->nnz, inserted, U->drow, queue));
    CHECK(magma_dmalloc(&U->dval, U->nnz));
    CHECK(magma_index_malloc(&U->drowidx, U->nnz));
    CHECK(magma_index_malloc(&U->dcol, U->nnz));
    // Pass 2: fill columns/values (A wins on shared positions)
    magma_dmatrixcup_fill<<<grid1, block1, 0, queue->cuda_stream()>>>
        (num_rows, A.drow, A.dcol, A.dval, B.drow, B.dcol, B.dval,
         U->drow, U->drowidx, U->dcol, U->dval);
cleanup:
    magma_free(inserted);
    return info;
}
/***************************************************************************//**
    Purpose
    -------
    Sorts the column indices within every row of the CSR matrix A into
    increasing order, reordering the stored values to match. The matrix is
    modified in place.
    This is the GPU version of the operation.
    Arguments
    ---------
    @param[in,out]
    A           magma_d_matrix*
                matrix to sort, in CSR format, located on the GPU.
    @param[in]
    queue       magma_queue_t
                Queue to execute in.
    @ingroup magmasparse_daux
*******************************************************************************/
// Sorts each row of the CSR matrix A by column index via cusparseXcsrsort and
// gathers the values into matching order; A is modified in place.
// Fix: the sort work buffer (pBuffer) and the permutation vector (P) were
// allocated with cudaMalloc but never released; they are now NULL-initialized
// and freed in cleanup, so early exits stay safe.
extern "C" magma_int_t
magma_dcsr_sort_gpu(
    magma_d_matrix *A,
    magma_queue_t queue)
{
    magma_int_t info = 0;
    cusparseHandle_t handle = NULL;
    cusparseMatDescr_t descrA = NULL;
    magmaDouble_ptr tmp = NULL, csrVal_sorted = NULL; // tmp is required by the SWAP macro
    char *pBuffer = NULL;     // cusparse sort work buffer
    int *P = NULL;            // permutation produced by the sort
    size_t pBufferSizeInBytes;
    CHECK_CUSPARSE( cusparseCreate( &handle ));
    CHECK_CUSPARSE( cusparseSetStream( handle, queue->cuda_stream() ));
    CHECK_CUSPARSE( cusparseCreateMatDescr( &descrA ));
    CHECK_CUSPARSE( cusparseSetMatType( descrA,
                                        CUSPARSE_MATRIX_TYPE_GENERAL ));
    CHECK_CUSPARSE( cusparseSetMatDiagType( descrA,
                                            CUSPARSE_DIAG_TYPE_NON_UNIT ));
    CHECK_CUSPARSE( cusparseSetMatIndexBase( descrA,
                                             CUSPARSE_INDEX_BASE_ZERO ));
    CHECK(magma_dmalloc(&csrVal_sorted, A->nnz));
    // step 1: allocate buffer
    cusparseXcsrsort_bufferSizeExt(handle, A->num_rows, A->num_cols,
                                   A->nnz, A->drow, A->dcol, &pBufferSizeInBytes);
    cudaMalloc( &pBuffer, sizeof(char)* pBufferSizeInBytes);
    // step 2: setup permutation vector P to identity
    cudaMalloc( (void**)&P, sizeof(int)*A->nnz);
    cusparseCreateIdentityPermutation(handle, A->nnz, P);
    // step 3: sort CSR format (drow/dcol in place, permutation recorded in P)
    cusparseXcsrsort(handle, A->num_rows, A->num_cols, A->nnz,
                     descrA, A->drow, A->dcol, P, pBuffer);
    // step 4: gather the values into sorted order, then swap into A
    cusparseDgthr(handle, A->nnz, A->dval, csrVal_sorted, P,
                  CUSPARSE_INDEX_BASE_ZERO);
    SWAP(A->dval, csrVal_sorted);
cleanup:
    cusparseDestroyMatDescr( descrA );
    cusparseDestroy( handle );
    cudaFree( pBuffer );          // fix: was leaked
    cudaFree( P );                // fix: was leaked
    magma_free(csrVal_sorted);    // frees the old dval after the swap
    return info;
}
193fddaa4964a9c15a1571ef3ec6903a6ff9ef6b.hip | // !!! This is a file automatically generated by hipify!!!
/* Host code for the Jacobi method of solving a system of linear equations
* by iteration.
* Build as follws: make clean && make
* Author: Naga Kandasamy
* Date modified: May 21, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "jacobi_iteration.h"
/* Include the kernel code */
#include "jacobi_iteration_kernel.hip"
/* Uncomment the line below if you want the code to spit out debug information. */
/* #define DEBUG */
// Entry point: builds a random diagonally dominant system, solves it with
// Jacobi iteration on the CPU (reference) and on the GPU (naive and optimized
// kernels), and prints convergence statistics for each solution.
int main(int argc, char **argv)
{
if (argc > 1) {
printf("This program accepts no arguments\n");
exit(EXIT_FAILURE);
}
matrix_t A; /* N x N constant matrix */
matrix_t B; /* N x 1 b matrix */
matrix_t reference_x; /* Reference solution */
matrix_t gpu_naive_solution_x; /* Solution computed by naive kernel */
matrix_t gpu_opt_solution_x; /* Solution computed by optimized kernel */
/* Initialize the random number generator */
srand(time(NULL));
/* Generate diagonally dominant matrix */
printf("\nGenerating %d x %d system\n", MATRIX_SIZE, MATRIX_SIZE);
A = create_diagonally_dominant_matrix(MATRIX_SIZE, MATRIX_SIZE);
if (A.elements == NULL) {
printf("Error creating matrix\n");
exit(EXIT_FAILURE);
}
/* Create the other vectors */
B = allocate_matrix_on_host(MATRIX_SIZE, 1, 1);
reference_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
gpu_naive_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
gpu_opt_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
#ifdef DEBUG
print_matrix(A);
print_matrix(B);
print_matrix(reference_x);
#endif
/* Compute Jacobi solution on CPU */
printf("\nPerforming Jacobi iteration on the CPU\n");
compute_gold(A, reference_x, B);
display_jacobi_solution(A, reference_x, B); /* Display statistics */
/* Compute Jacobi solution on device. Solutions are returned
in gpu_naive_solution_x and gpu_opt_solution_x. */
printf("\nPerforming Jacobi iteration on device\n");
compute_on_device(A, gpu_naive_solution_x, gpu_opt_solution_x, B);
display_jacobi_solution(A, gpu_naive_solution_x, B); /* Display statistics */
display_jacobi_solution(A, gpu_opt_solution_x, B);
/* Release host-side buffers before exiting */
free(A.elements);
free(B.elements);
free(reference_x.elements);
free(gpu_naive_solution_x.elements);
free(gpu_opt_solution_x.elements);
exit(EXIT_SUCCESS);
}
void naive_method(const matrix_t A, matrix_t gpu_naive_sol_x, const matrix_t B)
{
matrix_t Ad = allocate_matrix_on_device(A);
copy_matrix_to_device(Ad, A);
matrix_t Bd = allocate_matrix_on_device(B);
copy_matrix_to_device(Bd, B);
matrix_t = allocate_matrix_on_device(gpu_naive_sol_x);
matrix_t new_Xd = allocate_matrix_on_device(gpu_naive_sol_x);
unsigned int done = 0;
double ssd, mse;
unsigned int num_iter = 0;
int BLOCK_SIZE = 32;
dim3 thread_block(BLOCK_SIZE, BLOCK_SIZE, 1); /* Set number of threads in thread block */
dim3 grid(1,1);
while (!done) {
for (i = 0; i < num_rows; i++) {
double sum = -A.elements[i * num_cols + i] * Xd.elements[i];
for (j = 0; j < num_cols; j++) {
sum += A.elements[i * num_cols + j] * Xd.elements[j];
}
/* Update values for the unkowns for the current row. */
new_Xd.elements[i] = (B.elements[i] - sum)/A.elements[i * num_cols + i];
}
// jacobi_iteration_kernel_naive<<<grid, thread_block>>>(Ad.elements, Xd.elements, Bd.elements, Ad.num_rows);
// hipDeviceSynchronize();
/* Check for convergence and update the unknowns. */
ssd = 0.0;
for (i = 0; i < num_rows; i++) {
ssd += (new_Xd.elements[i] - Xd.elements[i]) * (new_Xd.elements[i] - Xd.elements[i]);
Xd.elements[i] = new_Xd.elements[i];
}
num_iter++;
mse = sqrt (ssd); /* Mean squared error. */
printf("Iteration: %d. MSE = %f\n", num_iter, mse);
if (mse <= THRESHOLD || num_iter > 1000)
done = 1;
}
copy_matrix_from_device(Xd, gpu_naive_sol_x);
cudaF
}
/* FIXME: Complete this function to perform Jacobi calculation on device */
void compute_on_device(const matrix_t A, matrix_t gpu_naive_sol_x,
matrix_t gpu_opt_sol_x, const matrix_t B)
{
naive_method(A, gpu_naive_sol_x, B);
return;
}
/* Allocate matrix on the device of same size as M */
matrix_t allocate_matrix_on_device(const matrix_t M)
{
matrix_t Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
hipMalloc((void **)&Mdevice.elements, size);
return Mdevice;
}
/* Allocate a matrix of dimensions height * width.
If init == 0, initialize to all zeroes.
If init == 1, perform random initialization.
*/
matrix_t allocate_matrix_on_host(int num_rows, int num_columns, int init)
{
matrix_t M;
M.num_columns = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float *)malloc(size * sizeof(float));
for (unsigned int i = 0; i < size; i++) {
if (init == 0)
M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
/* Copy matrix to device */
void copy_matrix_to_device(matrix_t Mdevice, const matrix_t Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
return;
}
/* Copy matrix from device to host */
void copy_matrix_from_device(matrix_t Mhost, const matrix_t Mdevice)
{
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
return;
}
/* Prints the matrix out to screen */
void print_matrix(const matrix_t M)
{
for (unsigned int i = 0; i < M.num_rows; i++) {
for (unsigned int j = 0; j < M.num_columns; j++) {
printf("%f ", M.elements[i * M.num_columns + j]);
}
printf("\n");
}
printf("\n");
return;
}
/* Returns a floating-point value between [min, max] */
float get_random_number(int min, int max)
{
float r = rand()/(float)RAND_MAX;
return (float)floor((double)(min + (max - min + 1) * r));
}
/* Check for errors in kernel execution */
void check_CUDA_error(const char *msg)
{
hipError_t err = hipGetLastError();
if ( hipSuccess != err) {
printf("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return;
}
/* Create diagonally dominant matrix */
matrix_t create_diagonally_dominant_matrix(unsigned int num_rows, unsigned int num_columns)
{
matrix_t M;
M.num_columns = num_columns;
M.num_rows = num_rows;
unsigned int size = M.num_rows * M.num_columns;
M.elements = (float *)malloc(size * sizeof(float));
if (M.elements == NULL)
return M;
/* Create a matrix with random numbers between [-.5 and .5] */
unsigned int i, j;
for (i = 0; i < size; i++)
M.elements[i] = get_random_number (MIN_NUMBER, MAX_NUMBER);
/* Make diagonal entries large with respect to the entries on each row. */
for (i = 0; i < num_rows; i++) {
float row_sum = 0.0;
for (j = 0; j < num_columns; j++) {
row_sum += fabs(M.elements[i * M.num_rows + j]);
}
M.elements[i * M.num_rows + i] = 0.5 + row_sum;
}
return M;
}
| 193fddaa4964a9c15a1571ef3ec6903a6ff9ef6b.cu | /* Host code for the Jacobi method of solving a system of linear equations
* by iteration.
* Build as follws: make clean && make
* Author: Naga Kandasamy
* Date modified: May 21, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include "jacobi_iteration.h"
/* Include the kernel code */
#include "jacobi_iteration_kernel.cu"
/* Uncomment the line below if you want the code to spit out debug information. */
/* #define DEBUG */
int main(int argc, char **argv)
{
if (argc > 1) {
printf("This program accepts no arguments\n");
exit(EXIT_FAILURE);
}
matrix_t A; /* N x N constant matrix */
matrix_t B; /* N x 1 b matrix */
matrix_t reference_x; /* Reference solution */
matrix_t gpu_naive_solution_x; /* Solution computed by naive kernel */
matrix_t gpu_opt_solution_x; /* Solution computed by optimized kernel */
/* Initialize the random number generator */
srand(time(NULL));
/* Generate diagonally dominant matrix */
printf("\nGenerating %d x %d system\n", MATRIX_SIZE, MATRIX_SIZE);
A = create_diagonally_dominant_matrix(MATRIX_SIZE, MATRIX_SIZE);
if (A.elements == NULL) {
printf("Error creating matrix\n");
exit(EXIT_FAILURE);
}
/* Create the other vectors */
B = allocate_matrix_on_host(MATRIX_SIZE, 1, 1);
reference_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
gpu_naive_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
gpu_opt_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
#ifdef DEBUG
print_matrix(A);
print_matrix(B);
print_matrix(reference_x);
#endif
/* Compute Jacobi solution on CPU */
printf("\nPerforming Jacobi iteration on the CPU\n");
compute_gold(A, reference_x, B);
display_jacobi_solution(A, reference_x, B); /* Display statistics */
/* Compute Jacobi solution on device. Solutions are returned
in gpu_naive_solution_x and gpu_opt_solution_x. */
printf("\nPerforming Jacobi iteration on device\n");
compute_on_device(A, gpu_naive_solution_x, gpu_opt_solution_x, B);
display_jacobi_solution(A, gpu_naive_solution_x, B); /* Display statistics */
display_jacobi_solution(A, gpu_opt_solution_x, B);
free(A.elements);
free(B.elements);
free(reference_x.elements);
free(gpu_naive_solution_x.elements);
free(gpu_opt_solution_x.elements);
exit(EXIT_SUCCESS);
}
void naive_method(const matrix_t A, matrix_t gpu_naive_sol_x, const matrix_t B)
{
matrix_t Ad = allocate_matrix_on_device(A);
copy_matrix_to_device(Ad, A);
matrix_t Bd = allocate_matrix_on_device(B);
copy_matrix_to_device(Bd, B);
matrix_t = allocate_matrix_on_device(gpu_naive_sol_x);
matrix_t new_Xd = allocate_matrix_on_device(gpu_naive_sol_x);
unsigned int done = 0;
double ssd, mse;
unsigned int num_iter = 0;
int BLOCK_SIZE = 32;
dim3 thread_block(BLOCK_SIZE, BLOCK_SIZE, 1); /* Set number of threads in thread block */
dim3 grid(1,1);
while (!done) {
for (i = 0; i < num_rows; i++) {
double sum = -A.elements[i * num_cols + i] * Xd.elements[i];
for (j = 0; j < num_cols; j++) {
sum += A.elements[i * num_cols + j] * Xd.elements[j];
}
/* Update values for the unkowns for the current row. */
new_Xd.elements[i] = (B.elements[i] - sum)/A.elements[i * num_cols + i];
}
// jacobi_iteration_kernel_naive<<<grid, thread_block>>>(Ad.elements, Xd.elements, Bd.elements, Ad.num_rows);
// cudaDeviceSynchronize();
/* Check for convergence and update the unknowns. */
ssd = 0.0;
for (i = 0; i < num_rows; i++) {
ssd += (new_Xd.elements[i] - Xd.elements[i]) * (new_Xd.elements[i] - Xd.elements[i]);
Xd.elements[i] = new_Xd.elements[i];
}
num_iter++;
mse = sqrt (ssd); /* Mean squared error. */
printf("Iteration: %d. MSE = %f\n", num_iter, mse);
if (mse <= THRESHOLD || num_iter > 1000)
done = 1;
}
copy_matrix_from_device(Xd, gpu_naive_sol_x);
cudaF
}
/* FIXME: Complete this function to perform Jacobi calculation on device */
void compute_on_device(const matrix_t A, matrix_t gpu_naive_sol_x,
matrix_t gpu_opt_sol_x, const matrix_t B)
{
naive_method(A, gpu_naive_sol_x, B);
return;
}
/* Allocate matrix on the device of same size as M */
matrix_t allocate_matrix_on_device(const matrix_t M)
{
matrix_t Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
cudaMalloc((void **)&Mdevice.elements, size);
return Mdevice;
}
/* Allocate a matrix of dimensions height * width.
If init == 0, initialize to all zeroes.
If init == 1, perform random initialization.
*/
matrix_t allocate_matrix_on_host(int num_rows, int num_columns, int init)
{
matrix_t M;
M.num_columns = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float *)malloc(size * sizeof(float));
for (unsigned int i = 0; i < size; i++) {
if (init == 0)
M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
/* Copy matrix to device */
void copy_matrix_to_device(matrix_t Mdevice, const matrix_t Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
return;
}
/* Copy matrix from device to host */
void copy_matrix_from_device(matrix_t Mhost, const matrix_t Mdevice)
{
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost);
return;
}
/* Prints the matrix out to screen */
void print_matrix(const matrix_t M)
{
for (unsigned int i = 0; i < M.num_rows; i++) {
for (unsigned int j = 0; j < M.num_columns; j++) {
printf("%f ", M.elements[i * M.num_columns + j]);
}
printf("\n");
}
printf("\n");
return;
}
/* Returns a floating-point value between [min, max] */
float get_random_number(int min, int max)
{
float r = rand()/(float)RAND_MAX;
return (float)floor((double)(min + (max - min + 1) * r));
}
/* Check for errors in kernel execution */
void check_CUDA_error(const char *msg)
{
cudaError_t err = cudaGetLastError();
if ( cudaSuccess != err) {
printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return;
}
/* Create diagonally dominant matrix */
matrix_t create_diagonally_dominant_matrix(unsigned int num_rows, unsigned int num_columns)
{
matrix_t M;
M.num_columns = num_columns;
M.num_rows = num_rows;
unsigned int size = M.num_rows * M.num_columns;
M.elements = (float *)malloc(size * sizeof(float));
if (M.elements == NULL)
return M;
/* Create a matrix with random numbers between [-.5 and .5] */
unsigned int i, j;
for (i = 0; i < size; i++)
M.elements[i] = get_random_number (MIN_NUMBER, MAX_NUMBER);
/* Make diagonal entries large with respect to the entries on each row. */
for (i = 0; i < num_rows; i++) {
float row_sum = 0.0;
for (j = 0; j < num_columns; j++) {
row_sum += fabs(M.elements[i * M.num_rows + j]);
}
M.elements[i * M.num_rows + i] = 0.5 + row_sum;
}
return M;
}
|
e7d0ed3c95f72aa5f799a6b436e3b5ac319ffcf9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include<iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define N 4
#define INF 9999
__global__ void Floyd(int** graph, int k)
{
int i = /*blockIdx.x +*/ threadIdx.x;
int j = /*blockIdx.y +*/ threadIdx.y;
if (graph[i][k] + graph[k][j] < graph[i][j])
graph[i][j] = graph[i][k] + graph[k][j];
}
int main()
{
int h_graph[N][N] = {
0,5,9999, 10,
9999, 0,3, 9999,
9999, 9999, 0,1,
9999, 9999, 9999,0
};
size_t size = N * N * sizeof(int);
int** d_graph;
hipMalloc(&d_graph, size);
hipMemcpy(d_graph, h_graph, size, hipMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N, N);
for (int k = 0; k < N*N; k++)
{
hipLaunchKernelGGL(( Floyd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_graph, k);
}
hipMemcpy(h_graph, d_graph, size, hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
if (h_graph[i][j] == INF){
cout << "INF ";
}
else{
cout << h_graph[i][j] << " ";
}
}
cout << endl;
}
hipFree(d_graph);
}
| e7d0ed3c95f72aa5f799a6b436e3b5ac319ffcf9.cu |
#include <stdio.h>
#include<iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define N 4
#define INF 9999
__global__ void Floyd(int** graph, int k)
{
int i = /*blockIdx.x +*/ threadIdx.x;
int j = /*blockIdx.y +*/ threadIdx.y;
if (graph[i][k] + graph[k][j] < graph[i][j])
graph[i][j] = graph[i][k] + graph[k][j];
}
int main()
{
int h_graph[N][N] = {
0,5,9999, 10,
9999, 0,3, 9999,
9999, 9999, 0,1,
9999, 9999, 9999,0
};
size_t size = N * N * sizeof(int);
int** d_graph;
cudaMalloc(&d_graph, size);
cudaMemcpy(d_graph, h_graph, size, cudaMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N, N);
for (int k = 0; k < N*N; k++)
{
Floyd<<<numBlocks, threadsPerBlock>>>(d_graph, k);
}
cudaMemcpy(h_graph, d_graph, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
if (h_graph[i][j] == INF){
cout << "INF ";
}
else{
cout << h_graph[i][j] << " ";
}
}
cout << endl;
}
cudaFree(d_graph);
}
|
23be052c62d13f4c45f16ae74d9e9afb09e278f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box2d3r-512-7-256_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 97
#define BENCH_RAD 3
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 7 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 470;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 506;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 506;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 506;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 506;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 482;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.01530f * A[t%2][i-3][j-3] +
0.01531f * A[t%2][i-3][j-2] +
0.01532f * A[t%2][i-3][j-1] +
0.01533f * A[t%2][i-3][j] +
0.01534f * A[t%2][i-3][j+1] +
0.01535f * A[t%2][i-3][j+2] +
0.01536f * A[t%2][i-3][j+3] +
0.01537f * A[t%2][i-2][j-3] +
0.01538f * A[t%2][i-2][j-2] +
0.01539f * A[t%2][i-2][j-1] +
0.01540f * A[t%2][i-2][j] +
0.01541f * A[t%2][i-2][j+1] +
0.01542f * A[t%2][i-2][j+2] +
0.01543f * A[t%2][i-2][j+3] +
0.01544f * A[t%2][i-1][j-3] +
0.01545f * A[t%2][i-1][j-2] +
0.01546f * A[t%2][i-1][j-1] +
0.01546f * A[t%2][i-1][j] +
0.01547f * A[t%2][i-1][j+1] +
0.01548f * A[t%2][i-1][j+2] +
0.01549f * A[t%2][i-1][j+3] +
0.01550f * A[t%2][i][j-3] +
0.01551f * A[t%2][i][j-2] +
0.01552f * A[t%2][i][j-1] +
0.25424f * A[t%2][i][j] +
0.01554f * A[t%2][i][j+1] +
0.01555f * A[t%2][i][j+2] +
0.01556f * A[t%2][i][j+3] +
0.01557f * A[t%2][i+1][j-3] +
0.01558f * A[t%2][i+1][j-2] +
0.01559f * A[t%2][i+1][j-1] +
0.01560f * A[t%2][i+1][j] +
0.01561f * A[t%2][i+1][j+1] +
0.01562f * A[t%2][i+1][j+2] +
0.01564f * A[t%2][i+1][j+3] +
0.01565f * A[t%2][i+2][j-3] +
0.01566f * A[t%2][i+2][j-2] +
0.01567f * A[t%2][i+2][j-1] +
0.01568f * A[t%2][i+2][j] +
0.01569f * A[t%2][i+2][j+1] +
0.01570f * A[t%2][i+2][j+2] +
0.01571f * A[t%2][i+2][j+3] +
0.01572f * A[t%2][i+3][j-3] +
0.01573f * A[t%2][i+3][j-2] +
0.01574f * A[t%2][i+3][j-1] +
0.01575f * A[t%2][i+3][j] +
0.01576f * A[t%2][i+3][j+1] +
0.01577f * A[t%2][i+3][j+2] +
0.01578f * A[t%2][i+3][j+3];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 23be052c62d13f4c45f16ae74d9e9afb09e278f7.cu | #include <assert.h>
#include <stdio.h>
#include "box2d3r-512-7-256_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 97
#define BENCH_RAD 3
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 7 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 470;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 506;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 506;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 506;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 506;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 482;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.01530f * A[t%2][i-3][j-3] +
0.01531f * A[t%2][i-3][j-2] +
0.01532f * A[t%2][i-3][j-1] +
0.01533f * A[t%2][i-3][j] +
0.01534f * A[t%2][i-3][j+1] +
0.01535f * A[t%2][i-3][j+2] +
0.01536f * A[t%2][i-3][j+3] +
0.01537f * A[t%2][i-2][j-3] +
0.01538f * A[t%2][i-2][j-2] +
0.01539f * A[t%2][i-2][j-1] +
0.01540f * A[t%2][i-2][j] +
0.01541f * A[t%2][i-2][j+1] +
0.01542f * A[t%2][i-2][j+2] +
0.01543f * A[t%2][i-2][j+3] +
0.01544f * A[t%2][i-1][j-3] +
0.01545f * A[t%2][i-1][j-2] +
0.01546f * A[t%2][i-1][j-1] +
0.01546f * A[t%2][i-1][j] +
0.01547f * A[t%2][i-1][j+1] +
0.01548f * A[t%2][i-1][j+2] +
0.01549f * A[t%2][i-1][j+3] +
0.01550f * A[t%2][i][j-3] +
0.01551f * A[t%2][i][j-2] +
0.01552f * A[t%2][i][j-1] +
0.25424f * A[t%2][i][j] +
0.01554f * A[t%2][i][j+1] +
0.01555f * A[t%2][i][j+2] +
0.01556f * A[t%2][i][j+3] +
0.01557f * A[t%2][i+1][j-3] +
0.01558f * A[t%2][i+1][j-2] +
0.01559f * A[t%2][i+1][j-1] +
0.01560f * A[t%2][i+1][j] +
0.01561f * A[t%2][i+1][j+1] +
0.01562f * A[t%2][i+1][j+2] +
0.01564f * A[t%2][i+1][j+3] +
0.01565f * A[t%2][i+2][j-3] +
0.01566f * A[t%2][i+2][j-2] +
0.01567f * A[t%2][i+2][j-1] +
0.01568f * A[t%2][i+2][j] +
0.01569f * A[t%2][i+2][j+1] +
0.01570f * A[t%2][i+2][j+2] +
0.01571f * A[t%2][i+2][j+3] +
0.01572f * A[t%2][i+3][j-3] +
0.01573f * A[t%2][i+3][j-2] +
0.01574f * A[t%2][i+3][j-1] +
0.01575f * A[t%2][i+3][j] +
0.01576f * A[t%2][i+3][j+1] +
0.01577f * A[t%2][i+3][j+2] +
0.01578f * A[t%2][i+3][j+3];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
b1b8b88ea9672c33d8dfaf8673a8025da4b2843c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sumGrad(float* input1, float* input2, float* input3, float* input4, float* output, const int numElem)
{
size_t pos = blockDim.x * blockIdx.x + threadIdx.x;
size_t size = blockDim.x * gridDim.x;
for(int i = numElem * pos / size; i < numElem * (pos+1) / size; i++){
output[i] = input1[i] + input2[i] + input3[i] + input4[i];
}
} | b1b8b88ea9672c33d8dfaf8673a8025da4b2843c.cu | #include "includes.h"
__global__ void sumGrad(float* input1, float* input2, float* input3, float* input4, float* output, const int numElem)
{
size_t pos = blockDim.x * blockIdx.x + threadIdx.x;
size_t size = blockDim.x * gridDim.x;
for(int i = numElem * pos / size; i < numElem * (pos+1) / size; i++){
output[i] = input1[i] + input2[i] + input3[i] + input4[i];
}
} |
build_tree.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Node.cu"
#include "stack.h"
__host__ __device__ void treePrint(Node *n)
{
if(n != NULL)
{
treePrint(n->left);
n->printNode();
treePrint(n->right);
}
}
//function to find leftmost leaf of a tree with root n
__host__ __device__ Node* leftmost(Node *n)
{
if(n->left != NULL)
return leftmost(n->left);
else
return n;
}
//function to find rightmost leaf of a tree with root n
__host__ __device__ Node* rightmost(Node *n)
{
if(n->right != NULL)
return rightmost(n->right);
else
return n;
}
// compressed quadtree is computed for a given set of points
__global__ void compressed_quadtree(long int *d_points, int n, Stack<Node> *d_roots, Stack<Node> *d_quadtree, int w, int msb)
{
int index = threadIdx.x * n;
// initialize root of the tree to biggest possible box in level w and child p1
Node *r = new Node;
Node *p = new Node(r, d_points[index]);
d_quadtree->push(p);
r->right = p;
r->level = w;
Node *temp = new Node;
temp = r;
for(int i=1; i<n; i++)
{
// new nodes created in each iteration q(pi-1, pi) and pi
Node *q = new Node;
Node *p_i = new Node(q, d_points[i+index]);
d_quadtree->push(q);
d_quadtree->push(p_i);
// set the level to most significant bit different between 2 points
q->setLevel(d_points[i+index-1], d_points[i+index]);
q->data = d_points[i+index-1];
q->box = new Bounding_box(w, q->level, d_points[i+index-1], d_points[i+index], msb);
q->right = p_i;
// find location for new node
while (q->level > r->level)
r = r->parent;
q->left = r->right;
r->right->parent = q;
q->parent = r;
r->right = q;
// preserve newest q node to compare in the future iteration
r = q;
}
// root of the tree is stored in stack
temp = temp->right;
temp->parent = NULL;
d_roots->insert(temp, threadIdx.x);
}
// compressed trees are merged into a sole compressed quadtree
__global__ void tree_merge(Stack<Node> *d_roots, Stack<Node> *d_quadtree, Node *d_root, int w, int msb)
{
while(d_roots->get_size()>0)
{
// take previously built trees to merge
Node *t1 = d_roots->pop();
Node *t2 = d_roots->pop();
if(!t1 || !t2)
return;
// if t2 contains smaller points than t1 swap
if(t1->data > t2->data)
{
Node *temp = t1;
t1 = t2;
t2 = temp;
}
// get rightmost leaf of t1 and leftmost leaf of t2
Node *r = rightmost(t1);
Node *l = leftmost(t2);
// righmost and leftmost leaves parents
Node *r_p = new Node;
Node *l_p = new Node;
// new node which will merge t1 and t2
Node *m = new Node;
d_quadtree->push(m);
m->setLevel(r->data, l->data);
m->data = r->data;
m->box = new Bounding_box(w, m->level, r->data, l->data, msb);
// find new node's left child in t1
while(r->parent && m->level > r->parent->level)
r = r->parent;
m->left = r;
r_p = r->parent;
r->parent = m;
// find new node's right child in t2
while(l->parent && m->level > l->parent->level)
l = l->parent;
m->right = l;
l_p = l->parent;
l->parent = m;
// test to find new node's parent
if(!l_p && !r_p)
{
d_roots->push(m);
}
else if(!r_p && l_p)
{
m->parent = l_p;
l_p->left = m;
d_roots->push(l_p);
}
else if(!l_p && r_p)
{
m->parent = r_p;
r_p->right = m;
d_roots->push(r_p);
}
else if(r_p->level > l_p->level)
{
m->parent = l_p;
l_p->left = m;
while(l_p->parent && r_p->level > l_p->level)
{
l_p = l_p->parent;
}
l_p->parent = r_p;
r_p->right = l_p;
d_roots->push(r_p);
}
else if(l_p->level > r_p->level)
{
m->parent = r_p;
r_p->right = m;
while(r_p->parent && l_p->level > r_p->level)
{
r_p = r_p->parent;
}
r_p->parent = l_p;
l_p->left = r_p;
d_roots->push(l_p);
}
}
// root of the compressed quadtree is stored in variable
d_root = d_roots->top();
treePrint(d_root);
}
| build_tree.cu | #include "Node.cu"
#include "stack.h"
__host__ __device__ void treePrint(Node *n)
{
if(n != NULL)
{
treePrint(n->left);
n->printNode();
treePrint(n->right);
}
}
//function to find leftmost leaf of a tree with root n
__host__ __device__ Node* leftmost(Node *n)
{
if(n->left != NULL)
return leftmost(n->left);
else
return n;
}
//function to find rightmost leaf of a tree with root n
__host__ __device__ Node* rightmost(Node *n)
{
if(n->right != NULL)
return rightmost(n->right);
else
return n;
}
// compressed quadtree is computed for a given set of points
__global__ void compressed_quadtree(long int *d_points, int n, Stack<Node> *d_roots, Stack<Node> *d_quadtree, int w, int msb)
{
int index = threadIdx.x * n;
// initialize root of the tree to biggest possible box in level w and child p1
Node *r = new Node;
Node *p = new Node(r, d_points[index]);
d_quadtree->push(p);
r->right = p;
r->level = w;
Node *temp = new Node;
temp = r;
for(int i=1; i<n; i++)
{
// new nodes created in each iteration q(pi-1, pi) and pi
Node *q = new Node;
Node *p_i = new Node(q, d_points[i+index]);
d_quadtree->push(q);
d_quadtree->push(p_i);
// set the level to most significant bit different between 2 points
q->setLevel(d_points[i+index-1], d_points[i+index]);
q->data = d_points[i+index-1];
q->box = new Bounding_box(w, q->level, d_points[i+index-1], d_points[i+index], msb);
q->right = p_i;
// find location for new node
while (q->level > r->level)
r = r->parent;
q->left = r->right;
r->right->parent = q;
q->parent = r;
r->right = q;
// preserve newest q node to compare in the future iteration
r = q;
}
// root of the tree is stored in stack
temp = temp->right;
temp->parent = NULL;
d_roots->insert(temp, threadIdx.x);
}
// compressed trees are merged into a sole compressed quadtree
__global__ void tree_merge(Stack<Node> *d_roots, Stack<Node> *d_quadtree, Node *d_root, int w, int msb)
{
while(d_roots->get_size()>0)
{
// take previously built trees to merge
Node *t1 = d_roots->pop();
Node *t2 = d_roots->pop();
if(!t1 || !t2)
return;
// if t2 contains smaller points than t1 swap
if(t1->data > t2->data)
{
Node *temp = t1;
t1 = t2;
t2 = temp;
}
// get rightmost leaf of t1 and leftmost leaf of t2
Node *r = rightmost(t1);
Node *l = leftmost(t2);
// righmost and leftmost leaves parents
Node *r_p = new Node;
Node *l_p = new Node;
// new node which will merge t1 and t2
Node *m = new Node;
d_quadtree->push(m);
m->setLevel(r->data, l->data);
m->data = r->data;
m->box = new Bounding_box(w, m->level, r->data, l->data, msb);
// find new node's left child in t1
while(r->parent && m->level > r->parent->level)
r = r->parent;
m->left = r;
r_p = r->parent;
r->parent = m;
// find new node's right child in t2
while(l->parent && m->level > l->parent->level)
l = l->parent;
m->right = l;
l_p = l->parent;
l->parent = m;
// test to find new node's parent
if(!l_p && !r_p)
{
d_roots->push(m);
}
else if(!r_p && l_p)
{
m->parent = l_p;
l_p->left = m;
d_roots->push(l_p);
}
else if(!l_p && r_p)
{
m->parent = r_p;
r_p->right = m;
d_roots->push(r_p);
}
else if(r_p->level > l_p->level)
{
m->parent = l_p;
l_p->left = m;
while(l_p->parent && r_p->level > l_p->level)
{
l_p = l_p->parent;
}
l_p->parent = r_p;
r_p->right = l_p;
d_roots->push(r_p);
}
else if(l_p->level > r_p->level)
{
m->parent = r_p;
r_p->right = m;
while(r_p->parent && l_p->level > r_p->level)
{
r_p = r_p->parent;
}
r_p->parent = l_p;
l_p->left = r_p;
d_roots->push(l_p);
}
}
// root of the compressed quadtree is stored in variable
d_root = d_roots->top();
treePrint(d_root);
}
|
de22021648aab28bf1484016821a23219d5aaef2.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "rocblas.h"
#include "gemv.cu"
#define MM 4
#define NN 3
using namespace std;
int main(int argc, char *argv[])
{
int mode = atoi(argv[1]);
srand((unsigned)time(0));
float *a, *x, *y;
a = (float*)malloc(MM * NN * sizeof(float));
x = (float*)malloc(NN * sizeof(float));
y = (float*)malloc(MM * sizeof(float));
float alpha = 1.0;
float beta = 0;
for (int i = 0; i < MM; ++i)
for (int j = 0; j < NN; ++j)
a[i * NN + j] = i * NN + j;
for (int i = 0; i < NN; ++i)
x[i] = i;
float *d_a, *d_x, *d_y;
hipMalloc((void**)&d_a, MM * NN * sizeof(float));
hipMalloc((void**)&d_x, NN * sizeof(float));
hipMalloc((void**)&d_y, MM * sizeof(float));
hipMemcpy((void*)d_a, (void*)a, MM * NN* sizeof(float), hipMemcpyHostToDevice);
hipMemcpy((void*)d_x, (void*)x, NN * sizeof(float), hipMemcpyHostToDevice);
clock_t start, end;
if (mode == 0)
{
start = clock();
gemv::caffe_gpu_gemv(MM, NN, alpha, d_a, d_x, beta, d_y);
}
hipMemcpy((void*)y, (void*)d_y, sizeof(float) * MM, hipMemcpyDeviceToHost);
for (int i = 0; i < MM; ++i)
cout << y[i] << ' ';
cout << endl;
Error:
hipFree(d_a);
hipFree(d_x);
hipFree(d_y);
free(a);
free(x);
free(y);
return 0;
} | de22021648aab28bf1484016821a23219d5aaef2.cu | #include <iostream>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cmath>
#include "cublas_v2.h"
#include "gemv.cu"
#define MM 4
#define NN 3
using namespace std;
int main(int argc, char *argv[])
{
int mode = atoi(argv[1]);
srand((unsigned)time(0));
float *a, *x, *y;
a = (float*)malloc(MM * NN * sizeof(float));
x = (float*)malloc(NN * sizeof(float));
y = (float*)malloc(MM * sizeof(float));
float alpha = 1.0;
float beta = 0;
for (int i = 0; i < MM; ++i)
for (int j = 0; j < NN; ++j)
a[i * NN + j] = i * NN + j;
for (int i = 0; i < NN; ++i)
x[i] = i;
float *d_a, *d_x, *d_y;
cudaMalloc((void**)&d_a, MM * NN * sizeof(float));
cudaMalloc((void**)&d_x, NN * sizeof(float));
cudaMalloc((void**)&d_y, MM * sizeof(float));
cudaMemcpy((void*)d_a, (void*)a, MM * NN* sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy((void*)d_x, (void*)x, NN * sizeof(float), cudaMemcpyHostToDevice);
clock_t start, end;
if (mode == 0)
{
start = clock();
gemv::caffe_gpu_gemv(MM, NN, alpha, d_a, d_x, beta, d_y);
}
cudaMemcpy((void*)y, (void*)d_y, sizeof(float) * MM, cudaMemcpyDeviceToHost);
for (int i = 0; i < MM; ++i)
cout << y[i] << ' ';
cout << endl;
Error:
cudaFree(d_a);
cudaFree(d_x);
cudaFree(d_y);
free(a);
free(x);
free(y);
return 0;
} |
1edb56d8339d2d27b3debe36685dad942ecfae9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "Integrator.cuh"
#include <assert.h>
/*! \file Integrator.cu
\brief Defines methods and data structures used by the Integrator class on the GPU
*/
//! helper to add a given force/virial pointer pair
template< unsigned int compute_virial >
__device__ void add_force_total(Scalar4& net_force, Scalar *net_virial, Scalar4& net_torque, Scalar4* d_f, Scalar* d_v, const unsigned int virial_pitch, Scalar4* d_t, int idx)
{
if (d_f != NULL && d_v != NULL && d_t != NULL)
{
Scalar4 f = d_f[idx];
Scalar4 t = d_t[idx];
net_force.x += f.x;
net_force.y += f.y;
net_force.z += f.z;
net_force.w += f.w;
if (compute_virial)
{
for (int i=0; i < 6; i++)
net_virial[i] += d_v[i*virial_pitch+idx];
}
net_torque.x += t.x;
net_torque.y += t.y;
net_torque.z += t.z;
net_torque.w += t.w;
}
}
//! Kernel for summing forces on the GPU
/*! The speicified forces and virials are summed for every particle into \a d_net_force and \a d_net_virial
\param d_net_force Output device array to hold the computed net force
\param d_net_virial Output device array to hold the computed net virial
\param net_virial_pitch The pitch of the 2D net_virial array
\param d_net_torque Output device array to hold the computed net torque
\param force_list List of pointers to force data to sum
\param nparticles Number of particles in the arrays
\param clear When true, initializes the sums to 0 before adding. When false, reads in the current \a d_net_force
and \a d_net_virial and adds to that
\tparam compute_virial When set to 0, the virial sum is not computed
*/
template< unsigned int compute_virial >
__global__ void gpu_integrator_sum_net_force_kernel(Scalar4 *d_net_force,
Scalar *d_net_virial,
const unsigned int net_virial_pitch,
Scalar4 *d_net_torque,
const gpu_force_list force_list,
unsigned int nparticles,
bool clear)
{
// calculate the index we will be handling
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < nparticles)
{
// set the initial net_force and net_virial to sum into
Scalar4 net_force;
Scalar net_virial[6];
Scalar4 net_torque;
if (clear)
{
net_force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
if (compute_virial)
{
for (int i=0; i<6; i++)
net_virial[i] = Scalar(0.0);
}
net_torque = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
}
else
{
// if clear is false, intialize to the current d_net_force and d_net_virial
net_force = d_net_force[idx];
if (compute_virial)
{
for (int i=0; i<6; i++)
net_virial[i] = d_net_virial[i*net_virial_pitch+idx];
}
net_torque = d_net_torque[idx];
}
// sum up the totals
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f0, force_list.v0, force_list.vpitch0, force_list.t0, idx);
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f1, force_list.v1, force_list.vpitch1, force_list.t1, idx);
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f2, force_list.v2, force_list.vpitch2, force_list.t2, idx);
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f3, force_list.v3, force_list.vpitch3, force_list.t3, idx);
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f4, force_list.v4, force_list.vpitch4, force_list.t4, idx);
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f5, force_list.v5, force_list.vpitch5, force_list.t5, idx);
// write out the final result
d_net_force[idx] = net_force;
if (compute_virial)
{
for (int i=0; i < 6; i++)
d_net_virial[i*net_virial_pitch+idx] = net_virial[i];
}
d_net_torque[idx] = net_torque;
}
}
hipError_t gpu_integrator_sum_net_force(Scalar4 *d_net_force,
Scalar *d_net_virial,
const unsigned int net_virial_pitch,
Scalar4 *d_net_torque,
const gpu_force_list& force_list,
unsigned int nparticles,
bool clear,
bool compute_virial)
{
// sanity check
assert(d_net_force);
assert(d_net_virial);
assert(d_net_torque);
const int block_size = 256;
if (compute_virial)
{
hipLaunchKernelGGL(( gpu_integrator_sum_net_force_kernel<1>), dim3(nparticles/block_size+1), dim3(block_size) , 0, 0, d_net_force,
d_net_virial,
net_virial_pitch,
d_net_torque,
force_list,
nparticles,
clear);
}
else
{
hipLaunchKernelGGL(( gpu_integrator_sum_net_force_kernel<0>), dim3(nparticles/block_size+1), dim3(block_size) , 0, 0, d_net_force,
d_net_virial,
net_virial_pitch,
d_net_torque,
force_list,
nparticles,
clear);
}
return hipSuccess;
}
| 1edb56d8339d2d27b3debe36685dad942ecfae9e.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "Integrator.cuh"
#include <assert.h>
/*! \file Integrator.cu
\brief Defines methods and data structures used by the Integrator class on the GPU
*/
//! helper to add a given force/virial pointer pair
template< unsigned int compute_virial >
__device__ void add_force_total(Scalar4& net_force, Scalar *net_virial, Scalar4& net_torque, Scalar4* d_f, Scalar* d_v, const unsigned int virial_pitch, Scalar4* d_t, int idx)
{
if (d_f != NULL && d_v != NULL && d_t != NULL)
{
Scalar4 f = d_f[idx];
Scalar4 t = d_t[idx];
net_force.x += f.x;
net_force.y += f.y;
net_force.z += f.z;
net_force.w += f.w;
if (compute_virial)
{
for (int i=0; i < 6; i++)
net_virial[i] += d_v[i*virial_pitch+idx];
}
net_torque.x += t.x;
net_torque.y += t.y;
net_torque.z += t.z;
net_torque.w += t.w;
}
}
//! Kernel for summing forces on the GPU
/*! The speicified forces and virials are summed for every particle into \a d_net_force and \a d_net_virial
\param d_net_force Output device array to hold the computed net force
\param d_net_virial Output device array to hold the computed net virial
\param net_virial_pitch The pitch of the 2D net_virial array
\param d_net_torque Output device array to hold the computed net torque
\param force_list List of pointers to force data to sum
\param nparticles Number of particles in the arrays
\param clear When true, initializes the sums to 0 before adding. When false, reads in the current \a d_net_force
and \a d_net_virial and adds to that
\tparam compute_virial When set to 0, the virial sum is not computed
*/
template< unsigned int compute_virial >
__global__ void gpu_integrator_sum_net_force_kernel(Scalar4 *d_net_force,
Scalar *d_net_virial,
const unsigned int net_virial_pitch,
Scalar4 *d_net_torque,
const gpu_force_list force_list,
unsigned int nparticles,
bool clear)
{
// calculate the index we will be handling
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < nparticles)
{
// set the initial net_force and net_virial to sum into
Scalar4 net_force;
Scalar net_virial[6];
Scalar4 net_torque;
if (clear)
{
net_force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
if (compute_virial)
{
for (int i=0; i<6; i++)
net_virial[i] = Scalar(0.0);
}
net_torque = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
}
else
{
// if clear is false, intialize to the current d_net_force and d_net_virial
net_force = d_net_force[idx];
if (compute_virial)
{
for (int i=0; i<6; i++)
net_virial[i] = d_net_virial[i*net_virial_pitch+idx];
}
net_torque = d_net_torque[idx];
}
// sum up the totals
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f0, force_list.v0, force_list.vpitch0, force_list.t0, idx);
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f1, force_list.v1, force_list.vpitch1, force_list.t1, idx);
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f2, force_list.v2, force_list.vpitch2, force_list.t2, idx);
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f3, force_list.v3, force_list.vpitch3, force_list.t3, idx);
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f4, force_list.v4, force_list.vpitch4, force_list.t4, idx);
add_force_total<compute_virial>(net_force, net_virial, net_torque, force_list.f5, force_list.v5, force_list.vpitch5, force_list.t5, idx);
// write out the final result
d_net_force[idx] = net_force;
if (compute_virial)
{
for (int i=0; i < 6; i++)
d_net_virial[i*net_virial_pitch+idx] = net_virial[i];
}
d_net_torque[idx] = net_torque;
}
}
cudaError_t gpu_integrator_sum_net_force(Scalar4 *d_net_force,
Scalar *d_net_virial,
const unsigned int net_virial_pitch,
Scalar4 *d_net_torque,
const gpu_force_list& force_list,
unsigned int nparticles,
bool clear,
bool compute_virial)
{
// sanity check
assert(d_net_force);
assert(d_net_virial);
assert(d_net_torque);
const int block_size = 256;
if (compute_virial)
{
gpu_integrator_sum_net_force_kernel<1><<< nparticles/block_size+1, block_size >>>(d_net_force,
d_net_virial,
net_virial_pitch,
d_net_torque,
force_list,
nparticles,
clear);
}
else
{
gpu_integrator_sum_net_force_kernel<0><<< nparticles/block_size+1, block_size >>>(d_net_force,
d_net_virial,
net_virial_pitch,
d_net_torque,
force_list,
nparticles,
clear);
}
return cudaSuccess;
}
|
bed9ec8136ae8f280d9dde0940424634ff58ad8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "quadtree.h"
//R2 = R1*multFactor + R2;
__device__ void add_r1_to_r2(glm::mat3 &A, glm::vec3 &b, int r1, int r2, float multFactor)
{
float tmp;
if (r1 == r2) return;
for(int i = 0; i < 3; ++i)
{
A[i][r2] = A[i][r2] + multFactor*A[i][r1];
}
b[r2] = b[r2] + multFactor*b[r1];
}
__device__ void swap_row(glm::mat3 &A, glm::vec3 &b, int r1, int r2)
{
float tmp;
if (r1 == r2) return;
#pragma unroll
for(int i = 0; i < 3; ++i)
{
tmp = A[i][r1];
A[i][r1] = A[i][r2];
A[i][r2] = tmp;
}
tmp = b[r1];
b[r1] = b[r2];
b[r2] = tmp;
}
__device__ void row_mult(glm::mat3 &A, glm::vec3 &b, int r1, float mult)
{
#pragma unroll
for(int i = 0; i < 3; ++i)
{
A[i][r1] = A[i][r1]*mult;
}
b[r1] *= mult;
}
#define APPROXZERO(a) (abs(a) < 0.000001f)
__device__ void makeNonZeroDiagonal(glm::mat3 &A,glm::vec3 b)
{
int permute[3];
if( !APPROXZERO(A[0][0]) && !APPROXZERO(A[1][1]) && !APPROXZERO(A[2][2]))
{
permute[0] = 0; permute[1] = 1; permute[2] = 2;
}else if(!APPROXZERO(A[0][0]) && !APPROXZERO(A[1][2]) && !APPROXZERO(A[2][1]))
{
permute[0] = 0; permute[1] = 2; permute[2] = 1;
}else if(!APPROXZERO(A[0][1]) && !APPROXZERO(A[1][0]) && !APPROXZERO(A[2][2]))
{
permute[0] = 1; permute[1] = 0; permute[2] = 2;
}else if(!APPROXZERO(A[0][1]) && !APPROXZERO(A[1][2]) && !APPROXZERO(A[2][0]))
{
permute[0] = 1; permute[1] = 2; permute[2] = 0;
}else if(!APPROXZERO(A[0][2]) && !APPROXZERO(A[1][0]) && !APPROXZERO(A[2][1]))
{
permute[0] = 2; permute[1] = 0; permute[2] = 1;
}else if(!APPROXZERO(A[0][2]) && !APPROXZERO(A[1][1]) && !APPROXZERO(A[2][0]))
{
permute[0] = 2; permute[1] = 1; permute[2] = 0;
}else{
//ERROR
}
for(int i = 0; i < 3; ++i)
{
if(permute[i] > i)
{
swap_row(A,b, i, permute[i]);
for(int j = 0; j < 3; ++j)
{
if(permute[j] == i)
{
permute[j] = permute[i];
permute[i] = i;
break;
}
}
}
}
}
__device__ glm::vec3 solveAbGaussian(glm::mat3 A, glm::vec3 b)
{
//Make sure diagonals have non-zero entries
if(abs(A[0][0]*A[1][1]*A[2][2]) < 0.000001f)
makeNonZeroDiagonal(A,b);
//Row echelon form
for(int r = 0; r < 3; ++r)
{
float factor = 1.0f/A[r][r];
row_mult(A,b,r,factor);
for(int r2 = r+1; r2 < 3; ++r2)
{
if(abs(A[r][r2]) > 0.000001f)
{
//If A[r][r2] not zero yet,
//Need A[r][r2] + factor*A[r][r] == 0
factor = -A[r][r2]/A[r][r];
add_r1_to_r2(A,b,r,r2,factor);
}
}
}
//Matrix now upper triangular
//Back substitute
for(int r = 0; r < 3; ++r)
{
for(int c = r+1; c < 3; ++c)
{
if(abs(A[c][r]) > 0.000001f)
{
//element is non-zero. Backsubstitute
//Need A[c][r] + factor*A[c][c] == 0
float factor = -A[c][r]/A[c][c];
add_r1_to_r2(A,b,c,r,factor);
}
}
}
return b;
}
//Numthreads is assumed to be a power of two
__device__ void minmaxreduction(float* s_minSx, float* s_maxSx, float* s_minSy, float* s_maxSy, int indexInBlock, int nTotalThreads)
{
int thread2;
float temp;
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1); // divide by two
// only the first half of the threads will be active.
if (indexInBlock < halfPoint)
{
thread2 = indexInBlock + halfPoint;
// Get the shared value stored by another thread
temp = s_minSx[thread2];
if (temp < s_minSx[indexInBlock])
s_minSx[indexInBlock] = temp;
temp = s_minSy[thread2];
if (temp < s_minSy[indexInBlock])
s_minSy[indexInBlock] = temp;
temp = s_maxSx[thread2];
if (temp > s_maxSx[indexInBlock])
s_maxSx[indexInBlock] = temp;
temp = s_maxSy[thread2];
if (temp > s_maxSy[indexInBlock])
s_maxSy[indexInBlock] = temp;
}
__syncthreads();
// Reducing the binary tree size by two:
nTotalThreads = halfPoint;
}
}
__global__ void computeAABBsKernel(PlaneStats* planeStats, int* planeInvIdMap, glm::vec4* aabbsBlockResults,
int* planeCount, int maxPlanes,
Float3SOA positions, float* segmentProjectedSx, float* segmentProjectedSy,
int* finalSegmentsBuffer, int xRes, int yRes)
{
extern __shared__ int s_Mem[];
int* s_InvMap = (int*) s_Mem;
float* s_centroidX = (float*)(s_InvMap + maxPlanes);
float* s_centroidY = s_centroidX + maxPlanes;
float* s_centroidZ = s_centroidY + maxPlanes;
glm::vec3* s_tangents = (glm::vec3*) (s_centroidZ + maxPlanes);
glm::vec3* s_bitangents = s_tangents + maxPlanes;
float* s_minSx = (float*)(s_bitangents + maxPlanes);
float* s_minSy = (s_minSx + blockDim.x*blockDim.y);
float* s_maxSx = (s_minSy + blockDim.x*blockDim.y);
float* s_maxSy = (s_maxSx + blockDim.x*blockDim.y);
int indexInBlock = threadIdx.x + threadIdx.y*blockDim.x;
int imageX = threadIdx.x + blockDim.x*blockIdx.x;
int imageY = threadIdx.y + blockDim.y*blockIdx.y;
int numPlanes = planeCount[0];
if(indexInBlock < maxPlanes)
{
s_InvMap[indexInBlock] = planeInvIdMap[indexInBlock];
if(indexInBlock < numPlanes)
{
//s_aabb[indexInBlock] = glm::vec4(0.0f);
s_tangents[indexInBlock] = planeStats[indexInBlock].tangent;
s_centroidX[indexInBlock] = planeStats[indexInBlock].centroid.x;
s_centroidY[indexInBlock] = planeStats[indexInBlock].centroid.y;
s_centroidZ[indexInBlock] = planeStats[indexInBlock].centroid.z;
//bitangent = norm cross tangent
glm::vec3 norm(planeStats[indexInBlock].norm.x,planeStats[indexInBlock].norm.y,planeStats[indexInBlock].norm.z);
s_bitangents[indexInBlock] = glm::normalize(glm::cross(norm, s_tangents[indexInBlock]));
}
}
__syncthreads();
//Remap segments
int segment = finalSegmentsBuffer[imageX + imageY*xRes];
float sx = 0;
float sy = 0;
if(segment >= 0)
{
//Remap and writeback
segment = s_InvMap[segment];
finalSegmentsBuffer[imageX + imageY*xRes] = segment;
//Compute Sx and Sy
glm::vec3 dp = glm::vec3(positions.x[imageX + imageY*xRes] - s_centroidX[segment],
positions.y[imageX + imageY*xRes] - s_centroidY[segment],
positions.z[imageX + imageY*xRes] - s_centroidZ[segment]);
sx = glm::dot(dp, s_bitangents[segment]);
sy = glm::dot(dp, s_tangents[segment]);
}
//writeback
segmentProjectedSx[imageX + imageY*xRes] = sx;
segmentProjectedSy[imageX + imageY*xRes] = sy;
__syncthreads();
//Repurpose invmap sharedmem for segment flags
if(indexInBlock < maxPlanes)
{
s_InvMap[indexInBlock] = 0;
}
__syncthreads();
if(segment >= 0)//flag each segment that exists in this block
s_InvMap[segment] = 1;
for(int plane = 0; plane < numPlanes; ++plane)
{
if(s_InvMap[plane] > 0)
{
//Init minmax planes
s_minSx[indexInBlock] = (segment == plane)?sx:0;
s_maxSx[indexInBlock] = (segment == plane)?sx:0;
s_minSy[indexInBlock] = (segment == plane)?sy:0;
s_maxSy[indexInBlock] = (segment == plane)?sy:0;
__syncthreads();
minmaxreduction(s_minSx, s_maxSx, s_minSy, s_maxSy, indexInBlock, blockDim.x*blockDim.y);
//Threads already synced in function
if(indexInBlock == 0)
{
aabbsBlockResults[(blockIdx.x + blockIdx.y*gridDim.x) + plane*gridDim.x*gridDim.y]
= glm::vec4(s_minSx[0], s_maxSx[0],s_minSy[0],s_maxSy[0]);
}
}else{
if(indexInBlock == 0)
aabbsBlockResults[(blockIdx.x + blockIdx.y*gridDim.x) + plane*gridDim.x*gridDim.y] = glm::vec4(0.0f);
}
}
}
__global__ void reduceAABBsKernel(PlaneStats* planeStats, glm::vec4* aabbsBlockResults, int numBlocks, int maxPlanes, int* planeCount)
{
extern __shared__ float s_temp[];
float* s_minSx = s_temp;
float* s_minSy = (s_minSx + blockDim.x);
float* s_maxSx = (s_minSy + blockDim.x);
float* s_maxSy = (s_maxSx + blockDim.x);
//two elements loaded per thread
int i = threadIdx.x;
int i2 = threadIdx.x + blockDim.x;
int numPlanes = planeCount[0];
for(int plane = 0; plane < numPlanes; ++plane)
{
glm::vec4 aabb1(0.0f);
glm::vec4 aabb2(0.0f);
if(i < numBlocks)
aabb1 = aabbsBlockResults[i + plane*numBlocks];
if(i2 < numBlocks)
aabb2 = aabbsBlockResults[i2 + plane*numBlocks];
s_minSx[i] = MIN(aabb1.x,aabb2.x);
s_maxSx[i] = MAX(aabb1.y,aabb2.y);
s_minSy[i] = MIN(aabb1.z,aabb2.z);
s_maxSy[i] = MAX(aabb1.w,aabb2.w);
__syncthreads();
minmaxreduction(s_minSx, s_maxSx, s_minSy, s_maxSy, i, blockDim.x);
if(threadIdx.x == 0)
planeStats[plane].projParams.aabbMeters = glm::vec4(s_minSx[0], s_maxSx[0],s_minSy[0],s_maxSy[0]);
}
}
__host__ __device__ int roundupnextpow2 (int x)
{
if (x < 0)
return 0;
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return x+1;
}
__host__ void computeAABBs(PlaneStats* planeStats, int* planeInvIdMap, glm::vec4* aabbsBlockResults,
int* planeCount, int maxPlanes,
Float3SOA positions, float* segmentProjectedSx, float* segmentProjectedSy,
int* finalSegmentsBuffer, int xRes, int yRes)
{
int blockWidth = AABB_COMPUTE_BLOCKWIDTH;
int blockHeight = AABB_COMPUTE_BLOCKHEIGHT;
assert(blockHeight*blockWidth >= maxPlanes);
dim3 threads(blockWidth, blockHeight);
dim3 blocks((int) ceil(xRes/float(blockWidth)), (int) ceil(yRes/float(blockHeight)));
//plane map, tangent, bitangent, centroid and aabb of each plane loaded to shared memory.
int sharedMem = maxPlanes*(sizeof(int) + sizeof(float)*3+sizeof(glm::vec3)*2) + blockWidth*blockHeight*4*sizeof(float);
hipLaunchKernelGGL(( computeAABBsKernel), dim3(blocks),dim3(threads),sharedMem, 0, planeStats, planeInvIdMap, aabbsBlockResults, planeCount, maxPlanes,
positions, segmentProjectedSx, segmentProjectedSy,
finalSegmentsBuffer, xRes, yRes);
int numBlocks = blocks.x*blocks.y;
int pow2Blocks = roundupnextpow2 (numBlocks) >> 1;//Next lowest power of two
assert(pow2Blocks <= 1024);
threads = dim3(pow2Blocks);
blocks = dim3(1);
sharedMem = 4*sizeof(float)*pow2Blocks;
hipLaunchKernelGGL(( reduceAABBsKernel), dim3(blocks),dim3(threads),sharedMem, 0, planeStats, aabbsBlockResults, numBlocks, maxPlanes, planeCount);
}
__global__ void calculateProjectionDataKernel(rgbd::framework::Intrinsics intr, PlaneStats* planeStats,
int* planeCount, int maxTextureSize, int xRes, int yRes)
{
glm::mat3 C(1.0f);
int destWidth = 0;
int destHeight = 0;
int maxRatio = 0;
glm::vec4 aabb;
if(threadIdx.x < planeCount[0])
{
//In range and valid plane.
glm::vec3 tangent = planeStats[threadIdx.x].tangent;
glm::vec3 normal = glm::vec3(planeStats[threadIdx.x].norm.x,planeStats[threadIdx.x].norm.y,planeStats[threadIdx.x].norm.z);
glm::vec3 bitangent = glm::normalize(glm::cross(normal, tangent));
glm::vec3 centroid = glm::vec3(planeStats[threadIdx.x].centroid.x,
planeStats[threadIdx.x].centroid.y,
planeStats[threadIdx.x].centroid.z);
aabb = planeStats[threadIdx.x].projParams.aabbMeters;
//Compute camera space coordinates (4 points in clockwise winding from viewpoint)
/* 1----2
* | |
* 4----3
*/
glm::vec3 sp1 = (aabb.x*bitangent)+(aabb.z*tangent)+centroid;//UL, Sxmin,Symin
glm::vec3 sp2 = (aabb.y*bitangent)+(aabb.z*tangent)+centroid;//UR, Sxmax,Symin
glm::vec3 sp3 = (aabb.y*bitangent)+(aabb.w*tangent)+centroid;//LR, Sxmax,Symax
glm::vec3 sp4 = (aabb.x*bitangent)+(aabb.w*tangent)+centroid;//LL, Sxmin,Symax
//Compute screen space projections
float su1 = sp1.x*intr.fx/sp1.z + intr.cx;
float sv1 = sp1.y*intr.fy/sp1.z + intr.cy;
float su2 = sp2.x*intr.fx/sp2.z + intr.cx;
float sv2 = sp2.y*intr.fy/sp2.z + intr.cy;
float su3 = sp3.x*intr.fx/sp3.z + intr.cx;
float sv3 = sp3.y*intr.fy/sp3.z + intr.cy;
float su4 = sp4.x*intr.fx/sp4.z + intr.cx;
float sv4 = sp4.y*intr.fy/sp4.z + intr.cy;
//Compute desired resolution.
float sourceWidthMeters = aabb.y-aabb.x;
float sourceHeightMeters = aabb.w-aabb.z;
//Compute minimum resolution for complete data preservation
float d12 = sqrtf((su1-su2)*(su1-su2)+(sv1-sv2)*(sv1-sv2));
float d23 = sqrtf((su2-su3)*(su2-su3)+(sv2-sv3)*(sv2-sv3));
float d34 = sqrtf((su3-su4)*(su3-su4)+(sv3-sv4)*(sv3-sv4));
float d41 = sqrtf((su4-su1)*(su4-su1)+(sv4-sv1)*(sv4-sv1));
float maxXRatio = MAX(d12,d34)/sourceWidthMeters;
float maxYRatio = MAX(d23,d41)/sourceHeightMeters;
maxRatio = ceil(MAX(maxXRatio,maxYRatio));
maxRatio = roundupnextpow2(maxRatio);
destWidth = maxRatio * sourceWidthMeters;
destHeight = maxRatio * sourceHeightMeters;
//Make sure it fits. If not, then scale down
if(destWidth > maxTextureSize || destHeight > maxTextureSize)
{
int scale = glm::max(ceil(destWidth/float(maxTextureSize)),ceil(destHeight/float(maxTextureSize)));
scale = roundupnextpow2(scale);
destWidth/=scale;
destHeight/=scale;
}
//Compute A matrix (source points to basis vectors)
glm::mat3 A = glm::mat3(su1,sv1,1,su2,sv2,1,su3,sv3,1);
glm::vec3 b = glm::vec3(su4,sv4, 1);
glm::vec3 x = glm::inverse(A)*b;
//mult each row i by xi
for(int i = 0; i < 3; ++i)
{
A[i][0] *= x[i];
A[i][1] *= x[i];
A[i][2] *= x[i];
}
//Compute B matrix (dest points to basis vectors)
glm::mat3 B = glm::mat3(0,0,1,
destWidth,0,1,
destWidth,destHeight,1);
b = glm::vec3(0,destHeight, 1);
x = glm::inverse(B)*b;
//mult each row i by xi
for(int i = 0; i < 3; ++i)
{
B[i][0] *= x[i];
B[i][1] *= x[i];
B[i][2] *= x[i];
}
C = A*glm::inverse(B);
}
planeStats[threadIdx.x].projParams.projectionMatrix = C;
planeStats[threadIdx.x].projParams.aabbMeters = aabb;
planeStats[threadIdx.x].projParams.destWidth = destWidth;
planeStats[threadIdx.x].projParams.destHeight = destHeight;
planeStats[threadIdx.x].projParams.textureResolution = maxRatio;
}
__host__ void calculateProjectionData(rgbd::framework::Intrinsics intr, PlaneStats* planeStats,
int* planeCount, int maxTextureSize, int maxPlanes, int xRes, int yRes)
{
dim3 blocks(1);
dim3 threads(maxPlanes);
hipLaunchKernelGGL(( calculateProjectionDataKernel), dim3(blocks),dim3(threads), 0, 0, intr, planeStats, planeCount, maxTextureSize, xRes, yRes);
}
__global__ void projectTexture(int segmentId, PlaneStats* dev_planeStats,
Float4SOA destTexture, int destTextureSize,
RGBMapSOA rgbMap, int* dev_finalSegmentsBuffer, float* dev_finalDistanceToPlaneBuffer,
int imageXRes, int imageYRes)
{
int destX = blockIdx.x*blockDim.x+threadIdx.x;
int destY = blockIdx.y*blockDim.y+threadIdx.y;
if(destX < destTextureSize && destX < dev_planeStats->projParams.destWidth
&& destY < destTextureSize && destY < dev_planeStats->projParams.destHeight)
{
float r = CUDART_NAN_F;
float g = CUDART_NAN_F;
float b = CUDART_NAN_F;
float dist = CUDART_NAN_F;
//Destination in range
glm::mat3 Tds = dev_planeStats->projParams.projectionMatrix;
glm::vec3 sourceCoords = Tds*glm::vec3(destX, destY, 1.0f);
//Dehomogenization
sourceCoords.x /= sourceCoords.z;
sourceCoords.y /= sourceCoords.z;
if(sourceCoords.x >= 0 && sourceCoords.x < imageXRes
&& sourceCoords.y >= 0 && sourceCoords.y < imageYRes )
{
//In source range
int linIndex = int(sourceCoords.x) + int(sourceCoords.y)*imageXRes;
if(segmentId == dev_finalSegmentsBuffer[linIndex]){
r = rgbMap.r[linIndex];
g = rgbMap.g[linIndex];
b = rgbMap.b[linIndex];
dist = dev_finalDistanceToPlaneBuffer[linIndex];
}
}
destTexture.x[destX + destY*destTextureSize] = r;
destTexture.y[destX + destY*destTextureSize] = g;
destTexture.z[destX + destY*destTextureSize] = b;
destTexture.w[destX + destY*destTextureSize] = dist;
}
}
__host__ void projectTexture(int segmentId, PlaneStats* host_planeStats, PlaneStats* dev_planeStats,
Float4SOA destTexture, int destTextureSize,
RGBMapSOA rgbMap, int* dev_finalSegmentsBuffer, float* dev_finalDistanceToPlaneBuffer,
int imageXRes, int imageYRes)
{
int tileSize = 16;
dim3 threads(tileSize, tileSize);
dim3 blocks((int)ceil(float(host_planeStats->projParams.destWidth)/float(tileSize)),
(int)ceil(float(host_planeStats->projParams.destHeight)/float(tileSize)));
hipLaunchKernelGGL(( projectTexture), dim3(blocks),dim3(threads), 0, 0, segmentId, dev_planeStats, destTexture, destTextureSize,
rgbMap, dev_finalSegmentsBuffer, dev_finalDistanceToPlaneBuffer, imageXRes, imageYRes);
}
__global__ void quadtreeDecimationKernel1(int actualWidth, int actualHeight, Float4SOA planarTexture, int* quadTreeAssemblyBuffer,
int textureBufferSize)
{
extern __shared__ int s_tile[];
//======================Load==========================
//Global index
int gx = threadIdx.x + blockDim.x*blockIdx.x;
int gy = threadIdx.y + blockDim.y*blockIdx.y;
int s_index = threadIdx.x + threadIdx.y*(blockDim.x+1);
int indexInBlock = threadIdx.x + threadIdx.y*blockDim.x;
//Load shared memory
//load core. If in range and texture buffer has valid pixel at this location, load 0. Else, load -1;
int val = -1;
if(gx < actualWidth && gy < actualHeight)
{
float pixelContents = planarTexture.x[gx+gy*textureBufferSize];
if(pixelContents == pixelContents)
{
val = 0;//Pixel is valid point. save
}
}
s_tile[s_index] = val;
//Load apron
if(indexInBlock < (blockDim.x*2+1))//first 33 threads load remaining apron
{
if(indexInBlock < blockDim.x)//first 16 load bottom
{
gx = indexInBlock + blockDim.x*blockIdx.x;
gy = blockDim.y*(blockIdx.y+1);//first row of next block
s_index = indexInBlock + (blockDim.y*(blockDim.x+1));
}else if(indexInBlock < blockDim.x*2){//next 16 load right apron
gx = blockDim.x*(blockIdx.x+1);//First column of next block
gy = blockDim.y*blockIdx.y + (indexInBlock % blockDim.x);//indexInBlock % blockDim.x is y position in block
s_index = blockDim.x + ((indexInBlock % blockDim.x)*(blockDim.x+1));
}else{
//load the corner
gx = blockDim.x*(blockIdx.x+1);
gy = blockDim.y*(blockIdx.y+1);
s_index = blockDim.x + blockDim.y*(blockDim.x+1);
}
val = -1;
if(gx < actualWidth && gy < actualHeight)
{
float pixelContents = planarTexture.x[gx+gy*textureBufferSize];
if(pixelContents == pixelContents)
{
val = 0;//Pixel is valid point. save
}
}
s_tile[s_index] = val;
}
__syncthreads();
//====================Reduction=========================
//Step == 0 is special case. need to initialize baseline quads
bool merge = false;
if(s_tile[threadIdx.x+threadIdx.y*(blockDim.x+1)] == 0)
{
//Check neighbors. If all neighbors right down and right-down diagonal are 0, set to 1.
if( s_tile[(threadIdx.x+1) + (threadIdx.y) *(blockDim.x+1)] == 0
&& s_tile[(threadIdx.x ) + (threadIdx.y+1)*(blockDim.x+1)] == 0
&& s_tile[(threadIdx.x+1) + (threadIdx.y+1)*(blockDim.x+1)] == 0)
{
merge = true;
}
}
__syncthreads();
if(merge)
{
s_tile[threadIdx.x+threadIdx.y*(blockDim.x+1)] = 1;
}
__syncthreads();
//Loop for remaining steps
for(int step = 1; step < blockDim.x; step <<= 1)
{
if((threadIdx.x % (step*2)) == 0 && (threadIdx.y % (step*2)) == 0)
{
//Corner points only.
if( s_tile[(threadIdx.x) + (threadIdx.y) *(blockDim.x+1)] == step
&& s_tile[(threadIdx.x+step) + (threadIdx.y) *(blockDim.x+1)] == step
&& s_tile[(threadIdx.x ) + (threadIdx.y+step)*(blockDim.x+1)] == step
&& s_tile[(threadIdx.x+step) + (threadIdx.y+step)*(blockDim.x+1)] == step)
{
//Upgrade degree of this point
s_tile[(threadIdx.x) + (threadIdx.y) *(blockDim.x+1)] *= 2;
//Clear definitely removed points
s_tile[(threadIdx.x+step) + (threadIdx.y) *(blockDim.x+1)] = -1;
s_tile[(threadIdx.x ) + (threadIdx.y+step)*(blockDim.x+1)] = -1;
s_tile[(threadIdx.x+step) + (threadIdx.y+step)*(blockDim.x+1)] = -1;
}
}
__syncthreads();
}
//====================Writeback=========================
//writeback core.
gx = threadIdx.x + blockDim.x*blockIdx.x;
gy = threadIdx.y + blockDim.y*blockIdx.y;
s_index = threadIdx.x + threadIdx.y*(blockDim.x+1);
quadTreeAssemblyBuffer[gx+gy*textureBufferSize] = s_tile[s_index];
//no need to writeback apron
}
__global__ void quadtreeDecimationKernel2(int actualWidth, int actualHeight, int* quadTreeAssemblyBuffer, int textureBufferSize)
{
extern __shared__ int s_tile[];
int scaleMultiplier = blockDim.x;
//======================Load==========================
//Global index (scaled by multiplier)
int gx = scaleMultiplier*(threadIdx.x + blockDim.x*blockIdx.x);
int gy = scaleMultiplier*(threadIdx.y + blockDim.y*blockIdx.y);
int s_index = threadIdx.x + threadIdx.y*(blockDim.x+1);
int indexInBlock = threadIdx.x + threadIdx.y*blockDim.x;
//Load shared memory
//load core. If in range and texture buffer has valid pixel at this location, load 0. Else, load -1;
int val = -1;
if(gx < actualWidth && gy < actualHeight)
{
val = quadTreeAssemblyBuffer[gx+gy*textureBufferSize];
}
s_tile[s_index] = val;
//Load apron
if(indexInBlock < (blockDim.x*2+1))//first 33 threads load remaining apron
{
if(indexInBlock < blockDim.x)//first 16 load bottom
{
gx = indexInBlock + blockDim.x*blockIdx.x;
gy = blockDim.y*(blockIdx.y+1);//first row of next block
s_index = indexInBlock + (blockDim.y*(blockDim.x+1));
}else if(indexInBlock < blockDim.x*2){//next 16 load right apron
gx = blockDim.x*(blockIdx.x+1);//First column of next block
gy = blockDim.y*blockIdx.y + (indexInBlock % blockDim.x);//indexInBlock % blockDim.x is y position in block
s_index = blockDim.x + ((indexInBlock % blockDim.x)*(blockDim.x+1));
}else{
//load the corner
gx = blockDim.x*(blockIdx.x+1);
gy = blockDim.y*(blockIdx.y+1);
s_index = blockDim.x + blockDim.y*(blockDim.x+1);
}
gx *= scaleMultiplier;
gy *= scaleMultiplier;
val = -1;
if(gx < actualWidth && gy < actualHeight)
{
val = quadTreeAssemblyBuffer[gx+gy*textureBufferSize];
}
s_tile[s_index] = val;
}
__syncthreads();
//====================Reduction=========================
//Step == 0 is special case. need to initialize baseline quads
bool merge = false;
if(s_tile[threadIdx.x+threadIdx.y*(blockDim.x+1)] == 0)
{
//Check neighbors. If all neighbors right down and right-down diagonal are 0, set to 1.
if( s_tile[(threadIdx.x+1) + (threadIdx.y) *(blockDim.x+1)] == 0
&& s_tile[(threadIdx.x ) + (threadIdx.y+1)*(blockDim.x+1)] == 0
&& s_tile[(threadIdx.x+1) + (threadIdx.y+1)*(blockDim.x+1)] == 0)
{
merge = true;
}
}
__syncthreads();
if(merge)
{
s_tile[threadIdx.x+threadIdx.y*(blockDim.x+1)] = 1;
}
__syncthreads();
//Loop for remaining steps
for(int step = 1; step < blockDim.x; step <<= 1)
{
if((threadIdx.x % (step*2)) == 0 && (threadIdx.y % (step*2)) == 0)
{
//Corner points only.
if( s_tile[(threadIdx.x) + (threadIdx.y) *(blockDim.x+1)] == step*scaleMultiplier
&& s_tile[(threadIdx.x+step) + (threadIdx.y) *(blockDim.x+1)] == step*scaleMultiplier
&& s_tile[(threadIdx.x ) + (threadIdx.y+step)*(blockDim.x+1)] == step*scaleMultiplier
&& s_tile[(threadIdx.x+step) + (threadIdx.y+step)*(blockDim.x+1)] == step*scaleMultiplier)
{
//Upgrade degree of this point
s_tile[(threadIdx.x) + (threadIdx.y) *(blockDim.x+1)] *= 2;
//Clear definitely removed points
s_tile[(threadIdx.x+step) + (threadIdx.y) *(blockDim.x+1)] = -1;
s_tile[(threadIdx.x ) + (threadIdx.y+step)*(blockDim.x+1)] = -1;
s_tile[(threadIdx.x+step) + (threadIdx.y+step)*(blockDim.x+1)] = -1;
}
}
__syncthreads();
}
//====================Writeback=========================
//writeback core.
gx = scaleMultiplier*(threadIdx.x + blockDim.x*blockIdx.x);
gy = scaleMultiplier*(threadIdx.y + blockDim.y*blockIdx.y);
s_index = threadIdx.x + threadIdx.y*(blockDim.x+1);
quadTreeAssemblyBuffer[gx+gy*textureBufferSize] = s_tile[s_index];
//no need to writeback apron
}
__global__ void quadtreeDecimationHolePatchingKernel(int actualWidth, int actualHeight, int* quadTreeAssemblyBuffer, int textureBufferSize)
{
//TODO: Load shared memory to avoid redundant reads
int gx = threadIdx.x + blockDim.x*blockIdx.x;
int gy = threadIdx.y + blockDim.y*blockIdx.y;
if(gx < actualWidth && gy < actualHeight)
{
int degree = quadTreeAssemblyBuffer[gx + gy*textureBufferSize];
if(degree > 0)
{
//Make sure each corner is flagged as 0 or higher
int cornerDegree = quadTreeAssemblyBuffer[(gx+degree) + (gy)*textureBufferSize];
if(cornerDegree < 0) quadTreeAssemblyBuffer[(gx+degree) + (gy)*textureBufferSize] = 0;
cornerDegree = quadTreeAssemblyBuffer[(gx) + (gy+degree)*textureBufferSize];
if(cornerDegree < 0) quadTreeAssemblyBuffer[(gx) + (gy+degree)*textureBufferSize] = 0;
cornerDegree = quadTreeAssemblyBuffer[(gx+degree) + (gy+degree)*textureBufferSize];
if(cornerDegree < 0) quadTreeAssemblyBuffer[(gx+degree) + (gy+degree)*textureBufferSize] = 0;
}
}
}
__host__ void quadtreeDecimation(int actualWidth, int actualHeight, Float4SOA planarTexture, int* quadTreeAssemblyBuffer,
int textureBufferSize)
{
//do two simplification passes. Max quadtree size will therefore be 2*tileSize
int tileSize = 16;
//Pass one, parallel by pixel
dim3 threads(tileSize, tileSize);
dim3 blocks((int)ceil(actualWidth/float(tileSize)),
(int)ceil(actualHeight/float(tileSize)));
int sharedSize = (tileSize+1)*(tileSize+1)*sizeof(int);
hipLaunchKernelGGL(( quadtreeDecimationKernel1), dim3(blocks),dim3(threads),sharedSize, 0, actualWidth, actualHeight, planarTexture, quadTreeAssemblyBuffer, textureBufferSize);
blocks = dim3((int)ceil(actualWidth/float(tileSize*tileSize)),
(int)ceil(actualHeight/float(tileSize*tileSize)));
hipLaunchKernelGGL(( quadtreeDecimationKernel2), dim3(blocks),dim3(threads),sharedSize, 0, actualWidth, actualHeight, quadTreeAssemblyBuffer, textureBufferSize);
//Fill in holes
blocks= dim3((int)ceil(actualWidth/float(tileSize)),
(int)ceil(actualHeight/float(tileSize)));
hipLaunchKernelGGL(( quadtreeDecimationHolePatchingKernel), dim3(blocks),dim3(threads), 0, 0, actualWidth, actualHeight, quadTreeAssemblyBuffer, textureBufferSize);
}
#define MAX_BLOCK_SIZE 1024
#define MAX_GRID_SIZE 65535
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define NO_BANK_CONFLICTS
#ifdef NO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
(((n) >> (2 * LOG_NUM_BANKS)))
#else
#define CONFLICT_FREE_OFFSET(a) (0)
#endif
__global__ void quadTreeExclusiveScanKernel(int width, int* input, int* output, int bufferStride, int* blockResults)
{
extern __shared__ float temp[];
//Offset pointers to this block's row. Avoids the need for more complex indexing
input += bufferStride*blockIdx.x;
output += bufferStride*blockIdx.x;
//Now each row is working with it's own row like a normal exclusive scan of an array length width.
int index = threadIdx.x;
int offset = 1;
int n = 2*blockDim.x;//get actual temp padding
int ai = index;
int bi = index + n/2;
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
//Bounds checking, load shared mem
temp[ai+bankOffsetA] = (ai < width)?(input[ai]>=0?1:0):0;
temp[bi+bankOffsetB] = (bi < width)?(input[bi]>=0?1:0):0;
//Negative vertecies are to be cleared
if(temp[ai+bankOffsetA] < 0)
temp[ai+bankOffsetA] = 0;
if(temp[bi+bankOffsetB] < 0)
temp[bi+bankOffsetB] = 0;
//Reduction step
for (int d = n>>1; d > 0; d >>= 1)
{
__syncthreads(); //Make sure previous step has completed
if (index < d)
{
int ai2 = offset*(2*index+1)-1;
int bi2 = offset*(2*index+2)-1;
ai2 += CONFLICT_FREE_OFFSET(ai2);
bi2 += CONFLICT_FREE_OFFSET(bi2);
temp[bi2] += temp[ai2];
}
offset *= 2; //Adjust offset
}
//Reduction complete
//Clear last element
if(index == 0)
{
blockResults[blockIdx.x] = temp[(n-1)+CONFLICT_FREE_OFFSET(n-1)];
temp[(n-1)+CONFLICT_FREE_OFFSET(n-1)] = 0;
}
//Sweep down
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads(); //wait for previous step to finish
if (index < d)
{
int ai2 = offset*(2*index+1)-1;
int bi2 = offset*(2*index+2)-1;
ai2 += CONFLICT_FREE_OFFSET(ai2);
bi2 += CONFLICT_FREE_OFFSET(bi2);
//Swap
float t = temp[ai2];
temp[ai2] = temp[bi2];
temp[bi2] += t;
}
}
//Sweep complete
__syncthreads();
//Writeback
if(ai < width)
output[ai] = temp[ai+bankOffsetA];
if(bi < width)
output[bi] = temp[bi+bankOffsetB];
}
__global__ void blockResultsExclusiveScanKernel(int* blockResults, int numBlocks, int* totalSumOut)
{
extern __shared__ float temp[];
//Now each row is working with it's own row like a normal exclusive scan of an array length width.
int index = threadIdx.x;
int offset = 1;
int n = 2*blockDim.x;//get actual temp padding
int ai = index;
int bi = index + n/2;
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
//Bounds checking, load shared mem
temp[ai+bankOffsetA] = (ai < numBlocks)?blockResults[ai]:0;
temp[bi+bankOffsetB] = (bi < numBlocks)?blockResults[bi]:0;
//Reduction step
for (int d = n>>1; d > 0; d >>= 1)
{
__syncthreads(); //Make sure previous step has completed
if (index < d)
{
int ai2 = offset*(2*index+1)-1;
int bi2 = offset*(2*index+2)-1;
ai2 += CONFLICT_FREE_OFFSET(ai2);
bi2 += CONFLICT_FREE_OFFSET(bi2);
temp[bi2] += temp[ai2];
}
offset *= 2; //Adjust offset
}
//Reduction complete
//Clear last element
if(index == 0)
{
totalSumOut[0] = temp[(n-1)+CONFLICT_FREE_OFFSET(n-1)];
temp[(n-1)+CONFLICT_FREE_OFFSET(n-1)] = 0;
}
//Sweep down
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads(); //wait for previous step to finish
if (index < d)
{
int ai2 = offset*(2*index+1)-1;
int bi2 = offset*(2*index+2)-1;
ai2 += CONFLICT_FREE_OFFSET(ai2);
bi2 += CONFLICT_FREE_OFFSET(bi2);
//Swap
float t = temp[ai2];
temp[ai2] = temp[bi2];
temp[bi2] += t;
}
}
//Sweep complete
__syncthreads();
//Writeback
if(ai < numBlocks)
blockResults[ai] = temp[ai+bankOffsetA];
if(bi < numBlocks)
blockResults[bi] = temp[bi+bankOffsetB];
}
__global__ void reintegrateResultsKernel(int actualWidth, int textureBufferSize,
int* quadTreeScanResults, int* blockResults)
{
int pixelX = threadIdx.x;
int pixelY = blockIdx.x;
if(pixelX < actualWidth)
{
quadTreeScanResults[pixelX + pixelY*textureBufferSize] += blockResults[pixelY];
}
}
__global__ void scatterResultsKernel(glm::vec4 aabbMeters, int actualWidth, int actualHeight,
int finalTextureWidth, int finalTextureHeight, int textureBufferSize,
int* quadTreeAssemblyBuffer, int* quadTreeScanResults,
int* blockResults, int* indexBuffer, float4* vertexBuffer)
{
int pixelX = threadIdx.x;
int pixelY = blockIdx.x;
if(pixelX < actualWidth)
{
int degree = quadTreeAssemblyBuffer[pixelX + pixelY*textureBufferSize];//Load vertex degree
//Only continue if this is a used vertex in the quadtree
if(degree >= 0)
{
int vertNum = quadTreeScanResults[pixelX + pixelY*textureBufferSize];
//Compute vertex info.
float textureU = float(pixelX)/float(finalTextureWidth);
float textureV = float(pixelY)/float(finalTextureHeight);
//pixelX*(Sxmax-Sxmin)/actualWidth + Sxmin;
float posX = (pixelX*(aabbMeters.y-aabbMeters.x))/float(actualWidth) + aabbMeters.x;
//pixelY*(Symax-Symin)/actualHeight + Symin;
float posY = (pixelY*(aabbMeters.w-aabbMeters.z))/float(actualHeight) + aabbMeters.z;
float4 vertex;
vertex.x = posX;
vertex.y = posY;
vertex.z = textureU;
vertex.w = textureV;
vertexBuffer[vertNum] = vertex;
//Generate mesh
// Quad configuration:
// 0-1
// |/|
// 2-3
// Index order: 0-2-1, 1-2-3
//Already loaded vertnum for 0
int vertNum0 = 0;
int vertNum1 = 0;
int vertNum2 = 0;
int vertNum3 = 0;
//If degree greater than 0, assemble quad
if(degree > 0)
{
//garunteed to be in range by nature of quadtree degree
vertNum0 = vertNum;
vertNum1 = quadTreeScanResults[(pixelX+degree) + (pixelY)*textureBufferSize];
vertNum2 = quadTreeScanResults[(pixelX) + (pixelY+degree)*textureBufferSize];
vertNum3 = quadTreeScanResults[(pixelX+degree) + (pixelY+degree)*textureBufferSize];
}
//Always fill buffer
// Index order: 0-2-1, 1-2-3
int offset = vertNum*6;
indexBuffer[offset+0] = vertNum0;
indexBuffer[offset+1] = vertNum2;
indexBuffer[offset+2] = vertNum1;
indexBuffer[offset+3] = vertNum1;
indexBuffer[offset+4] = vertNum2;
indexBuffer[offset+5] = vertNum3;
}
}
}
__global__ void reshapeTextureKernel(int actualWidth, int actualHeight, int finalTextureWidth, int finalTextureHeight, int textureBufferSize,
Float4SOA planarTexture, float4* finalTexture)
{
int x = threadIdx.x;
int y = blockIdx.x;
int destIndex = x + y * finalTextureWidth;
int sourceIndex = x + y * textureBufferSize;
float4 textureValue = {CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F,CUDART_NAN_F};
if(x < actualWidth && y < actualHeight)
{
textureValue.x = planarTexture.x[sourceIndex];
textureValue.y = planarTexture.y[sourceIndex];
textureValue.z = planarTexture.z[sourceIndex];
textureValue.w = planarTexture.w[sourceIndex];
}
finalTexture[destIndex] = textureValue;
}
__host__ void quadtreeMeshGeneration(glm::vec4 aabbMeters, int actualWidth, int actualHeight, int* quadTreeAssemblyBuffer,
int* quadTreeScanResults, int textureBufferSize, int* blockResults, int blockResultsBufferSize,
int* indexBuffer, float4* vertexBuffer, int* compactCount, int* host_compactCount, int outputBufferSize,
int finalTextureWidth, int finalTextureHeight, Float4SOA planarTexture, float4* finalTexture)
{
int blockSize = roundupnextpow2(actualWidth);
int numBlocks = actualHeight;
dim3 threads(blockSize >> 1);//2 elements per thread
dim3 blocks(numBlocks);
int sharedCount = (blockSize+2)*sizeof(int);
//Make sure size constraints aren't violated
assert(blocks.x <= blockResultsBufferSize);
//Scan blocks
hipLaunchKernelGGL(( quadTreeExclusiveScanKernel), dim3(blocks),dim3(threads),sharedCount, 0, actualWidth, quadTreeAssemblyBuffer,
quadTreeScanResults, textureBufferSize, blockResults);
//Scan block results
int pow2 = roundupnextpow2(numBlocks);
threads = dim3(pow2>>1);
blocks = dim3(1);
assert(pow2 <= blockResultsBufferSize);
sharedCount = (pow2 + 2)*sizeof(int);
hipLaunchKernelGGL(( blockResultsExclusiveScanKernel), dim3(blocks),dim3(threads),sharedCount, 0, blockResults, numBlocks, compactCount);
hipMemcpy(host_compactCount, compactCount, sizeof(int), hipMemcpyDeviceToHost);
//Reintegrate
//Also scatter (generate meshes and vertecies in the process)
threads = dim3(actualWidth);
blocks = dim3(numBlocks);
hipLaunchKernelGGL(( reintegrateResultsKernel), dim3(blocks),dim3(threads), 0, 0, actualWidth, textureBufferSize, quadTreeScanResults, blockResults);
assert(finalTextureWidth <= textureBufferSize);
assert(finalTextureHeight <= textureBufferSize);
hipLaunchKernelGGL(( scatterResultsKernel), dim3(blocks),dim3(threads), 0, 0, aabbMeters, actualWidth, actualHeight, finalTextureWidth, finalTextureHeight, textureBufferSize,
quadTreeAssemblyBuffer, quadTreeScanResults, blockResults, indexBuffer, vertexBuffer);
//Reshape texture to aligned memory
threads = dim3(finalTextureWidth);
blocks = dim3(finalTextureHeight);
hipLaunchKernelGGL(( reshapeTextureKernel), dim3(blocks),dim3(threads), 0, 0, actualWidth, actualHeight, finalTextureWidth, finalTextureHeight, textureBufferSize,
planarTexture, finalTexture);
} | bed9ec8136ae8f280d9dde0940424634ff58ad8b.cu | #include "quadtree.h"
//R2 = R1*multFactor + R2;
__device__ void add_r1_to_r2(glm::mat3 &A, glm::vec3 &b, int r1, int r2, float multFactor)
{
float tmp;
if (r1 == r2) return;
for(int i = 0; i < 3; ++i)
{
A[i][r2] = A[i][r2] + multFactor*A[i][r1];
}
b[r2] = b[r2] + multFactor*b[r1];
}
__device__ void swap_row(glm::mat3 &A, glm::vec3 &b, int r1, int r2)
{
float tmp;
if (r1 == r2) return;
#pragma unroll
for(int i = 0; i < 3; ++i)
{
tmp = A[i][r1];
A[i][r1] = A[i][r2];
A[i][r2] = tmp;
}
tmp = b[r1];
b[r1] = b[r2];
b[r2] = tmp;
}
__device__ void row_mult(glm::mat3 &A, glm::vec3 &b, int r1, float mult)
{
#pragma unroll
for(int i = 0; i < 3; ++i)
{
A[i][r1] = A[i][r1]*mult;
}
b[r1] *= mult;
}
#define APPROXZERO(a) (abs(a) < 0.000001f)
__device__ void makeNonZeroDiagonal(glm::mat3 &A,glm::vec3 b)
{
int permute[3];
if( !APPROXZERO(A[0][0]) && !APPROXZERO(A[1][1]) && !APPROXZERO(A[2][2]))
{
permute[0] = 0; permute[1] = 1; permute[2] = 2;
}else if(!APPROXZERO(A[0][0]) && !APPROXZERO(A[1][2]) && !APPROXZERO(A[2][1]))
{
permute[0] = 0; permute[1] = 2; permute[2] = 1;
}else if(!APPROXZERO(A[0][1]) && !APPROXZERO(A[1][0]) && !APPROXZERO(A[2][2]))
{
permute[0] = 1; permute[1] = 0; permute[2] = 2;
}else if(!APPROXZERO(A[0][1]) && !APPROXZERO(A[1][2]) && !APPROXZERO(A[2][0]))
{
permute[0] = 1; permute[1] = 2; permute[2] = 0;
}else if(!APPROXZERO(A[0][2]) && !APPROXZERO(A[1][0]) && !APPROXZERO(A[2][1]))
{
permute[0] = 2; permute[1] = 0; permute[2] = 1;
}else if(!APPROXZERO(A[0][2]) && !APPROXZERO(A[1][1]) && !APPROXZERO(A[2][0]))
{
permute[0] = 2; permute[1] = 1; permute[2] = 0;
}else{
//ERROR
}
for(int i = 0; i < 3; ++i)
{
if(permute[i] > i)
{
swap_row(A,b, i, permute[i]);
for(int j = 0; j < 3; ++j)
{
if(permute[j] == i)
{
permute[j] = permute[i];
permute[i] = i;
break;
}
}
}
}
}
__device__ glm::vec3 solveAbGaussian(glm::mat3 A, glm::vec3 b)
{
//Make sure diagonals have non-zero entries
if(abs(A[0][0]*A[1][1]*A[2][2]) < 0.000001f)
makeNonZeroDiagonal(A,b);
//Row echelon form
for(int r = 0; r < 3; ++r)
{
float factor = 1.0f/A[r][r];
row_mult(A,b,r,factor);
for(int r2 = r+1; r2 < 3; ++r2)
{
if(abs(A[r][r2]) > 0.000001f)
{
//If A[r][r2] not zero yet,
//Need A[r][r2] + factor*A[r][r] == 0
factor = -A[r][r2]/A[r][r];
add_r1_to_r2(A,b,r,r2,factor);
}
}
}
//Matrix now upper triangular
//Back substitute
for(int r = 0; r < 3; ++r)
{
for(int c = r+1; c < 3; ++c)
{
if(abs(A[c][r]) > 0.000001f)
{
//element is non-zero. Backsubstitute
//Need A[c][r] + factor*A[c][c] == 0
float factor = -A[c][r]/A[c][c];
add_r1_to_r2(A,b,c,r,factor);
}
}
}
return b;
}
//Numthreads is assumed to be a power of two
__device__ void minmaxreduction(float* s_minSx, float* s_maxSx, float* s_minSy, float* s_maxSy, int indexInBlock, int nTotalThreads)
{
int thread2;
float temp;
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1); // divide by two
// only the first half of the threads will be active.
if (indexInBlock < halfPoint)
{
thread2 = indexInBlock + halfPoint;
// Get the shared value stored by another thread
temp = s_minSx[thread2];
if (temp < s_minSx[indexInBlock])
s_minSx[indexInBlock] = temp;
temp = s_minSy[thread2];
if (temp < s_minSy[indexInBlock])
s_minSy[indexInBlock] = temp;
temp = s_maxSx[thread2];
if (temp > s_maxSx[indexInBlock])
s_maxSx[indexInBlock] = temp;
temp = s_maxSy[thread2];
if (temp > s_maxSy[indexInBlock])
s_maxSy[indexInBlock] = temp;
}
__syncthreads();
// Reducing the binary tree size by two:
nTotalThreads = halfPoint;
}
}
__global__ void computeAABBsKernel(PlaneStats* planeStats, int* planeInvIdMap, glm::vec4* aabbsBlockResults,
int* planeCount, int maxPlanes,
Float3SOA positions, float* segmentProjectedSx, float* segmentProjectedSy,
int* finalSegmentsBuffer, int xRes, int yRes)
{
extern __shared__ int s_Mem[];
int* s_InvMap = (int*) s_Mem;
float* s_centroidX = (float*)(s_InvMap + maxPlanes);
float* s_centroidY = s_centroidX + maxPlanes;
float* s_centroidZ = s_centroidY + maxPlanes;
glm::vec3* s_tangents = (glm::vec3*) (s_centroidZ + maxPlanes);
glm::vec3* s_bitangents = s_tangents + maxPlanes;
float* s_minSx = (float*)(s_bitangents + maxPlanes);
float* s_minSy = (s_minSx + blockDim.x*blockDim.y);
float* s_maxSx = (s_minSy + blockDim.x*blockDim.y);
float* s_maxSy = (s_maxSx + blockDim.x*blockDim.y);
int indexInBlock = threadIdx.x + threadIdx.y*blockDim.x;
int imageX = threadIdx.x + blockDim.x*blockIdx.x;
int imageY = threadIdx.y + blockDim.y*blockIdx.y;
int numPlanes = planeCount[0];
if(indexInBlock < maxPlanes)
{
s_InvMap[indexInBlock] = planeInvIdMap[indexInBlock];
if(indexInBlock < numPlanes)
{
//s_aabb[indexInBlock] = glm::vec4(0.0f);
s_tangents[indexInBlock] = planeStats[indexInBlock].tangent;
s_centroidX[indexInBlock] = planeStats[indexInBlock].centroid.x;
s_centroidY[indexInBlock] = planeStats[indexInBlock].centroid.y;
s_centroidZ[indexInBlock] = planeStats[indexInBlock].centroid.z;
//bitangent = norm cross tangent
glm::vec3 norm(planeStats[indexInBlock].norm.x,planeStats[indexInBlock].norm.y,planeStats[indexInBlock].norm.z);
s_bitangents[indexInBlock] = glm::normalize(glm::cross(norm, s_tangents[indexInBlock]));
}
}
__syncthreads();
//Remap segments
int segment = finalSegmentsBuffer[imageX + imageY*xRes];
float sx = 0;
float sy = 0;
if(segment >= 0)
{
//Remap and writeback
segment = s_InvMap[segment];
finalSegmentsBuffer[imageX + imageY*xRes] = segment;
//Compute Sx and Sy
glm::vec3 dp = glm::vec3(positions.x[imageX + imageY*xRes] - s_centroidX[segment],
positions.y[imageX + imageY*xRes] - s_centroidY[segment],
positions.z[imageX + imageY*xRes] - s_centroidZ[segment]);
sx = glm::dot(dp, s_bitangents[segment]);
sy = glm::dot(dp, s_tangents[segment]);
}
//writeback
segmentProjectedSx[imageX + imageY*xRes] = sx;
segmentProjectedSy[imageX + imageY*xRes] = sy;
__syncthreads();
//Repurpose invmap sharedmem for segment flags
if(indexInBlock < maxPlanes)
{
s_InvMap[indexInBlock] = 0;
}
__syncthreads();
if(segment >= 0)//flag each segment that exists in this block
s_InvMap[segment] = 1;
for(int plane = 0; plane < numPlanes; ++plane)
{
if(s_InvMap[plane] > 0)
{
//Init minmax planes
s_minSx[indexInBlock] = (segment == plane)?sx:0;
s_maxSx[indexInBlock] = (segment == plane)?sx:0;
s_minSy[indexInBlock] = (segment == plane)?sy:0;
s_maxSy[indexInBlock] = (segment == plane)?sy:0;
__syncthreads();
minmaxreduction(s_minSx, s_maxSx, s_minSy, s_maxSy, indexInBlock, blockDim.x*blockDim.y);
//Threads already synced in function
if(indexInBlock == 0)
{
aabbsBlockResults[(blockIdx.x + blockIdx.y*gridDim.x) + plane*gridDim.x*gridDim.y]
= glm::vec4(s_minSx[0], s_maxSx[0],s_minSy[0],s_maxSy[0]);
}
}else{
if(indexInBlock == 0)
aabbsBlockResults[(blockIdx.x + blockIdx.y*gridDim.x) + plane*gridDim.x*gridDim.y] = glm::vec4(0.0f);
}
}
}
__global__ void reduceAABBsKernel(PlaneStats* planeStats, glm::vec4* aabbsBlockResults, int numBlocks, int maxPlanes, int* planeCount)
{
extern __shared__ float s_temp[];
float* s_minSx = s_temp;
float* s_minSy = (s_minSx + blockDim.x);
float* s_maxSx = (s_minSy + blockDim.x);
float* s_maxSy = (s_maxSx + blockDim.x);
//two elements loaded per thread
int i = threadIdx.x;
int i2 = threadIdx.x + blockDim.x;
int numPlanes = planeCount[0];
for(int plane = 0; plane < numPlanes; ++plane)
{
glm::vec4 aabb1(0.0f);
glm::vec4 aabb2(0.0f);
if(i < numBlocks)
aabb1 = aabbsBlockResults[i + plane*numBlocks];
if(i2 < numBlocks)
aabb2 = aabbsBlockResults[i2 + plane*numBlocks];
s_minSx[i] = MIN(aabb1.x,aabb2.x);
s_maxSx[i] = MAX(aabb1.y,aabb2.y);
s_minSy[i] = MIN(aabb1.z,aabb2.z);
s_maxSy[i] = MAX(aabb1.w,aabb2.w);
__syncthreads();
minmaxreduction(s_minSx, s_maxSx, s_minSy, s_maxSy, i, blockDim.x);
if(threadIdx.x == 0)
planeStats[plane].projParams.aabbMeters = glm::vec4(s_minSx[0], s_maxSx[0],s_minSy[0],s_maxSy[0]);
}
}
__host__ __device__ int roundupnextpow2 (int x)
{
if (x < 0)
return 0;
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return x+1;
}
__host__ void computeAABBs(PlaneStats* planeStats, int* planeInvIdMap, glm::vec4* aabbsBlockResults,
int* planeCount, int maxPlanes,
Float3SOA positions, float* segmentProjectedSx, float* segmentProjectedSy,
int* finalSegmentsBuffer, int xRes, int yRes)
{
int blockWidth = AABB_COMPUTE_BLOCKWIDTH;
int blockHeight = AABB_COMPUTE_BLOCKHEIGHT;
assert(blockHeight*blockWidth >= maxPlanes);
dim3 threads(blockWidth, blockHeight);
dim3 blocks((int) ceil(xRes/float(blockWidth)), (int) ceil(yRes/float(blockHeight)));
//plane map, tangent, bitangent, centroid and aabb of each plane loaded to shared memory.
int sharedMem = maxPlanes*(sizeof(int) + sizeof(float)*3+sizeof(glm::vec3)*2) + blockWidth*blockHeight*4*sizeof(float);
computeAABBsKernel<<<blocks,threads,sharedMem>>>(planeStats, planeInvIdMap, aabbsBlockResults, planeCount, maxPlanes,
positions, segmentProjectedSx, segmentProjectedSy,
finalSegmentsBuffer, xRes, yRes);
int numBlocks = blocks.x*blocks.y;
int pow2Blocks = roundupnextpow2 (numBlocks) >> 1;//Next lowest power of two
assert(pow2Blocks <= 1024);
threads = dim3(pow2Blocks);
blocks = dim3(1);
sharedMem = 4*sizeof(float)*pow2Blocks;
reduceAABBsKernel<<<blocks,threads,sharedMem>>>(planeStats, aabbsBlockResults, numBlocks, maxPlanes, planeCount);
}
__global__ void calculateProjectionDataKernel(rgbd::framework::Intrinsics intr, PlaneStats* planeStats,
int* planeCount, int maxTextureSize, int xRes, int yRes)
{
glm::mat3 C(1.0f);
int destWidth = 0;
int destHeight = 0;
int maxRatio = 0;
glm::vec4 aabb;
if(threadIdx.x < planeCount[0])
{
//In range and valid plane.
glm::vec3 tangent = planeStats[threadIdx.x].tangent;
glm::vec3 normal = glm::vec3(planeStats[threadIdx.x].norm.x,planeStats[threadIdx.x].norm.y,planeStats[threadIdx.x].norm.z);
glm::vec3 bitangent = glm::normalize(glm::cross(normal, tangent));
glm::vec3 centroid = glm::vec3(planeStats[threadIdx.x].centroid.x,
planeStats[threadIdx.x].centroid.y,
planeStats[threadIdx.x].centroid.z);
aabb = planeStats[threadIdx.x].projParams.aabbMeters;
//Compute camera space coordinates (4 points in clockwise winding from viewpoint)
/* 1----2
* | |
* 4----3
*/
glm::vec3 sp1 = (aabb.x*bitangent)+(aabb.z*tangent)+centroid;//UL, Sxmin,Symin
glm::vec3 sp2 = (aabb.y*bitangent)+(aabb.z*tangent)+centroid;//UR, Sxmax,Symin
glm::vec3 sp3 = (aabb.y*bitangent)+(aabb.w*tangent)+centroid;//LR, Sxmax,Symax
glm::vec3 sp4 = (aabb.x*bitangent)+(aabb.w*tangent)+centroid;//LL, Sxmin,Symax
//Compute screen space projections
float su1 = sp1.x*intr.fx/sp1.z + intr.cx;
float sv1 = sp1.y*intr.fy/sp1.z + intr.cy;
float su2 = sp2.x*intr.fx/sp2.z + intr.cx;
float sv2 = sp2.y*intr.fy/sp2.z + intr.cy;
float su3 = sp3.x*intr.fx/sp3.z + intr.cx;
float sv3 = sp3.y*intr.fy/sp3.z + intr.cy;
float su4 = sp4.x*intr.fx/sp4.z + intr.cx;
float sv4 = sp4.y*intr.fy/sp4.z + intr.cy;
//Compute desired resolution.
float sourceWidthMeters = aabb.y-aabb.x;
float sourceHeightMeters = aabb.w-aabb.z;
//Compute minimum resolution for complete data preservation
float d12 = sqrtf((su1-su2)*(su1-su2)+(sv1-sv2)*(sv1-sv2));
float d23 = sqrtf((su2-su3)*(su2-su3)+(sv2-sv3)*(sv2-sv3));
float d34 = sqrtf((su3-su4)*(su3-su4)+(sv3-sv4)*(sv3-sv4));
float d41 = sqrtf((su4-su1)*(su4-su1)+(sv4-sv1)*(sv4-sv1));
float maxXRatio = MAX(d12,d34)/sourceWidthMeters;
float maxYRatio = MAX(d23,d41)/sourceHeightMeters;
maxRatio = ceil(MAX(maxXRatio,maxYRatio));
maxRatio = roundupnextpow2(maxRatio);
destWidth = maxRatio * sourceWidthMeters;
destHeight = maxRatio * sourceHeightMeters;
//Make sure it fits. If not, then scale down
if(destWidth > maxTextureSize || destHeight > maxTextureSize)
{
int scale = glm::max(ceil(destWidth/float(maxTextureSize)),ceil(destHeight/float(maxTextureSize)));
scale = roundupnextpow2(scale);
destWidth/=scale;
destHeight/=scale;
}
//Compute A matrix (source points to basis vectors)
glm::mat3 A = glm::mat3(su1,sv1,1,su2,sv2,1,su3,sv3,1);
glm::vec3 b = glm::vec3(su4,sv4, 1);
glm::vec3 x = glm::inverse(A)*b;
//mult each row i by xi
for(int i = 0; i < 3; ++i)
{
A[i][0] *= x[i];
A[i][1] *= x[i];
A[i][2] *= x[i];
}
//Compute B matrix (dest points to basis vectors)
glm::mat3 B = glm::mat3(0,0,1,
destWidth,0,1,
destWidth,destHeight,1);
b = glm::vec3(0,destHeight, 1);
x = glm::inverse(B)*b;
//mult each row i by xi
for(int i = 0; i < 3; ++i)
{
B[i][0] *= x[i];
B[i][1] *= x[i];
B[i][2] *= x[i];
}
C = A*glm::inverse(B);
}
planeStats[threadIdx.x].projParams.projectionMatrix = C;
planeStats[threadIdx.x].projParams.aabbMeters = aabb;
planeStats[threadIdx.x].projParams.destWidth = destWidth;
planeStats[threadIdx.x].projParams.destHeight = destHeight;
planeStats[threadIdx.x].projParams.textureResolution = maxRatio;
}
__host__ void calculateProjectionData(rgbd::framework::Intrinsics intr, PlaneStats* planeStats,
int* planeCount, int maxTextureSize, int maxPlanes, int xRes, int yRes)
{
dim3 blocks(1);
dim3 threads(maxPlanes);
calculateProjectionDataKernel<<<blocks,threads>>>(intr, planeStats, planeCount, maxTextureSize, xRes, yRes);
}
__global__ void projectTexture(int segmentId, PlaneStats* dev_planeStats,
Float4SOA destTexture, int destTextureSize,
RGBMapSOA rgbMap, int* dev_finalSegmentsBuffer, float* dev_finalDistanceToPlaneBuffer,
int imageXRes, int imageYRes)
{
int destX = blockIdx.x*blockDim.x+threadIdx.x;
int destY = blockIdx.y*blockDim.y+threadIdx.y;
if(destX < destTextureSize && destX < dev_planeStats->projParams.destWidth
&& destY < destTextureSize && destY < dev_planeStats->projParams.destHeight)
{
float r = CUDART_NAN_F;
float g = CUDART_NAN_F;
float b = CUDART_NAN_F;
float dist = CUDART_NAN_F;
//Destination in range
glm::mat3 Tds = dev_planeStats->projParams.projectionMatrix;
glm::vec3 sourceCoords = Tds*glm::vec3(destX, destY, 1.0f);
//Dehomogenization
sourceCoords.x /= sourceCoords.z;
sourceCoords.y /= sourceCoords.z;
if(sourceCoords.x >= 0 && sourceCoords.x < imageXRes
&& sourceCoords.y >= 0 && sourceCoords.y < imageYRes )
{
//In source range
int linIndex = int(sourceCoords.x) + int(sourceCoords.y)*imageXRes;
if(segmentId == dev_finalSegmentsBuffer[linIndex]){
r = rgbMap.r[linIndex];
g = rgbMap.g[linIndex];
b = rgbMap.b[linIndex];
dist = dev_finalDistanceToPlaneBuffer[linIndex];
}
}
destTexture.x[destX + destY*destTextureSize] = r;
destTexture.y[destX + destY*destTextureSize] = g;
destTexture.z[destX + destY*destTextureSize] = b;
destTexture.w[destX + destY*destTextureSize] = dist;
}
}
__host__ void projectTexture(int segmentId, PlaneStats* host_planeStats, PlaneStats* dev_planeStats,
Float4SOA destTexture, int destTextureSize,
RGBMapSOA rgbMap, int* dev_finalSegmentsBuffer, float* dev_finalDistanceToPlaneBuffer,
int imageXRes, int imageYRes)
{
int tileSize = 16;
dim3 threads(tileSize, tileSize);
dim3 blocks((int)ceil(float(host_planeStats->projParams.destWidth)/float(tileSize)),
(int)ceil(float(host_planeStats->projParams.destHeight)/float(tileSize)));
projectTexture<<<blocks,threads>>>(segmentId, dev_planeStats, destTexture, destTextureSize,
rgbMap, dev_finalSegmentsBuffer, dev_finalDistanceToPlaneBuffer, imageXRes, imageYRes);
}
// Pass 1 of quadtree decimation: builds per-pixel quad "degrees" inside each
// thread block's tile. Each thread owns one pixel; shared memory holds the
// (blockDim.x+1) x (blockDim.y+1) tile including a one-element right/bottom apron.
// Cell values: -1 = invalid/removed pixel, 0 = valid unmerged vertex, and after
// the reduction a power-of-two value equal to the edge length (in pixels) of the
// largest fully-valid quad anchored at that pixel (up to blockDim.x).
// Launch contract: square blocks (blockDim.x == blockDim.y, power of two, e.g. 16x16),
// dynamic shared memory of (blockDim.x+1)*(blockDim.y+1) ints.
__global__ void quadtreeDecimationKernel1(int actualWidth, int actualHeight, Float4SOA planarTexture, int* quadTreeAssemblyBuffer,
int textureBufferSize)
{
extern __shared__ int s_tile[];
//======================Load==========================
//Global index
int gx = threadIdx.x + blockDim.x*blockIdx.x;
int gy = threadIdx.y + blockDim.y*blockIdx.y;
// Shared tile uses a row stride of blockDim.x+1 to accommodate the apron column.
int s_index = threadIdx.x + threadIdx.y*(blockDim.x+1);
int indexInBlock = threadIdx.x + threadIdx.y*blockDim.x;
//Load shared memory
//load core. If in range and texture buffer has valid pixel at this location, load 0. Else, load -1;
int val = -1;
if(gx < actualWidth && gy < actualHeight)
{
float pixelContents = planarTexture.x[gx+gy*textureBufferSize];
// Self-comparison is a NaN test: NaN marks an invalid texel in the planar texture.
if(pixelContents == pixelContents)
{
val = 0;//Pixel is valid point. save
}
}
s_tile[s_index] = val;
//Load apron
// The first 2*blockDim.x+1 threads (in linearized order) also fetch the extra
// bottom row, right column, and bottom-right corner from the neighboring blocks.
if(indexInBlock < (blockDim.x*2+1))//first 33 threads load remaining apron
{
if(indexInBlock < blockDim.x)//first 16 load bottom
{
gx = indexInBlock + blockDim.x*blockIdx.x;
gy = blockDim.y*(blockIdx.y+1);//first row of next block
s_index = indexInBlock + (blockDim.y*(blockDim.x+1));
}else if(indexInBlock < blockDim.x*2){//next 16 load right apron
gx = blockDim.x*(blockIdx.x+1);//First column of next block
gy = blockDim.y*blockIdx.y + (indexInBlock % blockDim.x);//indexInBlock % blockDim.x is y position in block
s_index = blockDim.x + ((indexInBlock % blockDim.x)*(blockDim.x+1));
}else{
//load the corner
gx = blockDim.x*(blockIdx.x+1);
gy = blockDim.y*(blockIdx.y+1);
s_index = blockDim.x + blockDim.y*(blockDim.x+1);
}
val = -1;
if(gx < actualWidth && gy < actualHeight)
{
float pixelContents = planarTexture.x[gx+gy*textureBufferSize];
// NaN test again: only non-NaN texels count as valid.
if(pixelContents == pixelContents)
{
val = 0;//Pixel is valid point. save
}
}
s_tile[s_index] = val;
}
__syncthreads();
//====================Reduction=========================
//Step == 0 is special case. need to initialize baseline quads
// Read phase and write phase are separated by barriers so neighbor reads below
// never race with the writes that follow.
bool merge = false;
if(s_tile[threadIdx.x+threadIdx.y*(blockDim.x+1)] == 0)
{
//Check neighbors. If all neighbors right down and right-down diagonal are 0, set to 1.
if( s_tile[(threadIdx.x+1) + (threadIdx.y) *(blockDim.x+1)] == 0
&& s_tile[(threadIdx.x ) + (threadIdx.y+1)*(blockDim.x+1)] == 0
&& s_tile[(threadIdx.x+1) + (threadIdx.y+1)*(blockDim.x+1)] == 0)
{
merge = true;
}
}
__syncthreads();
if(merge)
{
s_tile[threadIdx.x+threadIdx.y*(blockDim.x+1)] = 1;
}
__syncthreads();
//Loop for remaining steps
// At step s, corner threads (indices divisible by 2s) merge four degree-s quads
// into one degree-2s quad; the three absorbed corners are marked removed (-1).
// The cells read at +step offsets are never corner cells of another thread at the
// same step, so the in-place update does not race.
for(int step = 1; step < blockDim.x; step <<= 1)
{
if((threadIdx.x % (step*2)) == 0 && (threadIdx.y % (step*2)) == 0)
{
//Corner points only.
if( s_tile[(threadIdx.x) + (threadIdx.y) *(blockDim.x+1)] == step
&& s_tile[(threadIdx.x+step) + (threadIdx.y) *(blockDim.x+1)] == step
&& s_tile[(threadIdx.x ) + (threadIdx.y+step)*(blockDim.x+1)] == step
&& s_tile[(threadIdx.x+step) + (threadIdx.y+step)*(blockDim.x+1)] == step)
{
//Upgrade degree of this point
s_tile[(threadIdx.x) + (threadIdx.y) *(blockDim.x+1)] *= 2;
//Clear definitely removed points
s_tile[(threadIdx.x+step) + (threadIdx.y) *(blockDim.x+1)] = -1;
s_tile[(threadIdx.x ) + (threadIdx.y+step)*(blockDim.x+1)] = -1;
s_tile[(threadIdx.x+step) + (threadIdx.y+step)*(blockDim.x+1)] = -1;
}
}
__syncthreads();
}
//====================Writeback=========================
//writeback core.
gx = threadIdx.x + blockDim.x*blockIdx.x;
gy = threadIdx.y + blockDim.y*blockIdx.y;
s_index = threadIdx.x + threadIdx.y*(blockDim.x+1);
// NOTE(review): no bounds check here -- threads with gx >= actualWidth or
// gy >= actualHeight (grid padding) also write. This assumes the buffer's
// textureBufferSize stride covers the rounded-up grid; confirm at the call site.
quadTreeAssemblyBuffer[gx+gy*textureBufferSize] = s_tile[s_index];
//no need to writeback apron
}
// Pass 2 of quadtree decimation: repeats the reduction of kernel 1, but over a
// sparse sampling of the degree map -- each thread samples the buffer at a stride
// of scaleMultiplier (== blockDim.x) pixels, so fully-merged tiles from pass 1
// (value == scaleMultiplier) can merge further into larger quads.
// Launch contract: square power-of-two blocks matching pass 1's tile size, and
// dynamic shared memory of (blockDim.x+1)*(blockDim.y+1) ints.
__global__ void quadtreeDecimationKernel2(int actualWidth, int actualHeight, int* quadTreeAssemblyBuffer, int textureBufferSize)
{
extern __shared__ int s_tile[];
int scaleMultiplier = blockDim.x;
//======================Load==========================
//Global index (scaled by multiplier)
int gx = scaleMultiplier*(threadIdx.x + blockDim.x*blockIdx.x);
int gy = scaleMultiplier*(threadIdx.y + blockDim.y*blockIdx.y);
// Shared tile row stride is blockDim.x+1 to make room for the apron column.
int s_index = threadIdx.x + threadIdx.y*(blockDim.x+1);
int indexInBlock = threadIdx.x + threadIdx.y*blockDim.x;
//Load shared memory
//load core. If in range and texture buffer has valid pixel at this location, load 0. Else, load -1;
int val = -1;
if(gx < actualWidth && gy < actualHeight)
{
val = quadTreeAssemblyBuffer[gx+gy*textureBufferSize];
}
s_tile[s_index] = val;
//Load apron
// First 2*blockDim.x+1 threads also fetch the bottom row, right column and
// corner samples belonging to the neighboring blocks.
if(indexInBlock < (blockDim.x*2+1))//first 33 threads load remaining apron
{
if(indexInBlock < blockDim.x)//first 16 load bottom
{
gx = indexInBlock + blockDim.x*blockIdx.x;
gy = blockDim.y*(blockIdx.y+1);//first row of next block
s_index = indexInBlock + (blockDim.y*(blockDim.x+1));
}else if(indexInBlock < blockDim.x*2){//next 16 load right apron
gx = blockDim.x*(blockIdx.x+1);//First column of next block
gy = blockDim.y*blockIdx.y + (indexInBlock % blockDim.x);//indexInBlock % blockDim.x is y position in block
s_index = blockDim.x + ((indexInBlock % blockDim.x)*(blockDim.x+1));
}else{
//load the corner
gx = blockDim.x*(blockIdx.x+1);
gy = blockDim.y*(blockIdx.y+1);
s_index = blockDim.x + blockDim.y*(blockDim.x+1);
}
// Scale the apron coordinates the same way as the core load above.
gx *= scaleMultiplier;
gy *= scaleMultiplier;
val = -1;
if(gx < actualWidth && gy < actualHeight)
{
val = quadTreeAssemblyBuffer[gx+gy*textureBufferSize];
}
s_tile[s_index] = val;
}
__syncthreads();
//====================Reduction=========================
//Step == 0 is special case. need to initialize baseline quads
// NOTE(review): this step-0 merge matches kernel 1 verbatim -- it promotes 0-valued
// samples to 1, not to a scaled degree, while the loop below compares against
// step*scaleMultiplier. Verify the interaction of 0/1-valued cells with the scaled
// comparisons is intended.
bool merge = false;
if(s_tile[threadIdx.x+threadIdx.y*(blockDim.x+1)] == 0)
{
//Check neighbors. If all neighbors right down and right-down diagonal are 0, set to 1.
if( s_tile[(threadIdx.x+1) + (threadIdx.y) *(blockDim.x+1)] == 0
&& s_tile[(threadIdx.x ) + (threadIdx.y+1)*(blockDim.x+1)] == 0
&& s_tile[(threadIdx.x+1) + (threadIdx.y+1)*(blockDim.x+1)] == 0)
{
merge = true;
}
}
__syncthreads();
if(merge)
{
s_tile[threadIdx.x+threadIdx.y*(blockDim.x+1)] = 1;
}
__syncthreads();
//Loop for remaining steps
// Same merge scheme as kernel 1, but degrees are expressed in full-resolution
// pixels, hence the comparison against step*scaleMultiplier.
for(int step = 1; step < blockDim.x; step <<= 1)
{
if((threadIdx.x % (step*2)) == 0 && (threadIdx.y % (step*2)) == 0)
{
//Corner points only.
if( s_tile[(threadIdx.x) + (threadIdx.y) *(blockDim.x+1)] == step*scaleMultiplier
&& s_tile[(threadIdx.x+step) + (threadIdx.y) *(blockDim.x+1)] == step*scaleMultiplier
&& s_tile[(threadIdx.x ) + (threadIdx.y+step)*(blockDim.x+1)] == step*scaleMultiplier
&& s_tile[(threadIdx.x+step) + (threadIdx.y+step)*(blockDim.x+1)] == step*scaleMultiplier)
{
//Upgrade degree of this point
s_tile[(threadIdx.x) + (threadIdx.y) *(blockDim.x+1)] *= 2;
//Clear definitely removed points
s_tile[(threadIdx.x+step) + (threadIdx.y) *(blockDim.x+1)] = -1;
s_tile[(threadIdx.x ) + (threadIdx.y+step)*(blockDim.x+1)] = -1;
s_tile[(threadIdx.x+step) + (threadIdx.y+step)*(blockDim.x+1)] = -1;
}
}
__syncthreads();
}
//====================Writeback=========================
//writeback core.
gx = scaleMultiplier*(threadIdx.x + blockDim.x*blockIdx.x);
gy = scaleMultiplier*(threadIdx.y + blockDim.y*blockIdx.y);
s_index = threadIdx.x + threadIdx.y*(blockDim.x+1);
// NOTE(review): like kernel 1, the writeback is not bounds-checked against
// actualWidth/actualHeight -- confirm grid padding stays within the buffer.
quadTreeAssemblyBuffer[gx+gy*textureBufferSize] = s_tile[s_index];
//no need to writeback apron
}
// Ensures every corner vertex referenced by a quad actually exists.
// For each pixel anchoring a quad of edge length `degree` (> 0), the three far
// corners (right, below, diagonal) are promoted from "removed" (< 0) to degree 0
// so the later scan/scatter passes emit vertices for them.
// Launch contract: one thread per pixel of the actualWidth x actualHeight region;
// quadTreeAssemblyBuffer rows have stride textureBufferSize.
__global__ void quadtreeDecimationHolePatchingKernel(int actualWidth, int actualHeight, int* quadTreeAssemblyBuffer, int textureBufferSize)
{
    //TODO: Load shared memory to avoid redundant reads
    const int px = threadIdx.x + blockDim.x * blockIdx.x;
    const int py = threadIdx.y + blockDim.y * blockIdx.y;
    if (px >= actualWidth || py >= actualHeight)
        return;
    const int degree = quadTreeAssemblyBuffer[px + py * textureBufferSize];
    if (degree <= 0)
        return;
    // The three remaining corners of this quad; in range by construction of the quadtree.
    const int cornerIdx[3] = {
        (px + degree) + (py) * textureBufferSize,          // right corner
        (px) + (py + degree) * textureBufferSize,          // bottom corner
        (px + degree) + (py + degree) * textureBufferSize  // diagonal corner
    };
    for (int i = 0; i < 3; ++i)
    {
        if (quadTreeAssemblyBuffer[cornerIdx[i]] < 0)
            quadTreeAssemblyBuffer[cornerIdx[i]] = 0;
    }
}
// Host driver: decimates the planar texture into a quadtree vertex-degree map
// stored in quadTreeAssemblyBuffer (row stride textureBufferSize).
// Runs two merge passes followed by a hole-patching pass.
__host__ void quadtreeDecimation(int actualWidth, int actualHeight, Float4SOA planarTexture, int* quadTreeAssemblyBuffer,
int textureBufferSize)
{
    //do two simplification passes. Max quadtree size will therefore be 2*tileSize
    const int tileSize = 16;
    const dim3 threads(tileSize, tileSize);
    // Shared tile carries a one-element apron on each axis.
    const int sharedSize = (tileSize + 1) * (tileSize + 1) * sizeof(int);
    // Pass one: one thread per pixel.
    dim3 pass1Blocks((int)ceil(actualWidth / float(tileSize)),
                     (int)ceil(actualHeight / float(tileSize)));
    quadtreeDecimationKernel1<<<pass1Blocks, threads, sharedSize>>>(actualWidth, actualHeight, planarTexture,
                                                                    quadTreeAssemblyBuffer, textureBufferSize);
    // Pass two: one thread per tileSize-spaced sample of the degree map.
    dim3 pass2Blocks((int)ceil(actualWidth / float(tileSize * tileSize)),
                     (int)ceil(actualHeight / float(tileSize * tileSize)));
    quadtreeDecimationKernel2<<<pass2Blocks, threads, sharedSize>>>(actualWidth, actualHeight,
                                                                    quadTreeAssemblyBuffer, textureBufferSize);
    // Patch holes so every quad corner referenced by the mesh gets a vertex.
    dim3 patchBlocks((int)ceil(actualWidth / float(tileSize)),
                     (int)ceil(actualHeight / float(tileSize)));
    quadtreeDecimationHolePatchingKernel<<<patchBlocks, threads>>>(actualWidth, actualHeight,
                                                                   quadTreeAssemblyBuffer, textureBufferSize);
}
#define MAX_BLOCK_SIZE 1024
#define MAX_GRID_SIZE 65535
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define NO_BANK_CONFLICTS
#ifdef NO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
(((n) >> (2 * LOG_NUM_BANKS)))
#else
#define CONFLICT_FREE_OFFSET(a) (0)
#endif
// Row-wise exclusive prefix scan used for stream-compacting quadtree vertices.
// Each block scans one row of `input` (rows separated by `bufferStride`): an
// element counts 1 when input[i] >= 0 (a live vertex) and 0 otherwise. The
// exclusive scan of those flags is written to the matching row of `output`,
// and the row total (live-vertex count) to blockResults[blockIdx.x].
// Classic Blelloch up-sweep/down-sweep with bank-conflict padding.
// Launch contract: gridDim.x = number of rows; blockDim.x = half the (power of
// two) scan length, two elements per thread; dynamic shared memory sized for
// 2*blockDim.x floats plus CONFLICT_FREE_OFFSET padding.
// Fix vs. previous revision: removed the dead "clear negative vertices" branch --
// the load ternary below already maps every element to 0 or 1, so the old
// `temp[...] < 0` checks could never fire.
__global__ void quadTreeExclusiveScanKernel(int width, int* input, int* output, int bufferStride, int* blockResults)
{
    extern __shared__ float temp[];
    // Offset pointers to this block's row; avoids more complex indexing below.
    input += bufferStride * blockIdx.x;
    output += bufferStride * blockIdx.x;
    int index = threadIdx.x;
    int offset = 1;
    int n = 2 * blockDim.x; // scan length (power of two)
    int ai = index;
    int bi = index + n / 2;
    int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
    // Bounds-checked load: 1 for live vertices (>= 0), 0 for removed (< 0) or
    // out-of-range entries. Counts are small integers stored in float.
    temp[ai + bankOffsetA] = (ai < width) ? (input[ai] >= 0 ? 1 : 0) : 0;
    temp[bi + bankOffsetB] = (bi < width) ? (input[bi] >= 0 ? 1 : 0) : 0;
    // Up-sweep (reduce) phase: build partial sums in place.
    for (int d = n >> 1; d > 0; d >>= 1)
    {
        __syncthreads(); // make sure the previous step has completed
        if (index < d)
        {
            int ai2 = offset * (2 * index + 1) - 1;
            int bi2 = offset * (2 * index + 2) - 1;
            ai2 += CONFLICT_FREE_OFFSET(ai2);
            bi2 += CONFLICT_FREE_OFFSET(bi2);
            temp[bi2] += temp[ai2];
        }
        offset *= 2; // adjust offset
    }
    // Reduction complete: stash the row total, then clear the last element so
    // the down-sweep yields an *exclusive* scan.
    if (index == 0)
    {
        blockResults[blockIdx.x] = temp[(n - 1) + CONFLICT_FREE_OFFSET(n - 1)];
        temp[(n - 1) + CONFLICT_FREE_OFFSET(n - 1)] = 0;
    }
    // Down-sweep phase: traverse back down the tree building the scan.
    for (int d = 1; d < n; d *= 2)
    {
        offset >>= 1;
        __syncthreads(); // wait for the previous step to finish
        if (index < d)
        {
            int ai2 = offset * (2 * index + 1) - 1;
            int bi2 = offset * (2 * index + 2) - 1;
            ai2 += CONFLICT_FREE_OFFSET(ai2);
            bi2 += CONFLICT_FREE_OFFSET(bi2);
            // Swap-and-accumulate.
            float t = temp[ai2];
            temp[ai2] = temp[bi2];
            temp[bi2] += t;
        }
    }
    __syncthreads(); // all shared-memory results visible before writeback
    // Bounds-checked writeback of the scanned flags.
    if (ai < width)
        output[ai] = temp[ai + bankOffsetA];
    if (bi < width)
        output[bi] = temp[bi + bankOffsetB];
}
// Second-level scan: exclusive prefix scan of the per-row totals produced by
// quadTreeExclusiveScanKernel, done in-place in blockResults. The grand total
// (total live-vertex count) is written to totalSumOut[0].
// Launch contract: a single block; blockDim.x = half the power-of-two length
// covering numBlocks; dynamic shared memory sized for 2*blockDim.x floats plus
// CONFLICT_FREE_OFFSET padding. Same Blelloch up/down-sweep as the row scan.
// NOTE(review): partial sums accumulate in float -- exact only while totals stay
// below 2^24; confirm vertex counts cannot exceed that.
__global__ void blockResultsExclusiveScanKernel(int* blockResults, int numBlocks, int* totalSumOut)
{
extern __shared__ float temp[];
//Now each row is working with it's own row like a normal exclusive scan of an array length width.
int index = threadIdx.x;
int offset = 1;
int n = 2*blockDim.x;//get actual temp padding
int ai = index;
int bi = index + n/2;
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
//Bounds checking, load shared mem
temp[ai+bankOffsetA] = (ai < numBlocks)?blockResults[ai]:0;
temp[bi+bankOffsetB] = (bi < numBlocks)?blockResults[bi]:0;
//Reduction step
for (int d = n>>1; d > 0; d >>= 1)
{
__syncthreads(); //Make sure previous step has completed
if (index < d)
{
int ai2 = offset*(2*index+1)-1;
int bi2 = offset*(2*index+2)-1;
ai2 += CONFLICT_FREE_OFFSET(ai2);
bi2 += CONFLICT_FREE_OFFSET(bi2);
temp[bi2] += temp[ai2];
}
offset *= 2; //Adjust offset
}
//Reduction complete
//Clear last element
// The root of the up-sweep is the grand total; record it, then zero the root
// so the down-sweep produces an exclusive scan.
if(index == 0)
{
totalSumOut[0] = temp[(n-1)+CONFLICT_FREE_OFFSET(n-1)];
temp[(n-1)+CONFLICT_FREE_OFFSET(n-1)] = 0;
}
//Sweep down
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads(); //wait for previous step to finish
if (index < d)
{
int ai2 = offset*(2*index+1)-1;
int bi2 = offset*(2*index+2)-1;
ai2 += CONFLICT_FREE_OFFSET(ai2);
bi2 += CONFLICT_FREE_OFFSET(bi2);
//Swap
float t = temp[ai2];
temp[ai2] = temp[bi2];
temp[bi2] += t;
}
}
//Sweep complete
__syncthreads();
//Writeback
if(ai < numBlocks)
blockResults[ai] = temp[ai+bankOffsetA];
if(bi < numBlocks)
blockResults[bi] = temp[bi+bankOffsetB];
}
// Adds each row's scanned block offset back onto that row's per-element scan,
// turning the per-row exclusive scans into globally unique vertex indices.
// Launch contract: blockIdx.x = row, threadIdx.x = column; rows of
// quadTreeScanResults have stride textureBufferSize.
__global__ void reintegrateResultsKernel(int actualWidth, int textureBufferSize,
int* quadTreeScanResults, int* blockResults)
{
    const int col = threadIdx.x;
    const int row = blockIdx.x;
    if (col >= actualWidth)
        return;
    quadTreeScanResults[col + row * textureBufferSize] += blockResults[row];
}
// Scatter pass: for every surviving quadtree vertex (degree >= 0), emits its
// position/UV into vertexBuffer at the compacted index from quadTreeScanResults,
// and writes two triangles (6 indices) per vertex into indexBuffer.
// aabbMeters packs the plane's bounds as (xmin, xmax, ymin, ymax).
// Launch contract: blockIdx.x = pixel row, threadIdx.x = pixel column;
// quadTreeScanResults must already hold *global* vertex indices (i.e.
// reintegrateResultsKernel has run).
// NOTE(review): the blockResults parameter is unused here -- candidate for removal.
__global__ void scatterResultsKernel(glm::vec4 aabbMeters, int actualWidth, int actualHeight,
int finalTextureWidth, int finalTextureHeight, int textureBufferSize,
int* quadTreeAssemblyBuffer, int* quadTreeScanResults,
int* blockResults, int* indexBuffer, float4* vertexBuffer)
{
int pixelX = threadIdx.x;
int pixelY = blockIdx.x;
if(pixelX < actualWidth)
{
int degree = quadTreeAssemblyBuffer[pixelX + pixelY*textureBufferSize];//Load vertex degree
//Only continue if this is a used vertex in the quadtree
if(degree >= 0)
{
int vertNum = quadTreeScanResults[pixelX + pixelY*textureBufferSize];
//Compute vertex info.
// UVs address the repacked final texture, not the padded working buffer.
float textureU = float(pixelX)/float(finalTextureWidth);
float textureV = float(pixelY)/float(finalTextureHeight);
//pixelX*(Sxmax-Sxmin)/actualWidth + Sxmin;
float posX = (pixelX*(aabbMeters.y-aabbMeters.x))/float(actualWidth) + aabbMeters.x;
//pixelY*(Symax-Symin)/actualHeight + Symin;
float posY = (pixelY*(aabbMeters.w-aabbMeters.z))/float(actualHeight) + aabbMeters.z;
// Vertex layout: xy = plane-space position (meters), zw = texture UV.
float4 vertex;
vertex.x = posX;
vertex.y = posY;
vertex.z = textureU;
vertex.w = textureV;
vertexBuffer[vertNum] = vertex;
//Generate mesh
// Quad configuration:
// 0-1
// |/|
// 2-3
// Index order: 0-2-1, 1-2-3
//Already loaded vertnum for 0
int vertNum0 = 0;
int vertNum1 = 0;
int vertNum2 = 0;
int vertNum3 = 0;
//If degree greater than 0, assemble quad
if(degree > 0)
{
//garunteed to be in range by nature of quadtree degree
vertNum0 = vertNum;
vertNum1 = quadTreeScanResults[(pixelX+degree) + (pixelY)*textureBufferSize];
vertNum2 = quadTreeScanResults[(pixelX) + (pixelY+degree)*textureBufferSize];
vertNum3 = quadTreeScanResults[(pixelX+degree) + (pixelY+degree)*textureBufferSize];
}
//Always fill buffer
// Vertices with degree == 0 anchor no quad: they still write six indices, all
// zero, producing degenerate (zero-area) triangles that rasterize to nothing.
// This keeps indexBuffer dense at vertNum*6 without a second compaction pass.
// Index order: 0-2-1, 1-2-3
int offset = vertNum*6;
indexBuffer[offset+0] = vertNum0;
indexBuffer[offset+1] = vertNum2;
indexBuffer[offset+2] = vertNum1;
indexBuffer[offset+3] = vertNum1;
indexBuffer[offset+4] = vertNum2;
indexBuffer[offset+5] = vertNum3;
}
}
}
// Repacks the SOA planar texture (row stride textureBufferSize) into a tightly
// packed AOS float4 texture of finalTextureWidth x finalTextureHeight.
// Texels outside the actualWidth x actualHeight region are filled with NaNs so
// they are recognizably invalid downstream.
// Launch contract: blockIdx.x = destination row, threadIdx.x = destination column
// (blockDim.x == finalTextureWidth, gridDim.x == finalTextureHeight).
__global__ void reshapeTextureKernel(int actualWidth, int actualHeight, int finalTextureWidth, int finalTextureHeight, int textureBufferSize,
Float4SOA planarTexture, float4* finalTexture)
{
    const int col = threadIdx.x;
    const int row = blockIdx.x;
    // Default every channel to NaN; only overwritten for in-range texels.
    float4 texel = make_float4(CUDART_NAN_F, CUDART_NAN_F, CUDART_NAN_F, CUDART_NAN_F);
    if (col < actualWidth && row < actualHeight)
    {
        const int srcIndex = col + row * textureBufferSize;
        texel.x = planarTexture.x[srcIndex];
        texel.y = planarTexture.y[srcIndex];
        texel.z = planarTexture.z[srcIndex];
        texel.w = planarTexture.w[srcIndex];
    }
    finalTexture[col + row * finalTextureWidth] = texel;
}
// Host driver for mesh extraction from the decimated quadtree:
// 1) per-row exclusive scan of surviving vertices, 2) scan of per-row totals
// (grand total copied back to host_compactCount), 3) reintegration to global
// vertex indices, 4) scatter of vertices + index buffer, 5) texture repack.
// NOTE(review): outputBufferSize is unused; the cudaMemcpy return value and the
// kernel launches are not error-checked; and because blockResultsExclusiveScanKernel
// runs as a single block, numBlocks (= actualHeight) must not exceed twice the
// maximum block size -- confirm callers respect these limits.
__host__ void quadtreeMeshGeneration(glm::vec4 aabbMeters, int actualWidth, int actualHeight, int* quadTreeAssemblyBuffer,
int* quadTreeScanResults, int textureBufferSize, int* blockResults, int blockResultsBufferSize,
int* indexBuffer, float4* vertexBuffer, int* compactCount, int* host_compactCount, int outputBufferSize,
int finalTextureWidth, int finalTextureHeight, Float4SOA planarTexture, float4* finalTexture)
{
// Scan length must be a power of two; each thread handles two elements.
int blockSize = roundupnextpow2(actualWidth);
int numBlocks = actualHeight;
dim3 threads(blockSize >> 1);//2 elements per thread
dim3 blocks(numBlocks);
// Shared memory: blockSize elements plus slack for bank-conflict padding.
int sharedCount = (blockSize+2)*sizeof(int);
//Make sure size constraints aren't violated
assert(blocks.x <= blockResultsBufferSize);
//Scan blocks
quadTreeExclusiveScanKernel<<<blocks,threads,sharedCount>>>(actualWidth, quadTreeAssemblyBuffer,
quadTreeScanResults, textureBufferSize, blockResults);
//Scan block results
int pow2 = roundupnextpow2(numBlocks);
threads = dim3(pow2>>1);
blocks = dim3(1);
assert(pow2 <= blockResultsBufferSize);
sharedCount = (pow2 + 2)*sizeof(int);
blockResultsExclusiveScanKernel<<<blocks,threads,sharedCount>>>(blockResults, numBlocks, compactCount);
// Blocking copy of the total vertex count; also synchronizes with the scans above.
cudaMemcpy(host_compactCount, compactCount, sizeof(int), cudaMemcpyDeviceToHost);
//Reintegrate
//Also scatter (generate meshes and vertecies in the process)
threads = dim3(actualWidth);
blocks = dim3(numBlocks);
reintegrateResultsKernel<<<blocks,threads>>>(actualWidth, textureBufferSize, quadTreeScanResults, blockResults);
assert(finalTextureWidth <= textureBufferSize);
assert(finalTextureHeight <= textureBufferSize);
scatterResultsKernel<<<blocks,threads>>>(aabbMeters, actualWidth, actualHeight, finalTextureWidth, finalTextureHeight, textureBufferSize,
quadTreeAssemblyBuffer, quadTreeScanResults, blockResults, indexBuffer, vertexBuffer);
//Reshape texture to aligned memory
threads = dim3(finalTextureWidth);
blocks = dim3(finalTextureHeight);
reshapeTextureKernel<<<blocks,threads>>>(actualWidth, actualHeight, finalTextureWidth, finalTextureHeight, textureBufferSize,
planarTexture, finalTexture);
}
94b822eecdcb412d0d64606d11715807a6b82158.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "contrib_ops/cuda/quantization/qordered_ops/qordered_attention_impl.h"
#include "contrib_ops/cuda/quantization/qordered_ops/qordered_common.cuh"
#include <hipcub/hipcub.hpp>
namespace onnxruntime {
namespace contrib {
namespace cuda {
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11040
// Fills a 256-entry lookup table: table[i] = float(base^(i - 255)), i.e. the
// powers base^g for g in [-255, 0] used by the quantized softmax kernel.
// Launch contract: a single block of exactly 256 threads.
__global__ void
BuildTableForSoftmaxPowerOfKernel(const double base, float* table) {
  const int exponent = static_cast<int>(threadIdx.x) - 255;
  table[threadIdx.x] = __double2float_rn(pow(base, static_cast<double>(exponent)));
}
// Launches the table-building kernel on `stream` (1 block x 256 threads) and
// surfaces any launch error as a Status.
Status BuildTableForSoftmaxPowerOf(hipStream_t stream, const double base, float* table) {
hipLaunchKernelGGL(( BuildTableForSoftmaxPowerOfKernel), dim3(1), dim3(256), 0, stream, base, table);
return CUDA_CALL(hipGetLastError());
}
// Masked softmax over int8-quantized attention scores, one block per score row.
// Exponentials come from the precomputed lookup_table (base^x for x in [-255, 0]),
// indexed by (score - row max) so the largest score maps to exponent 0.
// Each thread processes 4 consecutive elements via char4/int4 vector loads.
// Grid layout: blockIdx.x = row within batch (sequence_len * num_heads rows),
// blockIdx.y = batch index; mask_index is one int32 per position per batch.
// NOTE(review): the int4/char4 loads appear to assume sequence_len % 4 == 0 and
// 16-byte-aligned rows -- confirm against the op's shape checks.
template <int TPB>
__global__ void
QOrderMaskedSoftmaxKernel(const int8_t* src, const float* lookup_table, const int32_t* mask_index,
int8_t* dst, const float scale_dst, const unsigned sequence_len) {
using BlockReduceInt32 = hipcub::BlockReduce<int32_t, TPB>;
using BlockReduceFP32 = hipcub::BlockReduce<float, TPB>;
// The int32 max-reduce and fp32 sum-reduce never overlap (a barrier separates
// them), so their temp storage can share one union.
__shared__ union {
typename BlockReduceInt32::TempStorage i32;
typename BlockReduceFP32::TempStorage f32;
} unioned_tmp_storage;
__shared__ float sum_reverse_block;
__shared__ int32_t max_in_block;
const int block_offset = (blockIdx.y * gridDim.x + blockIdx.x) * sequence_len; /* 4 bytes per thread */
src += block_offset;
dst += block_offset;
mask_index += (blockIdx.y * sequence_len);
int offset = threadIdx.x * 4;
// Out-of-range threads use -128 scores (minimal) and zero masks so they do not
// perturb the reductions.
char4 ch4 = make_char4(-128, -128, -128, -128);
int4 four_masks = make_int4(0, 0, 0, 0);
if (offset < sequence_len) {
four_masks = *(const int4*)(mask_index + offset);
ch4 = *(const char4*)(src + offset);
}
// Block-wide max of the raw int8 scores.
int32_t max_of_4 = max(max(static_cast<int>(ch4.x), static_cast<int>(ch4.y)),
max(static_cast<int>(ch4.z), static_cast<int>(ch4.w)));
const int32_t max_all = BlockReduceInt32(unioned_tmp_storage.i32).Reduce(max_of_4, hipcub::Max());
if (threadIdx.x == 0) {
max_in_block = max_all;
}
__syncthreads();
// Look up base^(score - max); masked-off positions contribute 0.
// Index: 255 - max + score maps (score == max) to table[255] == base^0.
float4 epow_of_4 = {
four_masks.x ? lookup_table[255 - max_in_block + ch4.x] : 0.0f,
four_masks.y ? lookup_table[255 - max_in_block + ch4.y] : 0.0f,
four_masks.z ? lookup_table[255 - max_in_block + ch4.z] : 0.0f,
four_masks.w ? lookup_table[255 - max_in_block + ch4.w] : 0.0f};
float sum_of_4 = epow_of_4.x + epow_of_4.y + epow_of_4.z + epow_of_4.w;
const float sum_all = BlockReduceFP32(unioned_tmp_storage.f32).Reduce(sum_of_4, hipcub::Sum());
if (threadIdx.x == 0) {
// Fold the output quantization scale into the normalization factor.
sum_reverse_block = (float)(1.0 / ((double)sum_all * scale_dst));
}
__syncthreads();
if (offset < sequence_len) {
// Quantize softmax probabilities back to int8 with the requested scale.
ch4.x = QuantizeFloatS8(epow_of_4.x, sum_reverse_block);
ch4.y = QuantizeFloatS8(epow_of_4.y, sum_reverse_block);
ch4.z = QuantizeFloatS8(epow_of_4.z, sum_reverse_block);
ch4.w = QuantizeFloatS8(epow_of_4.w, sum_reverse_block);
*(char4*)(dst + offset) = ch4;
}
}
// Host dispatcher for the quantized masked softmax. Picks the smallest template
// block size TPB in {32, 128, 256, 512} that covers ceil(sequence_len / 4)
// threads (each thread handles 4 elements), launching one block per
// (batch, head, query-position) score row. Sequences longer than 2048 are rejected.
Status QOrderMaskedSoftmax(
hipStream_t stream, const hipDeviceProp_t& /*device_prop*/,
const int8_t* src, const float* lookup_table,
const int32_t* mask_index,
int8_t* dst, const float scale_dst,
const unsigned batch, const unsigned num_heads, const unsigned sequence_len) {
// Threads needed if each handles exactly 4 elements.
int tpb = (sequence_len + 3) / 4;
if (tpb <= 32) {
constexpr int TPB = 32;
dim3 threads(TPB, 1, 1);
dim3 blocks(sequence_len * num_heads, batch, 1);
hipLaunchKernelGGL(( QOrderMaskedSoftmaxKernel<TPB>), dim3(blocks), dim3(threads), 0, stream, src, lookup_table, mask_index, dst, scale_dst, sequence_len);
} else if (tpb <= 128) {
constexpr int TPB = 128;
dim3 threads(TPB, 1, 1);
dim3 blocks(sequence_len * num_heads, batch, 1);
hipLaunchKernelGGL(( QOrderMaskedSoftmaxKernel<TPB>), dim3(blocks), dim3(threads), 0, stream, src, lookup_table, mask_index, dst, scale_dst, sequence_len);
} else if (tpb <= 256) {
constexpr int TPB = 256;
dim3 threads(TPB, 1, 1);
dim3 blocks(sequence_len * num_heads, batch, 1);
hipLaunchKernelGGL(( QOrderMaskedSoftmaxKernel<TPB>), dim3(blocks), dim3(threads), 0, stream, src, lookup_table, mask_index, dst, scale_dst, sequence_len);
} else if (tpb <= 512) {
constexpr int TPB = 512;
dim3 threads(TPB, 1, 1);
dim3 blocks(sequence_len * num_heads, batch, 1);
hipLaunchKernelGGL(( QOrderMaskedSoftmaxKernel<TPB>), dim3(blocks), dim3(threads), 0, stream, src, lookup_table, mask_index, dst, scale_dst, sequence_len);
} else {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Sequence length too long (> 2048) currently not supported!");
}
return CUDA_CALL(hipGetLastError());
}
constexpr int S8TransposeWidth = 16;
// Transposes each rows x cols int8 matrix of a batch; blockIdx.z selects the matrix.
// Each thread moves a 4x4 byte block via char4 vector loads/stores, staged through
// a padded shared-memory tile to avoid bank conflicts on the transposed reads.
// Preconditions (enforced by the host wrapper): rows % 4 == 0 and cols % 4 == 0.
// Fix: `dst` is an output buffer but was declared `const int8_t*` and written
// through casts that silently stripped the qualifier; it is now non-const.
__global__ void
QOrderBatchInt8MatrixTransposeKernel(const int8_t* src, int8_t* dst, const int rows, const int cols) {
  __shared__ char4 shm[S8TransposeWidth * 4][S8TransposeWidth + 1];
  // Advance both pointers to this batch element's matrix.
  const int64_t batch_offset = int64_t(rows) * cols * blockIdx.z;
  src += batch_offset;
  dst += batch_offset;
  const int src_col = blockIdx.x * (S8TransposeWidth << 2);
  const int src_row = blockIdx.y * (S8TransposeWidth << 2);
  const int c = threadIdx.x << 2;  // byte offset of this thread's 4-column strip
  const int r = threadIdx.y << 2;  // byte offset of this thread's 4-row strip
  int col = src_col + c;
  int row = src_row + r;
  if (row < rows && col < cols) {
    // Read a 4x4 byte block and store it pre-transposed into shared memory.
    src += row * cols + col;
    char4 ch4_0 = *(const char4*)(src);
    char4 ch4_1 = *(const char4*)(src += cols);
    char4 ch4_2 = *(const char4*)(src += cols);
    char4 ch4_3 = *(const char4*)(src += cols);
    shm[c + 0][threadIdx.y] = {ch4_0.x, ch4_1.x, ch4_2.x, ch4_3.x};
    shm[c + 1][threadIdx.y] = {ch4_0.y, ch4_1.y, ch4_2.y, ch4_3.y};
    shm[c + 2][threadIdx.y] = {ch4_0.z, ch4_1.z, ch4_2.z, ch4_3.z};
    shm[c + 3][threadIdx.y] = {ch4_0.w, ch4_1.w, ch4_2.w, ch4_3.w};
  }
  __syncthreads();  // all blocks staged before any transposed write
  // Write the transposed 4x4 blocks back to global memory.
  int tcol = src_row + c;
  int trow = src_col + r;
  if (trow < cols && tcol < rows) {
    dst += trow * rows + tcol;
    *(char4*)(dst) = shm[r + 0][threadIdx.x];
    *(char4*)(dst += rows) = shm[r + 1][threadIdx.x];
    *(char4*)(dst += rows) = shm[r + 2][threadIdx.x];
    *(char4*)(dst += rows) = shm[r + 3][threadIdx.x];
  }
}
// Host wrapper: transposes `batch_size` int8 matrices of shape rows x cols from
// `input` into `output` on `stream`. Each block covers a 64x64-byte tile
// (S8TransposeWidth threads x 4 bytes per axis); gridDim.z walks the batch.
// NOTE(review): device_prop is currently unused -- kept for interface parity.
Status QOrderBatchTransposeInt8Matrix(hipStream_t stream, const hipDeviceProp_t& device_prop,
const int batch_size, const int rows, const int cols,
const int8_t* input, int8_t* output) {
ORT_ENFORCE(rows % 4 == 0 && cols % 4 == 0, "Matrix rows and cols must be divisible by 4!");
ORT_ENFORCE(rows > 0 && cols > 0 && batch_size > 0, "batch_size, rows, cols should be positive");
dim3 block(S8TransposeWidth, S8TransposeWidth);
dim3 grid((cols / 4 + S8TransposeWidth - 1) / S8TransposeWidth, (rows / 4 + S8TransposeWidth - 1) / S8TransposeWidth, batch_size);
hipLaunchKernelGGL(( QOrderBatchInt8MatrixTransposeKernel), dim3(grid), dim3(block), 0, stream, input, output, rows, cols);
return CUDA_CALL(hipGetLastError());
}
#endif
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 94b822eecdcb412d0d64606d11715807a6b82158.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "contrib_ops/cuda/quantization/qordered_ops/qordered_attention_impl.h"
#include "contrib_ops/cuda/quantization/qordered_ops/qordered_common.cuh"
#include <cub/cub.cuh>
namespace onnxruntime {
namespace contrib {
namespace cuda {
#if defined(CUDA_VERSION) && CUDA_VERSION >= 11040
// Fills a 256-entry lookup table: table[i] = float(base^(i - 255)), i.e. the
// powers base^g for g in [-255, 0] used by the quantized softmax kernel.
// Launch contract: a single block of exactly 256 threads.
__global__ void
BuildTableForSoftmaxPowerOfKernel(const double base, float* table) {
  const int exponent = static_cast<int>(threadIdx.x) - 255;
  table[threadIdx.x] = __double2float_rn(pow(base, static_cast<double>(exponent)));
}
// Launches the table-building kernel on `stream` (1 block x 256 threads) and
// surfaces any launch error as a Status.
Status BuildTableForSoftmaxPowerOf(cudaStream_t stream, const double base, float* table) {
BuildTableForSoftmaxPowerOfKernel<<<1, 256, 0, stream>>>(base, table);
return CUDA_CALL(cudaGetLastError());
}
// Masked softmax over int8-quantized attention scores, one block per score row.
// Exponentials come from the precomputed lookup_table (base^x for x in [-255, 0]),
// indexed by (score - row max) so the largest score maps to exponent 0.
// Each thread processes 4 consecutive elements via char4/int4 vector loads.
// Grid layout: blockIdx.x = row within batch (sequence_len * num_heads rows),
// blockIdx.y = batch index; mask_index is one int32 per position per batch.
// NOTE(review): the int4/char4 loads appear to assume sequence_len % 4 == 0 and
// 16-byte-aligned rows -- confirm against the op's shape checks.
template <int TPB>
__global__ void
QOrderMaskedSoftmaxKernel(const int8_t* src, const float* lookup_table, const int32_t* mask_index,
int8_t* dst, const float scale_dst, const unsigned sequence_len) {
using BlockReduceInt32 = cub::BlockReduce<int32_t, TPB>;
using BlockReduceFP32 = cub::BlockReduce<float, TPB>;
// The int32 max-reduce and fp32 sum-reduce never overlap (a barrier separates
// them), so their temp storage can share one union.
__shared__ union {
typename BlockReduceInt32::TempStorage i32;
typename BlockReduceFP32::TempStorage f32;
} unioned_tmp_storage;
__shared__ float sum_reverse_block;
__shared__ int32_t max_in_block;
const int block_offset = (blockIdx.y * gridDim.x + blockIdx.x) * sequence_len; /* 4 bytes per thread */
src += block_offset;
dst += block_offset;
mask_index += (blockIdx.y * sequence_len);
int offset = threadIdx.x * 4;
// Out-of-range threads use -128 scores (minimal) and zero masks so they do not
// perturb the reductions.
char4 ch4 = make_char4(-128, -128, -128, -128);
int4 four_masks = make_int4(0, 0, 0, 0);
if (offset < sequence_len) {
four_masks = *(const int4*)(mask_index + offset);
ch4 = *(const char4*)(src + offset);
}
// Block-wide max of the raw int8 scores.
int32_t max_of_4 = max(max(static_cast<int>(ch4.x), static_cast<int>(ch4.y)),
max(static_cast<int>(ch4.z), static_cast<int>(ch4.w)));
const int32_t max_all = BlockReduceInt32(unioned_tmp_storage.i32).Reduce(max_of_4, cub::Max());
if (threadIdx.x == 0) {
max_in_block = max_all;
}
__syncthreads();
// Look up base^(score - max); masked-off positions contribute 0.
// Index: 255 - max + score maps (score == max) to table[255] == base^0.
float4 epow_of_4 = {
four_masks.x ? lookup_table[255 - max_in_block + ch4.x] : 0.0f,
four_masks.y ? lookup_table[255 - max_in_block + ch4.y] : 0.0f,
four_masks.z ? lookup_table[255 - max_in_block + ch4.z] : 0.0f,
four_masks.w ? lookup_table[255 - max_in_block + ch4.w] : 0.0f};
float sum_of_4 = epow_of_4.x + epow_of_4.y + epow_of_4.z + epow_of_4.w;
const float sum_all = BlockReduceFP32(unioned_tmp_storage.f32).Reduce(sum_of_4, cub::Sum());
if (threadIdx.x == 0) {
// Fold the output quantization scale into the normalization factor.
sum_reverse_block = (float)(1.0 / ((double)sum_all * scale_dst));
}
__syncthreads();
if (offset < sequence_len) {
// Quantize softmax probabilities back to int8 with the requested scale.
ch4.x = QuantizeFloatS8(epow_of_4.x, sum_reverse_block);
ch4.y = QuantizeFloatS8(epow_of_4.y, sum_reverse_block);
ch4.z = QuantizeFloatS8(epow_of_4.z, sum_reverse_block);
ch4.w = QuantizeFloatS8(epow_of_4.w, sum_reverse_block);
*(char4*)(dst + offset) = ch4;
}
}
// Host dispatcher for the quantized masked softmax. Picks the smallest template
// block size TPB in {32, 128, 256, 512} that covers ceil(sequence_len / 4)
// threads (each thread handles 4 elements), launching one block per
// (batch, head, query-position) score row. Sequences longer than 2048 are rejected.
Status QOrderMaskedSoftmax(
cudaStream_t stream, const cudaDeviceProp& /*device_prop*/,
const int8_t* src, const float* lookup_table,
const int32_t* mask_index,
int8_t* dst, const float scale_dst,
const unsigned batch, const unsigned num_heads, const unsigned sequence_len) {
// Threads needed if each handles exactly 4 elements.
int tpb = (sequence_len + 3) / 4;
if (tpb <= 32) {
constexpr int TPB = 32;
dim3 threads(TPB, 1, 1);
dim3 blocks(sequence_len * num_heads, batch, 1);
QOrderMaskedSoftmaxKernel<TPB><<<blocks, threads, 0, stream>>>(src, lookup_table, mask_index, dst, scale_dst, sequence_len);
} else if (tpb <= 128) {
constexpr int TPB = 128;
dim3 threads(TPB, 1, 1);
dim3 blocks(sequence_len * num_heads, batch, 1);
QOrderMaskedSoftmaxKernel<TPB><<<blocks, threads, 0, stream>>>(src, lookup_table, mask_index, dst, scale_dst, sequence_len);
} else if (tpb <= 256) {
constexpr int TPB = 256;
dim3 threads(TPB, 1, 1);
dim3 blocks(sequence_len * num_heads, batch, 1);
QOrderMaskedSoftmaxKernel<TPB><<<blocks, threads, 0, stream>>>(src, lookup_table, mask_index, dst, scale_dst, sequence_len);
} else if (tpb <= 512) {
constexpr int TPB = 512;
dim3 threads(TPB, 1, 1);
dim3 blocks(sequence_len * num_heads, batch, 1);
QOrderMaskedSoftmaxKernel<TPB><<<blocks, threads, 0, stream>>>(src, lookup_table, mask_index, dst, scale_dst, sequence_len);
} else {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Sequence length too long (> 2048) currently not supported!");
}
return CUDA_CALL(cudaGetLastError());
}
constexpr int S8TransposeWidth = 16;
// Transposes each rows x cols int8 matrix of a batch; blockIdx.z selects the matrix.
// Each thread moves a 4x4 byte block via char4 vector loads/stores, staged through
// a padded shared-memory tile to avoid bank conflicts on the transposed reads.
// Preconditions (enforced by the host wrapper): rows % 4 == 0 and cols % 4 == 0.
// Fix: `dst` is an output buffer but was declared `const int8_t*` and written
// through casts that silently stripped the qualifier; it is now non-const.
__global__ void
QOrderBatchInt8MatrixTransposeKernel(const int8_t* src, int8_t* dst, const int rows, const int cols) {
  __shared__ char4 shm[S8TransposeWidth * 4][S8TransposeWidth + 1];
  // Advance both pointers to this batch element's matrix.
  const int64_t batch_offset = int64_t(rows) * cols * blockIdx.z;
  src += batch_offset;
  dst += batch_offset;
  const int src_col = blockIdx.x * (S8TransposeWidth << 2);
  const int src_row = blockIdx.y * (S8TransposeWidth << 2);
  const int c = threadIdx.x << 2;  // byte offset of this thread's 4-column strip
  const int r = threadIdx.y << 2;  // byte offset of this thread's 4-row strip
  int col = src_col + c;
  int row = src_row + r;
  if (row < rows && col < cols) {
    // Read a 4x4 byte block and store it pre-transposed into shared memory.
    src += row * cols + col;
    char4 ch4_0 = *(const char4*)(src);
    char4 ch4_1 = *(const char4*)(src += cols);
    char4 ch4_2 = *(const char4*)(src += cols);
    char4 ch4_3 = *(const char4*)(src += cols);
    shm[c + 0][threadIdx.y] = {ch4_0.x, ch4_1.x, ch4_2.x, ch4_3.x};
    shm[c + 1][threadIdx.y] = {ch4_0.y, ch4_1.y, ch4_2.y, ch4_3.y};
    shm[c + 2][threadIdx.y] = {ch4_0.z, ch4_1.z, ch4_2.z, ch4_3.z};
    shm[c + 3][threadIdx.y] = {ch4_0.w, ch4_1.w, ch4_2.w, ch4_3.w};
  }
  __syncthreads();  // all blocks staged before any transposed write
  // Write the transposed 4x4 blocks back to global memory.
  int tcol = src_row + c;
  int trow = src_col + r;
  if (trow < cols && tcol < rows) {
    dst += trow * rows + tcol;
    *(char4*)(dst) = shm[r + 0][threadIdx.x];
    *(char4*)(dst += rows) = shm[r + 1][threadIdx.x];
    *(char4*)(dst += rows) = shm[r + 2][threadIdx.x];
    *(char4*)(dst += rows) = shm[r + 3][threadIdx.x];
  }
}
// Host wrapper: transposes `batch_size` int8 matrices of shape rows x cols from
// `input` into `output` on `stream`. Each block covers a 64x64-byte tile
// (S8TransposeWidth threads x 4 bytes per axis); gridDim.z walks the batch.
// NOTE(review): device_prop is currently unused -- kept for interface parity.
Status QOrderBatchTransposeInt8Matrix(cudaStream_t stream, const cudaDeviceProp& device_prop,
const int batch_size, const int rows, const int cols,
const int8_t* input, int8_t* output) {
ORT_ENFORCE(rows % 4 == 0 && cols % 4 == 0, "Matrix rows and cols must be divisible by 4!");
ORT_ENFORCE(rows > 0 && cols > 0 && batch_size > 0, "batch_size, rows, cols should be positive");
dim3 block(S8TransposeWidth, S8TransposeWidth);
dim3 grid((cols / 4 + S8TransposeWidth - 1) / S8TransposeWidth, (rows / 4 + S8TransposeWidth - 1) / S8TransposeWidth, batch_size);
QOrderBatchInt8MatrixTransposeKernel<<<grid, block, 0, stream>>>(input, output, rows, cols);
return CUDA_CALL(cudaGetLastError());
}
#endif
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
2b5df7c685e0a51976d11208e15b1f544b6eade0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include <hip/hip_runtime.h>
#define PI 3.14159265359
// #define HEIGHT 256
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <pycuda-complex.hpp>
typedef pycuda::complex<double> pyComplex;
// Returns norm(z) -- for std-style complex this is the *squared* magnitude |z|^2,
// not |z|. Callers' escape-radius comparisons must account for that.
__device__ float norma(pyComplex z){
return norm(z);
}
// Mandelbrot iteration count per pixel, written as log(count + 1) into M.
// The viewport is the square centered on the midpoint of [xMin,xMax]x[yMin,yMax]
// with side xMax - xMin; pixel (idx, idy) maps linearly into it. The grid is
// assumed to cover M exactly (row stride = blockDim.x * gridDim.x).
// NOTE(review): parameter L is unused -- the iteration limit is hardcoded as
// L1 = 1700; presumably L was meant to drive it. Also, `delta` is float (limits
// deep zoom precision) and the escape test uses norma (|z|^2) < R = 2, i.e.
// |z| < sqrt(2) rather than the conventional |z| < 2 -- confirm both are intended.
__global__ void mandelbrot_kernel(double xMin, double xMax, double yMin, double yMax, int L, double *M) {
int n_x = blockDim.x*gridDim.x;
//int n_y = blockDim.y*gridDim.y;
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int idy = threadIdx.y + blockDim.y*blockIdx.y;
//int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = idy*n_x+idx;
double x0 = (xMin + xMax)/2;
double y0 = (yMin + yMax)/2;
double side = xMax - xMin;
float delta = side/n_x;
// c is this pixel's point in the complex plane; z starts at c (equivalent to
// one iteration from z = 0).
pyComplex c( x0-side/2.+delta*idx,y0-side/2.+delta*idy);
pyComplex z( x0-side/2.+delta*idx,y0-side/2.+delta*idy);
double h = 0;
int L1= 1700;
float R = 2.0;
while( h<L1 && norma(z)<R){
z=z*z+c;
h+=1;
}
// Log scaling compresses the dynamic range for plotting.
M[threadId]=log(h + 1);
}
/*
__global__ void mandelbrot_kernel( const int nWidth, const int nHeight, cudaP xMin, cudaP xMax,
cudaP yMin, cudaP yMax,cudaP *startingPoints, cudaP *graphPoints ){
int tid = blockIdx.x + threadIdx.x*gridDim.x;
__shared__ unsigned int mappedPoints[ %(HEIGHT)s ];
mappedPoints[threadIdx.x] = 0;
__syncthreads();
cudaP val = startingPoints[ threadIdx.x + blockIdx.x*blockDim.x];
cudaP k = (xMax - xMin)/(nWidth-1)*blockIdx.x + xMin;
int nValues = 1500;
cudaP yFactor = cudaP(nHeight)/(yMax-yMin);
int yPix;
for (int i=0; i<100000; i++) val = k*val*(1-val); //Tranciente
for (int i=0; i<nValues; i++ ){
if ( val>=yMin and val <=yMax){
yPix = int((val-yMin)*yFactor);
if (yPix<nHeight and yPix>=0) mappedPoints[yPix] += 1;
}
val = k*val*(1-val);
}
cudaP value;
if (mappedPoints[threadIdx.x]>=1) value = log(cudaP(mappedPoints[threadIdx.x]));
else value = 0.0f;
graphPoints[tid] = value;
}*/
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Writes a binary mask over the full launch grid: 0 for pixels strictly inside
// the open rectangle (xMin, xMax) x (yMin, yMax), 1 everywhere else.
// Assumes the grid exactly covers maskPoints (row stride = blockDim.x * gridDim.x).
__global__ void mask_kernel( int xMin, int xMax, int yMin, int yMax, int *maskPoints){
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    const int rowStride = blockDim.x * gridDim.x;
    const bool inside = (px > xMin && px < xMax) && (py > yMin && py < yMax);
    maskPoints[px + py * rowStride] = inside ? 0 : 1;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void plot_kernel( int jMin, int jMax, int iMin, int iMax, cudaP *graphPoints, int *maskPoints, cudaP *plotData){
int t_x = blockIdx.x*blockDim.x + threadIdx.x;
int t_y = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_x + t_y*blockDim.x*gridDim.x;
cudaP val=graphPoints[tid];
if ( (t_x>=jMin and t_x<jMax) and (t_y>=iMin and t_y<iMax) ) val = 1-val;
plotData[tid] = val;
} | 2b5df7c685e0a51976d11208e15b1f544b6eade0.cu | #include <stdint.h>
#include <cuda.h>
#define PI 3.14159265359
// #define HEIGHT 256
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <pycuda-complex.hpp>
typedef pycuda::complex<double> pyComplex;
__device__ float norma(pyComplex z){
return norm(z);
}
__global__ void mandelbrot_kernel(double xMin, double xMax, double yMin, double yMax, int L, double *M) {
int n_x = blockDim.x*gridDim.x;
//int n_y = blockDim.y*gridDim.y;
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int idy = threadIdx.y + blockDim.y*blockIdx.y;
//int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = idy*n_x+idx;
double x0 = (xMin + xMax)/2;
double y0 = (yMin + yMax)/2;
double side = xMax - xMin;
float delta = side/n_x;
pyComplex c( x0-side/2.+delta*idx,y0-side/2.+delta*idy);
pyComplex z( x0-side/2.+delta*idx,y0-side/2.+delta*idy);
double h = 0;
int L1= 1700;
float R = 2.0;
while( h<L1 && norma(z)<R){
z=z*z+c;
h+=1;
}
M[threadId]=log(h + 1);
}
/*
__global__ void mandelbrot_kernel( const int nWidth, const int nHeight, cudaP xMin, cudaP xMax,
cudaP yMin, cudaP yMax,cudaP *startingPoints, cudaP *graphPoints ){
int tid = blockIdx.x + threadIdx.x*gridDim.x;
__shared__ unsigned int mappedPoints[ %(HEIGHT)s ];
mappedPoints[threadIdx.x] = 0;
__syncthreads();
cudaP val = startingPoints[ threadIdx.x + blockIdx.x*blockDim.x];
cudaP k = (xMax - xMin)/(nWidth-1)*blockIdx.x + xMin;
int nValues = 1500;
cudaP yFactor = cudaP(nHeight)/(yMax-yMin);
int yPix;
for (int i=0; i<100000; i++) val = k*val*(1-val); //Tranciente
for (int i=0; i<nValues; i++ ){
if ( val>=yMin and val <=yMax){
yPix = int((val-yMin)*yFactor);
if (yPix<nHeight and yPix>=0) mappedPoints[yPix] += 1;
}
val = k*val*(1-val);
}
cudaP value;
if (mappedPoints[threadIdx.x]>=1) value = log(cudaP(mappedPoints[threadIdx.x]));
else value = 0.0f;
graphPoints[tid] = value;
}*/
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void mask_kernel( int xMin, int xMax, int yMin, int yMax, int *maskPoints){
int t_x = blockIdx.x*blockDim.x + threadIdx.x;
int t_y = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_x + t_y*blockDim.x*gridDim.x;
int val;
if ( (t_x<xMax && t_x>xMin) && (t_y<yMax && t_y>yMin) ) val = 0;
else val = 1;
maskPoints[tid] = val;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void plot_kernel( int jMin, int jMax, int iMin, int iMax, cudaP *graphPoints, int *maskPoints, cudaP *plotData){
int t_x = blockIdx.x*blockDim.x + threadIdx.x;
int t_y = blockIdx.y*blockDim.y + threadIdx.y;
int tid = t_x + t_y*blockDim.x*gridDim.x;
cudaP val=graphPoints[tid];
if ( (t_x>=jMin and t_x<jMax) and (t_y>=iMin and t_y<iMax) ) val = 1-val;
plotData[tid] = val;
} |
0bccc70a93731bfeedc61b0b5205ea2cfa4effc6.hip | // !!! This is a file automatically generated by hipify!!!
// https://cvw.cac.cornell.edu/gpu/example_submit
#include "hip/hip_runtime.h"
#include <stdio.h>
// Print device properties
void printDevProp(hipDeviceProp_t devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %lu\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %lu\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %lu\n", devProp.totalConstMem);
printf("Texture alignment: %lu\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ?"Yes" : "No"));
return;
}
int main()
{
int devCount;
hipGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
return 0;
}
| 0bccc70a93731bfeedc61b0b5205ea2cfa4effc6.cu | // https://cvw.cac.cornell.edu/gpu/example_submit
#include "cuda_runtime.h"
#include <stdio.h>
// Print device properties
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %lu\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %lu\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %lu\n", devProp.totalConstMem);
printf("Texture alignment: %lu\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ?"Yes" : "No"));
return;
}
int main()
{
int devCount;
cudaGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
return 0;
}
|
48855ca6c03894a3d5072fa2c5df7e900dbd0ded.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgemm_fermi.cu normal z -> s, Fri Jan 30 19:00:10 2015
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
gemm_stencil.cuh defines the GPU kernel (device function).
gemm_kernel.cuh defines the GPU kernel (global function).
The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh.
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "sgemm_fermi_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X**T or op( X ) = X**H,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
transA CHARACTER*1.
On entry, transA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = 'N': op( A ) = A.
- = 'T': op( A ) = A**T.
- = 'C': op( A ) = A**H.
@param[in]
transB CHARACTER*1.
On entry, transB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = 'N': op( B ) = B.
- = 'T': op( B ) = B**T.
- = 'C': op( B ) = B**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( dA ) and of the matrix dC. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( dB ) and the number of columns of the matrix dC. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( dA ) and the number of rows of the matrix op( dB ). K must
be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA REAL array of DIMENSION ( LDA, ka ), where ka is
k when transA = MagmaNoTrans, and is m otherwise.
Before entry with transA = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When transA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
dB REAL array of DIMENSION ( LDB, kb ), where kb is
n when transB = MagmaNoTrans, and is k otherwise.
Before entry with transB = MagmaNoTrans, the leading k by n
part of the array dB must contain the matrix dB, otherwise
the leading n by k part of the array dB must contain the
matrix dB.
@param[in]
lddb INTEGER.
On entry, LDB specifies the first dimension of dB as declared
in the calling (sub) program. When transB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC REAL array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array dC must
contain the matrix dC, except when beta is zero, in which
case dC need not be set on entry.
On exit, the array dC is overwritten by the m by n matrix
( alpha*op( dA )*op( dB ) + beta*dC ).
@param[in]
lddc INTEGER.
On entry, LDC specifies the first dimension of dC as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_sgemm(
magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dB, magma_int_t lddb,
float beta,
magmaFloat_ptr dC, magma_int_t lddc )
{
magma_int_t info = 0;
if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
info = -1;
else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
info = -8;
else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
info = -10;
else if ( lddc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
magma_sgemm(
transA, transB,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
#else
magmablas_sgemm_tesla(
transA, transB, m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
#endif
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 2, TransB = 2;
if ( transA == MagmaTrans )
TransA = 1;
else if ( transA == MagmaNoTrans )
TransA = 0;
if ( transB == MagmaTrans )
TransB = 1;
else if ( transB == MagmaNoTrans )
TransB = 0;
magma_int_t Am = ( ! TransA ? m : k);
magma_int_t An = (!TransA ? k : m);
magma_int_t Bm = ( ! TransB ? k : n);
magma_int_t Bn = (!TransB ? n : k);
size_t sizeA = (size_t) ldda * (An - 1) + Am;
size_t sizeB = (size_t) lddb * (Bn - 1) + Bm;
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_sgemm( transA, transB, m, n, k, alpha,
dA, ldda, dB, lddb,
beta, dC, lddc );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = hipFilterModePoint;
tex_ref_A.addressMode[0] = hipAddressModeClamp;
tex_ref_B.normalized = false;
tex_ref_B.filterMode = hipFilterModePoint;
tex_ref_B.addressMode[0] = hipAddressModeClamp;
// Bind A and B to texture references
hipError_t err;
err = hipBindTexture(&offsetA, tex_ref_A, dA, sizeA*sizeof(float));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err );
return;
}
err = hipBindTexture(&offsetB, tex_ref_B, dB, sizeB*sizeof(float));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", hipGetErrorString(err), err );
hipUnbindTexture( tex_ref_A );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(dA[0]);
offsetB = offsetB/sizeof(dB[0]);
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_nn + 1,
(n - 1)/BLK_N_nn + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_tt + 1,
(n - 1)/BLK_N_tt + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_tc + 1,
(n - 1)/BLK_N_tc + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_cn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_ct + 1,
(n - 1)/BLK_N_ct + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_ct), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_cc + 1,
(n - 1)/BLK_N_cc + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_cc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
#ifdef TEXTURE_1D
hipUnbindTexture( tex_ref_A );
hipUnbindTexture( tex_ref_B );
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| 48855ca6c03894a3d5072fa2c5df7e900dbd0ded.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgemm_fermi.cu normal z -> s, Fri Jan 30 19:00:10 2015
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
gemm_stencil.cuh defines the GPU kernel (device function).
gemm_kernel.cuh defines the GPU kernel (global function).
The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh.
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "sgemm_fermi_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X**T or op( X ) = X**H,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
transA CHARACTER*1.
On entry, transA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = 'N': op( A ) = A.
- = 'T': op( A ) = A**T.
- = 'C': op( A ) = A**H.
@param[in]
transB CHARACTER*1.
On entry, transB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = 'N': op( B ) = B.
- = 'T': op( B ) = B**T.
- = 'C': op( B ) = B**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( dA ) and of the matrix dC. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( dB ) and the number of columns of the matrix dC. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( dA ) and the number of rows of the matrix op( dB ). K must
be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA REAL array of DIMENSION ( LDA, ka ), where ka is
k when transA = MagmaNoTrans, and is m otherwise.
Before entry with transA = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When transA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
dB REAL array of DIMENSION ( LDB, kb ), where kb is
n when transB = MagmaNoTrans, and is k otherwise.
Before entry with transB = MagmaNoTrans, the leading k by n
part of the array dB must contain the matrix dB, otherwise
the leading n by k part of the array dB must contain the
matrix dB.
@param[in]
lddb INTEGER.
On entry, LDB specifies the first dimension of dB as declared
in the calling (sub) program. When transB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC REAL array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array dC must
contain the matrix dC, except when beta is zero, in which
case dC need not be set on entry.
On exit, the array dC is overwritten by the m by n matrix
( alpha*op( dA )*op( dB ) + beta*dC ).
@param[in]
lddc INTEGER.
On entry, LDC specifies the first dimension of dC as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_sgemm(
magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dB, magma_int_t lddb,
float beta,
magmaFloat_ptr dC, magma_int_t lddc )
{
magma_int_t info = 0;
if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
info = -1;
else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
info = -8;
else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
info = -10;
else if ( lddc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
magma_sgemm(
transA, transB,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
#else
magmablas_sgemm_tesla(
transA, transB, m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
#endif
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 2, TransB = 2;
if ( transA == MagmaTrans )
TransA = 1;
else if ( transA == MagmaNoTrans )
TransA = 0;
if ( transB == MagmaTrans )
TransB = 1;
else if ( transB == MagmaNoTrans )
TransB = 0;
magma_int_t Am = ( ! TransA ? m : k);
magma_int_t An = (!TransA ? k : m);
magma_int_t Bm = ( ! TransB ? k : n);
magma_int_t Bn = (!TransB ? n : k);
size_t sizeA = (size_t) ldda * (An - 1) + Am;
size_t sizeB = (size_t) lddb * (Bn - 1) + Bm;
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_sgemm( transA, transB, m, n, k, alpha,
dA, ldda, dB, lddb,
beta, dC, lddc );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = cudaFilterModePoint;
tex_ref_A.addressMode[0] = cudaAddressModeClamp;
tex_ref_B.normalized = false;
tex_ref_B.filterMode = cudaFilterModePoint;
tex_ref_B.addressMode[0] = cudaAddressModeClamp;
// Bind A and B to texture references
cudaError_t err;
err = cudaBindTexture(&offsetA, tex_ref_A, dA, sizeA*sizeof(float));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err );
return;
}
err = cudaBindTexture(&offsetB, tex_ref_B, dB, sizeB*sizeof(float));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", cudaGetErrorString(err), err );
cudaUnbindTexture( tex_ref_A );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(dA[0]);
offsetB = offsetB/sizeof(dB[0]);
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_nn + 1,
(n - 1)/BLK_N_nn + 1 );
sgemm_kernel_fermi_nn<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 );
sgemm_kernel_fermi_nt<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 );
sgemm_kernel_fermi_nc<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 );
sgemm_kernel_fermi_tn<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_tt + 1,
(n - 1)/BLK_N_tt + 1 );
sgemm_kernel_fermi_tt<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_tc + 1,
(n - 1)/BLK_N_tc + 1 );
sgemm_kernel_fermi_tc<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 );
sgemm_kernel_fermi_cn<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_ct + 1,
(n - 1)/BLK_N_ct + 1 );
sgemm_kernel_fermi_ct<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_cc + 1,
(n - 1)/BLK_N_cc + 1 );
sgemm_kernel_fermi_cc<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
#ifdef TEXTURE_1D
cudaUnbindTexture( tex_ref_A );
cudaUnbindTexture( tex_ref_B );
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
83589897fde4ca130dfc82a52e5a30214e9dddc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/entropy_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ForwardGPU(const int nthreads, const Dtype* prob,
const Dtype* log_data, const Dtype threshold, Dtype* loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(prob[index] < threshold){
loss_data[index] = Dtype(0);
}
else{
loss_data[index] = prob[index] * log_data[index];
}
}
}
template <typename Dtype>
void EntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if(now_iteration_ < iterations_num_){
top[0]->mutable_cpu_data()[0] = Dtype(0);
return;
}
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* log_data = normalized_bottom_data_.mutable_gpu_data();
caffe_gpu_log(data_num_ * label_num_, bottom_data, log_data);
int nthreads = label_num_ * data_num_;
Dtype loss;
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
hipLaunchKernelGGL(( ForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, log_data, Dtype(0.00001), loss_data);
caffe_gpu_asum(label_num_ * data_num_, loss_data, &loss);
loss = -loss;
top[0]->mutable_cpu_data()[0] = loss;
}
/*
template <typename Dtype>
__global__ void EntropyDiff(const int nthreads, const Dtype* data,
const Dtype* log_data, const Dtype* label, const Dtype threshold,
const int data_num, const int ignore_label, const int label_num,
Dtype* count, Dtype* diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
count[index] = Dtype(1) / label_num;
if(data[index] < threshold){
diff[index] = Dtype(0);
}
else if(label[(index / data_num) * 2] < 0){
count[index] = Dtype(0);
diff[index] = Dtype(0);
}
else if(label[(index / data_num) * 2] == ignore_label){
count[index] = Dtype(0);
diff[index] = Dtype(0);
}
else{
diff[index] = -(Dtype(1) + log_data[index]);
}
}
}
*/
template <typename Dtype>
__global__ void EntropyDiff(const int nthreads, const Dtype* data,
const Dtype* log_data, const Dtype* label, const Dtype threshold,
const int data_num, const int ignore_label, const int label_num,
Dtype* count, Dtype* diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
count[index] = Dtype(1) / label_num;
if(data[index] < threshold){
diff[index] = Dtype(0);
}
else{
diff[index] = -(Dtype(1) + log_data[index]);
}
}
}
template <typename Dtype>
void EntropyLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if(now_iteration_ < iterations_num_){
now_iteration_++;
return;
}
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_label = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* log_data = normalized_bottom_data_.mutable_gpu_data();
Dtype* count = normalized_bottom_data_.mutable_gpu_diff();
int nthreads = data_num_ * label_num_;
if (propagate_down[0]) {
hipLaunchKernelGGL(( EntropyDiff<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, log_data, bottom_label,
threshold_, data_num_, ignore_label_, label_num_, count, bottom_diff);
Dtype count_num;
caffe_gpu_asum(nthreads, count, &count_num);
count_num = count_num > 0 ? count_num : Dtype(1);
caffe_gpu_scal(nthreads, loss_weight_ / count_num, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EntropyLossLayer);
} // namespace caffe
| 83589897fde4ca130dfc82a52e5a30214e9dddc7.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/entropy_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ForwardGPU(const int nthreads, const Dtype* prob,
const Dtype* log_data, const Dtype threshold, Dtype* loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(prob[index] < threshold){
loss_data[index] = Dtype(0);
}
else{
loss_data[index] = prob[index] * log_data[index];
}
}
}
template <typename Dtype>
void EntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if(now_iteration_ < iterations_num_){
top[0]->mutable_cpu_data()[0] = Dtype(0);
return;
}
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* log_data = normalized_bottom_data_.mutable_gpu_data();
caffe_gpu_log(data_num_ * label_num_, bottom_data, log_data);
int nthreads = label_num_ * data_num_;
Dtype loss;
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
ForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, log_data, Dtype(0.00001), loss_data);
caffe_gpu_asum(label_num_ * data_num_, loss_data, &loss);
loss = -loss;
top[0]->mutable_cpu_data()[0] = loss;
}
/*
template <typename Dtype>
__global__ void EntropyDiff(const int nthreads, const Dtype* data,
const Dtype* log_data, const Dtype* label, const Dtype threshold,
const int data_num, const int ignore_label, const int label_num,
Dtype* count, Dtype* diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
count[index] = Dtype(1) / label_num;
if(data[index] < threshold){
diff[index] = Dtype(0);
}
else if(label[(index / data_num) * 2] < 0){
count[index] = Dtype(0);
diff[index] = Dtype(0);
}
else if(label[(index / data_num) * 2] == ignore_label){
count[index] = Dtype(0);
diff[index] = Dtype(0);
}
else{
diff[index] = -(Dtype(1) + log_data[index]);
}
}
}
*/
template <typename Dtype>
__global__ void EntropyDiff(const int nthreads, const Dtype* data,
const Dtype* log_data, const Dtype* label, const Dtype threshold,
const int data_num, const int ignore_label, const int label_num,
Dtype* count, Dtype* diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
count[index] = Dtype(1) / label_num;
if(data[index] < threshold){
diff[index] = Dtype(0);
}
else{
diff[index] = -(Dtype(1) + log_data[index]);
}
}
}
template <typename Dtype>
void EntropyLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if(now_iteration_ < iterations_num_){
now_iteration_++;
return;
}
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_label = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* log_data = normalized_bottom_data_.mutable_gpu_data();
Dtype* count = normalized_bottom_data_.mutable_gpu_diff();
int nthreads = data_num_ * label_num_;
if (propagate_down[0]) {
EntropyDiff<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, log_data, bottom_label,
threshold_, data_num_, ignore_label_, label_num_, count, bottom_diff);
Dtype count_num;
caffe_gpu_asum(nthreads, count, &count_num);
count_num = count_num > 0 ? count_num : Dtype(1);
caffe_gpu_scal(nthreads, loss_weight_ / count_num, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EntropyLossLayer);
} // namespace caffe
|
14b2e124d81de54548ae4edc8f1f4b2b889fadf7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_calc_dt_kernel_get;
int xdim0_calc_dt_kernel_get_h = -1;
__constant__ int ydim0_calc_dt_kernel_get;
int ydim0_calc_dt_kernel_get_h = -1;
__constant__ int xdim1_calc_dt_kernel_get;
int xdim1_calc_dt_kernel_get_h = -1;
__constant__ int ydim1_calc_dt_kernel_get;
int ydim1_calc_dt_kernel_get_h = -1;
__constant__ int xdim4_calc_dt_kernel_get;
int xdim4_calc_dt_kernel_get_h = -1;
__constant__ int ydim4_calc_dt_kernel_get;
int ydim4_calc_dt_kernel_get_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC4
#define OPS_ACC0(x, y, z) \
(x + xdim0_calc_dt_kernel_get * (y) + \
xdim0_calc_dt_kernel_get * ydim0_calc_dt_kernel_get * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_calc_dt_kernel_get * (y) + \
xdim1_calc_dt_kernel_get * ydim1_calc_dt_kernel_get * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_calc_dt_kernel_get * (y) + \
xdim4_calc_dt_kernel_get * ydim4_calc_dt_kernel_get * (z))
// user function
__device__
void
calc_dt_kernel_get_gpu(const double *cellx, const double *celly,
double *xl_pos, double *yl_pos, const double *cellz,
double *zl_pos) {
*xl_pos = cellx[OPS_ACC0(0, 0, 0)];
*yl_pos = celly[OPS_ACC1(0, 0, 0)];
*zl_pos = cellz[OPS_ACC4(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC4
__global__ void ops_calc_dt_kernel_get(const double *__restrict arg0,
const double *__restrict arg1,
double *__restrict arg2,
double *__restrict arg3,
const double *__restrict arg4,
double *__restrict arg5, int size0,
int size1, int size2) {
double arg2_l[1];
double arg3_l[1];
double arg5_l[1];
for (int d = 0; d < 1; d++)
arg2_l[d] = ZERO_double;
for (int d = 0; d < 1; d++)
arg3_l[d] = ZERO_double;
for (int d = 0; d < 1; d++)
arg5_l[d] = ZERO_double;
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim0_calc_dt_kernel_get +
idx_z * 0 * 1 * xdim0_calc_dt_kernel_get * ydim0_calc_dt_kernel_get;
arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_calc_dt_kernel_get +
idx_z * 0 * 1 * xdim1_calc_dt_kernel_get * ydim1_calc_dt_kernel_get;
arg4 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim4_calc_dt_kernel_get +
idx_z * 1 * 1 * xdim4_calc_dt_kernel_get * ydim4_calc_dt_kernel_get;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
calc_dt_kernel_get_gpu(arg0, arg1, arg2_l, arg3_l, arg4, arg5_l);
}
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(&arg2[d +
(blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y) *
1],
arg2_l[d]);
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(&arg3[d +
(blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y) *
1],
arg3_l[d]);
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(&arg5[d +
(blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y) *
1],
arg5_l[d]);
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_calc_dt_kernel_get(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
#else
void ops_par_loop_calc_dt_kernel_get_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 6, range, 99))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(99, "calc_dt_kernel_get");
OPS_kernels[99].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != xdim0_calc_dt_kernel_get_h ||
ydim0 != ydim0_calc_dt_kernel_get_h ||
xdim1 != xdim1_calc_dt_kernel_get_h ||
ydim1 != ydim1_calc_dt_kernel_get_h ||
xdim4 != xdim4_calc_dt_kernel_get_h ||
ydim4 != ydim4_calc_dt_kernel_get_h) {
hipMemcpyToSymbol(xdim0_calc_dt_kernel_get, &xdim0, sizeof(int));
xdim0_calc_dt_kernel_get_h = xdim0;
hipMemcpyToSymbol(ydim0_calc_dt_kernel_get, &ydim0, sizeof(int));
ydim0_calc_dt_kernel_get_h = ydim0;
hipMemcpyToSymbol(xdim1_calc_dt_kernel_get, &xdim1, sizeof(int));
xdim1_calc_dt_kernel_get_h = xdim1;
hipMemcpyToSymbol(ydim1_calc_dt_kernel_get, &ydim1, sizeof(int));
ydim1_calc_dt_kernel_get_h = ydim1;
hipMemcpyToSymbol(xdim4_calc_dt_kernel_get, &xdim4, sizeof(int));
xdim4_calc_dt_kernel_get_h = xdim4;
hipMemcpyToSymbol(ydim4_calc_dt_kernel_get, &ydim4, sizeof(int));
ydim4_calc_dt_kernel_get_h = ydim4;
}
#ifdef OPS_LAZY
ops_block block = desc->block;
#endif
#ifdef OPS_MPI
double *arg2h =
(double *)(((ops_reduction)args[2].data)->data +
((ops_reduction)args[2].data)->size * block->index);
#else
double *arg2h = (double *)(((ops_reduction)args[2].data)->data);
#endif
#ifdef OPS_MPI
double *arg3h =
(double *)(((ops_reduction)args[3].data)->data +
((ops_reduction)args[3].data)->size * block->index);
#else
double *arg3h = (double *)(((ops_reduction)args[3].data)->data);
#endif
#ifdef OPS_MPI
double *arg5h =
(double *)(((ops_reduction)args[5].data)->data +
((ops_reduction)args[5].data)->size * block->index);
#else
double *arg5h = (double *)(((ops_reduction)args[5].data)->data);
#endif
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int nblocks = ((x_size - 1) / OPS_block_size_x + 1) *
((y_size - 1) / OPS_block_size_y + 1) * z_size;
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg2.data = OPS_reduct_h + reduct_bytes;
arg2.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg2.data)[d + b * 1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
arg3.data = OPS_reduct_h + reduct_bytes;
arg3.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg3.data)[d + b * 1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
arg5.data = OPS_reduct_h + reduct_bytes;
arg5.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg5.data)[d + b * 1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[6];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args, 6, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[99].mpi_time += t2 - t1;
}
int nshared = 0;
int nthread = OPS_block_size_x * OPS_block_size_y;
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared * nthread, reduct_size * nthread);
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_calc_dt_kernel_get), dim3(grid), dim3(tblock), nshared, 0,
(double *)p_a[0], (double *)p_a[1], (double *)arg2.data_d,
(double *)arg3.data_d, (double *)p_a[4], (double *)arg5.data_d, x_size,
y_size, z_size);
cutilSafeCall(hipGetLastError());
mvReductArraysToHost(reduct_bytes);
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg2h[d] = arg2h[d] + ((double *)arg2.data)[d + b * 1];
}
}
arg2.data = (char *)arg2h;
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg3h[d] = arg3h[d] + ((double *)arg3.data)[d + b * 1];
}
}
arg3.data = (char *)arg3h;
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg5h[d] = arg5h[d] + ((double *)arg5.data)[d + b * 1];
}
}
arg5.data = (char *)arg5h;
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[99].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[99].mpi_time += t2 - t1;
OPS_kernels[99].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[99].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[99].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_calc_dt_kernel_get(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 99;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 99;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg *)malloc(6 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->args[3] = arg3;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->function = ops_par_loop_calc_dt_kernel_get_execute;
if (OPS_diags > 1) {
ops_timing_realloc(99, "calc_dt_kernel_get");
}
ops_enqueue_kernel(desc);
}
#endif
| 14b2e124d81de54548ae4edc8f1f4b2b889fadf7.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_calc_dt_kernel_get;
int xdim0_calc_dt_kernel_get_h = -1;
__constant__ int ydim0_calc_dt_kernel_get;
int ydim0_calc_dt_kernel_get_h = -1;
__constant__ int xdim1_calc_dt_kernel_get;
int xdim1_calc_dt_kernel_get_h = -1;
__constant__ int ydim1_calc_dt_kernel_get;
int ydim1_calc_dt_kernel_get_h = -1;
__constant__ int xdim4_calc_dt_kernel_get;
int xdim4_calc_dt_kernel_get_h = -1;
__constant__ int ydim4_calc_dt_kernel_get;
int ydim4_calc_dt_kernel_get_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC4
#define OPS_ACC0(x, y, z) \
(x + xdim0_calc_dt_kernel_get * (y) + \
xdim0_calc_dt_kernel_get * ydim0_calc_dt_kernel_get * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_calc_dt_kernel_get * (y) + \
xdim1_calc_dt_kernel_get * ydim1_calc_dt_kernel_get * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_calc_dt_kernel_get * (y) + \
xdim4_calc_dt_kernel_get * ydim4_calc_dt_kernel_get * (z))
// user function
__device__
void
calc_dt_kernel_get_gpu(const double *cellx, const double *celly,
double *xl_pos, double *yl_pos, const double *cellz,
double *zl_pos) {
*xl_pos = cellx[OPS_ACC0(0, 0, 0)];
*yl_pos = celly[OPS_ACC1(0, 0, 0)];
*zl_pos = cellz[OPS_ACC4(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC4
__global__ void ops_calc_dt_kernel_get(const double *__restrict arg0,
const double *__restrict arg1,
double *__restrict arg2,
double *__restrict arg3,
const double *__restrict arg4,
double *__restrict arg5, int size0,
int size1, int size2) {
double arg2_l[1];
double arg3_l[1];
double arg5_l[1];
for (int d = 0; d < 1; d++)
arg2_l[d] = ZERO_double;
for (int d = 0; d < 1; d++)
arg3_l[d] = ZERO_double;
for (int d = 0; d < 1; d++)
arg5_l[d] = ZERO_double;
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim0_calc_dt_kernel_get +
idx_z * 0 * 1 * xdim0_calc_dt_kernel_get * ydim0_calc_dt_kernel_get;
arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_calc_dt_kernel_get +
idx_z * 0 * 1 * xdim1_calc_dt_kernel_get * ydim1_calc_dt_kernel_get;
arg4 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim4_calc_dt_kernel_get +
idx_z * 1 * 1 * xdim4_calc_dt_kernel_get * ydim4_calc_dt_kernel_get;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
calc_dt_kernel_get_gpu(arg0, arg1, arg2_l, arg3_l, arg4, arg5_l);
}
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(&arg2[d +
(blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y) *
1],
arg2_l[d]);
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(&arg3[d +
(blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y) *
1],
arg3_l[d]);
for (int d = 0; d < 1; d++)
ops_reduction_cuda<OPS_INC>(&arg5[d +
(blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y) *
1],
arg5_l[d]);
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_calc_dt_kernel_get(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
#else
void ops_par_loop_calc_dt_kernel_get_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 6, range, 99))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(99, "calc_dt_kernel_get");
OPS_kernels[99].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != xdim0_calc_dt_kernel_get_h ||
ydim0 != ydim0_calc_dt_kernel_get_h ||
xdim1 != xdim1_calc_dt_kernel_get_h ||
ydim1 != ydim1_calc_dt_kernel_get_h ||
xdim4 != xdim4_calc_dt_kernel_get_h ||
ydim4 != ydim4_calc_dt_kernel_get_h) {
cudaMemcpyToSymbol(xdim0_calc_dt_kernel_get, &xdim0, sizeof(int));
xdim0_calc_dt_kernel_get_h = xdim0;
cudaMemcpyToSymbol(ydim0_calc_dt_kernel_get, &ydim0, sizeof(int));
ydim0_calc_dt_kernel_get_h = ydim0;
cudaMemcpyToSymbol(xdim1_calc_dt_kernel_get, &xdim1, sizeof(int));
xdim1_calc_dt_kernel_get_h = xdim1;
cudaMemcpyToSymbol(ydim1_calc_dt_kernel_get, &ydim1, sizeof(int));
ydim1_calc_dt_kernel_get_h = ydim1;
cudaMemcpyToSymbol(xdim4_calc_dt_kernel_get, &xdim4, sizeof(int));
xdim4_calc_dt_kernel_get_h = xdim4;
cudaMemcpyToSymbol(ydim4_calc_dt_kernel_get, &ydim4, sizeof(int));
ydim4_calc_dt_kernel_get_h = ydim4;
}
#ifdef OPS_LAZY
ops_block block = desc->block;
#endif
#ifdef OPS_MPI
double *arg2h =
(double *)(((ops_reduction)args[2].data)->data +
((ops_reduction)args[2].data)->size * block->index);
#else
double *arg2h = (double *)(((ops_reduction)args[2].data)->data);
#endif
#ifdef OPS_MPI
double *arg3h =
(double *)(((ops_reduction)args[3].data)->data +
((ops_reduction)args[3].data)->size * block->index);
#else
double *arg3h = (double *)(((ops_reduction)args[3].data)->data);
#endif
#ifdef OPS_MPI
double *arg5h =
(double *)(((ops_reduction)args[5].data)->data +
((ops_reduction)args[5].data)->size * block->index);
#else
double *arg5h = (double *)(((ops_reduction)args[5].data)->data);
#endif
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int nblocks = ((x_size - 1) / OPS_block_size_x + 1) *
((y_size - 1) / OPS_block_size_y + 1) * z_size;
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
reduct_size = MAX(reduct_size, sizeof(double) * 1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg2.data = OPS_reduct_h + reduct_bytes;
arg2.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg2.data)[d + b * 1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
arg3.data = OPS_reduct_h + reduct_bytes;
arg3.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg3.data)[d + b * 1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
arg5.data = OPS_reduct_h + reduct_bytes;
arg5.data_d = OPS_reduct_d + reduct_bytes;
for (int b = 0; b < maxblocks; b++)
for (int d = 0; d < 1; d++)
((double *)arg5.data)[d + b * 1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[6];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args, 6, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[99].mpi_time += t2 - t1;
}
int nshared = 0;
int nthread = OPS_block_size_x * OPS_block_size_y;
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared, sizeof(double) * 1);
nshared = MAX(nshared * nthread, reduct_size * nthread);
// call kernel wrapper function, passing in pointers to data
ops_calc_dt_kernel_get<<<grid, tblock, nshared>>>(
(double *)p_a[0], (double *)p_a[1], (double *)arg2.data_d,
(double *)arg3.data_d, (double *)p_a[4], (double *)arg5.data_d, x_size,
y_size, z_size);
cutilSafeCall(cudaGetLastError());
mvReductArraysToHost(reduct_bytes);
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg2h[d] = arg2h[d] + ((double *)arg2.data)[d + b * 1];
}
}
arg2.data = (char *)arg2h;
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg3h[d] = arg3h[d] + ((double *)arg3.data)[d + b * 1];
}
}
arg3.data = (char *)arg3h;
for (int b = 0; b < maxblocks; b++) {
for (int d = 0; d < 1; d++) {
arg5h[d] = arg5h[d] + ((double *)arg5.data)[d + b * 1];
}
}
arg5.data = (char *)arg5h;
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[99].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[99].mpi_time += t2 - t1;
OPS_kernels[99].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[99].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[99].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_calc_dt_kernel_get(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 99;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 99;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg *)malloc(6 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->args[3] = arg3;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->function = ops_par_loop_calc_dt_kernel_get_execute;
if (OPS_diags > 1) {
ops_timing_realloc(99, "calc_dt_kernel_get");
}
ops_enqueue_kernel(desc);
}
#endif
|
5c0cf86b6c4feec7809c2abf202f39194a898366.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020, Lorenzo Basso, Jack Lee, Matthew Zhang, Feiyang Chen
* Copyright (c) 2018, Francis Haghighi-Daly
* All rights reserved.
* This file is part of the WooStOr - Wavepacket prOpopgatiOn using SpliT OperatR method, subject to the GNU/GPL-3.0-or-later.*/
#include "cuda_helper.h"
void _handle_cuda_error(hipError_t code, const char *file, int line) {
if (code != hipSuccess) {
char err_msg[500];
sprintf(err_msg, "Error '%s' occurred in file '%s'@%d\n", hipGetErrorString(code), file, line);
mexErrMsgIdAndTxt("SplitOperator:CUDA:FFT", err_msg);
}
}
void _handle_cudafft_error(hipfftResult code, const char *file, int line) {
if (code != HIPFFT_SUCCESS) {
char err_msg[500];
sprintf(err_msg, "Cuda FFT error occurred in file '%s'@%d\n", file, line);
mexErrMsgIdAndTxt("SplitOperator:CUDA:FFT", err_msg);
}
}
| 5c0cf86b6c4feec7809c2abf202f39194a898366.cu | /* Copyright (c) 2020, Lorenzo Basso, Jack Lee, Matthew Zhang, Feiyang Chen
* Copyright (c) 2018, Francis Haghighi-Daly
* All rights reserved.
* This file is part of the WooStOr - Wavepacket prOpopgatiOn using SpliT OperatR method, subject to the GNU/GPL-3.0-or-later.*/
#include "cuda_helper.h"
void _handle_cuda_error(cudaError_t code, const char *file, int line) {
if (code != cudaSuccess) {
char err_msg[500];
sprintf(err_msg, "Error '%s' occurred in file '%s'@%d\n", cudaGetErrorString(code), file, line);
mexErrMsgIdAndTxt("SplitOperator:CUDA:FFT", err_msg);
}
}
void _handle_cudafft_error(cufftResult code, const char *file, int line) {
if (code != CUFFT_SUCCESS) {
char err_msg[500];
sprintf(err_msg, "Cuda FFT error occurred in file '%s'@%d\n", file, line);
mexErrMsgIdAndTxt("SplitOperator:CUDA:FFT", err_msg);
}
}
|
e1fcf55d695c9a7c226879303dea4d2c5fbe02ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "common_hip.cuh"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
void cudaSafeMalloc(void **ptr, size_t size)
{
size_t total_m;
size_t free_m;
cuMemGetInfo(&free_m, &total_m);
cudaCheckErrors("cuMemGetInfo fail");
/*std::cout << "Memory: " << (free_m / (1024 * 1024)) << "/" << (total_m / (1024 * 1024)) << " MiB" << std::endl;
std::cout << "Allocating " << size << " B on device!" << std::endl;*/
if (hipMalloc(ptr, size) != hipSuccess)
{
std::cerr << "Cannot allocate memory of size " << (size / 1024) << " kiB on device!" << std::endl;
exit(1);
}
cudaCheckErrors("Malloc fail");
}
void safeMalloc(void **ptr, size_t size)
{
*ptr = malloc(size);
/*std::cout << "Allocating " << size << " B on host!" << std::endl;*/
if (*ptr == NULL)
{
std::cerr << "Cannot allocate memory of size " << (size / 1024) << " kiB on host!" << std::endl;
exit(1);
}
}
void cudaSafeFree(void *ptr)
{
hipFree(ptr);
cudaCheckErrors("Free fail");
}
void safeFree(void *ptr)
{
free(ptr);
}
int ceil_log2(unsigned long long x)
{
static const unsigned long long t[6] = {
0xFFFFFFFF00000000ull,
0x00000000FFFF0000ull,
0x000000000000FF00ull,
0x00000000000000F0ull,
0x000000000000000Cull,
0x0000000000000002ull
};
int y = (((x & (x - 1)) == 0) ? 0 : 1);
int j = 32;
int i;
for (i = 0; i < 6; i++) {
int k = (((x & t[i]) == 0) ? 0 : j);
y += k;
x >>= k;
j >>= 1;
}
return y;
}
int pow2(int e)
{
if(e < 1)
{
return 1;
}
return 1 << e;
}
| e1fcf55d695c9a7c226879303dea4d2c5fbe02ac.cu | #include "common.cuh"
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include <iostream>
void cudaSafeMalloc(void **ptr, size_t size)
{
size_t total_m;
size_t free_m;
cuMemGetInfo(&free_m, &total_m);
cudaCheckErrors("cuMemGetInfo fail");
/*std::cout << "Memory: " << (free_m / (1024 * 1024)) << "/" << (total_m / (1024 * 1024)) << " MiB" << std::endl;
std::cout << "Allocating " << size << " B on device!" << std::endl;*/
if (cudaMalloc(ptr, size) != cudaSuccess)
{
std::cerr << "Cannot allocate memory of size " << (size / 1024) << " kiB on device!" << std::endl;
exit(1);
}
cudaCheckErrors("Malloc fail");
}
void safeMalloc(void **ptr, size_t size)
{
*ptr = malloc(size);
/*std::cout << "Allocating " << size << " B on host!" << std::endl;*/
if (*ptr == NULL)
{
std::cerr << "Cannot allocate memory of size " << (size / 1024) << " kiB on host!" << std::endl;
exit(1);
}
}
void cudaSafeFree(void *ptr)
{
cudaFree(ptr);
cudaCheckErrors("Free fail");
}
void safeFree(void *ptr)
{
free(ptr);
}
int ceil_log2(unsigned long long x)
{
static const unsigned long long t[6] = {
0xFFFFFFFF00000000ull,
0x00000000FFFF0000ull,
0x000000000000FF00ull,
0x00000000000000F0ull,
0x000000000000000Cull,
0x0000000000000002ull
};
int y = (((x & (x - 1)) == 0) ? 0 : 1);
int j = 32;
int i;
for (i = 0; i < 6; i++) {
int k = (((x & t[i]) == 0) ? 0 : j);
y += k;
x >>= k;
j >>= 1;
}
return y;
}
int pow2(int e)
{
if(e < 1)
{
return 1;
}
return 1 << e;
}
|
5270b6c8597ee7e0bc7a5875f5978dfc886015ec.hip | // !!! This is a file automatically generated by hipify!!!
//cuda inclusion
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//Math inclusion
#define _USE_MATH_DEFINES
#include <math.h>
#include <cmath>
#include <ctime>
//c++ and project inclusion
#include <stdio.h>
#include "galaxyKernel.h"
//Cuda error handling start here
inline void error_check(hipError_t err, const char* file, int line)
{
if (err != hipSuccess) {
::fprintf(stderr, "\nCUDA ERROR at %s[%d] : %s\n", file, line, hipGetErrorString(err));
printf("\nGeneral error at %s[%d] : %s\n", file, line, hipGetErrorString(err));
}
}
#define CUDA_CHECK(err) do { error_check(err, __FILE__, __LINE__); } while(0)
//this function clamp the numbers ORI with float
__device__ float Clamp(float temp, float a, float b)
{
return fmaxf(a, fminf(b, temp));
}
// Angular two-point pair counting for one galaxy catalogue.
// Runs as a single block; the input is streamed in 98 tiles of 1024
// (ascension, declination) pairs staged in shared memory, every thread
// compares its galaxy against every galaxy of the tile, and the angular
// separation (degrees, 0.25-degree bins => 720 bins over 0..180) is
// accumulated into a shared histogram merged into `histogram` at the end.
// Requires blockDim.x == 1024 to match the tile width.
__global__ void LaunchGalaxy(float * device_ascension, float * device_declination, unsigned long long int * histogram, size_t size)
{
    // One tile of coordinates, staged through shared memory.
    __shared__ float S_asc[1024];
    __shared__ float S_dec[1024];
    // Block-local histogram, flushed to global memory once at the end.
    __shared__ unsigned long long int S_result[1024];
    int tid = threadIdx.x;
    // BUG FIX: the original zeroed S_result[tid + i] for i in [0, 1024),
    // indexing far past the end of the array; zero it cooperatively instead.
    for (int i = tid; i < 1024; i += blockDim.x)
    {
        S_result[i] = 0;
    }
    __syncthreads();
    for (int b = 0; b < 98; b++)
    {
        size_t base = (size_t)b * 1024;
        // BUG FIX: guard the actual load index (the original tested
        // b*tid + b*1024, which matches no address used below).
        if (base + tid < size)
        {
            S_asc[tid] = device_ascension[base + tid];
            S_dec[tid] = device_declination[base + tid];
        }
        __syncthreads();
        if (base + tid < size)
        {
            // Compare this thread's galaxy against every galaxy in the tile.
            // BUG FIX: the original read S_dec[tid + col] / S_asc[tid + col],
            // running past the 1024-element shared arrays; the intended pair
            // is (tid, col).
            for (int col = 0; col < 1024; col++)
            {
                if (base + col >= size)
                    break;
                float temp = acosf(Clamp(__sinf(S_dec[tid]) * __sinf(S_dec[col]) +
                    __cosf(S_dec[tid]) * __cosf(S_dec[col]) * __cosf(S_asc[tid] - S_asc[col]), -1.f, 1.f)) * 180.0f / (float)M_PI * 4.0f;
                // An exact 180-degree separation maps to 720; clamp into the
                // last valid bin of the 720-bin histogram.
                int bin = (int)temp;
                if (bin > 719) bin = 719;
                atomicAdd(&S_result[bin], 1ULL);
            }
        }
        // All reads of this tile must finish before the next iteration
        // overwrites S_asc/S_dec (the original synced inside divergent
        // control flow, risking deadlock, and reloaded the tile mid-loop).
        __syncthreads();
    }
    // BUG FIX: the original never published the counts to global memory.
    for (int i = tid; i < 720; i += blockDim.x)
    {
        atomicAdd(&histogram[i], S_result[i]);
    }
}
// Host wrapper for LaunchGalaxy: allocates device buffers, uploads the
// coordinate arrays and the 720-bin histogram, launches the kernel as a
// single 1024-thread block, times it with clock(), and copies the histogram
// back.  `size` is the number of (ascension, declination) pairs.
void Kernel_handler_single(float * host_ascension, float * host_declination, unsigned long long int * host_histogram, size_t size)
{
    //device buffers
    float* device_ascension = nullptr;
    float* device_declination = nullptr;
    //cuda histogram
    unsigned long long int * device_histogram = nullptr;
    //ascension and declination
    CUDA_CHECK(hipMalloc(&device_ascension, size * sizeof(float)));
    CUDA_CHECK(hipMalloc(&device_declination, size * sizeof(float)));
    //histogram
    CUDA_CHECK(hipMalloc(&device_histogram, 720 * sizeof(unsigned long long int)));
    //copy memory
    CUDA_CHECK(hipMemcpy(device_ascension, host_ascension, size * sizeof(float), hipMemcpyHostToDevice));
    CUDA_CHECK(hipMemcpy(device_declination, host_declination, size * sizeof(float), hipMemcpyHostToDevice));
    // BUG FIX: the histogram holds unsigned long long, not float; copying
    // 720 * sizeof(float) uploaded only half of the buffer.
    CUDA_CHECK(hipMemcpy(device_histogram, host_histogram, 720 * sizeof(unsigned long long int), hipMemcpyHostToDevice));
    //take kernel time start
    clock_t s = clock();
    // BUG FIX: dim3((1, 1, 1)) invoked the C comma operator and collapsed to
    // dim3(1); spell the configuration out and launch the full 1024-thread
    // block that the kernel's shared-memory tiles assume.
    hipLaunchKernelGGL(LaunchGalaxy, dim3(1, 1, 1), dim3(1024, 1, 1), 0, 0, device_ascension, device_declination, device_histogram, size);
    //device sync
    hipError_t errAsync = hipDeviceSynchronize();
    //check error sync
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
    {
        printf("\nError in cuda kernel (sync side) %s\n", hipGetErrorString(err));
    }
    //check error async
    if (errAsync != hipSuccess)
    {
        printf("\nError in cuda kernel (async side) %s\n", hipGetErrorString(errAsync));
    }
    clock_t e = clock();
    //calculate elapsed time
    double el = ((double)(e - s)) / CLOCKS_PER_SEC;
    printf("\n\n### The kernel timer took %f secs\n", el);
    CUDA_CHECK(hipMemcpy(host_histogram, device_histogram, 720 * sizeof(unsigned long long int), hipMemcpyDeviceToHost));
    //cuda free
    CUDA_CHECK(hipFree(device_ascension));
    CUDA_CHECK(hipFree(device_declination));
    CUDA_CHECK(hipFree(device_histogram));
} | 5270b6c8597ee7e0bc7a5875f5978dfc886015ec.cu | //cuda inclusion
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//Math inclusion
#define _USE_MATH_DEFINES
#include <math.h>
#include <cmath>
#include <ctime>
//c++ and project inclusion
#include <stdio.h>
#include "galaxyKernel.h"
//Cuda error handling start here
// Report a CUDA API error together with its source location, on both
// stderr and stdout.  Non-fatal: execution continues after logging.
inline void error_check(cudaError_t err, const char* file, int line)
{
    if (err == cudaSuccess)
    {
        return;
    }
    ::fprintf(stderr, "\nCUDA ERROR at %s[%d] : %s\n", file, line, cudaGetErrorString(err));
    printf("\nGeneral error at %s[%d] : %s\n", file, line, cudaGetErrorString(err));
}
#define CUDA_CHECK(err) do { error_check(err, __FILE__, __LINE__); } while(0)
//this function clamp the numbers ORI with float
// Clamp `temp` into the closed interval [a, b].
__device__ float Clamp(float temp, float a, float b)
{
    float upper_bounded = fminf(temp, b);
    return fmaxf(upper_bounded, a);
}
// Angular two-point pair counting for one galaxy catalogue.
// Runs as a single block; the input is streamed in 98 tiles of 1024
// (ascension, declination) pairs staged in shared memory, every thread
// compares its galaxy against every galaxy of the tile, and the angular
// separation (degrees, 0.25-degree bins => 720 bins over 0..180) is
// accumulated into a shared histogram merged into `histogram` at the end.
// Requires blockDim.x == 1024 to match the tile width.
__global__ void LaunchGalaxy(float * device_ascension, float * device_declination, unsigned long long int * histogram, size_t size)
{
    // One tile of coordinates, staged through shared memory.
    __shared__ float S_asc[1024];
    __shared__ float S_dec[1024];
    // Block-local histogram, flushed to global memory once at the end.
    __shared__ unsigned long long int S_result[1024];
    int tid = threadIdx.x;
    // BUG FIX: the original zeroed S_result[tid + i] for i in [0, 1024),
    // indexing far past the end of the array; zero it cooperatively instead.
    for (int i = tid; i < 1024; i += blockDim.x)
    {
        S_result[i] = 0;
    }
    __syncthreads();
    for (int b = 0; b < 98; b++)
    {
        size_t base = (size_t)b * 1024;
        // BUG FIX: guard the actual load index (the original tested
        // b*tid + b*1024, which matches no address used below).
        if (base + tid < size)
        {
            S_asc[tid] = device_ascension[base + tid];
            S_dec[tid] = device_declination[base + tid];
        }
        __syncthreads();
        if (base + tid < size)
        {
            // Compare this thread's galaxy against every galaxy in the tile.
            // BUG FIX: the original read S_dec[tid + col] / S_asc[tid + col],
            // running past the 1024-element shared arrays; the intended pair
            // is (tid, col).
            for (int col = 0; col < 1024; col++)
            {
                if (base + col >= size)
                    break;
                float temp = acosf(Clamp(__sinf(S_dec[tid]) * __sinf(S_dec[col]) +
                    __cosf(S_dec[tid]) * __cosf(S_dec[col]) * __cosf(S_asc[tid] - S_asc[col]), -1.f, 1.f)) * 180.0f / (float)M_PI * 4.0f;
                // An exact 180-degree separation maps to 720; clamp into the
                // last valid bin of the 720-bin histogram.
                int bin = (int)temp;
                if (bin > 719) bin = 719;
                atomicAdd(&S_result[bin], 1ULL);
            }
        }
        // All reads of this tile must finish before the next iteration
        // overwrites S_asc/S_dec (the original synced inside divergent
        // control flow, risking deadlock, and reloaded the tile mid-loop).
        __syncthreads();
    }
    // BUG FIX: the original never published the counts to global memory.
    for (int i = tid; i < 720; i += blockDim.x)
    {
        atomicAdd(&histogram[i], S_result[i]);
    }
}
// Host wrapper for LaunchGalaxy: allocates device buffers, uploads the
// coordinate arrays and the 720-bin histogram, launches the kernel as a
// single 1024-thread block, times it with clock(), and copies the histogram
// back.  `size` is the number of (ascension, declination) pairs.
void Kernel_handler_single(float * host_ascension, float * host_declination, unsigned long long int * host_histogram, size_t size)
{
    //device buffers
    float* device_ascension = nullptr;
    float* device_declination = nullptr;
    //cuda histogram
    unsigned long long int * device_histogram = nullptr;
    //ascension and declination
    CUDA_CHECK(cudaMalloc(&device_ascension, size * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&device_declination, size * sizeof(float)));
    //histogram
    CUDA_CHECK(cudaMalloc(&device_histogram, 720 * sizeof(unsigned long long int)));
    //copy memory
    CUDA_CHECK(cudaMemcpy(device_ascension, host_ascension, size * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(device_declination, host_declination, size * sizeof(float), cudaMemcpyHostToDevice));
    // BUG FIX: the histogram holds unsigned long long, not float; copying
    // 720 * sizeof(float) uploaded only half of the buffer.
    CUDA_CHECK(cudaMemcpy(device_histogram, host_histogram, 720 * sizeof(unsigned long long int), cudaMemcpyHostToDevice));
    //take kernel time start
    clock_t s = clock();
    // BUG FIX: <<< (1,1,1), (32,1,1) >>> invoked the C comma operator and
    // collapsed to <<<1, 32>>>; spell the configuration out and launch the
    // full 1024-thread block that the kernel's shared-memory tiles assume.
    LaunchGalaxy <<< dim3(1, 1, 1), dim3(1024, 1, 1) >>> (device_ascension, device_declination, device_histogram, size);
    //device sync
    cudaError_t errAsync = cudaDeviceSynchronize();
    //check error sync
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("\nError in cuda kernel (sync side) %s\n", cudaGetErrorString(err));
    }
    //check error async
    if (errAsync != cudaSuccess)
    {
        printf("\nError in cuda kernel (async side) %s\n", cudaGetErrorString(errAsync));
    }
    clock_t e = clock();
    //calculate elapsed time
    double el = ((double)(e - s)) / CLOCKS_PER_SEC;
    printf("\n\n### The kernel timer took %f secs\n", el);
    CUDA_CHECK(cudaMemcpy(host_histogram, device_histogram, 720 * sizeof(unsigned long long int), cudaMemcpyDeviceToHost));
    //cuda free
    CUDA_CHECK(cudaFree(device_ascension));
    CUDA_CHECK(cudaFree(device_declination));
    CUDA_CHECK(cudaFree(device_histogram));
} |
5fa106f504e79004a9db77fd11b9a1d19a233645.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO 1 // halo width along one direction when advancing to the next iteration
/*#define BENCH_PRINT*/
void run(int argc, char** argv);
int rows, cols;
int* data;
int** wall;
int* result;
#define M_SEED 9
int pyramid_height;
// Parse "dynproc row_len col_len pyramid_height" command-line arguments,
// allocate the global data/wall/result buffers, and fill the wall with
// deterministic pseudo-random weights (fixed seed M_SEED).
// Exits with a usage message when argc != 4.
void
init(int argc, char** argv)
{
    if(argc==4){
        cols = atoi(argv[1]);
        rows = atoi(argv[2]);
        pyramid_height=atoi(argv[3]);
    }else{
        printf("Usage: dynproc row_len col_len pyramid_height\n");
        exit(0);
    }
    data = new int[rows*cols];
    wall = new int*[rows];
    for(int n=0; n<rows; n++)
        wall[n]=data+cols*n;  // wall[r] is a row view into the flat data array
    result = new int[cols];
    int seed = M_SEED;
    srand(seed);
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            wall[i][j] = rand() % 10;
        }
    }
#ifdef BENCH_PRINT
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            printf("%d ",wall[i][j]) ;
        }
        printf("\n") ;
    }
#endif
}
// Report an error message on stderr.
// NOTE(review): despite the name, this does not terminate the program;
// callers continue after it returns — confirm that is intended.
void
fatal(char *s)
{
    fprintf(stderr, "error: %s\n", s);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
// One pyramid step of the pathfinder DP: each block advances `iteration`
// rows of the shortest-path table over its (halo-extended) tile of columns.
// gpuSrc holds the previous row, gpuResults receives the new row; `border`
// is the halo width trimmed from each tile edge so neighbouring blocks'
// results do not overlap.
__global__ void dynproc_kernel(
                int iteration,
                int *gpuWall,
                int *gpuSrc,
                int *gpuResults,
                int cols,
                int rows,
                int startStep,
                int border)
{
        // Double buffer for the wavefront: prev = last completed row,
        // result = row being computed.
        __shared__ int prev[BLOCK_SIZE];
        __shared__ int result[BLOCK_SIZE];
        int bx = blockIdx.x;
        int tx=threadIdx.x;
        // each block finally computes result for a small block
        // after N iterations.
        // it is the non-overlapping small blocks that cover
        // all the input data
        // calculate the small block size
        int small_block_cols = BLOCK_SIZE-iteration*HALO*2;
        // calculate the boundary for the block according to
        // the boundary of its small block
        int blkX = small_block_cols*bx-border;
        int blkXmax = blkX+BLOCK_SIZE-1;
        // calculate the global thread coordination
        int xidx = blkX+tx;
        // effective range within this block that falls within
        // the valid range of the input data
        // used to rule out computation outside the boundary.
        int validXmin = (blkX < 0) ? -blkX : 0;
        int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
        // West/east neighbour indices, clamped at the valid tile edges.
        int W = tx-1;
        int E = tx+1;
        W = (W < validXmin) ? validXmin : W;
        E = (E > validXmax) ? validXmax : E;
        bool isValid = IN_RANGE(tx, validXmin, validXmax);
        if(IN_RANGE(xidx, 0, cols-1)){
            prev[tx] = gpuSrc[xidx];
        }
        __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
        bool computed;
        for (int i=0; i<iteration ; i++){
            computed = false;
            // The computable interior shrinks by one on each side per
            // iteration as halo data becomes stale.
            if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
                  isValid){
                  computed = true;
                  int left = prev[W];
                  int up = prev[tx];
                  int right = prev[E];
                  int shortest = MIN(left, up);
                  shortest = MIN(shortest, right);
                  int index = cols*(startStep+i)+xidx;
                  result[tx] = shortest + gpuWall[index];
            }
            __syncthreads();
            if(i==iteration-1)
                break;
            if(computed)	 //Assign the computation range
                prev[tx]= result[tx];
            __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
      }
      // update the global memory
      // after the last iteration, only threads coordinated within the
      // small block perform the calculation and switch on ``computed''
      if (computed){
          gpuResults[xidx]=result[tx];
      }
}
/*
compute N time steps
*/
// Advance the DP over all rows, launching the pyramid kernel for up to
// `pyramid_height` rows at a time while ping-ponging gpuResult[src] and
// gpuResult[dst].  Returns the index of the buffer holding the final row.
// NOTE(review): kernel launches are not error-checked here.
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \
	 int pyramid_height, int blockCols, int borderCols)
{
        dim3 dimBlock(BLOCK_SIZE);
        dim3 dimGrid(blockCols);
        int src = 1, dst = 0;
	for (int t = 0; t < rows-1; t+=pyramid_height) {
            int temp = src;
            src = dst;
            dst = temp;
            hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
                MIN(pyramid_height, rows-t-1),
                gpuWall, gpuResult[src], gpuResult[dst],
                cols,rows, t, borderCols);
	}
        return dst;
}
// Entry point: when several devices are present select the configured
// DEVICE, then execute the pathfinder benchmark.
int main(int argc, char** argv)
{
    int num_devices = 0;
    hipGetDeviceCount(&num_devices);
    if (num_devices > 1)
    {
        hipSetDevice(DEVICE);
    }
    run(argc, argv);
    return EXIT_SUCCESS;
}
// Drive one benchmark run: compute the pyramid/tile geometry, stage the
// wall on the device (row 0 seeds the DP, the rest are weights), ping-pong
// the DP rows, and fetch the final row into `result`.
// NOTE(review): hipMalloc/hipMemcpy return codes are not checked here.
void run(int argc, char** argv)
{
    init(argc, argv);
    /* --------------- pyramid parameters --------------- */
    int borderCols = (pyramid_height)*HALO;
    int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2;
    int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1);
    printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\
	pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);
    int *gpuWall, *gpuResult[2];
    int size = rows*cols;
    hipMalloc((void**)&gpuResult[0], sizeof(int)*cols);
    hipMalloc((void**)&gpuResult[1], sizeof(int)*cols);
    hipMemcpy(gpuResult[0], data, sizeof(int)*cols, hipMemcpyHostToDevice);
    hipMalloc((void**)&gpuWall, sizeof(int)*(size-cols));
    hipMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), hipMemcpyHostToDevice);
    int final_ret = calc_path(gpuWall, gpuResult, rows, cols, \
	 pyramid_height, blockCols, borderCols);
    hipMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, hipMemcpyDeviceToHost);
#ifdef BENCH_PRINT
    for (int i = 0; i < cols; i++)
            printf("%d ",data[i]) ;
    printf("\n") ;
    for (int i = 0; i < cols; i++)
            printf("%d ",result[i]) ;
    printf("\n") ;
#endif
    hipFree(gpuWall);
    hipFree(gpuResult[0]);
    hipFree(gpuResult[1]);
    delete [] data;
    delete [] wall;
    delete [] result;
}
| 5fa106f504e79004a9db77fd11b9a1d19a233645.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO 1 // halo width along one direction when advancing to the next iteration
/*#define BENCH_PRINT*/
void run(int argc, char** argv);
int rows, cols;
int* data;
int** wall;
int* result;
#define M_SEED 9
int pyramid_height;
// Parse "dynproc row_len col_len pyramid_height" command-line arguments,
// allocate the global data/wall/result buffers, and fill the wall with
// deterministic pseudo-random weights (fixed seed M_SEED).
// Exits with a usage message when argc != 4.
void
init(int argc, char** argv)
{
    if(argc==4){
        cols = atoi(argv[1]);
        rows = atoi(argv[2]);
        pyramid_height=atoi(argv[3]);
    }else{
        printf("Usage: dynproc row_len col_len pyramid_height\n");
        exit(0);
    }
    data = new int[rows*cols];
    wall = new int*[rows];
    for(int n=0; n<rows; n++)
        wall[n]=data+cols*n;  // wall[r] is a row view into the flat data array
    result = new int[cols];
    int seed = M_SEED;
    srand(seed);
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            wall[i][j] = rand() % 10;
        }
    }
#ifdef BENCH_PRINT
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            printf("%d ",wall[i][j]) ;
        }
        printf("\n") ;
    }
#endif
}
// Report an error message on stderr.
// NOTE(review): despite the name, this does not terminate the program;
// callers continue after it returns — confirm that is intended.
void
fatal(char *s)
{
    fprintf(stderr, "error: %s\n", s);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
// One pyramid step of the pathfinder DP: each block advances `iteration`
// rows of the shortest-path table over its (halo-extended) tile of columns.
// gpuSrc holds the previous row, gpuResults receives the new row; `border`
// is the halo width trimmed from each tile edge so neighbouring blocks'
// results do not overlap.
__global__ void dynproc_kernel(
                int iteration,
                int *gpuWall,
                int *gpuSrc,
                int *gpuResults,
                int cols,
                int rows,
                int startStep,
                int border)
{
        // Double buffer for the wavefront: prev = last completed row,
        // result = row being computed.
        __shared__ int prev[BLOCK_SIZE];
        __shared__ int result[BLOCK_SIZE];
        int bx = blockIdx.x;
        int tx=threadIdx.x;
        // each block finally computes result for a small block
        // after N iterations.
        // it is the non-overlapping small blocks that cover
        // all the input data
        // calculate the small block size
        int small_block_cols = BLOCK_SIZE-iteration*HALO*2;
        // calculate the boundary for the block according to
        // the boundary of its small block
        int blkX = small_block_cols*bx-border;
        int blkXmax = blkX+BLOCK_SIZE-1;
        // calculate the global thread coordination
        int xidx = blkX+tx;
        // effective range within this block that falls within
        // the valid range of the input data
        // used to rule out computation outside the boundary.
        int validXmin = (blkX < 0) ? -blkX : 0;
        int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
        // West/east neighbour indices, clamped at the valid tile edges.
        int W = tx-1;
        int E = tx+1;
        W = (W < validXmin) ? validXmin : W;
        E = (E > validXmax) ? validXmax : E;
        bool isValid = IN_RANGE(tx, validXmin, validXmax);
        if(IN_RANGE(xidx, 0, cols-1)){
            prev[tx] = gpuSrc[xidx];
        }
        __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
        bool computed;
        for (int i=0; i<iteration ; i++){
            computed = false;
            // The computable interior shrinks by one on each side per
            // iteration as halo data becomes stale.
            if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
                  isValid){
                  computed = true;
                  int left = prev[W];
                  int up = prev[tx];
                  int right = prev[E];
                  int shortest = MIN(left, up);
                  shortest = MIN(shortest, right);
                  int index = cols*(startStep+i)+xidx;
                  result[tx] = shortest + gpuWall[index];
            }
            __syncthreads();
            if(i==iteration-1)
                break;
            if(computed)	 //Assign the computation range
                prev[tx]= result[tx];
            __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
      }
      // update the global memory
      // after the last iteration, only threads coordinated within the
      // small block perform the calculation and switch on ``computed''
      if (computed){
          gpuResults[xidx]=result[tx];
      }
}
/*
compute N time steps
*/
// Advance the DP over all rows, launching the pyramid kernel for up to
// `pyramid_height` rows at a time while ping-ponging gpuResult[src] and
// gpuResult[dst].  Returns the index of the buffer holding the final row.
// NOTE(review): kernel launches are not error-checked here.
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \
	 int pyramid_height, int blockCols, int borderCols)
{
        dim3 dimBlock(BLOCK_SIZE);
        dim3 dimGrid(blockCols);
        int src = 1, dst = 0;
	for (int t = 0; t < rows-1; t+=pyramid_height) {
            int temp = src;
            src = dst;
            dst = temp;
            dynproc_kernel<<<dimGrid, dimBlock>>>(
                MIN(pyramid_height, rows-t-1),
                gpuWall, gpuResult[src], gpuResult[dst],
                cols,rows, t, borderCols);
	}
        return dst;
}
// Entry point: when several devices are present select the configured
// DEVICE, then execute the pathfinder benchmark.
int main(int argc, char** argv)
{
    int num_devices = 0;
    cudaGetDeviceCount(&num_devices);
    if (num_devices > 1)
    {
        cudaSetDevice(DEVICE);
    }
    run(argc, argv);
    return EXIT_SUCCESS;
}
// Drive one benchmark run: compute the pyramid/tile geometry, stage the
// wall on the device (row 0 seeds the DP, the rest are weights), ping-pong
// the DP rows, and fetch the final row into `result`.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked here.
void run(int argc, char** argv)
{
    init(argc, argv);
    /* --------------- pyramid parameters --------------- */
    int borderCols = (pyramid_height)*HALO;
    int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2;
    int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1);
    printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\
	pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);
    int *gpuWall, *gpuResult[2];
    int size = rows*cols;
    cudaMalloc((void**)&gpuResult[0], sizeof(int)*cols);
    cudaMalloc((void**)&gpuResult[1], sizeof(int)*cols);
    cudaMemcpy(gpuResult[0], data, sizeof(int)*cols, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&gpuWall, sizeof(int)*(size-cols));
    cudaMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), cudaMemcpyHostToDevice);
    int final_ret = calc_path(gpuWall, gpuResult, rows, cols, \
	 pyramid_height, blockCols, borderCols);
    cudaMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, cudaMemcpyDeviceToHost);
#ifdef BENCH_PRINT
    for (int i = 0; i < cols; i++)
            printf("%d ",data[i]) ;
    printf("\n") ;
    for (int i = 0; i < cols; i++)
            printf("%d ",result[i]) ;
    printf("\n") ;
#endif
    cudaFree(gpuWall);
    cudaFree(gpuResult[0]);
    cudaFree(gpuResult[1]);
    delete [] data;
    delete [] wall;
    delete [] result;
}
|
df5c334c740293f65c850a3f76becd701634ae74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/elu_layer.hpp"
namespace caffe {
#ifdef USE_ROCM
// ELU forward kernel: out = in when in > 0, alpha * (e^in - 1) otherwise.
// IMPROVEMENT: expm1(x) computes e^x - 1 directly, avoiding the
// catastrophic cancellation that exp(x) - 1 suffers for small |x|.
template <typename Dtype>
__global__ void ELUForward(const int n, const Dtype* in, Dtype* out,
    Dtype alpha) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ? in[index] :
        alpha * expm1(in[index]);
  }
}
#endif // USE_ROCM
// GPU forward pass of the ELU layer:
//   top = bottom                          when bottom > 0
//   top = alpha * (exp(bottom) - 1)       otherwise
// Dispatches to the CUDA kernel or the GreenTea/OpenCL kernel depending on
// the device backend.
template <typename Dtype>
void ELULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  Dtype alpha = this->layer_param_.elu_param().alpha();
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
    // NOLINT_NEXT_LINE(whitespace/operators)
    ELUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                  CAFFE_CUDA_NUM_THREADS)(
        count, bottom_data, top_data, alpha);
    CUDA_POST_KERNEL_CHECK;
#endif  // USE_ROCM
  } else {
#ifdef USE_GREENTEA
    // OpenCL path: enqueue the prebuilt "elu_forward" kernel on the
    // device's command queue.
    viennacl::ocl::context &ctx = viennacl::ocl::get_context(
        this->device_->id());
    viennacl::ocl::program &program = this->device_->template program<Dtype>();
    viennacl::ocl::kernel &oclk_elu = program.get_kernel(
        CL_KERNEL_SELECT("elu_forward"));
    viennacl::ocl::enqueue(
        oclk_elu(count, WrapHandle((cl_mem) bottom_data, &ctx),
                 WrapHandle((cl_mem) top_data, &ctx),
                 fixup_arg_type(alpha)),
        ctx.get_queue());
#endif  // USE_GREENTEA
  }
}
#ifdef USE_ROCM
// ELU backward kernel: out_diff = in_diff when the forward input was
// positive, in_diff * (out_data + alpha) otherwise (out_data is the saved
// forward output).
template <typename Dtype>
__global__ void ELUBackward(const int n, const Dtype* in_diff,
    const Dtype* out_data, const Dtype* in_data,
    Dtype* out_diff, Dtype alpha) {
  CUDA_KERNEL_LOOP(index, n) {
    out_diff[index] = in_data[index] > 0 ? in_diff[index] :
        in_diff[index] * (out_data[index] + alpha);
  }
}
#endif // USE_ROCM
// GPU backward pass of the ELU layer: propagates top_diff to bottom_diff
// using the saved forward input (bottom_data) and output (top_data).
// Dispatches to the CUDA kernel or the GreenTea/OpenCL kernel depending on
// the device backend.
template <typename Dtype>
void ELULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* top_data = top[0]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    Dtype alpha = this->layer_param_.elu_param().alpha();
    if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
      // NOLINT_NEXT_LINE(whitespace/operators)
      ELUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                     CAFFE_CUDA_NUM_THREADS)(
          count, top_diff, top_data, bottom_data, bottom_diff, alpha);
      CUDA_POST_KERNEL_CHECK;
#endif  // USE_ROCM
    } else {
#ifdef USE_GREENTEA
      // OpenCL path: enqueue the prebuilt "elu_backward" kernel on the
      // device's command queue.
      viennacl::ocl::context &ctx = viennacl::ocl::get_context(
          this->device_->id());
      viennacl::ocl::program &program = this->device_->template program<Dtype>();
      viennacl::ocl::kernel &oclk_elu = program.get_kernel(
          CL_KERNEL_SELECT("elu_backward"));
      viennacl::ocl::enqueue(
          oclk_elu(count, WrapHandle((cl_mem) top_diff, &ctx),
                   WrapHandle((cl_mem) top_data, &ctx),
                   WrapHandle((cl_mem) bottom_data, &ctx),
                   WrapHandle((cl_mem) bottom_diff, &ctx),
                   fixup_arg_type(alpha)),
          ctx.get_queue());
#endif  // USE_GREENTEA
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ELULayer);
} // namespace caffe
| df5c334c740293f65c850a3f76becd701634ae74.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/elu_layer.hpp"
namespace caffe {
#ifdef USE_CUDA
// ELU forward kernel: out = in when in > 0, alpha * (e^in - 1) otherwise.
// IMPROVEMENT: expm1(x) computes e^x - 1 directly, avoiding the
// catastrophic cancellation that exp(x) - 1 suffers for small |x|.
template <typename Dtype>
__global__ void ELUForward(const int n, const Dtype* in, Dtype* out,
    Dtype alpha) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ? in[index] :
        alpha * expm1(in[index]);
  }
}
#endif // USE_CUDA
// GPU forward pass of the ELU layer:
//   top = bottom                          when bottom > 0
//   top = alpha * (exp(bottom) - 1)       otherwise
// Dispatches to the CUDA kernel or the GreenTea/OpenCL kernel depending on
// the device backend.
template <typename Dtype>
void ELULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  Dtype alpha = this->layer_param_.elu_param().alpha();
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    // NOLINT_NEXT_LINE(whitespace/operators)
    ELUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                  CAFFE_CUDA_NUM_THREADS)(
        count, bottom_data, top_data, alpha);
    CUDA_POST_KERNEL_CHECK;
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    // OpenCL path: enqueue the prebuilt "elu_forward" kernel on the
    // device's command queue.
    viennacl::ocl::context &ctx = viennacl::ocl::get_context(
        this->device_->id());
    viennacl::ocl::program &program = this->device_->template program<Dtype>();
    viennacl::ocl::kernel &oclk_elu = program.get_kernel(
        CL_KERNEL_SELECT("elu_forward"));
    viennacl::ocl::enqueue(
        oclk_elu(count, WrapHandle((cl_mem) bottom_data, &ctx),
                 WrapHandle((cl_mem) top_data, &ctx),
                 fixup_arg_type(alpha)),
        ctx.get_queue());
#endif  // USE_GREENTEA
  }
}
#ifdef USE_CUDA
// ELU backward kernel: out_diff = in_diff when the forward input was
// positive, in_diff * (out_data + alpha) otherwise (out_data is the saved
// forward output).
template <typename Dtype>
__global__ void ELUBackward(const int n, const Dtype* in_diff,
    const Dtype* out_data, const Dtype* in_data,
    Dtype* out_diff, Dtype alpha) {
  CUDA_KERNEL_LOOP(index, n) {
    out_diff[index] = in_data[index] > 0 ? in_diff[index] :
        in_diff[index] * (out_data[index] + alpha);
  }
}
#endif // USE_CUDA
// GPU backward pass of the ELU layer: propagates top_diff to bottom_diff
// using the saved forward input (bottom_data) and output (top_data).
// Dispatches to the CUDA kernel or the GreenTea/OpenCL kernel depending on
// the device backend.
template <typename Dtype>
void ELULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* top_data = top[0]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    Dtype alpha = this->layer_param_.elu_param().alpha();
    if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
      // NOLINT_NEXT_LINE(whitespace/operators)
      ELUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
                                     CAFFE_CUDA_NUM_THREADS)(
          count, top_diff, top_data, bottom_data, bottom_diff, alpha);
      CUDA_POST_KERNEL_CHECK;
#endif  // USE_CUDA
    } else {
#ifdef USE_GREENTEA
      // OpenCL path: enqueue the prebuilt "elu_backward" kernel on the
      // device's command queue.
      viennacl::ocl::context &ctx = viennacl::ocl::get_context(
          this->device_->id());
      viennacl::ocl::program &program = this->device_->template program<Dtype>();
      viennacl::ocl::kernel &oclk_elu = program.get_kernel(
          CL_KERNEL_SELECT("elu_backward"));
      viennacl::ocl::enqueue(
          oclk_elu(count, WrapHandle((cl_mem) top_diff, &ctx),
                   WrapHandle((cl_mem) top_data, &ctx),
                   WrapHandle((cl_mem) bottom_data, &ctx),
                   WrapHandle((cl_mem) bottom_diff, &ctx),
                   fixup_arg_type(alpha)),
          ctx.get_queue());
#endif  // USE_GREENTEA
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ELULayer);
} // namespace caffe
|
94408949373af98aa80061faacfd235d34642b18.hip | // !!! This is a file automatically generated by hipify!!!
/*BHEADER****************************************************************
* (c) 2007 The Regents of the University of California *
* *
* See the file COPYRIGHT_and_DISCLAIMER for a complete copyright *
* notice and disclaimer. *
* *
*EHEADER****************************************************************/
//--------------
// A micro kernel
//--------------
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#ifdef _OPENMP
#include <omp.h>
#else
#include <chrono>
#endif
#include "headers.h"
// CUDA/HIP block size or OpenCL work-group size
#define BLOCK_SIZE 256
//
const int testIter = 500;
double totalWallTime = 0.0;
//
void test_Matvec();
void test_Relax();
void test_Axpy();
//
// Driver: runs the Matvec, Relax and Axpy micro-benchmarks in sequence and
// reports the per-kernel accumulated time plus the total wall time.
int main(int argc, char *argv[])
{
#ifdef _OPENMP
  double t0 = 0.0,
         t1 = 0.0;  // BUG FIX: this ended with a trailing ',' which fused
                    // the declaration with 'double del_wtime' below and
                    // broke compilation whenever _OPENMP was defined.
#else
  printf("**** Warning: OpenMP is disabled ****\n");
#endif
  double del_wtime = 0.0;
#ifdef _OPENMP
  int max_num_threads;
#endif

  printf("\n");
  printf("//------------ \n");
  printf("// \n");
  printf("// CORAL AMGmk Benchmark Version 1.0 \n");
  printf("// \n");
  printf("//------------ \n");
  printf("\n testIter = %d \n\n", testIter );

#ifdef _OPENMP
  // BUG FIX: a duplicate testIter printf here used to print the same line
  // twice under OpenMP; it has been removed.
#pragma omp parallel
#pragma omp master
  max_num_threads = omp_get_num_threads();
  printf("\nmax_num_threads = %d \n\n",max_num_threads );
#endif

#ifdef _OPENMP
  t0 = omp_get_wtime();
#else
  auto t0 = std::chrono::steady_clock::now();
#endif

  // Matvec
  totalWallTime = 0.0;
  test_Matvec();
  printf("\n");
  printf("//------------ \n");
  printf("// \n");
  printf("// MATVEC\n");
  printf("// \n");
  printf("//------------ \n");
  printf("\nWall time = %f seconds. \n", totalWallTime);

  // Relax
  totalWallTime = 0.0;
  test_Relax();
  printf("\n");
  printf("//------------ \n");
  printf("// \n");
  printf("// Relax\n");
  printf("// \n");
  printf("//------------ \n");
  printf("\nTotal kernel time = %f seconds. \n", totalWallTime);

  // Axpy
  totalWallTime = 0.0;
  test_Axpy();
  printf("\n");
  printf("//------------ \n");
  printf("// \n");
  printf("// Axpy\n");
  printf("// \n");
  printf("//------------ \n");
  printf("\nWall time = %f seconds. \n", totalWallTime);

#ifdef _OPENMP
  t1 = omp_get_wtime();
  del_wtime = t1 - t0;
#else
  auto t1 = std::chrono::steady_clock::now();
  std::chrono::duration<double> diff = t1 - t0;
  del_wtime = diff.count();
#endif

  printf("\nTotal Wall time = %f seconds. \n", del_wtime);
  return 0;
}
// Benchmark hypre's CSR matvec y = A*x on a 50^3 7-point Laplacian:
// accumulate wall time over testIter repetitions into totalWallTime and
// check y against the reference vector produced by the generator.
void test_Matvec()
{
#ifdef _OPENMP
  double t0 = 0.0,
         t1 = 0.0;
#endif
  hypre_CSRMatrix *A;
  hypre_Vector *x, *y, *sol;
  int nx, ny, nz, i;
  double *values;
  double *y_data, *sol_data;
  double error, diff;
  nx = 50;  /* size per proc nx*ny*nz */
  ny = 50;
  nz = 50;
  // 7-point stencil weights: 6 on the diagonal, -1 per direction.
  values = hypre_CTAlloc(double, 4);
  values[0] = 6;
  values[1] = -1;
  values[2] = -1;
  values[3] = -1;
  A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol);
  hypre_SeqVectorSetConstantValues(x,1);
  hypre_SeqVectorSetConstantValues(y,0);
#ifdef _OPENMP
  t0 = omp_get_wtime();
#else
  auto t0 = std::chrono::steady_clock::now();
#endif
  for (i=0; i<testIter; ++i)
      hypre_CSRMatrixMatvec(1,A,x,0,y);
#ifdef _OPENMP
  t1 = omp_get_wtime() ;
  totalWallTime += t1 - t0;
#else
  auto t1 = std::chrono::steady_clock::now();
  std::chrono::duration<double> tdiff = t1 - t0;
  totalWallTime += tdiff.count();
#endif
  // Verify: maximum absolute deviation from the reference solution.
  y_data = hypre_VectorData(y);
  sol_data = hypre_VectorData(sol);
  error = 0;
  for (i=0; i < nx*ny*nz; i++)
  {
      diff = fabs(y_data[i]-sol_data[i]);
      if (diff > error) error = diff;
  }
  if (error > 0) printf(" \n Matvec: error: %e\n", error);
  hypre_TFree(values);
  hypre_CSRMatrixDestroy(A);
  hypre_SeqVectorDestroy(x);
  hypre_SeqVectorDestroy(y);
  hypre_SeqVectorDestroy(sol);
}
// One sweep of pointwise relaxation over a CSR matrix, one thread per row:
//   u[i] = (f[i] - sum_{j != i} A(i,j) * u[j]) / A(i,i)
// The diagonal entry is stored first in each row (A_diag_data[A_diag_i[i]]),
// so the inner loop starts at A_diag_i[i]+1.
// NOTE(review): u_data is read and written concurrently by different
// threads with no synchronization, so neighbour values may be old or new
// depending on scheduling — the sweep is nondeterministic; confirm this is
// acceptable for the benchmark.
__global__ void
relax (const double *A_diag_data, const int *A_diag_i, const int *A_diag_j,
       double *u_data, const double *f_data, const int n)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i >= n) return;
  /*-----------------------------------------------------------
   * If diagonal is nonzero, relax point i; otherwise, skip it.
   *-----------------------------------------------------------*/
  if ( A_diag_data[A_diag_i[i]] != 0.0)
  {
    double res = f_data[i];
    for (int jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
    {
      int ii = A_diag_j[jj];
      res -= A_diag_data[jj] * u_data[ii];
    }
    u_data[i] = res / A_diag_data[A_diag_i[i]];
  }
}
// Benchmark the GPU relaxation kernel on the 50^3 Laplacian: upload the CSR
// matrix and the u/f vectors, run testIter sweeps, download u and compare
// against the known solution u == 1.
// NOTE(review): hipMalloc/hipMemcpy return codes are not checked here.
void test_Relax()
{
#ifdef _OPENMP
  double t0 = 0.0,
         t1 = 0.0;
#endif
  hypre_CSRMatrix *A;
  hypre_Vector *x, *y, *sol;
  int nx, ny, nz, i;
  double *values;
  double diff, error;
  nx = 50;  /* size per proc nx*ny*nz */
  ny = 50;
  nz = 50;
  values = hypre_CTAlloc(double, 4);
  values[0] = 6;
  values[1] = -1;
  values[2] = -1;
  values[3] = -1;
  A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol);
  hypre_SeqVectorSetConstantValues(x,1);
  // Raw views into the hypre structures, used for the device transfers.
  double *A_diag_data = hypre_CSRMatrixData(A);
  int *A_diag_i = hypre_CSRMatrixI(A);
  int *A_diag_j = hypre_CSRMatrixJ(A);
  int n = hypre_CSRMatrixNumRows(A);
  int nonzero = hypre_CSRMatrixNumNonzeros(A);
  double *u_data = hypre_VectorData(x);
  //int u_data_size = hypre_VectorSize(x);
  double *f_data = hypre_VectorData(sol);
  //int f_data_size = hypre_VectorSize(sol);
  int grid_size = nx*ny*nz;
  double *d_A_diag_data;
  int *d_A_diag_i;
  int *d_A_diag_j;
  double *d_u_data;
  double *d_f_data;
  hipMalloc((void**)&d_A_diag_data, sizeof(double)*nonzero);
  hipMalloc((void**)&d_A_diag_i, sizeof(int)*(grid_size+1));
  hipMalloc((void**)&d_A_diag_j, sizeof(int)*nonzero);
  hipMalloc((void**)&d_u_data, sizeof(double)*grid_size);
  hipMalloc((void**)&d_f_data, sizeof(double)*grid_size);
  hipMemcpy(d_A_diag_data, A_diag_data, sizeof(double)*nonzero, hipMemcpyHostToDevice);
  hipMemcpy(d_A_diag_i, A_diag_i, sizeof(int)*(grid_size+1), hipMemcpyHostToDevice);
  hipMemcpy(d_A_diag_j, A_diag_j, sizeof(int)*nonzero, hipMemcpyHostToDevice);
  hipMemcpy(d_u_data, u_data, sizeof(double)*grid_size, hipMemcpyHostToDevice);
  hipMemcpy(d_f_data, f_data, sizeof(double)*grid_size, hipMemcpyHostToDevice);
  // Make sure the uploads are done before starting the timer.
  hipDeviceSynchronize();
#ifdef _OPENMP
  t0 = omp_get_wtime();
#else
  auto t0 = std::chrono::steady_clock::now();
#endif
  dim3 block1D(BLOCK_SIZE);
  dim3 grid1D((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
  for (i = 0; i < testIter; ++i) {
    hipLaunchKernelGGL(( relax), dim3(dim3(grid1D)), dim3(dim3(block1D)) , 0, 0,
       d_A_diag_data, d_A_diag_i, d_A_diag_j, d_u_data, d_f_data, n);
  }
  hipDeviceSynchronize();
#ifdef _OPENMP
  t1 = omp_get_wtime();
  totalWallTime += t1 - t0;
#else
  auto t1 = std::chrono::steady_clock::now();
  std::chrono::duration<double> tdiff = t1 - t0;
  totalWallTime += tdiff.count();
#endif
  hipMemcpy(u_data, d_u_data, sizeof(double)*grid_size, hipMemcpyDeviceToHost);
  hipFree(d_A_diag_data);
  hipFree(d_A_diag_i);
  hipFree(d_A_diag_j);
  hipFree(d_u_data);
  hipFree(d_f_data);
  // Verify: maximum absolute deviation from the exact solution u == 1.
  error = 0;
  for (i=0; i < nx*ny*nz; i++)
  {
      diff = fabs(u_data[i]-1);
      if (diff > error) error = diff;
  }
  if (error > 0) printf(" \n Relax: error: %e\n", error);
  hypre_TFree(values);
  hypre_CSRMatrixDestroy(A);
  hypre_SeqVectorDestroy(x);
  hypre_SeqVectorDestroy(y);
  hypre_SeqVectorDestroy(sol);
}
// Benchmark hypre's sequential axpy y += alpha*x on vectors of length
// 125000, accumulating wall time over testIter repetitions and checking the
// closed-form result y == 1 + 0.5*testIter.
void test_Axpy()
{
#ifdef _OPENMP
  double t0 = 0.0,
         t1 = 0.0;
#endif
  hypre_Vector *x, *y;
  int nx, i;
  double alpha=0.5;
  double diff, error;
  double *y_data;
  nx = 125000;  /* size per proc  */
  x = hypre_SeqVectorCreate(nx);
  y = hypre_SeqVectorCreate(nx);
  hypre_SeqVectorInitialize(x);
  hypre_SeqVectorInitialize(y);
  hypre_SeqVectorSetConstantValues(x,1);
  hypre_SeqVectorSetConstantValues(y,1);
#ifdef _OPENMP
  t0 = omp_get_wtime();
#else
  auto t0 = std::chrono::steady_clock::now();
#endif
  for (i=0; i<testIter; ++i)
      hypre_SeqVectorAxpy(alpha,x,y);
#ifdef _OPENMP
  t1 = omp_get_wtime();
#else
  auto t1 = std::chrono::steady_clock::now();
#endif
  // Verify against the closed form: y_i = 1 + alpha * testIter.
  y_data = hypre_VectorData(y);
  error = 0;
  for (i=0; i < nx; i++)
  {
      diff = fabs(y_data[i]-1-0.5*(double)testIter);
      if (diff > error) error = diff;
  }
  if (error > 0) printf(" \n Axpy: error: %e\n", error);
#ifdef _OPENMP
  totalWallTime += t1 - t0;
#else
  std::chrono::duration<double> tdiff = t1 - t0;
  totalWallTime += tdiff.count();
#endif
  hypre_SeqVectorDestroy(x);
  hypre_SeqVectorDestroy(y);
}
| 94408949373af98aa80061faacfd235d34642b18.cu | /*BHEADER****************************************************************
* (c) 2007 The Regents of the University of California *
* *
* See the file COPYRIGHT_and_DISCLAIMER for a complete copyright *
* notice and disclaimer. *
* *
*EHEADER****************************************************************/
//--------------
// A micro kernel
//--------------
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#ifdef _OPENMP
#include <omp.h>
#else
#include <chrono>
#endif
#include "headers.h"
// CUDA/HIP block size or OpenCL work-group size
#define BLOCK_SIZE 256
//
const int testIter = 500;
double totalWallTime = 0.0;
//
void test_Matvec();
void test_Relax();
void test_Axpy();
//
int main(int argc, char *argv[])
{
#ifdef _OPENMP
double t0 = 0.0,
t1 = 0.0,
#else
printf("**** Warning: OpenMP is disabled ****\n");
#endif
double del_wtime = 0.0;
#ifdef _OPENMP
int max_num_threads;
#endif
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// CORAL AMGmk Benchmark Version 1.0 \n");
printf("// \n");
printf("//------------ \n");
printf("\n testIter = %d \n\n", testIter );
#ifdef _OPENMP
printf("\n testIter = %d \n\n", testIter );
#pragma omp parallel
#pragma omp master
max_num_threads = omp_get_num_threads();
printf("\nmax_num_threads = %d \n\n",max_num_threads );
#endif
#ifdef _OPENMP
t0 = omp_get_wtime();
#else
auto t0 = std::chrono::steady_clock::now();
#endif
// Matvec
totalWallTime = 0.0;
test_Matvec();
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// MATVEC\n");
printf("// \n");
printf("//------------ \n");
printf("\nWall time = %f seconds. \n", totalWallTime);
// Relax
totalWallTime = 0.0;
test_Relax();
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// Relax\n");
printf("// \n");
printf("//------------ \n");
printf("\nTotal kernel time = %f seconds. \n", totalWallTime);
// Axpy
totalWallTime = 0.0;
test_Axpy();
printf("\n");
printf("//------------ \n");
printf("// \n");
printf("// Axpy\n");
printf("// \n");
printf("//------------ \n");
printf("\nWall time = %f seconds. \n", totalWallTime);
#ifdef _OPENMP
t1 = omp_get_wtime();
del_wtime = t1 - t0;
#else
auto t1 = std::chrono::steady_clock::now();
std::chrono::duration<double> diff = t1 - t0;
del_wtime = diff.count();
#endif
printf("\nTotal Wall time = %f seconds. \n", del_wtime);
return 0;
}
// Micro-benchmark for hypre_CSRMatrixMatvec (y = A*x) on a 50^3 sequential
// 7-point Laplacian. Times testIter products, compares y against the
// reference vector 'sol' returned by GenerateSeqLaplacian, and adds the
// elapsed time to totalWallTime.
void test_Matvec()
{
#ifdef _OPENMP
double t0 = 0.0,
t1 = 0.0;
#endif
hypre_CSRMatrix *A;
hypre_Vector *x, *y, *sol;
int nx, ny, nz, i;
double *values;
double *y_data, *sol_data;
double error, diff;
nx = 50; /* size per proc nx*ny*nz */
ny = 50;
nz = 50;
// Stencil coefficients: 6 on the diagonal, -1 for each axis neighbor.
values = hypre_CTAlloc(double, 4);
values[0] = 6;
values[1] = -1;
values[2] = -1;
values[3] = -1;
A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol);
hypre_SeqVectorSetConstantValues(x,1);
hypre_SeqVectorSetConstantValues(y,0);
#ifdef _OPENMP
t0 = omp_get_wtime();
#else
auto t0 = std::chrono::steady_clock::now();
#endif
// NOTE: y is overwritten each iteration (beta = 0), so repeated products
// are idempotent and the final y can be checked once against sol.
for (i=0; i<testIter; ++i)
hypre_CSRMatrixMatvec(1,A,x,0,y);
#ifdef _OPENMP
t1 = omp_get_wtime() ;
totalWallTime += t1 - t0;
#else
auto t1 = std::chrono::steady_clock::now();
std::chrono::duration<double> tdiff = t1 - t0;
totalWallTime += tdiff.count();
#endif
y_data = hypre_VectorData(y);
sol_data = hypre_VectorData(sol);
error = 0;
for (i=0; i < nx*ny*nz; i++)
{
diff = fabs(y_data[i]-sol_data[i]);
if (diff > error) error = diff;
}
if (error > 0) printf(" \n Matvec: error: %e\n", error);
hypre_TFree(values);
hypre_CSRMatrixDestroy(A);
hypre_SeqVectorDestroy(x);
hypre_SeqVectorDestroy(y);
hypre_SeqVectorDestroy(sol);
}
// One point-relaxation sweep over the CSR matrix: one thread per row i
// computes u[i] = (f[i] - sum_{j!=i} A[i][j]*u[j]) / A[i][i].
// The diagonal entry is assumed to be stored first in each CSR row
// (accessed at A_diag_i[i]; the off-diagonal loop starts at A_diag_i[i]+1).
// NOTE(review): neighbor values u_data[ii] are read while other threads
// update them, so this is an unordered (chaotic) relaxation, not a strict
// Jacobi/Gauss-Seidel sweep — presumably intentional for the benchmark.
__global__ void
relax (const double *A_diag_data, const int *A_diag_i, const int *A_diag_j,
double *u_data, const double *f_data, const int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n) return;
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != 0.0)
{
double res = f_data[i];
for (int jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
int ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
// Micro-benchmark for the 'relax' kernel: builds the 50^3 Laplacian, copies
// its CSR arrays plus u (initialized to 1) and f (= sol) to the device, runs
// testIter relaxation sweeps, copies u back, and checks it stayed at 1
// (1 is the exact solution, so relaxation should be a fixed point).
// NOTE(review): device API calls below are unchecked; failures would only
// surface as a wrong 'error' value.
void test_Relax()
{
#ifdef _OPENMP
double t0 = 0.0,
t1 = 0.0;
#endif
hypre_CSRMatrix *A;
hypre_Vector *x, *y, *sol;
int nx, ny, nz, i;
double *values;
double diff, error;
nx = 50; /* size per proc nx*ny*nz */
ny = 50;
nz = 50;
values = hypre_CTAlloc(double, 4);
values[0] = 6;
values[1] = -1;
values[2] = -1;
values[3] = -1;
A = GenerateSeqLaplacian(nx, ny, nz, values, &y, &x, &sol);
hypre_SeqVectorSetConstantValues(x,1);
double *A_diag_data = hypre_CSRMatrixData(A);
int *A_diag_i = hypre_CSRMatrixI(A);
int *A_diag_j = hypre_CSRMatrixJ(A);
int n = hypre_CSRMatrixNumRows(A);
int nonzero = hypre_CSRMatrixNumNonzeros(A);
double *u_data = hypre_VectorData(x);
//int u_data_size = hypre_VectorSize(x);
double *f_data = hypre_VectorData(sol);
//int f_data_size = hypre_VectorSize(sol);
int grid_size = nx*ny*nz;
double *d_A_diag_data;
int *d_A_diag_i;
int *d_A_diag_j;
double *d_u_data;
double *d_f_data;
cudaMalloc((void**)&d_A_diag_data, sizeof(double)*nonzero);
cudaMalloc((void**)&d_A_diag_i, sizeof(int)*(grid_size+1));
cudaMalloc((void**)&d_A_diag_j, sizeof(int)*nonzero);
cudaMalloc((void**)&d_u_data, sizeof(double)*grid_size);
cudaMalloc((void**)&d_f_data, sizeof(double)*grid_size);
// NOTE(review): the row-pointer copy uses grid_size+1 entries; this
// assumes n == grid_size (true for this Laplacian) — confirm if reused.
cudaMemcpy(d_A_diag_data, A_diag_data, sizeof(double)*nonzero, cudaMemcpyHostToDevice);
cudaMemcpy(d_A_diag_i, A_diag_i, sizeof(int)*(grid_size+1), cudaMemcpyHostToDevice);
cudaMemcpy(d_A_diag_j, A_diag_j, sizeof(int)*nonzero, cudaMemcpyHostToDevice);
cudaMemcpy(d_u_data, u_data, sizeof(double)*grid_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_f_data, f_data, sizeof(double)*grid_size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
#ifdef _OPENMP
t0 = omp_get_wtime();
#else
auto t0 = std::chrono::steady_clock::now();
#endif
// One thread per row, BLOCK_SIZE threads per block, ceil-div grid.
dim3 block1D(BLOCK_SIZE);
dim3 grid1D((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
for (i = 0; i < testIter; ++i) {
relax<<< dim3(grid1D), dim3(block1D) >>> (
d_A_diag_data, d_A_diag_i, d_A_diag_j, d_u_data, d_f_data, n);
}
// Kernel launches are asynchronous; synchronize before stopping the timer.
cudaDeviceSynchronize();
#ifdef _OPENMP
t1 = omp_get_wtime();
totalWallTime += t1 - t0;
#else
auto t1 = std::chrono::steady_clock::now();
std::chrono::duration<double> tdiff = t1 - t0;
totalWallTime += tdiff.count();
#endif
cudaMemcpy(u_data, d_u_data, sizeof(double)*grid_size, cudaMemcpyDeviceToHost);
cudaFree(d_A_diag_data);
cudaFree(d_A_diag_i);
cudaFree(d_A_diag_j);
cudaFree(d_u_data);
cudaFree(d_f_data);
error = 0;
for (i=0; i < nx*ny*nz; i++)
{
diff = fabs(u_data[i]-1);
if (diff > error) error = diff;
}
if (error > 0) printf(" \n Relax: error: %e\n", error);
hypre_TFree(values);
hypre_CSRMatrixDestroy(A);
hypre_SeqVectorDestroy(x);
hypre_SeqVectorDestroy(y);
hypre_SeqVectorDestroy(sol);
}
// Micro-benchmark for hypre_SeqVectorAxpy (y += alpha * x).
// Runs testIter axpy sweeps on length-nx vectors initialized to 1, times the
// loop (OpenMP wall clock when available, std::chrono otherwise), verifies
// y == 1 + alpha*testIter elementwise, and accumulates into totalWallTime.
void test_Axpy()
{
#ifdef _OPENMP
double t0 = 0.0,
t1 = 0.0;
#endif
hypre_Vector *x, *y;
int nx, i;
double alpha=0.5;
double diff, error;
double *y_data;
nx = 125000; /* size per proc */
x = hypre_SeqVectorCreate(nx);
y = hypre_SeqVectorCreate(nx);
hypre_SeqVectorInitialize(x);
hypre_SeqVectorInitialize(y);
hypre_SeqVectorSetConstantValues(x,1);
hypre_SeqVectorSetConstantValues(y,1);
#ifdef _OPENMP
t0 = omp_get_wtime();
#else
auto t0 = std::chrono::steady_clock::now();
#endif
for (i=0; i<testIter; ++i)
hypre_SeqVectorAxpy(alpha,x,y);
#ifdef _OPENMP
t1 = omp_get_wtime();
#else
auto t1 = std::chrono::steady_clock::now();
#endif
y_data = hypre_VectorData(y);
// Check result: every entry should be 1 + alpha*testIter.
// NOTE(review): the expected value hardcodes 0.5 instead of using alpha;
// they agree today but would silently diverge if alpha were changed.
error = 0;
for (i=0; i < nx; i++)
{
diff = fabs(y_data[i]-1-0.5*(double)testIter);
if (diff > error) error = diff;
}
if (error > 0) printf(" \n Axpy: error: %e\n", error);
#ifdef _OPENMP
totalWallTime += t1 - t0;
#else
std::chrono::duration<double> tdiff = t1 - t0;
totalWallTime += tdiff.count();
#endif
hypre_SeqVectorDestroy(x);
hypre_SeqVectorDestroy(y);
}
|
b848154fa9c623ec409571142db71b0ee59ff323.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <THH/THHAtomics.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
// Number of 1-D thread blocks needed to cover N elements with
// THREADS_PER_BLOCK threads each, capped at 65000 blocks; the kernels use
// CUDA_1D_KERNEL_LOOP (grid-stride), so a capped grid still covers all N.
inline int GET_BLOCKS(const int N) {
  int needed_blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
  const int block_cap = 65000;
  return (needed_blocks < block_cap) ? needed_blocks : block_cap;
}
// Bilinearly interpolates bottom_data (a height x width feature map) at the
// fractional location (y, x). Returns 0 for locations more than one pixel
// outside the map; otherwise clamps to the valid range and blends the four
// surrounding texels by their area weights.
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data,
                                         const int height, const int width,
                                         scalar_t y, scalar_t x) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    return 0;
  }
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;
  // Clamp the high neighbor at the last row/column (degenerate 1-pixel cell).
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets within the cell and their complements.
  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. - ly;
  scalar_t hx = 1. - lx;
  // do bilinear interpolation
  scalar_t lt = bottom_data[y_low * width + x_low];
  scalar_t rt = bottom_data[y_low * width + x_high];
  scalar_t lb = bottom_data[y_high * width + x_low];
  scalar_t rb = bottom_data[y_high * width + x_high];
  scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb);
  return val;
}
// Forward pass of ROIAlign: one output element (n, c, ph, pw) per thread,
// with CUDA_1D_KERNEL_LOOP striding over any excess. Each ROI row of
// bottom_rois is (batch_index, x1, y1, x2, y2) in input-image coordinates;
// the output value is the average of sample_num_h * sample_num_w bilinear
// samples taken inside the pooled bin.
// CHANGE: removed the unused per-thread h/w/hstart/wstart computations
// (dead code executed by every thread).
template <typename scalar_t>
__global__ void ROIAlignForward(const int nthreads, const scalar_t *bottom_data,
                                const scalar_t *bottom_rois,
                                const scalar_t spatial_scale,
                                const int sample_num, const int channels,
                                const int height, const int width,
                                const int pooled_height, const int pooled_width,
                                scalar_t *top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    // Scale ROI corners from image space to feature-map space.
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
    scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
    // Force malformed ROIs to be 1x1
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
    scalar_t bin_size_h = roi_height / pooled_height;
    scalar_t bin_size_w = roi_width / pooled_width;
    const scalar_t *offset_bottom_data =
        bottom_data + (roi_batch_ind * channels + c) * height * width;
    // Samples per bin; adaptive when sample_num <= 0.
    int sample_num_h = (sample_num > 0)
                           ? sample_num
                           : ceil(roi_height / pooled_height);  // e.g., = 2
    int sample_num_w =
        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
    // Average the bilinear samples over the bin.
    scalar_t output_val = 0;
    for (int iy = 0; iy < sample_num_h; iy++) {
      const scalar_t y = roi_start_h + ph * bin_size_h +
                         (scalar_t)(iy + scalar_t(.5f)) * bin_size_h /
                             (scalar_t)(sample_num_h);
      for (int ix = 0; ix < sample_num_w; ix++) {
        const scalar_t x = roi_start_w + pw * bin_size_w +
                           (scalar_t)(ix + scalar_t(.5f)) * bin_size_w /
                               (scalar_t)(sample_num_w);
        scalar_t val = bilinear_interpolate<scalar_t>(offset_bottom_data,
                                                      height, width, y, x);
        output_val += val;
      }
    }
    output_val /= (sample_num_h * sample_num_w);
    top_data[index] = output_val;
  }
}
// Host launcher for the ROIAlign forward kernel. Dispatches over the
// floating dtypes of 'features' (including half) and launches one thread
// per output element. Returns 1 unconditionally (errors surface through
// THCudaCheck).
int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois,
                           const float spatial_scale, const int sample_num,
                           const int channels, const int height,
                           const int width, const int num_rois,
                           const int pooled_height, const int pooled_width,
                           at::Tensor output) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      features.scalar_type(), "ROIAlignLaucherForward", ([&] {
        const scalar_t *bottom_data = features.data<scalar_t>();
        const scalar_t *rois_data = rois.data<scalar_t>();
        scalar_t *top_data = output.data<scalar_t>();
        // Launch on the default stream; grid is capped by GET_BLOCKS and
        // the kernel's grid-stride loop covers the remainder.
        hipLaunchKernelGGL(( ROIAlignForward<scalar_t>)
            , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
                output_size, bottom_data, rois_data, scalar_t(spatial_scale),
                sample_num, channels, height, width, pooled_height,
                pooled_width, top_data);
      }));
  THCudaCheck(hipGetLastError());
  return 1;
}
// Gradient counterpart of bilinear_interpolate: for the fractional location
// (y, x) it returns (by reference) the four blend weights w1..w4 and the
// integer coordinates of the four surrounding texels. Out-of-bounds
// locations get zero weights and -1 indices so callers can skip them.
template <typename scalar_t>
__device__ void bilinear_interpolate_gradient(const int height, const int width,
                                              scalar_t y, scalar_t x,
                                              scalar_t &w1, scalar_t &w2,
                                              scalar_t &w3, scalar_t &w4,
                                              int &x_low, int &x_high,
                                              int &y_low, int &y_high) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;
  y_low = (int)y;
  x_low = (int)x;
  // Clamp the high neighbor at the last row/column (matches the forward pass).
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }
  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. - ly;
  scalar_t hx = 1. - lx;
  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  return;
}
// Backward pass of ROIAlign: one output-gradient element per thread; each
// thread scatters its gradient to the four texels of every bilinear sample
// via atomicAdd (bins from different ROIs can overlap in bottom_diff).
// CHANGE: removed the unused per-thread h/w/hstart/wstart computations
// (dead code executed by every thread).
template <typename scalar_t>
__global__ void ROIAlignBackward(
    const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois,
    const scalar_t spatial_scale, const int sample_num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, scalar_t *bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
    scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
    // Force malformed ROIs to be 1x1
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
    scalar_t bin_size_h = roi_height / pooled_height;
    scalar_t bin_size_w = roi_width / pooled_width;
    scalar_t *offset_bottom_diff =
        bottom_diff + (roi_batch_ind * channels + c) * height * width;
    int offset_top = (n * channels + c) * pooled_height * pooled_width +
                     ph * pooled_width + pw;
    scalar_t offset_top_diff = top_diff[offset_top];
    // Samples per bin; adaptive when sample_num <= 0 (mirrors the forward).
    int sample_num_h = (sample_num > 0)
                           ? sample_num
                           : ceil(roi_height / pooled_height);  // e.g., = 2
    int sample_num_w =
        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
    const scalar_t count = (scalar_t)(sample_num_h * sample_num_w);
    for (int iy = 0; iy < sample_num_h; iy++) {
      const scalar_t y =
          roi_start_h + ph * bin_size_h +
          (scalar_t)(iy + .5f) * bin_size_h / (scalar_t)(sample_num_h);
      for (int ix = 0; ix < sample_num_w; ix++) {
        const scalar_t x =
            roi_start_w + pw * bin_size_w +
            (scalar_t)(ix + .5f) * bin_size_w / (scalar_t)(sample_num_w);
        scalar_t w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;
        bilinear_interpolate_gradient<scalar_t>(
            height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
        // Gradient of the bin average: each sample contributes weight/count.
        scalar_t g1 = offset_top_diff * w1 / count;
        scalar_t g2 = offset_top_diff * w2 / count;
        scalar_t g3 = offset_top_diff * w3 / count;
        scalar_t g4 = offset_top_diff * w4 / count;
        // Indices are all -1 for out-of-bounds samples; skip those.
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          atomicAdd(offset_bottom_diff + y_low * width + x_low, g1);
          atomicAdd(offset_bottom_diff + y_low * width + x_high, g2);
          atomicAdd(offset_bottom_diff + y_high * width + x_low, g3);
          atomicAdd(offset_bottom_diff + y_high * width + x_high, g4);
        }
      }
    }
  }
}
// Host launcher for the ROIAlign backward kernel. Dispatches over floating
// dtypes; double is rejected at runtime (the kernel relies on atomicAdd,
// which this build does not support for double). Returns 1 unconditionally.
int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
                            const float spatial_scale, const int sample_num,
                            const int channels, const int height,
                            const int width, const int num_rois,
                            const int pooled_height, const int pooled_width,
                            at::Tensor bottom_grad) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      top_grad.scalar_type(), "ROIAlignLaucherBackward", ([&] {
        const scalar_t *top_diff = top_grad.data<scalar_t>();
        const scalar_t *rois_data = rois.data<scalar_t>();
        scalar_t *bottom_diff = bottom_grad.data<scalar_t>();
        if (sizeof(scalar_t) == sizeof(double)) {
          fprintf(stderr, "double is not supported\n");
          exit(-1);
        }
        hipLaunchKernelGGL(( ROIAlignBackward<scalar_t>)
            , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
                output_size, top_diff, rois_data, spatial_scale, sample_num,
                channels, height, width, pooled_height, pooled_width,
                bottom_diff);
      }));
  THCudaCheck(hipGetLastError());
  return 1;
} | b848154fa9c623ec409571142db71b0ee59ff323.cu | #include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
// Number of 1-D thread blocks needed to cover N elements with
// THREADS_PER_BLOCK threads each, capped at 65000 blocks; the kernels use
// CUDA_1D_KERNEL_LOOP (grid-stride), so a capped grid still covers all N.
inline int GET_BLOCKS(const int N) {
  int needed_blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
  const int block_cap = 65000;
  return (needed_blocks < block_cap) ? needed_blocks : block_cap;
}
// Bilinearly interpolates bottom_data (a height x width feature map) at the
// fractional location (y, x). Returns 0 for locations more than one pixel
// outside the map; otherwise clamps to the valid range and blends the four
// surrounding texels by their area weights.
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data,
                                         const int height, const int width,
                                         scalar_t y, scalar_t x) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    return 0;
  }
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;
  // Clamp the high neighbor at the last row/column (degenerate 1-pixel cell).
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }
  // Fractional offsets within the cell and their complements.
  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. - ly;
  scalar_t hx = 1. - lx;
  // do bilinear interpolation
  scalar_t lt = bottom_data[y_low * width + x_low];
  scalar_t rt = bottom_data[y_low * width + x_high];
  scalar_t lb = bottom_data[y_high * width + x_low];
  scalar_t rb = bottom_data[y_high * width + x_high];
  scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb);
  return val;
}
// Forward pass of ROIAlign: one output element (n, c, ph, pw) per thread,
// with CUDA_1D_KERNEL_LOOP striding over any excess. Each ROI row of
// bottom_rois is (batch_index, x1, y1, x2, y2) in input-image coordinates;
// the output value is the average of sample_num_h * sample_num_w bilinear
// samples taken inside the pooled bin.
// CHANGE: removed the unused per-thread h/w/hstart/wstart computations
// (dead code executed by every thread).
template <typename scalar_t>
__global__ void ROIAlignForward(const int nthreads, const scalar_t *bottom_data,
                                const scalar_t *bottom_rois,
                                const scalar_t spatial_scale,
                                const int sample_num, const int channels,
                                const int height, const int width,
                                const int pooled_height, const int pooled_width,
                                scalar_t *top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    // Scale ROI corners from image space to feature-map space.
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
    scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
    // Force malformed ROIs to be 1x1
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
    scalar_t bin_size_h = roi_height / pooled_height;
    scalar_t bin_size_w = roi_width / pooled_width;
    const scalar_t *offset_bottom_data =
        bottom_data + (roi_batch_ind * channels + c) * height * width;
    // Samples per bin; adaptive when sample_num <= 0.
    int sample_num_h = (sample_num > 0)
                           ? sample_num
                           : ceil(roi_height / pooled_height);  // e.g., = 2
    int sample_num_w =
        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
    // Average the bilinear samples over the bin.
    scalar_t output_val = 0;
    for (int iy = 0; iy < sample_num_h; iy++) {
      const scalar_t y = roi_start_h + ph * bin_size_h +
                         (scalar_t)(iy + scalar_t(.5f)) * bin_size_h /
                             (scalar_t)(sample_num_h);
      for (int ix = 0; ix < sample_num_w; ix++) {
        const scalar_t x = roi_start_w + pw * bin_size_w +
                           (scalar_t)(ix + scalar_t(.5f)) * bin_size_w /
                               (scalar_t)(sample_num_w);
        scalar_t val = bilinear_interpolate<scalar_t>(offset_bottom_data,
                                                      height, width, y, x);
        output_val += val;
      }
    }
    output_val /= (sample_num_h * sample_num_w);
    top_data[index] = output_val;
  }
}
// Host launcher for the ROIAlign forward kernel. Dispatches over the
// floating dtypes of 'features' (including half) and launches one thread
// per output element. Returns 1 unconditionally (errors surface through
// THCudaCheck).
int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois,
                           const float spatial_scale, const int sample_num,
                           const int channels, const int height,
                           const int width, const int num_rois,
                           const int pooled_height, const int pooled_width,
                           at::Tensor output) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      features.scalar_type(), "ROIAlignLaucherForward", ([&] {
        const scalar_t *bottom_data = features.data<scalar_t>();
        const scalar_t *rois_data = rois.data<scalar_t>();
        scalar_t *top_data = output.data<scalar_t>();
        // Launch on the default stream; grid is capped by GET_BLOCKS and
        // the kernel's grid-stride loop covers the remainder.
        ROIAlignForward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
                output_size, bottom_data, rois_data, scalar_t(spatial_scale),
                sample_num, channels, height, width, pooled_height,
                pooled_width, top_data);
      }));
  THCudaCheck(cudaGetLastError());
  return 1;
}
// Gradient counterpart of bilinear_interpolate: for the fractional location
// (y, x) it returns (by reference) the four blend weights w1..w4 and the
// integer coordinates of the four surrounding texels. Out-of-bounds
// locations get zero weights and -1 indices so callers can skip them.
template <typename scalar_t>
__device__ void bilinear_interpolate_gradient(const int height, const int width,
                                              scalar_t y, scalar_t x,
                                              scalar_t &w1, scalar_t &w2,
                                              scalar_t &w3, scalar_t &w4,
                                              int &x_low, int &x_high,
                                              int &y_low, int &y_high) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;
  y_low = (int)y;
  x_low = (int)x;
  // Clamp the high neighbor at the last row/column (matches the forward pass).
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }
  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. - ly;
  scalar_t hx = 1. - lx;
  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  return;
}
// Backward pass of ROIAlign: one output-gradient element per thread; each
// thread scatters its gradient to the four texels of every bilinear sample
// via atomicAdd (bins from different ROIs can overlap in bottom_diff).
// CHANGE: removed the unused per-thread h/w/hstart/wstart computations
// (dead code executed by every thread).
template <typename scalar_t>
__global__ void ROIAlignBackward(
    const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois,
    const scalar_t spatial_scale, const int sample_num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, scalar_t *bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
    scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
    // Force malformed ROIs to be 1x1
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
    scalar_t bin_size_h = roi_height / pooled_height;
    scalar_t bin_size_w = roi_width / pooled_width;
    scalar_t *offset_bottom_diff =
        bottom_diff + (roi_batch_ind * channels + c) * height * width;
    int offset_top = (n * channels + c) * pooled_height * pooled_width +
                     ph * pooled_width + pw;
    scalar_t offset_top_diff = top_diff[offset_top];
    // Samples per bin; adaptive when sample_num <= 0 (mirrors the forward).
    int sample_num_h = (sample_num > 0)
                           ? sample_num
                           : ceil(roi_height / pooled_height);  // e.g., = 2
    int sample_num_w =
        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
    const scalar_t count = (scalar_t)(sample_num_h * sample_num_w);
    for (int iy = 0; iy < sample_num_h; iy++) {
      const scalar_t y =
          roi_start_h + ph * bin_size_h +
          (scalar_t)(iy + .5f) * bin_size_h / (scalar_t)(sample_num_h);
      for (int ix = 0; ix < sample_num_w; ix++) {
        const scalar_t x =
            roi_start_w + pw * bin_size_w +
            (scalar_t)(ix + .5f) * bin_size_w / (scalar_t)(sample_num_w);
        scalar_t w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;
        bilinear_interpolate_gradient<scalar_t>(
            height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
        // Gradient of the bin average: each sample contributes weight/count.
        scalar_t g1 = offset_top_diff * w1 / count;
        scalar_t g2 = offset_top_diff * w2 / count;
        scalar_t g3 = offset_top_diff * w3 / count;
        scalar_t g4 = offset_top_diff * w4 / count;
        // Indices are all -1 for out-of-bounds samples; skip those.
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          atomicAdd(offset_bottom_diff + y_low * width + x_low, g1);
          atomicAdd(offset_bottom_diff + y_low * width + x_high, g2);
          atomicAdd(offset_bottom_diff + y_high * width + x_low, g3);
          atomicAdd(offset_bottom_diff + y_high * width + x_high, g4);
        }
      }
    }
  }
}
// Host launcher for the ROIAlign backward kernel. Dispatches over floating
// dtypes; double is rejected at runtime (the kernel relies on atomicAdd,
// which this build does not support for double). Returns 1 unconditionally.
int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
                            const float spatial_scale, const int sample_num,
                            const int channels, const int height,
                            const int width, const int num_rois,
                            const int pooled_height, const int pooled_width,
                            at::Tensor bottom_grad) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      top_grad.scalar_type(), "ROIAlignLaucherBackward", ([&] {
        const scalar_t *top_diff = top_grad.data<scalar_t>();
        const scalar_t *rois_data = rois.data<scalar_t>();
        scalar_t *bottom_diff = bottom_grad.data<scalar_t>();
        if (sizeof(scalar_t) == sizeof(double)) {
          fprintf(stderr, "double is not supported\n");
          exit(-1);
        }
        ROIAlignBackward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
                output_size, top_diff, rois_data, spatial_scale, sample_num,
                channels, height, width, pooled_height, pooled_width,
                bottom_diff);
      }));
  THCudaCheck(cudaGetLastError());
  return 1;
}
19489fad01e649d8efc034f78f227e44f69ffc19.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <vector>
#include "pixel.h"
#include "imgSeg.h"
#include <cstdint>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include "CycleTimer.h"
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 16
#define SHARED_BLOCK_DIM 32
#define CHUNK_SIZE 512
#define cudaCheckError(ans) cudaAssert((ans), __FILE__, __LINE__);
// Reports a HIP/CUDA error to stderr with its source location and, unless
// 'abort' is false, terminates the process with the error code as exit
// status. Used through the cudaCheckError() macro above.
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code == hipSuccess)
        return;
    fprintf(stderr, "CUDA Error: %s at %s:%d\n",
            hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
int global_width;
int global_height;
// Segment-merge predicate: true when the squared Euclidean distance between
// the two RGB colors is strictly below t^2 (i.e. the colors are close enough
// for their segments to be merged).
__device__ __inline__ bool mergeCriterion(Pixel p1, Pixel p2, int t){
    int dr = (int)p1.r - (int)p2.r;
    int dg = (int)p1.g - (int)p2.g;
    int db = (int)p1.b - (int)p2.b;
    return dr * dr + dg * dg + db * db < t * t;
}
// Size-weighted average of two segment colors: A represents sizeA pixels and
// B represents sizeB pixels; the result is the color of the merged segment.
__device__ Pixel newColor(Pixel A, Pixel B, int sizeA, int sizeB){
    int total = sizeA + sizeB;
    Pixel mixed;
    mixed.r = (uint8_t)(((int)A.r * sizeA + (int)B.r * sizeB) / total);
    mixed.g = (uint8_t)(((int)A.g * sizeA + (int)B.g * sizeB) / total);
    mixed.b = (uint8_t)(((int)A.b * sizeA + (int)B.b * sizeB) / total);
    return mixed;
}
// Union-find "find" over the flat next[] array: follows parent links from
// pixel (srow, scol) until it reaches a self-loop (the root) or a -1 entry
// (pixel not yet in any set, in which case it becomes its own root).
// Only the starting cell's link is rewritten toward the root (partial path
// compression). The loop is bounded by the pixel count; falling through
// means the chain cycled, which is treated as fatal (assert(false)).
// NOTE(review): concurrent callers may race on next[]; presumably tolerated
// by the merge algorithm — confirm against the launch pattern.
__device__ int find(int *next,int srow,int scol,int global_width,int global_height){
//int row = srow;
//int col = scol;
int pos = srow * global_width + scol;
for(int i=0;i<global_width*global_height;i++){
//assert(pos>=0);
//assert(pos<= global_width*global_height);
//std::cout<<"in find"<<std::endl;
int index = next[pos];
if(index == -1){
// Unclaimed pixel: make it a root and point the start cell at it.
next[pos] = pos;
next[srow * global_width + scol] = pos;
return pos;
}
if(index == pos){
// Self-loop: found the root; shortcut the start cell to it.
next[srow* global_width + scol] = index;
return index;
}
pos = index;
}
assert(false);
}
/*
__device__ int shared_find(int *next_temp,int sr,int sc, int width,int height){
assert(sr<32);
assert(sc<32);
assert(sr>=0);
assert(sc>=0);
assert(false);
assert(blockDim.x == 32);
int pos = sr *blockDim.x + sc;
while(1){
assert(pos>=0);
assert(pos<1024);
int index = next_temp[pos];
//assert(index<1024);
//assert(index>=-1);
if(index == -1){
next_temp[pos]=pos;
next_temp[sr * blockDim.x + sc] = pos;
return pos;
}
if( index == pos){
next_temp[sr * blockDim.x + sc] = index;
return index;
}
pos = index;
}
}
*/
//local next_temp will need to have the values of the actual next array
//note that it returns the local pos, and just check into the shared array to find actual
// Shared-memory variant of find: next_temp is the block-local copy of the
// next[] links, indexed by block-local (row, col), but each entry STORES a
// GLOBAL flat position. (sr, sc) are global coordinates inside this block's
// tile. Returns the block-LOCAL index of the root; callers must map through
// next_temp to get the global position.
// NOTE(review): assumes the whole chain stays inside this block's tile —
// the global->local conversion of 'index' is only valid then; confirm.
__device__ __inline__ int shared_find(int *next_temp, int sr, int sc, int width, int height){
int xStart = blockIdx.x * blockDim.x;
int yStart = blockIdx.y * blockDim.y;
int sX = sc - xStart;
int sY = sr - yStart;
//assert(blockDim.x==32);
//assert(sY>=0);
//assert(sY<32);
//assert(sX>=0);
//assert(sX<32);
int start = xStart + yStart *width;
int pos = sY * blockDim.x +sX;
while(1){
//assert(pos>=0);
//assert(pos<1024);
int index = next_temp[pos];
if(index == -1){
// Unclaimed pixel: store its own GLOBAL position as the root link.
int posy = pos/blockDim.x;
int posx = pos % blockDim.x;
int temp = start + posx + posy * width;
next_temp[pos] = temp;
next_temp[sY * blockDim.x + sX] = temp;
return pos;
}
// Convert the stored global position back to block-local coordinates.
int checkY = index/width -yStart;
int checkX = index%width - xStart;
if(checkX + checkY*blockDim.x == pos){
// Self-loop: root found; shortcut the start cell to it.
next_temp[sY * blockDim.x + sX] = index;
return pos;
}
pos = checkX + checkY * blockDim.x;
}
}
// Examines the edge between pixels (col1,row1) and (col2,row2): finds the
// roots of both segments and, if they differ and their colors satisfy
// mergeCriterion, merges the smaller segment into the larger one (union by
// size), updating the merged root's color to the size-weighted average.
// NOTE(review): find/merge on shared arrays is not atomic; concurrent
// threads touching overlapping segments can race — presumably accepted by
// the iterative algorithm. Confirm against the launch schedule.
__device__ void verify_edge(Pixel *pixels_cu, int *next_cu,
int *size_cu, int col1, int row1, int col2, int row2,int global_width, int global_height) {
//sanity check
/*assert(col1< global_width);
assert(col1>=0);
assert(col2 < global_width);
assert(col2 >= 0);
assert(row1 < global_height);
assert(row1 >=0);
assert(row2 < global_height);
assert(row2 >=0);
*/
int aIndex = find(next_cu,row1,col1,global_width,global_height);
int bIndex = find(next_cu,row2,col2,global_width,global_height);
//assert(aIndex!= -1 && bIndex != -1);
//assert(aIndex<global_width*global_height && bIndex<global_height*global_width);
if(aIndex != bIndex){
Pixel A = pixels_cu[aIndex];
int aSize = size_cu[aIndex];
Pixel B = pixels_cu[bIndex];
int bSize = size_cu[bIndex];
if(mergeCriterion(A,B,30)){
// Union by size: attach the smaller root under the larger one.
if(aSize>bSize){
pixels_cu[aIndex] = newColor(A,B, aSize, bSize);
next_cu[bIndex] = aIndex;
size_cu[aIndex] += bSize;
}
else{
pixels_cu[bIndex] = newColor(A, B, aSize, bSize);
next_cu[aIndex] = bIndex;
size_cu[bIndex] += aSize;
}
}
}
return;
}
// Shared-memory variant of verify_edge operating on the block-local copies
// (pixels_temp/next_temp/size_temp). aIndex/bIndex are block-LOCAL root
// indices returned by shared_find.
// NOTE(review): unlike verify_edge (which writes next[b] = aIndex), this
// links via next_temp[bIndex] = next_temp[aIndex] — i.e. it stores the
// root's GLOBAL position kept in next_temp, consistent with shared_find's
// local-index/global-value convention. Verify this mapping is intended.
__device__ void shared_verify_edge(Pixel *pixels_temp, int *next_temp, int *size_temp, int c1, int r1,
int c2, int r2, int width, int height){
assert(c1< width);
assert(c1>=0);
assert(c2 < width);
assert(c2 >= 0);
assert(r1 < height);
assert(r1 >=0);
assert(r2 < height);
assert(r2 >=0);
int aIndex = shared_find(next_temp,r1,c1,width,height);
int bIndex = shared_find(next_temp,r2,c2,width,height);
assert(aIndex!= -1 && bIndex != -1);
assert(aIndex<1024 && bIndex<1024);
if(aIndex != bIndex){
Pixel A = pixels_temp[aIndex];
int aSize = size_temp[aIndex];
Pixel B = pixels_temp[bIndex];
int bSize = size_temp[bIndex];
if(mergeCriterion(A,B,30)){
// Union by size: attach the smaller root under the larger one.
if(aSize>bSize){
pixels_temp[aIndex] = newColor(A,B,aSize,bSize);
next_temp[bIndex] = next_temp[aIndex];
size_temp[aIndex] += bSize;
}
else{
pixels_temp[bIndex] = newColor(A,B,aSize,bSize);
next_temp[aIndex] =next_temp[bIndex];
size_temp[bIndex] += aSize;
}
}
}
return;
}
static void attempt_1(std::vector<Pixel> &pixels, int width, int height);
static void attempt_2(std::vector<Pixel> &pixels, int width, int height);
static void attempt_3(std::vector<Pixel> &pixels, int width, int height);
// Public entry point for GPU image segmentation: records the image
// dimensions in the module-level globals used by the kernels, then runs
// the attempt_1 segmentation strategy on the pixel buffer in place.
void cu_process(std::vector<Pixel> &pixels, int width, int height){
    global_height = height;
    global_width = width;
    attempt_1(pixels, width, height);
}
// Merges across vertical edges within a strip of columns: each thread owns
// an rsize x csize tile whose column offset is shifted by 'start', and
// checks the horizontal-neighbor edge (x, y+i) -- (x+1, y+i) for each of
// its rsize rows.
__global__ void rowComp(Pixel *pixels_cu, int *next_cu, int *size_cu,int start, int rsize,int csize,int global_width,
int global_height){
int y = blockDim.y*blockIdx.y *rsize + threadIdx.y * rsize;
int x = blockIdx.x * blockDim.x * csize + threadIdx.x*csize + start;
//printf("x: %d, y: %d\n global_width: %d\n",x,y,global_width);
// Right neighbor must exist.
if(x+1>=global_width){
return;
}
//printf("here\n");
for(int i=0;i<rsize;i++){
if(y+i>=global_height){
break;
}
//printf("about to ver edge\n");
verify_edge(pixels_cu,next_cu,size_cu,x,y+i,x+1,y+i,global_width,global_height);
}
}
// One vertical-merge phase: each thread owns the row pair (y, y+1), with y
// offset by `start` and strided by `rsize`, and sweeps `csize` consecutive
// columns, merging each vertical neighbor pair via verify_edge.
__global__ void colComp(Pixel *pixels_cu, int *next_cu, int *size_cu,int start, int rsize, int csize, int global_width,
int global_height){
    int x = (blockIdx.x * blockDim.x + threadIdx.x) * csize;
    int y = (blockIdx.y * blockDim.y + threadIdx.y) * rsize + start;
    if (y + 1 >= global_height) {
        return;  // lower neighbor would fall off the image
    }
    for (int c = x; c < x + csize && c < global_width; ++c) {
        verify_edge(pixels_cu, next_cu, size_cu, c, y, c, y + 1, global_width, global_height);
    }
}
// Diagonal merges across a column boundary: for the column pair (x, x+1) the
// thread walks its row span and merges both diagonals of each 2x2 window.
__global__ void rowDiagComp(Pixel *pixels_cu,int *next_cu, int *size_cu, int start,int rsize, int csize,
int global_width,int global_height){
    int y = (blockIdx.y * blockDim.y + threadIdx.y) * rsize;
    int x = (blockIdx.x * blockDim.x + threadIdx.x) * csize + start;
    if (x + 1 >= global_width) {
        return;  // right neighbor off-image
    }
    for (int r = y; r + 1 < y + rsize && r + 1 < global_height; ++r) {
        verify_edge(pixels_cu, next_cu, size_cu, x, r, x + 1, r + 1, global_width, global_height);
        verify_edge(pixels_cu, next_cu, size_cu, x + 1, r, x, r + 1, global_width, global_height);
    }
}
// Diagonal merges across a row boundary: for the row pair (y, y+1) the thread
// walks its column span and merges both diagonals of each 2x2 window.
__global__ void colDiagComp(Pixel *pixels_cu, int *next_cu, int *size_cu, int start, int rsize,int csize,
int global_width, int global_height){
    int x = (blockIdx.x * blockDim.x + threadIdx.x) * csize;
    int y = (blockIdx.y * blockDim.y + threadIdx.y) * rsize + start;
    if (y + 1 >= global_height) {
        return;  // lower neighbor off-image
    }
    for (int c = x; c + 1 < x + csize && c + 1 < global_width; ++c) {
        verify_edge(pixels_cu, next_cu, size_cu, c, y, c + 1, y + 1, global_width, global_height);
        verify_edge(pixels_cu, next_cu, size_cu, c + 1, y, c, y + 1, global_width, global_height);
    }
}
// Final flattening pass: every pixel takes the color of its component root, so
// the downloaded image shows one flat color per segment.
__global__ void redirect(Pixel *pixels_cu, int *next_cu,int global_width,int global_height){
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: guard each coordinate separately. The old flat-index test
    // (y*w + x < w*h) let threads with x >= global_width alias a pixel in the
    // following row and do redundant racy work on it.
    if(x < global_width && y < global_height){
        int actual = find(next_cu, y,x,global_width,global_height);
        pixels_cu[y*global_width + x] = pixels_cu[actual];
    }
    // Dropped the trailing __syncthreads(): a barrier as the last statement of
    // a kernel has no observable effect.
}
// Debug stub kernel: intentionally does nothing (its dump code was disabled).
__global__ void show(){
}
// Baseline approach: every merge phase gets its own kernel launch.
//   1. Upload pixels plus per-pixel size (=1) and parent (=-1) arrays.
//   2. Run rounds of row/col/diagonal merge kernels, doubling the merge window
//      (rsize/csize) and phase offset (start) each round.
//   3. Flatten every pixel to its component root's color and download.
static void attempt_1(std::vector<Pixel> &pixels, int width, int height){
    Pixel *pixels_cu;
    std::vector<int> size(width * height,1);   // component sizes, singletons at first
    std::vector<int> next(width * height,-1);  // union-find parents, -1 == unset
    int *size_cu;
    int *next_cu;
    double ms= CycleTimer::currentSeconds();
    hipMalloc((void **)&size_cu, sizeof(int)*width*height);
    hipMalloc((void **)&next_cu, sizeof(int)*width*height);
    hipMalloc((void **)&pixels_cu,sizeof(Pixel)*width*height);
    hipMemcpy(pixels_cu,(Pixel*)&pixels[0],sizeof(Pixel)*width*height,hipMemcpyHostToDevice);
    hipMemcpy(size_cu,(int*)&size[0],sizeof(int)*width*height,hipMemcpyHostToDevice);
    hipMemcpy(next_cu,(int*)&next[0],sizeof(int)*width*height,hipMemcpyHostToDevice);
    double me = CycleTimer::currentSeconds();
    printf("cuda overhead: %.3f ms\n",1000.f *(me-ms));
    int start = 0;
    int rsize = 1;
    int csize = 2;
    while(start < width - 1 || start < height -1 ){
        double s = CycleTimer::currentSeconds();
        int blockWidth = ::max(csize,BLOCK_WIDTH);
        int blockHeight = ::max(rsize,BLOCK_HEIGHT);
        // Fix: use the width/height parameters rather than the
        // global_width/global_height globals so this helper does not silently
        // depend on cu_process having been called first (matches attempt_2).
        int rWidthNum = (width + blockWidth-1)/blockWidth;
        int rHeightNum = (height + blockHeight -1)/blockHeight;
        dim3 rgridDim(rWidthNum,rHeightNum);
        dim3 rblockDim((blockWidth+csize-1)/csize,(blockHeight+rsize-1)/rsize);
        hipLaunchKernelGGL(( rowComp), dim3(rgridDim), dim3(rblockDim) , 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        hipDeviceSynchronize();
        // Both diagonal passes are 'effectless' for the phase bookkeeping.
        hipLaunchKernelGGL(( rowDiagComp), dim3(rgridDim),dim3(rblockDim) , 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        hipDeviceSynchronize();
        rsize *= 2;
        blockHeight = ::max(rsize,BLOCK_HEIGHT);
        rHeightNum = (height + blockHeight - 1)/blockHeight;
        dim3 cgridDim(rWidthNum,rHeightNum);
        dim3 cblockDim((blockWidth +csize-1)/csize,(blockHeight + rsize-1)/rsize);
        hipLaunchKernelGGL(( colComp), dim3(cgridDim),dim3(cblockDim) , 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( colDiagComp), dim3(cgridDim),dim3(cblockDim) , 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        hipDeviceSynchronize();
        start = 2*(start +1) - 1;
        csize *= 2;
        double e = CycleTimer::currentSeconds();
        printf("iter time: %.3f ms\n",1000.f *(e-s));
    }
    double rdirS = CycleTimer::currentSeconds();
    dim3 gridDim((width + BLOCK_WIDTH -1) /BLOCK_WIDTH,(height + BLOCK_HEIGHT -1)/BLOCK_HEIGHT);
    dim3 blockDim(BLOCK_WIDTH, BLOCK_HEIGHT);
    hipLaunchKernelGGL(( redirect), dim3(gridDim),dim3(blockDim) , 0, 0, pixels_cu,next_cu,width,height);
    hipMemcpy((Pixel*)&pixels[0],pixels_cu,sizeof(Pixel)*width*height,hipMemcpyDeviceToHost);
    double rdirE = CycleTimer::currentSeconds();
    printf("Redir time: %.3f ms\n", 1000.f *(rdirE-rdirS));
    // Fix: release the device buffers — they were previously leaked on every call.
    hipFree(size_cu);
    hipFree(next_cu);
    hipFree(pixels_cu);
    return;
}
/**********************************************************************************
*
*
* SHARED MEMORY ATTEMPT
*
*
**********************************************************************************/
// In-tile horizontal merge phase (shared-memory counterpart of rowComp).
// Every thread of the block calls this; only threads whose global x sits on
// the current phase's column grid (x >= start, (x-start) % csize == 0) and
// whose y leads a row group (y % rsize == 0) do any work.
__device__ __inline__ void sharedRowComp(Pixel *pixels_temp,int *next_temp, int *size_temp, int start, int rsize, int csize,
int width, int height){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: removed unused locals locX/locY.
    if(x>=start && (x-start) % (csize) == (0) && (x+1)<width && y % rsize == 0){
        for(int i=0;i<rsize;i++){
            if(y+i>=height){
                break;  // ran past the bottom edge
            }
            shared_verify_edge(pixels_temp,next_temp, size_temp,x,y+i,x+1,y+i,width,height);
        }
    }
    return;
}
// In-tile vertical merge phase (shared-memory counterpart of colComp).
// Only threads whose y sits on the phase's row grid and whose x leads a
// column group participate.
__device__ __inline__ void sharedColComp(Pixel *pixels_temp, int *next_temp, int *size_temp, int start, int rsize, int csize,
int width, int height){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: removed unused locals locX/locY.
    if(y>=start && (y-start) % (rsize) == (0) && y+1<height && x % csize == 0){
        for(int i=0;i<csize;i++){
            if(x+i>=width){
                break;  // ran past the right edge
            }
            shared_verify_edge(pixels_temp,next_temp,size_temp,x+i,y,x+i,y+1,width,height);
        }
    }
    return;
}
// In-tile diagonal merges across a column boundary (counterpart of rowDiagComp):
// both diagonals of each 2x2 window straddling the (x, x+1) pair are merged.
__device__ __inline__ void sharedRowDiagComp(Pixel *pixels_temp, int *next_temp, int *size_temp, int start, int rsize, int csize,
int width, int height){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: removed unused locals locX/locY.
    if(x>= start && (x-start) %(csize) == (0) && x+1<width && y %rsize == 0){
        for(int i=0;i<rsize-1;i++){
            if(y+i+1>=height){
                break;  // diagonal partner would fall below the image
            }
            shared_verify_edge(pixels_temp,next_temp,size_temp,x,y+i,x+1,y+i+1,width,height);
            shared_verify_edge(pixels_temp,next_temp,size_temp,x+1,y+i,x,y+i+1,width,height);
        }
    }
    return;
}
// In-tile diagonal merges across a row boundary (counterpart of colDiagComp):
// both diagonals of each 2x2 window straddling the (y, y+1) pair are merged.
__device__ __inline__ void sharedColDiagComp(Pixel *pixels_temp, int *next_temp, int *size_temp, int start, int rsize, int csize,
int width, int height){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: removed unused locals locX/locY.
    if(y>= start && (y-start) % (rsize) == (0) && y+1<height && x % csize == 0){
        for(int i=0;i<csize-1;i++){
            if(x+i+1>=width){
                break;  // diagonal partner would fall off the right edge
            }
            shared_verify_edge(pixels_temp,next_temp,size_temp,x+i,y,x+i+1,y+1,width,height);
            shared_verify_edge(pixels_temp,next_temp,size_temp,x+i+1,y,x+i,y+1,width,height);
        }
    }
    return;
}
// Runs the first log2(SHARED_BLOCK_DIM) merge phases entirely inside one
// SHARED_BLOCK_DIM x SHARED_BLOCK_DIM tile held in shared memory, then writes
// the tile back to global memory. Must be launched with
// blockDim == (SHARED_BLOCK_DIM, SHARED_BLOCK_DIM).
__global__ void shared_process(Pixel *pixels_cu, int *next_cu, int *size_cu,int width,int height){
    __shared__ Pixel pixels_temp[SHARED_BLOCK_DIM*SHARED_BLOCK_DIM];
    __shared__ int size_temp[SHARED_BLOCK_DIM*SHARED_BLOCK_DIM];
    __shared__ int next_temp[SHARED_BLOCK_DIM*SHARED_BLOCK_DIM];
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int tempIndex = threadIdx.y * blockDim.x + threadIdx.x;
    int actualIndex = y *width + x;
    // Fix: bounds-check (x,y) individually, not the flat index. With x >= width
    // the flat index can alias a valid pixel in the following row, which the
    // old guard then corrupted on write-back.
    bool inBounds = (x < width) && (y < height);
    if(inBounds){
        pixels_temp[tempIndex] = pixels_cu[actualIndex];
        size_temp[tempIndex] = size_cu[actualIndex];
        next_temp[tempIndex] = next_cu[actualIndex];
    }
    // Fix: barriers moved outside the divergent guard so that every thread of
    // the block reaches them (a __syncthreads() skipped by some threads is
    // undefined behavior). The shared*Comp helpers bounds-check (x,y)
    // themselves, so all threads may safely execute the loop.
    __syncthreads();
    int start = 0;
    int rsize = 1;
    int csize = 2;
    while(start < SHARED_BLOCK_DIM-1){
        sharedRowComp(pixels_temp,next_temp,size_temp,start,rsize,csize,width,height);
        __syncthreads();
        sharedRowDiagComp(pixels_temp,next_temp,size_temp,start,rsize,csize,width,height);
        __syncthreads();
        rsize *=2;
        sharedColComp(pixels_temp,next_temp,size_temp,start,rsize,csize,width,height);
        __syncthreads();
        sharedColDiagComp(pixels_temp,next_temp,size_temp,start,rsize,csize,width,height);
        __syncthreads();
        start = 2*(start+1)-1;
        csize *= 2;
    }
    if(inBounds){
        // Write the tile back. next_temp holds GLOBAL indices (written by
        // shared_find), so it can be stored into next_cu unchanged.
        // (Removed dead tempX/tempY/store computations.)
        pixels_cu[actualIndex] = pixels_temp[tempIndex];
        size_cu[actualIndex] = size_temp[tempIndex];
        next_cu[actualIndex] = next_temp[tempIndex];
    }
    return;
}
/* a shared memory approach
step one involves using shared memory
second is traditional
*/
// Shared-memory variant: the phase rounds that fit inside a
// SHARED_BLOCK_DIM x SHARED_BLOCK_DIM tile run in a single kernel using
// on-chip memory (shared_process); the remaining rounds reuse the
// global-memory kernels from attempt_1.
static void attempt_2(std::vector<Pixel> &pixels, int width, int height){
    if(width<SHARED_BLOCK_DIM || height < SHARED_BLOCK_DIM){
        // Image smaller than one tile: the tiled path cannot run, fall back.
        attempt_1(pixels,width,height);
        return;
    }
    else{
        Pixel *pixels_cu;
        std::vector<int> size(width*height,1);   // component sizes
        std::vector<int> next(width*height,-1);  // union-find parents, -1 == unset
        int *size_cu;
        int *next_cu;
        double mS = CycleTimer::currentSeconds();
        hipMalloc((void**)&pixels_cu,sizeof(Pixel)*width*height);
        hipMalloc((void**)&size_cu,sizeof(int)*width*height);
        hipMalloc((void**)&next_cu,sizeof(int)*width*height);
        hipMemcpy(pixels_cu,(Pixel*)&pixels[0],sizeof(Pixel)*width*height,hipMemcpyHostToDevice);
        hipMemcpy(next_cu,(int*)&next[0],sizeof(int)*width*height,hipMemcpyHostToDevice);
        hipMemcpy(size_cu,(int*)&size[0],sizeof(int)*width*height,hipMemcpyHostToDevice);
        double mE = CycleTimer::currentSeconds();
        printf("cuda overhead %.3f ms\n",1000.f * (mE-mS));
        int widthNum = (width+SHARED_BLOCK_DIM-1)/SHARED_BLOCK_DIM;
        int heightNum = (height + SHARED_BLOCK_DIM-1)/SHARED_BLOCK_DIM;
        dim3 gridDim(widthNum,heightNum);
        dim3 blockDim(SHARED_BLOCK_DIM,SHARED_BLOCK_DIM);
        double sS = CycleTimer::currentSeconds();
        hipLaunchKernelGGL(( shared_process), dim3(gridDim),dim3(blockDim), 0, 0, pixels_cu,next_cu,size_cu,width,height);
        cudaCheckError(hipDeviceSynchronize());
        double sE = CycleTimer::currentSeconds();
        printf("shared memory time %.3f ms\n",1000.f * (sE-sS));
        // Resume the phase schedule where shared_process left off: it completed
        // all rounds up to the 32-pixel window, so start/rsize/csize pick up there.
        int start = 31;
        int rsize = 32;
        int csize = 64;
        while(start<width -1 || start < height-1){
            double s = CycleTimer::currentSeconds();
            int blockWidth = ::max(csize,BLOCK_WIDTH);
            int blockHeight = ::max(rsize,BLOCK_HEIGHT);
            int rWidthNum = (width + blockWidth-1)/blockWidth;
            int rHeightNum = (height + blockHeight-1)/blockHeight;
            dim3 rgridDim(rWidthNum,rHeightNum);
            dim3 rblockDim((blockWidth+csize-1)/csize,(blockHeight+rsize-1)/rsize);
            hipLaunchKernelGGL(( rowComp), dim3(rgridDim),dim3(rblockDim), 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
            cudaCheckError(hipDeviceSynchronize());
            hipLaunchKernelGGL(( rowDiagComp), dim3(rgridDim),dim3(rblockDim), 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
            hipDeviceSynchronize();
            rsize *=2;
            blockHeight = ::max(rsize,BLOCK_HEIGHT);
            rHeightNum = (height + blockHeight-1)/blockHeight;
            dim3 cgridDim(rWidthNum,rHeightNum);
            dim3 cblockDim((blockWidth+csize-1)/csize,(blockHeight+rsize-1)/rsize);
            hipLaunchKernelGGL(( colComp), dim3(cgridDim),dim3(cblockDim), 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
            hipDeviceSynchronize();
            hipLaunchKernelGGL(( colDiagComp), dim3(cgridDim),dim3(cblockDim), 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
            hipDeviceSynchronize();
            start = 2*(start +1 )-1;
            csize *= 2;
            double e = CycleTimer::currentSeconds();
            printf("iter time: %.3f ms\n",1000.f *(e-s));
        }
        double rS = CycleTimer::currentSeconds();
        dim3 gD((width+BLOCK_WIDTH-1)/BLOCK_WIDTH,(height+BLOCK_HEIGHT-1)/BLOCK_HEIGHT);
        dim3 bD(BLOCK_WIDTH,BLOCK_HEIGHT);
        hipLaunchKernelGGL(( redirect), dim3(gD),dim3(bD), 0, 0, pixels_cu,next_cu,width,height);
        hipMemcpy((Pixel*)&pixels[0],pixels_cu,sizeof(Pixel)*width*height,hipMemcpyDeviceToHost);
        double rE = CycleTimer::currentSeconds();
        printf("Redir time %.3f ms\n",1000.f *(rE-rS));
        // Fix: release the device buffers — previously leaked on every call.
        hipFree(pixels_cu);
        hipFree(size_cu);
        hipFree(next_cu);
    }
    return;
}
/*********************************************************************
ATTEMPT 3 "HYBRID SOLUTION"
*******************************************************************/
// Sequential continuation for the hybrid approach: launched <<<1,1>>> once the
// merge windows exceed CHUNK_SIZE, it finishes the remaining low-parallelism
// rounds on the device so the union-find state never leaves GPU memory.
__global__ void seq_continue(Pixel *pixels_cu,int * next_cu, int *size_cu,int csize, int width, int height){
    int start = CHUNK_SIZE *2 - 1;
    int offset = csize;
    int limit;
    while(start< width -1 || start < height -1){
        // Phase 1: horizontal merges across each phase column pair (x, x+1).
        for(int y =0;y<height;y++){
            for(int x = start;x<=width-offset;x+=offset){
                verify_edge(pixels_cu,next_cu,size_cu,x,y,x+1,y,width,height);
            }
        }
        // Phase 2: diagonal merges across the same column boundaries.
        for(int y = 0; y<height; y+=offset/2){
            for(int x = start;x<=width-offset;x+=offset){
                limit = offset/2 - 1;
                // Fix: clamp so y+n+1 stays strictly below height. The old
                // `>` comparison let y+limit == height slip through and index
                // one row past the end of the image.
                if(y + limit >= height){
                    limit = height - y -1;
                }
                for(int n=0;n<limit;n++){
                    verify_edge(pixels_cu,next_cu,size_cu,x,y+n,x+1,y+n+1,width,height);
                    verify_edge(pixels_cu,next_cu,size_cu,x+1,y+n,x,y+n+1,width,height);
                }
            }
        }
        // Phase 3: vertical merges across each phase row pair (y, y+1).
        for ( int y = start; y<=height-offset; y += offset){
            for(int x =0 ; x< width; x++){
                verify_edge(pixels_cu,next_cu,size_cu,x,y,x,y+1,width,height);
            }
        }
        // Phase 4: diagonal merges across the row boundaries.
        for(int y = start; y <= height-offset; y+= offset){
            for(int x = 0; x<= width-offset; x+=offset){
                limit = offset -1;
                // Fix: same off-by-one clamp as above, in the x direction.
                if(x+limit>=width){
                    limit =width - x -1;
                }
                for( int n = 0;n<limit;n++){
                    verify_edge(pixels_cu,next_cu,size_cu,x+n,y,x+n+1,y+1,width,height);
                    verify_edge(pixels_cu,next_cu,size_cu,x+n+1,y,x+n,y+1,width,height);
                }
            }
        }
        start = 2*(start+1)-1;
        offset *=2;
    }
}
// "Hybrid" approach: run the wide data-parallel kernels while the merge
// windows are small (up to CHUNK_SIZE), then finish the remaining rounds —
// which offer almost no parallelism — with the single-thread seq_continue
// kernel instead of near-idle wide launches.
static void attempt_3(std::vector<Pixel> &pixels, int width, int height){
    Pixel *pixels_cu;
    std::vector<int> size(width*height,1);   // component sizes
    std::vector<int> next(width*height,-1);  // union-find parents, -1 == unset
    int *size_cu;
    int *next_cu;
    double ms = CycleTimer::currentSeconds();
    hipMalloc((void**) &size_cu, sizeof(int) *width*height);
    hipMalloc((void**)&next_cu,sizeof(int)*width*height);
    hipMalloc((void**)&pixels_cu,sizeof(Pixel)*width*height);
    hipMemcpy(pixels_cu,(Pixel*)&pixels[0],sizeof(Pixel)*width*height,hipMemcpyHostToDevice);
    hipMemcpy(size_cu,(int*)&size[0],sizeof(int)*width*height,hipMemcpyHostToDevice);
    hipMemcpy(next_cu,(int*)&next[0],sizeof(int)*width*height,hipMemcpyHostToDevice);
    double me = CycleTimer::currentSeconds();
    printf("cuda overhead: %.3f ms\n",1000.f *(me-ms));
    int start = 0;
    int rsize = 1;
    int csize = 2;
    while(start< ::min(CHUNK_SIZE,width-1) || start < ::min(CHUNK_SIZE,height-1)){
        double s = CycleTimer::currentSeconds();
        int blockWidth = ::max(csize,BLOCK_WIDTH);
        int blockHeight = ::max(rsize,BLOCK_HEIGHT);
        // Fix: use the width/height parameters instead of the
        // global_width/global_height globals (consistent with attempt_2 and
        // the seq_continue launch below).
        int rWidthNum = (width + blockWidth-1)/blockWidth;
        int rHeightNum = (height + blockHeight -1)/blockHeight;
        dim3 rgridDim(rWidthNum,rHeightNum);
        dim3 rblockDim((blockWidth+csize-1)/csize,(blockHeight+rsize-1)/rsize);
        hipLaunchKernelGGL(( rowComp), dim3(rgridDim), dim3(rblockDim) , 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( rowDiagComp), dim3(rgridDim),dim3(rblockDim) , 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        hipDeviceSynchronize();
        rsize *= 2;
        blockHeight = ::max(rsize,BLOCK_HEIGHT);
        rHeightNum = (height + blockHeight - 1)/blockHeight;
        dim3 cgridDim(rWidthNum,rHeightNum);
        dim3 cblockDim((blockWidth +csize-1)/csize,(blockHeight + rsize-1)/rsize);
        hipLaunchKernelGGL(( colComp), dim3(cgridDim),dim3(cblockDim) , 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( colDiagComp), dim3(cgridDim),dim3(cblockDim) , 0, 0, pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        hipDeviceSynchronize();
        start = 2*(start +1) - 1;
        csize *= 2;
        double e = CycleTimer::currentSeconds();
        printf("iter time: %.3f ms\n",1000.f *(e-s));
    }
    double scs= CycleTimer::currentSeconds();
    hipLaunchKernelGGL(( seq_continue), dim3(1),dim3(1), 0, 0, pixels_cu,next_cu,size_cu,csize,width,height);
    hipDeviceSynchronize();
    double sce = CycleTimer::currentSeconds();
    printf("seq part : %.3f ms\n",1000.f *(sce-scs));
    double rdirS = CycleTimer::currentSeconds();
    dim3 gridDim((width + BLOCK_WIDTH -1) /BLOCK_WIDTH,(height + BLOCK_HEIGHT -1)/BLOCK_HEIGHT);
    dim3 blockDim(BLOCK_WIDTH, BLOCK_HEIGHT);
    hipLaunchKernelGGL(( redirect), dim3(gridDim),dim3(blockDim) , 0, 0, pixels_cu,next_cu,width,height);
    hipMemcpy((Pixel*)&pixels[0],pixels_cu,sizeof(Pixel)*width*height,hipMemcpyDeviceToHost);
    double rdirE = CycleTimer::currentSeconds();
    printf("redir: %.3f ms\n",1000.f *(rdirE-rdirS));
    // Fix: release the device buffers — previously leaked on every call.
    hipFree(size_cu);
    hipFree(next_cu);
    hipFree(pixels_cu);
}
| 19489fad01e649d8efc034f78f227e44f69ffc19.cu | #include <iostream>
#include <string>
#include <vector>
#include "pixel.h"
#include "imgSeg.h"
#include <cstdint>
#include <assert.h>
#include <cuda.h>
#include <driver_functions.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include "CycleTimer.h"
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 16
#define SHARED_BLOCK_DIM 32
#define CHUNK_SIZE 512
#define cudaCheckError(ans) cudaAssert((ans), __FILE__, __LINE__);
// Report a failed CUDA API call with its location; exit the process when
// `abort` is set (the default, used by the cudaCheckError macro).
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess) {
        return;  // nothing to report
    }
    fprintf(stderr, "CUDA Error: %s at %s:%d\n",
            cudaGetErrorString(code), file, line);
    if (abort) {
        exit(code);
    }
}
// Image dimensions cached by cu_process; read by the attempt_* host helpers.
int global_width;
int global_height;
// True when the squared RGB distance between the two region colors is below
// t^2, i.e. they are similar enough to merge.
__device__ __inline__ bool mergeCriterion(Pixel p1, Pixel p2, int t){
    int dr = (int)p1.r - (int)p2.r;
    int dg = (int)p1.g - (int)p2.g;
    int db = (int)p1.b - (int)p2.b;
    return dr * dr + dg * dg + db * db < t * t;
}
// Size-weighted average of two region colors; used for the surviving root
// when two components merge.
__device__ Pixel newColor(Pixel A, Pixel B, int sizeA, int sizeB){
    int total = sizeA + sizeB;
    Pixel blended;
    blended.r = (uint8_t)(((int)A.r * sizeA + (int)B.r * sizeB) / total);
    blended.g = (uint8_t)(((int)A.g * sizeA + (int)B.g * sizeB) / total);
    blended.b = (uint8_t)(((int)A.b * sizeA + (int)B.b * sizeB) / total);
    return blended;
}
// Union-find "find" over the flat parent array `next` (row-major,
// global_width * global_height entries). A parent value of -1 means "unset":
// the pixel becomes its own root on first touch. Also compresses the starting
// pixel's link straight to the root.
// NOTE(review): called concurrently from many threads without locking; the
// compression writes race — each appears to store a valid ancestor, but this
// is not formally verified.
__device__ int find(int *next,int srow,int scol,int global_width,int global_height){
    //int row = srow;
    //int col = scol;
    int pos = srow * global_width + scol;
    // Bounded walk: a well-formed forest has chains shorter than the pixel count.
    for(int i=0;i<global_width*global_height;i++){
        //assert(pos>=0);
        //assert(pos<= global_width*global_height);
        //std::cout<<"in find"<<std::endl;
        int index = next[pos];
        if(index == -1){
            // First touch: make this pixel a root; compress the start link.
            next[pos] = pos;
            next[srow * global_width + scol] = pos;
            return pos;
        }
        if(index == pos){
            // Self-link == root found; compress the start link to it.
            next[srow* global_width + scol] = index;
            return index;
        }
        pos = index;
    }
    // Unreachable for a well-formed forest; trap if the chain never terminated.
    assert(false);
}
/*
__device__ int shared_find(int *next_temp,int sr,int sc, int width,int height){
assert(sr<32);
assert(sc<32);
assert(sr>=0);
assert(sc>=0);
assert(false);
assert(blockDim.x == 32);
int pos = sr *blockDim.x + sc;
while(1){
assert(pos>=0);
assert(pos<1024);
int index = next_temp[pos];
//assert(index<1024);
//assert(index>=-1);
if(index == -1){
next_temp[pos]=pos;
next_temp[sr * blockDim.x + sc] = pos;
return pos;
}
if( index == pos){
next_temp[sr * blockDim.x + sc] = index;
return index;
}
pos = index;
}
}
*/
//local next_temp will need to have the values of the actual next array
//note that it returns the local pos, and just check into the shared array to find actual
// Tile-local union-find "find". (sr,sc) are GLOBAL coordinates that must fall
// inside this block's tile. next_temp stores GLOBAL indices (or -1); each step
// converts the stored global parent back into a tile-local slot. Returns the
// root's LOCAL slot index; the compressed link written back is a GLOBAL index.
__device__ __inline__ int shared_find(int *next_temp, int sr, int sc, int width, int height){
    int xStart = blockIdx.x * blockDim.x;  // tile origin, global coords
    int yStart = blockIdx.y * blockDim.y;
    int sX = sc - xStart;                  // tile-local coordinates
    int sY = sr - yStart;
    //assert(blockDim.x==32);
    //assert(sY>=0);
    //assert(sY<32);
    //assert(sX>=0);
    //assert(sX<32);
    int start = xStart + yStart *width;    // global flat index of the tile origin
    int pos = sY * blockDim.x +sX;         // local slot currently examined
    while(1){
        //assert(pos>=0);
        //assert(pos<1024);
        int index = next_temp[pos];
        if(index == -1){
            // Unset: make this slot its own root (stored as a GLOBAL index)
            // and compress the starting slot's link to it.
            int posy = pos/blockDim.x;
            int posx = pos % blockDim.x;
            int temp = start + posx + posy * width;
            next_temp[pos] = temp;
            next_temp[sY * blockDim.x + sX] = temp;
            return pos;
        }
        // Map the stored global parent back into this tile.
        int checkY = index/width -yStart;
        int checkX = index%width - xStart;
        if(checkX + checkY*blockDim.x == pos){
            // Self-link: root found; compress the start link (global index).
            next_temp[sY * blockDim.x + sX] = index;
            return pos;
        }
        pos = checkX + checkY * blockDim.x;
    }
}
// Attempt to merge the components containing pixels (col1,row1) and
// (col2,row2). If their roots differ and the root colors pass mergeCriterion
// (threshold 30), the smaller component is linked under the larger; the
// surviving root gets the size-weighted blended color and the combined size.
// NOTE(review): executed concurrently by many threads with no locking; two
// threads can race on the same roots — this appears tolerated by design, confirm.
__device__ void verify_edge(Pixel *pixels_cu, int *next_cu,
int *size_cu, int col1, int row1, int col2, int row2,int global_width, int global_height) {
    //sanity check
    /*assert(col1< global_width);
    assert(col1>=0);
    assert(col2 < global_width);
    assert(col2 >= 0);
    assert(row1 < global_height);
    assert(row1 >=0);
    assert(row2 < global_height);
    assert(row2 >=0);
    */
    int aIndex = find(next_cu,row1,col1,global_width,global_height);
    int bIndex = find(next_cu,row2,col2,global_width,global_height);
    //assert(aIndex!= -1 && bIndex != -1);
    //assert(aIndex<global_width*global_height && bIndex<global_height*global_width);
    if(aIndex != bIndex){
        Pixel A = pixels_cu[aIndex];
        int aSize = size_cu[aIndex];
        Pixel B = pixels_cu[bIndex];
        int bSize = size_cu[bIndex];
        if(mergeCriterion(A,B,30)){
            if(aSize>bSize){
                // A absorbs B: blend color into A's root, repoint B's root.
                pixels_cu[aIndex] = newColor(A,B, aSize, bSize);
                next_cu[bIndex] = aIndex;
                size_cu[aIndex] += bSize;
            }
            else{
                // B absorbs A.
                pixels_cu[bIndex] = newColor(A, B, aSize, bSize);
                next_cu[aIndex] = bIndex;
                size_cu[bIndex] += aSize;
            }
        }
    }
    return;
}
// Shared-memory counterpart of verify_edge: merges two pixels that both lie
// inside this block's tile. (c1,r1)/(c2,r2) are GLOBAL coordinates;
// shared_find maps them to tile-local slots (hence the < 1024 asserts).
__device__ void shared_verify_edge(Pixel *pixels_temp, int *next_temp, int *size_temp, int c1, int r1,
int c2, int r2, int width, int height){
    // Preconditions: both endpoints are inside the image.
    assert(c1< width);
    assert(c1>=0);
    assert(c2 < width);
    assert(c2 >= 0);
    assert(r1 < height);
    assert(r1 >=0);
    assert(r2 < height);
    assert(r2 >=0);
    int aIndex = shared_find(next_temp,r1,c1,width,height);
    int bIndex = shared_find(next_temp,r2,c2,width,height);
    assert(aIndex!= -1 && bIndex != -1);
    assert(aIndex<1024 && bIndex<1024);
    if(aIndex != bIndex){
        Pixel A = pixels_temp[aIndex];
        int aSize = size_temp[aIndex];
        Pixel B = pixels_temp[bIndex];
        int bSize = size_temp[bIndex];
        if(mergeCriterion(A,B,30)){
            if(aSize>bSize){
                pixels_temp[aIndex] = newColor(A,B,aSize,bSize);
                // NOTE(review): stores next_temp[aIndex] — the GLOBAL index
                // shared_find wrote for the winning root — rather than aIndex
                // itself as the global verify_edge does; confirm intended.
                next_temp[bIndex] = next_temp[aIndex];
                size_temp[aIndex] += bSize;
            }
            else{
                pixels_temp[bIndex] = newColor(A,B,aSize,bSize);
                next_temp[aIndex] =next_temp[bIndex];
                size_temp[bIndex] += aSize;
            }
        }
    }
    return;
}
static void attempt_1(std::vector<Pixel> &pixels, int width, int height);
static void attempt_2(std::vector<Pixel> &pixels, int width, int height);
static void attempt_3(std::vector<Pixel> &pixels, int width, int height);
// Public entry point: records the image dimensions in the global_width /
// global_height globals (read by the attempt_* helpers) and runs the baseline
// implementation.
void cu_process(std::vector<Pixel> &pixels, int width, int height){
    global_width = width;
    global_height = height;
    attempt_1(pixels,width,height);
    return;
}
// One horizontal-merge phase: each thread owns the column pair (x, x+1), with
// x offset by `start` and strided by `csize`, and sweeps `rsize` consecutive
// rows, merging each horizontal neighbor pair via verify_edge.
__global__ void rowComp(Pixel *pixels_cu, int *next_cu, int *size_cu,int start, int rsize,int csize,int global_width,
int global_height){
    int y = blockDim.y*blockIdx.y *rsize + threadIdx.y * rsize;
    int x = blockIdx.x * blockDim.x * csize + threadIdx.x*csize + start;
    //printf("x: %d, y: %d\n global_width: %d\n",x,y,global_width);
    if(x+1>=global_width){
        return;  // right neighbor would fall off the image
    }
    //printf("here\n");
    for(int i=0;i<rsize;i++){
        if(y+i>=global_height){
            break;  // ran past the bottom edge
        }
        //printf("about to ver edge\n");
        verify_edge(pixels_cu,next_cu,size_cu,x,y+i,x+1,y+i,global_width,global_height);
    }
}
// One vertical-merge phase: each thread owns the row pair (y, y+1), with y
// offset by `start` and strided by `rsize`, and sweeps `csize` consecutive
// columns, merging each vertical neighbor pair via verify_edge.
__global__ void colComp(Pixel *pixels_cu, int *next_cu, int *size_cu,int start, int rsize, int csize, int global_width,
int global_height){
    int x = blockDim.x *blockIdx.x * csize + threadIdx.x * csize;
    int y = blockIdx.y *blockDim.y * rsize + threadIdx.y * rsize + start;
    if(y+1>=global_height){
        return;  // lower neighbor would fall off the image
    }
    //get x and y somehow :)
    for(int i=0;i<csize;i++){
        if(x+i>=global_width){
            break;  // ran past the right edge
        }
        verify_edge(pixels_cu,next_cu,size_cu,x+i,y,x+i,y+1,global_width,global_height);
    }
}
// Diagonal merges across a column boundary: for the column pair (x, x+1) the
// thread walks its row span and merges both diagonals of each 2x2 window.
__global__ void rowDiagComp(Pixel *pixels_cu,int *next_cu, int *size_cu, int start,int rsize, int csize,
int global_width,int global_height){
    int y = blockDim.y *blockIdx.y*rsize + threadIdx.y * rsize;
    int x = blockIdx.x * blockDim.x * csize + threadIdx.x*csize + start;
    if(x+1>=global_width){
        return;  // right neighbor off-image
    }
    for(int i=0;i<rsize - 1;i++){
        if(y+i+1>=global_height){
            break;  // diagonal partner would fall below the image
        }
        verify_edge(pixels_cu,next_cu,size_cu,x,y+i,x+1,y+i+1,global_width,global_height);
        verify_edge(pixels_cu,next_cu,size_cu,x+1,y+i,x,y+i+1,global_width,global_height);
    }
}
// Diagonal merges across a row boundary: for the row pair (y, y+1) the thread
// walks its column span and merges both diagonals of each 2x2 window.
__global__ void colDiagComp(Pixel *pixels_cu, int *next_cu, int *size_cu, int start, int rsize,int csize,
int global_width, int global_height){
    int x= blockDim.x *blockIdx.x*csize + threadIdx.x * csize;
    int y = blockIdx.y * blockDim.y * rsize+threadIdx.y * rsize + start;
    if(y+1>=global_height){
        return;  // lower neighbor off-image
    }
    for(int i=0;i<csize-1;i++){
        if(x+i+1>=global_width){
            break;  // diagonal partner would fall off the right edge
        }
        verify_edge(pixels_cu,next_cu,size_cu,x+i,y,x+i+1,y+1,global_width,global_height);
        verify_edge(pixels_cu,next_cu,size_cu,x+i+1,y,x+i,y+1,global_width,global_height);
    }
}
// Final flattening pass: every pixel takes the color of its component root, so
// the downloaded image shows one flat color per segment.
__global__ void redirect(Pixel *pixels_cu, int *next_cu,int global_width,int global_height){
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: guard each coordinate separately. The old flat-index test
    // (y*w + x < w*h) let threads with x >= global_width alias a pixel in the
    // following row and do redundant racy work on it.
    if(x < global_width && y < global_height){
        int actual = find(next_cu, y,x,global_width,global_height);
        pixels_cu[y*global_width + x] = pixels_cu[actual];
    }
    // Dropped the trailing __syncthreads(): a barrier as the last statement of
    // a kernel has no observable effect.
}
// Debug stub kernel: body intentionally empty (its dump code is commented out).
__global__ void show(){
    //printf("AAAAAAAAAAAAA\n");
    /*for(int i=0;i<global_width*global_height;i++){
    printf("%d\n",next_cu[i]);
    }*/
}
// Baseline approach: every merge phase gets its own kernel launch.
//   1. Upload pixels plus per-pixel size (=1) and parent (=-1) arrays.
//   2. Run rounds of row/col/diagonal merge kernels, doubling the merge window
//      (rsize/csize) and phase offset (start) each round.
//   3. Flatten every pixel to its component root's color and download.
static void attempt_1(std::vector<Pixel> &pixels, int width, int height){
    Pixel *pixels_cu;
    std::vector<int> size(width * height,1);   // component sizes, singletons at first
    std::vector<int> next(width * height,-1);  // union-find parents, -1 == unset
    int *size_cu;
    int *next_cu;
    double ms= CycleTimer::currentSeconds();
    cudaMalloc((void **)&size_cu, sizeof(int)*width*height);
    cudaMalloc((void **)&next_cu, sizeof(int)*width*height);
    cudaMalloc((void **)&pixels_cu,sizeof(Pixel)*width*height);
    cudaMemcpy(pixels_cu,(Pixel*)&pixels[0],sizeof(Pixel)*width*height,cudaMemcpyHostToDevice);
    cudaMemcpy(size_cu,(int*)&size[0],sizeof(int)*width*height,cudaMemcpyHostToDevice);
    cudaMemcpy(next_cu,(int*)&next[0],sizeof(int)*width*height,cudaMemcpyHostToDevice);
    double me = CycleTimer::currentSeconds();
    printf("cuda overhead: %.3f ms\n",1000.f *(me-ms));
    int start = 0;
    //what it should look like after a global fn call
    int rsize = 1;
    int csize = 2;
    while(start < width - 1 || start < height -1 ){
        double s = CycleTimer::currentSeconds();
        int blockWidth = std::max(csize,BLOCK_WIDTH);
        int blockHeight = std::max(rsize,BLOCK_HEIGHT);
        // Fix: use the width/height parameters rather than the
        // global_width/global_height globals so this helper does not depend on
        // cu_process having been called first (matches attempt_2).
        int rWidthNum = (width + blockWidth-1)/blockWidth;
        int rHeightNum = (height + blockHeight -1)/blockHeight;
        dim3 rgridDim(rWidthNum,rHeightNum);
        dim3 rblockDim((blockWidth+csize-1)/csize,(blockHeight+rsize-1)/rsize);
        rowComp<<< rgridDim, rblockDim >>>(pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        // Fix: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
        cudaDeviceSynchronize();
        // Both diagonal passes are 'effectless' for the phase bookkeeping.
        rowDiagComp<<<rgridDim,rblockDim >>>(pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        cudaDeviceSynchronize();
        rsize *= 2;
        blockHeight = std::max(rsize,BLOCK_HEIGHT);
        rHeightNum = (height + blockHeight - 1)/blockHeight;
        dim3 cgridDim(rWidthNum,rHeightNum);
        dim3 cblockDim((blockWidth +csize-1)/csize,(blockHeight + rsize-1)/rsize);
        colComp<<<cgridDim,cblockDim >>>(pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        cudaDeviceSynchronize();
        colDiagComp<<<cgridDim,cblockDim >>>(pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        cudaDeviceSynchronize();
        start = 2*(start +1) - 1;
        csize *= 2;
        double e = CycleTimer::currentSeconds();
        printf("iter time: %.3f ms\n",1000.f *(e-s));
    }
    double rdirS = CycleTimer::currentSeconds();
    dim3 gridDim((width + BLOCK_WIDTH -1) /BLOCK_WIDTH,(height + BLOCK_HEIGHT -1)/BLOCK_HEIGHT);
    dim3 blockDim(BLOCK_WIDTH, BLOCK_HEIGHT);
    redirect<<<gridDim,blockDim >>>(pixels_cu,next_cu,width,height);
    cudaMemcpy((Pixel*)&pixels[0],pixels_cu,sizeof(Pixel)*width*height,cudaMemcpyDeviceToHost);
    double rdirE = CycleTimer::currentSeconds();
    printf("Redir time: %.3f ms\n", 1000.f *(rdirE-rdirS));
    // Fix: release the device buffers — they were previously leaked on every call.
    cudaFree(size_cu);
    cudaFree(next_cu);
    cudaFree(pixels_cu);
    return;
}
/**********************************************************************************
*
*
* SHARED MEMORY ATTEMPT
*
*
**********************************************************************************/
// In-tile horizontal merge phase (shared-memory counterpart of rowComp).
// Every thread of the block calls this; only threads whose global x sits on
// the current phase's column grid (x >= start, (x-start) % csize == 0) and
// whose y leads a row group (y % rsize == 0) do any work.
__device__ __inline__ void sharedRowComp(Pixel *pixels_temp,int *next_temp, int *size_temp, int start, int rsize, int csize,
int width, int height){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: removed unused locals locX/locY.
    if(x>=start && (x-start) % (csize) == (0) && (x+1)<width && y % rsize == 0){
        //printf("here x %d y %d\n",x,y);
        for(int i=0;i<rsize;i++){
            if(y+i>=height){
                break;  // ran past the bottom edge
            }
            shared_verify_edge(pixels_temp,next_temp, size_temp,x,y+i,x+1,y+i,width,height);
        }
    }
    return;
}
// In-tile vertical merge phase (shared-memory counterpart of colComp).
// Only threads whose y sits on the phase's row grid and whose x leads a
// column group participate.
__device__ __inline__ void sharedColComp(Pixel *pixels_temp, int *next_temp, int *size_temp, int start, int rsize, int csize,
int width, int height){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: removed unused locals locX/locY.
    if(y>=start && (y-start) % (rsize) == (0) && y+1<height && x % csize == 0){
        for(int i=0;i<csize;i++){
            if(x+i>=width){
                break;  // ran past the right edge
            }
            shared_verify_edge(pixels_temp,next_temp,size_temp,x+i,y,x+i,y+1,width,height);
        }
    }
    return;
}
// In-tile diagonal merges across a column boundary (counterpart of rowDiagComp):
// both diagonals of each 2x2 window straddling the (x, x+1) pair are merged.
__device__ __inline__ void sharedRowDiagComp(Pixel *pixels_temp, int *next_temp, int *size_temp, int start, int rsize, int csize,
int width, int height){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: removed unused locals locX/locY.
    if(x>= start && (x-start) %(csize) == (0) && x+1<width && y %rsize == 0){
        for(int i=0;i<rsize-1;i++){
            if(y+i+1>=height){
                break;  // diagonal partner would fall below the image
            }
            shared_verify_edge(pixels_temp,next_temp,size_temp,x,y+i,x+1,y+i+1,width,height);
            shared_verify_edge(pixels_temp,next_temp,size_temp,x+1,y+i,x,y+i+1,width,height);
        }
    }
    return;
}
// In-tile diagonal merges across a row boundary (counterpart of colDiagComp):
// both diagonals of each 2x2 window straddling the (y, y+1) pair are merged.
__device__ __inline__ void sharedColDiagComp(Pixel *pixels_temp, int *next_temp, int *size_temp, int start, int rsize, int csize,
int width, int height){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: removed unused locals locX/locY.
    if(y>= start && (y-start) % (rsize) == (0) && y+1<height && x % csize == 0){
        for(int i=0;i<csize-1;i++){
            if(x+i+1>=width){
                break;  // diagonal partner would fall off the right edge
            }
            shared_verify_edge(pixels_temp,next_temp,size_temp,x+i,y,x+i+1,y+1,width,height);
            shared_verify_edge(pixels_temp,next_temp,size_temp,x+i+1,y,x+i,y+1,width,height);
        }
    }
    return;
}
// Runs the first log2(SHARED_BLOCK_DIM) merge phases entirely inside one
// SHARED_BLOCK_DIM x SHARED_BLOCK_DIM tile held in shared memory, then writes
// the tile back to global memory. Must be launched with
// blockDim == (SHARED_BLOCK_DIM, SHARED_BLOCK_DIM).
__global__ void shared_process(Pixel *pixels_cu, int *next_cu, int *size_cu,int width,int height){
    __shared__ Pixel pixels_temp[SHARED_BLOCK_DIM*SHARED_BLOCK_DIM];
    __shared__ int size_temp[SHARED_BLOCK_DIM*SHARED_BLOCK_DIM];
    __shared__ int next_temp[SHARED_BLOCK_DIM*SHARED_BLOCK_DIM];
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int tempIndex = threadIdx.y * blockDim.x + threadIdx.x;
    int actualIndex = y *width + x;
    // Fix: bounds-check (x,y) individually, not the flat index. With x >= width
    // the flat index can alias a valid pixel in the following row, which the
    // old guard then corrupted on write-back.
    bool inBounds = (x < width) && (y < height);
    if(inBounds){
        pixels_temp[tempIndex] = pixels_cu[actualIndex];
        size_temp[tempIndex] = size_cu[actualIndex];
        next_temp[tempIndex] = next_cu[actualIndex];
    }
    // Fix: barriers moved outside the divergent guard so that every thread of
    // the block reaches them (a __syncthreads() skipped by some threads is
    // undefined behavior). The shared*Comp helpers bounds-check (x,y)
    // themselves, so all threads may safely execute the loop.
    __syncthreads();
    int start = 0;
    int rsize = 1;
    int csize = 2;
    while(start < SHARED_BLOCK_DIM-1){
        sharedRowComp(pixels_temp,next_temp,size_temp,start,rsize,csize,width,height);
        __syncthreads();
        sharedRowDiagComp(pixels_temp,next_temp,size_temp,start,rsize,csize,width,height);
        __syncthreads();
        rsize *=2;
        sharedColComp(pixels_temp,next_temp,size_temp,start,rsize,csize,width,height);
        __syncthreads();
        sharedColDiagComp(pixels_temp,next_temp,size_temp,start,rsize,csize,width,height);
        __syncthreads();
        start = 2*(start+1)-1;
        csize *= 2;
    }
    if(inBounds){
        // Write the tile back. next_temp holds GLOBAL indices (written by
        // shared_find), so it can be stored into next_cu unchanged.
        // (Removed dead tempX/tempY/store computations.)
        pixels_cu[actualIndex] = pixels_temp[tempIndex];
        size_cu[actualIndex] = size_temp[tempIndex];
        next_cu[actualIndex] = next_temp[tempIndex];
    }
    return;
}
/* Attempt 2: a two-stage approach.
   Stage one merges components within each tile using shared memory;
   stage two continues across tiles with the traditional global-memory kernels.
*/
// Connected-components pass: first shared_process merges components inside
// each SHARED_BLOCK_DIM x SHARED_BLOCK_DIM tile, then global-memory kernels
// continue the doubling merge across tiles, and redirect flattens the parent
// links before copying results back to the host.
// Falls back to attempt_1 when the image is smaller than one tile.
//
// Fixes vs. the original: the three device allocations were never freed
// (leak per call); deprecated cudaThreadSynchronize() replaced with
// cudaDeviceSynchronize(); error checking via cudaCheckError made consistent
// across all kernel launches (the original checked only two of them).
static void attempt_2(std::vector<Pixel> &pixels, int width, int height){
    if(width<SHARED_BLOCK_DIM || height < SHARED_BLOCK_DIM){
        // Image smaller than one shared-memory tile: use the plain version.
        attempt_1(pixels,width,height);
        return;
    }
    Pixel *pixels_cu;
    std::vector<int> size(width*height,1);   // component sizes, all start at 1
    std::vector<int> next(width*height,-1);  // parent links, -1 == root
    int *size_cu;
    int *next_cu;
    double mS = CycleTimer::currentSeconds();
    cudaMalloc((void**)&pixels_cu,sizeof(Pixel)*width*height);
    cudaMalloc((void**)&size_cu,sizeof(int)*width*height);
    cudaMalloc((void**)&next_cu,sizeof(int)*width*height);
    cudaMemcpy(pixels_cu,(Pixel*)&pixels[0],sizeof(Pixel)*width*height,cudaMemcpyHostToDevice);
    cudaMemcpy(next_cu,(int*)&next[0],sizeof(int)*width*height,cudaMemcpyHostToDevice);
    cudaMemcpy(size_cu,(int*)&size[0],sizeof(int)*width*height,cudaMemcpyHostToDevice);
    double mE = CycleTimer::currentSeconds();
    printf("cuda overhead %.3f ms\n",1000.f * (mE-mS));
    // One block per tile; each block is a full tile of threads.
    int widthNum = (width+SHARED_BLOCK_DIM-1)/SHARED_BLOCK_DIM;
    int heightNum = (height + SHARED_BLOCK_DIM-1)/SHARED_BLOCK_DIM;
    dim3 gridDim(widthNum,heightNum);
    dim3 blockDim(SHARED_BLOCK_DIM,SHARED_BLOCK_DIM);
    double sS = CycleTimer::currentSeconds();
    shared_process<<<gridDim,blockDim>>>(pixels_cu,next_cu,size_cu,width,height);
    cudaCheckError(cudaDeviceSynchronize());
    double sE = CycleTimer::currentSeconds();
    printf("shared memory time %.3f ms\n",1000.f * (sE-sS));
    // Continue merging across tile boundaries, picking up where the
    // shared-memory stage stopped.
    // NOTE(review): start=31/rsize=32 appear to assume SHARED_BLOCK_DIM == 32
    // — confirm against the macro definition.
    int start = 31;
    int rsize = 32;
    int csize = 64;
    while(start<width -1 || start < height-1){
        double s = CycleTimer::currentSeconds();
        int blockWidth = std::max(csize,BLOCK_WIDTH);
        int blockHeight = std::max(rsize,BLOCK_HEIGHT);
        int rWidthNum = (width + blockWidth-1)/blockWidth;
        int rHeightNum = (height + blockHeight-1)/blockHeight;
        dim3 rgridDim(rWidthNum,rHeightNum);
        dim3 rblockDim((blockWidth+csize-1)/csize,(blockHeight+rsize-1)/rsize);
        rowComp<<<rgridDim,rblockDim>>>(pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        cudaCheckError(cudaDeviceSynchronize());
        rowDiagComp<<<rgridDim,rblockDim>>>(pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        cudaCheckError(cudaDeviceSynchronize());
        rsize *=2;
        blockHeight = std::max(rsize,BLOCK_HEIGHT);
        rHeightNum = (height + blockHeight-1)/blockHeight;
        dim3 cgridDim(rWidthNum,rHeightNum);
        dim3 cblockDim((blockWidth+csize-1)/csize,(blockHeight+rsize-1)/rsize);
        colComp<<<cgridDim,cblockDim>>>(pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        cudaCheckError(cudaDeviceSynchronize());
        colDiagComp<<<cgridDim,cblockDim>>>(pixels_cu,next_cu,size_cu,start,rsize,csize,width,height);
        cudaCheckError(cudaDeviceSynchronize());
        start = 2*(start +1 )-1;
        csize *= 2;
        double e = CycleTimer::currentSeconds();
        printf("iter time: %.3f ms\n",1000.f *(e-s));
    }
    double rS = CycleTimer::currentSeconds();
    dim3 gD((width+BLOCK_WIDTH-1)/BLOCK_WIDTH,(height+BLOCK_HEIGHT-1)/BLOCK_HEIGHT);
    dim3 bD(BLOCK_WIDTH,BLOCK_HEIGHT);
    redirect<<<gD,bD>>>(pixels_cu,next_cu,width,height);
    // Synchronous cudaMemcpy on the default stream waits for redirect.
    cudaMemcpy((Pixel*)&pixels[0],pixels_cu,sizeof(Pixel)*width*height,cudaMemcpyDeviceToHost);
    double rE = CycleTimer::currentSeconds();
    printf("Redir time %.3f ms\n",1000.f *(rE-rS));
    // BUG FIX: the original leaked all three device allocations.
    cudaFree(pixels_cu);
    cudaFree(next_cu);
    cudaFree(size_cu);
    return;
}
/*********************************************************************
ATTEMPT 3 "HYBRID SOLUTION"
*******************************************************************/
// Sequential continuation of the doubling merge, intended to run as a single
// device thread (launched <<<1,1>>> from attempt_3). Starting at level
// start = CHUNK_SIZE*2 - 1, each iteration visits: (1) vertical seams along
// rows, (2) diagonal pairs across those seams, (3) horizontal seams along
// columns, (4) diagonal pairs across those, then doubles the level.
// NOTE(review): 'start' is hard-coded from CHUNK_SIZE while 'offset' is
// seeded from the csize parameter — this assumes the caller's csize equals
// 2*CHUNK_SIZE; confirm against attempt_3's loop exit state.
__global__ void seq_continue(Pixel *pixels_cu,int * next_cu, int *size_cu,int csize, int width, int height){
	int start = CHUNK_SIZE *2 - 1;
	int offset = csize;
	int limit;
	while(start< width -1 || start < height -1){
		//double s = CycleTimer::currentSeconds();
		//Comparing along row
		// (1) vertical seam at column x: compare (x,y) with (x+1,y) for every row.
		for(int y =0;y<height;y++){
			for(int x = start;x<=width-offset;x+=offset){
				verify_edge(pixels_cu,next_cu,size_cu,x,y,x+1,y,width,height);
			}
		}
		//std::cout<<"row comparison done"<<std::endl;
		// (2) diagonal pairs across each vertical seam, in bands of offset/2 rows.
		for(int y = 0; y<height; y+=offset/2){
			for(int x = start;x<=width-offset;x+=offset){
				limit = offset/2 - 1;
				//guarantee y+limit <= height
				// NOTE(review): '>' allows y+limit == height, so the last
				// verify_edge can be handed y+n+1 == height (one past the
				// last row); presumably verify_edge bounds-checks
				// internally — confirm, else this should be '>='.
				if(y + limit > height){
					limit = height - y -1;
				}
				for(int n=0;n<limit;n++){
					verify_edge(pixels_cu,next_cu,size_cu,x,y+n,x+1,y+n+1,width,height);
					verify_edge(pixels_cu,next_cu,size_cu,x+1,y+n,x,y+n+1,width,height);
				}
			}
		}
		//std::cout<<"second loop done"<<std::endl;
		// (3) horizontal seam at row y: compare (x,y) with (x,y+1) for every column.
		for ( int y = start; y<=height-offset; y += offset){
			for(int x =0 ; x< width; x++){
				verify_edge(pixels_cu,next_cu,size_cu,x,y,x,y+1,width,height);
			}
		}
		//std::cout<<"third loop done"<<std::endl;
		// (4) diagonal pairs across each horizontal seam.
		for(int y = start; y <= height-offset; y+= offset){
			for(int x = 0; x<= width-offset; x+=offset){
				limit = offset -1;
				// NOTE(review): same off-by-one concern as above ('>' vs '>=').
				if(x+limit>width){
					limit =width - x -1;
				}
				for( int n = 0;n<limit;n++){
					verify_edge(pixels_cu,next_cu,size_cu,x+n,y,x+n+1,y+1,width,height);
					verify_edge(pixels_cu,next_cu,size_cu,x+n+1,y,x+n,y+1,width,height);
				}
			}
		}
		// advance to the next doubling level
		start = 2*(start+1)-1;
		offset *=2;
		//double e = CycleTimer::currentSeconds();
		//printf("Iter time: %.3f ms\n",1000.f * (e-s));
	}
}
// Hybrid ("attempt 3") pass: run the doubling merge kernels in parallel up to
// CHUNK_SIZE, hand the remaining levels to a single device thread
// (seq_continue), then flatten parent links with redirect and copy back.
//
// Fixes vs. the original: the three device allocations were never freed
// (leak per call); deprecated cudaThreadSynchronize() calls unified to
// cudaDeviceSynchronize() (the original mixed both).
static void attempt_3(std::vector<Pixel> &pixels, int width, int height){
    Pixel *pixels_cu;
    std::vector<int> size(width*height,1);   // component sizes, all start at 1
    std::vector<int> next(width*height,-1);  // parent links, -1 == root
    int *size_cu;
    int *next_cu;
    double ms = CycleTimer::currentSeconds();
    cudaMalloc((void**) &size_cu, sizeof(int) *width*height);
    cudaMalloc((void**)&next_cu,sizeof(int)*width*height);
    cudaMalloc((void**)&pixels_cu,sizeof(Pixel)*width*height);
    cudaMemcpy(pixels_cu,(Pixel*)&pixels[0],sizeof(Pixel)*width*height,cudaMemcpyHostToDevice);
    cudaMemcpy(size_cu,(int*)&size[0],sizeof(int)*width*height,cudaMemcpyHostToDevice);
    cudaMemcpy(next_cu,(int*)&next[0],sizeof(int)*width*height,cudaMemcpyHostToDevice);
    double me = CycleTimer::currentSeconds();
    printf("cuda overhead: %.3f ms\n",1000.f *(me-ms));
    int start = 0;
    int rsize = 1;
    int csize = 2;
    // Parallel doubling phase, stopped once levels exceed CHUNK_SIZE.
    while(start< std::min(CHUNK_SIZE,width-1) || start < std::min(CHUNK_SIZE,height-1)){
        double s = CycleTimer::currentSeconds();
        int blockWidth = std::max(csize,BLOCK_WIDTH);
        int blockHeight = std::max(rsize,BLOCK_HEIGHT);
        // NOTE(review): grid sizing and kernel args use
        // global_width/global_height here while seq_continue and the memcpys
        // use the width/height parameters — looks inconsistent; confirm the
        // globals always equal the parameters. Preserved as-is.
        int rWidthNum = (global_width + blockWidth-1)/blockWidth;
        int rHeightNum = (global_height + blockHeight -1)/blockHeight;
        dim3 rgridDim(rWidthNum,rHeightNum);
        dim3 rblockDim((blockWidth+csize-1)/csize,(blockHeight+rsize-1)/rsize);
        rowComp<<< rgridDim, rblockDim >>>(pixels_cu,next_cu,size_cu,start,rsize,csize,global_width,global_height);
        cudaDeviceSynchronize();
        //both diagonal ops are 'effectless'
        rowDiagComp<<<rgridDim,rblockDim >>>(pixels_cu,next_cu,size_cu,start,rsize,csize,global_width,global_height);
        cudaDeviceSynchronize();
        rsize *= 2;
        blockHeight = std::max(rsize,BLOCK_HEIGHT);
        rHeightNum = (global_height + blockHeight - 1)/blockHeight;
        dim3 cgridDim(rWidthNum,rHeightNum);
        dim3 cblockDim((blockWidth +csize-1)/csize,(blockHeight + rsize-1)/rsize);
        colComp<<<cgridDim,cblockDim >>>(pixels_cu,next_cu,size_cu,start,rsize,csize,global_width,global_height);
        cudaDeviceSynchronize();
        colDiagComp<<<cgridDim,cblockDim >>>(pixels_cu,next_cu,size_cu,start,rsize,csize,global_width,global_height);
        cudaDeviceSynchronize();
        start = 2*(start +1) - 1;
        csize *= 2;
        double e = CycleTimer::currentSeconds();
        printf("iter time: %.3f ms\n",1000.f *(e-s));
    }
    double scs= CycleTimer::currentSeconds();
    // Single device thread finishes the remaining merge levels sequentially.
    seq_continue<<<1,1>>>(pixels_cu,next_cu,size_cu,csize,width,height);
    cudaDeviceSynchronize();
    double sce = CycleTimer::currentSeconds();
    printf("seq part : %.3f ms\n",1000.f *(sce-scs));
    double rdirS = CycleTimer::currentSeconds();
    dim3 gridDim((global_width + BLOCK_WIDTH -1) /BLOCK_WIDTH,(global_height + BLOCK_HEIGHT -1)/BLOCK_HEIGHT);
    dim3 blockDim(BLOCK_WIDTH, BLOCK_HEIGHT);
    redirect<<<gridDim,blockDim >>>(pixels_cu,next_cu,global_width,global_height);
    // Synchronous cudaMemcpy on the default stream waits for redirect.
    cudaMemcpy((Pixel*)&pixels[0],pixels_cu,sizeof(Pixel)*width*height,cudaMemcpyDeviceToHost);
    double rdirE = CycleTimer::currentSeconds();
    printf("redir: %.3f ms\n",1000.f *(rdirE-rdirS));
    // BUG FIX: the original leaked all three device allocations.
    cudaFree(pixels_cu);
    cudaFree(next_cu);
    cudaFree(size_cu);
}
|
872076942b0d92ad8e492ebd44d740f4e2431141.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tanh_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Hipify-generated benchmark harness: sweeps matrix sizes x block shapes and
// times 1000 launches of tanh_float per configuration, printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per line.
// argv[1] = number of matrix sizes to sweep (index into matrices_).
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            int idx = 1;
            float *dy = NULL;
            // BUG FIX: the original passed XSIZE*YSIZE as the BYTE count,
            // under-allocating the float buffer by sizeof(float).
            hipMalloc(&dy, XSIZE*YSIZE*sizeof(float));
            int incy = 1;
            float *result = NULL;
            hipMalloc(&result, XSIZE*YSIZE*sizeof(float));
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0); // no-op free: forces lazy runtime init before timing
            hipLaunchKernelGGL((tanh_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
            hipDeviceSynchronize();
            // Warmup launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((tanh_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
            }
            // BUG FIX: kernel launches are asynchronous; without these two
            // syncs the timed section measured launch overhead only.
            hipDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((tanh_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
            }
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the original leaked dy and result on every iteration.
            hipFree(dy);
            hipFree(result);
        }
}} | 872076942b0d92ad8e492ebd44d740f4e2431141.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "tanh_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps matrix sizes x block shapes and
// times 1000 launches of tanh_float per configuration, printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per line.
// argv[1] = number of matrix sizes to sweep (index into matrices_).
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            int idx = 1;
            float *dy = NULL;
            // BUG FIX: the original passed XSIZE*YSIZE as the BYTE count,
            // under-allocating the float buffer by sizeof(float).
            cudaMalloc(&dy, XSIZE*YSIZE*sizeof(float));
            int incy = 1;
            float *result = NULL;
            cudaMalloc(&result, XSIZE*YSIZE*sizeof(float));
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // no-op free: forces lazy context creation before timing
            tanh_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
            cudaDeviceSynchronize();
            // Warmup launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                tanh_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
            }
            // BUG FIX: kernel launches are asynchronous; without these two
            // syncs the timed section measured launch overhead only.
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                tanh_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
            }
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the original leaked dy and result on every iteration.
            cudaFree(dy);
            cudaFree(result);
        }
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.