hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
2e31ef830ed77953e32cdc236a3074e8d51fb37f.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include "srad.h" // includes, project #include <hip/hip_runtime.h> // includes, kernels #include "srad_kernel.hip" double get_time(){ struct timeval time; if (gettimeofday(&time,NULL)){ // Handle error return 0; } return (double)time.tv_sec + (double)time.tv_usec * .000001; } double duration = 0; void random_matrix(float *I, int rows, int cols); void runTest( int argc, char** argv); void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]); fprintf(stderr, "\t<rows> - number of rows\n"); fprintf(stderr, "\t<cols> - number of cols\n"); fprintf(stderr, "\t<y1> - y1 value of the speckle\n"); fprintf(stderr, "\t<y2> - y2 value of the speckle\n"); fprintf(stderr, "\t<x1> - x1 value of the speckle\n"); fprintf(stderr, "\t<x2> - x2 value of the speckle\n"); fprintf(stderr, "\t<lamda> - lambda (0,1)\n"); fprintf(stderr, "\t<no. 
of iter> - number of iterations\n"); exit(1); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); runTest( argc, argv); fprintf(stderr, "%f\n", duration); return EXIT_SUCCESS; } void runTest( int argc, char** argv) { int rows, cols, size_I, size_R, niter = 10, iter; float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ; hipDeviceSetLimit(hipLimitMallocHeapSize, 1024*1024*600); //sderek #ifdef CPU float Jc, G2, L, num, den, qsqr; int *iN,*iS,*jE,*jW, k; float *dN,*dS,*dW,*dE; float cN,cS,cW,cE,D; #endif #ifdef GPU float *J_cuda; float *C_cuda; float *E_C, *W_C, *N_C, *S_C; #endif unsigned int r1, r2, c1, c2; float *c; if (argc == 9) { rows = atoi(argv[1]); //number of rows in the domain cols = atoi(argv[2]); //number of cols in the domain if ((rows%16!=0) || (cols%16!=0)){ fprintf(stderr, "rows and cols must be multiples of 16\n"); exit(1); } r1 = atoi(argv[3]); //y1 position of the speckle r2 = atoi(argv[4]); //y2 position of the speckle c1 = atoi(argv[5]); //x1 position of the speckle c2 = atoi(argv[6]); //x2 position of the speckle lambda = atof(argv[7]); //Lambda value niter = atoi(argv[8]); //number of iterations } else{ usage(argc, argv); } size_I = cols * rows; size_R = (r2-r1+1)*(c2-c1+1); I = (float *)malloc( size_I * sizeof(float) ); J = (float *)malloc( size_I * sizeof(float) ); c = (float *)malloc(sizeof(float)* size_I) ; #ifdef CPU iN = (int *)malloc(sizeof(unsigned int*) * rows) ; iS = (int *)malloc(sizeof(unsigned int*) * rows) ; jW = (int *)malloc(sizeof(unsigned int*) * cols) ; jE = (int *)malloc(sizeof(unsigned int*) * cols) ; dN = (float *)malloc(sizeof(float)* size_I) ; dS = (float *)malloc(sizeof(float)* size_I) ; dW = (float *)malloc(sizeof(float)* size_I) ; dE = (float *)malloc(sizeof(float)* size_I) ; for (int 
i=0; i< rows; i++) { iN[i] = i-1; iS[i] = i+1; } for (int j=0; j< cols; j++) { jW[j] = j-1; jE[j] = j+1; } iN[0] = 0; iS[rows-1] = rows-1; jW[0] = 0; jE[cols-1] = cols-1; #endif #ifdef GPU //Allocate device memory hipMalloc((void**)& J_cuda, sizeof(float)* size_I); hipMalloc((void**)& C_cuda, sizeof(float)* size_I); hipMalloc((void**)& E_C, sizeof(float)* size_I); hipMalloc((void**)& W_C, sizeof(float)* size_I); hipMalloc((void**)& S_C, sizeof(float)* size_I); hipMalloc((void**)& N_C, sizeof(float)* size_I); #endif printf("Randomizing the input matrix\n"); //Generate a random matrix random_matrix(I, rows, cols); for (int k = 0; k < size_I; k++ ) { J[k] = (float)exp(I[k]) ; } printf("Start the SRAD main loop\n"); for (iter=0; iter< niter; iter++){ sum=0; sum2=0; for (int i=r1; i<=r2; i++) { for (int j=c1; j<=c2; j++) { tmp = J[i * cols + j]; sum += tmp ; sum2 += tmp*tmp; } } meanROI = sum / size_R; varROI = (sum2 / size_R) - meanROI*meanROI; q0sqr = varROI / (meanROI*meanROI); #ifdef CPU for (int i = 0 ; i < rows ; i++) { for (int j = 0; j < cols; j++) { k = i * cols + j; Jc = J[k]; // directional derivates dN[k] = J[iN[i] * cols + j] - Jc; dS[k] = J[iS[i] * cols + j] - Jc; dW[k] = J[i * cols + jW[j]] - Jc; dE[k] = J[i * cols + jE[j]] - Jc; G2 = (dN[k]*dN[k] + dS[k]*dS[k] + dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc); L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc; num = (0.5*G2) - ((1.0/16.0)*(L*L)) ; den = 1 + (.25*L); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c[k] = 1.0 / (1.0+den) ; // saturate diffusion coefficent if (c[k] < 0) {c[k] = 0;} else if (c[k] > 1) {c[k] = 1;} } } for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { // current index k = i * cols + j; // diffusion coefficent cN = c[k]; cS = c[iS[i] * cols + j]; cW = c[k]; cE = c[i * cols + jE[j]]; // divergence (equ 58) D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k]; // image update (equ 61) J[k] = J[k] + 0.25*lambda*D; } } #endif // CPU 
#ifdef GPU //Currently the input size must be divided by 16 - the block size int block_x = cols/BLOCK_SIZE ; int block_y = rows/BLOCK_SIZE ; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(block_x , block_y); //Copy data from main memory to device memory hipMemcpy(J_cuda, J, sizeof(float) * size_I, hipMemcpyHostToDevice); hipDeviceSynchronize(); double time0 = get_time(); //Run kernels hipLaunchKernelGGL(( srad_cuda_1), dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr); hipLaunchKernelGGL(( srad_cuda_2), dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr); hipDeviceSynchronize(); double time1 = get_time(); duration += time1-time0; //Copy data from device memory to main memory hipMemcpy(J, J_cuda, sizeof(float) * size_I, hipMemcpyDeviceToHost); #endif } hipDeviceSynchronize(); printf("Printing Output:\n"); for( int i = 0 ; i < 10 ; i++){ for ( int j = 0 ; j < 10 ; j++){ printf("%.5f ", J[i * cols + j]); } printf("\n"); } #ifdef OUTPUT //Printing output printf("Printing Output:\n"); for( int i = 0 ; i < rows ; i++){ for ( int j = 0 ; j < cols ; j++){ printf("%.5f ", J[i * cols + j]); } printf("\n"); } #endif printf("Computation Done\n"); free(I); free(J); #ifdef CPU free(iN); free(iS); free(jW); free(jE); free(dN); free(dS); free(dW); free(dE); #endif #ifdef GPU hipFree(C_cuda); hipFree(J_cuda); hipFree(E_C); hipFree(W_C); hipFree(N_C); hipFree(S_C); #endif free(c); } void random_matrix(float *I, int rows, int cols){ srand(7); for( int i = 0 ; i < rows ; i++){ for ( int j = 0 ; j < cols ; j++){ I[i * cols + j] = rand()/(float)RAND_MAX ; } } }
2e31ef830ed77953e32cdc236a3074e8d51fb37f.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include "srad.h" // includes, project #include <cuda.h> // includes, kernels #include "srad_kernel.cu" double get_time(){ struct timeval time; if (gettimeofday(&time,NULL)){ // Handle error return 0; } return (double)time.tv_sec + (double)time.tv_usec * .000001; } double duration = 0; void random_matrix(float *I, int rows, int cols); void runTest( int argc, char** argv); void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]); fprintf(stderr, "\t<rows> - number of rows\n"); fprintf(stderr, "\t<cols> - number of cols\n"); fprintf(stderr, "\t<y1> - y1 value of the speckle\n"); fprintf(stderr, "\t<y2> - y2 value of the speckle\n"); fprintf(stderr, "\t<x1> - x1 value of the speckle\n"); fprintf(stderr, "\t<x2> - x2 value of the speckle\n"); fprintf(stderr, "\t<lamda> - lambda (0,1)\n"); fprintf(stderr, "\t<no. 
of iter> - number of iterations\n"); exit(1); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); runTest( argc, argv); fprintf(stderr, "%f\n", duration); return EXIT_SUCCESS; } void runTest( int argc, char** argv) { int rows, cols, size_I, size_R, niter = 10, iter; float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ; cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1024*1024*600); //sderek #ifdef CPU float Jc, G2, L, num, den, qsqr; int *iN,*iS,*jE,*jW, k; float *dN,*dS,*dW,*dE; float cN,cS,cW,cE,D; #endif #ifdef GPU float *J_cuda; float *C_cuda; float *E_C, *W_C, *N_C, *S_C; #endif unsigned int r1, r2, c1, c2; float *c; if (argc == 9) { rows = atoi(argv[1]); //number of rows in the domain cols = atoi(argv[2]); //number of cols in the domain if ((rows%16!=0) || (cols%16!=0)){ fprintf(stderr, "rows and cols must be multiples of 16\n"); exit(1); } r1 = atoi(argv[3]); //y1 position of the speckle r2 = atoi(argv[4]); //y2 position of the speckle c1 = atoi(argv[5]); //x1 position of the speckle c2 = atoi(argv[6]); //x2 position of the speckle lambda = atof(argv[7]); //Lambda value niter = atoi(argv[8]); //number of iterations } else{ usage(argc, argv); } size_I = cols * rows; size_R = (r2-r1+1)*(c2-c1+1); I = (float *)malloc( size_I * sizeof(float) ); J = (float *)malloc( size_I * sizeof(float) ); c = (float *)malloc(sizeof(float)* size_I) ; #ifdef CPU iN = (int *)malloc(sizeof(unsigned int*) * rows) ; iS = (int *)malloc(sizeof(unsigned int*) * rows) ; jW = (int *)malloc(sizeof(unsigned int*) * cols) ; jE = (int *)malloc(sizeof(unsigned int*) * cols) ; dN = (float *)malloc(sizeof(float)* size_I) ; dS = (float *)malloc(sizeof(float)* size_I) ; dW = (float *)malloc(sizeof(float)* size_I) ; dE = (float *)malloc(sizeof(float)* size_I) ; for (int 
i=0; i< rows; i++) { iN[i] = i-1; iS[i] = i+1; } for (int j=0; j< cols; j++) { jW[j] = j-1; jE[j] = j+1; } iN[0] = 0; iS[rows-1] = rows-1; jW[0] = 0; jE[cols-1] = cols-1; #endif #ifdef GPU //Allocate device memory cudaMalloc((void**)& J_cuda, sizeof(float)* size_I); cudaMalloc((void**)& C_cuda, sizeof(float)* size_I); cudaMalloc((void**)& E_C, sizeof(float)* size_I); cudaMalloc((void**)& W_C, sizeof(float)* size_I); cudaMalloc((void**)& S_C, sizeof(float)* size_I); cudaMalloc((void**)& N_C, sizeof(float)* size_I); #endif printf("Randomizing the input matrix\n"); //Generate a random matrix random_matrix(I, rows, cols); for (int k = 0; k < size_I; k++ ) { J[k] = (float)exp(I[k]) ; } printf("Start the SRAD main loop\n"); for (iter=0; iter< niter; iter++){ sum=0; sum2=0; for (int i=r1; i<=r2; i++) { for (int j=c1; j<=c2; j++) { tmp = J[i * cols + j]; sum += tmp ; sum2 += tmp*tmp; } } meanROI = sum / size_R; varROI = (sum2 / size_R) - meanROI*meanROI; q0sqr = varROI / (meanROI*meanROI); #ifdef CPU for (int i = 0 ; i < rows ; i++) { for (int j = 0; j < cols; j++) { k = i * cols + j; Jc = J[k]; // directional derivates dN[k] = J[iN[i] * cols + j] - Jc; dS[k] = J[iS[i] * cols + j] - Jc; dW[k] = J[i * cols + jW[j]] - Jc; dE[k] = J[i * cols + jE[j]] - Jc; G2 = (dN[k]*dN[k] + dS[k]*dS[k] + dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc); L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc; num = (0.5*G2) - ((1.0/16.0)*(L*L)) ; den = 1 + (.25*L); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c[k] = 1.0 / (1.0+den) ; // saturate diffusion coefficent if (c[k] < 0) {c[k] = 0;} else if (c[k] > 1) {c[k] = 1;} } } for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { // current index k = i * cols + j; // diffusion coefficent cN = c[k]; cS = c[iS[i] * cols + j]; cW = c[k]; cE = c[i * cols + jE[j]]; // divergence (equ 58) D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k]; // image update (equ 61) J[k] = J[k] + 0.25*lambda*D; } } #endif // 
CPU #ifdef GPU //Currently the input size must be divided by 16 - the block size int block_x = cols/BLOCK_SIZE ; int block_y = rows/BLOCK_SIZE ; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(block_x , block_y); //Copy data from main memory to device memory cudaMemcpy(J_cuda, J, sizeof(float) * size_I, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); double time0 = get_time(); //Run kernels srad_cuda_1<<<dimGrid, dimBlock>>>(E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr); srad_cuda_2<<<dimGrid, dimBlock>>>(E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr); cudaDeviceSynchronize(); double time1 = get_time(); duration += time1-time0; //Copy data from device memory to main memory cudaMemcpy(J, J_cuda, sizeof(float) * size_I, cudaMemcpyDeviceToHost); #endif } cudaThreadSynchronize(); printf("Printing Output:\n"); for( int i = 0 ; i < 10 ; i++){ for ( int j = 0 ; j < 10 ; j++){ printf("%.5f ", J[i * cols + j]); } printf("\n"); } #ifdef OUTPUT //Printing output printf("Printing Output:\n"); for( int i = 0 ; i < rows ; i++){ for ( int j = 0 ; j < cols ; j++){ printf("%.5f ", J[i * cols + j]); } printf("\n"); } #endif printf("Computation Done\n"); free(I); free(J); #ifdef CPU free(iN); free(iS); free(jW); free(jE); free(dN); free(dS); free(dW); free(dE); #endif #ifdef GPU cudaFree(C_cuda); cudaFree(J_cuda); cudaFree(E_C); cudaFree(W_C); cudaFree(N_C); cudaFree(S_C); #endif free(c); } void random_matrix(float *I, int rows, int cols){ srand(7); for( int i = 0 ; i < rows ; i++){ for ( int j = 0 ; j < cols ; j++){ I[i * cols + j] = rand()/(float)RAND_MAX ; } } }
98e005bfdf41acdec5eca5317949b992714587fc.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2010 @generated c Wed Nov 14 22:53:54 2012 @author Ichitaro Yamazaki */ #include "common_magma.h" #define PRECISION_c #include "commonblas.h" // // m, n - dimensions in the output (ha) matrix. // This routine copies the dat matrix from the GPU // to ha on the CPU. In addition, the output matrix // is transposed. The routine uses a buffer of size // 2*lddb*nb pointed to by dB (lddb > m) on the GPU. // Note that lda >= m and lddat >= n. // extern "C" void magmablas_cgetmatrix_transpose_mgpu( magma_int_t ngpus, hipStream_t stream[][2], cuFloatComplex **dat, magma_int_t ldda, cuFloatComplex *ha, magma_int_t lda, cuFloatComplex **db, magma_int_t lddb, magma_int_t m, magma_int_t n, magma_int_t nb) { #define A(j) (ha + (j)*lda) #define dB(d, j) (db[(d)] + (j)*nb*lddb) #define dAT(d, j) (dat[(d)] + (j)*nb) int nstreams = 2, j, j_local, d, id, ib; /* Quick return */ if ( (m == 0) || (n == 0) ) return; if (lda < m || ngpus*ldda < n || lddb < m){ printf( "Wrong arguments in magmablas_cgetmatrix_transpose_mgpu (%d<%d), (%d*%d<%d), or (%d<%d).\n", lda, m, ngpus, ldda, n, lddb, m ); return; } /* Move data from GPU to CPU using two buffers; first transpose the data on the GPU */ for(j=0; j<n; j+=nb){ d = (j/nb)%ngpus; j_local = (j/nb)/ngpus; id = j_local%nstreams; magma_setdevice(d); ib = min(n-j, nb); magmablasSetKernelStream(stream[d][id]); magmablas_ctranspose2( dB(d, id), lddb, dAT(d, j_local), ldda, ib, m); magma_cgetmatrix_async( m, ib, dB(d, id), lddb, A(j), lda, stream[d][id] ); } }
98e005bfdf41acdec5eca5317949b992714587fc.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2010 @generated c Wed Nov 14 22:53:54 2012 @author Ichitaro Yamazaki */ #include "common_magma.h" #define PRECISION_c #include "commonblas.h" // // m, n - dimensions in the output (ha) matrix. // This routine copies the dat matrix from the GPU // to ha on the CPU. In addition, the output matrix // is transposed. The routine uses a buffer of size // 2*lddb*nb pointed to by dB (lddb > m) on the GPU. // Note that lda >= m and lddat >= n. // extern "C" void magmablas_cgetmatrix_transpose_mgpu( magma_int_t ngpus, cudaStream_t stream[][2], cuFloatComplex **dat, magma_int_t ldda, cuFloatComplex *ha, magma_int_t lda, cuFloatComplex **db, magma_int_t lddb, magma_int_t m, magma_int_t n, magma_int_t nb) { #define A(j) (ha + (j)*lda) #define dB(d, j) (db[(d)] + (j)*nb*lddb) #define dAT(d, j) (dat[(d)] + (j)*nb) int nstreams = 2, j, j_local, d, id, ib; /* Quick return */ if ( (m == 0) || (n == 0) ) return; if (lda < m || ngpus*ldda < n || lddb < m){ printf( "Wrong arguments in magmablas_cgetmatrix_transpose_mgpu (%d<%d), (%d*%d<%d), or (%d<%d).\n", lda, m, ngpus, ldda, n, lddb, m ); return; } /* Move data from GPU to CPU using two buffers; first transpose the data on the GPU */ for(j=0; j<n; j+=nb){ d = (j/nb)%ngpus; j_local = (j/nb)/ngpus; id = j_local%nstreams; magma_setdevice(d); ib = min(n-j, nb); magmablasSetKernelStream(stream[d][id]); magmablas_ctranspose2( dB(d, id), lddb, dAT(d, j_local), ldda, ib, m); magma_cgetmatrix_async( m, ib, dB(d, id), lddb, A(j), lda, stream[d][id] ); } }
f6447b10cb89dbcf3fae845204f424151ff93e1d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "col2im_dilated.h" #include "hip/hip_runtime.h" } // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu // You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE __device__ float get_col_gpu_pixel(int row, int dilate_ksize, int ksize, int dilate_rate, int height_col, int width_col, int stride, int h_col, int w_col, const float* col_gpu) { int width_kernel_dilated = row % dilate_ksize; // start from 1 int height_kernel_dilated = (row / (dilate_ksize)) % dilate_ksize + 1; // start from 1 if (width_kernel_dilated == 0){ width_kernel_dilated = dilate_ksize; height_kernel_dilated--; } int channel_kernel_dilated = row / (dilate_ksize * dilate_ksize); // start from 1 int c = channel_kernel_dilated; int w = width_kernel_dilated / dilate_rate; int h = height_kernel_dilated / dilate_rate; int pixel_row = c * ksize * ksize + (h-1) * ksize + w - 1; int pixel_column = h_col * width_col + w_col; float pixel = col_gpu[pixel_row * width_col * height_col + pixel_column]; return pixel; } __device__ bool isvalid(int dilate_ksize, int dilate_rate, int row) { int width_kernel = row % dilate_ksize; // start from 1 int height_kernel = (row / dilate_ksize) % dilate_ksize + 1; // start from 1 if (width_kernel == 0){ width_kernel = dilate_ksize; height_kernel = height_kernel - 1; } if (width_kernel % dilate_rate==0 && height_kernel % dilate_rate==0) return 1; else return 0; } __global__ void col2im_dilated_gpu_kernel(const int n, const float* col_gpu, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, int dilate_rate,int channels, float *im_gpu) { int index = blockIdx.x*blockDim.x+threadIdx.x; for(; index < n; index += blockDim.x*gridDim.x){ float val = 0; int w = index % width + pad; int h = (index / width) % 
height + pad; int c = index / (width * height); // compute the start and end of the output int d_ksize = (dilate_rate - 1) * (ksize + 1) + ksize; // dilated kernel size int w_col_start = (w < d_ksize) ? 0 : (w - d_ksize) / stride + 1; int w_col_end = min(w / stride + 1, width_col); int h_col_start = (h < d_ksize) ? 0 : (h - d_ksize) / stride + 1; int h_col_end = min(h / stride + 1, height_col); // equivalent implementation int d_offset = (c * d_ksize * d_ksize + h * d_ksize + w) * height_col * width_col; int d_coeff_h_col = (1 - stride * d_ksize * height_col) * width_col; int d_coeff_w_col = (1 - stride * height_col * width_col); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int row = (d_offset + h_col * d_coeff_h_col + w_col * d_coeff_w_col)/(height_col*width_col) + 1; if(isvalid(d_ksize, dilate_rate, row)){ val += get_col_gpu_pixel(row, d_ksize, ksize, dilate_rate, height_col, width_col, stride, h_col, w_col, col_gpu); }else{ val += 0; } } } im_gpu[index] += val; } } void col2im_dilated_gpu(float *col_gpu, int channels, int height, int width, int ksize, int stride, int pad, int dilate_rate, float *im_gpu){ // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int dilate_ksize = (dilate_rate - 1) * (ksize + 1) + ksize; int height_col = (height + 2 * pad - dilate_ksize) / stride + 1; // convolutional layer output height int width_col = (width + 2 * pad - dilate_ksize) / stride + 1; // convolutional layer output width int num_kernels = channels * height_col * width_col; // number of elements in each kernel hipLaunchKernelGGL(( col2im_dilated_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK), dim3(BLOCK), 0, 0, channels*height*width, col_gpu, height, width, ksize, pad, stride, height_col, width_col, dilate_rate,channels, im_gpu); }
f6447b10cb89dbcf3fae845204f424151ff93e1d.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "col2im_dilated.h" #include "cuda.h" } // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu // You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE __device__ float get_col_gpu_pixel(int row, int dilate_ksize, int ksize, int dilate_rate, int height_col, int width_col, int stride, int h_col, int w_col, const float* col_gpu) { int width_kernel_dilated = row % dilate_ksize; // start from 1 int height_kernel_dilated = (row / (dilate_ksize)) % dilate_ksize + 1; // start from 1 if (width_kernel_dilated == 0){ width_kernel_dilated = dilate_ksize; height_kernel_dilated--; } int channel_kernel_dilated = row / (dilate_ksize * dilate_ksize); // start from 1 int c = channel_kernel_dilated; int w = width_kernel_dilated / dilate_rate; int h = height_kernel_dilated / dilate_rate; int pixel_row = c * ksize * ksize + (h-1) * ksize + w - 1; int pixel_column = h_col * width_col + w_col; float pixel = col_gpu[pixel_row * width_col * height_col + pixel_column]; return pixel; } __device__ bool isvalid(int dilate_ksize, int dilate_rate, int row) { int width_kernel = row % dilate_ksize; // start from 1 int height_kernel = (row / dilate_ksize) % dilate_ksize + 1; // start from 1 if (width_kernel == 0){ width_kernel = dilate_ksize; height_kernel = height_kernel - 1; } if (width_kernel % dilate_rate==0 && height_kernel % dilate_rate==0) return 1; else return 0; } __global__ void col2im_dilated_gpu_kernel(const int n, const float* col_gpu, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, int dilate_rate,int channels, float *im_gpu) { int index = blockIdx.x*blockDim.x+threadIdx.x; for(; index < n; index += blockDim.x*gridDim.x){ float val = 0; int w = index % width + pad; int h = (index / width) % height + pad; int c = index / (width * height); // compute the start and end of the 
output int d_ksize = (dilate_rate - 1) * (ksize + 1) + ksize; // dilated kernel size int w_col_start = (w < d_ksize) ? 0 : (w - d_ksize) / stride + 1; int w_col_end = min(w / stride + 1, width_col); int h_col_start = (h < d_ksize) ? 0 : (h - d_ksize) / stride + 1; int h_col_end = min(h / stride + 1, height_col); // equivalent implementation int d_offset = (c * d_ksize * d_ksize + h * d_ksize + w) * height_col * width_col; int d_coeff_h_col = (1 - stride * d_ksize * height_col) * width_col; int d_coeff_w_col = (1 - stride * height_col * width_col); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int row = (d_offset + h_col * d_coeff_h_col + w_col * d_coeff_w_col)/(height_col*width_col) + 1; if(isvalid(d_ksize, dilate_rate, row)){ val += get_col_gpu_pixel(row, d_ksize, ksize, dilate_rate, height_col, width_col, stride, h_col, w_col, col_gpu); }else{ val += 0; } } } im_gpu[index] += val; } } void col2im_dilated_gpu(float *col_gpu, int channels, int height, int width, int ksize, int stride, int pad, int dilate_rate, float *im_gpu){ // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int dilate_ksize = (dilate_rate - 1) * (ksize + 1) + ksize; int height_col = (height + 2 * pad - dilate_ksize) / stride + 1; // convolutional layer output height int width_col = (width + 2 * pad - dilate_ksize) / stride + 1; // convolutional layer output width int num_kernels = channels * height_col * width_col; // number of elements in each kernel col2im_dilated_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK, BLOCK>>>( channels*height*width, col_gpu, height, width, ksize, pad, stride, height_col, width_col, dilate_rate,channels, im_gpu); }
844adecc448c4a04238ac4afcdafffeff44ff283.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <strings/regex/regcomp.h> #include <strings/regex/regex.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <algorithm> namespace cudf { namespace strings { namespace detail { namespace { /** * @brief Converts UTF-8 string into fixed-width 32-bit character vector. * * No character conversion occurs. * Each UTF-8 character is promoted into a 32-bit value. * The last entry in the returned vector will be a 0 value. * The fixed-width vector makes it easier to compile and faster to execute. * * @param pattern Regular expression encoded with UTF-8. * @return Fixed-width 32-bit character vector. 
*/ std::vector<char32_t> string_to_char32_vector(std::string const& pattern) { size_type size = static_cast<size_type>(pattern.size()); size_type count = std::count_if(pattern.cbegin(), pattern.cend(), [](char ch) { return is_begin_utf8_char(static_cast<uint8_t>(ch)); }); std::vector<char32_t> result(count + 1); char32_t* output_ptr = result.data(); const char* input_ptr = pattern.data(); for (size_type idx = 0; idx < size; ++idx) { char_utf8 output_character = 0; size_type ch_width = to_char_utf8(input_ptr, output_character); input_ptr += ch_width; idx += ch_width - 1; *output_ptr++ = output_character; } result[count] = 0; // last entry set to 0 return result; } } // namespace // Copy reprog primitive values reprog_device::reprog_device(reprog& prog) : _startinst_id{prog.get_start_inst()}, _num_capturing_groups{prog.groups_count()}, _insts_count{prog.insts_count()}, _starts_count{prog.starts_count()}, _classes_count{prog.classes_count()} { } std::unique_ptr<reprog_device, std::function<void(reprog_device*)>> reprog_device::create( std::string const& pattern, uint8_t const* codepoint_flags, size_type strings_count, rmm::cuda_stream_view stream) { return reprog_device::create( pattern, regex_flags::MULTILINE, codepoint_flags, strings_count, stream); } // Create instance of the reprog that can be passed into a device kernel std::unique_ptr<reprog_device, std::function<void(reprog_device*)>> reprog_device::create( std::string const& pattern, regex_flags const flags, uint8_t const* codepoint_flags, size_type strings_count, rmm::cuda_stream_view stream) { std::vector<char32_t> pattern32 = string_to_char32_vector(pattern); // compile pattern into host object reprog h_prog = reprog::create_from(pattern32.data(), flags); // compute size to hold all the member data auto insts_count = h_prog.insts_count(); auto classes_count = h_prog.classes_count(); auto starts_count = h_prog.starts_count(); // compute size of each section; make sure each is aligned appropriately auto 
insts_size = cudf::util::round_up_safe<size_t>(insts_count * sizeof(_insts[0]), sizeof(size_t)); auto startids_size = cudf::util::round_up_safe<size_t>(starts_count * sizeof(_startinst_ids[0]), sizeof(size_t)); auto classes_size = cudf::util::round_up_safe<size_t>(classes_count * sizeof(_classes[0]), sizeof(size_t)); for (int32_t idx = 0; idx < classes_count; ++idx) classes_size += static_cast<int32_t>((h_prog.class_at(idx).literals.size()) * sizeof(char32_t)); size_t memsize = insts_size + startids_size + classes_size; size_t rlm_size = 0; // check memory size needed for executing regex if (insts_count > RX_LARGE_INSTS) { auto relist_alloc_size = relist::alloc_size(insts_count); rlm_size = relist_alloc_size * 2L * strings_count; // reljunk has 2 relist ptrs } // allocate memory to store prog data std::vector<u_char> h_buffer(memsize); u_char* h_ptr = h_buffer.data(); // running pointer auto* d_buffer = new rmm::device_buffer(memsize, stream); u_char* d_ptr = reinterpret_cast<u_char*>(d_buffer->data()); // running device pointer // put everything into a flat host buffer first reprog_device* d_prog = new reprog_device(h_prog); // copy the instructions array first (fixed-size structs) reinst* insts = reinterpret_cast<reinst*>(h_ptr); memcpy(insts, h_prog.insts_data(), insts_size); h_ptr += insts_size; // next section d_prog->_insts = reinterpret_cast<reinst*>(d_ptr); d_ptr += insts_size; // copy the startinst_ids next (ints) int32_t* startinst_ids = reinterpret_cast<int32_t*>(h_ptr); memcpy(startinst_ids, h_prog.starts_data(), startids_size); h_ptr += startids_size; // next section d_prog->_startinst_ids = reinterpret_cast<int32_t*>(d_ptr); d_ptr += startids_size; // copy classes into flat memory: [class1,class2,...][char32 arrays] reclass_device* classes = reinterpret_cast<reclass_device*>(h_ptr); d_prog->_classes = reinterpret_cast<reclass_device*>(d_ptr); // get pointer to the end to handle variable length data u_char* h_end = h_ptr + (classes_count * 
sizeof(reclass_device)); u_char* d_end = d_ptr + (classes_count * sizeof(reclass_device)); // place each class and append the variable length data for (int32_t idx = 0; idx < classes_count; ++idx) { reclass& h_class = h_prog.class_at(idx); reclass_device d_class; d_class.builtins = h_class.builtins; d_class.count = h_class.literals.size() / 2; d_class.literals = reinterpret_cast<char32_t*>(d_end); memcpy(classes++, &d_class, sizeof(d_class)); memcpy(h_end, h_class.literals.c_str(), h_class.literals.size() * sizeof(char32_t)); h_end += h_class.literals.size() * sizeof(char32_t); d_end += h_class.literals.size() * sizeof(char32_t); } // initialize the rest of the elements d_prog->_insts_count = insts_count; d_prog->_starts_count = starts_count; d_prog->_classes_count = classes_count; d_prog->_codepoint_flags = codepoint_flags; // allocate execute memory if needed rmm::device_buffer* d_relists{}; if (rlm_size > 0) { d_relists = new rmm::device_buffer(rlm_size, stream); d_prog->_relists_mem = d_relists->data(); } // copy flat prog to device memory CUDA_TRY(hipMemcpyAsync( d_buffer->data(), h_buffer.data(), memsize, hipMemcpyHostToDevice, stream.value())); // auto deleter = [d_buffer, d_relists](reprog_device* t) { t->destroy(); delete d_buffer; delete d_relists; }; return std::unique_ptr<reprog_device, std::function<void(reprog_device*)>>(d_prog, deleter); } void reprog_device::destroy() { delete this; } } // namespace detail } // namespace strings } // namespace cudf
844adecc448c4a04238ac4afcdafffeff44ff283.cu
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <strings/regex/regcomp.h>
#include <strings/regex/regex.cuh>

#include <cudf/detail/utilities/integer_utils.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>

#include <algorithm>

namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
 * @brief Converts UTF-8 string into fixed-width 32-bit character vector.
 *
 * No character conversion occurs.
 * Each UTF-8 character is promoted into a 32-bit value.
 * The last entry in the returned vector will be a 0 value.
 * The fixed-width vector makes it easier to compile and faster to execute.
 *
 * @param pattern Regular expression encoded with UTF-8.
 * @return Fixed-width 32-bit character vector.
 */
std::vector<char32_t> string_to_char32_vector(std::string const& pattern)
{
  size_type size = static_cast<size_type>(pattern.size());
  // count only UTF-8 lead bytes so each multi-byte sequence maps to one output slot
  size_type count = std::count_if(pattern.cbegin(), pattern.cend(), [](char ch) {
    return is_begin_utf8_char(static_cast<uint8_t>(ch));
  });
  std::vector<char32_t> result(count + 1);
  char32_t* output_ptr  = result.data();
  const char* input_ptr = pattern.data();
  for (size_type idx = 0; idx < size; ++idx) {
    char_utf8 output_character = 0;
    size_type ch_width         = to_char_utf8(input_ptr, output_character);
    input_ptr += ch_width;
    idx += ch_width - 1;  // skip over the continuation bytes just consumed
    *output_ptr++ = output_character;
  }
  result[count] = 0;  // last entry set to 0 (acts as a terminator)
  return result;
}

}  // namespace

// Copy reprog primitive values from the compiled host program.
reprog_device::reprog_device(reprog& prog)
  : _startinst_id{prog.get_start_inst()},
    _num_capturing_groups{prog.groups_count()},
    _insts_count{prog.insts_count()},
    _starts_count{prog.starts_count()},
    _classes_count{prog.classes_count()}
{
}

// Convenience overload: compiles the pattern using MULTILINE as the default flag set.
std::unique_ptr<reprog_device, std::function<void(reprog_device*)>> reprog_device::create(
  std::string const& pattern,
  uint8_t const* codepoint_flags,
  size_type strings_count,
  rmm::cuda_stream_view stream)
{
  return reprog_device::create(
    pattern, regex_flags::MULTILINE, codepoint_flags, strings_count, stream);
}

// Create instance of the reprog that can be passed into a device kernel.
//
// The program's sections (instructions, start-ids, classes + class literal data)
// are first laid out into a single flat host buffer, then copied to device
// memory in one async memcpy.  The reprog_device object itself lives on the
// host and holds device pointers into that flat buffer; the returned
// unique_ptr's deleter frees the device buffers along with the object.
std::unique_ptr<reprog_device, std::function<void(reprog_device*)>> reprog_device::create(
  std::string const& pattern,
  regex_flags const flags,
  uint8_t const* codepoint_flags,
  size_type strings_count,
  rmm::cuda_stream_view stream)
{
  std::vector<char32_t> pattern32 = string_to_char32_vector(pattern);
  // compile pattern into host object
  reprog h_prog = reprog::create_from(pattern32.data(), flags);
  // compute size to hold all the member data
  auto insts_count   = h_prog.insts_count();
  auto classes_count = h_prog.classes_count();
  auto starts_count  = h_prog.starts_count();
  // compute size of each section; make sure each is aligned appropriately
  auto insts_size =
    cudf::util::round_up_safe<size_t>(insts_count * sizeof(_insts[0]), sizeof(size_t));
  auto startids_size =
    cudf::util::round_up_safe<size_t>(starts_count * sizeof(_startinst_ids[0]), sizeof(size_t));
  auto classes_size =
    cudf::util::round_up_safe<size_t>(classes_count * sizeof(_classes[0]), sizeof(size_t));
  // add the variable-length literal data carried by each class
  for (int32_t idx = 0; idx < classes_count; ++idx)
    classes_size += static_cast<int32_t>((h_prog.class_at(idx).literals.size()) * sizeof(char32_t));
  size_t memsize  = insts_size + startids_size + classes_size;
  size_t rlm_size = 0;
  // check memory size needed for executing regex
  if (insts_count > RX_LARGE_INSTS) {
    auto relist_alloc_size = relist::alloc_size(insts_count);
    rlm_size               = relist_alloc_size * 2L * strings_count;  // reljunk has 2 relist ptrs
  }

  // allocate memory to store prog data
  std::vector<u_char> h_buffer(memsize);
  u_char* h_ptr  = h_buffer.data();  // running pointer
  auto* d_buffer = new rmm::device_buffer(memsize, stream);
  u_char* d_ptr  = reinterpret_cast<u_char*>(d_buffer->data());  // running device pointer
  // put everything into a flat host buffer first
  reprog_device* d_prog = new reprog_device(h_prog);
  // copy the instructions array first (fixed-size structs)
  reinst* insts = reinterpret_cast<reinst*>(h_ptr);
  memcpy(insts, h_prog.insts_data(), insts_size);
  h_ptr += insts_size;  // next section
  d_prog->_insts = reinterpret_cast<reinst*>(d_ptr);
  d_ptr += insts_size;
  // copy the startinst_ids next (ints)
  int32_t* startinst_ids = reinterpret_cast<int32_t*>(h_ptr);
  memcpy(startinst_ids, h_prog.starts_data(), startids_size);
  h_ptr += startids_size;  // next section
  d_prog->_startinst_ids = reinterpret_cast<int32_t*>(d_ptr);
  d_ptr += startids_size;
  // copy classes into flat memory: [class1,class2,...][char32 arrays]
  reclass_device* classes = reinterpret_cast<reclass_device*>(h_ptr);
  d_prog->_classes        = reinterpret_cast<reclass_device*>(d_ptr);
  // get pointer to the end to handle variable length data
  u_char* h_end = h_ptr + (classes_count * sizeof(reclass_device));
  u_char* d_end = d_ptr + (classes_count * sizeof(reclass_device));
  // place each class and append the variable length data
  for (int32_t idx = 0; idx < classes_count; ++idx) {
    reclass& h_class = h_prog.class_at(idx);
    reclass_device d_class;
    d_class.builtins = h_class.builtins;
    // literals appear to be stored two char32 entries per counted unit (hence /2)
    // -- presumably [begin,end] range pairs; confirm against reclass definition
    d_class.count    = h_class.literals.size() / 2;
    d_class.literals = reinterpret_cast<char32_t*>(d_end);  // device address of this class's data
    memcpy(classes++, &d_class, sizeof(d_class));
    memcpy(h_end, h_class.literals.c_str(), h_class.literals.size() * sizeof(char32_t));
    h_end += h_class.literals.size() * sizeof(char32_t);
    d_end += h_class.literals.size() * sizeof(char32_t);
  }
  // initialize the rest of the elements
  d_prog->_insts_count     = insts_count;
  d_prog->_starts_count    = starts_count;
  d_prog->_classes_count   = classes_count;
  d_prog->_codepoint_flags = codepoint_flags;
  // allocate execute memory if needed
  rmm::device_buffer* d_relists{};
  if (rlm_size > 0) {
    d_relists            = new rmm::device_buffer(rlm_size, stream);
    d_prog->_relists_mem = d_relists->data();
  }

  // copy flat prog to device memory
  CUDA_TRY(cudaMemcpyAsync(
    d_buffer->data(), h_buffer.data(), memsize, cudaMemcpyHostToDevice, stream.value()));

  // deleter releases the object plus both backing device buffers
  auto deleter = [d_buffer, d_relists](reprog_device* t) {
    t->destroy();
    delete d_buffer;
    delete d_relists;
  };
  return std::unique_ptr<reprog_device, std::function<void(reprog_device*)>>(d_prog, deleter);
}

// Frees the host-side reprog_device object (device buffers are owned by the
// unique_ptr deleter built in create()).
void reprog_device::destroy() { delete this; }

}  // namespace detail
}  // namespace strings
}  // namespace cudf
5456c03f62289e3585def7db06d4b76fa8833113.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Udacity HW 6
// Poisson Blending

#include "utils.h"
#include <thrust/host_vector.h>
#include "reference_calc.cpp"
#include <float.h>
#include <math.h>
#include <stdio.h>

// Pre-compute the values of g, which depend only on the source image
// and aren't iteration dependent.
// For each interior pixel this is the discrete Laplacian of the source
// channel: 4*center - left - right - up - down.
// NOTE: coord.x is the ROW and coord.y is the COLUMN here (the list is built
// as make_uint2(r, c) in your_blend below).
void compute_G(const unsigned char* const channel,
               float* const g,
               const size_t numColsSource,
               const std::vector<uint2>& interiorPixelList)
{
  for (size_t i = 0; i < interiorPixelList.size(); ++i) {
    uint2 coord         = interiorPixelList[i];
    unsigned int offset = coord.x * numColsSource + coord.y;

    float sum = 4.f * channel[offset];

    sum -= (float)channel[offset - 1] + (float)channel[offset + 1];
    sum -= (float)channel[offset + numColsSource] + (float)channel[offset - numColsSource];

    g[offset] = sum;
  }
}

// Initializes both ping-pong Jacobi buffers of every channel with the source
// image values (the initial guess for the iteration).
// Expects a 2D launch covering at least numCols x numRows threads; the
// i < srcSize guard discards the overshoot.
__global__ void addToBlended(float* blendedValsRed_1,
                             float* blendedValsRed_2,
                             float* blendedValsBlue_1,
                             float* blendedValsBlue_2,
                             float* blendedValsGreen_1,
                             float* blendedValsGreen_2,
                             unsigned char* d_red_src,
                             unsigned char* d_blue_src,
                             unsigned char* d_green_src,
                             const size_t numCols,
                             const size_t numRows)
{
  const size_t srcSize = numCols * numRows;
  int main_x = threadIdx.x + blockDim.x * blockIdx.x;
  int main_y = threadIdx.y + blockDim.y * blockIdx.y;
  int i      = main_x + main_y * numCols;
  if (i < srcSize) {
    blendedValsRed_1[i]   = d_red_src[i];
    blendedValsRed_2[i]   = d_red_src[i];
    blendedValsBlue_1[i]  = d_blue_src[i];
    blendedValsBlue_2[i]  = d_blue_src[i];
    blendedValsGreen_1[i] = d_green_src[i];
    blendedValsGreen_2[i] = d_green_src[i];
  }
}

// True when the pixel is not pure white (white == outside the mask).
// NOTE(review): currently unused -- getMask inlines its own equivalent test.
__device__ bool isMasked(uchar4 val) { return (val.x < 255 || val.y < 255 || val.z < 255); }

// Writes 1 into d_mask for every non-white source pixel, 0 otherwise.
__global__ void getMask(unsigned char* d_mask,
                        uchar4* d_sourceImg,
                        const size_t numCols,
                        const size_t numRows)
{
  const size_t srcSize = numCols * numRows;
  int main_x  = threadIdx.x + blockDim.x * blockIdx.x;
  int main_y  = threadIdx.y + blockDim.y * blockIdx.y;
  int main_id = main_x + main_y * numCols;
  if (main_id >= srcSize) return;
  d_mask[main_id] =
    (d_sourceImg[main_id].x + d_sourceImg[main_id].y + d_sourceImg[main_id].z < 3 * 255) ? 1 : 0;
}

// Classifies each masked pixel as strict-interior (all 4 neighbors masked)
// or border (1-3 neighbors masked).  Output arrays must be zeroed beforehand.
// NOTE(review): there is no main_id/srcSize guard here and the left/right/
// up/down indices are not clamped, so threads on the image edges (and the
// overshoot rows/cols of the launch grid) read out of bounds -- this only
// works if the mask never touches the image boundary; TODO confirm/guard.
__global__ void findBorderPixels(unsigned char* d_mask,
                                 unsigned char* d_borderPixels,
                                 unsigned char* d_strictInteriorPixels,
                                 const size_t numCols)
{
  int main_x  = threadIdx.x + blockDim.x * blockIdx.x;
  int main_y  = threadIdx.y + blockDim.y * blockIdx.y;
  int main_id = main_x + main_y * numCols;
  int right   = (main_x + 1) + main_y * numCols;
  int left    = (main_x - 1) + main_y * numCols;
  int up      = main_x + (main_y + 1) * numCols;
  int down    = main_x + (main_y - 1) * numCols;
  // __syncthreads();
  // now we need to check the four pixels north south east west to see if they are in the mask or not
  if (d_mask[main_id] == 1) {
    int isInside = 0;
    if (d_mask[left] == 1) isInside++;
    if (d_mask[right] == 1) isInside++;
    if (d_mask[up] == 1) isInside++;
    if (d_mask[down] == 1) isInside++;
    if (isInside == 4) {
      d_strictInteriorPixels[main_id] = 1;
    } else if (isInside > 0) {
      d_borderPixels[main_id] = 1;
    }
  }
}

// Splits the interleaved uchar4 source and destination images into separate
// per-channel arrays.
// NOTE(review): no main_id bounds guard -- the launch grid overshoots the
// image (ceil(...)+1 in your_blend), so edge threads write out of bounds;
// TODO add an `if (main_id >= numCols*numRows) return;` guard.
// NOTE(review): .y is treated as blue and .z as green throughout this file;
// the final write-back in your_blend uses the same mapping so it is
// internally consistent.
__global__ void seperateRGB(uchar4* d_sourceImg,
                            uchar4* d_destImg,
                            unsigned char* red_src,
                            unsigned char* blue_src,
                            unsigned char* green_src,
                            unsigned char* red_dst,
                            unsigned char* blue_dst,
                            unsigned char* green_dst,
                            const size_t numCols,
                            const size_t numRows)
{
  int main_x  = threadIdx.x + blockDim.x * blockIdx.x;
  int main_y  = threadIdx.y + blockDim.y * blockIdx.y;
  int main_id = main_x + main_y * numCols;
  red_src[main_id]   = d_sourceImg[main_id].x;
  blue_src[main_id]  = d_sourceImg[main_id].y;
  green_src[main_id] = d_sourceImg[main_id].z;
  red_dst[main_id]   = d_destImg[main_id].x;
  blue_dst[main_id]  = d_destImg[main_id].y;
  green_dst[main_id] = d_destImg[main_id].z;
}

// Jacobi kernel: one thread per interior pixel; reads the previous iterate f
// and writes the next iterate into f_next (caller ping-pongs the buffers).
__global__ void jacobi(unsigned char* const dstImg,
                       unsigned char* const strictInteriorPixels,
                       unsigned char* const borderPixels,
                       uint2* interiorPixelList,
                       const size_t numColsSource,
                       float* f,
                       float* g,
                       float* const f_next,
                       int listSize)
{
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  int zero = 0;  // NOTE(review): unused leftover
  if (i < listSize) {
    float blendedSum = 0.f;
    float borderSum  = 0.f;
    uint2 coord         = interiorPixelList[i];
    unsigned int offset = coord.x * numColsSource + coord.y;
    // process all 4 neighbor pixels
    // for each pixel if it is an interior pixel
    // then we add the previous f, otherwise if it is a
    // border pixel then we add the value of the destination
    // image at the border. These border values are our boundary
    // conditions.
    if (strictInteriorPixels[offset - 1]) {
      blendedSum += f[offset - 1];
    } else {
      borderSum += dstImg[offset - 1];
    }
    if (strictInteriorPixels[offset + 1]) {
      blendedSum += f[offset + 1];
    } else {
      borderSum += dstImg[offset + 1];
    }
    if (strictInteriorPixels[offset - numColsSource]) {
      blendedSum += f[offset - numColsSource];
    } else {
      borderSum += dstImg[offset - numColsSource];
    }
    if (strictInteriorPixels[offset + numColsSource]) {
      blendedSum += f[offset + numColsSource];
    } else {
      borderSum += dstImg[offset + numColsSource];
    }
    float f_next_val = (blendedSum + borderSum + g[offset]) / 4.f;
    f_next[offset]   = min(255.f, max(0.f, f_next_val));  // clip to [0, 255]
  }
}

// Poisson-blends h_sourceImg into h_destImg (source region = non-white pixels)
// using 800 Jacobi iterations per channel, writing the result to h_blendedImg.
// NOTE(review): the test_* and t_*_src/dst arrays are runtime-sized VLAs on
// the stack -- large images will overflow the stack; consider heap allocation.
// NOTE(review): none of the hipMalloc'd buffers nor the non-t_ blendedVals*
// heap arrays are freed -- this function leaks device and host memory.
void your_blend(const uchar4* const h_sourceImg,  // IN
                const size_t numRowsSource,
                const size_t numColsSource,
                const uchar4* const h_destImg,  // IN
                uchar4* const h_blendedImg)     // OUT
{
  size_t srcSize = numRowsSource * numColsSource;

  // device allocations for the mask and the border/strict-interior classification
  unsigned char* d_mask;
  unsigned char* d_borderPixels;
  unsigned char* d_strictInteriorPixels;

  // host-side copies used for verification and for building the interior list
  unsigned char test_mask[srcSize];
  unsigned char test_strinct_interior[srcSize];
  unsigned char test_borderpixel[srcSize];

  uchar4* d_sourceImg;
  uchar4* d_destImg;
  uchar4* d_blendedImg;

  checkCudaErrors(hipMalloc(&d_mask, srcSize * sizeof(unsigned char)));
  checkCudaErrors(hipMalloc(&d_borderPixels, srcSize * sizeof(unsigned char)));
  checkCudaErrors(hipMalloc(&d_strictInteriorPixels, srcSize * sizeof(unsigned char)));
  // classification arrays must start zeroed (kernels only ever write 1s)
  checkCudaErrors(hipMemset(d_borderPixels, 0, srcSize * sizeof(unsigned char)));
  checkCudaErrors(hipMemset(d_strictInteriorPixels, 0, srcSize * sizeof(unsigned char)));
  checkCudaErrors(hipMalloc(&d_sourceImg, srcSize * sizeof(uchar4)));
  checkCudaErrors(hipMalloc(&d_destImg, srcSize * sizeof(uchar4)));
  checkCudaErrors(hipMalloc(&d_blendedImg, srcSize * sizeof(uchar4)));
  checkCudaErrors(hipMemcpy(d_sourceImg, h_sourceImg, (sizeof(uchar4) * srcSize), hipMemcpyHostToDevice));
  checkCudaErrors(hipMemcpy(d_destImg, h_destImg, (sizeof(uchar4) * srcSize), hipMemcpyHostToDevice));

  // NOTE(review): the names are swapped relative to convention -- block_dim is
  // the GRID size here and thread_dim the block size; integer division inside
  // ceil() means the +1 is what actually provides the round-up.
  int BLOCKS = 32;
  dim3 block_dim(BLOCKS, BLOCKS);
  dim3 thread_dim(ceil(numColsSource/block_dim.x)+1, ceil(numRowsSource/block_dim.y)+1);

  // mark every non-white source pixel as part of the blend region
  hipLaunchKernelGGL(( getMask), dim3(block_dim), dim3(thread_dim), 0, 0, d_mask, d_sourceImg, numColsSource, numRowsSource);
  //hipDeviceSynchronize();
  checkCudaErrors(hipGetLastError());

  size_t cpySize = sizeof(unsigned char) * srcSize;
  checkCudaErrors(hipMemcpy(&test_mask, d_mask, cpySize, hipMemcpyDeviceToHost));

  hipLaunchKernelGGL(( findBorderPixels), dim3(block_dim), dim3(thread_dim), 0, 0, d_mask, d_borderPixels, d_strictInteriorPixels, numColsSource);
  hipDeviceSynchronize();
  checkCudaErrors(hipGetLastError());

  checkCudaErrors(hipMemcpy(&test_borderpixel, d_borderPixels, cpySize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(&test_strinct_interior, d_strictInteriorPixels, cpySize, hipMemcpyDeviceToHost));

  // this whole bit is still needed for a later part of the serial implementation:
  // build the ordered list of strict-interior pixels on the host.
  std::vector<uint2> interiorPixelList;
  // the source region in the homework isn't near an image boundary, so we can
  // simplify the conditionals a little...
  for (size_t r = 1; r < numRowsSource - 1; ++r) {
    for (size_t c = 1; c < numColsSource - 1; ++c) {
      if (test_mask[r * numColsSource + c]) {
        if (test_mask[(r -1) * numColsSource + c] && test_mask[(r + 1) * numColsSource + c] &&
            test_mask[r * numColsSource + c - 1] && test_mask[r * numColsSource + c + 1]) {
          interiorPixelList.push_back(make_uint2(r, c));  // (row, col)
        }
      }
    }
  }

  int listSize = interiorPixelList.size();
  uint2 transferList[listSize];  // NOTE(review): VLA; also zero-length if mask is empty
  for (size_t i = 0; i < interiorPixelList.size(); ++i) {
    transferList[i] = interiorPixelList[i];
  }
  uint2* d_interiorPixelList;
  checkCudaErrors(hipMalloc(&d_interiorPixelList, (listSize * sizeof(uint2))));
  checkCudaErrors(hipMemcpy(d_interiorPixelList, transferList, (listSize * sizeof(uint2)), hipMemcpyHostToDevice));

  // split the source and destination images into their respective channels
  unsigned char t_red_src[srcSize];
  unsigned char t_blue_src[srcSize];
  unsigned char t_green_src[srcSize];
  unsigned char t_red_dst[srcSize];
  unsigned char t_blue_dst[srcSize];
  unsigned char t_green_dst[srcSize];

  unsigned char* d_red_src;
  unsigned char* d_blue_src;
  unsigned char* d_green_src;
  unsigned char* d_red_dst;
  unsigned char* d_blue_dst;
  unsigned char* d_green_dst;

  checkCudaErrors(hipMalloc(&d_red_src, srcSize * sizeof(unsigned char)));
  checkCudaErrors(hipMalloc(&d_blue_src, srcSize * sizeof(unsigned char)));
  checkCudaErrors(hipMalloc(&d_green_src, srcSize * sizeof(unsigned char)));
  checkCudaErrors(hipMalloc(&d_red_dst, srcSize * sizeof(unsigned char)));
  checkCudaErrors(hipMalloc(&d_blue_dst, srcSize * sizeof(unsigned char)));
  checkCudaErrors(hipMalloc(&d_green_dst, srcSize * sizeof(unsigned char)));

  hipLaunchKernelGGL(( seperateRGB), dim3(block_dim), dim3(thread_dim), 0, 0, d_sourceImg, d_destImg, d_red_src, d_blue_src, d_green_src, d_red_dst, d_blue_dst, d_green_dst, numColsSource, numRowsSource);
  //hipDeviceSynchronize();
  checkCudaErrors(hipGetLastError());

  // pull the split channels back to the host (compute_G runs serially below)
  checkCudaErrors(hipMemcpy(&t_red_src, d_red_src, cpySize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(&t_blue_src, d_blue_src, cpySize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(&t_green_src, d_green_src, cpySize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(&t_red_dst, d_red_dst, cpySize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(&t_blue_dst, d_blue_dst, cpySize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(&t_green_dst, d_green_dst, cpySize, hipMemcpyDeviceToHost));

  // next we'll precompute the g term - it never changes, no need to recompute every iteration
  float *g_red   = new float[srcSize];
  float *g_blue  = new float[srcSize];
  float *g_green = new float[srcSize];
  memset(g_red, 0, srcSize * sizeof(float));
  memset(g_blue, 0, srcSize * sizeof(float));
  memset(g_green, 0, srcSize * sizeof(float));

  size_t floatSize = sizeof(float)*srcSize;
  float *d_g_red;
  float *d_g_blue;
  float *d_g_green;
  checkCudaErrors(hipMalloc(&d_g_red, floatSize));
  checkCudaErrors(hipMalloc(&d_g_blue, floatSize));
  checkCudaErrors(hipMalloc(&d_g_green, floatSize));
  checkCudaErrors(hipMemcpy(d_g_red, g_red, floatSize, hipMemcpyHostToDevice));
  checkCudaErrors(hipMemcpy(d_g_blue, g_blue, floatSize, hipMemcpyHostToDevice));
  checkCudaErrors(hipMemcpy(d_g_green, g_green, floatSize, hipMemcpyHostToDevice));

  // 1D launch config for the Jacobi kernel: one thread per interior pixel
  int blockSize = 28;
  dim3 jacobiBlock(blockSize);
  dim3 jacobiThread(ceil(listSize/blockSize)+1);

  // comp_G<<<block_dim, thread_dim>>>(d_red_src, d_g_red, numColsSource, listSize, transferList);
  // hipDeviceSynchronize();
  checkCudaErrors(hipGetLastError());

  // serial g-term computation (GPU version above is left commented out)
  compute_G(t_red_src, g_red, numColsSource, interiorPixelList);
  compute_G(t_blue_src, g_blue, numColsSource, interiorPixelList);
  compute_G(t_green_src, g_green, numColsSource, interiorPixelList);

  // for each color channel we'll need two buffers and we'll ping-pong between them
  // NOTE(review): these six host arrays are never used after this point and
  // are never deleted (memory leak); only the t_* copies below are used.
  float *blendedValsRed_1   = new float[srcSize];
  float *blendedValsRed_2   = new float[srcSize];
  float *blendedValsBlue_1  = new float[srcSize];
  float *blendedValsBlue_2  = new float[srcSize];
  float *blendedValsGreen_1 = new float[srcSize];
  float *blendedValsGreen_2 = new float[srcSize];

  // host staging buffers for the device ping-pong results
  float *t_blendedValsRed_1   = new float[srcSize];
  float *t_blendedValsRed_2   = new float[srcSize];
  float *t_blendedValsBlue_1  = new float[srcSize];
  float *t_blendedValsBlue_2  = new float[srcSize];
  float *t_blendedValsGreen_1 = new float[srcSize];
  float *t_blendedValsGreen_2 = new float[srcSize];

  float *d_blendedValsRed_1;
  float *d_blendedValsRed_2;
  float *d_blendedValsBlue_1;
  float *d_blendedValsBlue_2;
  float *d_blendedValsGreen_1;
  float *d_blendedValsGreen_2;
  checkCudaErrors(hipMalloc(&d_blendedValsRed_1, floatSize));
  checkCudaErrors(hipMalloc(&d_blendedValsRed_2, floatSize));
  checkCudaErrors(hipMalloc(&d_blendedValsBlue_1, floatSize));
  checkCudaErrors(hipMalloc(&d_blendedValsBlue_2, floatSize));
  checkCudaErrors(hipMalloc(&d_blendedValsGreen_1, floatSize));
  checkCudaErrors(hipMalloc(&d_blendedValsGreen_2, floatSize));

  // seed both ping-pong buffers of every channel with the source values
  hipLaunchKernelGGL(( addToBlended), dim3(block_dim), dim3(thread_dim), 0, 0, d_blendedValsRed_1, d_blendedValsRed_2, d_blendedValsBlue_1, d_blendedValsBlue_2, d_blendedValsGreen_1, d_blendedValsGreen_2, d_red_src, d_blue_src, d_green_src, numColsSource, numRowsSource);
  //hipDeviceSynchronize();
  checkCudaErrors(hipGetLastError());

  checkCudaErrors(hipMemcpy(t_blendedValsRed_1, d_blendedValsRed_1, floatSize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(t_blendedValsRed_2, d_blendedValsRed_2, floatSize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(t_blendedValsBlue_1, d_blendedValsBlue_1, floatSize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(t_blendedValsBlue_2, d_blendedValsBlue_2, floatSize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(t_blendedValsGreen_1, d_blendedValsGreen_1, floatSize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(t_blendedValsGreen_2, d_blendedValsGreen_2, floatSize, hipMemcpyDeviceToHost));

  // upload the serially-computed g terms (overwrites the zeros sent earlier)
  checkCudaErrors(hipMemcpy(d_g_red, g_red, floatSize, hipMemcpyHostToDevice));
  checkCudaErrors(hipMemcpy(d_g_blue, g_blue, floatSize, hipMemcpyHostToDevice));
  checkCudaErrors(hipMemcpy(d_g_green, g_green, floatSize, hipMemcpyHostToDevice));

  // 800 Jacobi iterations per channel, ping-ponging the device buffer pointers
  int eightHun = 800;
  for (int i = 0; i < eightHun; i++) {
    // kernel launch for red channel
    hipLaunchKernelGGL(( jacobi), dim3(jacobiBlock), dim3(jacobiThread), 0, 0, d_red_dst, d_strictInteriorPixels, d_borderPixels, d_interiorPixelList, numColsSource, d_blendedValsRed_1, d_g_red, d_blendedValsRed_2, listSize);
    // hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
    std::swap(d_blendedValsRed_1, d_blendedValsRed_2);

    // kernel launch for blue channel
    hipLaunchKernelGGL(( jacobi), dim3(jacobiBlock), dim3(jacobiThread), 0, 0, d_blue_dst, d_strictInteriorPixels, d_borderPixels, d_interiorPixelList, numColsSource, d_blendedValsBlue_1, d_g_blue, d_blendedValsBlue_2, listSize);
    // hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
    std::swap(d_blendedValsBlue_1, d_blendedValsBlue_2);

    // kernel launch for green channel
    hipLaunchKernelGGL(( jacobi), dim3(jacobiBlock), dim3(jacobiThread), 0, 0, d_green_dst, d_strictInteriorPixels, d_borderPixels, d_interiorPixelList, numColsSource, d_blendedValsGreen_1, d_g_green, d_blendedValsGreen_2, listSize);
    // hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
    std::swap(d_blendedValsGreen_1, d_blendedValsGreen_2);
  }

  // copy stuff over and perform the final swap: the copies below intentionally
  // cross the _1/_2 names so that t_*_2 ends up holding the latest iterate
  // (which lives in d_*_1 after the last std::swap)
  checkCudaErrors(hipMemcpy(t_blendedValsRed_1, d_blendedValsRed_2, floatSize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(t_blendedValsRed_2, d_blendedValsRed_1, floatSize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(t_blendedValsBlue_1, d_blendedValsBlue_2, floatSize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(t_blendedValsBlue_2, d_blendedValsBlue_1, floatSize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(t_blendedValsGreen_1, d_blendedValsGreen_2, floatSize, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(t_blendedValsGreen_2, d_blendedValsGreen_1, floatSize, hipMemcpyDeviceToHost));

  // start the output from the destination image, then overwrite the interior
  memcpy(h_blendedImg, h_destImg, sizeof(uchar4) * srcSize);
  checkCudaErrors(hipMemcpy(d_blendedImg, d_destImg, sizeof(uchar4) * srcSize , hipMemcpyDeviceToDevice));

  // copy computed values for the interior into the output
  for (size_t i = 0; i < interiorPixelList.size(); ++i) {
    uint2 coord         = interiorPixelList[i];
    unsigned int offset = coord.x * numColsSource + coord.y;

    h_blendedImg[offset].x = t_blendedValsRed_2[offset];
    h_blendedImg[offset].y = t_blendedValsBlue_2[offset];
    h_blendedImg[offset].z = t_blendedValsGreen_2[offset];
  }

  delete[] g_red;
  delete[] g_blue;
  delete[] g_green;

  delete[] t_blendedValsRed_1;
  delete[] t_blendedValsRed_2;
  delete[] t_blendedValsBlue_1 ;
  delete[] t_blendedValsBlue_2 ;
  delete[] t_blendedValsGreen_1 ;
  delete[] t_blendedValsGreen_2 ;
}
5456c03f62289e3585def7db06d4b76fa8833113.cu
//Udacity HW 6 //Poisson Blending #include "utils.h" #include <thrust/host_vector.h> #include "reference_calc.cpp" #include <float.h> #include <math.h> #include <stdio.h> //pre-compute the values of g, which depend only the source image //and aren't iteration dependent. void compute_G(const unsigned char* const channel, float* const g, const size_t numColsSource, const std::vector<uint2>& interiorPixelList) { for (size_t i = 0; i < interiorPixelList.size(); ++i) { uint2 coord = interiorPixelList[i]; unsigned int offset = coord.x * numColsSource + coord.y; float sum = 4.f * channel[offset]; sum -= (float)channel[offset - 1] + (float)channel[offset + 1]; sum -= (float)channel[offset + numColsSource] + (float)channel[offset - numColsSource]; g[offset] = sum; } } __global__ void addToBlended(float * blendedValsRed_1, float * blendedValsRed_2, float * blendedValsBlue_1, float * blendedValsBlue_2, float * blendedValsGreen_1, float * blendedValsGreen_2, unsigned char* d_red_src, unsigned char* d_blue_src, unsigned char* d_green_src, const size_t numCols, const size_t numRows) { const size_t srcSize = numCols * numRows; int main_x = threadIdx.x + blockDim.x * blockIdx.x; int main_y = threadIdx.y + blockDim.y * blockIdx.y; int i = main_x + main_y * numCols; if (i < srcSize) { blendedValsRed_1[i] = d_red_src[i]; blendedValsRed_2[i] = d_red_src[i]; blendedValsBlue_1[i] = d_blue_src[i]; blendedValsBlue_2[i] = d_blue_src[i]; blendedValsGreen_1[i] = d_green_src[i]; blendedValsGreen_2[i] = d_green_src[i]; } } __device__ bool isMasked(uchar4 val) { return (val.x < 255 || val.y < 255 || val.z < 255); } __global__ void getMask(unsigned char * d_mask, uchar4 * d_sourceImg, const size_t numCols, const size_t numRows ) { const size_t srcSize = numCols * numRows; int main_x = threadIdx.x + blockDim.x * blockIdx.x; int main_y = threadIdx.y + blockDim.y * blockIdx.y; int main_id = main_x + main_y * numCols; if (main_id >= srcSize) return; d_mask[main_id] = (d_sourceImg[main_id].x + 
d_sourceImg[main_id].y + d_sourceImg[main_id].z < 3 * 255) ? 1 : 0; } __global__ void findBorderPixels(unsigned char * d_mask, unsigned char * d_borderPixels, unsigned char * d_strictInteriorPixels, const size_t numCols) { int main_x = threadIdx.x + blockDim.x * blockIdx.x; int main_y = threadIdx.y + blockDim.y * blockIdx.y; int main_id = main_x + main_y * numCols; int right = (main_x + 1) + main_y * numCols; int left = (main_x - 1) + main_y * numCols; int up = main_x + (main_y + 1) * numCols; int down = main_x + (main_y - 1) * numCols; // __syncthreads(); //now we need to check the four pixels north south east west to see if they are in the mask or Not if (d_mask[main_id] ==1) { int isInside = 0; if (d_mask[left] ==1) isInside++; if (d_mask[right] ==1) isInside++; if (d_mask[up] ==1) isInside++; if (d_mask[down] ==1) isInside++; if (isInside == 4) { d_strictInteriorPixels[main_id] = 1; } else if (isInside > 0) { d_borderPixels[main_id] = 1; } } } __global__ void seperateRGB( uchar4 * d_sourceImg, uchar4 * d_destImg, unsigned char * red_src, unsigned char* blue_src, unsigned char* green_src, unsigned char* red_dst, unsigned char* blue_dst, unsigned char* green_dst, const size_t numCols, const size_t numRows) { int main_x = threadIdx.x + blockDim.x * blockIdx.x; int main_y = threadIdx.y + blockDim.y * blockIdx.y; int main_id = main_x + main_y * numCols; red_src[main_id] = d_sourceImg[main_id].x; blue_src[main_id] = d_sourceImg[main_id].y; green_src[main_id] = d_sourceImg[main_id].z; red_dst[main_id] = d_destImg[main_id].x; blue_dst[main_id] = d_destImg[main_id].y; green_dst[main_id] = d_destImg[main_id].z; } //jocobi kernel __global__ void jacobi( unsigned char* const dstImg, unsigned char* const strictInteriorPixels, unsigned char* const borderPixels, uint2 * interiorPixelList, const size_t numColsSource, float* f, float* g, float* const f_next, int listSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; int zero = 0; if (i < listSize) { float blendedSum = 0.f; 
float borderSum = 0.f; uint2 coord = interiorPixelList[i]; unsigned int offset = coord.x * numColsSource + coord.y; //process all 4 neighbor pixels //for each pixel if it is an interior pixel //then we add the previous f, otherwise if it is a //border pixel then we add the value of the destination //image at the border. These border values are our boundary //conditions. if (strictInteriorPixels[offset - 1]) { blendedSum += f[offset - 1]; } else { borderSum += dstImg[offset - 1]; } if (strictInteriorPixels[offset + 1]) { blendedSum += f[offset + 1]; } else { borderSum += dstImg[offset + 1]; } if (strictInteriorPixels[offset - numColsSource]) { blendedSum += f[offset - numColsSource]; } else { borderSum += dstImg[offset - numColsSource]; } if (strictInteriorPixels[offset + numColsSource]) { blendedSum += f[offset + numColsSource]; } else { borderSum += dstImg[offset + numColsSource]; } float f_next_val = (blendedSum + borderSum + g[offset]) / 4.f; f_next[offset] = min(255.f, max(0.f, f_next_val)); //clip to [0, 255] } } void your_blend(const uchar4* const h_sourceImg, //IN const size_t numRowsSource, const size_t numColsSource, const uchar4* const h_destImg, //IN uchar4* const h_blendedImg) //OUT { size_t srcSize = numRowsSource * numColsSource; //cuaMalloc all a mask array and aray of boarder and interior items unsigned char * d_mask; unsigned char * d_borderPixels; unsigned char * d_strictInteriorPixels; //some test host variables unsigned char test_mask[srcSize]; unsigned char test_strinct_interior[srcSize]; unsigned char test_borderpixel[srcSize]; uchar4 * d_sourceImg; uchar4 * d_destImg; uchar4 * d_blendedImg; checkCudaErrors(cudaMalloc(&d_mask, srcSize * sizeof(unsigned char))); checkCudaErrors(cudaMalloc(&d_borderPixels, srcSize * sizeof(unsigned char))); checkCudaErrors(cudaMalloc(&d_strictInteriorPixels, srcSize * sizeof(unsigned char))); checkCudaErrors(cudaMemset(d_borderPixels, 0, srcSize * sizeof(unsigned char))); 
checkCudaErrors(cudaMemset(d_strictInteriorPixels, 0, srcSize * sizeof(unsigned char))); checkCudaErrors(cudaMalloc(&d_sourceImg, srcSize * sizeof(uchar4))); checkCudaErrors(cudaMalloc(&d_destImg, srcSize * sizeof(uchar4))); checkCudaErrors(cudaMalloc(&d_blendedImg, srcSize * sizeof(uchar4))); checkCudaErrors(cudaMemcpy(d_sourceImg, h_sourceImg, (sizeof(uchar4) * srcSize), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_destImg, h_destImg, (sizeof(uchar4) * srcSize), cudaMemcpyHostToDevice)); int BLOCKS = 32; dim3 block_dim(BLOCKS, BLOCKS); dim3 thread_dim(ceil(numColsSource/block_dim.x)+1, ceil(numRowsSource/block_dim.y)+1); getMask<<<block_dim, thread_dim>>>(d_mask, d_sourceImg, numColsSource, numRowsSource); //cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); size_t cpySize = sizeof(unsigned char) * srcSize; checkCudaErrors(cudaMemcpy(&test_mask, d_mask, cpySize, cudaMemcpyDeviceToHost)); findBorderPixels<<<block_dim, thread_dim>>>(d_mask, d_borderPixels, d_strictInteriorPixels, numColsSource); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(&test_borderpixel, d_borderPixels, cpySize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&test_strinct_interior, d_strictInteriorPixels, cpySize, cudaMemcpyDeviceToHost)); //this whole bit is still needed for a later part of the serial implemnintaion. std::vector<uint2> interiorPixelList; //the source region in the homework isn't near an image boundary, so we can //simplify the conditionals a little... 
for (size_t r = 1; r < numRowsSource - 1; ++r) { for (size_t c = 1; c < numColsSource - 1; ++c) { if (test_mask[r * numColsSource + c]) { if (test_mask[(r -1) * numColsSource + c] && test_mask[(r + 1) * numColsSource + c] && test_mask[r * numColsSource + c - 1] && test_mask[r * numColsSource + c + 1]) { interiorPixelList.push_back(make_uint2(r, c)); } } }} int listSize = interiorPixelList.size(); uint2 transferList[listSize]; for (size_t i = 0; i < interiorPixelList.size(); ++i) { transferList[i] = interiorPixelList[i]; } uint2 * d_interiorPixelList; checkCudaErrors(cudaMalloc(&d_interiorPixelList, (listSize * sizeof(uint2)))); checkCudaErrors(cudaMemcpy(d_interiorPixelList, transferList, (listSize * sizeof(uint2)), cudaMemcpyHostToDevice)); //serial get mask for loop //split the source and destination images into their respective //channels unsigned char t_red_src[srcSize]; unsigned char t_blue_src[srcSize]; unsigned char t_green_src[srcSize]; unsigned char t_red_dst[srcSize]; unsigned char t_blue_dst[srcSize]; unsigned char t_green_dst[srcSize]; unsigned char* d_red_src; unsigned char* d_blue_src; unsigned char* d_green_src; unsigned char* d_red_dst; unsigned char* d_blue_dst; unsigned char* d_green_dst; checkCudaErrors(cudaMalloc(&d_red_src, srcSize * sizeof(unsigned char))); checkCudaErrors(cudaMalloc(&d_blue_src, srcSize * sizeof(unsigned char))); checkCudaErrors(cudaMalloc(&d_green_src, srcSize * sizeof(unsigned char))); checkCudaErrors(cudaMalloc(&d_red_dst, srcSize * sizeof(unsigned char))); checkCudaErrors(cudaMalloc(&d_blue_dst, srcSize * sizeof(unsigned char))); checkCudaErrors(cudaMalloc(&d_green_dst, srcSize * sizeof(unsigned char))); seperateRGB<<<block_dim, thread_dim>>>(d_sourceImg, d_destImg, d_red_src, d_blue_src, d_green_src, d_red_dst, d_blue_dst, d_green_dst, numColsSource, numRowsSource); //cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(&t_red_src, d_red_src, cpySize, cudaMemcpyDeviceToHost)); 
checkCudaErrors(cudaMemcpy(&t_blue_src, d_blue_src, cpySize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&t_green_src, d_green_src, cpySize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&t_red_dst, d_red_dst, cpySize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&t_blue_dst, d_blue_dst, cpySize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&t_green_dst, d_green_dst, cpySize, cudaMemcpyDeviceToHost)); //next we'll precompute the g term - it never changes, no need to recompute every iteration float *g_red = new float[srcSize]; float *g_blue = new float[srcSize]; float *g_green = new float[srcSize]; memset(g_red, 0, srcSize * sizeof(float)); memset(g_blue, 0, srcSize * sizeof(float)); memset(g_green, 0, srcSize * sizeof(float)); size_t floatSize = sizeof(float)*srcSize; float *d_g_red; float *d_g_blue; float *d_g_green; checkCudaErrors(cudaMalloc(&d_g_red, floatSize)); checkCudaErrors(cudaMalloc(&d_g_blue, floatSize)); checkCudaErrors(cudaMalloc(&d_g_green, floatSize)); checkCudaErrors(cudaMemcpy(d_g_red, g_red, floatSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_g_blue, g_blue, floatSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_g_green, g_green, floatSize, cudaMemcpyHostToDevice)); int blockSize = 28; dim3 jacobiBlock(blockSize); dim3 jacobiThread(ceil(listSize/blockSize)+1); // comp_G<<<block_dim, thread_dim>>>(d_red_src, d_g_red, numColsSource, listSize, transferList); // cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); compute_G(t_red_src, g_red, numColsSource, interiorPixelList); compute_G(t_blue_src, g_blue, numColsSource, interiorPixelList); compute_G(t_green_src, g_green, numColsSource, interiorPixelList); //for each color channel we'll need two buffers and we'll ping-pong between them float *blendedValsRed_1 = new float[srcSize]; float *blendedValsRed_2 = new float[srcSize]; float *blendedValsBlue_1 = new float[srcSize]; float *blendedValsBlue_2 = new float[srcSize]; float 
*blendedValsGreen_1 = new float[srcSize]; float *blendedValsGreen_2 = new float[srcSize]; //test stuff float *t_blendedValsRed_1 = new float[srcSize]; float *t_blendedValsRed_2 = new float[srcSize]; float *t_blendedValsBlue_1 = new float[srcSize]; float *t_blendedValsBlue_2 = new float[srcSize]; float *t_blendedValsGreen_1 = new float[srcSize]; float *t_blendedValsGreen_2 = new float[srcSize]; float *d_blendedValsRed_1; float *d_blendedValsRed_2; float *d_blendedValsBlue_1; float *d_blendedValsBlue_2; float *d_blendedValsGreen_1; float *d_blendedValsGreen_2; checkCudaErrors(cudaMalloc(&d_blendedValsRed_1, floatSize)); checkCudaErrors(cudaMalloc(&d_blendedValsRed_2, floatSize)); checkCudaErrors(cudaMalloc(&d_blendedValsBlue_1, floatSize)); checkCudaErrors(cudaMalloc(&d_blendedValsBlue_2, floatSize)); checkCudaErrors(cudaMalloc(&d_blendedValsGreen_1, floatSize)); checkCudaErrors(cudaMalloc(&d_blendedValsGreen_2, floatSize)); addToBlended<<<block_dim, thread_dim>>>(d_blendedValsRed_1, d_blendedValsRed_2, d_blendedValsBlue_1, d_blendedValsBlue_2, d_blendedValsGreen_1, d_blendedValsGreen_2, d_red_src, d_blue_src, d_green_src, numColsSource, numRowsSource); //cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(t_blendedValsRed_1, d_blendedValsRed_1, floatSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(t_blendedValsRed_2, d_blendedValsRed_2, floatSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(t_blendedValsBlue_1, d_blendedValsBlue_1, floatSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(t_blendedValsBlue_2, d_blendedValsBlue_2, floatSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(t_blendedValsGreen_1, d_blendedValsGreen_1, floatSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(t_blendedValsGreen_2, d_blendedValsGreen_2, floatSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(d_g_red, g_red, floatSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_g_blue, g_blue, 
floatSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_g_green, g_green, floatSize, cudaMemcpyHostToDevice)); int eightHun = 800; for (int i = 0; i < eightHun; i++) { //kernel launch for red channel jacobi<<<jacobiBlock, jacobiThread>>>(d_red_dst, d_strictInteriorPixels, d_borderPixels, d_interiorPixelList, numColsSource, d_blendedValsRed_1, d_g_red, d_blendedValsRed_2, listSize); // cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); std::swap(d_blendedValsRed_1, d_blendedValsRed_2); //kernel launch for red channel jacobi<<<jacobiBlock, jacobiThread>>>(d_blue_dst, d_strictInteriorPixels, d_borderPixels, d_interiorPixelList, numColsSource, d_blendedValsBlue_1, d_g_blue, d_blendedValsBlue_2, listSize); // cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); std::swap(d_blendedValsBlue_1, d_blendedValsBlue_2); jacobi<<<jacobiBlock, jacobiThread>>>(d_green_dst, d_strictInteriorPixels, d_borderPixels, d_interiorPixelList, numColsSource, d_blendedValsGreen_1, d_g_green, d_blendedValsGreen_2, listSize); // cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); std::swap(d_blendedValsGreen_1, d_blendedValsGreen_2); } //copy stuff over and perform the final swap not going to save anyting but kinda clever checkCudaErrors(cudaMemcpy(t_blendedValsRed_1, d_blendedValsRed_2, floatSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(t_blendedValsRed_2, d_blendedValsRed_1, floatSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(t_blendedValsBlue_1, d_blendedValsBlue_2, floatSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(t_blendedValsBlue_2, d_blendedValsBlue_1, floatSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(t_blendedValsGreen_1, d_blendedValsGreen_2, floatSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(t_blendedValsGreen_2, d_blendedValsGreen_1, floatSize, cudaMemcpyDeviceToHost)); memcpy(h_blendedImg, h_destImg, sizeof(uchar4) * srcSize); checkCudaErrors(cudaMemcpy(d_blendedImg, d_destImg, 
sizeof(uchar4) * srcSize , cudaMemcpyDeviceToDevice)); //copy computed values for the interior into the output for (size_t i = 0; i < interiorPixelList.size(); ++i) { uint2 coord = interiorPixelList[i]; unsigned int offset = coord.x * numColsSource + coord.y; h_blendedImg[offset].x = t_blendedValsRed_2[offset]; h_blendedImg[offset].y = t_blendedValsBlue_2[offset]; h_blendedImg[offset].z = t_blendedValsGreen_2[offset]; } delete[] g_red; delete[] g_blue; delete[] g_green; //test stuff delete[] t_blendedValsRed_1; delete[] t_blendedValsRed_2; delete[] t_blendedValsBlue_1 ; delete[] t_blendedValsBlue_2 ; delete[] t_blendedValsGreen_1 ; delete[] t_blendedValsGreen_2 ; }
a2c1eaad8ceb03b3b3aa69f86547976489e8e88e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void g_FullConnectWgrad(float* wgrad, float* w, int len, float lambda, int batch) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int id = i + blockDim.x * blockIdx.x + threadIdx.x; if(id < len) { if(fabs(lambda) < 1e-10) wgrad[id] = wgrad[id] / batch /** dropM[id]*/; else wgrad[id] = (wgrad[id] / batch + lambda * w[id]) /** dropM[id]*/; } } }
a2c1eaad8ceb03b3b3aa69f86547976489e8e88e.cu
#include "includes.h" __global__ void g_FullConnectWgrad(float* wgrad, float* w, int len, float lambda, int batch) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int id = i + blockDim.x * blockIdx.x + threadIdx.x; if(id < len) { if(fabs(lambda) < 1e-10) wgrad[id] = wgrad[id] / batch /** dropM[id]*/; else wgrad[id] = (wgrad[id] / batch + lambda * w[id]) /** dropM[id]*/; } } }
ebd3ecf9f5b272291873fa93e33018e8c11b81b5.hip
// !!! This is a file automatically generated by hipify!!! #pragma once #include <stdio.h> #include <iostream> #include <assert.h> #include <hip/hip_runtime.h> #include "core/pack/Pack.h" #include "core/pack/GetInds.h" #include "core/pack/GetDims.h" #include "core/utils/CudaErrorCheck.cu" #include "core/utils/CudaSizes.h" #include "core/utils/TypesUtils.h" namespace keops { template<typename TYPE, class FUN> __global__ void GpuConv1DOnDevice(FUN fun, int nx, int ny, TYPE **px, TYPE **py, TYPE **pp) { // get the index of the current thread int i = blockIdx.x * blockDim.x + threadIdx.x; // declare shared mem extern __shared__ TYPE yj[]; // get templated dimensions : typedef typename FUN::DIMSX DIMSX; // DIMSX is a "vector" of templates giving dimensions of xi variables typedef typename FUN::DIMSY DIMSY; // DIMSY is a "vector" of templates giving dimensions of yj variables typedef typename FUN::DIMSP DIMSP; // DIMSP is a "vector" of templates giving dimensions of parameters variables const int DIMX = DIMSX::SUM; // DIMX is sum of dimensions for xi variables const int DIMY = DIMSY::SUM; // DIMY is sum of dimensions for yj variables const int DIMP = DIMSP::SUM; // DIMP is sum of dimensions for parameters variables const int DIMOUT = FUN::DIM; // dimension of output variable const int DIMRED = FUN::DIMRED; // dimension of reduction operation const int DIMFOUT = DIMSX::FIRST; // DIMFOUT is dimension of output variable of inner function // load parameter(s) TYPE param_loc[DIMP < 1 ? 1 : DIMP]; load<DIMSP>(0, param_loc, pp); // load parameters variables from global memory to local thread memory // get the value of variable (index with i) TYPE xi[DIMX < 1 ? 
1 : DIMX]; __TYPEACC__ acc[DIMRED]; #if SUM_SCHEME == BLOCK_SUM // additional tmp vector to store intermediate results from each block TYPE tmp[DIMRED]; #elif SUM_SCHEME == KAHAN_SCHEME // additional tmp vector to accumulate errors const int DIM_KAHAN = FUN::template KahanScheme<__TYPEACC__,TYPE>::DIMACC; TYPE tmp[DIM_KAHAN]; #endif if (i < nx) { typename FUN::template InitializeReduction<__TYPEACC__, TYPE >()(acc); // acc = 0 #if SUM_SCHEME == KAHAN_SCHEME VectAssign<DIM_KAHAN>(tmp,0.0f); #endif load<typename DIMSX::NEXT>(i, xi + DIMFOUT, px + 1); // load xi variables from global memory to local thread memory } for (int jstart = 0, tile = 0; jstart < ny; jstart += blockDim.x, tile++) { // get the current column int j = tile * blockDim.x + threadIdx.x; if (j < ny) { // we load yj from device global memory only if j<ny load<DIMSY>(j, yj + threadIdx.x * DIMY, py); // load yj variables from global memory to shared memory } __syncthreads(); if (i < nx) { // we compute x1i only if needed TYPE * yjrel = yj; // Loop on the columns of the current block. 
#if SUM_SCHEME == BLOCK_SUM typename FUN::template InitializeReduction<TYPE,TYPE>()(tmp); // tmp = 0 #endif for (int jrel = 0; (jrel < blockDim.x) && (jrel < ny - jstart); jrel++, yjrel += DIMY) { call<DIMSX, DIMSY, DIMSP>(fun, xi, yjrel, param_loc); // Call the function, which outputs results in xi[0:DIMX1] #if SUM_SCHEME == BLOCK_SUM #if USE_HALF int ind = jrel + tile * blockDim.x; typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, xi, __floats2half2_rn(2*ind,2*ind+1)); // tmp += xi #else typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, xi, jrel + tile * blockDim.x); // tmp += xi #endif #elif SUM_SCHEME == KAHAN_SCHEME typename FUN::template KahanScheme<__TYPEACC__,TYPE>()(acc, xi, tmp); #else #if USE_HALF int ind = jrel + tile * blockDim.x; typename FUN::template ReducePairShort<__TYPEACC__,TYPE>()(acc, xi, __floats2half2_rn(2*ind,2*ind+1)); // acc += xi #else typename FUN::template ReducePairShort<__TYPEACC__,TYPE>()(acc, xi, jrel + tile * blockDim.x); // acc += xi #endif #endif } #if SUM_SCHEME == BLOCK_SUM typename FUN::template ReducePair<__TYPEACC__,TYPE>()(acc, tmp); // acc += tmp #endif } __syncthreads(); } if (i < nx) { typename FUN::template FinalizeOutput<__TYPEACC__,TYPE>()(acc, px[0] + i * DIMOUT, px, i); } } struct GpuConv1D_FromHost { template<typename TYPE, class FUN> static int Eval_(FUN fun, int nx, int ny, TYPE **px_h, TYPE **py_h, TYPE **pp_h) { typedef typename FUN::DIMSX DIMSX; typedef typename FUN::DIMSY DIMSY; typedef typename FUN::DIMSP DIMSP; const int DIMX = DIMSX::SUM; const int DIMY = DIMSY::SUM; const int DIMP = DIMSP::SUM; const int DIMOUT = FUN::DIM; // dimension of output variable const int DIMFOUT = DIMSX::FIRST; // DIMFOUT is dimension of output variable of inner function const int SIZEI = DIMSX::SIZE; const int SIZEJ = DIMSY::SIZE; const int SIZEP = DIMSP::SIZE; // pointers to device data TYPE *x_d, *y_d, *param_d; // device arrays of pointers to device data TYPE **px_d, **py_d, **pp_d; // single hipMalloc void 
**p_data; CudaSafeCall(hipMalloc((void **) &p_data, sizeof(TYPE *) * (SIZEI + SIZEJ + SIZEP) + sizeof(TYPE) * (DIMP + nx * (DIMX - DIMFOUT + DIMOUT) + ny * DIMY))); TYPE **p_data_a = (TYPE **) p_data; px_d = p_data_a; p_data_a += SIZEI; py_d = p_data_a; p_data_a += SIZEJ; pp_d = p_data_a; p_data_a += SIZEP; TYPE *p_data_b = (TYPE *) p_data_a; param_d = p_data_b; p_data_b += DIMP; x_d = p_data_b; p_data_b += nx * (DIMX - DIMFOUT + DIMOUT); y_d = p_data_b; // host arrays of pointers to device data TYPE *phx_d[SIZEI]; TYPE *phy_d[SIZEJ]; TYPE *php_d[SIZEP]; int nvals; // if DIMSP is empty (i.e. no parameter), nvals = -1 which could result in a segfault if (SIZEP > 0) { php_d[0] = param_d; nvals = DIMSP::VAL(0); CudaSafeCall(hipMemcpy(php_d[0], pp_h[0], sizeof(TYPE) * nvals, hipMemcpyHostToDevice)); for (int k = 1; k < SIZEP; k++) { php_d[k] = php_d[k - 1] + nvals; nvals = DIMSP::VAL(k); CudaSafeCall(hipMemcpy(php_d[k], pp_h[k], sizeof(TYPE) * nvals, hipMemcpyHostToDevice)); } } phx_d[0] = x_d; nvals = nx * DIMOUT; for (int k = 1; k < SIZEI; k++) { phx_d[k] = phx_d[k - 1] + nvals; nvals = nx * DIMSX::VAL(k); CudaSafeCall(hipMemcpy(phx_d[k], px_h[k], sizeof(TYPE) * nvals, hipMemcpyHostToDevice)); } // if DIMSY is empty (i.e. 
no Vj variable), nvals = -1 which could result in a segfault if (SIZEJ > 0) { phy_d[0] = y_d; nvals = ny * DIMSY::VAL(0); CudaSafeCall(hipMemcpy(phy_d[0], py_h[0], sizeof(TYPE) * nvals, hipMemcpyHostToDevice)); for (int k = 1; k < SIZEJ; k++) { phy_d[k] = phy_d[k - 1] + nvals; nvals = ny * (int) DIMSY::VAL(k); CudaSafeCall(hipMemcpy(phy_d[k], py_h[k], sizeof(TYPE) * nvals, hipMemcpyHostToDevice)); } } // copy arrays of pointers CudaSafeCall(hipMemcpy(pp_d, php_d, SIZEP * sizeof(TYPE *), hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(px_d, phx_d, SIZEI * sizeof(TYPE *), hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(py_d, phy_d, SIZEJ * sizeof(TYPE *), hipMemcpyHostToDevice)); // Compute on device : grid and block are both 1d int dev = -1; CudaSafeCall(hipGetDevice(&dev)); dim3 blockSize; SetGpuProps(dev); // warning : blockSize.x was previously set to CUDA_BLOCK_SIZE; currently CUDA_BLOCK_SIZE value is used as a bound. blockSize.x = ::::min(CUDA_BLOCK_SIZE, ::::min(maxThreadsPerBlock, (int) (sharedMemPerBlock / ::::max(1, (int) (DIMY * sizeof(TYPE)))))); // number of threads in each block dim3 gridSize; gridSize.x = nx / blockSize.x + (nx % blockSize.x == 0 ? 0 : 1); // Size of the SharedData : blockSize.x*(DIMY)*sizeof(TYPE) GpuConv1DOnDevice<TYPE> << < gridSize, blockSize, blockSize.x * (DIMY) * sizeof(TYPE) >> > (fun, nx, ny, px_d, py_d, pp_d); // block until the device has completed CudaSafeCall(hipDeviceSynchronize()); CudaCheckError(); // Send data from device to host. CudaSafeCall(hipMemcpy(*px_h, x_d, sizeof(TYPE) * (nx * DIMOUT), hipMemcpyDeviceToHost)); // Free memory. CudaSafeCall(hipFree(p_data)); return 0; } // and use getlist to enroll them into "pointers arrays" px and py. template<typename TYPE, class FUN, typename... Args> static int Eval(FUN fun, int nx, int ny, int device_id, TYPE *x1_h, Args... 
args) { if (device_id != -1) CudaSafeCall(hipSetDevice(device_id)); typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; const int SIZEI = VARSI::SIZE + 1; const int SIZEJ = VARSJ::SIZE; const int SIZEP = VARSP::SIZE; using DIMSX = GetDims<VARSI>; using DIMSY = GetDims<VARSJ>; using DIMSP = GetDims<VARSP>; using INDSI = GetInds<VARSI>; using INDSJ = GetInds<VARSJ>; using INDSP = GetInds<VARSP>; TYPE *px_h[SIZEI]; TYPE *py_h[SIZEJ]; TYPE *pp_h[SIZEP]; px_h[0] = x1_h; getlist<INDSI>(px_h + 1, args...); getlist<INDSJ>(py_h, args...); getlist<INDSP>(pp_h, args...); return Eval_(fun, nx, ny, px_h, py_h, pp_h); } // same without the device_id argument template<typename TYPE, class FUN, typename... Args> static int Eval(FUN fun, int nx, int ny, TYPE *x1_h, Args... args) { return Eval(fun, nx, ny, -1, x1_h, args...); } // Idem, but with args given as an array of arrays, instead of an explicit list of arrays template<typename TYPE, class FUN> static int Eval(FUN fun, int nx, int ny, TYPE *x1_h, TYPE **args, int device_id = -1) { // We set the GPU device on which computations will be performed if (device_id != -1) CudaSafeCall(hipSetDevice(device_id)); typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; const int SIZEI = VARSI::SIZE + 1; const int SIZEJ = VARSJ::SIZE; const int SIZEP = VARSP::SIZE; using DIMSX = GetDims<VARSI>; using DIMSY = GetDims<VARSJ>; using DIMSP = GetDims<VARSP>; using INDSI = GetInds<VARSI>; using INDSJ = GetInds<VARSJ>; using INDSP = GetInds<VARSP>; TYPE *px_h[SIZEI]; TYPE *py_h[SIZEJ]; TYPE *pp_h[SIZEP]; px_h[0] = x1_h; for (int i = 1; i < SIZEI; i++) px_h[i] = args[INDSI::VAL(i - 1)]; for (int i = 0; i < SIZEJ; i++) py_h[i] = args[INDSJ::VAL(i)]; for (int i = 0; i < SIZEP; i++) pp_h[i] = args[INDSP::VAL(i)]; return Eval_(fun, nx, ny, px_h, py_h, pp_h); } }; struct GpuConv1D_FromDevice { template<typename TYPE, class FUN> static int Eval_(FUN 
fun, int nx, int ny, TYPE **phx_d, TYPE **phy_d, TYPE **php_d) { typedef typename FUN::DIMSX DIMSX; typedef typename FUN::DIMSY DIMSY; typedef typename FUN::DIMSP DIMSP; const int DIMY = DIMSY::SUM; const int SIZEI = DIMSX::SIZE; const int SIZEJ = DIMSY::SIZE; const int SIZEP = DIMSP::SIZE; // device arrays of pointers to device data TYPE **px_d, **py_d, **pp_d; // single hipMalloc void **p_data; CudaSafeCall(hipMalloc((void **) &p_data, sizeof(TYPE *) * (SIZEI + SIZEJ + SIZEP))); TYPE **p_data_a = (TYPE **) p_data; px_d = p_data_a; p_data_a += SIZEI; py_d = p_data_a; p_data_a += SIZEJ; pp_d = p_data_a; CudaSafeCall(hipMemcpy(px_d, phx_d, SIZEI * sizeof(TYPE *), hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(py_d, phy_d, SIZEJ * sizeof(TYPE *), hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(pp_d, php_d, SIZEP * sizeof(TYPE *), hipMemcpyHostToDevice)); // Compute on device : grid and block are both 1d int dev = -1; CudaSafeCall(hipGetDevice(&dev)); SetGpuProps(dev); dim3 blockSize; // warning : blockSize.x was previously set to CUDA_BLOCK_SIZE; currently CUDA_BLOCK_SIZE value is used as a bound. blockSize.x = ::::min(CUDA_BLOCK_SIZE, ::::min(maxThreadsPerBlock, (int) (sharedMemPerBlock / ::::max(1, (int) (DIMY * sizeof(TYPE)))))); // number of threads in each block dim3 gridSize; gridSize.x = nx / blockSize.x + (nx % blockSize.x == 0 ? 0 : 1); // Size of the SharedData : blockSize.x*(DIMY)*sizeof(TYPE) GpuConv1DOnDevice<TYPE> << < gridSize, blockSize, blockSize.x * (DIMY) * sizeof(TYPE) >> > (fun, nx, ny, px_d, py_d, pp_d); // block until the device has completed CudaSafeCall(hipDeviceSynchronize()); CudaCheckError(); CudaSafeCall(hipFree(p_data)); return 0; } // Same wrappers, but for data located on the device template<typename TYPE, class FUN, typename... Args> static int Eval(FUN fun, int nx, int ny, int device_id, TYPE *x1_d, Args... 
args) { // device_id is provided, so we set the GPU device accordingly // Warning : is has to be consistent with location of data CudaSafeCall(hipSetDevice(device_id)); typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; const int SIZEI = VARSI::SIZE + 1; const int SIZEJ = VARSJ::SIZE; const int SIZEP = VARSP::SIZE; using DIMSX = GetDims<VARSI>; using DIMSY = GetDims<VARSJ>; using DIMSP = GetDims<VARSP>; using INDSI = GetInds<VARSI>; using INDSJ = GetInds<VARSJ>; using INDSP = GetInds<VARSP>; TYPE *phx_d[SIZEI]; TYPE *phy_d[SIZEJ]; TYPE *php_d[SIZEP]; phx_d[0] = x1_d; getlist<INDSI>(phx_d + 1, args...); getlist<INDSJ>(phy_d, args...); getlist<INDSP>(php_d, args...); return Eval_(fun, nx, ny, phx_d, phy_d, php_d); } // same without the device_id argument template<typename TYPE, class FUN, typename... Args> static int Eval(FUN fun, int nx, int ny, TYPE *x1_d, Args... args) { // We set the GPU device on which computations will be performed // to be the GPU on which data is located. // NB. we only check location of x1_d which is the output vector // so we assume that input data is on the same GPU // note : hipPointerGetAttributes has a strange behaviour: // it looks like it makes a copy of the vector on the default GPU device (0) !!! // So we prefer to avoid this and provide directly the device_id as input (first function above) hipPointerAttribute_t attributes; CudaSafeCall(hipPointerGetAttributes(&attributes, x1_d)); return Eval(fun, nx, ny, attributes.device, x1_d, args...); } template<typename TYPE, class FUN> static int Eval(FUN fun, int nx, int ny, TYPE *x1_d, TYPE **args, int device_id = -1) { if (device_id == -1) { // We set the GPU device on which computations will be performed // to be the GPU on which data is located. // NB. 
we only check location of x1_d which is the output vector // so we assume that input data is on the same GPU // note : hipPointerGetAttributes has a strange behaviour: // it looks like it makes a copy of the vector on the default GPU device (0) !!! // So we prefer to avoid this and provide directly the device_id as input (else statement below) hipPointerAttribute_t attributes; CudaSafeCall(hipPointerGetAttributes(&attributes, x1_d)); CudaSafeCall(hipSetDevice(attributes.device)); } else // device_id is provided, so we use it. Warning : is has to be consistent with location of data CudaSafeCall(hipSetDevice(device_id)); typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; const int SIZEI = VARSI::SIZE + 1; const int SIZEJ = VARSJ::SIZE; const int SIZEP = VARSP::SIZE; using DIMSX = GetDims<VARSI>; using DIMSY = GetDims<VARSJ>; using DIMSP = GetDims<VARSP>; using INDSI = GetInds<VARSI>; using INDSJ = GetInds<VARSJ>; using INDSP = GetInds<VARSP>; TYPE *px_d[SIZEI]; TYPE *py_d[SIZEJ]; TYPE *pp_d[SIZEP]; px_d[0] = x1_d; for (int i = 1; i < SIZEI; i++) px_d[i] = args[INDSI::VAL(i - 1)]; for (int i = 0; i < SIZEJ; i++) py_d[i] = args[INDSJ::VAL(i)]; for (int i = 0; i < SIZEP; i++) pp_d[i] = args[INDSP::VAL(i)]; return Eval_(fun, nx, ny, px_d, py_d, pp_d); } }; }
ebd3ecf9f5b272291873fa93e33018e8c11b81b5.cu
#pragma once #include <stdio.h> #include <iostream> #include <assert.h> #include <cuda.h> #include "core/pack/Pack.h" #include "core/pack/GetInds.h" #include "core/pack/GetDims.h" #include "core/utils/CudaErrorCheck.cu" #include "core/utils/CudaSizes.h" #include "core/utils/TypesUtils.h" namespace keops { template<typename TYPE, class FUN> __global__ void GpuConv1DOnDevice(FUN fun, int nx, int ny, TYPE **px, TYPE **py, TYPE **pp) { // get the index of the current thread int i = blockIdx.x * blockDim.x + threadIdx.x; // declare shared mem extern __shared__ TYPE yj[]; // get templated dimensions : typedef typename FUN::DIMSX DIMSX; // DIMSX is a "vector" of templates giving dimensions of xi variables typedef typename FUN::DIMSY DIMSY; // DIMSY is a "vector" of templates giving dimensions of yj variables typedef typename FUN::DIMSP DIMSP; // DIMSP is a "vector" of templates giving dimensions of parameters variables const int DIMX = DIMSX::SUM; // DIMX is sum of dimensions for xi variables const int DIMY = DIMSY::SUM; // DIMY is sum of dimensions for yj variables const int DIMP = DIMSP::SUM; // DIMP is sum of dimensions for parameters variables const int DIMOUT = FUN::DIM; // dimension of output variable const int DIMRED = FUN::DIMRED; // dimension of reduction operation const int DIMFOUT = DIMSX::FIRST; // DIMFOUT is dimension of output variable of inner function // load parameter(s) TYPE param_loc[DIMP < 1 ? 1 : DIMP]; load<DIMSP>(0, param_loc, pp); // load parameters variables from global memory to local thread memory // get the value of variable (index with i) TYPE xi[DIMX < 1 ? 
1 : DIMX]; __TYPEACC__ acc[DIMRED]; #if SUM_SCHEME == BLOCK_SUM // additional tmp vector to store intermediate results from each block TYPE tmp[DIMRED]; #elif SUM_SCHEME == KAHAN_SCHEME // additional tmp vector to accumulate errors const int DIM_KAHAN = FUN::template KahanScheme<__TYPEACC__,TYPE>::DIMACC; TYPE tmp[DIM_KAHAN]; #endif if (i < nx) { typename FUN::template InitializeReduction<__TYPEACC__, TYPE >()(acc); // acc = 0 #if SUM_SCHEME == KAHAN_SCHEME VectAssign<DIM_KAHAN>(tmp,0.0f); #endif load<typename DIMSX::NEXT>(i, xi + DIMFOUT, px + 1); // load xi variables from global memory to local thread memory } for (int jstart = 0, tile = 0; jstart < ny; jstart += blockDim.x, tile++) { // get the current column int j = tile * blockDim.x + threadIdx.x; if (j < ny) { // we load yj from device global memory only if j<ny load<DIMSY>(j, yj + threadIdx.x * DIMY, py); // load yj variables from global memory to shared memory } __syncthreads(); if (i < nx) { // we compute x1i only if needed TYPE * yjrel = yj; // Loop on the columns of the current block. 
#if SUM_SCHEME == BLOCK_SUM typename FUN::template InitializeReduction<TYPE,TYPE>()(tmp); // tmp = 0 #endif for (int jrel = 0; (jrel < blockDim.x) && (jrel < ny - jstart); jrel++, yjrel += DIMY) { call<DIMSX, DIMSY, DIMSP>(fun, xi, yjrel, param_loc); // Call the function, which outputs results in xi[0:DIMX1] #if SUM_SCHEME == BLOCK_SUM #if USE_HALF int ind = jrel + tile * blockDim.x; typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, xi, __floats2half2_rn(2*ind,2*ind+1)); // tmp += xi #else typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, xi, jrel + tile * blockDim.x); // tmp += xi #endif #elif SUM_SCHEME == KAHAN_SCHEME typename FUN::template KahanScheme<__TYPEACC__,TYPE>()(acc, xi, tmp); #else #if USE_HALF int ind = jrel + tile * blockDim.x; typename FUN::template ReducePairShort<__TYPEACC__,TYPE>()(acc, xi, __floats2half2_rn(2*ind,2*ind+1)); // acc += xi #else typename FUN::template ReducePairShort<__TYPEACC__,TYPE>()(acc, xi, jrel + tile * blockDim.x); // acc += xi #endif #endif } #if SUM_SCHEME == BLOCK_SUM typename FUN::template ReducePair<__TYPEACC__,TYPE>()(acc, tmp); // acc += tmp #endif } __syncthreads(); } if (i < nx) { typename FUN::template FinalizeOutput<__TYPEACC__,TYPE>()(acc, px[0] + i * DIMOUT, px, i); } } struct GpuConv1D_FromHost { template<typename TYPE, class FUN> static int Eval_(FUN fun, int nx, int ny, TYPE **px_h, TYPE **py_h, TYPE **pp_h) { typedef typename FUN::DIMSX DIMSX; typedef typename FUN::DIMSY DIMSY; typedef typename FUN::DIMSP DIMSP; const int DIMX = DIMSX::SUM; const int DIMY = DIMSY::SUM; const int DIMP = DIMSP::SUM; const int DIMOUT = FUN::DIM; // dimension of output variable const int DIMFOUT = DIMSX::FIRST; // DIMFOUT is dimension of output variable of inner function const int SIZEI = DIMSX::SIZE; const int SIZEJ = DIMSY::SIZE; const int SIZEP = DIMSP::SIZE; // pointers to device data TYPE *x_d, *y_d, *param_d; // device arrays of pointers to device data TYPE **px_d, **py_d, **pp_d; // single cudaMalloc void 
**p_data; CudaSafeCall(cudaMalloc((void **) &p_data, sizeof(TYPE *) * (SIZEI + SIZEJ + SIZEP) + sizeof(TYPE) * (DIMP + nx * (DIMX - DIMFOUT + DIMOUT) + ny * DIMY))); TYPE **p_data_a = (TYPE **) p_data; px_d = p_data_a; p_data_a += SIZEI; py_d = p_data_a; p_data_a += SIZEJ; pp_d = p_data_a; p_data_a += SIZEP; TYPE *p_data_b = (TYPE *) p_data_a; param_d = p_data_b; p_data_b += DIMP; x_d = p_data_b; p_data_b += nx * (DIMX - DIMFOUT + DIMOUT); y_d = p_data_b; // host arrays of pointers to device data TYPE *phx_d[SIZEI]; TYPE *phy_d[SIZEJ]; TYPE *php_d[SIZEP]; int nvals; // if DIMSP is empty (i.e. no parameter), nvals = -1 which could result in a segfault if (SIZEP > 0) { php_d[0] = param_d; nvals = DIMSP::VAL(0); CudaSafeCall(cudaMemcpy(php_d[0], pp_h[0], sizeof(TYPE) * nvals, cudaMemcpyHostToDevice)); for (int k = 1; k < SIZEP; k++) { php_d[k] = php_d[k - 1] + nvals; nvals = DIMSP::VAL(k); CudaSafeCall(cudaMemcpy(php_d[k], pp_h[k], sizeof(TYPE) * nvals, cudaMemcpyHostToDevice)); } } phx_d[0] = x_d; nvals = nx * DIMOUT; for (int k = 1; k < SIZEI; k++) { phx_d[k] = phx_d[k - 1] + nvals; nvals = nx * DIMSX::VAL(k); CudaSafeCall(cudaMemcpy(phx_d[k], px_h[k], sizeof(TYPE) * nvals, cudaMemcpyHostToDevice)); } // if DIMSY is empty (i.e. 
no Vj variable), nvals = -1 which could result in a segfault if (SIZEJ > 0) { phy_d[0] = y_d; nvals = ny * DIMSY::VAL(0); CudaSafeCall(cudaMemcpy(phy_d[0], py_h[0], sizeof(TYPE) * nvals, cudaMemcpyHostToDevice)); for (int k = 1; k < SIZEJ; k++) { phy_d[k] = phy_d[k - 1] + nvals; nvals = ny * (int) DIMSY::VAL(k); CudaSafeCall(cudaMemcpy(phy_d[k], py_h[k], sizeof(TYPE) * nvals, cudaMemcpyHostToDevice)); } } // copy arrays of pointers CudaSafeCall(cudaMemcpy(pp_d, php_d, SIZEP * sizeof(TYPE *), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(px_d, phx_d, SIZEI * sizeof(TYPE *), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(py_d, phy_d, SIZEJ * sizeof(TYPE *), cudaMemcpyHostToDevice)); // Compute on device : grid and block are both 1d int dev = -1; CudaSafeCall(cudaGetDevice(&dev)); dim3 blockSize; SetGpuProps(dev); // warning : blockSize.x was previously set to CUDA_BLOCK_SIZE; currently CUDA_BLOCK_SIZE value is used as a bound. blockSize.x = ::std::min(CUDA_BLOCK_SIZE, ::std::min(maxThreadsPerBlock, (int) (sharedMemPerBlock / ::std::max(1, (int) (DIMY * sizeof(TYPE)))))); // number of threads in each block dim3 gridSize; gridSize.x = nx / blockSize.x + (nx % blockSize.x == 0 ? 0 : 1); // Size of the SharedData : blockSize.x*(DIMY)*sizeof(TYPE) GpuConv1DOnDevice<TYPE> << < gridSize, blockSize, blockSize.x * (DIMY) * sizeof(TYPE) >> > (fun, nx, ny, px_d, py_d, pp_d); // block until the device has completed CudaSafeCall(cudaDeviceSynchronize()); CudaCheckError(); // Send data from device to host. CudaSafeCall(cudaMemcpy(*px_h, x_d, sizeof(TYPE) * (nx * DIMOUT), cudaMemcpyDeviceToHost)); // Free memory. CudaSafeCall(cudaFree(p_data)); return 0; } // and use getlist to enroll them into "pointers arrays" px and py. template<typename TYPE, class FUN, typename... Args> static int Eval(FUN fun, int nx, int ny, int device_id, TYPE *x1_h, Args... 
args) { if (device_id != -1) CudaSafeCall(cudaSetDevice(device_id)); typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; const int SIZEI = VARSI::SIZE + 1; const int SIZEJ = VARSJ::SIZE; const int SIZEP = VARSP::SIZE; using DIMSX = GetDims<VARSI>; using DIMSY = GetDims<VARSJ>; using DIMSP = GetDims<VARSP>; using INDSI = GetInds<VARSI>; using INDSJ = GetInds<VARSJ>; using INDSP = GetInds<VARSP>; TYPE *px_h[SIZEI]; TYPE *py_h[SIZEJ]; TYPE *pp_h[SIZEP]; px_h[0] = x1_h; getlist<INDSI>(px_h + 1, args...); getlist<INDSJ>(py_h, args...); getlist<INDSP>(pp_h, args...); return Eval_(fun, nx, ny, px_h, py_h, pp_h); } // same without the device_id argument template<typename TYPE, class FUN, typename... Args> static int Eval(FUN fun, int nx, int ny, TYPE *x1_h, Args... args) { return Eval(fun, nx, ny, -1, x1_h, args...); } // Idem, but with args given as an array of arrays, instead of an explicit list of arrays template<typename TYPE, class FUN> static int Eval(FUN fun, int nx, int ny, TYPE *x1_h, TYPE **args, int device_id = -1) { // We set the GPU device on which computations will be performed if (device_id != -1) CudaSafeCall(cudaSetDevice(device_id)); typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; const int SIZEI = VARSI::SIZE + 1; const int SIZEJ = VARSJ::SIZE; const int SIZEP = VARSP::SIZE; using DIMSX = GetDims<VARSI>; using DIMSY = GetDims<VARSJ>; using DIMSP = GetDims<VARSP>; using INDSI = GetInds<VARSI>; using INDSJ = GetInds<VARSJ>; using INDSP = GetInds<VARSP>; TYPE *px_h[SIZEI]; TYPE *py_h[SIZEJ]; TYPE *pp_h[SIZEP]; px_h[0] = x1_h; for (int i = 1; i < SIZEI; i++) px_h[i] = args[INDSI::VAL(i - 1)]; for (int i = 0; i < SIZEJ; i++) py_h[i] = args[INDSJ::VAL(i)]; for (int i = 0; i < SIZEP; i++) pp_h[i] = args[INDSP::VAL(i)]; return Eval_(fun, nx, ny, px_h, py_h, pp_h); } }; struct GpuConv1D_FromDevice { template<typename TYPE, class FUN> static int Eval_(FUN 
fun, int nx, int ny, TYPE **phx_d, TYPE **phy_d, TYPE **php_d) { typedef typename FUN::DIMSX DIMSX; typedef typename FUN::DIMSY DIMSY; typedef typename FUN::DIMSP DIMSP; const int DIMY = DIMSY::SUM; const int SIZEI = DIMSX::SIZE; const int SIZEJ = DIMSY::SIZE; const int SIZEP = DIMSP::SIZE; // device arrays of pointers to device data TYPE **px_d, **py_d, **pp_d; // single cudaMalloc void **p_data; CudaSafeCall(cudaMalloc((void **) &p_data, sizeof(TYPE *) * (SIZEI + SIZEJ + SIZEP))); TYPE **p_data_a = (TYPE **) p_data; px_d = p_data_a; p_data_a += SIZEI; py_d = p_data_a; p_data_a += SIZEJ; pp_d = p_data_a; CudaSafeCall(cudaMemcpy(px_d, phx_d, SIZEI * sizeof(TYPE *), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(py_d, phy_d, SIZEJ * sizeof(TYPE *), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(pp_d, php_d, SIZEP * sizeof(TYPE *), cudaMemcpyHostToDevice)); // Compute on device : grid and block are both 1d int dev = -1; CudaSafeCall(cudaGetDevice(&dev)); SetGpuProps(dev); dim3 blockSize; // warning : blockSize.x was previously set to CUDA_BLOCK_SIZE; currently CUDA_BLOCK_SIZE value is used as a bound. blockSize.x = ::std::min(CUDA_BLOCK_SIZE, ::std::min(maxThreadsPerBlock, (int) (sharedMemPerBlock / ::std::max(1, (int) (DIMY * sizeof(TYPE)))))); // number of threads in each block dim3 gridSize; gridSize.x = nx / blockSize.x + (nx % blockSize.x == 0 ? 0 : 1); // Size of the SharedData : blockSize.x*(DIMY)*sizeof(TYPE) GpuConv1DOnDevice<TYPE> << < gridSize, blockSize, blockSize.x * (DIMY) * sizeof(TYPE) >> > (fun, nx, ny, px_d, py_d, pp_d); // block until the device has completed CudaSafeCall(cudaDeviceSynchronize()); CudaCheckError(); CudaSafeCall(cudaFree(p_data)); return 0; } // Same wrappers, but for data located on the device template<typename TYPE, class FUN, typename... Args> static int Eval(FUN fun, int nx, int ny, int device_id, TYPE *x1_d, Args... 
args) { // device_id is provided, so we set the GPU device accordingly // Warning : is has to be consistent with location of data CudaSafeCall(cudaSetDevice(device_id)); typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; const int SIZEI = VARSI::SIZE + 1; const int SIZEJ = VARSJ::SIZE; const int SIZEP = VARSP::SIZE; using DIMSX = GetDims<VARSI>; using DIMSY = GetDims<VARSJ>; using DIMSP = GetDims<VARSP>; using INDSI = GetInds<VARSI>; using INDSJ = GetInds<VARSJ>; using INDSP = GetInds<VARSP>; TYPE *phx_d[SIZEI]; TYPE *phy_d[SIZEJ]; TYPE *php_d[SIZEP]; phx_d[0] = x1_d; getlist<INDSI>(phx_d + 1, args...); getlist<INDSJ>(phy_d, args...); getlist<INDSP>(php_d, args...); return Eval_(fun, nx, ny, phx_d, phy_d, php_d); } // same without the device_id argument template<typename TYPE, class FUN, typename... Args> static int Eval(FUN fun, int nx, int ny, TYPE *x1_d, Args... args) { // We set the GPU device on which computations will be performed // to be the GPU on which data is located. // NB. we only check location of x1_d which is the output vector // so we assume that input data is on the same GPU // note : cudaPointerGetAttributes has a strange behaviour: // it looks like it makes a copy of the vector on the default GPU device (0) !!! // So we prefer to avoid this and provide directly the device_id as input (first function above) cudaPointerAttributes attributes; CudaSafeCall(cudaPointerGetAttributes(&attributes, x1_d)); return Eval(fun, nx, ny, attributes.device, x1_d, args...); } template<typename TYPE, class FUN> static int Eval(FUN fun, int nx, int ny, TYPE *x1_d, TYPE **args, int device_id = -1) { if (device_id == -1) { // We set the GPU device on which computations will be performed // to be the GPU on which data is located. // NB. 
we only check location of x1_d which is the output vector // so we assume that input data is on the same GPU // note : cudaPointerGetAttributes has a strange behaviour: // it looks like it makes a copy of the vector on the default GPU device (0) !!! // So we prefer to avoid this and provide directly the device_id as input (else statement below) cudaPointerAttributes attributes; CudaSafeCall(cudaPointerGetAttributes(&attributes, x1_d)); CudaSafeCall(cudaSetDevice(attributes.device)); } else // device_id is provided, so we use it. Warning : is has to be consistent with location of data CudaSafeCall(cudaSetDevice(device_id)); typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; const int SIZEI = VARSI::SIZE + 1; const int SIZEJ = VARSJ::SIZE; const int SIZEP = VARSP::SIZE; using DIMSX = GetDims<VARSI>; using DIMSY = GetDims<VARSJ>; using DIMSP = GetDims<VARSP>; using INDSI = GetInds<VARSI>; using INDSJ = GetInds<VARSJ>; using INDSP = GetInds<VARSP>; TYPE *px_d[SIZEI]; TYPE *py_d[SIZEJ]; TYPE *pp_d[SIZEP]; px_d[0] = x1_d; for (int i = 1; i < SIZEI; i++) px_d[i] = args[INDSI::VAL(i - 1)]; for (int i = 0; i < SIZEJ; i++) py_d[i] = args[INDSJ::VAL(i)]; for (int i = 0; i < SIZEP; i++) pp_d[i] = args[INDSP::VAL(i)]; return Eval_(fun, nx, ny, px_d, py_d, pp_d); } }; }
ea7d47e86fa7cef6f021521643bc75700baef4a9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cudaDmult_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned int size = 1; const double *x1 = NULL; hipMalloc(&x1, XSIZE*YSIZE); const double *x2 = NULL; hipMalloc(&x2, XSIZE*YSIZE); double *y = NULL; hipMalloc(&y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cudaDmult_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x1,x2,y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cudaDmult_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x1,x2,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cudaDmult_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x1,x2,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ea7d47e86fa7cef6f021521643bc75700baef4a9.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cudaDmult_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned int size = 1; const double *x1 = NULL; cudaMalloc(&x1, XSIZE*YSIZE); const double *x2 = NULL; cudaMalloc(&x2, XSIZE*YSIZE); double *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cudaDmult_kernel<<<gridBlock,threadBlock>>>(size,x1,x2,y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cudaDmult_kernel<<<gridBlock,threadBlock>>>(size,x1,x2,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cudaDmult_kernel<<<gridBlock,threadBlock>>>(size,x1,x2,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ffdbdd6c5ec7e32f5bafaf37bb49f520bb1d5e9f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/core/common/balanced_splitter.h" #include "oneflow/core/kernel/kernel_util.h" #include "oneflow/user/kernels/math_unary_elementwise_func.h" namespace oneflow { namespace { template<typename T, typename K> __global__ void GpuForward(const int64_t n, const int64_t num_classes, const int64_t lower_bound, const T m1, const T m2, const T m3, const T* in, const K* labels, T* out, T* theta) { CUDA_1D_KERNEL_LOOP(i, n) { const int32_t row_id = i / num_classes; const int32_t col_id = i - row_id * num_classes; const T in_data = in[i]; T out_data = in_data; K label = labels[row_id] - lower_bound; if (label == col_id) { const T theta_data = AcosFunctor<T>::Forward(in_data); out_data = CosFunctor<T>::Forward(theta_data * m1 + m2) - m3; theta[row_id] = theta_data; } else if ((label < 0 || label >= num_classes) && col_id == 0) { theta[row_id] = 0; } out[i] = out_data; } } template<typename T, typename K> __global__ void GpuBackward(const int64_t n, const int64_t num_classes, const int64_t lower_bound, const T m1, const T m2, const T m3, const T* dy, const K* labels, const T* theta, T* dx) { CUDA_1D_KERNEL_LOOP(i, n) { const int32_t row_id = i / num_classes; const int32_t col_id = i - row_id * num_classes; K label = labels[row_id] - lower_bound; const T dy_data 
= dy[i]; const T theta_data = theta[row_id]; T dx_data = dy_data; if (label == col_id) { dx_data = dy_data * SinFunctor<T>::Forward(theta_data * m1 + m2) * m1 / SinFunctor<T>::Forward(theta_data); } dx[i] = dx_data; } } class CombinedMarginLossOpKernelState final : public user_op::OpKernelState { public: CombinedMarginLossOpKernelState(int64_t lower, int64_t upper) : lower_(lower), upper_(upper) {} ~CombinedMarginLossOpKernelState() override = default; int64_t lower() const { return lower_; } int64_t upper() const { return upper_; } private: const int64_t lower_; const int64_t upper_; }; std::shared_ptr<user_op::OpKernelState> CreateCombinedMarginLossOpKernelState( user_op::KernelInitContext* ctx, const std::string& in_arg_name) { const SbpParallel& in_sbp = ctx->SbpParallel4ArgNameAndIndex(in_arg_name, 0); if (in_sbp.has_split_parallel() && in_sbp.split_parallel().axis() == 1 && ctx->parallel_ctx().parallel_num() > 1) { CHECK(ctx->SbpParallel4ArgNameAndIndex("label", 0).has_broadcast_parallel()); const user_op::TensorDesc* in_logical_desc = ctx->LogicalTensorDesc4ArgNameAndIndex(in_arg_name, 0); const auto depth = ctx->Attr<int64_t>("depth"); CHECK_EQ(depth, in_logical_desc->shape().At(1)); BalancedSplitter bs(depth, ctx->parallel_ctx().parallel_num()); return std::make_shared<CombinedMarginLossOpKernelState>( bs.At(ctx->parallel_ctx().parallel_id()).begin(), bs.At(ctx->parallel_ctx().parallel_id()).end()); } else { return std::shared_ptr<user_op::OpKernelState>(nullptr); } } } // namespace template<typename T, typename K> class CombinedMarginLossGpuKernel final : public user_op::OpKernel { public: CombinedMarginLossGpuKernel() = default; ~CombinedMarginLossGpuKernel() override = default; std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { return CreateCombinedMarginLossOpKernelState(ctx, "x"); } private: void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override { const 
user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0); user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0); user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0); const float m1 = ctx->Attr<float>("m1"); const float m2 = ctx->Attr<float>("m2"); const float m3 = ctx->Attr<float>("m3"); int64_t lower_bound = 0; if (state != nullptr) { auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state); CHECK_NOTNULL(kernel_state); CHECK_EQ(x->shape().Count(1), kernel_state->upper() - kernel_state->lower()); lower_bound = kernel_state->lower(); } hipLaunchKernelGGL(( GpuForward), dim3(BlocksNum4ThreadsNum(x->shape().elem_cnt())), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(), x->shape().elem_cnt(), x->shape().Count(1), lower_bound, static_cast<T>(m1), static_cast<T>(m2), static_cast<T>(m3), x->dptr<T>(), label->dptr<K>(), y->mut_dptr<T>(), theta->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL(in_type, indices_type) \ REGISTER_USER_KERNEL("combined_margin_loss") \ .SetCreateFn<CombinedMarginLossGpuKernel<OF_PP_PAIR_FIRST(in_type), \ OF_PP_PAIR_FIRST(indices_type)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(in_type)) \ & (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type))); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL, FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) template<typename T, typename K> class CombinedMarginLossGradGpuKernel final : public user_op::OpKernel { public: CombinedMarginLossGradGpuKernel() = default; ~CombinedMarginLossGradGpuKernel() override = default; std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { return CreateCombinedMarginLossOpKernelState(ctx, "dy"); } 
private: void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override { const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0); const user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0); user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0); const float m1 = ctx->Attr<float>("m1"); const float m2 = ctx->Attr<float>("m2"); const float m3 = ctx->Attr<float>("m3"); int64_t lower_bound = 0; if (state != nullptr) { auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state); CHECK_NOTNULL(kernel_state); CHECK_EQ(dy->shape().Count(1), kernel_state->upper() - kernel_state->lower()); lower_bound = kernel_state->lower(); } hipLaunchKernelGGL(( GpuBackward), dim3(BlocksNum4ThreadsNum(dy->shape().elem_cnt())), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(), dy->shape().elem_cnt(), dy->shape().Count(1), lower_bound, static_cast<T>(m1), static_cast<T>(m2), static_cast<T>(m3), dy->dptr<T>(), label->dptr<K>(), theta->dptr<T>(), dx->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL(dy_type, indices_type) \ REGISTER_USER_KERNEL("combined_margin_loss_grad") \ .SetCreateFn<CombinedMarginLossGradGpuKernel<OF_PP_PAIR_FIRST(dy_type), \ OF_PP_PAIR_FIRST(indices_type)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dy", 0) == OF_PP_PAIR_SECOND(dy_type)) \ & (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type))); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL, FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) } // namespace oneflow
ffdbdd6c5ec7e32f5bafaf37bb49f520bb1d5e9f.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/core/common/balanced_splitter.h" #include "oneflow/core/kernel/kernel_util.h" #include "oneflow/user/kernels/math_unary_elementwise_func.h" namespace oneflow { namespace { template<typename T, typename K> __global__ void GpuForward(const int64_t n, const int64_t num_classes, const int64_t lower_bound, const T m1, const T m2, const T m3, const T* in, const K* labels, T* out, T* theta) { CUDA_1D_KERNEL_LOOP(i, n) { const int32_t row_id = i / num_classes; const int32_t col_id = i - row_id * num_classes; const T in_data = in[i]; T out_data = in_data; K label = labels[row_id] - lower_bound; if (label == col_id) { const T theta_data = AcosFunctor<T>::Forward(in_data); out_data = CosFunctor<T>::Forward(theta_data * m1 + m2) - m3; theta[row_id] = theta_data; } else if ((label < 0 || label >= num_classes) && col_id == 0) { theta[row_id] = 0; } out[i] = out_data; } } template<typename T, typename K> __global__ void GpuBackward(const int64_t n, const int64_t num_classes, const int64_t lower_bound, const T m1, const T m2, const T m3, const T* dy, const K* labels, const T* theta, T* dx) { CUDA_1D_KERNEL_LOOP(i, n) { const int32_t row_id = i / num_classes; const int32_t col_id = i - row_id * num_classes; K label = labels[row_id] - lower_bound; const T dy_data = dy[i]; const T theta_data = theta[row_id]; T dx_data = dy_data; if (label == col_id) { 
dx_data = dy_data * SinFunctor<T>::Forward(theta_data * m1 + m2) * m1 / SinFunctor<T>::Forward(theta_data); } dx[i] = dx_data; } } class CombinedMarginLossOpKernelState final : public user_op::OpKernelState { public: CombinedMarginLossOpKernelState(int64_t lower, int64_t upper) : lower_(lower), upper_(upper) {} ~CombinedMarginLossOpKernelState() override = default; int64_t lower() const { return lower_; } int64_t upper() const { return upper_; } private: const int64_t lower_; const int64_t upper_; }; std::shared_ptr<user_op::OpKernelState> CreateCombinedMarginLossOpKernelState( user_op::KernelInitContext* ctx, const std::string& in_arg_name) { const SbpParallel& in_sbp = ctx->SbpParallel4ArgNameAndIndex(in_arg_name, 0); if (in_sbp.has_split_parallel() && in_sbp.split_parallel().axis() == 1 && ctx->parallel_ctx().parallel_num() > 1) { CHECK(ctx->SbpParallel4ArgNameAndIndex("label", 0).has_broadcast_parallel()); const user_op::TensorDesc* in_logical_desc = ctx->LogicalTensorDesc4ArgNameAndIndex(in_arg_name, 0); const auto depth = ctx->Attr<int64_t>("depth"); CHECK_EQ(depth, in_logical_desc->shape().At(1)); BalancedSplitter bs(depth, ctx->parallel_ctx().parallel_num()); return std::make_shared<CombinedMarginLossOpKernelState>( bs.At(ctx->parallel_ctx().parallel_id()).begin(), bs.At(ctx->parallel_ctx().parallel_id()).end()); } else { return std::shared_ptr<user_op::OpKernelState>(nullptr); } } } // namespace template<typename T, typename K> class CombinedMarginLossGpuKernel final : public user_op::OpKernel { public: CombinedMarginLossGpuKernel() = default; ~CombinedMarginLossGpuKernel() override = default; std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { return CreateCombinedMarginLossOpKernelState(ctx, "x"); } private: void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override { const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* label = 
ctx->Tensor4ArgNameAndIndex("label", 0); user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0); user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0); const float m1 = ctx->Attr<float>("m1"); const float m2 = ctx->Attr<float>("m2"); const float m3 = ctx->Attr<float>("m3"); int64_t lower_bound = 0; if (state != nullptr) { auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state); CHECK_NOTNULL(kernel_state); CHECK_EQ(x->shape().Count(1), kernel_state->upper() - kernel_state->lower()); lower_bound = kernel_state->lower(); } GpuForward<<<BlocksNum4ThreadsNum(x->shape().elem_cnt()), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>( x->shape().elem_cnt(), x->shape().Count(1), lower_bound, static_cast<T>(m1), static_cast<T>(m2), static_cast<T>(m3), x->dptr<T>(), label->dptr<K>(), y->mut_dptr<T>(), theta->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL(in_type, indices_type) \ REGISTER_USER_KERNEL("combined_margin_loss") \ .SetCreateFn<CombinedMarginLossGpuKernel<OF_PP_PAIR_FIRST(in_type), \ OF_PP_PAIR_FIRST(indices_type)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(in_type)) \ & (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type))); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL, FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) template<typename T, typename K> class CombinedMarginLossGradGpuKernel final : public user_op::OpKernel { public: CombinedMarginLossGradGpuKernel() = default; ~CombinedMarginLossGradGpuKernel() override = default; std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { return CreateCombinedMarginLossOpKernelState(ctx, "dy"); } private: void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override { const 
user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0); const user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0); user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0); const float m1 = ctx->Attr<float>("m1"); const float m2 = ctx->Attr<float>("m2"); const float m3 = ctx->Attr<float>("m3"); int64_t lower_bound = 0; if (state != nullptr) { auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state); CHECK_NOTNULL(kernel_state); CHECK_EQ(dy->shape().Count(1), kernel_state->upper() - kernel_state->lower()); lower_bound = kernel_state->lower(); } GpuBackward<<<BlocksNum4ThreadsNum(dy->shape().elem_cnt()), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>( dy->shape().elem_cnt(), dy->shape().Count(1), lower_bound, static_cast<T>(m1), static_cast<T>(m2), static_cast<T>(m3), dy->dptr<T>(), label->dptr<K>(), theta->dptr<T>(), dx->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL(dy_type, indices_type) \ REGISTER_USER_KERNEL("combined_margin_loss_grad") \ .SetCreateFn<CombinedMarginLossGradGpuKernel<OF_PP_PAIR_FIRST(dy_type), \ OF_PP_PAIR_FIRST(indices_type)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dy", 0) == OF_PP_PAIR_SECOND(dy_type)) \ & (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type))); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL, FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) } // namespace oneflow
ad01fcaf01526552aa3978ba084bb62543a1ba5b.hip
// !!! This is a file automatically generated by hipify!!! // In this assignment you will expand your "Hello world" kernel to see how // are threads, warps and blocks scheduled. // // Follow instructions for TASK 1 which consists from writing a kernel, // configuring it and then running the code. After running the code few // times you should see that blocks are executed in no particular order // // After you finish TASK 1 continue with TASK 2 and TASK 3 following same // workflow. Write the kernel, configure it properly and then run code // multiple times to see how threads from one warp are schedules and how // warps from one block are scheduled. // NOTE: You should finish your basic "Hello world" assignment first, before // doing this one. #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> //---------------------------------------------------------------------- // TASK 1.0: Write a new "Hello world" kernel, called for example // 'helloworld_blocks', which in addition to "Hello world" writes out which // block is writing out the string. // For example "Hello world from block 2!" // // In order to print which block is saying "Hello world" you can use syntax // like this: // printf("integer=%d; float=%f or %e;\n",1, 0.0001, 0.0001); // Also remember that every thread can access pre-set variable which // refer to its coordinates and coordinates of the block which it resides in. 
// These are dim3 data types called: threadIdx, blockIdx, blockDim // and gridDim // dim3 data type has three components: x, y, z // write your kernel here __global__ void helloworld_blocks(void) { printf("Hello from block %d!\n", blockIdx.x); } //---------------------------------------------------------------------- //---------------------------------------------------------------------- // TASK 2.0: Write a "Hello world" kernel which output "Hello world" but // in addition to that also outputs which block and thread it // comes from. For example: "Hello world from block 1, thread 3" // // As in task one use printf() function to print to console and utilise // pre-set variables threadIdx, blockIdx, blockDim and gridDim. // write your kernel here __global__ void helloworld_bt(void) { printf("Hello from block %d and thread %d!\n", blockIdx.x, threadIdx.x); } //---------------------------------------------------------------------- //---------------------------------------------------------------------- // TASK 3.0: Write a "Hello world" kernel where only first thread from each // warp writes out to console. So for example: // "Hello world from block 2, warp 3" // // A warp is group of 32 threads. First warp is consists from threads 0--31, // second warp consists from threads 32--63 and so on. To select first thread // from each warp we have to use modulo "%" operation. Modulo operation returns // remainder after division. So 3%2=1 while 4&2=0; // To select first thread from each warp we need to use a branch like this: // if(threadIdx.x%32==0) { // this block will be executed only by first thread from each warp // } // To identify which warp thread resides in you should remember that warp consist // from 32 threads. 
// write your kernel here __global__ void helloworld_first(void) { if (threadIdx.x%32 == 0) { printf("Hello from warp %d!\n", (threadIdx.x / 32)); } } //---------------------------------------------------------------------- int main(void) { // initiate GPU int deviceid = 0; int devCount; hipGetDeviceCount(&devCount); if(deviceid<devCount){ hipSetDevice(deviceid); } else { printf("ERROR! Selected device is not available\n"); return(1); } //---------------------------------------------------------------------- // TASK 1.1: execute your "Hello world" kernel from TASK 1.0 on few blocks // (10 should be enough) with 1 thread. When you had configured your // kernel compile the code typing "make" and then run it be executing // ./helloworld_scheduling.exe // You should see that blocks are scheduled in haphazard manner. // // You may use whatever syntax version you prefer, a simplified one // dimensional or full three dimensional call using dim3 data type. // put your code here dim3 Gd(10,1,1); dim3 Bd(1,1,1); hipLaunchKernelGGL(( helloworld_blocks), dim3(Gd), dim3(Bd), 0, 0, ); //---------------------------------------------------------------------- //---------------------------------------------------------------------- // TASK 2.1: execute your "Hello world" kernel from TASK 2.0 on about // 5 blocks each containing about 10 threads. When you configured the kernel // compile the code typing "make" and then run it be executing // ./helloworld_scheduling.exe // You should see that blocks are still scheduled in haphazard manner, // but threads within them, being from one warp should execute in order. // // You may use whatever syntax version you prefer, a simplified one // dimensional or full three dimensional call using dim3 data type. 
// put your code here dim3 Gd_2(5,1,1); dim3 Bd_2(10,1,1); hipLaunchKernelGGL(( helloworld_bt), dim3(Gd_2), dim3(Bd_2), 0, 0, ); //---------------------------------------------------------------------- //---------------------------------------------------------------------- // TASK 3.1: execute your "Hello world" kernel from TASK 3.0 on about // 5 blocks each containing about 320 threads. When you configured the kernel // compile the code typing "make" and then run it be executing // ./helloworld_scheduling.exe // You should see that both blocks and warps within them are scheduled // in haphazard manner. // To see more clearly that warps are executed in haphazard manner run // your kernel with only one block. // // You may use whatever syntax version you prefer, a simplified one // dimensional or full three dimensional call using dim3 data type. // put your code here dim3 Gd_3(5,1,1); dim3 Bd_3(320,1,1); hipLaunchKernelGGL(( helloworld_first), dim3(Gd_3), dim3(Bd_3), 0, 0, ); //---------------------------------------------------------------------- hipDeviceReset(); return (0); }
ad01fcaf01526552aa3978ba084bb62543a1ba5b.cu
// In this assignment you will expand your "Hello world" kernel to see how // are threads, warps and blocks scheduled. // // Follow instructions for TASK 1 which consists from writing a kernel, // configuring it and then running the code. After running the code few // times you should see that blocks are executed in no particular order // // After you finish TASK 1 continue with TASK 2 and TASK 3 following same // workflow. Write the kernel, configure it properly and then run code // multiple times to see how threads from one warp are schedules and how // warps from one block are scheduled. // NOTE: You should finish your basic "Hello world" assignment first, before // doing this one. #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> //---------------------------------------------------------------------- // TASK 1.0: Write a new "Hello world" kernel, called for example // 'helloworld_blocks', which in addition to "Hello world" writes out which // block is writing out the string. // For example "Hello world from block 2!" // // In order to print which block is saying "Hello world" you can use syntax // like this: // printf("integer=%d; float=%f or %e;\n",1, 0.0001, 0.0001); // Also remember that every thread can access pre-set variable which // refer to its coordinates and coordinates of the block which it resides in. // These are dim3 data types called: threadIdx, blockIdx, blockDim // and gridDim // dim3 data type has three components: x, y, z // write your kernel here __global__ void helloworld_blocks(void) { printf("Hello from block %d!\n", blockIdx.x); } //---------------------------------------------------------------------- //---------------------------------------------------------------------- // TASK 2.0: Write a "Hello world" kernel which output "Hello world" but // in addition to that also outputs which block and thread it // comes from. 
For example: "Hello world from block 1, thread 3" // // As in task one use printf() function to print to console and utilise // pre-set variables threadIdx, blockIdx, blockDim and gridDim. // write your kernel here __global__ void helloworld_bt(void) { printf("Hello from block %d and thread %d!\n", blockIdx.x, threadIdx.x); } //---------------------------------------------------------------------- //---------------------------------------------------------------------- // TASK 3.0: Write a "Hello world" kernel where only first thread from each // warp writes out to console. So for example: // "Hello world from block 2, warp 3" // // A warp is group of 32 threads. First warp is consists from threads 0--31, // second warp consists from threads 32--63 and so on. To select first thread // from each warp we have to use modulo "%" operation. Modulo operation returns // remainder after division. So 3%2=1 while 4&2=0; // To select first thread from each warp we need to use a branch like this: // if(threadIdx.x%32==0) { // this block will be executed only by first thread from each warp // } // To identify which warp thread resides in you should remember that warp consist // from 32 threads. // write your kernel here __global__ void helloworld_first(void) { if (threadIdx.x%32 == 0) { printf("Hello from warp %d!\n", (threadIdx.x / 32)); } } //---------------------------------------------------------------------- int main(void) { // initiate GPU int deviceid = 0; int devCount; cudaGetDeviceCount(&devCount); if(deviceid<devCount){ cudaSetDevice(deviceid); } else { printf("ERROR! Selected device is not available\n"); return(1); } //---------------------------------------------------------------------- // TASK 1.1: execute your "Hello world" kernel from TASK 1.0 on few blocks // (10 should be enough) with 1 thread. 
When you had configured your // kernel compile the code typing "make" and then run it be executing // ./helloworld_scheduling.exe // You should see that blocks are scheduled in haphazard manner. // // You may use whatever syntax version you prefer, a simplified one // dimensional or full three dimensional call using dim3 data type. // put your code here dim3 Gd(10,1,1); dim3 Bd(1,1,1); helloworld_blocks<<<Gd, Bd>>>(); //---------------------------------------------------------------------- //---------------------------------------------------------------------- // TASK 2.1: execute your "Hello world" kernel from TASK 2.0 on about // 5 blocks each containing about 10 threads. When you configured the kernel // compile the code typing "make" and then run it be executing // ./helloworld_scheduling.exe // You should see that blocks are still scheduled in haphazard manner, // but threads within them, being from one warp should execute in order. // // You may use whatever syntax version you prefer, a simplified one // dimensional or full three dimensional call using dim3 data type. // put your code here dim3 Gd_2(5,1,1); dim3 Bd_2(10,1,1); helloworld_bt<<<Gd_2, Bd_2>>>(); //---------------------------------------------------------------------- //---------------------------------------------------------------------- // TASK 3.1: execute your "Hello world" kernel from TASK 3.0 on about // 5 blocks each containing about 320 threads. When you configured the kernel // compile the code typing "make" and then run it be executing // ./helloworld_scheduling.exe // You should see that both blocks and warps within them are scheduled // in haphazard manner. // To see more clearly that warps are executed in haphazard manner run // your kernel with only one block. // // You may use whatever syntax version you prefer, a simplified one // dimensional or full three dimensional call using dim3 data type. 
// put your code here dim3 Gd_3(5,1,1); dim3 Bd_3(320,1,1); helloworld_first<<<Gd_3, Bd_3>>>(); //---------------------------------------------------------------------- cudaDeviceReset(); return (0); }
ea1c0313d4266551d31e7ff7e4a0a72dfc167996.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Udacity HW 6 //Poisson Blending /* Background ========== The goal for this assignment is to take one image (the source) and paste it into another image (the destination) attempting to match the two images so that the pasting is non-obvious. This is known as a "seamless clone". The basic ideas are as follows: 1) Figure out the interior and border of the source image 2) Use the values of the border pixels in the destination image as boundary conditions for solving a Poisson equation that tells us how to blend the images. No pixels from the destination except pixels on the border are used to compute the match. Solving the Poisson Equation ============================ There are multiple ways to solve this equation - we choose an iterative method - specifically the Jacobi method. Iterative methods start with a guess of the solution and then iterate to try and improve the guess until it stops changing. If the problem was well-suited for the method then it will stop and where it stops will be the solution. The Jacobi method is the simplest iterative method and converges slowly - that is we need a lot of iterations to get to the answer, but it is the easiest method to write. Jacobi Iterations ================= Our initial guess is going to be the source image itself. This is a pretty good guess for what the blended image will look like and it means that we won't have to do as many iterations compared to if we had started far from the final solution. 
ImageGuess_prev (Floating point) ImageGuess_next (Floating point) DestinationImg SourceImg Follow these steps to implement one iteration: 1) For every pixel p in the interior, compute two sums over the four neighboring pixels: Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor] else if the neighbor in on the border then += DestinationImg[neighbor] Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) 2) Calculate the new pixel value: float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255] In this assignment we will do 800 iterations. */ #include "utils.h" #include <algorithm> #include <thrust/host_vector.h> #define TPB 1024 __global__ void init_mask(const uchar4 *const h_sourceImg, unsigned char *mask, const size_t srcSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= srcSize) return; mask[i] = (h_sourceImg[i].x + h_sourceImg[i].y + h_sourceImg[i].z < 3 * 255) ? 
1 : 0; } __global__ void init_rgb(const uchar4 *const h_sourceImg, unsigned char *red_src, unsigned char *green_src, unsigned char *blue_src, const uchar4 * const h_destImg, unsigned char *red_dst, unsigned char *green_dst, unsigned char *blue_dst, const size_t srcSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= srcSize) return; red_src[i] = h_sourceImg[i].x; blue_src[i] = h_sourceImg[i].y; green_src[i] = h_sourceImg[i].z; red_dst[i] = h_destImg[i].x; blue_dst[i] = h_destImg[i].y; green_dst[i] = h_destImg[i].z; } __global__ void compute_regions(const unsigned char *const mask, unsigned char *strictInteriorPixels, unsigned char *borderPixels, const size_t numColsSource, const size_t numRowsSource) { int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; if (c < 1 || c >= (numColsSource - 1) || r < 1 || r >= (numRowsSource - 1)) return; if (mask[r * numColsSource + c]) { if (mask[(r - 1) * numColsSource + c] && mask[(r + 1) * numColsSource + c] && mask[r * numColsSource + c - 1] && mask[r * numColsSource + c + 1]) { strictInteriorPixels[r * numColsSource + c] = 1; borderPixels[r * numColsSource + c] = 0; } else { strictInteriorPixels[r * numColsSource + c] = 0; borderPixels[r * numColsSource + c] = 1; } } else { strictInteriorPixels[r * numColsSource + c] = 0; borderPixels[r * numColsSource + c] = 0; } } __global__ void compute_g(const unsigned char* const src, const unsigned char* const strictInteriorPixels, float *g, const size_t numColsSource, const size_t srcSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= srcSize) return; if(strictInteriorPixels[i]) { float sum = 4.f * src[i]; sum -= (float)src[i-1] + (float)src[i+1]; sum -= (float)src[i+numColsSource] + (float)src[i-numColsSource]; g[i] = sum; } } __global__ void init_buffer(const unsigned char* const src, float *buffer_1, float *buffer_2, const size_t srcSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= srcSize) return; 
buffer_1[i] = src[i]; buffer_2[i] = src[i]; } __global__ void compute_iteration(const unsigned char* const dst, const unsigned char* const strictInteriorPixels, const unsigned char* const borderPixels, const size_t numColsSource, const float* const f, const float* const g, float* const f_next, const size_t srcSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= srcSize) return; if(strictInteriorPixels[i]) { float blendedSum = 0.f; float borderSum = 0.f; if(strictInteriorPixels[i-1]) { blendedSum += f[i-1]; } else { borderSum += dst[i-1]; } if(strictInteriorPixels[i+1]) { blendedSum += f[i+1]; } else { borderSum += dst[i+1]; } if(strictInteriorPixels[i+numColsSource]) { blendedSum += f[i+numColsSource]; } else { borderSum += dst[i+numColsSource]; } if(strictInteriorPixels[i-numColsSource]) { blendedSum += f[i-numColsSource]; } else { borderSum += dst[i-numColsSource]; } float f_next_val = (blendedSum + borderSum + g[i]) / 4.f; f_next[i] = min(255.f, max(0.f, f_next_val)); } } __global__ void swap_blended(float *const blender1, float *const blender2, const size_t srcSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= srcSize) return; float tmp = blender1[i]; blender1[i] = blender2[i]; blender2[i] = tmp; } __global__ void copy_blended(uchar4 *const d_blendedImg, const float *const blendedRed, const float *const blendedGreen, const float *const blendedBlue, const unsigned char* const strictInteriorPixels, const size_t srcSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= srcSize) return; if(strictInteriorPixels[i]) { d_blendedImg[i].x = (char)blendedRed[i]; d_blendedImg[i].y = (char)blendedBlue[i]; d_blendedImg[i].z = (char)blendedGreen[i]; } } void your_blend(const uchar4 *const h_sourceImg, //IN const size_t numRowsSource, const size_t numColsSource, const uchar4 *const h_destImg, //IN uchar4 *const h_blendedImg) //OUT { /* To Recap here are the steps you need to implement 1) Compute a mask of the pixels from the source image to be 
copied The pixels that shouldn't be copied are completely white, they have R=255, G=255, B=255. Any other pixels SHOULD be copied. 2) Compute the interior and border regions of the mask. An interior pixel has all 4 neighbors also inside the mask. A border pixel is in the mask itself, but has at least one neighbor that isn't. 3) Separate out the incoming image into three separate channels 4) Create two float(!) buffers for each color channel that will act as our guesses. Initialize them to the respective color channel of the source image since that will act as our intial guess. 5) For each color channel perform the Jacobi iteration described above 800 times. 6) Create the output image by replacing all the interior pixels in the destination image with the result of the Jacobi iterations. Just cast the floating point values to unsigned chars since we have already made sure to clamp them to the correct range. Since this is final assignment we provide little boilerplate code to help you. Notice that all the input/output pointers are HOST pointers. You will have to allocate all of your own GPU memory and perform your own memcopies to get data in and out of the GPU memory. Remember to wrap all of your calls with checkCudaErrors() to catch any thing that might go wrong. After each kernel call do: hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); to catch any errors that happened while executing the kernel. 
*/ const size_t srcSize = numRowsSource * numColsSource; const int blocks1D = (srcSize + TPB - 1) / TPB; dim3 blockDim(16, 16, 1); dim3 gridDim( (numColsSource + blockDim.x - 1) / blockDim.x, (numRowsSource + blockDim.y - 1) / blockDim.y); // step 0: image from host to device uchar4 * d_sourceImg, *d_destImg, *d_blendedImg; hipMalloc((void **)&d_sourceImg, sizeof(uchar4) * srcSize); hipMalloc((void **)&d_destImg, sizeof(uchar4) * srcSize); hipMalloc((void **)&d_blendedImg, sizeof(uchar4) * srcSize); hipMemcpy(d_sourceImg, h_sourceImg, sizeof(uchar4) * srcSize, hipMemcpyHostToDevice); hipMemcpy(d_destImg, h_destImg, sizeof(uchar4) * srcSize, hipMemcpyHostToDevice); // step 1: get a mask unsigned char *mask, *borderPixels, *strictInteriorPixels; hipMalloc((void **)&mask, sizeof(unsigned char) * srcSize); hipMalloc((void **)&borderPixels, sizeof(unsigned char) * srcSize); hipMalloc((void **)&strictInteriorPixels, sizeof(unsigned char) * srcSize); hipLaunchKernelGGL(( init_mask), dim3(blocks1D), dim3(TPB), 0, 0, d_sourceImg, mask, srcSize); // step 2: regions strictInteriorPixels & borderPixels // todo interiorPixelList hipLaunchKernelGGL(( compute_regions), dim3(blockDim), dim3(gridDim), 0, 0, mask, strictInteriorPixels, borderPixels, numColsSource, numRowsSource); // step 3: rgb src & rgb dst, and g terms unsigned char *red_src, *blue_src, *green_src; unsigned char *red_dst, *blue_dst, *green_dst; hipMalloc((void **)&red_src, sizeof(unsigned char) * srcSize); hipMalloc((void **)&blue_src, sizeof(unsigned char) * srcSize); hipMalloc((void **)&green_src, sizeof(unsigned char) * srcSize); hipMalloc((void **)&red_dst, sizeof(unsigned char) * srcSize); hipMalloc((void **)&blue_dst, sizeof(unsigned char) * srcSize); hipMalloc((void **)&green_dst, sizeof(unsigned char) * srcSize); hipLaunchKernelGGL(( init_rgb), dim3(blocks1D), dim3(TPB), 0, 0, d_sourceImg, red_src, green_src, blue_src, d_destImg, red_dst, green_dst, blue_dst, srcSize); // computeG float *g_red, *g_green, 
*g_blue; hipMalloc((void **)&g_red, sizeof(float) * srcSize); hipMalloc((void **)&g_green, sizeof(float) * srcSize); hipMalloc((void **)&g_blue, sizeof(float) * srcSize); hipMemset(g_red, 0.f, sizeof(float) * srcSize); hipMemset(g_green, 0.f, sizeof(float) * srcSize); hipMemset(g_blue, 0.f, sizeof(float) * srcSize); hipLaunchKernelGGL(( compute_g), dim3(blocks1D), dim3(TPB), 0, 0, red_src, strictInteriorPixels, g_red, numColsSource, srcSize); hipLaunchKernelGGL(( compute_g), dim3(blocks1D), dim3(TPB), 0, 0, blue_src, strictInteriorPixels, g_blue, numColsSource, srcSize); hipLaunchKernelGGL(( compute_g), dim3(blocks1D), dim3(TPB), 0, 0, green_src, strictInteriorPixels, g_green, numColsSource, srcSize); // step 4: init two buffers of rgb float *blendedValsRed_1, *blendedValsRed_2; float *blendedValsBlue_1, *blendedValsBlue_2; float *blendedValsGreen_1, *blendedValsGreen_2; hipMalloc((void **)&blendedValsRed_1, sizeof(float) * srcSize); hipMalloc((void **)&blendedValsRed_2, sizeof(float) * srcSize); hipMalloc((void **)&blendedValsBlue_1, sizeof(float) * srcSize); hipMalloc((void **)&blendedValsBlue_2, sizeof(float) * srcSize); hipMalloc((void **)&blendedValsGreen_1, sizeof(float) * srcSize); hipMalloc((void **)&blendedValsGreen_2, sizeof(float) * srcSize); hipLaunchKernelGGL(( init_buffer), dim3(blocks1D), dim3(TPB), 0, 0, red_src, blendedValsRed_1, blendedValsRed_2, srcSize); hipLaunchKernelGGL(( init_buffer), dim3(blocks1D), dim3(TPB), 0, 0, green_src, blendedValsGreen_1, blendedValsGreen_2, srcSize); hipLaunchKernelGGL(( init_buffer), dim3(blocks1D), dim3(TPB), 0, 0, blue_src, blendedValsBlue_1, blendedValsBlue_2, srcSize); // step 5: solve const size_t numIterations = 800; for(size_t i = 0; i < numIterations; ++i) { hipLaunchKernelGGL(( compute_iteration), dim3(blocks1D), dim3(TPB), 0, 0, red_dst,strictInteriorPixels, borderPixels, numColsSource, blendedValsRed_1, g_red, blendedValsRed_2, srcSize); // hipMemcpy(blendedValsRed_1, blendedValsRed_2, 
sizeof(float)*srcSize, hipMemcpyDeviceToDevice); std::swap(blendedValsRed_1, blendedValsRed_2); hipLaunchKernelGGL(( compute_iteration), dim3(blocks1D), dim3(TPB), 0, 0, green_dst,strictInteriorPixels, borderPixels, numColsSource, blendedValsGreen_1, g_green, blendedValsGreen_2, srcSize); // hipMemcpy(blendedValsGreen_1, blendedValsGreen_2, sizeof(float)*srcSize, hipMemcpyDeviceToDevice); std::swap(blendedValsGreen_1, blendedValsGreen_2); hipLaunchKernelGGL(( compute_iteration), dim3(blocks1D), dim3(TPB), 0, 0, blue_dst,strictInteriorPixels, borderPixels, numColsSource, blendedValsBlue_1, g_blue, blendedValsBlue_2, srcSize); // hipMemcpy(blendedValsBlue_1, blendedValsBlue_2, sizeof(float)*srcSize, hipMemcpyDeviceToDevice); std::swap(blendedValsBlue_1, blendedValsBlue_2); } hipMemcpy(blendedValsRed_2, blendedValsRed_1, sizeof(float)*srcSize, hipMemcpyDeviceToDevice); hipMemcpy(blendedValsGreen_2, blendedValsGreen_1, sizeof(float)*srcSize, hipMemcpyDeviceToDevice); hipMemcpy(blendedValsBlue_2, blendedValsBlue_1, sizeof(float)*srcSize, hipMemcpyDeviceToDevice); // hipMemcpy() hipMemcpy(d_blendedImg, d_destImg, sizeof(uchar4) * srcSize, hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( copy_blended), dim3(blocks1D), dim3(TPB), 0, 0, d_blendedImg, blendedValsRed_2, blendedValsGreen_2, blendedValsBlue_2, strictInteriorPixels, srcSize); hipMemcpy(h_blendedImg, d_blendedImg, sizeof(uchar4) * srcSize,hipMemcpyDeviceToHost); // hipFree(mask); hipFree(borderPixels); hipFree(strictInteriorPixels); hipFree(red_src); hipFree(green_src); hipFree(blue_src); hipFree(red_dst); hipFree(green_dst); hipFree(blue_dst); hipFree(g_red); hipFree(g_green); hipFree(g_blue); hipFree(blendedValsRed_1); hipFree(blendedValsRed_2); hipFree(blendedValsGreen_1); hipFree(blendedValsGreen_2); hipFree(blendedValsBlue_1); hipFree(blendedValsBlue_2); hipFree(d_sourceImg); hipFree(d_destImg); hipFree(d_blendedImg); }
ea1c0313d4266551d31e7ff7e4a0a72dfc167996.cu
//Udacity HW 6 //Poisson Blending /* Background ========== The goal for this assignment is to take one image (the source) and paste it into another image (the destination) attempting to match the two images so that the pasting is non-obvious. This is known as a "seamless clone". The basic ideas are as follows: 1) Figure out the interior and border of the source image 2) Use the values of the border pixels in the destination image as boundary conditions for solving a Poisson equation that tells us how to blend the images. No pixels from the destination except pixels on the border are used to compute the match. Solving the Poisson Equation ============================ There are multiple ways to solve this equation - we choose an iterative method - specifically the Jacobi method. Iterative methods start with a guess of the solution and then iterate to try and improve the guess until it stops changing. If the problem was well-suited for the method then it will stop and where it stops will be the solution. The Jacobi method is the simplest iterative method and converges slowly - that is we need a lot of iterations to get to the answer, but it is the easiest method to write. Jacobi Iterations ================= Our initial guess is going to be the source image itself. This is a pretty good guess for what the blended image will look like and it means that we won't have to do as many iterations compared to if we had started far from the final solution. 
ImageGuess_prev (Floating point) ImageGuess_next (Floating point) DestinationImg SourceImg Follow these steps to implement one iteration: 1) For every pixel p in the interior, compute two sums over the four neighboring pixels: Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor] else if the neighbor in on the border then += DestinationImg[neighbor] Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) 2) Calculate the new pixel value: float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255] In this assignment we will do 800 iterations. */ #include "utils.h" #include <algorithm> #include <thrust/host_vector.h> #define TPB 1024 __global__ void init_mask(const uchar4 *const h_sourceImg, unsigned char *mask, const size_t srcSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= srcSize) return; mask[i] = (h_sourceImg[i].x + h_sourceImg[i].y + h_sourceImg[i].z < 3 * 255) ? 
1 : 0; } __global__ void init_rgb(const uchar4 *const h_sourceImg, unsigned char *red_src, unsigned char *green_src, unsigned char *blue_src, const uchar4 * const h_destImg, unsigned char *red_dst, unsigned char *green_dst, unsigned char *blue_dst, const size_t srcSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= srcSize) return; red_src[i] = h_sourceImg[i].x; blue_src[i] = h_sourceImg[i].y; green_src[i] = h_sourceImg[i].z; red_dst[i] = h_destImg[i].x; blue_dst[i] = h_destImg[i].y; green_dst[i] = h_destImg[i].z; } __global__ void compute_regions(const unsigned char *const mask, unsigned char *strictInteriorPixels, unsigned char *borderPixels, const size_t numColsSource, const size_t numRowsSource) { int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; if (c < 1 || c >= (numColsSource - 1) || r < 1 || r >= (numRowsSource - 1)) return; if (mask[r * numColsSource + c]) { if (mask[(r - 1) * numColsSource + c] && mask[(r + 1) * numColsSource + c] && mask[r * numColsSource + c - 1] && mask[r * numColsSource + c + 1]) { strictInteriorPixels[r * numColsSource + c] = 1; borderPixels[r * numColsSource + c] = 0; } else { strictInteriorPixels[r * numColsSource + c] = 0; borderPixels[r * numColsSource + c] = 1; } } else { strictInteriorPixels[r * numColsSource + c] = 0; borderPixels[r * numColsSource + c] = 0; } } __global__ void compute_g(const unsigned char* const src, const unsigned char* const strictInteriorPixels, float *g, const size_t numColsSource, const size_t srcSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= srcSize) return; if(strictInteriorPixels[i]) { float sum = 4.f * src[i]; sum -= (float)src[i-1] + (float)src[i+1]; sum -= (float)src[i+numColsSource] + (float)src[i-numColsSource]; g[i] = sum; } } __global__ void init_buffer(const unsigned char* const src, float *buffer_1, float *buffer_2, const size_t srcSize) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= srcSize) return; 
// --- tail of init_buffer(); the definition begins above this chunk ---
buffer_1[i] = src[i]; buffer_2[i] = src[i]; }

// One Jacobi relaxation step of the Poisson blend for a single colour channel.
//   dst : destination-image channel (fixed boundary values)
//   f   : current guess (one float per pixel); f_next : next guess (output)
//   g   : precomputed source-gradient sum per pixel
// Only strict-interior pixels are updated; their 4 neighbours are in the mask
// by definition, so the neighbour reads stay inside the image.
// NOTE(review): borderPixels is accepted but never read here.
__global__ void compute_iteration(const unsigned char* const dst,
                                  const unsigned char* const strictInteriorPixels,
                                  const unsigned char* const borderPixels,
                                  const size_t numColsSource,
                                  const float* const f,
                                  const float* const g,
                                  float* const f_next,
                                  const size_t srcSize)
{
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  if (i >= srcSize) return;

  if (strictInteriorPixels[i]) {
    float blendedSum = 0.f;  // contribution of interior neighbours (current guess)
    float borderSum  = 0.f;  // contribution of border neighbours (destination image)

    if (strictInteriorPixels[i - 1]) blendedSum += f[i - 1];
    else                             borderSum  += dst[i - 1];

    if (strictInteriorPixels[i + 1]) blendedSum += f[i + 1];
    else                             borderSum  += dst[i + 1];

    if (strictInteriorPixels[i + numColsSource]) blendedSum += f[i + numColsSource];
    else                                         borderSum  += dst[i + numColsSource];

    if (strictInteriorPixels[i - numColsSource]) blendedSum += f[i - numColsSource];
    else                                         borderSum  += dst[i - numColsSource];

    float f_next_val = (blendedSum + borderSum + g[i]) / 4.f;
    f_next[i] = min(255.f, max(0.f, f_next_val));  // clamp to a valid byte value
  }
}

// Element-wise swap of two blend buffers.
// (Unused by your_blend below, which swaps the pointers on the host instead.)
__global__ void swap_blended(float *const blender1, float *const blender2,
                             const size_t srcSize)
{
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  if (i >= srcSize) return;

  float tmp = blender1[i];
  blender1[i] = blender2[i];
  blender2[i] = tmp;
}

// Write the blended float channels back into the interior pixels of the
// output image (exterior pixels keep the destination-image values).
// BUG FIX: the original stored blendedBlue into .y and blendedGreen into .z
// (swapped w.r.t. the parameter names), and cast through signed (char) even
// though the values are clamped to [0, 255] and uchar4 fields are unsigned.
// NOTE(review): assumes uchar4 layout x=R, y=G, z=B — confirm against
// init_rgb (defined earlier in this file, outside this chunk).
__global__ void copy_blended(uchar4 *const d_blendedImg,
                             const float *const blendedRed,
                             const float *const blendedGreen,
                             const float *const blendedBlue,
                             const unsigned char* const strictInteriorPixels,
                             const size_t srcSize)
{
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  if (i >= srcSize) return;

  if (strictInteriorPixels[i]) {
    d_blendedImg[i].x = (unsigned char)blendedRed[i];
    d_blendedImg[i].y = (unsigned char)blendedGreen[i];
    d_blendedImg[i].z = (unsigned char)blendedBlue[i];
  }
}

// Poisson ("seamless") blending of h_sourceImg into h_destImg.
// All pointer parameters are HOST pointers; device memory is allocated,
// used and freed entirely inside this function.
void your_blend(const uchar4 *const h_sourceImg,   // IN
                const size_t numRowsSource,
                const size_t numColsSource,
                const uchar4 *const h_destImg,     // IN
                uchar4 *const h_blendedImg)        // OUT
{
  /* Pipeline:
       1) mask   = non-white pixels of the source image,
       2) split the mask into strict-interior and border pixels,
       3) split source/destination into R,G,B channels and precompute g,
       4) per-channel float ping-pong buffers, initialised from the source,
       5) 800 Jacobi iterations per channel,
       6) copy interior results over the destination image. */
  const size_t srcSize = numRowsSource * numColsSource;
  const int blocks1D = (srcSize + TPB - 1) / TPB;  // ceil-div for 1D kernels
  dim3 blockDim(16, 16, 1);
  dim3 gridDim((numColsSource + blockDim.x - 1) / blockDim.x,
               (numRowsSource + blockDim.y - 1) / blockDim.y);

  // step 0: copy both images host -> device
  uchar4 *d_sourceImg, *d_destImg, *d_blendedImg;
  cudaMalloc((void **)&d_sourceImg,  sizeof(uchar4) * srcSize);
  cudaMalloc((void **)&d_destImg,    sizeof(uchar4) * srcSize);
  cudaMalloc((void **)&d_blendedImg, sizeof(uchar4) * srcSize);
  cudaMemcpy(d_sourceImg, h_sourceImg, sizeof(uchar4) * srcSize, cudaMemcpyHostToDevice);
  cudaMemcpy(d_destImg,   h_destImg,   sizeof(uchar4) * srcSize, cudaMemcpyHostToDevice);

  // step 1: mask of pixels to copy
  unsigned char *mask, *borderPixels, *strictInteriorPixels;
  cudaMalloc((void **)&mask,                 sizeof(unsigned char) * srcSize);
  cudaMalloc((void **)&borderPixels,         sizeof(unsigned char) * srcSize);
  cudaMalloc((void **)&strictInteriorPixels, sizeof(unsigned char) * srcSize);
  init_mask<<<blocks1D, TPB>>>(d_sourceImg, mask, srcSize);

  // step 2: strict-interior / border regions.
  // BUG FIX: the execution configuration is <<<grid, block>>>; the original
  // launched compute_regions with <<<blockDim, gridDim>>> (arguments swapped).
  compute_regions<<<gridDim, blockDim>>>(
      mask, strictInteriorPixels, borderPixels, numColsSource, numRowsSource);

  // step 3: channel split and g terms
  unsigned char *red_src, *blue_src, *green_src;
  unsigned char *red_dst, *blue_dst, *green_dst;
  cudaMalloc((void **)&red_src,   sizeof(unsigned char) * srcSize);
  cudaMalloc((void **)&blue_src,  sizeof(unsigned char) * srcSize);
  cudaMalloc((void **)&green_src, sizeof(unsigned char) * srcSize);
  cudaMalloc((void **)&red_dst,   sizeof(unsigned char) * srcSize);
  cudaMalloc((void **)&blue_dst,  sizeof(unsigned char) * srcSize);
  cudaMalloc((void **)&green_dst, sizeof(unsigned char) * srcSize);
  init_rgb<<<blocks1D, TPB>>>(d_sourceImg, red_src, green_src, blue_src,
                              d_destImg,   red_dst, green_dst, blue_dst, srcSize);

  float *g_red, *g_green, *g_blue;
  cudaMalloc((void **)&g_red,   sizeof(float) * srcSize);
  cudaMalloc((void **)&g_green, sizeof(float) * srcSize);
  cudaMalloc((void **)&g_blue,  sizeof(float) * srcSize);
  // cudaMemset takes an int byte value; byte 0 also fills with 0.0f
  // (the original passed the float literal 0.f here).
  cudaMemset(g_red,   0, sizeof(float) * srcSize);
  cudaMemset(g_green, 0, sizeof(float) * srcSize);
  cudaMemset(g_blue,  0, sizeof(float) * srcSize);
  compute_g<<<blocks1D, TPB>>>(red_src,   strictInteriorPixels, g_red,   numColsSource, srcSize);
  compute_g<<<blocks1D, TPB>>>(blue_src,  strictInteriorPixels, g_blue,  numColsSource, srcSize);
  compute_g<<<blocks1D, TPB>>>(green_src, strictInteriorPixels, g_green, numColsSource, srcSize);

  // step 4: ping-pong buffers, both initialised with the source channel
  float *blendedValsRed_1,   *blendedValsRed_2;
  float *blendedValsBlue_1,  *blendedValsBlue_2;
  float *blendedValsGreen_1, *blendedValsGreen_2;
  cudaMalloc((void **)&blendedValsRed_1,   sizeof(float) * srcSize);
  cudaMalloc((void **)&blendedValsRed_2,   sizeof(float) * srcSize);
  cudaMalloc((void **)&blendedValsBlue_1,  sizeof(float) * srcSize);
  cudaMalloc((void **)&blendedValsBlue_2,  sizeof(float) * srcSize);
  cudaMalloc((void **)&blendedValsGreen_1, sizeof(float) * srcSize);
  cudaMalloc((void **)&blendedValsGreen_2, sizeof(float) * srcSize);
  init_buffer<<<blocks1D, TPB>>>(red_src,   blendedValsRed_1,   blendedValsRed_2,   srcSize);
  init_buffer<<<blocks1D, TPB>>>(green_src, blendedValsGreen_1, blendedValsGreen_2, srcSize);
  init_buffer<<<blocks1D, TPB>>>(blue_src,  blendedValsBlue_1,  blendedValsBlue_2,  srcSize);

  // step 5: Jacobi iterations.  After every step the host-side pointer swap
  // moves the newest values back into the *_1 pointers (no device copy).
  const size_t numIterations = 800;
  for (size_t i = 0; i < numIterations; ++i) {
    compute_iteration<<<blocks1D, TPB>>>(red_dst, strictInteriorPixels, borderPixels,
                                         numColsSource, blendedValsRed_1, g_red,
                                         blendedValsRed_2, srcSize);
    std::swap(blendedValsRed_1, blendedValsRed_2);

    compute_iteration<<<blocks1D, TPB>>>(green_dst, strictInteriorPixels, borderPixels,
                                         numColsSource, blendedValsGreen_1, g_green,
                                         blendedValsGreen_2, srcSize);
    std::swap(blendedValsGreen_1, blendedValsGreen_2);

    compute_iteration<<<blocks1D, TPB>>>(blue_dst, strictInteriorPixels, borderPixels,
                                         numColsSource, blendedValsBlue_1, g_blue,
                                         blendedValsBlue_2, srcSize);
    std::swap(blendedValsBlue_1, blendedValsBlue_2);
  }

  // step 6: the final values live in the *_1 buffers; composite over the
  // destination image and copy back.  (The original first copied *_1 into
  // *_2 with three device-to-device memcpys — an unnecessary round trip.)
  cudaMemcpy(d_blendedImg, d_destImg, sizeof(uchar4) * srcSize, cudaMemcpyDeviceToDevice);
  copy_blended<<<blocks1D, TPB>>>(d_blendedImg, blendedValsRed_1, blendedValsGreen_1,
                                  blendedValsBlue_1, strictInteriorPixels, srcSize);
  cudaMemcpy(h_blendedImg, d_blendedImg, sizeof(uchar4) * srcSize, cudaMemcpyDeviceToHost);

  // cleanup
  cudaFree(mask); cudaFree(borderPixels); cudaFree(strictInteriorPixels);
  cudaFree(red_src); cudaFree(green_src); cudaFree(blue_src);
  cudaFree(red_dst); cudaFree(green_dst); cudaFree(blue_dst);
  cudaFree(g_red); cudaFree(g_green); cudaFree(g_blue);
  cudaFree(blendedValsRed_1); cudaFree(blendedValsRed_2);
  cudaFree(blendedValsGreen_1); cudaFree(blendedValsGreen_2);
  cudaFree(blendedValsBlue_1); cudaFree(blendedValsBlue_2);
  cudaFree(d_sourceImg); cudaFree(d_destImg); cudaFree(d_blendedImg);
}
a42efbbf9dac29d6e357ef147cd8fa26a582d1c7.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * @brief Utilities for creating FSAs.
 *
 * Note that serializations are done in Python.
 *
 * @copyright
 * Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
 *                                          Guoguo Chen
 *
 * @copyright
 * See LICENSE for clarification regarding multiple authors
 */

#include <algorithm>
#include <sstream>
#include <utility>
#include <vector>

#include "k2/csrc/context.h"
#include "k2/csrc/fsa_utils.h"

namespace k2 {

// Field separator within a line for a text-form FSA.
static constexpr const char *kDelim = " \t";

// Convert a string to an integer. Aborts the program (via K2_CHECK) on
// failure, including out-of-range values.
static int32_t StringToInt(const std::string &s) {
  K2_CHECK(!s.empty());
  bool ok = false;
  char *p = nullptr;
  // std::strtol requires a `long` type
  long n = std::strtol(s.c_str(), &p, 10);  // NOLINT
  if (*p == '\0') ok = true;  // entire string consumed -> valid integer
  auto res = static_cast<int32_t>(n);
  if (n != res) ok = false;  // out of range for int32_t
  K2_CHECK(ok) << "Failed to convert " << s << " to an integer";
  return res;
}

// Convert a string to a float. Aborts the program on failure.
static float StringToFloat(const std::string &s) {
  K2_CHECK(!s.empty());
  char *p = nullptr;
  float f = std::strtof(s.c_str(), &p);
  if (*p != '\0') K2_LOG(FATAL) << "Failed to convert " << s << " to a float";
  return f;
}

// Trim leading and trailing spaces of a string, in place.
static void TrimString(std::string *s) {
  K2_CHECK_NE(s, nullptr);
  auto not_space = [](int32_t c) -> bool { return std::isspace(c) == 0; };

  s->erase(s->begin(), std::find_if(s->begin(), s->end(), not_space));
  s->erase(std::find_if(s->rbegin(), s->rend(), not_space).base(), s->end());
}

/* Split a string to a vector of strings using a set of delimiters.

   Example usage:

   @code
    std::string in = "1 2 3";
    const char *delim = " \t";
    std::vector<std::string> out;
    SplitStringToVector(in, delim, &out);
   @endcode

   @param [in]  in    The input string to be split.
   @param [in]  delim A string of delimiters.
   @param [out] out   It saves the split result.  Empty fields are dropped.
*/
static void SplitStringToVector(const std::string &in, const char *delim,
                                std::vector<std::string> *out) {
  K2_CHECK_NE(delim, nullptr);
  K2_CHECK_NE(out, nullptr);
  out->clear();
  std::size_t start = 0;
  while (true) {
    auto pos = in.find_first_of(delim, start);
    if (pos == std::string::npos) break;

    auto sub = in.substr(start, pos - start);
    start = pos + 1;

    TrimString(&sub);
    if (!sub.empty()) out->emplace_back(std::move(sub));
  }

  if (start < in.size()) {
    auto sub = in.substr(start);
    TrimString(&sub);
    if (!sub.empty()) out->emplace_back(std::move(sub));
  }
}

// Create an acceptor from a stream.  `first_line` is the already-consumed
// first line of the text; the remaining lines are read from `is`.
// When `openfst` is true, scores are negated and multiple weighted final
// states are allowed (a super-final state is synthesized below).
static Fsa AcceptorFromStream(std::string first_line, std::istringstream &is,
                              bool openfst) {
  std::vector<Arc> arcs;
  std::string line = std::move(first_line);
  std::vector<std::string> splits;
  float scale = 1;
  if (openfst) scale = -1;  // OpenFst weights are costs; negate into scores

  int32_t max_state = -1;
  std::vector<int32_t> original_final_states;
  std::vector<float> original_final_weights;
  do {
    SplitStringToVector(line, kDelim,
                        &splits);  // splits is cleared in the function
    if (splits.empty()) continue;  // this is an empty line

    auto num_fields = splits.size();
    if (num_fields == 4u) {
      //   0            1          2      3
      // src_state  dest_state  symbol  score
      int32_t src_state = StringToInt(splits[0]);
      int32_t dest_state = StringToInt(splits[1]);
      int32_t symbol = StringToInt(splits[2]);
      float score = scale * StringToFloat(splits[3]);
      arcs.emplace_back(src_state, dest_state, symbol, score);
      // NOTE(review): `::max` (not std::max) is a hipify artifact; resolves
      // through the HIP headers — confirm it stays valid on plain hosts.
      max_state = ::max(max_state, ::max(src_state, dest_state));
    } else if (num_fields == 2u) {
      //   0            1
      // final_state  score
      // In this case, openfst is true. There could be multiple final states,
      // so we first have to collect all the final states, and then work out
      // the super final state.
      K2_CHECK(openfst) << "Invalid line: " << line
                        << "\nFinal state with weight detected in K2 format";
      original_final_states.push_back(StringToInt(splits[0]));
      original_final_weights.push_back(StringToFloat(splits[1]));
      max_state = ::max(max_state, original_final_states.back());
    } else if (num_fields == 1u) {
      //   0
      // final_state
      (void)StringToInt(splits[0]);  // this is a final state
      break;                         // finish reading
    } else {
      K2_LOG(FATAL) << "Invalid line: " << line
                    << "\nIt expects a line with 1, 2 or 4 fields";
    }
  } while (std::getline(is, line));

  // Post processing on final states. When openfst is true, we may have
  // multiple final states with weights associated with them. We will have to
  // add a super final state, and convert that into the K2 format (final state
  // with no weight).
  if (original_final_states.size() > 0) {
    K2_CHECK_EQ(openfst, true);
    K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
    int32_t super_final_state = max_state + 1;
    for (std::size_t i = 0; i != original_final_states.size(); ++i) {
      arcs.emplace_back(original_final_states[i], super_final_state,
                        -1,  // kFinalSymbol
                        scale * original_final_weights[i]);
    }
  }

  if (openfst) {
    // Sort arcs so that source states are in non-decreasing order.
    std::sort(arcs.begin(), arcs.end());
  }

  bool error = true;  // set to false by FsaFromArray1 on success
  Array1<Arc> array(GetCpuContext(), arcs);
  auto fsa = FsaFromArray1(array, &error);
  K2_CHECK_EQ(error, false);
  return fsa;
}

// Create a transducer from a stream; like AcceptorFromStream but each arc
// line carries an extra aux_label field, returned via `aux_labels`
// (one entry per arc, parallel to the arcs of the result).
static Fsa TransducerFromStream(std::string first_line, std::istringstream &is,
                                bool openfst, Array1<int32_t> *aux_labels) {
  K2_CHECK(aux_labels != nullptr);

  std::vector<int32_t> state_aux_labels;
  std::vector<Arc> arcs;
  std::string line = std::move(first_line);
  std::vector<std::string> splits;
  float scale = 1;
  if (openfst) scale = -1;

  int32_t max_state = -1;
  std::vector<int32_t> original_final_states;
  std::vector<float> original_final_weights;
  do {
    SplitStringToVector(line, kDelim,
                        &splits);  // splits is cleared in the function
    if (splits.empty()) continue;  // this is an empty line

    auto num_fields = splits.size();
    if (num_fields == 5u) {
      //   0            1          2         3        4
      // src_state  dest_state  symbol  aux_label  score
      int32_t src_state = StringToInt(splits[0]);
      int32_t dest_state = StringToInt(splits[1]);
      int32_t symbol = StringToInt(splits[2]);
      int32_t aux_label = StringToInt(splits[3]);
      float score = scale * StringToFloat(splits[4]);
      arcs.emplace_back(src_state, dest_state, symbol, score);
      state_aux_labels.push_back(aux_label);
      max_state = ::max(max_state, ::max(src_state, dest_state));
    } else if (num_fields == 2u) {
      //   0            1
      // final_state  score
      // In this case, openfst is true. There could be multiple final states,
      // so we first have to collect all the final states, and then work out
      // the super final state.
      K2_CHECK(openfst) << "Invalid line: " << line
                        << "\nFinal state with weight detected in K2 format";
      original_final_states.push_back(StringToInt(splits[0]));
      original_final_weights.push_back(StringToFloat(splits[1]));
      max_state = ::max(max_state, original_final_states.back());
    } else if (num_fields == 1u) {
      //   0
      // final_state
      (void)StringToInt(splits[0]);
      break;  // finish reading
    } else {
      K2_LOG(FATAL) << "Invalid line: " << line
                    << "\nIt expects a line with 1, 2 or 5 fields";
    }
  } while (std::getline(is, line));

  // Post processing on final states. When openfst is true, we may have
  // multiple final states with weights associated with them. We will have to
  // add a super final state, and convert that into the K2 format (final state
  // with no weight).
  if (original_final_states.size() > 0) {
    K2_CHECK_EQ(openfst, true);
    K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
    int32_t super_final_state = max_state + 1;
    for (std::size_t i = 0; i != original_final_states.size(); ++i) {
      arcs.emplace_back(original_final_states[i], super_final_state,
                        -1,  // kFinalSymbol
                        scale * original_final_weights[i]);
      // TODO(guoguo) We are not sure yet what to put as the auxiliary label
      //              for arcs entering the super final state. The only real
      //              choices are kEpsilon or kFinalSymbol. We are using
      //              kEpsilon for now.
      state_aux_labels.push_back(0);  // kEpsilon
    }
  }

  if (openfst) {
    // Sort arcs so that source states are in non-decreasing order. We have to
    // do this simultaneously for both arcs and auxiliary labels. The
    // following implementation makes a pair of (Arc, AuxLabel) for sorting.
    // TODO(guoguo) Optimize this when necessary.
    std::vector<std::pair<Arc, int32_t>> arcs_and_aux_labels;
    K2_CHECK_EQ(state_aux_labels.size(), arcs.size());
    arcs_and_aux_labels.resize(arcs.size());
    for (std::size_t i = 0; i < arcs.size(); ++i) {
      arcs_and_aux_labels[i] = std::make_pair(arcs[i], state_aux_labels[i]);
    }
    // Default pair comparison should work for us.
    std::sort(arcs_and_aux_labels.begin(), arcs_and_aux_labels.end());
    for (std::size_t i = 0; i < arcs.size(); ++i) {
      arcs[i] = arcs_and_aux_labels[i].first;
      state_aux_labels[i] = arcs_and_aux_labels[i].second;
    }
  }

  auto cpu_context = GetCpuContext();
  *aux_labels = Array1<int32_t>(cpu_context, state_aux_labels);
  Array1<Arc> array(cpu_context, arcs);
  bool error = true;
  auto fsa = FsaFromArray1(array, &error);
  K2_CHECK_EQ(error, false);
  return fsa;
}

// Parse a text-form FSA. Dispatches on the number of fields in the first
// line: 4 -> acceptor, 5 -> transducer (aux_labels filled); aborts otherwise.
Fsa FsaFromString(const std::string &s, bool openfst /*= false*/,
                  Array1<int32_t> *aux_labels /*= nullptr*/) {
  std::istringstream is(s);
  std::string line;
  std::getline(is, line);
  K2_CHECK(is);

  std::vector<std::string> splits;
  SplitStringToVector(line, kDelim, &splits);
  auto num_fields = splits.size();
  if (num_fields == 4u)
    return AcceptorFromStream(std::move(line), is, openfst);
  else if (num_fields == 5u)
    return TransducerFromStream(std::move(line), is, openfst, aux_labels);

  K2_LOG(FATAL) << "Expected number of fields: 4 or 5."
                << "Actual: " << num_fields << "\n"
                << "First line is: " << line;

  return Fsa();  // unreachable code
}

// Serialize a CPU FSA to the text form accepted by FsaFromString.
// When `openfst` is true, scores are negated back into OpenFst costs.
std::string FsaToString(const Fsa &fsa, bool openfst /*= false*/,
                        const Array1<int32_t> *aux_labels /*= nullptr*/) {
  K2_CHECK_EQ(fsa.NumAxes(), 2);
  K2_CHECK_EQ(fsa.Context()->GetDeviceType(), kCpu);
  // NOTE(review): row_splits is fetched but not used below — candidate for
  // removal.
  const Array1<int32_t> &row_splits = fsa.shape.RowSplits(1);
  const Array1<Arc> &arcs = fsa.values;

  const int32_t *p = nullptr;
  if (aux_labels != nullptr) {
    K2_CHECK(IsCompatible(fsa, *aux_labels));
    K2_CHECK_EQ(aux_labels->Dim(), arcs.Dim());
    p = aux_labels->Data();
  }
  float scale = 1;
  if (openfst) scale = -1;

  std::ostringstream os;

  int32_t n = arcs.Dim();
  char sep = ' ';
  char line_sep = '\n';
  for (int32_t i = 0; i != n; ++i) {
    const auto &arc = arcs[i];
    os << arc.src_state << sep << arc.dest_state << sep << arc.symbol << sep;
    if (p != nullptr) os << p[i] << sep;
    os << (scale * arc.score) << line_sep;
  }
  // Final state index (states are contiguous; the last one is final).
  os << (fsa.shape.Dim0() - 1) << line_sep;
  return os.str();
}

}  // namespace k2
a42efbbf9dac29d6e357ef147cd8fa26a582d1c7.cu
/**
 * @brief Utilities for creating FSAs.
 *
 * Note that serializations are done in Python.
 *
 * @copyright
 * Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
 *                                          Guoguo Chen
 *
 * @copyright
 * See LICENSE for clarification regarding multiple authors
 */

#include <algorithm>
#include <sstream>
#include <utility>
#include <vector>

#include "k2/csrc/context.h"
#include "k2/csrc/fsa_utils.h"

namespace k2 {

// Field separator within a line for a text-form FSA.
static constexpr const char *kDelim = " \t";

// Convert a string to an integer. Aborts the program (via K2_CHECK) on
// failure, including out-of-range values.
static int32_t StringToInt(const std::string &s) {
  K2_CHECK(!s.empty());
  bool ok = false;
  char *p = nullptr;
  // std::strtol requires a `long` type
  long n = std::strtol(s.c_str(), &p, 10);  // NOLINT
  if (*p == '\0') ok = true;  // entire string consumed -> valid integer
  auto res = static_cast<int32_t>(n);
  if (n != res) ok = false;  // out of range for int32_t
  K2_CHECK(ok) << "Failed to convert " << s << " to an integer";
  return res;
}

// Convert a string to a float. Aborts the program on failure.
static float StringToFloat(const std::string &s) {
  K2_CHECK(!s.empty());
  char *p = nullptr;
  float f = std::strtof(s.c_str(), &p);
  if (*p != '\0') K2_LOG(FATAL) << "Failed to convert " << s << " to a float";
  return f;
}

// Trim leading and trailing spaces of a string, in place.
static void TrimString(std::string *s) {
  K2_CHECK_NE(s, nullptr);
  auto not_space = [](int32_t c) -> bool { return std::isspace(c) == 0; };

  s->erase(s->begin(), std::find_if(s->begin(), s->end(), not_space));
  s->erase(std::find_if(s->rbegin(), s->rend(), not_space).base(), s->end());
}

/* Split a string to a vector of strings using a set of delimiters.

   Example usage:

   @code
    std::string in = "1 2 3";
    const char *delim = " \t";
    std::vector<std::string> out;
    SplitStringToVector(in, delim, &out);
   @endcode

   @param [in]  in    The input string to be split.
   @param [in]  delim A string of delimiters.
   @param [out] out   It saves the split result.  Empty fields are dropped.
*/
static void SplitStringToVector(const std::string &in, const char *delim,
                                std::vector<std::string> *out) {
  K2_CHECK_NE(delim, nullptr);
  K2_CHECK_NE(out, nullptr);
  out->clear();
  std::size_t start = 0;
  while (true) {
    auto pos = in.find_first_of(delim, start);
    if (pos == std::string::npos) break;

    auto sub = in.substr(start, pos - start);
    start = pos + 1;

    TrimString(&sub);
    if (!sub.empty()) out->emplace_back(std::move(sub));
  }

  if (start < in.size()) {
    auto sub = in.substr(start);
    TrimString(&sub);
    if (!sub.empty()) out->emplace_back(std::move(sub));
  }
}

// Create an acceptor from a stream.  `first_line` is the already-consumed
// first line of the text; the remaining lines are read from `is`.
// When `openfst` is true, scores are negated and multiple weighted final
// states are allowed (a super-final state is synthesized below).
static Fsa AcceptorFromStream(std::string first_line, std::istringstream &is,
                              bool openfst) {
  std::vector<Arc> arcs;
  std::string line = std::move(first_line);
  std::vector<std::string> splits;
  float scale = 1;
  if (openfst) scale = -1;  // OpenFst weights are costs; negate into scores

  int32_t max_state = -1;
  std::vector<int32_t> original_final_states;
  std::vector<float> original_final_weights;
  do {
    SplitStringToVector(line, kDelim,
                        &splits);  // splits is cleared in the function
    if (splits.empty()) continue;  // this is an empty line

    auto num_fields = splits.size();
    if (num_fields == 4u) {
      //   0            1          2      3
      // src_state  dest_state  symbol  score
      int32_t src_state = StringToInt(splits[0]);
      int32_t dest_state = StringToInt(splits[1]);
      int32_t symbol = StringToInt(splits[2]);
      float score = scale * StringToFloat(splits[3]);
      arcs.emplace_back(src_state, dest_state, symbol, score);
      max_state = std::max(max_state, std::max(src_state, dest_state));
    } else if (num_fields == 2u) {
      //   0            1
      // final_state  score
      // In this case, openfst is true. There could be multiple final states,
      // so we first have to collect all the final states, and then work out
      // the super final state.
      K2_CHECK(openfst) << "Invalid line: " << line
                        << "\nFinal state with weight detected in K2 format";
      original_final_states.push_back(StringToInt(splits[0]));
      original_final_weights.push_back(StringToFloat(splits[1]));
      max_state = std::max(max_state, original_final_states.back());
    } else if (num_fields == 1u) {
      //   0
      // final_state
      (void)StringToInt(splits[0]);  // this is a final state
      break;                         // finish reading
    } else {
      K2_LOG(FATAL) << "Invalid line: " << line
                    << "\nIt expects a line with 1, 2 or 4 fields";
    }
  } while (std::getline(is, line));

  // Post processing on final states. When openfst is true, we may have
  // multiple final states with weights associated with them. We will have to
  // add a super final state, and convert that into the K2 format (final state
  // with no weight).
  if (original_final_states.size() > 0) {
    K2_CHECK_EQ(openfst, true);
    K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
    int32_t super_final_state = max_state + 1;
    for (std::size_t i = 0; i != original_final_states.size(); ++i) {
      arcs.emplace_back(original_final_states[i], super_final_state,
                        -1,  // kFinalSymbol
                        scale * original_final_weights[i]);
    }
  }

  if (openfst) {
    // Sort arcs so that source states are in non-decreasing order.
    std::sort(arcs.begin(), arcs.end());
  }

  bool error = true;  // set to false by FsaFromArray1 on success
  Array1<Arc> array(GetCpuContext(), arcs);
  auto fsa = FsaFromArray1(array, &error);
  K2_CHECK_EQ(error, false);
  return fsa;
}

// Create a transducer from a stream; like AcceptorFromStream but each arc
// line carries an extra aux_label field, returned via `aux_labels`
// (one entry per arc, parallel to the arcs of the result).
static Fsa TransducerFromStream(std::string first_line, std::istringstream &is,
                                bool openfst, Array1<int32_t> *aux_labels) {
  K2_CHECK(aux_labels != nullptr);

  std::vector<int32_t> state_aux_labels;
  std::vector<Arc> arcs;
  std::string line = std::move(first_line);
  std::vector<std::string> splits;
  float scale = 1;
  if (openfst) scale = -1;

  int32_t max_state = -1;
  std::vector<int32_t> original_final_states;
  std::vector<float> original_final_weights;
  do {
    SplitStringToVector(line, kDelim,
                        &splits);  // splits is cleared in the function
    if (splits.empty()) continue;  // this is an empty line

    auto num_fields = splits.size();
    if (num_fields == 5u) {
      //   0            1          2         3        4
      // src_state  dest_state  symbol  aux_label  score
      int32_t src_state = StringToInt(splits[0]);
      int32_t dest_state = StringToInt(splits[1]);
      int32_t symbol = StringToInt(splits[2]);
      int32_t aux_label = StringToInt(splits[3]);
      float score = scale * StringToFloat(splits[4]);
      arcs.emplace_back(src_state, dest_state, symbol, score);
      state_aux_labels.push_back(aux_label);
      max_state = std::max(max_state, std::max(src_state, dest_state));
    } else if (num_fields == 2u) {
      //   0            1
      // final_state  score
      // In this case, openfst is true. There could be multiple final states,
      // so we first have to collect all the final states, and then work out
      // the super final state.
      K2_CHECK(openfst) << "Invalid line: " << line
                        << "\nFinal state with weight detected in K2 format";
      original_final_states.push_back(StringToInt(splits[0]));
      original_final_weights.push_back(StringToFloat(splits[1]));
      max_state = std::max(max_state, original_final_states.back());
    } else if (num_fields == 1u) {
      //   0
      // final_state
      (void)StringToInt(splits[0]);
      break;  // finish reading
    } else {
      K2_LOG(FATAL) << "Invalid line: " << line
                    << "\nIt expects a line with 1, 2 or 5 fields";
    }
  } while (std::getline(is, line));

  // Post processing on final states. When openfst is true, we may have
  // multiple final states with weights associated with them. We will have to
  // add a super final state, and convert that into the K2 format (final state
  // with no weight).
  if (original_final_states.size() > 0) {
    K2_CHECK_EQ(openfst, true);
    K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
    int32_t super_final_state = max_state + 1;
    for (std::size_t i = 0; i != original_final_states.size(); ++i) {
      arcs.emplace_back(original_final_states[i], super_final_state,
                        -1,  // kFinalSymbol
                        scale * original_final_weights[i]);
      // TODO(guoguo) We are not sure yet what to put as the auxiliary label
      //              for arcs entering the super final state. The only real
      //              choices are kEpsilon or kFinalSymbol. We are using
      //              kEpsilon for now.
      state_aux_labels.push_back(0);  // kEpsilon
    }
  }

  if (openfst) {
    // Sort arcs so that source states are in non-decreasing order. We have to
    // do this simultaneously for both arcs and auxiliary labels. The
    // following implementation makes a pair of (Arc, AuxLabel) for sorting.
    // TODO(guoguo) Optimize this when necessary.
    std::vector<std::pair<Arc, int32_t>> arcs_and_aux_labels;
    K2_CHECK_EQ(state_aux_labels.size(), arcs.size());
    arcs_and_aux_labels.resize(arcs.size());
    for (std::size_t i = 0; i < arcs.size(); ++i) {
      arcs_and_aux_labels[i] = std::make_pair(arcs[i], state_aux_labels[i]);
    }
    // Default pair comparison should work for us.
    std::sort(arcs_and_aux_labels.begin(), arcs_and_aux_labels.end());
    for (std::size_t i = 0; i < arcs.size(); ++i) {
      arcs[i] = arcs_and_aux_labels[i].first;
      state_aux_labels[i] = arcs_and_aux_labels[i].second;
    }
  }

  auto cpu_context = GetCpuContext();
  *aux_labels = Array1<int32_t>(cpu_context, state_aux_labels);
  Array1<Arc> array(cpu_context, arcs);
  bool error = true;
  auto fsa = FsaFromArray1(array, &error);
  K2_CHECK_EQ(error, false);
  return fsa;
}

// Parse a text-form FSA. Dispatches on the number of fields in the first
// line: 4 -> acceptor, 5 -> transducer (aux_labels filled); aborts otherwise.
Fsa FsaFromString(const std::string &s, bool openfst /*= false*/,
                  Array1<int32_t> *aux_labels /*= nullptr*/) {
  std::istringstream is(s);
  std::string line;
  std::getline(is, line);
  K2_CHECK(is);

  std::vector<std::string> splits;
  SplitStringToVector(line, kDelim, &splits);
  auto num_fields = splits.size();
  if (num_fields == 4u)
    return AcceptorFromStream(std::move(line), is, openfst);
  else if (num_fields == 5u)
    return TransducerFromStream(std::move(line), is, openfst, aux_labels);

  K2_LOG(FATAL) << "Expected number of fields: 4 or 5."
                << "Actual: " << num_fields << "\n"
                << "First line is: " << line;

  return Fsa();  // unreachable code
}

// Serialize a CPU FSA to the text form accepted by FsaFromString.
// When `openfst` is true, scores are negated back into OpenFst costs.
std::string FsaToString(const Fsa &fsa, bool openfst /*= false*/,
                        const Array1<int32_t> *aux_labels /*= nullptr*/) {
  K2_CHECK_EQ(fsa.NumAxes(), 2);
  K2_CHECK_EQ(fsa.Context()->GetDeviceType(), kCpu);
  // NOTE(review): row_splits is fetched but not used below — candidate for
  // removal.
  const Array1<int32_t> &row_splits = fsa.shape.RowSplits(1);
  const Array1<Arc> &arcs = fsa.values;

  const int32_t *p = nullptr;
  if (aux_labels != nullptr) {
    K2_CHECK(IsCompatible(fsa, *aux_labels));
    K2_CHECK_EQ(aux_labels->Dim(), arcs.Dim());
    p = aux_labels->Data();
  }
  float scale = 1;
  if (openfst) scale = -1;

  std::ostringstream os;

  int32_t n = arcs.Dim();
  char sep = ' ';
  char line_sep = '\n';
  for (int32_t i = 0; i != n; ++i) {
    const auto &arc = arcs[i];
    os << arc.src_state << sep << arc.dest_state << sep << arc.symbol << sep;
    if (p != nullptr) os << p[i] << sep;
    os << (scale * arc.score) << line_sep;
  }
  // Final state index (states are contiguous; the last one is final).
  os << (fsa.shape.Dim0() - 1) << line_sep;
  return os.str();
}

}  // namespace k2
35ac0e7cd6905196c533d553d51b188abd7a63a6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include "SpecialFunctions.h"
#include "Random.h"

/*------------ Uses the HIP (hipRAND) random number generator -------- */

// Seed one generator state per block; launch pattern uses tid = blockIdx.x
// (one worker thread per block).
__global__ void init_random(unsigned long long *seed, hiprandState_t *global_state){
    int tid = blockIdx.x;
    unsigned long long local_seed = seed[tid];
    hiprandState_t local_state;
    local_state = global_state[tid];
    hiprand_init(local_seed, tid, 0, &local_state);
    global_state[tid] = local_state;
}

/*--------------------------------------*/
// Raw 32-bit draw per block, stored as a double.
__global__ void random(double *x, hiprandState_t *global_state){
    int tid = blockIdx.x;
    hiprandState_t local_state;
    local_state = global_state[tid];
    x[tid] = (double) hiprand(&local_state);
    global_state[tid] = local_state;
}

/*--------------------------------------*/
// Uniform draw in (0, 1] per block.
__global__ void UniformRandom(double *x, hiprandState_t *global_state){
    int tid = blockIdx.x;
    hiprandState_t local_state;
    local_state = global_state[tid];
    x[tid] = (double) hiprand_uniform(&local_state);
    global_state[tid] = local_state;
}

/*--------------------------------------*/
// Gaussian variate with the given mean and standard deviation sigma.
__device__ double Gaussian(double mean, double sigma, hiprandState_t *mystate){
    double xx = (double) hiprand_normal(mystate);
    double yy = mean + sigma * xx;
    return yy;
}

/*--------------------------------------*/
// Poisson variate with mean xmean.
// Small means (< 12): product-of-uniforms method.
// Large means: rejection sampling with a Lorentzian envelope (uses LnGamma
// from SpecialFunctions.h).
__device__ double Poisson(double xmean, hiprandState_t *mystate){
    double reject_factor = 0.9, reject;
    double pi;
    pi = 4.*atan(1.);
    double x, xcomp;
    if (xmean < 12.){
        // Multiply uniforms until the product drops below exp(-xmean).
        x = -1.;
        double exp_nxm = exp(-xmean);
        double uni_var_product = 1.;
        while (uni_var_product > exp_nxm){
            x = x + 1.;
            double rand = (double) hiprand_uniform(mystate);
            uni_var_product = uni_var_product * rand;
        }
    } else {
        double sq = sqrt(2.0*xmean);
        double log_xmean = log(xmean);
        double GG = xmean*log_xmean - LnGamma(xmean + 1.0);
        do {
            do {
                double rand = (double) hiprand_uniform(mystate);
                xcomp = tan(pi*rand);
                // FIX: use the precomputed sq; the original recomputed
                // sqrt(2*xmean) on every trial and left sq unused.
                x = sq*xcomp + xmean;
            } while (x < 0.0);
            x = floor(x);
            reject = reject_factor*(1.0 + xcomp*xcomp)
                     * exp(x*log_xmean - LnGamma(x + 1.0) - GG);
        } while (hiprand_uniform(mystate) > reject);
    }
    return x;
}
35ac0e7cd6905196c533d553d51b188abd7a63a6.cu
#include <stdio.h>
#include <math.h>
#include "SpecialFunctions.h"
#include "Random.h"

/*------------ Uses CUDA (cuRAND) random number generator -------- */

// Seed one generator state per block; launch pattern uses tid = blockIdx.x
// (one worker thread per block).
__global__ void init_random(unsigned long long *seed, curandState *global_state){
    int tid = blockIdx.x;
    unsigned long long local_seed = seed[tid];
    curandState local_state;
    local_state = global_state[tid];
    curand_init(local_seed, tid, 0, &local_state);
    global_state[tid] = local_state;
}

/*--------------------------------------*/
// Raw 32-bit draw per block, stored as a double.
__global__ void random(double *x, curandState *global_state){
    int tid = blockIdx.x;
    curandState local_state;
    local_state = global_state[tid];
    x[tid] = (double) curand(&local_state);
    global_state[tid] = local_state;
}

/*--------------------------------------*/
// Uniform draw in (0, 1] per block.
__global__ void UniformRandom(double *x, curandState *global_state){
    int tid = blockIdx.x;
    curandState local_state;
    local_state = global_state[tid];
    x[tid] = (double) curand_uniform(&local_state);
    global_state[tid] = local_state;
}

/*--------------------------------------*/
// Gaussian variate with the given mean and standard deviation sigma.
__device__ double Gaussian(double mean, double sigma, curandState *mystate){
    double xx = (double) curand_normal(mystate);
    double yy = mean + sigma * xx;
    return yy;
}

/*--------------------------------------*/
// Poisson variate with mean xmean.
// Small means (< 12): product-of-uniforms method.
// Large means: rejection sampling with a Lorentzian envelope (uses LnGamma
// from SpecialFunctions.h).
__device__ double Poisson(double xmean, curandState *mystate){
    double reject_factor = 0.9, reject;
    double pi;
    pi = 4.*atan(1.);
    double x, xcomp;
    if (xmean < 12.){
        // Multiply uniforms until the product drops below exp(-xmean).
        x = -1.;
        double exp_nxm = exp(-xmean);
        double uni_var_product = 1.;
        while (uni_var_product > exp_nxm){
            x = x + 1.;
            double rand = (double) curand_uniform(mystate);
            uni_var_product = uni_var_product * rand;
        }
    } else {
        double sq = sqrt(2.0*xmean);
        double log_xmean = log(xmean);
        double GG = xmean*log_xmean - LnGamma(xmean + 1.0);
        do {
            do {
                double rand = (double) curand_uniform(mystate);
                xcomp = tan(pi*rand);
                // FIX: use the precomputed sq; the original recomputed
                // sqrt(2*xmean) on every trial and left sq unused.
                x = sq*xcomp + xmean;
            } while (x < 0.0);
            x = floor(x);
            reject = reject_factor*(1.0 + xcomp*xcomp)
                     * exp(x*log_xmean - LnGamma(x + 1.0) - GG);
        } while (curand_uniform(mystate) > reject);
    }
    return x;
}
d75e0116e1f889ae13f04e3698226706770b2d66.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Smith-Waterman algorithm with affine gap model // MATCH: 1; MISMATCH: -3; Gopen: -3; Gext: -2 __inline__ __device__ int3 cudadp_user_kernel(int i, int j, int3 left, int3 up, int3 diag, void *data) { struct Sequences* seq = (struct Sequences*)data; char *A = seq->dev_A; char *B = seq->dev_B; int3 result; result.x = max(left.x-Gext, left.z-Gopen); // E[i,j] result.y = max(up.y-Gext, up.z-Gopen); // F[i,j] result.z = max(0, result,x, result.y diag.z + (A[i]==B[j]?MATCH:MISMATCH)); // H[i,j] return result; } DP_DiagUpLeft sw(M, N); cudadp_start(&sw, dev_seqs); // Longest common subsequence __inline__ __device__ int cudadp_user_kernel(int i, in j, int left, int up, int diag, void *data) { struct Sequences* seq = (struct Sequences*)data; char *A = seq->dev_A; char *B = seq->dev_B; int result; result = A[i] == B[j] ? diag+1 : max(left, up); return result; } __inline__ __device__ int3 cudadp_user_kernel(int m, int n, int level, int3 *deps, void *data) { int tid = blockIdx.x * THREADS + threadIdx.x; struct Sequences* seq = (struct Sequences*)data; char *A = seq->dev_A; char *B = seq->dev_B; int3 *dep1 = deps; int3 *dep2 = &deps[min(m, n)]; int i = compute_i(m, n, level); int j = compute_j(m, n, level); // read dependencies from global memory to shared memory __shared__ int3 local_dep1[THREADS+2]; __shared__ int3 local_dep2[THREADS+2]; if(tid < min(m, n)) { local_dep1[threadIdx.x+1] = dep1[tid]; local_dep2[threadIdx.x+1] = dep2[tid]; } if(threadIdx.x == THREADS-1 && tid < min(M, N) ) { local_dep1[threadIdx.x+2] = dep1[tid+1]; local_dep2[threadIdx.x+2] = dep2[tid+1]; } __syncthreads(); int3 diag, left, up, result; if (level <= min(M-1, N-1)) { // up, depends on tid-1, tid left = local_dep2[threadIdx.x]; up = local_dep2[threadIdx.x+1]; diag = local_dep1[threadIdx.x]; } else { // middle and bottom, depends on tid, tid+1 left = local_dep2[threadIdx.x+1]; up = local_dep2[threadIdx.x+2]; 
diag = local_dep1[threadIdx.x+2]; } result.x = max(left.x-Gext, left.z-Gopen); // E[i,j] result.y = max(up.y-Gext, up.z-Gopen); // F[i,j] result.z = max(0, result.x, result.y, diag.z + (A[i]==Bj?MATCH:MISMATCH)); // H[i,j] return result; }
d75e0116e1f889ae13f04e3698226706770b2d66.cu
// Smith-Waterman algorithm with affine gap model // MATCH: 1; MISMATCH: -3; Gopen: -3; Gext: -2 __inline__ __device__ int3 cudadp_user_kernel(int i, int j, int3 left, int3 up, int3 diag, void *data) { struct Sequences* seq = (struct Sequences*)data; char *A = seq->dev_A; char *B = seq->dev_B; int3 result; result.x = max(left.x-Gext, left.z-Gopen); // E[i,j] result.y = max(up.y-Gext, up.z-Gopen); // F[i,j] result.z = max(0, result,x, result.y diag.z + (A[i]==B[j]?MATCH:MISMATCH)); // H[i,j] return result; } DP_DiagUpLeft sw(M, N); cudadp_start(&sw, dev_seqs); // Longest common subsequence __inline__ __device__ int cudadp_user_kernel(int i, in j, int left, int up, int diag, void *data) { struct Sequences* seq = (struct Sequences*)data; char *A = seq->dev_A; char *B = seq->dev_B; int result; result = A[i] == B[j] ? diag+1 : max(left, up); return result; } __inline__ __device__ int3 cudadp_user_kernel(int m, int n, int level, int3 *deps, void *data) { int tid = blockIdx.x * THREADS + threadIdx.x; struct Sequences* seq = (struct Sequences*)data; char *A = seq->dev_A; char *B = seq->dev_B; int3 *dep1 = deps; int3 *dep2 = &deps[min(m, n)]; int i = compute_i(m, n, level); int j = compute_j(m, n, level); // read dependencies from global memory to shared memory __shared__ int3 local_dep1[THREADS+2]; __shared__ int3 local_dep2[THREADS+2]; if(tid < min(m, n)) { local_dep1[threadIdx.x+1] = dep1[tid]; local_dep2[threadIdx.x+1] = dep2[tid]; } if(threadIdx.x == THREADS-1 && tid < min(M, N) ) { local_dep1[threadIdx.x+2] = dep1[tid+1]; local_dep2[threadIdx.x+2] = dep2[tid+1]; } __syncthreads(); int3 diag, left, up, result; if (level <= min(M-1, N-1)) { // up, depends on tid-1, tid left = local_dep2[threadIdx.x]; up = local_dep2[threadIdx.x+1]; diag = local_dep1[threadIdx.x]; } else { // middle and bottom, depends on tid, tid+1 left = local_dep2[threadIdx.x+1]; up = local_dep2[threadIdx.x+2]; diag = local_dep1[threadIdx.x+2]; } result.x = max(left.x-Gext, left.z-Gopen); // E[i,j] 
result.y = max(up.y-Gext, up.z-Gopen); // F[i,j] result.z = max(0, result.x, result.y, diag.z + (A[i]==Bj?MATCH:MISMATCH)); // H[i,j] return result; }
4deace64fad31875ca4413e1ce967bd176e9d4f9.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "simulator.h" #include "model.h" #include "realm/runtime_impl.h" #include "realm/cuda/cuda_module.h" Simulator::Simulator(const FFModel* model, FFHandler handler, void* _base_ptr, size_t _capacity) : base_ptr((char*)_base_ptr), capacity(_capacity), offset(0), warmup_times(5), repeat_times(10) { float inter_gpu_bandwidth = 20 * 1024 * 1024.0f; /* B/ms*/ float inter_node_bandwidth = 12 * 1024 * 1024.0f / model->config.numNodes; /* B/ms*/ float gpu_dram_bandwidth = 16 * 1024 * 1024.0f; /* B/ms*/ size_t max_num_tasks = 1024 * 1024; hipEventCreate(&start_event); hipEventCreate(&end_event); conv2d_meta = new Conv2DMeta(handler); linear_meta = new LinearMeta(handler, 4096); pool2d_meta = new Pool2DMeta(handler); int num_nodes = model->config.numNodes; int gpus_per_node = model->config.workersPerNode; total_num_devices = num_nodes * gpus_per_node; // Create GPU compute device for (int i = 0; i < num_nodes; i++) for (int j = 0; j < gpus_per_node; j++) { id_to_compute_device[i*gpus_per_node+j] = new Device(Device::DEVICE_GPU, i, i*gpus_per_node+j); } // Create inter GPU comm devices: for (int i = 0; i < total_num_devices; i++) for (int j = 0; j < total_num_devices; j++) { Device* src = id_to_compute_device[i]; Device* dst = id_to_compute_device[j]; if (src->node_id == dst->node_id && src != dst) { int hash = i * total_num_devices + j; 
ids_to_inter_gpu_comm_device[hash] = new Device(Device::DEVICE_COMM, inter_gpu_bandwidth); } } // Create gpu<->dram comm devices for (int i = 0; i < total_num_devices; i++) { id_to_gputodram_comm_device[i] = new Device(Device::DEVICE_COMM, gpu_dram_bandwidth); id_to_dramtogpu_comm_device[i] = new Device(Device::DEVICE_COMM, gpu_dram_bandwidth); } // Create inter node comm devices for (int i = 0; i < num_nodes; i++) for (int j = 0; j < num_nodes; j++) if (i != j) { int hash = i * total_num_devices + j; ids_to_inter_node_comm_device[hash] = new Device(Device::DEVICE_COMM, inter_node_bandwidth); } // Initialize task manager task_manager = new TaskManager(max_num_tasks); } __host__ void Simulator::strategy_search_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { const FFModel* model = *((FFModel**) task->args); Memory gpu_mem = Machine::MemoryQuery(Machine::get_machine()) .only_kind(Memory::GPU_FB_MEM).best_affinity_to(task->target_proc).first(); Realm::MemoryImpl* memImpl = Realm::get_runtime()->get_memory_impl(gpu_mem); Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl; off_t offset = memFBImpl->alloc_bytes_local(model->config.simulator_work_space_size); void* base_ptr = memFBImpl->get_direct_ptr(offset, 0); // Assume this task is running on GPU0 Simulator* simulator = new Simulator(model, model->handlers[0], base_ptr, model->config.simulator_work_space_size); std::map<Op*, ParallelConfig> strategies; model->optimize(simulator, strategies, model->config.search_budget, model->config.search_alpha); if (model->config.export_strategy_file.length() > 0) { std::map<Op*, ParallelConfig>::const_iterator iter; std::map<MappingTagID, ParallelConfig> strategy_output; for (iter = strategies.begin(); iter != strategies.end(); iter++) { strategy_output[FFConfig::get_hash_id(std::string(iter->first->name))] = iter->second; } save_strategies_to_file(model->config.export_strategy_file, strategy_output); } // 
Start from data memFBImpl->free_bytes_local(offset, model->config.simulator_work_space_size); delete(simulator); }
4deace64fad31875ca4413e1ce967bd176e9d4f9.cu
/* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "simulator.h" #include "model.h" #include "realm/runtime_impl.h" #include "realm/cuda/cuda_module.h" Simulator::Simulator(const FFModel* model, FFHandler handler, void* _base_ptr, size_t _capacity) : base_ptr((char*)_base_ptr), capacity(_capacity), offset(0), warmup_times(5), repeat_times(10) { float inter_gpu_bandwidth = 20 * 1024 * 1024.0f; /* B/ms*/ float inter_node_bandwidth = 12 * 1024 * 1024.0f / model->config.numNodes; /* B/ms*/ float gpu_dram_bandwidth = 16 * 1024 * 1024.0f; /* B/ms*/ size_t max_num_tasks = 1024 * 1024; cudaEventCreate(&start_event); cudaEventCreate(&end_event); conv2d_meta = new Conv2DMeta(handler); linear_meta = new LinearMeta(handler, 4096); pool2d_meta = new Pool2DMeta(handler); int num_nodes = model->config.numNodes; int gpus_per_node = model->config.workersPerNode; total_num_devices = num_nodes * gpus_per_node; // Create GPU compute device for (int i = 0; i < num_nodes; i++) for (int j = 0; j < gpus_per_node; j++) { id_to_compute_device[i*gpus_per_node+j] = new Device(Device::DEVICE_GPU, i, i*gpus_per_node+j); } // Create inter GPU comm devices: for (int i = 0; i < total_num_devices; i++) for (int j = 0; j < total_num_devices; j++) { Device* src = id_to_compute_device[i]; Device* dst = id_to_compute_device[j]; if (src->node_id == dst->node_id && src != dst) { int hash = i * total_num_devices + j; ids_to_inter_gpu_comm_device[hash] = new Device(Device::DEVICE_COMM, 
inter_gpu_bandwidth); } } // Create gpu<->dram comm devices for (int i = 0; i < total_num_devices; i++) { id_to_gputodram_comm_device[i] = new Device(Device::DEVICE_COMM, gpu_dram_bandwidth); id_to_dramtogpu_comm_device[i] = new Device(Device::DEVICE_COMM, gpu_dram_bandwidth); } // Create inter node comm devices for (int i = 0; i < num_nodes; i++) for (int j = 0; j < num_nodes; j++) if (i != j) { int hash = i * total_num_devices + j; ids_to_inter_node_comm_device[hash] = new Device(Device::DEVICE_COMM, inter_node_bandwidth); } // Initialize task manager task_manager = new TaskManager(max_num_tasks); } __host__ void Simulator::strategy_search_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { const FFModel* model = *((FFModel**) task->args); Memory gpu_mem = Machine::MemoryQuery(Machine::get_machine()) .only_kind(Memory::GPU_FB_MEM).best_affinity_to(task->target_proc).first(); Realm::MemoryImpl* memImpl = Realm::get_runtime()->get_memory_impl(gpu_mem); Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl; off_t offset = memFBImpl->alloc_bytes_local(model->config.simulator_work_space_size); void* base_ptr = memFBImpl->get_direct_ptr(offset, 0); // Assume this task is running on GPU0 Simulator* simulator = new Simulator(model, model->handlers[0], base_ptr, model->config.simulator_work_space_size); std::map<Op*, ParallelConfig> strategies; model->optimize(simulator, strategies, model->config.search_budget, model->config.search_alpha); if (model->config.export_strategy_file.length() > 0) { std::map<Op*, ParallelConfig>::const_iterator iter; std::map<MappingTagID, ParallelConfig> strategy_output; for (iter = strategies.begin(); iter != strategies.end(); iter++) { strategy_output[FFConfig::get_hash_id(std::string(iter->first->name))] = iter->second; } save_strategies_to_file(model->config.export_strategy_file, strategy_output); } // Start from data memFBImpl->free_bytes_local(offset, 
model->config.simulator_work_space_size); delete(simulator); }
9f4a3f77ada9cbb10502106a2cd1c78141282160.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include <float.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define TRAIN_NUM 100 #define TEST_NUM 50 #define FEATURE 4 #define NUMBER_OF_CLASSES 3 #define FEAT_KEY 0 #define CUT_KEY 1 #define LEFT_KEY 2 #define RIGHT_KEY 3 #define PRED_KEY 4 #define DEPTH_KEY 5 #define NUM_FIELDS 6 #define index(i, j, N) ((i)*(N)) + (j) #define ixt(i, j, t, N, T) ((t)*(N)*(T)) + ((i)*(N)) + (j) #define MIN(a,b) (((a)<(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } int countNumRows(char *filename) { FILE *fp; int count = 0; // Line counter (result) //char filename[MAX_FILE_NAME]; char c; // To store a character read from file // Get file name from user. The file should be // either in current folder or complete path should be provided //printf("Enter file name: "); //scanf("%s", filename); // Open the file fp = fopen(filename, "r"); // Check if file exists if (fp == NULL) { printf("Could not open file %s", filename); return -1; } // Extract characters from file and store in character c for (c = getc(fp); c != EOF; c = getc(fp)) if (c == '\n') // Increment count if this character is newline count = count + 1; // Close the file fclose(fp); //printf("The file %s has %d lines\n ", filename, count); return count; } const char* getfield(char* line, int num){ const char* tok; for (tok = strtok(line, ","); tok && *tok; tok = strtok(NULL, ",\n")) { if (!--num) return tok; } return NULL; } /* Labels for IRIS: Iris-setosa - 0 Iris-versicolor - 1 Iris-virginica - 2 */ void read_csv_iris(float *data, float *label, int row_count, char *filename){ //data = (float *)malloc(row_count*4*sizeof(float)); //label = (int *)malloc(row_count*sizeof(int)); FILE *fp = fopen(filename,"r"); char line[1024]; int idx = 0; for(int iter = 
0;iter<row_count;iter++) { fgets(line,1024,fp); const char *temp_field; for(int i=0;i<5;i++) { float temp_num; char *tmp = strdup(line); temp_field = getfield(tmp,i+1); if(i==4) { if(strcmp(temp_field,"Iris-setosa")==0) { label[idx] = 0; continue; } if(strcmp(temp_field,"Iris-versicolor")==0) { label[idx] = 1; continue; } if(strcmp(temp_field,"Iris-virginica")==0) { label[idx] = 2; continue; } } temp_num = atof(temp_field); data[idx*4 + i] = temp_num; } idx++; } } /* === Utils === */ int next_pow_2(int x){ int y = 1; while(y < x) y*=2; return y; } void copy_transpose(float* to, float* from, int h, int w){ for(int i=0; i<h; i++){ for(int j=0; j<w; j++){ to[index(j, i, h)] = from[index(i, j, w)]; } } } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){ // From https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void debug(){ hipError_t code; code = hipPeekAtLastError(); if(code != hipSuccess){ printf("GPUassert: Failed at Init: %s\n", hipGetErrorString(code)); exit(code); } code = hipDeviceSynchronize(); if(code != hipSuccess){ printf("GPUassert: Failed at Execution: %s\n", hipGetErrorString(code)); exit(code); } } /* === Random Init === */ __global__ void init_random(unsigned int seed, hiprandState_t* states) { int tid = blockIdx.x * blockDim.x + threadIdx.x; hiprand_init(seed, tid, 0, &states[tid]); } __device__ int draw_approx_binomial(int n, float p, hiprandState_t* state) { int x = (int) round(hiprand_normal(state) * n*p*(1-p) + n*p); return max(0, min(x, n)); } __device__ float draw_uniform(float minimum, float maximum, hiprandState_t* state){ return minimum + hiprand_uniform(state) * (maximum - minimum); } /* === Expanding tree memory === */ void expand(float** d_trees_ptr, int num_trees, int tree_arr_length, int 
new_tree_arr_length){ float *new_d_trees, *d_trees; d_trees = *d_trees_ptr; assert(new_tree_arr_length >= tree_arr_length); hipMalloc((void **) &new_d_trees, num_trees * NUM_FIELDS * new_tree_arr_length * sizeof(float)); printf("Malloced: %d\n", num_trees * NUM_FIELDS * new_tree_arr_length); //hipMemcpy(new_d_trees, d_trees, num_trees * NUM_FIELDS * tree_arr_length *sizeof(float), hipMemcpyDeviceToDevice); for(int i=0; i<num_trees; i++){ hipMemcpy( new_d_trees + i * (NUM_FIELDS * new_tree_arr_length), d_trees + i * (NUM_FIELDS * tree_arr_length), (NUM_FIELDS * tree_arr_length) * sizeof(float), hipMemcpyDeviceToDevice); } hipFree(d_trees); *d_trees_ptr = new_d_trees; } __global__ void get_max_tree_length(int* d_tree_lengths, int num_trees, int* d_max_tree_length){ extern __shared__ int tree_length_buffer[]; if(threadIdx.x < num_trees){ tree_length_buffer[threadIdx.x] = d_tree_lengths[threadIdx.x]; }else{ tree_length_buffer[threadIdx.x] = -1; } for(int stride=blockDim.x/2; stride > 0; stride >>=1){ __syncthreads(); if(threadIdx.x < stride){ if(tree_length_buffer[threadIdx.x + stride] > tree_length_buffer[threadIdx.x]){ tree_length_buffer[threadIdx.x] = tree_length_buffer[threadIdx.x + stride]; } } } if(threadIdx.x == 0){ d_max_tree_length[0] = tree_length_buffer[0]; } } void maybe_expand(float** d_trees_ptr, int num_trees, int* tree_arr_length, int* d_tree_lengths, int* max_tree_length, int* d_max_tree_length){ // I wonder if it's faster just to compute max on CPU. 
int new_tree_arr_length; hipLaunchKernelGGL(( get_max_tree_length), dim3(1), dim3(num_trees), next_pow_2(num_trees) * sizeof(int), 0, d_tree_lengths, num_trees, d_max_tree_length ); printf("get_max_tree_length(%d, %d, %d)\n", 1, num_trees, next_pow_2(num_trees)); hipMemcpy(max_tree_length, d_max_tree_length, sizeof(int), hipMemcpyDeviceToHost); // Buffer of 2 => up to 2 additions at a time if(*max_tree_length <= *tree_arr_length-3){ return; }else{ new_tree_arr_length = (*tree_arr_length) * 2; while(*max_tree_length > new_tree_arr_length-2){ new_tree_arr_length *= 2; } printf("Expanding to %d\n", new_tree_arr_length); expand(d_trees_ptr, num_trees, *tree_arr_length, new_tree_arr_length); *tree_arr_length = new_tree_arr_length; } } /* === Tree Initialization === */ __global__ void kernel_initialize_trees(float *d_trees, int* d_tree_lengths, int tree_arr_length){ d_trees[ixt(0, LEFT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = 0; d_trees[ixt(0, RIGHT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = 0; d_trees[ixt(0, DEPTH_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = 0; d_trees[ixt(0, PRED_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_tree_lengths[threadIdx.x] = 1; } void initialize_trees(float* d_trees, int num_trees, int tree_arr_length, int* d_tree_lengths){ hipLaunchKernelGGL(( kernel_initialize_trees), dim3(1), dim3(num_trees), 0, 0, d_trees, d_tree_lengths, tree_arr_length); } __global__ void kernel_initialize_batch_pos(int *d_batch_pos, int x_length, int num_trees){ int i; for(i=threadIdx.x; i<x_length; i+=blockDim.x){ d_batch_pos[index(blockIdx.x, i, x_length)] = 0; } } void initialize_batch_pos(int *d_batch_pos, int x_length, int num_trees, hipDeviceProp_t dev_prop){ hipLaunchKernelGGL(( kernel_initialize_batch_pos), dim3(num_trees), dim3(dev_prop.maxThreadsPerBlock), 0, 0, d_batch_pos, x_length, num_trees ); } /* === Tree Growth checks === */ __global__ void kernel_refresh_tree_is_done(int* d_tree_lengths, int* d_tree_is_done, int 
tree_pos){ // threadIdx.x = tree_id int is_done; if(tree_pos < d_tree_lengths[threadIdx.x]){ is_done = 0; }else{ is_done = 1; } d_tree_is_done[threadIdx.x] = is_done; } void refresh_tree_is_done(int* d_tree_lengths, int* d_tree_is_done, int tree_pos, int num_trees){ hipLaunchKernelGGL(( kernel_refresh_tree_is_done), dim3(1), dim3(num_trees), 0, 0, d_tree_lengths, d_tree_is_done, tree_pos ); } int check_forest_done(int* d_tree_is_done, int *tree_is_done, int num_trees){ hipMemcpy(tree_is_done, d_tree_is_done, num_trees * sizeof(int), hipMemcpyDeviceToHost); int trees_left; trees_left = 0; for(int i=0; i<num_trees; i++){ if(!tree_is_done[i]){ trees_left++; } } printf("%d trees left to grow\n", trees_left); if(trees_left == 0){ return 1; }else{ return 0; } } /* === Tree Traversal === */ __global__ void kernel_traverse_trees( float *d_trees, float* d_x, int x_length, int num_trees, int tree_arr_length, int* d_batch_pos ){ // Should optimize this. It's just a bunch of global reads. // Also possibly to rewrite this and batch_traverse to support a "next-step" method instead of a full // traversal while growing int pos, new_pos, left_right_key, x_i, tree_id, tx; tx = threadIdx.x + blockIdx.x * blockDim.x; if(tx >= x_length * num_trees) return; // Actually get x_i, tree_id tree_id = tx % num_trees; x_i = tx / num_trees; pos = 0; while(1){ if(d_x[index(x_i, (int) d_trees[ixt(pos, FEAT_KEY, tree_id, NUM_FIELDS, tree_arr_length)], FEATURE)] < d_trees[ixt(pos, CUT_KEY, tree_id, NUM_FIELDS, tree_arr_length)]){ left_right_key = LEFT_KEY; }else{ left_right_key = RIGHT_KEY; } new_pos = (int) d_trees[ixt(pos, left_right_key, tree_id, NUM_FIELDS, tree_arr_length)]; if(new_pos == pos){ // Leaf nodes are set up to be idempotent break; } pos = new_pos; } d_batch_pos[index(tree_id, x_i, x_length)] = pos; } void batch_traverse_trees( float *d_trees, float *d_x, int x_length, int num_trees, int tree_arr_length, int *d_batch_pos, hipDeviceProp_t dev_prop){ int block_size, num_blocks; 
block_size = dev_prop.maxThreadsPerBlock; num_blocks = ceil(num_trees * x_length/((float) block_size)); hipLaunchKernelGGL(( kernel_traverse_trees), dim3(num_blocks), dim3(block_size), 0, 0, d_trees, d_x, x_length, num_trees, tree_arr_length, d_batch_pos ); } __global__ void kernel_advance_trees(float *d_trees, float* d_x, int x_length, int tree_arr_length, int num_trees, int* d_batch_pos){ int pos, left_right_key, x_i; // threadIdx.x = x_i, blockIdx.x = tree_id for(x_i=threadIdx.x; x_i < x_length; x_i+=blockDim.x){ pos = d_batch_pos[index(blockIdx.x, x_i, TRAIN_NUM)]; if(d_x[index(x_i, (int) d_trees[ixt(pos, FEAT_KEY, blockIdx.x, NUM_FIELDS, tree_arr_length)], FEATURE)] < d_trees[ixt(pos, CUT_KEY, blockIdx.x, NUM_FIELDS, tree_arr_length)]){ left_right_key = LEFT_KEY; }else{ left_right_key = RIGHT_KEY; } d_batch_pos[index(blockIdx.x, x_i, TRAIN_NUM)] = (int) d_trees[ixt(pos, left_right_key, blockIdx.x, NUM_FIELDS, tree_arr_length)]; } } void batch_advance_trees(float *d_tree, float *d_x, int x_length, int tree_arr_length, int num_trees, int *d_batch_pos, hipDeviceProp_t dev_prop){ hipLaunchKernelGGL(( kernel_advance_trees), dim3(num_trees), dim3(dev_prop.maxThreadsPerBlock), 0, 0, d_tree, d_x, x_length, tree_arr_length, num_trees, d_batch_pos ); } /* === Node termination === */ __global__ void kernel_check_node_termination( float* d_trees, int tree_arr_length, float* d_y, int* d_batch_pos, int tree_pos, int* d_is_branch_node, int* d_tree_is_done ){ // threadIdx.x = tree_id int i, base_y, new_y, is_branch_node; // If tree is done, it's never a branch node if(d_tree_is_done[threadIdx.x]==1){ d_is_branch_node[threadIdx.x] = 0; return; } // Check for non-unique Y base_y = -1; is_branch_node = 0; for(i=1; i<TRAIN_NUM; i++){ if(d_batch_pos[index(threadIdx.x, i, TRAIN_NUM)] == tree_pos){ new_y = d_y[i]; if(base_y == -1){ base_y = new_y; }else if(base_y != new_y){ is_branch_node = 1; break; } } } d_is_branch_node[threadIdx.x] = is_branch_node; if(base_y==-1){ printf("ERROR 
ERROR ERROR EMPTY 1TREE %d\n", threadIdx.x); printf("ERROR ERROR ERROR EMPTY 2TREE %d\n", threadIdx.x); printf("ERROR ERROR ERROR EMPTY 2TREE %d\n", threadIdx.x); } if(!is_branch_node){ d_trees[ixt(tree_pos, PRED_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = base_y; } } void check_node_termination( float* d_trees, int tree_arr_length, float* d_y, int* d_batch_pos, int tree_pos, int* d_is_branch_node, int* d_tree_is_done, int num_trees ){ hipLaunchKernelGGL(( kernel_check_node_termination), dim3(1), dim3(num_trees), 0, 0, d_trees, tree_arr_length, d_y, d_batch_pos, tree_pos, d_is_branch_node, d_tree_is_done ); } /* === Valid features === */ __global__ void kernel_collect_min_max(float* d_x_T, int* d_batch_pos, int desired_pos, int num_trees, int x_length, float* d_min_max_buffer, int* d_is_branch_node){ extern __shared__ float shared_min_max[]; // threadIdx.x * 2 // Ripe for optimization. // threadIdx.x = x_i++, blockIdx.x = tree_id, feat = blockIdx.y int x_i; float minimum, maximum, val; if(!d_is_branch_node[blockIdx.x]){ return; } minimum = FLT_MAX; maximum = -FLT_MAX; for(x_i=threadIdx.x; x_i < x_length; x_i+=blockDim.x){ if(d_batch_pos[index(blockIdx.x, x_i, x_length)] == desired_pos){ val = d_x_T[index(blockIdx.y, x_i, TRAIN_NUM)]; if(val < minimum){ minimum = val; } if(val > maximum){ maximum = val; } } } shared_min_max[index(threadIdx.x, 0, 2)] = minimum; shared_min_max[index(threadIdx.x, 1, 2)] = maximum; for(int stride=blockDim.x/2; stride > 0; stride >>=1){ __syncthreads(); if(threadIdx.x < stride){ if(shared_min_max[index(threadIdx.x + stride, 0, 2)] < shared_min_max[index(threadIdx.x, 0, 2)]){ shared_min_max[index(threadIdx.x, 0, 2)] = shared_min_max[index(threadIdx.x + stride, 0, 2)]; } if(shared_min_max[index(threadIdx.x + stride, 1, 2)] > shared_min_max[index(threadIdx.x, 1, 2)]){ shared_min_max[index(threadIdx.x, 1, 2)] = shared_min_max[index(threadIdx.x + stride, 1, 2)]; } } } if(threadIdx.x==0){ d_min_max_buffer[ixt(blockIdx.y, 0, blockIdx.x, 
2, FEATURE)] = shared_min_max[index(0, 0, 2)]; d_min_max_buffer[ixt(blockIdx.y, 1, blockIdx.x, 2, FEATURE)] = shared_min_max[index(0, 1, 2)]; } } void collect_min_max(float* d_x_T, int* d_batch_pos, int desired_pos, int num_trees, int x_length, float* d_min_max_buffer, int* d_is_branch_node, hipDeviceProp_t dev_prop){ // Ripe for optimization. dim3 grid(num_trees, FEATURE); hipLaunchKernelGGL(( kernel_collect_min_max), dim3(grid), dim3(64), 64 * sizeof(int) * 2, 0, d_x_T, d_batch_pos, desired_pos, num_trees, x_length, d_min_max_buffer, d_is_branch_node ); } __global__ void kernel_collect_num_valid_feat( int* d_num_valid_feat, float* d_min_max_buffer, int num_trees, int* d_is_branch_node ){ extern __shared__ int shared_num_valid_feat_buffer[]; // blockIdx.x = tree_id int sub_num_valid_feat, feat_i; if(!d_is_branch_node[blockIdx.x]){ return; } sub_num_valid_feat = 0; for(feat_i=threadIdx.x; feat_i<FEATURE; feat_i+=blockDim.x){ if(d_min_max_buffer[ixt(feat_i, 0, blockIdx.x, 2, FEATURE)] != d_min_max_buffer[ixt(feat_i, 1, blockIdx.x, 2, FEATURE)] ){ sub_num_valid_feat++; } } shared_num_valid_feat_buffer[threadIdx.x] = sub_num_valid_feat; for(int stride=blockDim.x/2; stride > 0; stride >>=1){ __syncthreads(); if(threadIdx.x < stride){ shared_num_valid_feat_buffer[threadIdx.x] += shared_num_valid_feat_buffer[threadIdx.x + stride]; } } if(threadIdx.x == 0){ d_num_valid_feat[blockIdx.x] = shared_num_valid_feat_buffer[0]; } } void collect_num_valid_feat( int* d_num_valid_feat, float* d_min_max_buffer, int num_trees, int* d_is_branch_node, hipDeviceProp_t dev_prop ){ // Ripe for optimization int block_size = MIN(dev_prop.maxThreadsPerBlock, next_pow_2(FEATURE)); // Copy this to other places too hipLaunchKernelGGL(( kernel_collect_num_valid_feat), dim3(num_trees), dim3(block_size), block_size * sizeof(int), 0, d_num_valid_feat, d_min_max_buffer, num_trees, d_is_branch_node ); } /* === Populate Random Features === */ __global__ void kernel_depopulate_valid_feat_idx( int* 
d_random_feats, int num_trees, int feat_per_node ){ int t; for(t=0; t<num_trees; t++){ //-1 means fill-forward d_random_feats[index(t, threadIdx.x, feat_per_node)] = -1; } } __global__ void kernel_populate_valid_feat_idx( int* d_random_feats, int* d_num_valid_feat, int feat_per_node, int* d_is_branch_node, hiprandState_t* curand_states ){ // threadIdx.x = tree_id int k, idx, draw, num_valid_feat; if(!d_is_branch_node[threadIdx.x]){ return; } idx = 0; num_valid_feat = d_num_valid_feat[threadIdx.x]; for(k=0; k<(num_valid_feat-1); k++){ draw = draw_approx_binomial(feat_per_node-idx, 1./(num_valid_feat-k), curand_states + threadIdx.x); if(draw > 0){ d_random_feats[index(threadIdx.x, idx, feat_per_node)] = k; } idx += draw; if(idx >= feat_per_node){ return; } } if(idx < feat_per_node){ d_random_feats[index(threadIdx.x, idx, feat_per_node)] = k; } } __global__ void kernel_populate_feat_cut( int* d_random_feats, float* d_random_cuts, float* d_min_max_buffer, int feat_per_node, int num_trees, int* d_is_branch_node, hiprandState_t* curand_states ){ // threadIdx.x = tree_id int feat_i, feat_idx, feat_idx_idx, valid_feats_seen, buffer; float minimum, maximum; if(!d_is_branch_node[threadIdx.x]){ return; } feat_idx = -1; // First element will overwrite feat_idx_idx = 0; // Parallel construction valid_feats_seen = 0; for(feat_i=0; feat_i < FEATURE; feat_i++){ minimum = d_min_max_buffer[ixt(feat_i, 0, threadIdx.x, 2, FEATURE)]; maximum = d_min_max_buffer[ixt(feat_i, 1, threadIdx.x, 2, FEATURE)]; if(minimum!=maximum){ while(1){ buffer = d_random_feats[index(threadIdx.x, feat_idx_idx, feat_per_node)]; if(buffer != -1){ feat_idx = buffer; } if(feat_idx==valid_feats_seen){ d_random_feats[index(threadIdx.x, feat_idx_idx, feat_per_node)] = feat_i; d_random_cuts[index(threadIdx.x, feat_idx_idx, feat_per_node)] = draw_uniform(minimum, maximum, curand_states+threadIdx.x); }else{ break; } feat_idx_idx++; if(feat_idx_idx >= feat_per_node){ return; } } } valid_feats_seen++; } } void 
populate_valid_feat_idx( int* d_random_feats, int* d_num_valid_feat, int feat_per_node, int num_trees, int* d_is_branch_node, hiprandState_t* curand_states ){ hipLaunchKernelGGL(( kernel_depopulate_valid_feat_idx), dim3(1), dim3(feat_per_node), 0, 0, d_random_feats, num_trees, feat_per_node); hipLaunchKernelGGL(( kernel_populate_valid_feat_idx), dim3(1), dim3(num_trees), 0, 0, d_random_feats, d_num_valid_feat, feat_per_node, d_is_branch_node, curand_states ); } void populate_feat_cut(int* d_random_feats, float* d_random_cuts, float* d_min_max_buffer, int feat_per_node, int num_trees, int* d_is_branch_node, hiprandState_t* curand_states){ hipLaunchKernelGGL(( kernel_populate_feat_cut), dim3(1), dim3(num_trees), 0, 0, d_random_feats, d_random_cuts, d_min_max_buffer, feat_per_node, num_trees, d_is_branch_node, curand_states ); } /* === Count Classes === */ __global__ void kernel_populate_class_counts( float* d_x, float* d_y, int* d_class_counts_a, int* d_class_counts_b, int* d_random_feats, float* d_random_cuts, int* d_batch_pos, int tree_pos, int num_trees, int feat_per_node, int* d_is_branch_node ){ // Naive version // threadIdx.x = tree_id, blockIdx.x = rand_feat_i int i, y, feat; float cut; if(!d_is_branch_node[threadIdx.x]){ return; } feat = d_random_feats[index(threadIdx.x, blockIdx.x, feat_per_node)]; cut = d_random_cuts[index(threadIdx.x, blockIdx.x, feat_per_node)]; for(i=0; i<NUMBER_OF_CLASSES; i++){ //tree node class d_class_counts_a[ixt(threadIdx.x, blockIdx.x, i, feat_per_node, num_trees)] = 0; d_class_counts_b[ixt(threadIdx.x, blockIdx.x, i, feat_per_node, num_trees)] = 0; } for(i=0; i<TRAIN_NUM; i++){ if(d_batch_pos[index(threadIdx.x, i, TRAIN_NUM)]==tree_pos){ y = (int) d_y[i]; if(d_x[index(i, feat, FEATURE)] < cut){ d_class_counts_a[ixt(threadIdx.x, blockIdx.x, y, feat_per_node, num_trees)]++; }else{ d_class_counts_b[ixt(threadIdx.x, blockIdx.x, y, feat_per_node, num_trees)]++; } } } } void populate_class_counts( float* d_x, float* d_y, int* 
                            d_class_counts_a, int* d_class_counts_b, int* d_random_feats,
                            float* d_random_cuts, int* d_batch_pos, int tree_pos, int num_trees,
                            int feat_per_node, int* d_is_branch_node ){
    // Naive version
    // Host wrapper: one block per candidate feature, one thread per tree.
    hipLaunchKernelGGL(( kernel_populate_class_counts), dim3(feat_per_node), dim3(num_trees), 0, 0,
        d_x, d_y, d_class_counts_a, d_class_counts_b, d_random_feats, d_random_cuts,
        d_batch_pos, tree_pos, num_trees, feat_per_node, d_is_branch_node );
}

/* === Place Best Features/Cuts === */

// For each still-growing tree, scan its feat_per_node candidate (feature, cut) pairs and keep
// the pair with the highest Gini-style proxy improvement; the winner is written to
// d_best_feats[tree] / d_best_cuts[tree]. Expected launch: <<<1, num_trees>>> (threadIdx.x = tree).
// Class counts come from kernel_populate_class_counts: *_a = samples left of the cut,
// *_b = samples right of the cut, laid out [tree][candidate][class] via ixt().
__global__ void kernel_place_best_feat_cuts( int* d_class_counts_a, int* d_class_counts_b,
    int* d_random_feats, float* d_random_cuts, int* d_best_feats, float* d_best_cuts,
    int feat_per_node, int num_trees, int* d_is_branch_node ){
    // Naive version => Can move class_counts into shared memory
    // threadIdx.x = tree_id
    int i, k;
    float best_improvement, best_cut, proxy_improvement;
    int best_feat;
    int total_a, total_b;
    float impurity_a, impurity_b;
    // Trees whose current node is terminal do nothing this round.
    if(!d_is_branch_node[threadIdx.x]){
        return;
    }
    best_improvement = -FLT_MAX;
    best_feat = -1;
    best_cut = 0;
    for(i=0; i<feat_per_node; i++){
        // Totals per side of the candidate split.
        total_a = 0;
        total_b = 0;
        // Gini impurity starts at 1 and subtracts squared class fractions.
        impurity_a = 1;
        impurity_b = 1;
        for(k=0; k<NUMBER_OF_CLASSES; k++){
            total_a += d_class_counts_a[ixt(threadIdx.x, i, k, feat_per_node, num_trees)];
            total_b += d_class_counts_b[ixt(threadIdx.x, i, k, feat_per_node, num_trees)];
        }
        for(k=0; k<NUMBER_OF_CLASSES; k++){
            // NOTE(review): if total_a or total_b is 0 the division yields NaN; the NaN then
            // makes proxy_improvement fail the `>` test below, so an empty-side candidate is
            // silently skipped -- confirm this is the intended behavior.
            // NOTE(review): pow() here is the double-precision overload; powf()/x*x would
            // avoid the float->double round trip -- confirm before changing.
            impurity_a -= pow(((float) d_class_counts_a[ixt(threadIdx.x, i, k, feat_per_node, num_trees)]) / total_a, 2);
            impurity_b -= pow(((float) d_class_counts_b[ixt(threadIdx.x, i, k, feat_per_node, num_trees)]) / total_b, 2);
        }
        // Weighted (negated) impurity: larger is better; constant terms are dropped.
        proxy_improvement = - total_a * impurity_a - total_b * impurity_b;
        if(proxy_improvement > best_improvement){
            best_feat = d_random_feats[index(threadIdx.x, i, feat_per_node)];
            best_cut = d_random_cuts[index(threadIdx.x, i, feat_per_node)];
            best_improvement = proxy_improvement;
        }
    }
    d_best_feats[threadIdx.x] = best_feat;
    d_best_cuts[threadIdx.x] = best_cut;
}

void place_best_feat_cuts( int* d_class_counts_a, int* 
d_class_counts_b, int* d_random_feats, float* d_random_cuts, int* d_best_feats, float* d_best_cuts, int feat_per_node, int num_trees, int* d_is_branch_node ){ // Naive version hipLaunchKernelGGL(( kernel_place_best_feat_cuts), dim3(1), dim3(num_trees), 0, 0, d_class_counts_a, d_class_counts_b, d_random_feats, d_random_cuts, d_best_feats, d_best_cuts, feat_per_node, num_trees, d_is_branch_node ); } /* === Update Trees === */ __global__ void kernel_update_trees( float* d_trees, int* d_tree_lengths, int tree_pos, int* d_best_feats, float* d_best_cuts, int tree_arr_length, int* d_is_branch_node ){ // Naive version // threadIdx.x = tree_id int left_child_pos, right_child_pos, tree_length; if(!d_is_branch_node[threadIdx.x]){ return; } tree_length = d_tree_lengths[threadIdx.x]; left_child_pos = tree_length; right_child_pos = tree_length + 1; // Update tree nodes d_trees[ixt(tree_pos, LEFT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = left_child_pos; d_trees[ixt(tree_pos, RIGHT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = right_child_pos; d_trees[ixt(tree_pos, FEAT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = d_best_feats[threadIdx.x]; d_trees[ixt(tree_pos, CUT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = d_best_cuts[threadIdx.x]; d_tree_lengths[threadIdx.x] += 2; // Prefill child nodes d_trees[ixt(left_child_pos, LEFT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = left_child_pos; d_trees[ixt(left_child_pos, RIGHT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = left_child_pos; d_trees[ixt(left_child_pos, DEPTH_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = \ d_trees[ixt(tree_pos, DEPTH_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] + 1; d_trees[ixt(left_child_pos, FEAT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_trees[ixt(left_child_pos, CUT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_trees[ixt(left_child_pos, PRED_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_trees[ixt(right_child_pos, LEFT_KEY, threadIdx.x, 
NUM_FIELDS, tree_arr_length)] = right_child_pos; d_trees[ixt(right_child_pos, RIGHT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = right_child_pos; d_trees[ixt(right_child_pos, DEPTH_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = \ d_trees[ixt(tree_pos, DEPTH_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] + 1; d_trees[ixt(right_child_pos, FEAT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_trees[ixt(right_child_pos, CUT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_trees[ixt(right_child_pos, PRED_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; } void update_trees( float* d_trees, int* d_tree_lengths, int tree_pos, int* d_best_feats, float* d_best_cuts, int tree_arr_length, int num_trees, int* d_is_branch_node ){ hipLaunchKernelGGL(( kernel_update_trees), dim3(1), dim3(num_trees), 0, 0, d_trees, d_tree_lengths, tree_pos, d_best_feats, d_best_cuts, tree_arr_length, d_is_branch_node ); } /* === Evaluate === */ __global__ void kernel_raw_predict( float *d_raw_pred_y, float* d_trees, int* d_batch_pos, int tree_arr_length, int x_length ){ // threadIdx.x = tree_id, blockIdx.x = x_i int pos; pos = d_batch_pos[index(threadIdx.x, blockIdx.x, x_length)]; d_raw_pred_y[index(threadIdx.x, blockIdx.x, x_length)] = d_trees[ ixt(pos, PRED_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)]; } void raw_predict( float *d_raw_pred_y, float* d_trees, int* d_batch_pos, int tree_arr_length, int x_length, int num_trees ){ hipLaunchKernelGGL(( kernel_raw_predict), dim3(x_length), dim3(num_trees), 0, 0, d_raw_pred_y, d_trees, d_batch_pos, tree_arr_length, x_length ); } void predict(float* pred_y, float* raw_pred_y, int x_length, int num_trees){ int *class_count_buffer; int i, j, k, pred, maximum, maximum_class; class_count_buffer = (int *)malloc(NUMBER_OF_CLASSES * sizeof(int)); for(k=0; k<NUMBER_OF_CLASSES; k++){ class_count_buffer[k] = 0; } for(i=0; i<x_length; i++){ for(j=0; j<num_trees; j++){ pred = (int) raw_pred_y[index(j, i, x_length)]; 
class_count_buffer[pred]++;
        }
        // Majority vote: pick the class with the highest count across trees,
        // resetting the buffer for the next sample as we scan it.
        maximum = -1;
        for(k=0; k<NUMBER_OF_CLASSES; k++){
            if(class_count_buffer[k] > maximum){
                maximum = class_count_buffer[k];
                maximum_class = k;
            }
            class_count_buffer[k] = 0;
        }
        printf("Setting %d to %d with counts %d\n", i, maximum_class, maximum);
        pred_y[i] = (float) maximum_class;
    }
    // NOTE(review): class_count_buffer is malloc'd above and never freed -- leak.
}

// Fraction of samples where the (rounded) prediction matches the true label.
// NOTE(review): divides by y_length with no guard; y_length == 0 would be a
// division by zero -- presumably never called that way, confirm.
float evaluate(float* pred_y, float* true_y, int y_length){
    int i;
    float score;
    score = 0;
    for(i=0; i<y_length; i++){
        if((int) pred_y[i] == (int) true_y[i]){
            score += 1;
        }
    }
    score /= y_length;
    return score;
}

int main(int argc,char *argv[])
{
    // Host copies of the IRIS train/test splits (row-major: [sample][feature]).
    float *dataset_train,*dataset_test;
    float *labels_train,*labels_test;
    dataset_train = (float *)malloc(FEATURE * TRAIN_NUM*sizeof(float));
    labels_train = (float *)malloc(TRAIN_NUM*sizeof(float));
    dataset_test = (float *)malloc(FEATURE * TEST_NUM*sizeof(float));
    labels_test = (float *)malloc(TEST_NUM*sizeof(float));
    char file_train_set[] = "data/iris_train.data";
    char file_test_set[] = "data/iris_test.data";
    read_csv_iris(dataset_train,labels_train,TRAIN_NUM,file_train_set);
    read_csv_iris(dataset_test,labels_test,TEST_NUM,file_test_set);
    // Transposed copy ([feature][sample]) used by the min/max collection kernel.
    float *dataset_train_T;
    dataset_train_T = (float *)malloc(TRAIN_NUM * FEATURE * sizeof(float));
    copy_transpose(dataset_train_T, dataset_train, TRAIN_NUM, FEATURE);
    // Host/device pairs for the forest state; d_-prefixed pointers live on the device.
    float *trees, *d_trees;
    int *tree_arr_length;
    int *tree_lengths, *d_tree_lengths;
    int *max_tree_length, *d_max_tree_length;
    int feat_per_node;
    int *num_valid_feat, *d_num_valid_feat;
    int tree_pos;
    int *batch_pos, *d_batch_pos; // NUM_TREES * TRAIN_NUM
    int *is_branch_node, *d_is_branch_node;
    int *tree_is_done, *d_tree_is_done;
    float *min_max_buffer, *d_min_max_buffer;
    int *random_feats, *d_random_feats;
    float *random_cuts, *d_random_cuts;
    int *class_counts_a, *class_counts_b;
    int *d_class_counts_a, *d_class_counts_b;
    int *best_feats, *d_best_feats;
    float *best_cuts, *d_best_cuts;
    // NOTE(review): prev_depth/max_depth are declared but never used below -- confirm.
    int prev_depth, max_depth;
    float *d_x, *d_y;
    float *d_x_T;
    float *pred_y, *raw_pred_y, *d_raw_pred_y;
    hiprandState_t* curand_states;
    int num_trees; 
num_trees = 200; // Assumption: num_trees < maxNumBlocks, maxThreadsPerBlock srand(2); tree_arr_length = (int *)malloc(sizeof(int)); tree_lengths = (int *)malloc(num_trees * sizeof(int)); *tree_arr_length = 8; max_tree_length = (int *)malloc(sizeof(int)); feat_per_node = (int) ceil(sqrt(FEATURE)); //trees = (float *)malloc(num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float)); batch_pos = (int *)malloc(num_trees * TRAIN_NUM *sizeof(float)); is_branch_node = (int *)malloc(num_trees * sizeof(int)); tree_is_done = (int *)malloc(num_trees * sizeof(int)); min_max_buffer = (float *)malloc(num_trees * FEATURE * 2 *sizeof(float)); num_valid_feat = (int *)malloc(num_trees * sizeof(int)); random_feats = (int *)malloc(num_trees * feat_per_node * sizeof(int)); random_cuts = (float *)malloc(num_trees * feat_per_node * sizeof(float)); best_feats = (int *)malloc(num_trees * sizeof(int)); best_cuts = (float *)malloc(num_trees * sizeof(float)); class_counts_a = (int *)malloc(num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int)); class_counts_b = (int *)malloc(num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int)); hipDeviceProp_t dev_prop; hipGetDeviceProperties(&dev_prop, 0); hipMalloc((void **) &d_trees, num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float)); hipMalloc((void **) &d_tree_lengths, num_trees * sizeof(int)); hipMalloc((void **) &d_max_tree_length, sizeof(int)); hipMalloc((void **) &d_batch_pos, num_trees * TRAIN_NUM *sizeof(float)); hipMalloc((void **) &d_is_branch_node, num_trees * sizeof(int)); hipMalloc((void **) &d_tree_is_done, num_trees * sizeof(int)); hipMalloc((void **) &d_min_max_buffer, num_trees * FEATURE * 2 *sizeof(float)); hipMalloc((void **) &d_num_valid_feat, num_trees *sizeof(int)); hipMalloc((void **) &d_random_feats, num_trees * feat_per_node * sizeof(int)); hipMalloc((void **) &d_random_cuts, num_trees * feat_per_node * sizeof(float)); hipMalloc((void **) &d_best_feats, num_trees * sizeof(int)); hipMalloc((void **) 
&d_best_cuts, num_trees * sizeof(float)); hipMalloc((void **) &d_class_counts_a, num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int)); hipMalloc((void **) &d_class_counts_b, num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int)); hipMalloc((void **) &d_x, TRAIN_NUM * FEATURE *sizeof(float)); hipMalloc((void **) &d_y, TRAIN_NUM *sizeof(float)); hipMalloc((void **) &d_x_T, TRAIN_NUM * FEATURE *sizeof(float)); hipMemcpy(d_x, dataset_train, TRAIN_NUM * FEATURE *sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, labels_train, TRAIN_NUM *sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_x_T, dataset_train_T, TRAIN_NUM * FEATURE *sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**) &curand_states, num_trees * sizeof(hiprandState_t)); hipLaunchKernelGGL(( init_random), dim3(1), dim3(num_trees), 0, 0, 1337, curand_states); initialize_trees(d_trees, num_trees, *tree_arr_length, d_tree_lengths); initialize_batch_pos(d_batch_pos, TRAIN_NUM, num_trees, dev_prop); for(tree_pos=0; tree_pos<200; tree_pos++){ printf("* ================== TREE POS -[ %d ]- ================== *\n", tree_pos); trees = (float *)malloc(num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float)); hipMemcpy(trees, d_trees, num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float), hipMemcpyDeviceToHost); printf("%d\n", num_trees * NUM_FIELDS * (*tree_arr_length)); for(int i=0; i<num_trees; i++){ printf("T=%d ", i); for(int j=0; j<=10; j++){ printf("%d ", (int) trees[ixt(j, LEFT_KEY, i, NUM_FIELDS, *tree_arr_length)]); } printf("\n "); for(int j=0; j<=10; j++){ printf("%d ", (int) trees[ixt(j, RIGHT_KEY, i, NUM_FIELDS, *tree_arr_length)]); } printf("\n"); } free(trees); refresh_tree_is_done(d_tree_lengths, d_tree_is_done, tree_pos, num_trees); if(check_forest_done(d_tree_is_done, tree_is_done, num_trees)){ printf("DONE\n"); break; } maybe_expand(&d_trees, num_trees, tree_arr_length, d_tree_lengths, max_tree_length, d_max_tree_length); batch_advance_trees(d_trees, d_x, TRAIN_NUM, 
*tree_arr_length, num_trees, d_batch_pos, dev_prop); hipMemcpy(batch_pos, d_batch_pos, num_trees * TRAIN_NUM * sizeof(float), hipMemcpyDeviceToHost); for(int i=0; i<num_trees; i++){ //printf("T=%d traverse\n", i); for(int j=0; j<TRAIN_NUM; j++){ //printf("%d ", batch_pos[index(i, j, TRAIN_NUM)]); } //printf("\n"); } check_node_termination( d_trees, *tree_arr_length, d_y, d_batch_pos, tree_pos, d_is_branch_node, d_tree_is_done, num_trees ); // ^^ hipMemcpy(is_branch_node, d_is_branch_node, num_trees * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(tree_is_done, d_tree_is_done, num_trees * sizeof(int), hipMemcpyDeviceToHost); printf("TREE IS DONE : "); for(int i=0; i<num_trees; i++){printf("%d ", tree_is_done[i]);};printf("\n"); printf("IS BRANCH NODE: "); for(int i=0; i<num_trees; i++){printf("%d ", is_branch_node[i]);};printf("\n"); // VV collect_min_max( d_x_T, d_batch_pos, tree_pos, num_trees, TRAIN_NUM, d_min_max_buffer, d_is_branch_node, dev_prop ); collect_num_valid_feat( d_num_valid_feat, d_min_max_buffer, num_trees, d_is_branch_node, dev_prop ); populate_valid_feat_idx( d_random_feats, d_num_valid_feat, feat_per_node, num_trees, d_is_branch_node, curand_states ); // AAAA /* hipMemcpy(random_feats, d_random_feats, num_trees * feat_per_node * sizeof(int), hipMemcpyDeviceToHost); for(int i=0; i<num_trees; i++){ printf("T=%d: ", i); for(int j=0; j<feat_per_node; j++){ printf("%d(%d) ", random_feats[index(i, j, feat_per_node)], index(i, j, feat_per_node)); } printf("\n"); }*/ // ZZZZ populate_feat_cut( d_random_feats, d_random_cuts, d_min_max_buffer, feat_per_node, num_trees, d_is_branch_node, curand_states ); populate_class_counts( d_x, d_y, d_class_counts_a, d_class_counts_b, d_random_feats, d_random_cuts, d_batch_pos, tree_pos, num_trees, feat_per_node, d_is_branch_node ); place_best_feat_cuts( d_class_counts_a, d_class_counts_b, d_random_feats, d_random_cuts, d_best_feats, d_best_cuts, feat_per_node, num_trees, d_is_branch_node ); update_trees( d_trees, 
d_tree_lengths, tree_pos, d_best_feats, d_best_cuts, *tree_arr_length, num_trees, d_is_branch_node ); hipMemcpy(random_feats, d_random_feats, num_trees * feat_per_node * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(random_cuts, d_random_cuts, num_trees * feat_per_node * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(class_counts_a, d_class_counts_a, num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(class_counts_b, d_class_counts_b, num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(best_feats, d_best_feats, num_trees * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(best_cuts, d_best_cuts, num_trees * sizeof(float), hipMemcpyDeviceToHost); int x1; for(int i=0; i<num_trees; i++){ printf("T=%d\n", i); x1 = 0; for(int j=0; j<feat_per_node; j++){ printf(" J=%d @ %d---%f\n", j, random_feats[index(i, j, feat_per_node)], random_cuts[index(i, j, feat_per_node)]); printf(" "); for(int k=0; k<NUMBER_OF_CLASSES; k++){ x1 += class_counts_a[ixt(i, j, k, feat_per_node, num_trees)]; printf(" %d", class_counts_a[ixt(i, j, k, feat_per_node, num_trees)]); } printf("\n"); printf(" "); for(int k=0; k<NUMBER_OF_CLASSES; k++){ x1 += class_counts_b[ixt(i, j, k, feat_per_node, num_trees)]; printf(" %d", class_counts_b[ixt(i, j, k, feat_per_node, num_trees)]); } printf(" ===> %d", x1); printf("\n"); } printf("\n"); } for(int i=0; i<num_trees; i++){ printf("T=%d ==> %d/%f\n", i, best_feats[i], best_cuts[i]); } } /* for(int i=0; i<num_trees; i++){ for(int j=0; j<feat_per_node; j++){ printf(" %d %d %f \n", j, random_feats[index(i, j, feat_per_node)], random_cuts[index(i, j, feat_per_node)]); } printf("\n"); } printf("%d\n", feat_per_node); */ /* TO DO: - Expanding is broken - Check 2nd level filter - Implement terminal nodes - Randomness might be broken */ printf("================= DONE TRAINING =================\n"); /* === TEST === */ hipFree(d_batch_pos); free(batch_pos); hipMalloc((void **) 
&d_batch_pos, num_trees * TEST_NUM * sizeof(float)); pred_y = (float *)malloc(TEST_NUM * sizeof(float)); raw_pred_y = (float *)malloc(num_trees * TEST_NUM * sizeof(float)); hipFree(d_x); hipMalloc((void **) &d_x, TEST_NUM * FEATURE * sizeof(float)); hipMalloc((void **) &d_raw_pred_y, num_trees * TEST_NUM * sizeof(float)); hipMemcpy(d_x, dataset_test, TEST_NUM * FEATURE * sizeof(float), hipMemcpyHostToDevice); initialize_batch_pos( d_batch_pos, TEST_NUM, num_trees, dev_prop ); batch_traverse_trees( d_trees, d_x, TEST_NUM, num_trees, *tree_arr_length, d_batch_pos, dev_prop ); hipMemcpy(d_x, dataset_test, TEST_NUM * FEATURE * sizeof(float), hipMemcpyHostToDevice); raw_predict(d_raw_pred_y, d_trees, d_batch_pos, *tree_arr_length, TEST_NUM, num_trees); hipMemcpy(raw_pred_y, d_raw_pred_y, num_trees * TEST_NUM * sizeof(float), hipMemcpyDeviceToHost); predict(pred_y, raw_pred_y, TEST_NUM, num_trees); /* trees = (float *)malloc(num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float)); hipMemcpy(trees, d_trees, num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float), hipMemcpyDeviceToHost); printf("%d\n", num_trees * NUM_FIELDS * (*tree_arr_length)); for(int i=0; i<num_trees; i++){ printf("T=%d ", i); for(int j=0; j<=10; j++){ printf("%d ", (int) trees[ixt(j, PRED_KEY, i, NUM_FIELDS, *tree_arr_length)]); } printf("\n"); } hipMemcpy(batch_pos, d_batch_pos, num_trees * TEST_NUM * sizeof(float), hipMemcpyDeviceToHost); for(int i=0; i<num_trees; i++){ printf("T=%d ", i); for(int j=0; j<=TEST_NUM; j++){ //printf("%d ", batch_pos[index(i, j, TEST_NUM)]); printf("%d ", (int) trees[ ixt(batch_pos[index(i, j, TEST_NUM)], PRED_KEY, i, NUM_FIELDS, *tree_arr_length) ]); } printf("\n"); } for(int i=0; i<TEST_NUM; i++){printf("%d ", (int) pred_y[i]);};printf("\n"); for(int i=0; i<TEST_NUM; i++){printf("%d ", (int) labels_test[i]);};printf("\n"); */ printf("Accuracy: %f\n", evaluate(pred_y, labels_test, TEST_NUM)); debug(); }
9f4a3f77ada9cbb10502106a2cd1c78141282160.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include <float.h> #include <curand.h> #include <curand_kernel.h> #define TRAIN_NUM 100 #define TEST_NUM 50 #define FEATURE 4 #define NUMBER_OF_CLASSES 3 #define FEAT_KEY 0 #define CUT_KEY 1 #define LEFT_KEY 2 #define RIGHT_KEY 3 #define PRED_KEY 4 #define DEPTH_KEY 5 #define NUM_FIELDS 6 #define index(i, j, N) ((i)*(N)) + (j) #define ixt(i, j, t, N, T) ((t)*(N)*(T)) + ((i)*(N)) + (j) #define MIN(a,b) (((a)<(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } int countNumRows(char *filename) { FILE *fp; int count = 0; // Line counter (result) //char filename[MAX_FILE_NAME]; char c; // To store a character read from file // Get file name from user. The file should be // either in current folder or complete path should be provided //printf("Enter file name: "); //scanf("%s", filename); // Open the file fp = fopen(filename, "r"); // Check if file exists if (fp == NULL) { printf("Could not open file %s", filename); return -1; } // Extract characters from file and store in character c for (c = getc(fp); c != EOF; c = getc(fp)) if (c == '\n') // Increment count if this character is newline count = count + 1; // Close the file fclose(fp); //printf("The file %s has %d lines\n ", filename, count); return count; } const char* getfield(char* line, int num){ const char* tok; for (tok = strtok(line, ","); tok && *tok; tok = strtok(NULL, ",\n")) { if (!--num) return tok; } return NULL; } /* Labels for IRIS: Iris-setosa - 0 Iris-versicolor - 1 Iris-virginica - 2 */ void read_csv_iris(float *data, float *label, int row_count, char *filename){ //data = (float *)malloc(row_count*4*sizeof(float)); //label = (int *)malloc(row_count*sizeof(int)); FILE *fp = fopen(filename,"r"); char line[1024]; int idx = 0; for(int iter = 0;iter<row_count;iter++) { fgets(line,1024,fp); const char *temp_field; for(int i=0;i<5;i++) { float 
temp_num;
            // NOTE(review): strdup() on every field of every row is never freed -- leak;
            // re-duplicating the line per field is also needed because strtok mutates it.
            char *tmp = strdup(line);
            temp_field = getfield(tmp,i+1);
            if(i==4)
            {
                // Column 4 is the class label; encode it as 0/1/2 (see mapping above).
                if(strcmp(temp_field,"Iris-setosa")==0)
                {
                    label[idx] = 0;
                    continue;
                }
                if(strcmp(temp_field,"Iris-versicolor")==0)
                {
                    label[idx] = 1;
                    continue;
                }
                if(strcmp(temp_field,"Iris-virginica")==0)
                {
                    label[idx] = 2;
                    continue;
                }
            }
            // Columns 0..3 are the four numeric features.
            temp_num = atof(temp_field);
            data[idx*4 + i] = temp_num;
        }
        idx++;
    }
}

/* === Utils === */

// Smallest power of two >= x (returns 1 for x <= 1).
int next_pow_2(int x){
    int y = 1;
    while(y < x) y*=2;
    return y;
}

// Out-of-place transpose: `from` is h x w row-major, `to` becomes w x h row-major.
void copy_transpose(float* to, float* from, int h, int w){
    for(int i=0; i<h; i++){
        for(int j=0; j<w; j++){
            to[index(j, i, h)] = from[index(i, j, w)];
        }
    }
}

// Abort (by default) with file/line context when a CUDA API call fails.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    // From https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Surface any pending launch error, then synchronize to surface async kernel errors.
void debug(){
    cudaError_t code;
    code = cudaPeekAtLastError();
    if(code != cudaSuccess){
        printf("GPUassert: Failed at Init: %s\n", cudaGetErrorString(code));
        exit(code);
    }
    code = cudaDeviceSynchronize();
    if(code != cudaSuccess){
        printf("GPUassert: Failed at Execution: %s\n", cudaGetErrorString(code));
        exit(code);
    }
}

/* === Random Init === */

// Seed one curand state per thread (sequence number = global thread id).
__global__ void init_random(unsigned int seed, curandState_t* states) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, tid, 0, &states[tid]);
}

// Approximate a Binomial(n, p) draw with a normal sample, clamped to [0, n].
// NOTE(review): the normal sample is scaled by n*p*(1-p), which is the binomial
// *variance*; the usual approximation scales by the std-dev sqrt(n*p*(1-p)) --
// confirm whether this is intentional.
__device__ int draw_approx_binomial(int n, float p, curandState_t* state) {
    int x = (int) round(curand_normal(state) * n*p*(1-p) + n*p);
    return max(0, min(x, n));
}

// Uniform draw in [minimum, maximum).
__device__ float draw_uniform(float minimum, float maximum, curandState_t* state){
    return minimum + curand_uniform(state) * (maximum - minimum);
}

/* === Expanding tree memory === */

// Reallocate the per-tree node arrays to new_tree_arr_length rows, copying the
// existing nodes tree-by-tree (the per-tree pitch changes with the row count).
void expand(float** d_trees_ptr, int num_trees, int tree_arr_length, int new_tree_arr_length){
    float *new_d_trees, *d_trees;
    d_trees = *d_trees_ptr;
    assert(new_tree_arr_length >= 
tree_arr_length);
    cudaMalloc((void **) &new_d_trees, num_trees * NUM_FIELDS * new_tree_arr_length * sizeof(float));
    printf("Malloced: %d\n", num_trees * NUM_FIELDS * new_tree_arr_length);
    //cudaMemcpy(new_d_trees, d_trees, num_trees * NUM_FIELDS * tree_arr_length *sizeof(float), cudaMemcpyDeviceToDevice);
    // Per-tree copy: each tree's slab moves to a wider pitch in the new buffer.
    // NOTE(review): the newly grown tail of each slab is left uninitialized --
    // presumably always written before being read when nodes are appended; confirm.
    for(int i=0; i<num_trees; i++){
        cudaMemcpy(
            new_d_trees + i * (NUM_FIELDS * new_tree_arr_length),
            d_trees + i * (NUM_FIELDS * tree_arr_length),
            (NUM_FIELDS * tree_arr_length) * sizeof(float),
            cudaMemcpyDeviceToDevice);
    }
    cudaFree(d_trees);
    *d_trees_ptr = new_d_trees;
}

// Block-wide max reduction over d_tree_lengths into d_max_tree_length[0].
// Expected launch: one block, >= num_trees threads, dynamic shared memory of
// blockDim.x ints (the caller sizes it with next_pow_2(num_trees)).
__global__ void get_max_tree_length(int* d_tree_lengths, int num_trees, int* d_max_tree_length){
    extern __shared__ int tree_length_buffer[];
    // Pad slots beyond num_trees with -1 so they never win the max.
    if(threadIdx.x < num_trees){
        tree_length_buffer[threadIdx.x] = d_tree_lengths[threadIdx.x];
    }else{
        tree_length_buffer[threadIdx.x] = -1;
    }
    // NOTE(review): this tree reduction assumes blockDim.x is a power of two, but the
    // caller launches with blockDim.x = num_trees (only the shared-memory size is rounded
    // up). For odd active counts the middle element is dropped (e.g. 25 -> stride 12
    // skips index 24) -- confirm whether num_trees is guaranteed to be a power of two.
    for(int stride=blockDim.x/2; stride > 0; stride >>=1){
        __syncthreads();
        if(threadIdx.x < stride){
            if(tree_length_buffer[threadIdx.x + stride] > tree_length_buffer[threadIdx.x]){
                tree_length_buffer[threadIdx.x] = tree_length_buffer[threadIdx.x + stride];
            }
        }
    }
    if(threadIdx.x == 0){
        d_max_tree_length[0] = tree_length_buffer[0];
    }
}

// Grow the tree arrays (doubling) whenever the longest tree is within 2 nodes of
// the current capacity, so each growth step can append up to 2 children safely.
void maybe_expand(float** d_trees_ptr, int num_trees, int* tree_arr_length, int* d_tree_lengths, int* max_tree_length, int* d_max_tree_length){
    // I wonder if it's faster just to compute max on CPU. 
int new_tree_arr_length; get_max_tree_length<<<1, num_trees, next_pow_2(num_trees) * sizeof(int)>>>( d_tree_lengths, num_trees, d_max_tree_length ); printf("get_max_tree_length(%d, %d, %d)\n", 1, num_trees, next_pow_2(num_trees)); cudaMemcpy(max_tree_length, d_max_tree_length, sizeof(int), cudaMemcpyDeviceToHost); // Buffer of 2 => up to 2 additions at a time if(*max_tree_length <= *tree_arr_length-3){ return; }else{ new_tree_arr_length = (*tree_arr_length) * 2; while(*max_tree_length > new_tree_arr_length-2){ new_tree_arr_length *= 2; } printf("Expanding to %d\n", new_tree_arr_length); expand(d_trees_ptr, num_trees, *tree_arr_length, new_tree_arr_length); *tree_arr_length = new_tree_arr_length; } } /* === Tree Initialization === */ __global__ void kernel_initialize_trees(float *d_trees, int* d_tree_lengths, int tree_arr_length){ d_trees[ixt(0, LEFT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = 0; d_trees[ixt(0, RIGHT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = 0; d_trees[ixt(0, DEPTH_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = 0; d_trees[ixt(0, PRED_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_tree_lengths[threadIdx.x] = 1; } void initialize_trees(float* d_trees, int num_trees, int tree_arr_length, int* d_tree_lengths){ kernel_initialize_trees<<<1, num_trees>>>(d_trees, d_tree_lengths, tree_arr_length); } __global__ void kernel_initialize_batch_pos(int *d_batch_pos, int x_length, int num_trees){ int i; for(i=threadIdx.x; i<x_length; i+=blockDim.x){ d_batch_pos[index(blockIdx.x, i, x_length)] = 0; } } void initialize_batch_pos(int *d_batch_pos, int x_length, int num_trees, cudaDeviceProp dev_prop){ kernel_initialize_batch_pos<<<num_trees, dev_prop.maxThreadsPerBlock>>>( d_batch_pos, x_length, num_trees ); } /* === Tree Growth checks === */ __global__ void kernel_refresh_tree_is_done(int* d_tree_lengths, int* d_tree_is_done, int tree_pos){ // threadIdx.x = tree_id int is_done; if(tree_pos < d_tree_lengths[threadIdx.x]){ is_done = 0; }else{ 
is_done = 1; } d_tree_is_done[threadIdx.x] = is_done; } void refresh_tree_is_done(int* d_tree_lengths, int* d_tree_is_done, int tree_pos, int num_trees){ kernel_refresh_tree_is_done<<<1, num_trees>>>( d_tree_lengths, d_tree_is_done, tree_pos ); } int check_forest_done(int* d_tree_is_done, int *tree_is_done, int num_trees){ cudaMemcpy(tree_is_done, d_tree_is_done, num_trees * sizeof(int), cudaMemcpyDeviceToHost); int trees_left; trees_left = 0; for(int i=0; i<num_trees; i++){ if(!tree_is_done[i]){ trees_left++; } } printf("%d trees left to grow\n", trees_left); if(trees_left == 0){ return 1; }else{ return 0; } } /* === Tree Traversal === */ __global__ void kernel_traverse_trees( float *d_trees, float* d_x, int x_length, int num_trees, int tree_arr_length, int* d_batch_pos ){ // Should optimize this. It's just a bunch of global reads. // Also possibly to rewrite this and batch_traverse to support a "next-step" method instead of a full // traversal while growing int pos, new_pos, left_right_key, x_i, tree_id, tx; tx = threadIdx.x + blockIdx.x * blockDim.x; if(tx >= x_length * num_trees) return; // Actually get x_i, tree_id tree_id = tx % num_trees; x_i = tx / num_trees; pos = 0; while(1){ if(d_x[index(x_i, (int) d_trees[ixt(pos, FEAT_KEY, tree_id, NUM_FIELDS, tree_arr_length)], FEATURE)] < d_trees[ixt(pos, CUT_KEY, tree_id, NUM_FIELDS, tree_arr_length)]){ left_right_key = LEFT_KEY; }else{ left_right_key = RIGHT_KEY; } new_pos = (int) d_trees[ixt(pos, left_right_key, tree_id, NUM_FIELDS, tree_arr_length)]; if(new_pos == pos){ // Leaf nodes are set up to be idempotent break; } pos = new_pos; } d_batch_pos[index(tree_id, x_i, x_length)] = pos; } void batch_traverse_trees( float *d_trees, float *d_x, int x_length, int num_trees, int tree_arr_length, int *d_batch_pos, cudaDeviceProp dev_prop){ int block_size, num_blocks; block_size = dev_prop.maxThreadsPerBlock; num_blocks = ceil(num_trees * x_length/((float) block_size)); kernel_traverse_trees<<<num_blocks, block_size>>>( 
d_trees, d_x, x_length, num_trees, tree_arr_length, d_batch_pos
    );
}

// Advance every sample one level down its tree: read the sample's current node
// from d_batch_pos, compare the node's split feature against its cut, and store
// the chosen child's index back into d_batch_pos.
// NOTE(review): indexing uses TRAIN_NUM instead of the x_length parameter, so this
// kernel is only correct when x_length == TRAIN_NUM (how it is called today) -- confirm.
__global__ void kernel_advance_trees(float *d_trees, float* d_x, int x_length, int tree_arr_length, int num_trees, int* d_batch_pos){
    int pos, left_right_key, x_i;
    // threadIdx.x = x_i, blockIdx.x = tree_id
    for(x_i=threadIdx.x; x_i < x_length; x_i+=blockDim.x){
        pos = d_batch_pos[index(blockIdx.x, x_i, TRAIN_NUM)];
        if(d_x[index(x_i, (int) d_trees[ixt(pos, FEAT_KEY, blockIdx.x, NUM_FIELDS, tree_arr_length)], FEATURE)]
                < d_trees[ixt(pos, CUT_KEY, blockIdx.x, NUM_FIELDS, tree_arr_length)]){
            left_right_key = LEFT_KEY;
        }else{
            left_right_key = RIGHT_KEY;
        }
        d_batch_pos[index(blockIdx.x, x_i, TRAIN_NUM)] = (int) d_trees[ixt(pos, left_right_key, blockIdx.x, NUM_FIELDS, tree_arr_length)];
    }
}

// Host wrapper: one block per tree, threads stride over the samples.
void batch_advance_trees(float *d_tree, float *d_x, int x_length, int tree_arr_length, int num_trees, int *d_batch_pos, cudaDeviceProp dev_prop){
    kernel_advance_trees<<<num_trees, dev_prop.maxThreadsPerBlock>>>(
        d_tree, d_x, x_length, tree_arr_length, num_trees, d_batch_pos
    );
}

/* === Node termination === */

// Decide, per tree (threadIdx.x = tree_id), whether the node at tree_pos must branch:
// it branches iff the samples currently sitting at that node carry more than one
// distinct label. Pure nodes become leaves and get their prediction written below.
__global__ void kernel_check_node_termination( float* d_trees, int tree_arr_length, float* d_y, int* d_batch_pos, int tree_pos, int* d_is_branch_node, int* d_tree_is_done ){
    // threadIdx.x = tree_id
    int i, base_y, new_y, is_branch_node;
    // If tree is done, it's never a branch node
    if(d_tree_is_done[threadIdx.x]==1){
        d_is_branch_node[threadIdx.x] = 0;
        return;
    }
    // Check for non-unique Y
    base_y = -1;
    is_branch_node = 0;
    // NOTE(review): loop starts at i=1, so sample 0 is never inspected -- looks like
    // an off-by-one; confirm whether sample 0 was meant to be excluded.
    for(i=1; i<TRAIN_NUM; i++){
        if(d_batch_pos[index(threadIdx.x, i, TRAIN_NUM)] == tree_pos){
            new_y = d_y[i];
            if(base_y == -1){
                base_y = new_y;
            }else if(base_y != new_y){
                is_branch_node = 1;
                break;
            }
        }
    }
    d_is_branch_node[threadIdx.x] = is_branch_node;
    // base_y == -1 means no sample reached this node; in-kernel printf is debug-only.
    if(base_y==-1){
        printf("ERROR ERROR ERROR EMPTY 1TREE %d\n", threadIdx.x);
        printf("ERROR ERROR ERROR EMPTY 2TREE %d\n", threadIdx.x);
        printf("ERROR ERROR ERROR EMPTY 2TREE %d\n", threadIdx.x);
    }
    // Pure node: record the unanimous label as this leaf's prediction.
    if(!is_branch_node){
        d_trees[ixt(tree_pos, PRED_KEY, 
threadIdx.x, NUM_FIELDS, tree_arr_length)] = base_y; } } void check_node_termination( float* d_trees, int tree_arr_length, float* d_y, int* d_batch_pos, int tree_pos, int* d_is_branch_node, int* d_tree_is_done, int num_trees ){ kernel_check_node_termination<<<1, num_trees>>>( d_trees, tree_arr_length, d_y, d_batch_pos, tree_pos, d_is_branch_node, d_tree_is_done ); } /* === Valid features === */ __global__ void kernel_collect_min_max(float* d_x_T, int* d_batch_pos, int desired_pos, int num_trees, int x_length, float* d_min_max_buffer, int* d_is_branch_node){ extern __shared__ float shared_min_max[]; // threadIdx.x * 2 // Ripe for optimization. // threadIdx.x = x_i++, blockIdx.x = tree_id, feat = blockIdx.y int x_i; float minimum, maximum, val; if(!d_is_branch_node[blockIdx.x]){ return; } minimum = FLT_MAX; maximum = -FLT_MAX; for(x_i=threadIdx.x; x_i < x_length; x_i+=blockDim.x){ if(d_batch_pos[index(blockIdx.x, x_i, x_length)] == desired_pos){ val = d_x_T[index(blockIdx.y, x_i, TRAIN_NUM)]; if(val < minimum){ minimum = val; } if(val > maximum){ maximum = val; } } } shared_min_max[index(threadIdx.x, 0, 2)] = minimum; shared_min_max[index(threadIdx.x, 1, 2)] = maximum; for(int stride=blockDim.x/2; stride > 0; stride >>=1){ __syncthreads(); if(threadIdx.x < stride){ if(shared_min_max[index(threadIdx.x + stride, 0, 2)] < shared_min_max[index(threadIdx.x, 0, 2)]){ shared_min_max[index(threadIdx.x, 0, 2)] = shared_min_max[index(threadIdx.x + stride, 0, 2)]; } if(shared_min_max[index(threadIdx.x + stride, 1, 2)] > shared_min_max[index(threadIdx.x, 1, 2)]){ shared_min_max[index(threadIdx.x, 1, 2)] = shared_min_max[index(threadIdx.x + stride, 1, 2)]; } } } if(threadIdx.x==0){ d_min_max_buffer[ixt(blockIdx.y, 0, blockIdx.x, 2, FEATURE)] = shared_min_max[index(0, 0, 2)]; d_min_max_buffer[ixt(blockIdx.y, 1, blockIdx.x, 2, FEATURE)] = shared_min_max[index(0, 1, 2)]; } } void collect_min_max(float* d_x_T, int* d_batch_pos, int desired_pos, int num_trees, int x_length, float* 
d_min_max_buffer, int* d_is_branch_node, cudaDeviceProp dev_prop){ // Ripe for optimization. dim3 grid(num_trees, FEATURE); kernel_collect_min_max<<<grid, 64, 64 * sizeof(int) * 2>>>( d_x_T, d_batch_pos, desired_pos, num_trees, x_length, d_min_max_buffer, d_is_branch_node ); } __global__ void kernel_collect_num_valid_feat( int* d_num_valid_feat, float* d_min_max_buffer, int num_trees, int* d_is_branch_node ){ extern __shared__ int shared_num_valid_feat_buffer[]; // blockIdx.x = tree_id int sub_num_valid_feat, feat_i; if(!d_is_branch_node[blockIdx.x]){ return; } sub_num_valid_feat = 0; for(feat_i=threadIdx.x; feat_i<FEATURE; feat_i+=blockDim.x){ if(d_min_max_buffer[ixt(feat_i, 0, blockIdx.x, 2, FEATURE)] != d_min_max_buffer[ixt(feat_i, 1, blockIdx.x, 2, FEATURE)] ){ sub_num_valid_feat++; } } shared_num_valid_feat_buffer[threadIdx.x] = sub_num_valid_feat; for(int stride=blockDim.x/2; stride > 0; stride >>=1){ __syncthreads(); if(threadIdx.x < stride){ shared_num_valid_feat_buffer[threadIdx.x] += shared_num_valid_feat_buffer[threadIdx.x + stride]; } } if(threadIdx.x == 0){ d_num_valid_feat[blockIdx.x] = shared_num_valid_feat_buffer[0]; } } void collect_num_valid_feat( int* d_num_valid_feat, float* d_min_max_buffer, int num_trees, int* d_is_branch_node, cudaDeviceProp dev_prop ){ // Ripe for optimization int block_size = MIN(dev_prop.maxThreadsPerBlock, next_pow_2(FEATURE)); // Copy this to other places too kernel_collect_num_valid_feat<<<num_trees, block_size, block_size * sizeof(int)>>>( d_num_valid_feat, d_min_max_buffer, num_trees, d_is_branch_node ); } /* === Populate Random Features === */ __global__ void kernel_depopulate_valid_feat_idx( int* d_random_feats, int num_trees, int feat_per_node ){ int t; for(t=0; t<num_trees; t++){ //-1 means fill-forward d_random_feats[index(t, threadIdx.x, feat_per_node)] = -1; } } __global__ void kernel_populate_valid_feat_idx( int* d_random_feats, int* d_num_valid_feat, int feat_per_node, int* d_is_branch_node, curandState_t* 
curand_states ){ // threadIdx.x = tree_id int k, idx, draw, num_valid_feat; if(!d_is_branch_node[threadIdx.x]){ return; } idx = 0; num_valid_feat = d_num_valid_feat[threadIdx.x]; for(k=0; k<(num_valid_feat-1); k++){ draw = draw_approx_binomial(feat_per_node-idx, 1./(num_valid_feat-k), curand_states + threadIdx.x); if(draw > 0){ d_random_feats[index(threadIdx.x, idx, feat_per_node)] = k; } idx += draw; if(idx >= feat_per_node){ return; } } if(idx < feat_per_node){ d_random_feats[index(threadIdx.x, idx, feat_per_node)] = k; } } __global__ void kernel_populate_feat_cut( int* d_random_feats, float* d_random_cuts, float* d_min_max_buffer, int feat_per_node, int num_trees, int* d_is_branch_node, curandState_t* curand_states ){ // threadIdx.x = tree_id int feat_i, feat_idx, feat_idx_idx, valid_feats_seen, buffer; float minimum, maximum; if(!d_is_branch_node[threadIdx.x]){ return; } feat_idx = -1; // First element will overwrite feat_idx_idx = 0; // Parallel construction valid_feats_seen = 0; for(feat_i=0; feat_i < FEATURE; feat_i++){ minimum = d_min_max_buffer[ixt(feat_i, 0, threadIdx.x, 2, FEATURE)]; maximum = d_min_max_buffer[ixt(feat_i, 1, threadIdx.x, 2, FEATURE)]; if(minimum!=maximum){ while(1){ buffer = d_random_feats[index(threadIdx.x, feat_idx_idx, feat_per_node)]; if(buffer != -1){ feat_idx = buffer; } if(feat_idx==valid_feats_seen){ d_random_feats[index(threadIdx.x, feat_idx_idx, feat_per_node)] = feat_i; d_random_cuts[index(threadIdx.x, feat_idx_idx, feat_per_node)] = draw_uniform(minimum, maximum, curand_states+threadIdx.x); }else{ break; } feat_idx_idx++; if(feat_idx_idx >= feat_per_node){ return; } } } valid_feats_seen++; } } void populate_valid_feat_idx( int* d_random_feats, int* d_num_valid_feat, int feat_per_node, int num_trees, int* d_is_branch_node, curandState_t* curand_states ){ kernel_depopulate_valid_feat_idx<<<1, feat_per_node>>>( d_random_feats, num_trees, feat_per_node); kernel_populate_valid_feat_idx<<<1, num_trees>>>( d_random_feats, 
d_num_valid_feat, feat_per_node, d_is_branch_node, curand_states ); } void populate_feat_cut(int* d_random_feats, float* d_random_cuts, float* d_min_max_buffer, int feat_per_node, int num_trees, int* d_is_branch_node, curandState_t* curand_states){ kernel_populate_feat_cut<<<1, num_trees>>>( d_random_feats, d_random_cuts, d_min_max_buffer, feat_per_node, num_trees, d_is_branch_node, curand_states ); } /* === Count Classes === */ __global__ void kernel_populate_class_counts( float* d_x, float* d_y, int* d_class_counts_a, int* d_class_counts_b, int* d_random_feats, float* d_random_cuts, int* d_batch_pos, int tree_pos, int num_trees, int feat_per_node, int* d_is_branch_node ){ // Naive version // threadIdx.x = tree_id, blockIdx.x = rand_feat_i int i, y, feat; float cut; if(!d_is_branch_node[threadIdx.x]){ return; } feat = d_random_feats[index(threadIdx.x, blockIdx.x, feat_per_node)]; cut = d_random_cuts[index(threadIdx.x, blockIdx.x, feat_per_node)]; for(i=0; i<NUMBER_OF_CLASSES; i++){ //tree node class d_class_counts_a[ixt(threadIdx.x, blockIdx.x, i, feat_per_node, num_trees)] = 0; d_class_counts_b[ixt(threadIdx.x, blockIdx.x, i, feat_per_node, num_trees)] = 0; } for(i=0; i<TRAIN_NUM; i++){ if(d_batch_pos[index(threadIdx.x, i, TRAIN_NUM)]==tree_pos){ y = (int) d_y[i]; if(d_x[index(i, feat, FEATURE)] < cut){ d_class_counts_a[ixt(threadIdx.x, blockIdx.x, y, feat_per_node, num_trees)]++; }else{ d_class_counts_b[ixt(threadIdx.x, blockIdx.x, y, feat_per_node, num_trees)]++; } } } } void populate_class_counts( float* d_x, float* d_y, int* d_class_counts_a, int* d_class_counts_b, int* d_random_feats, float* d_random_cuts, int* d_batch_pos, int tree_pos, int num_trees, int feat_per_node, int* d_is_branch_node ){ // Naive version kernel_populate_class_counts<<<feat_per_node, num_trees>>>( d_x, d_y, d_class_counts_a, d_class_counts_b, d_random_feats, d_random_cuts, d_batch_pos, tree_pos, num_trees, feat_per_node, d_is_branch_node ); } /* === Place Best Features/Cuts === */ 
__global__ void kernel_place_best_feat_cuts( int* d_class_counts_a, int* d_class_counts_b, int* d_random_feats, float* d_random_cuts, int* d_best_feats, float* d_best_cuts, int feat_per_node, int num_trees, int* d_is_branch_node ){ // Naive version => Can move class_counts into shared memory // threadIdx.x = tree_id int i, k; float best_improvement, best_cut, proxy_improvement; int best_feat; int total_a, total_b; float impurity_a, impurity_b; if(!d_is_branch_node[threadIdx.x]){ return; } best_improvement = -FLT_MAX; best_feat = -1; best_cut = 0; for(i=0; i<feat_per_node; i++){ total_a = 0; total_b = 0; impurity_a = 1; impurity_b = 1; for(k=0; k<NUMBER_OF_CLASSES; k++){ total_a += d_class_counts_a[ixt(threadIdx.x, i, k, feat_per_node, num_trees)]; total_b += d_class_counts_b[ixt(threadIdx.x, i, k, feat_per_node, num_trees)]; } for(k=0; k<NUMBER_OF_CLASSES; k++){ impurity_a -= pow(((float) d_class_counts_a[ixt(threadIdx.x, i, k, feat_per_node, num_trees)]) / total_a, 2); impurity_b -= pow(((float) d_class_counts_b[ixt(threadIdx.x, i, k, feat_per_node, num_trees)]) / total_b, 2); } proxy_improvement = - total_a * impurity_a - total_b * impurity_b; if(proxy_improvement > best_improvement){ best_feat = d_random_feats[index(threadIdx.x, i, feat_per_node)]; best_cut = d_random_cuts[index(threadIdx.x, i, feat_per_node)]; best_improvement = proxy_improvement; } } d_best_feats[threadIdx.x] = best_feat; d_best_cuts[threadIdx.x] = best_cut; } void place_best_feat_cuts( int* d_class_counts_a, int* d_class_counts_b, int* d_random_feats, float* d_random_cuts, int* d_best_feats, float* d_best_cuts, int feat_per_node, int num_trees, int* d_is_branch_node ){ // Naive version kernel_place_best_feat_cuts<<<1, num_trees>>>( d_class_counts_a, d_class_counts_b, d_random_feats, d_random_cuts, d_best_feats, d_best_cuts, feat_per_node, num_trees, d_is_branch_node ); } /* === Update Trees === */ __global__ void kernel_update_trees( float* d_trees, int* d_tree_lengths, int tree_pos, int* 
d_best_feats, float* d_best_cuts, int tree_arr_length, int* d_is_branch_node ){ // Naive version // threadIdx.x = tree_id int left_child_pos, right_child_pos, tree_length; if(!d_is_branch_node[threadIdx.x]){ return; } tree_length = d_tree_lengths[threadIdx.x]; left_child_pos = tree_length; right_child_pos = tree_length + 1; // Update tree nodes d_trees[ixt(tree_pos, LEFT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = left_child_pos; d_trees[ixt(tree_pos, RIGHT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = right_child_pos; d_trees[ixt(tree_pos, FEAT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = d_best_feats[threadIdx.x]; d_trees[ixt(tree_pos, CUT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = d_best_cuts[threadIdx.x]; d_tree_lengths[threadIdx.x] += 2; // Prefill child nodes d_trees[ixt(left_child_pos, LEFT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = left_child_pos; d_trees[ixt(left_child_pos, RIGHT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = left_child_pos; d_trees[ixt(left_child_pos, DEPTH_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = \ d_trees[ixt(tree_pos, DEPTH_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] + 1; d_trees[ixt(left_child_pos, FEAT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_trees[ixt(left_child_pos, CUT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_trees[ixt(left_child_pos, PRED_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_trees[ixt(right_child_pos, LEFT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = right_child_pos; d_trees[ixt(right_child_pos, RIGHT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = right_child_pos; d_trees[ixt(right_child_pos, DEPTH_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = \ d_trees[ixt(tree_pos, DEPTH_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] + 1; d_trees[ixt(right_child_pos, FEAT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_trees[ixt(right_child_pos, CUT_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; d_trees[ixt(right_child_pos, 
PRED_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)] = -1; } void update_trees( float* d_trees, int* d_tree_lengths, int tree_pos, int* d_best_feats, float* d_best_cuts, int tree_arr_length, int num_trees, int* d_is_branch_node ){ kernel_update_trees<<<1, num_trees>>>( d_trees, d_tree_lengths, tree_pos, d_best_feats, d_best_cuts, tree_arr_length, d_is_branch_node ); } /* === Evaluate === */ __global__ void kernel_raw_predict( float *d_raw_pred_y, float* d_trees, int* d_batch_pos, int tree_arr_length, int x_length ){ // threadIdx.x = tree_id, blockIdx.x = x_i int pos; pos = d_batch_pos[index(threadIdx.x, blockIdx.x, x_length)]; d_raw_pred_y[index(threadIdx.x, blockIdx.x, x_length)] = d_trees[ ixt(pos, PRED_KEY, threadIdx.x, NUM_FIELDS, tree_arr_length)]; } void raw_predict( float *d_raw_pred_y, float* d_trees, int* d_batch_pos, int tree_arr_length, int x_length, int num_trees ){ kernel_raw_predict<<<x_length, num_trees>>>( d_raw_pred_y, d_trees, d_batch_pos, tree_arr_length, x_length ); } void predict(float* pred_y, float* raw_pred_y, int x_length, int num_trees){ int *class_count_buffer; int i, j, k, pred, maximum, maximum_class; class_count_buffer = (int *)malloc(NUMBER_OF_CLASSES * sizeof(int)); for(k=0; k<NUMBER_OF_CLASSES; k++){ class_count_buffer[k] = 0; } for(i=0; i<x_length; i++){ for(j=0; j<num_trees; j++){ pred = (int) raw_pred_y[index(j, i, x_length)]; class_count_buffer[pred]++; } maximum = -1; for(k=0; k<NUMBER_OF_CLASSES; k++){ if(class_count_buffer[k] > maximum){ maximum = class_count_buffer[k]; maximum_class = k; } class_count_buffer[k] = 0; } printf("Setting %d to %d with counts %d\n", i, maximum_class, maximum); pred_y[i] = (float) maximum_class; } } float evaluate(float* pred_y, float* true_y, int y_length){ int i; float score; score = 0; for(i=0; i<y_length; i++){ if((int) pred_y[i] == (int) true_y[i]){ score += 1; } } score /= y_length; return score; } int main(int argc,char *argv[]) { float *dataset_train,*dataset_test; float 
*labels_train,*labels_test; dataset_train = (float *)malloc(FEATURE * TRAIN_NUM*sizeof(float)); labels_train = (float *)malloc(TRAIN_NUM*sizeof(float)); dataset_test = (float *)malloc(FEATURE * TEST_NUM*sizeof(float)); labels_test = (float *)malloc(TEST_NUM*sizeof(float)); char file_train_set[] = "data/iris_train.data"; char file_test_set[] = "data/iris_test.data"; read_csv_iris(dataset_train,labels_train,TRAIN_NUM,file_train_set); read_csv_iris(dataset_test,labels_test,TEST_NUM,file_test_set); float *dataset_train_T; dataset_train_T = (float *)malloc(TRAIN_NUM * FEATURE * sizeof(float)); copy_transpose(dataset_train_T, dataset_train, TRAIN_NUM, FEATURE); float *trees, *d_trees; int *tree_arr_length; int *tree_lengths, *d_tree_lengths; int *max_tree_length, *d_max_tree_length; int feat_per_node; int *num_valid_feat, *d_num_valid_feat; int tree_pos; int *batch_pos, *d_batch_pos; // NUM_TREES * TRAIN_NUM int *is_branch_node, *d_is_branch_node; int *tree_is_done, *d_tree_is_done; float *min_max_buffer, *d_min_max_buffer; int *random_feats, *d_random_feats; float *random_cuts, *d_random_cuts; int *class_counts_a, *class_counts_b; int *d_class_counts_a, *d_class_counts_b; int *best_feats, *d_best_feats; float *best_cuts, *d_best_cuts; int prev_depth, max_depth; float *d_x, *d_y; float *d_x_T; float *pred_y, *raw_pred_y, *d_raw_pred_y; curandState_t* curand_states; int num_trees; num_trees = 200; // Assumption: num_trees < maxNumBlocks, maxThreadsPerBlock srand(2); tree_arr_length = (int *)malloc(sizeof(int)); tree_lengths = (int *)malloc(num_trees * sizeof(int)); *tree_arr_length = 8; max_tree_length = (int *)malloc(sizeof(int)); feat_per_node = (int) ceil(sqrt(FEATURE)); //trees = (float *)malloc(num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float)); batch_pos = (int *)malloc(num_trees * TRAIN_NUM *sizeof(float)); is_branch_node = (int *)malloc(num_trees * sizeof(int)); tree_is_done = (int *)malloc(num_trees * sizeof(int)); min_max_buffer = (float 
*)malloc(num_trees * FEATURE * 2 *sizeof(float)); num_valid_feat = (int *)malloc(num_trees * sizeof(int)); random_feats = (int *)malloc(num_trees * feat_per_node * sizeof(int)); random_cuts = (float *)malloc(num_trees * feat_per_node * sizeof(float)); best_feats = (int *)malloc(num_trees * sizeof(int)); best_cuts = (float *)malloc(num_trees * sizeof(float)); class_counts_a = (int *)malloc(num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int)); class_counts_b = (int *)malloc(num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int)); cudaDeviceProp dev_prop; cudaGetDeviceProperties(&dev_prop, 0); cudaMalloc((void **) &d_trees, num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float)); cudaMalloc((void **) &d_tree_lengths, num_trees * sizeof(int)); cudaMalloc((void **) &d_max_tree_length, sizeof(int)); cudaMalloc((void **) &d_batch_pos, num_trees * TRAIN_NUM *sizeof(float)); cudaMalloc((void **) &d_is_branch_node, num_trees * sizeof(int)); cudaMalloc((void **) &d_tree_is_done, num_trees * sizeof(int)); cudaMalloc((void **) &d_min_max_buffer, num_trees * FEATURE * 2 *sizeof(float)); cudaMalloc((void **) &d_num_valid_feat, num_trees *sizeof(int)); cudaMalloc((void **) &d_random_feats, num_trees * feat_per_node * sizeof(int)); cudaMalloc((void **) &d_random_cuts, num_trees * feat_per_node * sizeof(float)); cudaMalloc((void **) &d_best_feats, num_trees * sizeof(int)); cudaMalloc((void **) &d_best_cuts, num_trees * sizeof(float)); cudaMalloc((void **) &d_class_counts_a, num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int)); cudaMalloc((void **) &d_class_counts_b, num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int)); cudaMalloc((void **) &d_x, TRAIN_NUM * FEATURE *sizeof(float)); cudaMalloc((void **) &d_y, TRAIN_NUM *sizeof(float)); cudaMalloc((void **) &d_x_T, TRAIN_NUM * FEATURE *sizeof(float)); cudaMemcpy(d_x, dataset_train, TRAIN_NUM * FEATURE *sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, labels_train, TRAIN_NUM *sizeof(float), 
cudaMemcpyHostToDevice); cudaMemcpy(d_x_T, dataset_train_T, TRAIN_NUM * FEATURE *sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**) &curand_states, num_trees * sizeof(curandState)); init_random<<<1, num_trees>>>(1337, curand_states); initialize_trees(d_trees, num_trees, *tree_arr_length, d_tree_lengths); initialize_batch_pos(d_batch_pos, TRAIN_NUM, num_trees, dev_prop); for(tree_pos=0; tree_pos<200; tree_pos++){ printf("* ================== TREE POS -[ %d ]- ================== *\n", tree_pos); trees = (float *)malloc(num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float)); cudaMemcpy(trees, d_trees, num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float), cudaMemcpyDeviceToHost); printf("%d\n", num_trees * NUM_FIELDS * (*tree_arr_length)); for(int i=0; i<num_trees; i++){ printf("T=%d ", i); for(int j=0; j<=10; j++){ printf("%d ", (int) trees[ixt(j, LEFT_KEY, i, NUM_FIELDS, *tree_arr_length)]); } printf("\n "); for(int j=0; j<=10; j++){ printf("%d ", (int) trees[ixt(j, RIGHT_KEY, i, NUM_FIELDS, *tree_arr_length)]); } printf("\n"); } free(trees); refresh_tree_is_done(d_tree_lengths, d_tree_is_done, tree_pos, num_trees); if(check_forest_done(d_tree_is_done, tree_is_done, num_trees)){ printf("DONE\n"); break; } maybe_expand(&d_trees, num_trees, tree_arr_length, d_tree_lengths, max_tree_length, d_max_tree_length); batch_advance_trees(d_trees, d_x, TRAIN_NUM, *tree_arr_length, num_trees, d_batch_pos, dev_prop); cudaMemcpy(batch_pos, d_batch_pos, num_trees * TRAIN_NUM * sizeof(float), cudaMemcpyDeviceToHost); for(int i=0; i<num_trees; i++){ //printf("T=%d traverse\n", i); for(int j=0; j<TRAIN_NUM; j++){ //printf("%d ", batch_pos[index(i, j, TRAIN_NUM)]); } //printf("\n"); } check_node_termination( d_trees, *tree_arr_length, d_y, d_batch_pos, tree_pos, d_is_branch_node, d_tree_is_done, num_trees ); // ^^ cudaMemcpy(is_branch_node, d_is_branch_node, num_trees * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(tree_is_done, d_tree_is_done, num_trees * 
sizeof(int), cudaMemcpyDeviceToHost); printf("TREE IS DONE : "); for(int i=0; i<num_trees; i++){printf("%d ", tree_is_done[i]);};printf("\n"); printf("IS BRANCH NODE: "); for(int i=0; i<num_trees; i++){printf("%d ", is_branch_node[i]);};printf("\n"); // VV collect_min_max( d_x_T, d_batch_pos, tree_pos, num_trees, TRAIN_NUM, d_min_max_buffer, d_is_branch_node, dev_prop ); collect_num_valid_feat( d_num_valid_feat, d_min_max_buffer, num_trees, d_is_branch_node, dev_prop ); populate_valid_feat_idx( d_random_feats, d_num_valid_feat, feat_per_node, num_trees, d_is_branch_node, curand_states ); // AAAA /* cudaMemcpy(random_feats, d_random_feats, num_trees * feat_per_node * sizeof(int), cudaMemcpyDeviceToHost); for(int i=0; i<num_trees; i++){ printf("T=%d: ", i); for(int j=0; j<feat_per_node; j++){ printf("%d(%d) ", random_feats[index(i, j, feat_per_node)], index(i, j, feat_per_node)); } printf("\n"); }*/ // ZZZZ populate_feat_cut( d_random_feats, d_random_cuts, d_min_max_buffer, feat_per_node, num_trees, d_is_branch_node, curand_states ); populate_class_counts( d_x, d_y, d_class_counts_a, d_class_counts_b, d_random_feats, d_random_cuts, d_batch_pos, tree_pos, num_trees, feat_per_node, d_is_branch_node ); place_best_feat_cuts( d_class_counts_a, d_class_counts_b, d_random_feats, d_random_cuts, d_best_feats, d_best_cuts, feat_per_node, num_trees, d_is_branch_node ); update_trees( d_trees, d_tree_lengths, tree_pos, d_best_feats, d_best_cuts, *tree_arr_length, num_trees, d_is_branch_node ); cudaMemcpy(random_feats, d_random_feats, num_trees * feat_per_node * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(random_cuts, d_random_cuts, num_trees * feat_per_node * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(class_counts_a, d_class_counts_a, num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(class_counts_b, d_class_counts_b, num_trees * feat_per_node * NUMBER_OF_CLASSES *sizeof(int), cudaMemcpyDeviceToHost); 
cudaMemcpy(best_feats, d_best_feats, num_trees * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(best_cuts, d_best_cuts, num_trees * sizeof(float), cudaMemcpyDeviceToHost); int x1; for(int i=0; i<num_trees; i++){ printf("T=%d\n", i); x1 = 0; for(int j=0; j<feat_per_node; j++){ printf(" J=%d @ %d---%f\n", j, random_feats[index(i, j, feat_per_node)], random_cuts[index(i, j, feat_per_node)]); printf(" "); for(int k=0; k<NUMBER_OF_CLASSES; k++){ x1 += class_counts_a[ixt(i, j, k, feat_per_node, num_trees)]; printf(" %d", class_counts_a[ixt(i, j, k, feat_per_node, num_trees)]); } printf("\n"); printf(" "); for(int k=0; k<NUMBER_OF_CLASSES; k++){ x1 += class_counts_b[ixt(i, j, k, feat_per_node, num_trees)]; printf(" %d", class_counts_b[ixt(i, j, k, feat_per_node, num_trees)]); } printf(" ===> %d", x1); printf("\n"); } printf("\n"); } for(int i=0; i<num_trees; i++){ printf("T=%d ==> %d/%f\n", i, best_feats[i], best_cuts[i]); } } /* for(int i=0; i<num_trees; i++){ for(int j=0; j<feat_per_node; j++){ printf(" %d %d %f \n", j, random_feats[index(i, j, feat_per_node)], random_cuts[index(i, j, feat_per_node)]); } printf("\n"); } printf("%d\n", feat_per_node); */ /* TO DO: - Expanding is broken - Check 2nd level filter - Implement terminal nodes - Randomness might be broken */ printf("================= DONE TRAINING =================\n"); /* === TEST === */ cudaFree(d_batch_pos); free(batch_pos); cudaMalloc((void **) &d_batch_pos, num_trees * TEST_NUM * sizeof(float)); pred_y = (float *)malloc(TEST_NUM * sizeof(float)); raw_pred_y = (float *)malloc(num_trees * TEST_NUM * sizeof(float)); cudaFree(d_x); cudaMalloc((void **) &d_x, TEST_NUM * FEATURE * sizeof(float)); cudaMalloc((void **) &d_raw_pred_y, num_trees * TEST_NUM * sizeof(float)); cudaMemcpy(d_x, dataset_test, TEST_NUM * FEATURE * sizeof(float), cudaMemcpyHostToDevice); initialize_batch_pos( d_batch_pos, TEST_NUM, num_trees, dev_prop ); batch_traverse_trees( d_trees, d_x, TEST_NUM, num_trees, *tree_arr_length, 
d_batch_pos, dev_prop ); cudaMemcpy(d_x, dataset_test, TEST_NUM * FEATURE * sizeof(float), cudaMemcpyHostToDevice); raw_predict(d_raw_pred_y, d_trees, d_batch_pos, *tree_arr_length, TEST_NUM, num_trees); cudaMemcpy(raw_pred_y, d_raw_pred_y, num_trees * TEST_NUM * sizeof(float), cudaMemcpyDeviceToHost); predict(pred_y, raw_pred_y, TEST_NUM, num_trees); /* trees = (float *)malloc(num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float)); cudaMemcpy(trees, d_trees, num_trees * NUM_FIELDS * (*tree_arr_length) *sizeof(float), cudaMemcpyDeviceToHost); printf("%d\n", num_trees * NUM_FIELDS * (*tree_arr_length)); for(int i=0; i<num_trees; i++){ printf("T=%d ", i); for(int j=0; j<=10; j++){ printf("%d ", (int) trees[ixt(j, PRED_KEY, i, NUM_FIELDS, *tree_arr_length)]); } printf("\n"); } cudaMemcpy(batch_pos, d_batch_pos, num_trees * TEST_NUM * sizeof(float), cudaMemcpyDeviceToHost); for(int i=0; i<num_trees; i++){ printf("T=%d ", i); for(int j=0; j<=TEST_NUM; j++){ //printf("%d ", batch_pos[index(i, j, TEST_NUM)]); printf("%d ", (int) trees[ ixt(batch_pos[index(i, j, TEST_NUM)], PRED_KEY, i, NUM_FIELDS, *tree_arr_length) ]); } printf("\n"); } for(int i=0; i<TEST_NUM; i++){printf("%d ", (int) pred_y[i]);};printf("\n"); for(int i=0; i<TEST_NUM; i++){printf("%d ", (int) labels_test[i]);};printf("\n"); */ printf("Accuracy: %f\n", evaluate(pred_y, labels_test, TEST_NUM)); debug(); }
3702d6a6c284f38d249db348dd5c4eec2f52ef31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <c10/util/Exception.h> namespace at { namespace native { using namespace at::cuda::detail; template <typename T> __host__ __device__ __forceinline__ T ceilDiv(T a, T b) { return (a + b - 1) / b; } template <typename T> __global__ void max_unpooling2d_forward_kernel( const int64_t numInputElements, const T* input, const int64_t* indices, const int64_t numChannels, const int64_t inputHeight, const int64_t inputWidth, const int64_t outputHeight, const int64_t outputWidth, T* output) { CUDA_KERNEL_LOOP(linearIndex, numInputElements) { int c = (linearIndex / inputWidth / inputHeight) % numChannels; int n = linearIndex / inputWidth / inputHeight / numChannels; output += (n * numChannels + c) * outputHeight * outputWidth; int maxind = indices[linearIndex]; output[maxind] = input[linearIndex]; } } template <typename T> __global__ void max_unpooling3d_forward_kernel( PackedTensorAccessor<T, 4> input, PackedTensorAccessor<int64_t, 4> indices, T* output, const int64_t oT, const int64_t oH, const int64_t oW, const int64_t offsetZ) { int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x; int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y; int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature if (iRow < input.size(2) && iColumn < input.size(3)) { T val = input[slice][iFrame][iRow][iColumn]; int64_t index = indices[slice][iFrame][iRow][iColumn]; output[slice * oT * oH * oW + index] = val; } } template <typename T> __global__ void max_unpooling2d_backward_kernel( const int64_t numInputElements, const T* input, const int64_t* indices, const int64_t numChannels, const int64_t inputHeight, const int64_t inputWidth, const 
int64_t outputHeight, const int64_t outputWidth, T* output) { CUDA_KERNEL_LOOP(linearIndex, numInputElements) { int c = (linearIndex / inputWidth / inputHeight) % numChannels; int n = linearIndex / inputWidth / inputHeight / numChannels; input += (n * numChannels + c) * outputHeight * outputWidth; int maxind = indices[linearIndex]; output[linearIndex] = input[maxind]; } } template <typename T> __global__ void max_unpooling3d_backward_kernel( T* gradOutputData, int64_t oT, int64_t oH, int64_t oW, PackedTensorAccessor<int64_t, 4> indices, PackedTensorAccessor<T, 4> gradInput, int offsetZ) { int iColumn = blockIdx.x * blockDim.x + threadIdx.x; int iRow = blockIdx.y * blockDim.y + threadIdx.y; int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) { int64_t index = indices[slice][iFrame][iRow][iColumn]; T grad_val = gradOutputData[slice * oT * oH * oW + index]; gradInput[slice][iFrame][iRow][iColumn] = grad_val; } } Tensor& max_unpooling2d_forward_out_cuda( Tensor& output, const Tensor& self_, const Tensor& indices_, IntList output_size) { TORCH_CHECK(output.is_contiguous(), "output must be contiguous"); TORCH_CHECK( indices_.scalar_type() == at::ScalarType::Long, "elements in indices should be type int64"); auto oheight = output_size[0]; auto owidth = output_size[1]; TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2}, indices_arg{indices_, "indices_", 3}; checkAllSameGPU( "max_unpooling2d_forward_out_cuda", {output_arg, self_arg, indices_arg}); TORCH_CHECK(self_.numel() > 0, "Input must be non-empty tensor"); TORCH_CHECK( (self_.ndimension() == 3 || self_.ndimension() == 4), "Input to max_unpooling2d should be a 3d or 4d Tensor", self_.sizes()); TORCH_CHECK( self_.sizes() == indices_.sizes(), "Shape of input must match shape of indices"); TORCH_CHECK( output_size.size() == 2, "There 
should be exactly two elements (width, height) in output_size"); int64_t dimw = 2; int64_t dimh = 1; int64_t numBatch = 1; int64_t numChannels; int64_t inputHeight; int64_t inputWidth; auto self = self_.contiguous(); auto indices = indices_.contiguous(); if (self.ndimension() == 4) { numBatch = self.size(0); dimw++; dimh++; } numChannels = self.size(dimh - 1); inputHeight = self.size(dimh); inputWidth = self.size(dimw); output.resize_({numBatch, numChannels, oheight, owidth}); output.zero_(); auto count = self.numel(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] { hipLaunchKernelGGL(( max_unpooling2d_forward_kernel), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self.numel(), self.data<scalar_t>(), indices.data<int64_t>(), numChannels, inputHeight, inputWidth, oheight, owidth, output.data<scalar_t>()); })); TORCH_CHECK( hipGetLastError() == hipSuccess, "max_unpooling2d_forward_kernel failed with error code ", hipGetLastError()); if (self.ndimension() == 3) { output.resize_({numChannels, oheight, owidth}); } return output; } Tensor max_unpooling2d_forward_cuda( const Tensor& self, const Tensor& indices, IntList output_size) { auto output = at::empty({0}, self.options()); max_unpooling2d_forward_out_cuda(output, self, indices, output_size); return output; } static void max_unpooling3d_shape_check( const Tensor& input, const Tensor& gradOutput, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; TORCH_CHECK( indices.scalar_type() == at::ScalarType::Long, "elements in indices should be type int64"); TORCH_CHECK( (input.ndimension() == 4 || input.ndimension() == 5), "Input to max_unpooling3d should be a 4d or 5d Tensor", input.sizes()); TORCH_CHECK( output_size.size() == 3, "There should be exactly three elements (depth, height, width) in 
output_size"); TORCH_CHECK( stride.size() == 3, "There should be exactly three elements (depth, height, width) in stride"); TORCH_CHECK( padding.size() == 3, "There should be exactly three elements (depth, height, width) in padding"); TORCH_CHECK( input.sizes() == indices.sizes(), "Shape of indices should match shape of input"); TORCH_CHECK(input.numel() > 0, "Input must be non-empty"); TORCH_CHECK( stride[0] > 0 && stride[1] > 0 && stride[2] > 0, "strides should be greater than zero, but got stride: ", stride); int dimw = 3; int dimh = 2; int dimt = 1; int dimn = 0; if (input.ndimension() == 5) { dimw++; dimh++; dimt++; dimn++; } int nslices = input.size(dimn); if (gradOutput.defined()) { if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) || oW != gradOutput.size(dimw)) { AT_ERROR( "Inconsistent gradOutput size. oT= ", oT, ", oH= ", oH, ", oW= ", oW, ". gradOutput: ", gradOutput.size(dimt), "x", gradOutput.size(dimh), "x", gradOutput.size(dimw)); } TORCH_CHECK( gradOutput.ndimension() == input.ndimension() && gradOutput.size(dimn) == nslices, "gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices"); } } Tensor& max_unpooling3d_forward_out_cuda( Tensor& output, const Tensor& self_, const Tensor& indices_, IntList output_size, IntList stride, IntList padding) { TORCH_CHECK(output.is_contiguous(), "output must be contiguous"); max_unpooling3d_shape_check( self_, Tensor(), indices_, output_size, stride, padding); int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2}, indices_arg{indices_, "indices_", 3}; checkAllSameGPU( "max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg}); auto self = self_.contiguous(); auto indices = indices_.contiguous(); int64_t batchSize; int64_t inputSlices; int64_t inputTime; int64_t inputHeight; int64_t inputWidth; if (self.ndimension() == 4) { 
batchSize = 1; inputSlices = self.size(0); inputTime = self.size(1); inputHeight = self.size(2); inputWidth = self.size(3); output.resize_({inputSlices, oT, oH, oW}); } else { batchSize = self.size(0); inputSlices = self.size(1); inputTime = self.size(2); inputHeight = self.size(3); inputWidth = self.size(4); output.resize_({batchSize, inputSlices, oT, oH, oW}); } output.zero_(); // Collapse batch and feature dimensions if needed if (self.ndimension() == 5) { self = self.reshape({self.size(0) * self.size(1), self.size(2), self.size(3), self.size(4)}); indices = indices.reshape({indices.size(0) * indices.size(1), indices.size(2), indices.size(3), indices.size(4)}); } int totalZ = inputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] { while (totalZ > 0) { dim3 grid( ceilDiv(inputWidth, static_cast<int64_t>(block.x)), ceilDiv(inputHeight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 
65535 : totalZ); hipLaunchKernelGGL(( max_unpooling3d_forward_kernel), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self.packed_accessor<scalar_t, 4>(), indices.packed_accessor<int64_t, 4>(), output.data<scalar_t>(), oT, oH, oW, offsetZ); TORCH_CHECK( hipGetLastError() == hipSuccess, "max_unpooling3d_forward_kernel failed with error code ", hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } })); return output; } Tensor max_unpooling3d_forward_cuda( const Tensor& self, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { auto output = at::empty({0}, self.options()); max_unpooling3d_forward_out_cuda( output, self, indices, output_size, stride, padding); return output; } at::Tensor& max_unpooling2d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output_, const Tensor& self_, const Tensor& indices_, IntList output_size) { int64_t oheight = output_size[0]; int64_t owidth = output_size[1]; TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); TORCH_CHECK( indices_.scalar_type() == at::ScalarType::Long, "elements in indices should be type int64"); TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}, self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4}; checkAllSameGPU( "max_unpooling2d_backward_out_cuda", {grad_input_arg, grad_output_arg, self_arg, indices_arg}); TORCH_CHECK( (self_.ndimension() == 3 || self_.ndimension() == 4), "Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ", self_); TORCH_CHECK( self_.sizes() == indices_.sizes(), "Input should have same shape as indices"); TORCH_CHECK(output_size.size() == 2, "output_size must have two elements"); int64_t nInputCols, nInputRows, nInputPlane, batchSize; int dimw = 2; int dimh = 1; auto self = self_.contiguous(); auto indices = indices_.contiguous(); auto grad_output = grad_output_.contiguous(); if (self.ndimension() == 3) { nInputPlane = 
self.size(0); batchSize = 1; } else { ++dimw; ++dimh; nInputPlane = self.size(1); batchSize = self.size(0); } nInputCols = self.size(dimw); nInputRows = self.size(dimh); if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) { AT_ERROR( "Inconsistent gradOutput size. output height: ", oheight, ", output width= ", owidth, ", gradOutput: ", grad_output.size(dimh), "x", grad_output.size(dimw)); } grad_input.resize_as_(self); grad_input.zero_(); int count = self.numel(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] { hipLaunchKernelGGL(( max_unpooling2d_backward_kernel), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, grad_output.data<scalar_t>(), indices.data<int64_t>(), nInputPlane, nInputRows, nInputCols, oheight, owidth, grad_input.data<scalar_t>()); })); TORCH_CHECK( hipGetLastError() == hipSuccess, "max_unpooling2d_backward_kernel failed with error code ", hipGetLastError()); return grad_input; } at::Tensor max_unpooling2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& indices, IntList output_size) { auto grad_input = at::empty_like(self); max_unpooling2d_backward_out_cuda( grad_input, grad_output, self, indices, output_size); return grad_input; } at::Tensor& max_unpooling3d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output_, const Tensor& self_, const Tensor& indices_, IntList output_size, IntList stride, IntList padding) { TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; max_unpooling3d_shape_check( self_, grad_output_, indices_, output_size, stride, padding); int batchSize = 0; int inputSlices = 0; int inputTime = 0; int64_t inputHeight = 0; int64_t inputWidth = 0; TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2}, 
grad_output_arg{grad_output_, "grad_output_", 3}, grad_input_arg{grad_input, "grad_input", 4}; checkAllSameGPU( "max_unpooling3d_backward_out_cuda", {self_arg, indices_arg, grad_output_arg, grad_input_arg}); auto self = self_.contiguous(); auto indices = indices_.contiguous(); auto grad_output = grad_output_.contiguous(); if (self.ndimension() == 4) { batchSize = 1; inputSlices = self.size(0); inputTime = self.size(1); inputHeight = self.size(2); inputWidth = self.size(3); } else { batchSize = self.size(0); inputSlices = self.size(1); inputTime = self.size(2); inputHeight = self.size(3); inputWidth = self.size(4); } grad_input.resize_as_(self); grad_input.zero_(); // Collapse batch and feature dimensions if needed auto grad_input_reshaped = grad_input; if (grad_input.ndimension() == 5) { grad_input_reshaped = grad_input.reshape({grad_input.size(0) * grad_input.size(1), grad_input.size(2), grad_input.size(3), grad_input.size(4)}); indices = indices.reshape({indices.size(0) * indices.size(1), indices.size(2), indices.size(3), indices.size(4)}); } int totalZ = inputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] { while (totalZ > 0) { dim3 grid( ceilDiv(inputWidth, static_cast<int64_t>(block.x)), ceilDiv(inputHeight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 
65535 : totalZ); hipLaunchKernelGGL(( max_unpooling3d_backward_kernel), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output.data<scalar_t>(), oT, oH, oW, indices.packed_accessor<int64_t, 4>(), grad_input_reshaped.packed_accessor<scalar_t, 4>(), offsetZ); TORCH_CHECK( hipGetLastError() == hipSuccess, "max_unpooling3d_backward_kernel failed with error code ", hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } })); return grad_input; } at::Tensor max_unpooling3d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { auto grad_input = at::empty_like(self); max_unpooling3d_backward_out_cuda( grad_input, grad_output, self, indices, output_size, stride, padding); return grad_input; } } // namespace native } // namespace at
3702d6a6c284f38d249db348dd5c4eec2f52ef31.cu
#include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <c10/util/Exception.h> namespace at { namespace native { using namespace at::cuda::detail; template <typename T> __host__ __device__ __forceinline__ T ceilDiv(T a, T b) { return (a + b - 1) / b; } template <typename T> __global__ void max_unpooling2d_forward_kernel( const int64_t numInputElements, const T* input, const int64_t* indices, const int64_t numChannels, const int64_t inputHeight, const int64_t inputWidth, const int64_t outputHeight, const int64_t outputWidth, T* output) { CUDA_KERNEL_LOOP(linearIndex, numInputElements) { int c = (linearIndex / inputWidth / inputHeight) % numChannels; int n = linearIndex / inputWidth / inputHeight / numChannels; output += (n * numChannels + c) * outputHeight * outputWidth; int maxind = indices[linearIndex]; output[maxind] = input[linearIndex]; } } template <typename T> __global__ void max_unpooling3d_forward_kernel( PackedTensorAccessor<T, 4> input, PackedTensorAccessor<int64_t, 4> indices, T* output, const int64_t oT, const int64_t oH, const int64_t oW, const int64_t offsetZ) { int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x; int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y; int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature if (iRow < input.size(2) && iColumn < input.size(3)) { T val = input[slice][iFrame][iRow][iColumn]; int64_t index = indices[slice][iFrame][iRow][iColumn]; output[slice * oT * oH * oW + index] = val; } } template <typename T> __global__ void max_unpooling2d_backward_kernel( const int64_t numInputElements, const T* input, const int64_t* indices, const int64_t numChannels, const int64_t inputHeight, const int64_t inputWidth, const int64_t outputHeight, const int64_t outputWidth, T* output) { 
CUDA_KERNEL_LOOP(linearIndex, numInputElements) { int c = (linearIndex / inputWidth / inputHeight) % numChannels; int n = linearIndex / inputWidth / inputHeight / numChannels; input += (n * numChannels + c) * outputHeight * outputWidth; int maxind = indices[linearIndex]; output[linearIndex] = input[maxind]; } } template <typename T> __global__ void max_unpooling3d_backward_kernel( T* gradOutputData, int64_t oT, int64_t oH, int64_t oW, PackedTensorAccessor<int64_t, 4> indices, PackedTensorAccessor<T, 4> gradInput, int offsetZ) { int iColumn = blockIdx.x * blockDim.x + threadIdx.x; int iRow = blockIdx.y * blockDim.y + threadIdx.y; int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) { int64_t index = indices[slice][iFrame][iRow][iColumn]; T grad_val = gradOutputData[slice * oT * oH * oW + index]; gradInput[slice][iFrame][iRow][iColumn] = grad_val; } } Tensor& max_unpooling2d_forward_out_cuda( Tensor& output, const Tensor& self_, const Tensor& indices_, IntList output_size) { TORCH_CHECK(output.is_contiguous(), "output must be contiguous"); TORCH_CHECK( indices_.scalar_type() == at::ScalarType::Long, "elements in indices should be type int64"); auto oheight = output_size[0]; auto owidth = output_size[1]; TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2}, indices_arg{indices_, "indices_", 3}; checkAllSameGPU( "max_unpooling2d_forward_out_cuda", {output_arg, self_arg, indices_arg}); TORCH_CHECK(self_.numel() > 0, "Input must be non-empty tensor"); TORCH_CHECK( (self_.ndimension() == 3 || self_.ndimension() == 4), "Input to max_unpooling2d should be a 3d or 4d Tensor", self_.sizes()); TORCH_CHECK( self_.sizes() == indices_.sizes(), "Shape of input must match shape of indices"); TORCH_CHECK( output_size.size() == 2, "There should be exactly two elements (width, height) in output_size"); 
int64_t dimw = 2; int64_t dimh = 1; int64_t numBatch = 1; int64_t numChannels; int64_t inputHeight; int64_t inputWidth; auto self = self_.contiguous(); auto indices = indices_.contiguous(); if (self.ndimension() == 4) { numBatch = self.size(0); dimw++; dimh++; } numChannels = self.size(dimh - 1); inputHeight = self.size(dimh); inputWidth = self.size(dimw); output.resize_({numBatch, numChannels, oheight, owidth}); output.zero_(); auto count = self.numel(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] { max_unpooling2d_forward_kernel<<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( self.numel(), self.data<scalar_t>(), indices.data<int64_t>(), numChannels, inputHeight, inputWidth, oheight, owidth, output.data<scalar_t>()); })); TORCH_CHECK( cudaGetLastError() == cudaSuccess, "max_unpooling2d_forward_kernel failed with error code ", cudaGetLastError()); if (self.ndimension() == 3) { output.resize_({numChannels, oheight, owidth}); } return output; } Tensor max_unpooling2d_forward_cuda( const Tensor& self, const Tensor& indices, IntList output_size) { auto output = at::empty({0}, self.options()); max_unpooling2d_forward_out_cuda(output, self, indices, output_size); return output; } static void max_unpooling3d_shape_check( const Tensor& input, const Tensor& gradOutput, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; TORCH_CHECK( indices.scalar_type() == at::ScalarType::Long, "elements in indices should be type int64"); TORCH_CHECK( (input.ndimension() == 4 || input.ndimension() == 5), "Input to max_unpooling3d should be a 4d or 5d Tensor", input.sizes()); TORCH_CHECK( output_size.size() == 3, "There should be exactly three elements (depth, height, width) in output_size"); TORCH_CHECK( stride.size() == 3, "There should be exactly three elements (depth, height, 
width) in stride"); TORCH_CHECK( padding.size() == 3, "There should be exactly three elements (depth, height, width) in padding"); TORCH_CHECK( input.sizes() == indices.sizes(), "Shape of indices should match shape of input"); TORCH_CHECK(input.numel() > 0, "Input must be non-empty"); TORCH_CHECK( stride[0] > 0 && stride[1] > 0 && stride[2] > 0, "strides should be greater than zero, but got stride: ", stride); int dimw = 3; int dimh = 2; int dimt = 1; int dimn = 0; if (input.ndimension() == 5) { dimw++; dimh++; dimt++; dimn++; } int nslices = input.size(dimn); if (gradOutput.defined()) { if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) || oW != gradOutput.size(dimw)) { AT_ERROR( "Inconsistent gradOutput size. oT= ", oT, ", oH= ", oH, ", oW= ", oW, ". gradOutput: ", gradOutput.size(dimt), "x", gradOutput.size(dimh), "x", gradOutput.size(dimw)); } TORCH_CHECK( gradOutput.ndimension() == input.ndimension() && gradOutput.size(dimn) == nslices, "gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices"); } } Tensor& max_unpooling3d_forward_out_cuda( Tensor& output, const Tensor& self_, const Tensor& indices_, IntList output_size, IntList stride, IntList padding) { TORCH_CHECK(output.is_contiguous(), "output must be contiguous"); max_unpooling3d_shape_check( self_, Tensor(), indices_, output_size, stride, padding); int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2}, indices_arg{indices_, "indices_", 3}; checkAllSameGPU( "max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg}); auto self = self_.contiguous(); auto indices = indices_.contiguous(); int64_t batchSize; int64_t inputSlices; int64_t inputTime; int64_t inputHeight; int64_t inputWidth; if (self.ndimension() == 4) { batchSize = 1; inputSlices = self.size(0); inputTime = self.size(1); inputHeight = self.size(2); inputWidth = 
self.size(3); output.resize_({inputSlices, oT, oH, oW}); } else { batchSize = self.size(0); inputSlices = self.size(1); inputTime = self.size(2); inputHeight = self.size(3); inputWidth = self.size(4); output.resize_({batchSize, inputSlices, oT, oH, oW}); } output.zero_(); // Collapse batch and feature dimensions if needed if (self.ndimension() == 5) { self = self.reshape({self.size(0) * self.size(1), self.size(2), self.size(3), self.size(4)}); indices = indices.reshape({indices.size(0) * indices.size(1), indices.size(2), indices.size(3), indices.size(4)}); } int totalZ = inputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] { while (totalZ > 0) { dim3 grid( ceilDiv(inputWidth, static_cast<int64_t>(block.x)), ceilDiv(inputHeight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 65535 : totalZ); max_unpooling3d_forward_kernel<<< grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( self.packed_accessor<scalar_t, 4>(), indices.packed_accessor<int64_t, 4>(), output.data<scalar_t>(), oT, oH, oW, offsetZ); TORCH_CHECK( cudaGetLastError() == cudaSuccess, "max_unpooling3d_forward_kernel failed with error code ", cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } })); return output; } Tensor max_unpooling3d_forward_cuda( const Tensor& self, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { auto output = at::empty({0}, self.options()); max_unpooling3d_forward_out_cuda( output, self, indices, output_size, stride, padding); return output; } at::Tensor& max_unpooling2d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output_, const Tensor& self_, const Tensor& indices_, IntList output_size) { int64_t oheight = output_size[0]; int64_t owidth = output_size[1]; TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); TORCH_CHECK( indices_.scalar_type() == at::ScalarType::Long, "elements in indices 
should be type int64"); TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}, self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4}; checkAllSameGPU( "max_unpooling2d_backward_out_cuda", {grad_input_arg, grad_output_arg, self_arg, indices_arg}); TORCH_CHECK( (self_.ndimension() == 3 || self_.ndimension() == 4), "Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ", self_); TORCH_CHECK( self_.sizes() == indices_.sizes(), "Input should have same shape as indices"); TORCH_CHECK(output_size.size() == 2, "output_size must have two elements"); int64_t nInputCols, nInputRows, nInputPlane, batchSize; int dimw = 2; int dimh = 1; auto self = self_.contiguous(); auto indices = indices_.contiguous(); auto grad_output = grad_output_.contiguous(); if (self.ndimension() == 3) { nInputPlane = self.size(0); batchSize = 1; } else { ++dimw; ++dimh; nInputPlane = self.size(1); batchSize = self.size(0); } nInputCols = self.size(dimw); nInputRows = self.size(dimh); if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) { AT_ERROR( "Inconsistent gradOutput size. 
output height: ", oheight, ", output width= ", owidth, ", gradOutput: ", grad_output.size(dimh), "x", grad_output.size(dimw)); } grad_input.resize_as_(self); grad_input.zero_(); int count = self.numel(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] { max_unpooling2d_backward_kernel<<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, grad_output.data<scalar_t>(), indices.data<int64_t>(), nInputPlane, nInputRows, nInputCols, oheight, owidth, grad_input.data<scalar_t>()); })); TORCH_CHECK( cudaGetLastError() == cudaSuccess, "max_unpooling2d_backward_kernel failed with error code ", cudaGetLastError()); return grad_input; } at::Tensor max_unpooling2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& indices, IntList output_size) { auto grad_input = at::empty_like(self); max_unpooling2d_backward_out_cuda( grad_input, grad_output, self, indices, output_size); return grad_input; } at::Tensor& max_unpooling3d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output_, const Tensor& self_, const Tensor& indices_, IntList output_size, IntList stride, IntList padding) { TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; max_unpooling3d_shape_check( self_, grad_output_, indices_, output_size, stride, padding); int batchSize = 0; int inputSlices = 0; int inputTime = 0; int64_t inputHeight = 0; int64_t inputWidth = 0; TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2}, grad_output_arg{grad_output_, "grad_output_", 3}, grad_input_arg{grad_input, "grad_input", 4}; checkAllSameGPU( "max_unpooling3d_backward_out_cuda", {self_arg, indices_arg, grad_output_arg, grad_input_arg}); auto self = self_.contiguous(); auto indices = indices_.contiguous(); auto grad_output = grad_output_.contiguous(); if (self.ndimension() == 4) { 
batchSize = 1; inputSlices = self.size(0); inputTime = self.size(1); inputHeight = self.size(2); inputWidth = self.size(3); } else { batchSize = self.size(0); inputSlices = self.size(1); inputTime = self.size(2); inputHeight = self.size(3); inputWidth = self.size(4); } grad_input.resize_as_(self); grad_input.zero_(); // Collapse batch and feature dimensions if needed auto grad_input_reshaped = grad_input; if (grad_input.ndimension() == 5) { grad_input_reshaped = grad_input.reshape({grad_input.size(0) * grad_input.size(1), grad_input.size(2), grad_input.size(3), grad_input.size(4)}); indices = indices.reshape({indices.size(0) * indices.size(1), indices.size(2), indices.size(3), indices.size(4)}); } int totalZ = inputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] { while (totalZ > 0) { dim3 grid( ceilDiv(inputWidth, static_cast<int64_t>(block.x)), ceilDiv(inputHeight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 65535 : totalZ); max_unpooling3d_backward_kernel<<< grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( grad_output.data<scalar_t>(), oT, oH, oW, indices.packed_accessor<int64_t, 4>(), grad_input_reshaped.packed_accessor<scalar_t, 4>(), offsetZ); TORCH_CHECK( cudaGetLastError() == cudaSuccess, "max_unpooling3d_backward_kernel failed with error code ", cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } })); return grad_input; } at::Tensor max_unpooling3d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { auto grad_input = at::empty_like(self); max_unpooling3d_backward_out_cuda( grad_input, grad_output, self, indices, output_size, stride, padding); return grad_input; } } // namespace native } // namespace at
a7446874989578182a817fb0fbff4c0814713d7d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * _reg_resampling_gpu.cu * * * Created by Marc Modat on 24/03/2009. * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_RESAMPLING_GPU_CU #define _REG_RESAMPLING_GPU_CU #include "_reg_resampling_gpu.h" #include "_reg_resampling_kernels.cu" /* *************************************************************** */ /* *************************************************************** */ void reg_resampleSourceImage_gpu(nifti_image *sourceImage, float **resultImageArray_d, hipArray **sourceImageArray_d, float4 **positionFieldImageArray_d, int **mask_d, int activeVoxelNumber, float sourceBGValue) { int3 sourceDim = make_int3(sourceImage->nx, sourceImage->ny, sourceImage->nz); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_SourceDim,&sourceDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_PaddingValue,&sourceBGValue,sizeof(float))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int))) //Bind source image array to a 3D texture sourceTexture.normalized = true; sourceTexture.filterMode = hipFilterModeLinear; sourceTexture.addressMode[0] = hipAddressModeWrap; sourceTexture.addressMode[1] = hipAddressModeWrap; sourceTexture.addressMode[2] = hipAddressModeWrap; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); NR_CUDA_SAFE_CALL(hipBindTextureToArray(sourceTexture, *sourceImageArray_d, channelDesc)) //Bind positionField to texture NR_CUDA_SAFE_CALL(hipBindTexture(0, positionFieldTexture, *positionFieldImageArray_d, activeVoxelNumber*sizeof(float4))) //Bind positionField to texture NR_CUDA_SAFE_CALL(hipBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int))) // Bind the real to voxel matrix to texture mat44 *sourceMatrix; if(sourceImage->sform_code>0) sourceMatrix=&(sourceImage->sto_ijk); else 
sourceMatrix=&(sourceImage->qto_ijk); float4 *sourceRealToVoxel_h;NR_CUDA_SAFE_CALL(hipHostMalloc(&sourceRealToVoxel_h, 3*sizeof(float4))) float4 *sourceRealToVoxel_d; NR_CUDA_SAFE_CALL(hipMalloc(&sourceRealToVoxel_d, 3*sizeof(float4))) for(int i=0; i<3; i++){ sourceRealToVoxel_h[i].x=sourceMatrix->m[i][0]; sourceRealToVoxel_h[i].y=sourceMatrix->m[i][1]; sourceRealToVoxel_h[i].z=sourceMatrix->m[i][2]; sourceRealToVoxel_h[i].w=sourceMatrix->m[i][3]; } NR_CUDA_SAFE_CALL(hipMemcpy(sourceRealToVoxel_d, sourceRealToVoxel_h, 3*sizeof(float4), hipMemcpyHostToDevice)) NR_CUDA_SAFE_CALL(hipHostFree((void *)sourceRealToVoxel_h)) NR_CUDA_SAFE_CALL(hipBindTexture(0, sourceMatrixTexture, sourceRealToVoxel_d, 3*sizeof(float4))) const unsigned int Grid_reg_resampleSourceImage = (unsigned int)ceil(sqrtf((float)activeVoxelNumber/(float)Block_reg_resampleSourceImage)); dim3 B1(Block_reg_resampleSourceImage,1,1); dim3 G1(Grid_reg_resampleSourceImage,Grid_reg_resampleSourceImage,1); hipLaunchKernelGGL(( reg_resampleSourceImage_kernel) , dim3(G1), dim3(B1) , 0, 0, *resultImageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(hipUnbindTexture(sourceTexture)) NR_CUDA_SAFE_CALL(hipUnbindTexture(positionFieldTexture)) NR_CUDA_SAFE_CALL(hipUnbindTexture(maskTexture)) NR_CUDA_SAFE_CALL(hipUnbindTexture(sourceMatrixTexture)) hipFree(sourceRealToVoxel_d); } /* *************************************************************** */ /* *************************************************************** */ void reg_getSourceImageGradient_gpu(nifti_image *sourceImage, hipArray **sourceImageArray_d, float4 **positionFieldImageArray_d, float4 **resultGradientArray_d, int activeVoxelNumber) { int3 sourceDim = make_int3(sourceImage->nx, sourceImage->ny, sourceImage->nz); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_SourceDim, &sourceDim, sizeof(int3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ActiveVoxelNumber, &activeVoxelNumber, sizeof(int))) //Bind source image array to a 3D texture sourceTexture.normalized = 
true; sourceTexture.filterMode = hipFilterModeLinear; sourceTexture.addressMode[0] = hipAddressModeWrap; sourceTexture.addressMode[1] = hipAddressModeWrap; sourceTexture.addressMode[2] = hipAddressModeWrap; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); NR_CUDA_SAFE_CALL(hipBindTextureToArray(sourceTexture, *sourceImageArray_d, channelDesc)) //Bind positionField to texture NR_CUDA_SAFE_CALL(hipBindTexture(0, positionFieldTexture, *positionFieldImageArray_d, activeVoxelNumber*sizeof(float4))) // Bind the real to voxel matrix to texture mat44 *sourceMatrix; if(sourceImage->sform_code>0) sourceMatrix=&(sourceImage->sto_ijk); else sourceMatrix=&(sourceImage->qto_ijk); float4 *sourceRealToVoxel_h;NR_CUDA_SAFE_CALL(hipHostMalloc(&sourceRealToVoxel_h, 3*sizeof(float4))) float4 *sourceRealToVoxel_d; NR_CUDA_SAFE_CALL(hipMalloc(&sourceRealToVoxel_d, 3*sizeof(float4))) for(int i=0; i<3; i++){ sourceRealToVoxel_h[i].x=sourceMatrix->m[i][0]; sourceRealToVoxel_h[i].y=sourceMatrix->m[i][1]; sourceRealToVoxel_h[i].z=sourceMatrix->m[i][2]; sourceRealToVoxel_h[i].w=sourceMatrix->m[i][3]; } NR_CUDA_SAFE_CALL(hipMemcpy(sourceRealToVoxel_d, sourceRealToVoxel_h, 3*sizeof(float4), hipMemcpyHostToDevice)) NR_CUDA_SAFE_CALL(hipHostFree((void *)sourceRealToVoxel_h)) NR_CUDA_SAFE_CALL(hipBindTexture(0, sourceMatrixTexture, sourceRealToVoxel_d, 3*sizeof(float4))) const unsigned int Grid_reg_getSourceImageGradient = (unsigned int)ceil(sqrtf((float)activeVoxelNumber/(float)Block_reg_getSourceImageGradient)); dim3 B1(Block_reg_getSourceImageGradient,1,1); dim3 G1(Grid_reg_getSourceImageGradient,Grid_reg_getSourceImageGradient,1); hipLaunchKernelGGL(( reg_getSourceImageGradient_kernel) , dim3(G1), dim3(B1) , 0, 0, *resultGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(hipUnbindTexture(sourceTexture)) NR_CUDA_SAFE_CALL(hipUnbindTexture(positionFieldTexture)) NR_CUDA_SAFE_CALL(hipUnbindTexture(sourceMatrixTexture)) hipFree(sourceRealToVoxel_d); } /* 
*************************************************************** */ /* *************************************************************** */ #endif
a7446874989578182a817fb0fbff4c0814713d7d.cu
/* * _reg_resampling_gpu.cu * * * Created by Marc Modat on 24/03/2009. * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_RESAMPLING_GPU_CU #define _REG_RESAMPLING_GPU_CU #include "_reg_resampling_gpu.h" #include "_reg_resampling_kernels.cu" /* *************************************************************** */ /* *************************************************************** */ void reg_resampleSourceImage_gpu(nifti_image *sourceImage, float **resultImageArray_d, cudaArray **sourceImageArray_d, float4 **positionFieldImageArray_d, int **mask_d, int activeVoxelNumber, float sourceBGValue) { int3 sourceDim = make_int3(sourceImage->nx, sourceImage->ny, sourceImage->nz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_SourceDim,&sourceDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_PaddingValue,&sourceBGValue,sizeof(float))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int))) //Bind source image array to a 3D texture sourceTexture.normalized = true; sourceTexture.filterMode = cudaFilterModeLinear; sourceTexture.addressMode[0] = cudaAddressModeWrap; sourceTexture.addressMode[1] = cudaAddressModeWrap; sourceTexture.addressMode[2] = cudaAddressModeWrap; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); NR_CUDA_SAFE_CALL(cudaBindTextureToArray(sourceTexture, *sourceImageArray_d, channelDesc)) //Bind positionField to texture NR_CUDA_SAFE_CALL(cudaBindTexture(0, positionFieldTexture, *positionFieldImageArray_d, activeVoxelNumber*sizeof(float4))) //Bind positionField to texture NR_CUDA_SAFE_CALL(cudaBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int))) // Bind the real to voxel matrix to texture mat44 *sourceMatrix; if(sourceImage->sform_code>0) sourceMatrix=&(sourceImage->sto_ijk); else sourceMatrix=&(sourceImage->qto_ijk); float4 
*sourceRealToVoxel_h;NR_CUDA_SAFE_CALL(cudaMallocHost(&sourceRealToVoxel_h, 3*sizeof(float4))) float4 *sourceRealToVoxel_d; NR_CUDA_SAFE_CALL(cudaMalloc(&sourceRealToVoxel_d, 3*sizeof(float4))) for(int i=0; i<3; i++){ sourceRealToVoxel_h[i].x=sourceMatrix->m[i][0]; sourceRealToVoxel_h[i].y=sourceMatrix->m[i][1]; sourceRealToVoxel_h[i].z=sourceMatrix->m[i][2]; sourceRealToVoxel_h[i].w=sourceMatrix->m[i][3]; } NR_CUDA_SAFE_CALL(cudaMemcpy(sourceRealToVoxel_d, sourceRealToVoxel_h, 3*sizeof(float4), cudaMemcpyHostToDevice)) NR_CUDA_SAFE_CALL(cudaFreeHost((void *)sourceRealToVoxel_h)) NR_CUDA_SAFE_CALL(cudaBindTexture(0, sourceMatrixTexture, sourceRealToVoxel_d, 3*sizeof(float4))) const unsigned int Grid_reg_resampleSourceImage = (unsigned int)ceil(sqrtf((float)activeVoxelNumber/(float)Block_reg_resampleSourceImage)); dim3 B1(Block_reg_resampleSourceImage,1,1); dim3 G1(Grid_reg_resampleSourceImage,Grid_reg_resampleSourceImage,1); reg_resampleSourceImage_kernel <<< G1, B1 >>> (*resultImageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(cudaUnbindTexture(sourceTexture)) NR_CUDA_SAFE_CALL(cudaUnbindTexture(positionFieldTexture)) NR_CUDA_SAFE_CALL(cudaUnbindTexture(maskTexture)) NR_CUDA_SAFE_CALL(cudaUnbindTexture(sourceMatrixTexture)) cudaFree(sourceRealToVoxel_d); } /* *************************************************************** */ /* *************************************************************** */ void reg_getSourceImageGradient_gpu(nifti_image *sourceImage, cudaArray **sourceImageArray_d, float4 **positionFieldImageArray_d, float4 **resultGradientArray_d, int activeVoxelNumber) { int3 sourceDim = make_int3(sourceImage->nx, sourceImage->ny, sourceImage->nz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_SourceDim, &sourceDim, sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ActiveVoxelNumber, &activeVoxelNumber, sizeof(int))) //Bind source image array to a 3D texture sourceTexture.normalized = true; sourceTexture.filterMode = cudaFilterModeLinear; 
sourceTexture.addressMode[0] = cudaAddressModeWrap; sourceTexture.addressMode[1] = cudaAddressModeWrap; sourceTexture.addressMode[2] = cudaAddressModeWrap; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); NR_CUDA_SAFE_CALL(cudaBindTextureToArray(sourceTexture, *sourceImageArray_d, channelDesc)) //Bind positionField to texture NR_CUDA_SAFE_CALL(cudaBindTexture(0, positionFieldTexture, *positionFieldImageArray_d, activeVoxelNumber*sizeof(float4))) // Bind the real to voxel matrix to texture mat44 *sourceMatrix; if(sourceImage->sform_code>0) sourceMatrix=&(sourceImage->sto_ijk); else sourceMatrix=&(sourceImage->qto_ijk); float4 *sourceRealToVoxel_h;NR_CUDA_SAFE_CALL(cudaMallocHost(&sourceRealToVoxel_h, 3*sizeof(float4))) float4 *sourceRealToVoxel_d; NR_CUDA_SAFE_CALL(cudaMalloc(&sourceRealToVoxel_d, 3*sizeof(float4))) for(int i=0; i<3; i++){ sourceRealToVoxel_h[i].x=sourceMatrix->m[i][0]; sourceRealToVoxel_h[i].y=sourceMatrix->m[i][1]; sourceRealToVoxel_h[i].z=sourceMatrix->m[i][2]; sourceRealToVoxel_h[i].w=sourceMatrix->m[i][3]; } NR_CUDA_SAFE_CALL(cudaMemcpy(sourceRealToVoxel_d, sourceRealToVoxel_h, 3*sizeof(float4), cudaMemcpyHostToDevice)) NR_CUDA_SAFE_CALL(cudaFreeHost((void *)sourceRealToVoxel_h)) NR_CUDA_SAFE_CALL(cudaBindTexture(0, sourceMatrixTexture, sourceRealToVoxel_d, 3*sizeof(float4))) const unsigned int Grid_reg_getSourceImageGradient = (unsigned int)ceil(sqrtf((float)activeVoxelNumber/(float)Block_reg_getSourceImageGradient)); dim3 B1(Block_reg_getSourceImageGradient,1,1); dim3 G1(Grid_reg_getSourceImageGradient,Grid_reg_getSourceImageGradient,1); reg_getSourceImageGradient_kernel <<< G1, B1 >>> (*resultGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(cudaUnbindTexture(sourceTexture)) NR_CUDA_SAFE_CALL(cudaUnbindTexture(positionFieldTexture)) NR_CUDA_SAFE_CALL(cudaUnbindTexture(sourceMatrixTexture)) cudaFree(sourceRealToVoxel_d); } /* *************************************************************** */ /* 
*************************************************************** */ #endif
d085f95636cc3e7f157d6101b8a051ff78e4ceb2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "nodiag_normalize.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; hipMalloc(&A, XSIZE*YSIZE); float *I = NULL; hipMalloc(&I, XSIZE*YSIZE); int n = XSIZE*YSIZE; int i = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( nodiag_normalize), dim3(gridBlock),dim3(threadBlock), 0, 0, A,I,n,i); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( nodiag_normalize), dim3(gridBlock),dim3(threadBlock), 0, 0, A,I,n,i); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( nodiag_normalize), dim3(gridBlock),dim3(threadBlock), 0, 0, A,I,n,i); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d085f95636cc3e7f157d6101b8a051ff78e4ceb2.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "nodiag_normalize.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); float *I = NULL; cudaMalloc(&I, XSIZE*YSIZE); int n = XSIZE*YSIZE; int i = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); nodiag_normalize<<<gridBlock,threadBlock>>>(A,I,n,i); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { nodiag_normalize<<<gridBlock,threadBlock>>>(A,I,n,i); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { nodiag_normalize<<<gridBlock,threadBlock>>>(A,I,n,i); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7b1d566b00fa9aedd7f13dd72255108e16c4ef40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/boolean_mask_ops.h" #include <hipcub/hipcub.hpp> namespace caffe2 { namespace { __global__ void BooleanMaskCopyKernel( const int64_t numOfOutput, const int64_t numBytes, const int64_t* indices, const uint8_t* src, uint8_t* dest) { for (int64_t i = blockIdx.x; i < numOfOutput; i += gridDim.x) { const auto srcBase = indices[i] * numBytes; const auto destBase = i * numBytes; for (int64_t j = threadIdx.x; j < numBytes; j += blockDim.x) { dest[destBase + j] = src[srcBase + j]; } } } } template <> class BooleanMaskOp<CUDAContext> final : public Operator<CUDAContext> { public: BooleanMaskOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} bool RunOnDevice() override { const auto& src = Input(0); const auto& mask = Input(1); auto* dest = Output(0); CAFFE_ENFORCE(src.ndim() >= 1); CAFFE_ENFORCE_EQ(mask.ndim(), 1); CAFFE_ENFORCE(src.dims()[0] == mask.dims()[0]); const auto* maskData = mask.data<bool>(); const auto outerSize = mask.dims()[0]; indices_.Resize(outerSize); auto* indicesData = indices_.mutable_data<int64_t>(); size_t numBytes = 0; hipcub::CountingInputIterator<int> itr(0); hipcub::DeviceSelect::Flagged( nullptr, numBytes, itr, maskData, indicesData, static_cast<int64_t*>(nullptr), outerSize, context_.cuda_stream()); auto numint64_t = static_cast<int64_t>((numBytes + sizeof(int64_t) - 1) / sizeof(int64_t)); // allocate one more int64_t at the end of scratch for storing numOfOutput scratch_.Resize(numint64_t + 1); auto* scratchData = scratch_.mutable_data<int64_t>(); auto* numOfOutputData = scratchData + numint64_t; hipcub::DeviceSelect::Flagged( static_cast<void*>(scratchData), numBytes, itr, maskData, indicesData, numOfOutputData, outerSize, context_.cuda_stream()); // Copy numOfOutput from gpu to cpu int64_t numOfOutput; context_.CopyToCPU(1, numOfOutputData, 
&numOfOutput); indices_.Resize(numOfOutput); std::vector<int64_t> dims = src.dims(); dims[0] = numOfOutput; dest->Resize(dims); auto* destData = (uint8_t*)dest->raw_mutable_data(src.meta()); const auto* srcData = (uint8_t*)src.raw_data(); if (OutputSize() == 2) { auto* indicesOut = Output(1); indicesOut->Resize(numOfOutput); indicesOut->template mutable_data<int64_t>(); } if (numOfOutput > 0) { hipLaunchKernelGGL(( BooleanMaskCopyKernel), dim3(min(numOfOutput, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS))), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), numOfOutput, src.size_from_dim(1) * src.meta().itemsize(), indicesData, srcData, destData); if (OutputSize() == 2) { Output(1)->CopyFrom(indices_, &context_); } } return true; } private: Tensor indices_{CUDA}; Tensor scratch_{CUDA}; }; REGISTER_CUDA_OPERATOR(BooleanMask, BooleanMaskOp<CUDAContext>); namespace { #define minf (-1.0f * std::numeric_limits<float>::infinity()) template <typename T> __global__ void sequenceMaskKernel( int N, int M, int B, const T* in, const int* seq_lengths, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= seq_lengths[j] ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= seq_lengths[i] ? fill_val : in[index]); } } } template <typename T> __global__ void repeatedSequenceMaskKernel( int N, int M, int D, const T* in, const int* seq_lengths, T fill_val, T* out) { CUDA_1D_KERNEL_LOOP(index, N * M * D) { int i = index / (D * M); int j = (index / D) % M; out[index] = (j >= seq_lengths[i] ? 
fill_val : in[index]); } } template <typename T> __global__ void windowMaskKernel( int N, int M, int B, const T* in, const int* window_centers, const int radius, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < window_centers[j] - radius || k > window_centers[j] + radius ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < window_centers[i] - radius || j > window_centers[i] + radius ? fill_val : in[index]); } } } template <typename T> __global__ void upperMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k > j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j > i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < i ? fill_val : in[index]); } } } template <typename T> __global__ void upperDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= j ? 
fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k <= j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j <= i ? fill_val : in[index]); } } } } // namespace template <> bool SequenceMaskOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0)); } template <> template <class T> bool SequenceMaskOp<CUDAContext>::DoRunWithType() { const Tensor* input = &Input(0); const Tensor* sequence_lengths = nullptr; const Tensor* window_centers = nullptr; if (mode_ == "sequence") { sequence_lengths = &Input(1); } else if (mode_ == "window") { window_centers = &Input(1); } auto* output = Output(0); output->ResizeLike(*input); const auto canonical_axis = input->canonical_axis_index(axis_); // canonical_batch is non-negative if batching, -1 otherwise int canonical_batch = -1; if ((HasArgument("batch"))) { canonical_batch = input->canonical_axis_index(batch_); } // make sure batch < axis if (canonical_batch >= 0) { CAFFE_ENFORCE_LT(canonical_batch, canonical_axis); } // if no batch, then left is product of dims up to axis // otherwise, left is product of dims between batch and axis const int left = (canonical_batch >= 0 ? input->size_between_dim(canonical_batch, canonical_axis) : input->size_to_dim(canonical_axis)); const int right = input->size_from_dim(canonical_axis); // product of dims from 1 to batch const int batch_dim = (canonical_batch >= 0 ? 
input->size_to_dim(canonical_batch) * input->dim(canonical_batch) : -1); T fill_val = convert::To<float, T>(grad_ ? 0.0f : fill_val_); if (mode_ == "sequence") { if (HasArgument("repeat_from_axis")) { const int canonical_repeat_from = input->canonical_axis_index(repeat_from_); const int repeated_dims = input->size_from_dim(canonical_repeat_from); const int masked_dims = right / repeated_dims; hipLaunchKernelGGL(( repeatedSequenceMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, masked_dims, repeated_dims, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } else { hipLaunchKernelGGL(( sequenceMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } } else if (mode_ == "window") { hipLaunchKernelGGL(( windowMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), window_centers->data<int>(), radius_, fill_val, output->template mutable_data<T>()); } else if (mode_ == "upper") { hipLaunchKernelGGL(( upperMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "lower") { hipLaunchKernelGGL(( lowerMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "upperdiag") { hipLaunchKernelGGL(( upperDiagMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template 
mutable_data<T>()); } else if (mode_ == "lowerdiag") { hipLaunchKernelGGL(( lowerDiagMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else { CAFFE_ENFORCE(false, "Unsupported mode for SequenceMaskOp!"); } return true; } REGISTER_CUDA_OPERATOR(SequenceMask, SequenceMaskOp<CUDAContext>); } // namespace caffe2
7b1d566b00fa9aedd7f13dd72255108e16c4ef40.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/boolean_mask_ops.h" #include <cub/cub.cuh> namespace caffe2 { namespace { __global__ void BooleanMaskCopyKernel( const int64_t numOfOutput, const int64_t numBytes, const int64_t* indices, const uint8_t* src, uint8_t* dest) { for (int64_t i = blockIdx.x; i < numOfOutput; i += gridDim.x) { const auto srcBase = indices[i] * numBytes; const auto destBase = i * numBytes; for (int64_t j = threadIdx.x; j < numBytes; j += blockDim.x) { dest[destBase + j] = src[srcBase + j]; } } } } template <> class BooleanMaskOp<CUDAContext> final : public Operator<CUDAContext> { public: BooleanMaskOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} bool RunOnDevice() override { const auto& src = Input(0); const auto& mask = Input(1); auto* dest = Output(0); CAFFE_ENFORCE(src.ndim() >= 1); CAFFE_ENFORCE_EQ(mask.ndim(), 1); CAFFE_ENFORCE(src.dims()[0] == mask.dims()[0]); const auto* maskData = mask.data<bool>(); const auto outerSize = mask.dims()[0]; indices_.Resize(outerSize); auto* indicesData = indices_.mutable_data<int64_t>(); size_t numBytes = 0; cub::CountingInputIterator<int> itr(0); cub::DeviceSelect::Flagged( nullptr, numBytes, itr, maskData, indicesData, static_cast<int64_t*>(nullptr), outerSize, context_.cuda_stream()); auto numint64_t = static_cast<int64_t>((numBytes + sizeof(int64_t) - 1) / sizeof(int64_t)); // allocate one more int64_t at the end of scratch for storing numOfOutput scratch_.Resize(numint64_t + 1); auto* scratchData = scratch_.mutable_data<int64_t>(); auto* numOfOutputData = scratchData + numint64_t; cub::DeviceSelect::Flagged( static_cast<void*>(scratchData), numBytes, itr, maskData, indicesData, numOfOutputData, outerSize, context_.cuda_stream()); // Copy numOfOutput from gpu to cpu int64_t numOfOutput; context_.CopyToCPU(1, numOfOutputData, &numOfOutput); indices_.Resize(numOfOutput); std::vector<int64_t> dims = src.dims(); dims[0] = numOfOutput; 
dest->Resize(dims); auto* destData = (uint8_t*)dest->raw_mutable_data(src.meta()); const auto* srcData = (uint8_t*)src.raw_data(); if (OutputSize() == 2) { auto* indicesOut = Output(1); indicesOut->Resize(numOfOutput); indicesOut->template mutable_data<int64_t>(); } if (numOfOutput > 0) { BooleanMaskCopyKernel<<< min(numOfOutput, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS)), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( numOfOutput, src.size_from_dim(1) * src.meta().itemsize(), indicesData, srcData, destData); if (OutputSize() == 2) { Output(1)->CopyFrom(indices_, &context_); } } return true; } private: Tensor indices_{CUDA}; Tensor scratch_{CUDA}; }; REGISTER_CUDA_OPERATOR(BooleanMask, BooleanMaskOp<CUDAContext>); namespace { #define minf (-1.0f * std::numeric_limits<float>::infinity()) template <typename T> __global__ void sequenceMaskKernel( int N, int M, int B, const T* in, const int* seq_lengths, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= seq_lengths[j] ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= seq_lengths[i] ? fill_val : in[index]); } } } template <typename T> __global__ void repeatedSequenceMaskKernel( int N, int M, int D, const T* in, const int* seq_lengths, T fill_val, T* out) { CUDA_1D_KERNEL_LOOP(index, N * M * D) { int i = index / (D * M); int j = (index / D) % M; out[index] = (j >= seq_lengths[i] ? 
fill_val : in[index]); } } template <typename T> __global__ void windowMaskKernel( int N, int M, int B, const T* in, const int* window_centers, const int radius, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < window_centers[j] - radius || k > window_centers[j] + radius ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < window_centers[i] - radius || j > window_centers[i] + radius ? fill_val : in[index]); } } } template <typename T> __global__ void upperMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k > j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j > i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < i ? fill_val : in[index]); } } } template <typename T> __global__ void upperDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= j ? 
fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k <= j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j <= i ? fill_val : in[index]); } } } } // namespace template <> bool SequenceMaskOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0)); } template <> template <class T> bool SequenceMaskOp<CUDAContext>::DoRunWithType() { const Tensor* input = &Input(0); const Tensor* sequence_lengths = nullptr; const Tensor* window_centers = nullptr; if (mode_ == "sequence") { sequence_lengths = &Input(1); } else if (mode_ == "window") { window_centers = &Input(1); } auto* output = Output(0); output->ResizeLike(*input); const auto canonical_axis = input->canonical_axis_index(axis_); // canonical_batch is non-negative if batching, -1 otherwise int canonical_batch = -1; if ((HasArgument("batch"))) { canonical_batch = input->canonical_axis_index(batch_); } // make sure batch < axis if (canonical_batch >= 0) { CAFFE_ENFORCE_LT(canonical_batch, canonical_axis); } // if no batch, then left is product of dims up to axis // otherwise, left is product of dims between batch and axis const int left = (canonical_batch >= 0 ? input->size_between_dim(canonical_batch, canonical_axis) : input->size_to_dim(canonical_axis)); const int right = input->size_from_dim(canonical_axis); // product of dims from 1 to batch const int batch_dim = (canonical_batch >= 0 ? 
input->size_to_dim(canonical_batch) * input->dim(canonical_batch) : -1); T fill_val = convert::To<float, T>(grad_ ? 0.0f : fill_val_); if (mode_ == "sequence") { if (HasArgument("repeat_from_axis")) { const int canonical_repeat_from = input->canonical_axis_index(repeat_from_); const int repeated_dims = input->size_from_dim(canonical_repeat_from); const int masked_dims = right / repeated_dims; repeatedSequenceMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, masked_dims, repeated_dims, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } else { sequenceMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } } else if (mode_ == "window") { windowMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), window_centers->data<int>(), radius_, fill_val, output->template mutable_data<T>()); } else if (mode_ == "upper") { upperMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "lower") { lowerMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "upperdiag") { upperDiagMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "lowerdiag") { lowerDiagMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, 
input->data<T>(), fill_val, output->template mutable_data<T>()); } else { CAFFE_ENFORCE(false, "Unsupported mode for SequenceMaskOp!"); } return true; } REGISTER_CUDA_OPERATOR(SequenceMask, SequenceMaskOp<CUDAContext>); } // namespace caffe2
2ffdebceea37c9726dfd36ed8307d8e12f78f9dc.hip
// !!! This is a file automatically generated by hipify!!! #define GRB_USE_APSPIE //#define private public #include <iostream> #include <algorithm> #include <string> #include <cstdio> #include <cstdlib> #include <hip/hip_runtime_api.h> #include "graphblas/mmio.hpp" #include "graphblas/util.hpp" #include "graphblas/graphblas.hpp" #include <boost/program_options.hpp> #include <test/test.hpp> int main( int argc, char** argv ) { std::vector<graphblas::Index> row_indices; std::vector<graphblas::Index> col_indices; std::vector<float> values; graphblas::Index nrows, ncols, nvals; // Parse arguments namespace po = boost::program_options; po::variables_map vm; parseArgs( argc, argv, vm ); int TA, TB, NT, NUM_ITER, DEVICE; bool ROW_MAJOR, DEBUG, SPLIT; if( vm.count("ta") ) TA = vm["ta"].as<int>(); // default values of TA, TB, NT will be used if( vm.count("tb") ) TB = vm["tb"].as<int>(); if( vm.count("nt") ) NT = vm["nt"].as<int>(); if( vm.count("debug") ) DEBUG = vm["debug"].as<bool>(); if( vm.count("split") ) SPLIT = vm["split"].as<bool>(); if( vm.count("iter") ) NUM_ITER = vm["iter"].as<int>(); if( vm.count("device") ) { DEVICE = vm["device"].as<int>(); hipDeviceProp_t prop; CUDA( hipGetDeviceProperties( &prop, DEVICE )); if( DEBUG ) std::cout << "Using device: " << DEVICE << ", " << prop.name << "\n"; } // ROW_MAJOR == 1: means row major // ROW_MAJOR == 0: means col major // TA == 0 && TB == 0 && NT == 0: means cusparse if( vm.count("major") ) { std::string major = vm["major"].as<std::string>(); ROW_MAJOR = (major=="row"); if( major=="cusparse" ) { TA = 0; TB = 0; NT = 0; } else if( major=="cusparse2" ) { TA = 0; TB = 0; NT = 1; } } if( DEBUG ) { std::cout << "ta: " << TA << "\n"; std::cout << "tb: " << TB << "\n"; std::cout << "nt: " << NT << "\n"; std::cout << "row: " << ROW_MAJOR << "\n"; std::cout << "debug: " << DEBUG << "\n"; std::cout << "split: " << SPLIT << "\n"; } // Read in sparse matrix if (argc < 2) { fprintf(stderr, "Usage: %s [matrix-market-filename]\n", 
argv[0]); exit(1); } else { readMtx( argv[argc-1], row_indices, col_indices, values, nrows, ncols, nvals, DEBUG ); } // Matrix A graphblas::Matrix<float> a(nrows, ncols); a.build( row_indices, col_indices, values, nvals ); a.nrows( nrows ); a.ncols( ncols ); a.nvals( nvals ); if( DEBUG ) a.print(); // Matrix B graphblas::Matrix<float> b(nrows, ncols); b.build( row_indices, col_indices, values, nvals ); b.nrows( nrows ); b.ncols( ncols ); b.nvals( nvals ); graphblas::Matrix<float> c(nrows, ncols); graphblas::Semiring op; // Warmup graphblas::GpuTimer warmup; warmup.Start(); graphblas::mxm<float, float, float>( c, op, a, b, TA, TB, NT, ROW_MAJOR ); warmup.Stop(); graphblas::GpuTimer gpu_mxm; //hipProfilerStart(); gpu_mxm.Start(); for( int i=0; i<NUM_ITER; i++ ) { if( SPLIT ) graphblas::mxmCompute<float, float, float>( c, op, a, b, TA, TB, NT, ROW_MAJOR ); else graphblas::mxm<float, float, float>( c, op, a, b, TA, TB, NT, ROW_MAJOR ); } //hipProfilerStop(); gpu_mxm.Stop(); float flop = 0; if( DEBUG ) std::cout << "warmup, " << warmup.ElapsedMillis() << ", " << flop/warmup.ElapsedMillis()/1000000.0 << "\n"; float elapsed_mxm = gpu_mxm.ElapsedMillis(); std::cout << "spgemm, " << elapsed_mxm/NUM_ITER << "\n"; if( DEBUG ) c.print(); /*c.extractTuples( out_denseVal ); for( int i=0; i<nvals; i++ ) { graphblas::Index row = row_indices[i]; graphblas::Index col = col_indices[i]; float val = values[i]; if( col<max_ncols ) { // Row major order if( ROW_MAJOR ) //std::cout << row << " " << col << " " << val << " " << out_denseVal[row*max_ncols+col] << std::endl; BOOST_ASSERT( val==out_denseVal[row*max_ncols+col] ); else // Column major order //std::cout << row << " " << col << " " << val << " " << out_denseVal[col*nrows+row] << std::endl; BOOST_ASSERT( val==out_denseVal[col*nrows+row] ); } }*/ return 0; }
2ffdebceea37c9726dfd36ed8307d8e12f78f9dc.cu
#define GRB_USE_APSPIE //#define private public #include <iostream> #include <algorithm> #include <string> #include <cstdio> #include <cstdlib> #include <cuda_profiler_api.h> #include "graphblas/mmio.hpp" #include "graphblas/util.hpp" #include "graphblas/graphblas.hpp" #include <boost/program_options.hpp> #include <test/test.hpp> int main( int argc, char** argv ) { std::vector<graphblas::Index> row_indices; std::vector<graphblas::Index> col_indices; std::vector<float> values; graphblas::Index nrows, ncols, nvals; // Parse arguments namespace po = boost::program_options; po::variables_map vm; parseArgs( argc, argv, vm ); int TA, TB, NT, NUM_ITER, DEVICE; bool ROW_MAJOR, DEBUG, SPLIT; if( vm.count("ta") ) TA = vm["ta"].as<int>(); // default values of TA, TB, NT will be used if( vm.count("tb") ) TB = vm["tb"].as<int>(); if( vm.count("nt") ) NT = vm["nt"].as<int>(); if( vm.count("debug") ) DEBUG = vm["debug"].as<bool>(); if( vm.count("split") ) SPLIT = vm["split"].as<bool>(); if( vm.count("iter") ) NUM_ITER = vm["iter"].as<int>(); if( vm.count("device") ) { DEVICE = vm["device"].as<int>(); cudaDeviceProp prop; CUDA( cudaGetDeviceProperties( &prop, DEVICE )); if( DEBUG ) std::cout << "Using device: " << DEVICE << ", " << prop.name << "\n"; } // ROW_MAJOR == 1: means row major // ROW_MAJOR == 0: means col major // TA == 0 && TB == 0 && NT == 0: means cusparse if( vm.count("major") ) { std::string major = vm["major"].as<std::string>(); ROW_MAJOR = (major=="row"); if( major=="cusparse" ) { TA = 0; TB = 0; NT = 0; } else if( major=="cusparse2" ) { TA = 0; TB = 0; NT = 1; } } if( DEBUG ) { std::cout << "ta: " << TA << "\n"; std::cout << "tb: " << TB << "\n"; std::cout << "nt: " << NT << "\n"; std::cout << "row: " << ROW_MAJOR << "\n"; std::cout << "debug: " << DEBUG << "\n"; std::cout << "split: " << SPLIT << "\n"; } // Read in sparse matrix if (argc < 2) { fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]); exit(1); } else { readMtx( argv[argc-1], row_indices, 
col_indices, values, nrows, ncols, nvals, DEBUG ); } // Matrix A graphblas::Matrix<float> a(nrows, ncols); a.build( row_indices, col_indices, values, nvals ); a.nrows( nrows ); a.ncols( ncols ); a.nvals( nvals ); if( DEBUG ) a.print(); // Matrix B graphblas::Matrix<float> b(nrows, ncols); b.build( row_indices, col_indices, values, nvals ); b.nrows( nrows ); b.ncols( ncols ); b.nvals( nvals ); graphblas::Matrix<float> c(nrows, ncols); graphblas::Semiring op; // Warmup graphblas::GpuTimer warmup; warmup.Start(); graphblas::mxm<float, float, float>( c, op, a, b, TA, TB, NT, ROW_MAJOR ); warmup.Stop(); graphblas::GpuTimer gpu_mxm; //cudaProfilerStart(); gpu_mxm.Start(); for( int i=0; i<NUM_ITER; i++ ) { if( SPLIT ) graphblas::mxmCompute<float, float, float>( c, op, a, b, TA, TB, NT, ROW_MAJOR ); else graphblas::mxm<float, float, float>( c, op, a, b, TA, TB, NT, ROW_MAJOR ); } //cudaProfilerStop(); gpu_mxm.Stop(); float flop = 0; if( DEBUG ) std::cout << "warmup, " << warmup.ElapsedMillis() << ", " << flop/warmup.ElapsedMillis()/1000000.0 << "\n"; float elapsed_mxm = gpu_mxm.ElapsedMillis(); std::cout << "spgemm, " << elapsed_mxm/NUM_ITER << "\n"; if( DEBUG ) c.print(); /*c.extractTuples( out_denseVal ); for( int i=0; i<nvals; i++ ) { graphblas::Index row = row_indices[i]; graphblas::Index col = col_indices[i]; float val = values[i]; if( col<max_ncols ) { // Row major order if( ROW_MAJOR ) //std::cout << row << " " << col << " " << val << " " << out_denseVal[row*max_ncols+col] << std::endl; BOOST_ASSERT( val==out_denseVal[row*max_ncols+col] ); else // Column major order //std::cout << row << " " << col << " " << val << " " << out_denseVal[col*nrows+row] << std::endl; BOOST_ASSERT( val==out_denseVal[col*nrows+row] ); } }*/ return 0; }
3f03e3b0cde61b3761c396216f35aba55537c4a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void initializeAtRandom ( const int dim, const int nwl, const float dlt, const float *x0, const float *stn, float *xx ) { int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; int t = i + j * dim; if ( i < dim && j < nwl ) { xx[t] = x0[i] + dlt * stn[t]; } }
3f03e3b0cde61b3761c396216f35aba55537c4a0.cu
#include "includes.h" __global__ void initializeAtRandom ( const int dim, const int nwl, const float dlt, const float *x0, const float *stn, float *xx ) { int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; int t = i + j * dim; if ( i < dim && j < nwl ) { xx[t] = x0[i] + dlt * stn[t]; } }
1b644bf1b17e888f020f4bd8692576fc236b05db.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <assert.h> #include "tree.cuh" __device__ node* tempData[INPUTSIZE]; __device__ node* tempData1[INPUTSIZE]; __device__ node* root1; __device__ node* globalCurr; __device__ node* globalCurrs[ORDER]; __device__ node* newNode; __device__ int globalIdx; __device__ int tempKeys[ORDER]; __device__ node* tempPointers[ORDER]; __device__ int globalPointerIdx; __device__ node* globalCurr1, *globalCurr2; __device__ node* foundChild; __device__ void make_node(node*& new_node) { new_node = (node*)malloc(sizeof(node)); new_node->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); new_node->pointers = (node**)malloc( ORDER * sizeof(node *) ); new_node->is_leaf = false; new_node->num_keys = 0; new_node->parent = NULL; new_node->next = NULL; } __device__ void make_leaf(node*& new_node) { make_node(new_node); new_node->is_leaf = true; } __global__ void buildLeaves(node*& root, int* input, int* result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1); node* newNode; if(inWholeIdx < noOfNodes) { make_leaf(newNode); tempData[inWholeIdx] = newNode; assert(tempData[inWholeIdx]); } } __global__ void buildRoot(node*& root, int* input, int* result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; if(inWholeIdx == 0) { root1 = (node*)malloc(sizeof(node)); root1->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); root1->pointers = (node**)malloc( ORDER * sizeof(node *) ); root1->is_leaf = false; root1->num_keys 
= 0; root1->parent = NULL; root1->next = NULL; root1->keys[0] = 5; } } __global__ void buildLevel(node*& root, int* input, int* result, int size, int x) { node** arr; unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned noOfNodes = size / (ORDER / 2); if(x) arr = tempData1; else arr = tempData; if(inWholeIdx < noOfNodes) { node* newNode; make_node(newNode); newNode->keys[0] = inWholeIdx; arr[inWholeIdx] = newNode; } } __global__ void fillLevel(node*& root, int* input, int* result, int size, int x) { node** parent; node** children; if(x) { parent = tempData1; children = tempData; } else { parent = tempData; children = tempData1; } unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned noOfNodes = size / (ORDER / 2); unsigned inNodeIdx = inWholeIdx % (ORDER / 2); unsigned nodeNo = inWholeIdx / (ORDER / 2 ); if(nodeNo == noOfNodes) { nodeNo--; inNodeIdx = ((ORDER/2)) + inNodeIdx; } if(inWholeIdx < size) { assert(parent[nodeNo]); assert(parent[nodeNo]->keys); assert(children[inWholeIdx]); parent[nodeNo]->pointers[inNodeIdx] = children[inWholeIdx]; children[inWholeIdx]->parent = parent[nodeNo]; if(inNodeIdx < (ORDER/2) - 1 || (nodeNo == noOfNodes -1 && inWholeIdx != size - 1)) { assert(children[inWholeIdx]); assert(children[inWholeIdx]->num_keys); assert(children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]); parent[nodeNo]->keys[inNodeIdx] = children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]; assert(parent[nodeNo]->keys[inNodeIdx]); } } if(inNodeIdx == 0) { if(nodeNo < noOfNodes -1) { parent[nodeNo]->num_keys = (ORDER / 2) - 1; } else if(nodeNo == noOfNodes - 1) parent[nodeNo]->num_keys = (size % (ORDER / 2)) + (ORDER / 2) - 1; } } __global__ void fillRoot(node*& root, int* input, int* result, int size, int x) { node** children; if(x) { children = tempData; } else { children = tempData1; } unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned inNodeIdx = inWholeIdx % size; if(inWholeIdx < size) { 
assert(children[inWholeIdx]); root1->pointers[inNodeIdx] = children[inWholeIdx]; children[inWholeIdx]->parent = root1; if(inNodeIdx < size -1 ) { assert(children[inWholeIdx]); assert(children[inWholeIdx]->num_keys); assert(children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]); root1->keys[inWholeIdx] = children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]; assert(root1->keys[inNodeIdx]); } } if(inWholeIdx == 0) { root1->num_keys = size - 1; } } __global__ void fillLeaves(node*& root, int* input, int* result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1); unsigned inNodeIdx = inWholeIdx % ((ORDER / 2) - 1); unsigned nodeNo = inWholeIdx / ((ORDER / 2 ) -1); if(nodeNo == noOfNodes) { nodeNo--; inNodeIdx = ((ORDER/2) - 1) + inNodeIdx; } if(inWholeIdx < INPUTSIZE) { assert(tempData[nodeNo]); assert(tempData[nodeNo]->keys); assert(input[inWholeIdx]); tempData[nodeNo]->keys[inNodeIdx] = input[inWholeIdx]; } if(inNodeIdx == 0) { if(nodeNo < noOfNodes -1) { tempData[nodeNo]->next = tempData[nodeNo + 1]; tempData[nodeNo]->num_keys = ((ORDER / 2) - 1); } else if(nodeNo == noOfNodes - 1) tempData[nodeNo]->num_keys = (INPUTSIZE % ((ORDER / 2) - 1)) + ((ORDER / 2) - 1); } } __global__ void bulkLoad(node*& root, int* input, int* result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1); unsigned inNodeIdx = inWholeIdx % ((ORDER / 2) - 1); unsigned nodeNo = inWholeIdx / ((ORDER / 2 ) -1); if(inWholeIdx == 0) { root->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); root->pointers = (node**)malloc( ORDER * sizeof(node *) ); root->is_leaf = false; root->num_keys = 0; root->parent = NULL; root->next = NULL; } node* newNode; if(inNodeIdx == 0 && nodeNo < noOfNodes) { make_leaf(newNode); tempData[nodeNo] = newNode; assert(tempData[nodeNo]); } if(nodeNo == noOfNodes) { nodeNo--; inNodeIdx = ((ORDER/2) - 1) + inNodeIdx; } __syncthreads(); 
if(inWholeIdx < INPUTSIZE && nodeNo < noOfNodes) { tempData[nodeNo]->keys[inNodeIdx] = input[inWholeIdx]; } } __device__ void addKey(node* curr, node* child) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; int val = child->keys[0]; if(contains(curr, val)) return; if(inWholeIdx <= curr->num_keys) { if(inWholeIdx < curr->num_keys) tempKeys[inWholeIdx] = curr->keys[inWholeIdx]; if(!curr->is_leaf) tempPointers[inWholeIdx] = curr->pointers[inWholeIdx]; } if(inWholeIdx <= curr->num_keys) { if(inWholeIdx == 0) { if(val <= curr->keys[0]) { globalIdx = 0; } } else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) { if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx]) { globalIdx = inWholeIdx; } } else if(inWholeIdx == curr->num_keys) { if(val > curr->keys[curr->num_keys - 1]) { globalIdx = curr->num_keys; } } } __syncthreads(); if(inWholeIdx >= globalIdx && inWholeIdx <= curr->num_keys) { if(inWholeIdx < curr->num_keys) curr->keys[inWholeIdx+1] = tempKeys[inWholeIdx]; if(!curr->is_leaf) curr->pointers[inWholeIdx+1] = tempPointers[inWholeIdx]; } __syncthreads(); if(inWholeIdx == globalIdx) { if(inWholeIdx > 0) curr->keys[globalIdx] = val; else curr->keys[globalIdx] = child->keys[child->num_keys]+1; if(!curr->is_leaf) curr->pointers[globalIdx] = child; } __syncthreads(); if(inWholeIdx == 0) curr->num_keys++; } __device__ void split(node* curr, node* child) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; node* newNodeLocal; if(inWholeIdx == 0) { newNode = (node*)malloc(sizeof(node)); newNode->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); newNode->pointers = (node**)malloc( ORDER * sizeof(node *) ); newNode->is_leaf = curr->is_leaf; newNode->num_keys = ORDER/2; newNode->parent = curr->parent; newNode->next = curr->next; curr->num_keys = ORDER/2; curr->next = newNode; globalPointerIdx = 0; } __syncthreads(); newNodeLocal = newNode; __syncthreads(); if(inWholeIdx < (ORDER /2)) { newNode->keys[inWholeIdx] = curr->keys[ORDER/2 + inWholeIdx]; 
} if(!curr->is_leaf && inWholeIdx <= (ORDER /2)) { newNode->pointers[inWholeIdx] = curr->pointers[ORDER/2 + inWholeIdx]; } if(curr->parent->num_keys >= ORDER) split(curr, newNode); else addKey(curr->parent, newNodeLocal); } __global__ void createNewNode(node*& root) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; node* curr = foundChild; if(inWholeIdx == 0) { root = (node*)malloc(sizeof(node)); root->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); root->pointers = (node**)malloc( ORDER * sizeof(node *) ); root->is_leaf = curr->is_leaf; root->num_keys = ORDER/2; root->parent = curr->parent; root->next = curr->next; curr->num_keys = ORDER/2; curr->next = newNode; globalPointerIdx = 0; } } __device__ void split(node* curr, int val) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; node* newNodeLocal; if(inWholeIdx == 0) { newNode = (node*)malloc(sizeof(node)); newNode->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); newNode->pointers = (node**)malloc( ORDER * sizeof(node *) ); newNode->is_leaf = curr->is_leaf; newNode->num_keys = ORDER/2; newNode->parent = curr->parent; newNode->next = curr->next; curr->num_keys = ORDER/2; curr->next = newNode; globalPointerIdx = 0; } __syncthreads(); newNodeLocal = newNode; __syncthreads(); if(inWholeIdx < (ORDER /2)) { newNode->keys[inWholeIdx] = curr->keys[ORDER/2 + inWholeIdx]; } if(!curr->is_leaf && inWholeIdx <= (ORDER /2)) { newNode->pointers[inWholeIdx] = curr->pointers[ORDER/2 + inWholeIdx]; } if(curr->parent->num_keys >= ORDER) split(curr, newNode); else addKey(curr->parent, newNodeLocal); } __device__ void addKey(node* curr, int val) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; if(contains(curr, val)) return; if(inWholeIdx < curr->num_keys) tempKeys[inWholeIdx] = curr->keys[inWholeIdx]; if(inWholeIdx <= curr->num_keys) { if(inWholeIdx == 0) { if(val <= curr->keys[0]) { globalIdx = 0; } } else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) { if(curr->keys[inWholeIdx-1] < val && val <= 
curr->keys[inWholeIdx]) { globalIdx = inWholeIdx; } } else if(inWholeIdx == curr->num_keys) { if(val > curr->keys[curr->num_keys - 1]) { globalIdx = curr->num_keys; } } } __syncthreads(); if(inWholeIdx >= globalIdx && inWholeIdx < curr->num_keys) curr->keys[inWholeIdx+1] = tempKeys[inWholeIdx]; __syncthreads(); if(inWholeIdx == globalIdx) curr->keys[globalIdx] = val; __syncthreads(); if(inWholeIdx == 0) curr->num_keys++; } __global__ void insertVal(int val) { node* curr = find(val); __syncthreads(); assert(curr->num_keys < ORDER -1); if(curr->num_keys < ORDER -1) addKey(curr, val); else split(curr, val); } __global__ void copyNode(node* node1, int* full) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; foundChild = foundChild->parent; node* curr = foundChild; int val = node1->keys[0]; if(inWholeIdx == 0) { if(foundChild->num_keys == ORDER - 1) full[0] = 1; else full[0] = 0; } if(inWholeIdx <= curr->num_keys) { if(inWholeIdx < curr->num_keys) tempKeys[inWholeIdx] = curr->keys[inWholeIdx]; if(!curr->is_leaf) tempPointers[inWholeIdx] = curr->pointers[inWholeIdx]; } if(inWholeIdx <= curr->num_keys) { if(inWholeIdx == 0) { if(val <= curr->keys[0]) { globalIdx = 0; } } else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) { if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx]) { globalIdx = inWholeIdx; } } else if(inWholeIdx == curr->num_keys) { if(val > curr->keys[curr->num_keys - 1]) { globalIdx = curr->num_keys; } } } if(inWholeIdx == 0 && foundChild->num_keys == ORDER - 1) { foundChild->num_keys = ORDER/2; foundChild = foundChild->parent; } } __global__ void copyNode(int val, int* full) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; node* curr = foundChild; if(inWholeIdx == 0) { if(foundChild->num_keys == ORDER - 1) full[0] = 1; else full[0] = 0; } if(inWholeIdx <= curr->num_keys) { if(inWholeIdx < curr->num_keys) tempKeys[inWholeIdx] = curr->keys[inWholeIdx]; if(!curr->is_leaf) tempPointers[inWholeIdx] = 
curr->pointers[inWholeIdx]; } if(inWholeIdx <= curr->num_keys) { if(inWholeIdx == 0) { if(val <= curr->keys[0]) { globalIdx = 0; } } else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) { if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx]) { globalIdx = inWholeIdx; } } else if(inWholeIdx == curr->num_keys) { if(val > curr->keys[curr->num_keys - 1]) { globalIdx = curr->num_keys; } } } if(inWholeIdx == 0 && foundChild->num_keys == ORDER - 1) { foundChild->num_keys = ORDER/2; } } __global__ void addValue(int val) { node* curr = foundChild; unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; if(inWholeIdx > globalIdx && inWholeIdx < curr->num_keys) curr->keys[inWholeIdx] = tempKeys[inWholeIdx - 1]; if(inWholeIdx == globalIdx) { curr->keys[globalIdx] = val; curr->num_keys++; } } __global__ void addValue(node* nn) { node* curr = foundChild; unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; if(inWholeIdx > globalIdx && inWholeIdx < curr->num_keys) curr->keys[inWholeIdx] = tempKeys[inWholeIdx - 1]; if(inWholeIdx == globalIdx) { curr->pointers[globalIdx] = nn; curr->num_keys++; if(globalIdx == 0) curr->keys[0] = nn->keys[nn->num_keys-1]; else curr->keys[globalIdx] = nn->keys[0]; } } __global__ void copyToNewNode(node*& nnode) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; if(inWholeIdx < (ORDER /2)) { nnode->keys[inWholeIdx] = tempKeys[ORDER/2 + inWholeIdx]; } if(!nnode->is_leaf && inWholeIdx <= (ORDER /2)) { nnode->pointers[inWholeIdx] = tempPointers[ORDER/2 + inWholeIdx]; } } __device__ int contains(node* curr, int val) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; if(inWholeIdx < curr->num_keys) { if(curr->keys[inWholeIdx] == val) globalIdx = 1; } __syncthreads(); return globalIdx; } __device__ node* find(int val) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; assert(root1); node* curr = root1; assert(curr); assert(!curr->is_leaf); __syncthreads(); while(!curr->is_leaf) { if(inWholeIdx <= curr->num_keys) 
{ if(inWholeIdx == 0) { assert(curr->keys[0]); if(val <= curr->keys[0]) { assert(curr->pointers[0]); globalCurr = curr->pointers[0]; } } else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) { assert(curr->keys[inWholeIdx-1]); assert(curr->keys[inWholeIdx]); if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx]) { assert(curr->pointers[inWholeIdx]); globalCurr = curr->pointers[inWholeIdx]; } } else if(inWholeIdx == curr->num_keys) { assert(curr->keys[curr->num_keys - 1]); if(val > curr->keys[curr->num_keys - 1]) { assert(curr->pointers[inWholeIdx]); globalCurr = curr->pointers[inWholeIdx]; } } } __syncthreads(); curr = globalCurr; __syncthreads(); } return curr; } __device__ node* find(int* values, int len) { //unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned inNodeIdx = threadIdx.x; unsigned nodeNo = blockIdx.x; int val; if(nodeNo < len) val = values[nodeNo]; assert(root1); node* curr = root1; assert(curr); assert(!curr->is_leaf); __syncthreads(); while(!curr->is_leaf) { if(inNodeIdx <= curr->num_keys && nodeNo < len) { if(inNodeIdx == 0) { assert(curr->keys[0]); if(val <= curr->keys[0]) { assert(curr->pointers[0]); globalCurrs[nodeNo] = curr->pointers[0]; } } else if(inNodeIdx < curr->num_keys && inNodeIdx > 0) { assert(curr->keys[inNodeIdx-1]); assert(curr->keys[inNodeIdx]); if(curr->keys[inNodeIdx-1] < val && val <= curr->keys[inNodeIdx]) { assert(curr->pointers[inNodeIdx]); globalCurrs[nodeNo] = curr->pointers[inNodeIdx]; } } else if(inNodeIdx == curr->num_keys) { assert(curr->keys[curr->num_keys - 1]); if(val > curr->keys[curr->num_keys - 1]) { assert(curr->pointers[inNodeIdx]); globalCurrs[nodeNo] = curr->pointers[inNodeIdx]; } } } __syncthreads(); assert(globalCurrs[nodeNo]); curr = globalCurrs[nodeNo]; __syncthreads(); } return curr; } __global__ void searchBetter(int val, int* result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; node* curr; switch(result[0]) { case 2: curr = root1; break; case 3: curr = 
globalCurr1; break; case 4: curr = globalCurr2; break; default: return; } assert(curr); assert(!curr->is_leaf); node* found = NULL; if(inWholeIdx <= curr->num_keys) { if(inWholeIdx == 0) { assert(curr->keys[0]); if(val <= curr->keys[0]) { assert(curr->pointers[0]); found = curr->pointers[0]; } } else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) { assert(curr->keys[inWholeIdx-1]); assert(curr->keys[inWholeIdx]); if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx]) { assert(curr->pointers[inWholeIdx]); found = curr->pointers[inWholeIdx]; } } else if(inWholeIdx == curr->num_keys) { assert(curr->keys[curr->num_keys - 1]); if(val > curr->keys[curr->num_keys - 1]) { assert(curr->pointers[inWholeIdx]); found = curr->pointers[inWholeIdx]; } } } if(found != NULL) { assert(found); if(result[0] == 2 || result[0] == 3) { globalCurr2 = found; result[0] = 4; } else if(result[0] == 4) { globalCurr1 = found; result[0] = 3; } if(found->is_leaf) result[0] = result[0] * 2; } } __global__ void containsBetter(int val, int* result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; node* curr; if(result[0] == 6) curr = globalCurr1; else if(result[0] == 8) curr = globalCurr2; assert(curr->is_leaf); if(inWholeIdx == 0) foundChild = curr; if(inWholeIdx < curr->num_keys) { if(curr->keys[inWholeIdx] == val) result[0] = 1; } } __global__ void search(int val, int* result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; node* curr = find(val); result[0] = 0; if(inWholeIdx < curr->num_keys) { if(curr->keys[inWholeIdx] == val) result[0] = 1; } } __global__ void search(int* vals, int* results, int len) { //unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned inNodeIdx = threadIdx.x; unsigned nodeNo = blockIdx.x; node* curr = find(vals, len); if(nodeNo < len) results[nodeNo] = 0; if(nodeNo < len && inNodeIdx < curr->num_keys) { if(curr->keys[inNodeIdx] == vals[nodeNo]) results[nodeNo] = 1; } } __global__ void test(node*& root, int* input, int* 
result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; if(inWholeIdx== 0) { //node* curr = root1; /*while(!curr->is_leaf) { curr = curr->pointers[2]; }*/ result[0] = root1->pointers[root1->num_keys]->keys[0]; } }
1b644bf1b17e888f020f4bd8692576fc236b05db.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <assert.h> #include "tree.cuh" __device__ node* tempData[INPUTSIZE]; __device__ node* tempData1[INPUTSIZE]; __device__ node* root1; __device__ node* globalCurr; __device__ node* globalCurrs[ORDER]; __device__ node* newNode; __device__ int globalIdx; __device__ int tempKeys[ORDER]; __device__ node* tempPointers[ORDER]; __device__ int globalPointerIdx; __device__ node* globalCurr1, *globalCurr2; __device__ node* foundChild; __device__ void make_node(node*& new_node) { new_node = (node*)malloc(sizeof(node)); new_node->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); new_node->pointers = (node**)malloc( ORDER * sizeof(node *) ); new_node->is_leaf = false; new_node->num_keys = 0; new_node->parent = NULL; new_node->next = NULL; } __device__ void make_leaf(node*& new_node) { make_node(new_node); new_node->is_leaf = true; } __global__ void buildLeaves(node*& root, int* input, int* result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1); node* newNode; if(inWholeIdx < noOfNodes) { make_leaf(newNode); tempData[inWholeIdx] = newNode; assert(tempData[inWholeIdx]); } } __global__ void buildRoot(node*& root, int* input, int* result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; if(inWholeIdx == 0) { root1 = (node*)malloc(sizeof(node)); root1->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); root1->pointers = (node**)malloc( ORDER * sizeof(node *) ); root1->is_leaf = false; root1->num_keys = 0; root1->parent = NULL; root1->next = NULL; root1->keys[0] = 5; } } 
__global__ void buildLevel(node*& root, int* input, int* result, int size, int x) { node** arr; unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned noOfNodes = size / (ORDER / 2); if(x) arr = tempData1; else arr = tempData; if(inWholeIdx < noOfNodes) { node* newNode; make_node(newNode); newNode->keys[0] = inWholeIdx; arr[inWholeIdx] = newNode; } } __global__ void fillLevel(node*& root, int* input, int* result, int size, int x) { node** parent; node** children; if(x) { parent = tempData1; children = tempData; } else { parent = tempData; children = tempData1; } unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned noOfNodes = size / (ORDER / 2); unsigned inNodeIdx = inWholeIdx % (ORDER / 2); unsigned nodeNo = inWholeIdx / (ORDER / 2 ); if(nodeNo == noOfNodes) { nodeNo--; inNodeIdx = ((ORDER/2)) + inNodeIdx; } if(inWholeIdx < size) { assert(parent[nodeNo]); assert(parent[nodeNo]->keys); assert(children[inWholeIdx]); parent[nodeNo]->pointers[inNodeIdx] = children[inWholeIdx]; children[inWholeIdx]->parent = parent[nodeNo]; if(inNodeIdx < (ORDER/2) - 1 || (nodeNo == noOfNodes -1 && inWholeIdx != size - 1)) { assert(children[inWholeIdx]); assert(children[inWholeIdx]->num_keys); assert(children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]); parent[nodeNo]->keys[inNodeIdx] = children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]; assert(parent[nodeNo]->keys[inNodeIdx]); } } if(inNodeIdx == 0) { if(nodeNo < noOfNodes -1) { parent[nodeNo]->num_keys = (ORDER / 2) - 1; } else if(nodeNo == noOfNodes - 1) parent[nodeNo]->num_keys = (size % (ORDER / 2)) + (ORDER / 2) - 1; } } __global__ void fillRoot(node*& root, int* input, int* result, int size, int x) { node** children; if(x) { children = tempData; } else { children = tempData1; } unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned inNodeIdx = inWholeIdx % size; if(inWholeIdx < size) { assert(children[inWholeIdx]); root1->pointers[inNodeIdx] = children[inWholeIdx]; 
children[inWholeIdx]->parent = root1; if(inNodeIdx < size -1 ) { assert(children[inWholeIdx]); assert(children[inWholeIdx]->num_keys); assert(children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]); root1->keys[inWholeIdx] = children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]; assert(root1->keys[inNodeIdx]); } } if(inWholeIdx == 0) { root1->num_keys = size - 1; } } __global__ void fillLeaves(node*& root, int* input, int* result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1); unsigned inNodeIdx = inWholeIdx % ((ORDER / 2) - 1); unsigned nodeNo = inWholeIdx / ((ORDER / 2 ) -1); if(nodeNo == noOfNodes) { nodeNo--; inNodeIdx = ((ORDER/2) - 1) + inNodeIdx; } if(inWholeIdx < INPUTSIZE) { assert(tempData[nodeNo]); assert(tempData[nodeNo]->keys); assert(input[inWholeIdx]); tempData[nodeNo]->keys[inNodeIdx] = input[inWholeIdx]; } if(inNodeIdx == 0) { if(nodeNo < noOfNodes -1) { tempData[nodeNo]->next = tempData[nodeNo + 1]; tempData[nodeNo]->num_keys = ((ORDER / 2) - 1); } else if(nodeNo == noOfNodes - 1) tempData[nodeNo]->num_keys = (INPUTSIZE % ((ORDER / 2) - 1)) + ((ORDER / 2) - 1); } } __global__ void bulkLoad(node*& root, int* input, int* result) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1); unsigned inNodeIdx = inWholeIdx % ((ORDER / 2) - 1); unsigned nodeNo = inWholeIdx / ((ORDER / 2 ) -1); if(inWholeIdx == 0) { root->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); root->pointers = (node**)malloc( ORDER * sizeof(node *) ); root->is_leaf = false; root->num_keys = 0; root->parent = NULL; root->next = NULL; } node* newNode; if(inNodeIdx == 0 && nodeNo < noOfNodes) { make_leaf(newNode); tempData[nodeNo] = newNode; assert(tempData[nodeNo]); } if(nodeNo == noOfNodes) { nodeNo--; inNodeIdx = ((ORDER/2) - 1) + inNodeIdx; } __syncthreads(); if(inWholeIdx < INPUTSIZE && nodeNo < noOfNodes) { tempData[nodeNo]->keys[inNodeIdx] = 
input[inWholeIdx]; } } __device__ void addKey(node* curr, node* child) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; int val = child->keys[0]; if(contains(curr, val)) return; if(inWholeIdx <= curr->num_keys) { if(inWholeIdx < curr->num_keys) tempKeys[inWholeIdx] = curr->keys[inWholeIdx]; if(!curr->is_leaf) tempPointers[inWholeIdx] = curr->pointers[inWholeIdx]; } if(inWholeIdx <= curr->num_keys) { if(inWholeIdx == 0) { if(val <= curr->keys[0]) { globalIdx = 0; } } else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) { if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx]) { globalIdx = inWholeIdx; } } else if(inWholeIdx == curr->num_keys) { if(val > curr->keys[curr->num_keys - 1]) { globalIdx = curr->num_keys; } } } __syncthreads(); if(inWholeIdx >= globalIdx && inWholeIdx <= curr->num_keys) { if(inWholeIdx < curr->num_keys) curr->keys[inWholeIdx+1] = tempKeys[inWholeIdx]; if(!curr->is_leaf) curr->pointers[inWholeIdx+1] = tempPointers[inWholeIdx]; } __syncthreads(); if(inWholeIdx == globalIdx) { if(inWholeIdx > 0) curr->keys[globalIdx] = val; else curr->keys[globalIdx] = child->keys[child->num_keys]+1; if(!curr->is_leaf) curr->pointers[globalIdx] = child; } __syncthreads(); if(inWholeIdx == 0) curr->num_keys++; } __device__ void split(node* curr, node* child) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; node* newNodeLocal; if(inWholeIdx == 0) { newNode = (node*)malloc(sizeof(node)); newNode->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); newNode->pointers = (node**)malloc( ORDER * sizeof(node *) ); newNode->is_leaf = curr->is_leaf; newNode->num_keys = ORDER/2; newNode->parent = curr->parent; newNode->next = curr->next; curr->num_keys = ORDER/2; curr->next = newNode; globalPointerIdx = 0; } __syncthreads(); newNodeLocal = newNode; __syncthreads(); if(inWholeIdx < (ORDER /2)) { newNode->keys[inWholeIdx] = curr->keys[ORDER/2 + inWholeIdx]; } if(!curr->is_leaf && inWholeIdx <= (ORDER /2)) { newNode->pointers[inWholeIdx] = 
curr->pointers[ORDER/2 + inWholeIdx]; } if(curr->parent->num_keys >= ORDER) split(curr, newNode); else addKey(curr->parent, newNodeLocal); } __global__ void createNewNode(node*& root) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; node* curr = foundChild; if(inWholeIdx == 0) { root = (node*)malloc(sizeof(node)); root->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); root->pointers = (node**)malloc( ORDER * sizeof(node *) ); root->is_leaf = curr->is_leaf; root->num_keys = ORDER/2; root->parent = curr->parent; root->next = curr->next; curr->num_keys = ORDER/2; curr->next = newNode; globalPointerIdx = 0; } } __device__ void split(node* curr, int val) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; node* newNodeLocal; if(inWholeIdx == 0) { newNode = (node*)malloc(sizeof(node)); newNode->keys = (int*)malloc( (ORDER - 1) * sizeof(int) ); newNode->pointers = (node**)malloc( ORDER * sizeof(node *) ); newNode->is_leaf = curr->is_leaf; newNode->num_keys = ORDER/2; newNode->parent = curr->parent; newNode->next = curr->next; curr->num_keys = ORDER/2; curr->next = newNode; globalPointerIdx = 0; } __syncthreads(); newNodeLocal = newNode; __syncthreads(); if(inWholeIdx < (ORDER /2)) { newNode->keys[inWholeIdx] = curr->keys[ORDER/2 + inWholeIdx]; } if(!curr->is_leaf && inWholeIdx <= (ORDER /2)) { newNode->pointers[inWholeIdx] = curr->pointers[ORDER/2 + inWholeIdx]; } if(curr->parent->num_keys >= ORDER) split(curr, newNode); else addKey(curr->parent, newNodeLocal); } __device__ void addKey(node* curr, int val) { unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x; if(contains(curr, val)) return; if(inWholeIdx < curr->num_keys) tempKeys[inWholeIdx] = curr->keys[inWholeIdx]; if(inWholeIdx <= curr->num_keys) { if(inWholeIdx == 0) { if(val <= curr->keys[0]) { globalIdx = 0; } } else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) { if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx]) { globalIdx = inWholeIdx; } } else if(inWholeIdx == 
curr->num_keys) {
            if(val > curr->keys[curr->num_keys - 1]) {
                globalIdx = curr->num_keys;
            }
        }
    }
    __syncthreads();
    // Shift keys at/after the insertion slot right by one (from the staged
    // copy in tempKeys), then a single thread writes the new key and bumps
    // the count.  tempKeys / globalIdx are file-scope device globals
    // declared earlier in the file -- TODO confirm.
    if(inWholeIdx >= globalIdx && inWholeIdx < curr->num_keys)
        curr->keys[inWholeIdx+1] = tempKeys[inWholeIdx];
    __syncthreads();
    if(inWholeIdx == globalIdx)
        curr->keys[globalIdx] = val;
    __syncthreads();
    if(inWholeIdx == 0)
        curr->num_keys++;
}

// Insert val into the leaf located by find().
// NOTE(review): the assert requires the leaf to be non-full, which makes
// the split() branch unreachable while asserts are enabled -- confirm
// whether the assert is a leftover debugging aid.
__global__ void insertVal(int val)
{
    node* curr = find(val);
    __syncthreads();
    assert(curr->num_keys < ORDER -1);
    if(curr->num_keys < ORDER -1)
        addKey(curr, val);
    else
        split(curr, val);
}

// Stage an insertion of node1's first key into the parent of foundChild:
// report through full[0] whether that parent is full, snapshot its keys
// and child pointers into tempKeys/tempPointers, and compute globalIdx,
// the slot where the separator key belongs.
// foundChild / globalIdx / tempKeys / tempPointers are file-scope device
// globals declared earlier -- TODO confirm.
__global__ void copyNode(node* node1, int* full)
{
    unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    // NOTE(review): every thread performs this read-modify-write of the
    // global foundChild with no synchronization; if one thread stores the
    // parent before another loads, the later thread walks up two levels.
    // Looks like a data race -- verify against the host-side launch
    // configuration (may be launched with a single thread in practice).
    foundChild = foundChild->parent;
    node* curr = foundChild;
    int val = node1->keys[0];
    if(inWholeIdx == 0) {
        if(foundChild->num_keys == ORDER - 1)
            full[0] = 1;
        else
            full[0] = 0;
    }
    // One thread per slot copies the node's contents into the staging
    // buffers (num_keys keys, num_keys+1 child pointers for inner nodes).
    if(inWholeIdx <= curr->num_keys) {
        if(inWholeIdx < curr->num_keys)
            tempKeys[inWholeIdx] = curr->keys[inWholeIdx];
        if(!curr->is_leaf)
            tempPointers[inWholeIdx] = curr->pointers[inWholeIdx];
    }
    // Exactly one thread's predicate holds (keys are ordered), so exactly
    // one thread publishes the insertion slot into globalIdx.
    if(inWholeIdx <= curr->num_keys) {
        if(inWholeIdx == 0) {
            if(val <= curr->keys[0]) {
                globalIdx = 0;
            }
        }
        else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) {
            if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx]) {
                globalIdx = inWholeIdx;
            }
        }
        else if(inWholeIdx == curr->num_keys) {
            if(val > curr->keys[curr->num_keys - 1]) {
                globalIdx = curr->num_keys;
            }
        }
    }
    // If the parent itself is full, truncate it to ORDER/2 keys (its upper
    // half was presumably moved elsewhere) and continue splitting upward.
    if(inWholeIdx == 0 && foundChild->num_keys == ORDER - 1) {
        foundChild->num_keys = ORDER/2;
        foundChild = foundChild->parent;
    }
}

// Same staging as copyNode(node*, int*) but for a plain key insertion into
// foundChild itself (no parent walk, no upward propagation).
__global__ void copyNode(int val, int* full)
{
    unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    node* curr = foundChild;
    if(inWholeIdx == 0) {
        if(foundChild->num_keys == ORDER - 1)
            full[0] = 1;
        else
            full[0] = 0;
    }
    if(inWholeIdx <= curr->num_keys) {
        if(inWholeIdx < curr->num_keys)
            tempKeys[inWholeIdx] = curr->keys[inWholeIdx];
        if(!curr->is_leaf)
            tempPointers[inWholeIdx] = curr->pointers[inWholeIdx];
    }
    if(inWholeIdx <= curr->num_keys) {
        if(inWholeIdx == 0) {
            if(val <= curr->keys[0]) {
                globalIdx = 0;
            }
        }
        else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) {
            if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx]) {
                globalIdx = inWholeIdx;
            }
        }
        else if(inWholeIdx == curr->num_keys) {
            if(val > curr->keys[curr->num_keys - 1]) {
                globalIdx = curr->num_keys;
            }
        }
    }
    if(inWholeIdx == 0 && foundChild->num_keys == ORDER - 1) {
        foundChild->num_keys = ORDER/2;
    }
}

// Complete a staged key insertion into foundChild: shift the keys after
// globalIdx (reading the pre-shift values from tempKeys) and write val.
__global__ void addValue(int val)
{
    node* curr = foundChild;
    unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    if(inWholeIdx > globalIdx && inWholeIdx < curr->num_keys)
        curr->keys[inWholeIdx] = tempKeys[inWholeIdx - 1];
    if(inWholeIdx == globalIdx) {
        curr->keys[globalIdx] = val;
        curr->num_keys++;
        // NOTE(review): only keys are shifted here; pointers are not --
        // presumably this overload is used for leaves. Verify with callers.
    }
}

// Complete a staged child-pointer insertion into foundChild: link the new
// node nn at slot globalIdx and derive its separator key from nn's keys.
__global__ void addValue(node* nn)
{
    node* curr = foundChild;
    unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    if(inWholeIdx > globalIdx && inWholeIdx < curr->num_keys)
        curr->keys[inWholeIdx] = tempKeys[inWholeIdx - 1];
    if(inWholeIdx == globalIdx) {
        curr->pointers[globalIdx] = nn;
        curr->num_keys++;
        if(globalIdx == 0)
            curr->keys[0] = nn->keys[nn->num_keys-1];
        else
            curr->keys[globalIdx] = nn->keys[0];
    }
}

// Move the upper half of the staged buffers (tempKeys/tempPointers) into a
// freshly allocated sibling node during a split.
__global__ void copyToNewNode(node*& nnode)
{
    unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    if(inWholeIdx < (ORDER /2)) {
        nnode->keys[inWholeIdx] = tempKeys[ORDER/2 + inWholeIdx];
    }
    if(!nnode->is_leaf && inWholeIdx <= (ORDER /2)) {
        nnode->pointers[inWholeIdx] = tempPointers[ORDER/2 + inWholeIdx];
    }
}

// Parallel membership test inside one node: each thread checks one key and
// the hit (if any) is published through globalIdx.
// NOTE(review): globalIdx is never reset to 0 here, so the result is only
// meaningful if the caller cleared it beforehand -- verify at call sites.
__device__ int contains(node* curr, int val)
{
    unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    if(inWholeIdx < curr->num_keys) {
        if(curr->keys[inWholeIdx] == val)
            globalIdx = 1;
    }
    __syncthreads();
    return globalIdx;
}

// Cooperative root-to-leaf descent for a single key: at every level each
// thread tests one key interval; the single matching thread publishes the
// chosen child via the global globalCurr, and all threads follow it after
// the barrier. Returns the leaf that should contain val.
__device__ node* find(int val)
{
    unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    assert(root1);
    node* curr = root1;
    assert(curr);
    assert(!curr->is_leaf);
    __syncthreads();
    while(!curr->is_leaf) {
        if(inWholeIdx <= curr->num_keys) {
            if(inWholeIdx == 0) {
                assert(curr->keys[0]);
                if(val <= curr->keys[0]) {
                    assert(curr->pointers[0]);
                    globalCurr = curr->pointers[0];
                }
            }
            else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) {
                assert(curr->keys[inWholeIdx-1]);
                assert(curr->keys[inWholeIdx]);
                if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx]) {
                    assert(curr->pointers[inWholeIdx]);
                    globalCurr = curr->pointers[inWholeIdx];
                }
            }
            else if(inWholeIdx == curr->num_keys) {
                assert(curr->keys[curr->num_keys - 1]);
                if(val > curr->keys[curr->num_keys - 1]) {
                    assert(curr->pointers[inWholeIdx]);
                    globalCurr = curr->pointers[inWholeIdx];
                }
            }
        }
        __syncthreads();
        curr = globalCurr;
        __syncthreads();
    }
    return curr;
}

// Batched descent: block b walks the tree for values[b], threads within a
// block each test one key interval per level. Per-block child choices are
// published through the globalCurrs array (indexed by blockIdx.x).
// NOTE(review): __syncthreads() only synchronizes within a block, so the
// loop trip count may differ between blocks -- that is fine here because
// each block descends independently.
__device__ node* find(int* values, int len)
{
    //unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    unsigned inNodeIdx = threadIdx.x;
    unsigned nodeNo = blockIdx.x;
    int val;
    if(nodeNo < len)
        val = values[nodeNo];
    assert(root1);
    node* curr = root1;
    assert(curr);
    assert(!curr->is_leaf);
    __syncthreads();
    while(!curr->is_leaf) {
        if(inNodeIdx <= curr->num_keys && nodeNo < len) {
            if(inNodeIdx == 0) {
                assert(curr->keys[0]);
                if(val <= curr->keys[0]) {
                    assert(curr->pointers[0]);
                    globalCurrs[nodeNo] = curr->pointers[0];
                }
            }
            else if(inNodeIdx < curr->num_keys && inNodeIdx > 0) {
                assert(curr->keys[inNodeIdx-1]);
                assert(curr->keys[inNodeIdx]);
                if(curr->keys[inNodeIdx-1] < val && val <= curr->keys[inNodeIdx]) {
                    assert(curr->pointers[inNodeIdx]);
                    globalCurrs[nodeNo] = curr->pointers[inNodeIdx];
                }
            }
            else if(inNodeIdx == curr->num_keys) {
                assert(curr->keys[curr->num_keys - 1]);
                if(val > curr->keys[curr->num_keys - 1]) {
                    assert(curr->pointers[inNodeIdx]);
                    globalCurrs[nodeNo] = curr->pointers[inNodeIdx];
                }
            }
        }
        __syncthreads();
        assert(globalCurrs[nodeNo]);
        curr = globalCurrs[nodeNo];
        __syncthreads();
    }
    return curr;
}

// One level of a host-driven descent: result[0] encodes which cached node
// to start from (2 = root, 3 = globalCurr1, 4 = globalCurr2); the chosen
// child is cached back into the other slot and result[0] is doubled (to
// 6 or 8) once a leaf is reached, signalling the host to stop iterating.
__global__ void searchBetter(int val, int* result)
{
    unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    node* curr;
    switch(result[0]) {
        case 2: curr = root1; break;
        case 3: curr = globalCurr1; break;
        case 4: curr = globalCurr2; break;
        default: return;
    }
    assert(curr);
    assert(!curr->is_leaf);
    node* found = NULL;
    if(inWholeIdx <= curr->num_keys) {
        if(inWholeIdx == 0) {
            assert(curr->keys[0]);
            if(val <= curr->keys[0]) {
                assert(curr->pointers[0]);
                found = curr->pointers[0];
            }
        }
        else if(inWholeIdx < curr->num_keys && inWholeIdx > 0) {
            assert(curr->keys[inWholeIdx-1]);
            assert(curr->keys[inWholeIdx]);
            if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx]) {
                assert(curr->pointers[inWholeIdx]);
                found = curr->pointers[inWholeIdx];
            }
        }
        else if(inWholeIdx == curr->num_keys) {
            assert(curr->keys[curr->num_keys - 1]);
            if(val > curr->keys[curr->num_keys - 1]) {
                assert(curr->pointers[inWholeIdx]);
                found = curr->pointers[inWholeIdx];
            }
        }
    }
    // Only the single thread whose interval matched has found != NULL.
    if(found != NULL) {
        assert(found);
        if(result[0] == 2 || result[0] == 3) {
            globalCurr2 = found;
            result[0] = 4;
        }
        else if(result[0] == 4) {
            globalCurr1 = found;
            result[0] = 3;
        }
        if(found->is_leaf)
            result[0] = result[0] * 2;
    }
}

// Final step of the searchBetter protocol: scan the cached leaf (result[0]
// is 6 or 8 after the descent) and set result[0] = 1 on a hit; also stash
// the leaf in foundChild for a subsequent insert.
// NOTE(review): if result[0] is neither 6 nor 8, curr is used
// uninitialized by the assert -- confirm the host never launches this
// kernel in that state.
__global__ void containsBetter(int val, int* result)
{
    unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    node* curr;
    if(result[0] == 6)
        curr = globalCurr1;
    else if(result[0] == 8)
        curr = globalCurr2;
    assert(curr->is_leaf);
    if(inWholeIdx == 0)
        foundChild = curr;
    if(inWholeIdx < curr->num_keys) {
        if(curr->keys[inWholeIdx] == val)
            result[0] = 1;
    }
}

// Single-key search: descend to the leaf, then scan it in parallel.
// NOTE(review): every thread writes result[0] = 0 with no barrier before
// the hit is written, so a late zero-write can overwrite a hit -- looks
// race-prone; verify with the launch configuration.
__global__ void search(int val, int* result)
{
    unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    node* curr = find(val);
    result[0] = 0;
    if(inWholeIdx < curr->num_keys) {
        if(curr->keys[inWholeIdx] == val)
            result[0] = 1;
    }
}

// Batched search: block b resolves vals[b] via the batched find() and
// writes a 0/1 membership flag to results[b].
__global__ void search(int* vals, int* results, int len)
{
    //unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    unsigned inNodeIdx = threadIdx.x;
    unsigned nodeNo = blockIdx.x;
    node* curr = find(vals, len);
    if(nodeNo < len)
        results[nodeNo] = 0;
    if(nodeNo < len && inNodeIdx < curr->num_keys) {
        if(curr->keys[inNodeIdx] == vals[nodeNo])
            results[nodeNo] = 1;
    }
}

// Debug helper: expose the first key of the root's last child to the host.
__global__ void test(node*& root, int* input, int* result)
{
    unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
    if(inWholeIdx== 0) {
        //node* curr = root1;
        /*while(!curr->is_leaf) { curr = curr->pointers[2]; }*/
        result[0] = root1->pointers[root1->num_keys]->keys[0];
    }
}
ba03b9812257f80248e8c179e7133ad53db1fea9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2 -- Image Blurring (HIP port of the CUDA original).
//
// Pipeline: the interleaved RGBA image is split into three contiguous
// single-channel arrays (SoA layout), each channel is convolved with a
// square filter of width `filterWidth`, and the blurred channels are
// recombined with alpha forced to 255.
//
// checkCudaErrors() (from utils.h) wraps every runtime call so a failure
// surfaces immediately instead of silently corrupting later kernels.
#include "utils.h"

// Blur one color channel with a square filter centered on each pixel.
// Expects a 2D launch mapping one thread per pixel; threads outside the
// image do nothing, and neighbor reads are clamped to the image border.
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  // Filter is assumed square with odd width; radius = filterWidth/2.
  int filterRadius = filterWidth/2;
  int y = threadIdx.y+ blockIdx.y* blockDim.y;
  int x = threadIdx.x+ blockIdx.x* blockDim.x;
  if (y < numRows && x < numCols) {
    // Accumulate in float; convert to unsigned char only at the end.
    float weighted_sum = 0.0;
    for(int j=-filterRadius; j<=filterRadius; j++) {
      for(int i=-filterRadius; i<=filterRadius; i++) {
        float filter_val = filter[(j+filterRadius)*filterWidth + (i+filterRadius)];
        // Clamp out-of-bounds neighbors to the nearest edge pixel.
        int x_neighbor = min(max(x+i,0),numCols-1);
        int y_neighbor = min(max(y+j,0),numRows-1);
        float neighbor_val = (float)inputChannel[(y_neighbor*numCols) + x_neighbor];
        weighted_sum += neighbor_val*filter_val;
      }
    }
    outputChannel[(y*numCols) + x] = (unsigned char) (weighted_sum);
  }
}

// Split an interleaved uchar4 RGBA image into three single-channel arrays
// (alpha is discarded). One thread per pixel; out-of-image threads exit.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  int y = threadIdx.y+ blockIdx.y* blockDim.y;
  int x = threadIdx.x+ blockIdx.x* blockDim.x;
  if (y < numRows && x < numCols) {
    int index = numCols*y +x;
    uchar4 color = inputImageRGBA[index];
    redChannel[index] = color.x;
    greenChannel[index] = color.y;
    blueChannel[index] = color.z;
  }
}

// Recombine three single-channel arrays into one RGBA image with alpha
// forced to 255 (fully opaque). Provided by the course scaffolding.
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
                                        blockIdx.y * blockDim.y + threadIdx.y);

  const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;

  // Make sure we don't try and access memory outside the image
  // by having any threads mapped there return early.
  if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
    return;

  unsigned char red   = redChannel[thread_1D_pos];
  unsigned char green = greenChannel[thread_1D_pos];
  unsigned char blue  = blueChannel[thread_1D_pos];

  uchar4 outputPixel = make_uchar4(red, green, blue, 255);

  outputImageRGBA[thread_1D_pos] = outputPixel;
}

// Device-side buffers shared between allocateMemoryAndCopyToGPU(),
// your_gaussian_blur() and cleanup().
unsigned char *d_red, *d_green, *d_blue;
float         *d_filter;

// Allocate the per-channel device buffers and upload the filter weights.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  // One byte per pixel per channel.
  checkCudaErrors(hipMalloc(&d_red,   sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(hipMalloc(&d_blue,  sizeof(unsigned char) * numRowsImage * numColsImage));

  // filterWidth x filterWidth float weights.
  checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
  checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}

// Full blur pipeline: separate channels, blur each one, recombine.
// Synchronizes and checks for errors after every launch so a failing
// kernel is pinpointed immediately.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  // 32x32 = 1024 threads per block; one thread per pixel.
  int blockWidth = 32;
  const dim3 blockSize(blockWidth, blockWidth, 1);

  // NOTE(review): this rounding allocates one extra (idle) row/column of
  // blocks when the dimension divides evenly; harmless because every
  // kernel bounds-checks, but ceil-division would be tighter.
  int blocksX = numCols/blockWidth+1;
  int blocksY = numRows/blockWidth+1;
  const dim3 gridSize(blocksX,blocksY,1);

  hipLaunchKernelGGL(( separateChannels), dim3(gridSize),dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());

  // Blur each channel independently with the same filter.
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());

  // Recombine the blurred channels into the output RGBA image.
  hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
                                             d_greenBlurred,
                                             d_blueBlurred,
                                             d_outputImageRGBA,
                                             numRows,
                                             numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}

// Free every device buffer allocated in allocateMemoryAndCopyToGPU().
void cleanup() {
  checkCudaErrors(hipFree(d_red));
  checkCudaErrors(hipFree(d_green));
  checkCudaErrors(hipFree(d_blue));
  checkCudaErrors(hipFree(d_filter));
}
ba03b9812257f80248e8c179e7133ad53db1fea9.cu
// Homework 2 -- Image Blurring (original CUDA version).
//
// Pipeline: the interleaved RGBA image is split into three contiguous
// single-channel arrays (SoA layout), each channel is convolved with a
// square filter of width `filterWidth`, and the blurred channels are
// recombined with alpha forced to 255.
//
// checkCudaErrors() (from utils.h) wraps every runtime call so a failure
// surfaces immediately instead of silently corrupting later kernels.
#include "utils.h"

// Blur one color channel with a square filter centered on each pixel.
// Expects a 2D launch mapping one thread per pixel; threads outside the
// image do nothing, and neighbor reads are clamped to the image border.
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  // Filter is assumed square with odd width; radius = filterWidth/2.
  int filterRadius = filterWidth/2;
  int y = threadIdx.y+ blockIdx.y* blockDim.y;
  int x = threadIdx.x+ blockIdx.x* blockDim.x;
  if (y < numRows && x < numCols) {
    // Accumulate in float; convert to unsigned char only at the end.
    float weighted_sum = 0.0;
    for(int j=-filterRadius; j<=filterRadius; j++) {
      for(int i=-filterRadius; i<=filterRadius; i++) {
        float filter_val = filter[(j+filterRadius)*filterWidth + (i+filterRadius)];
        // Clamp out-of-bounds neighbors to the nearest edge pixel.
        int x_neighbor = min(max(x+i,0),numCols-1);
        int y_neighbor = min(max(y+j,0),numRows-1);
        float neighbor_val = (float)inputChannel[(y_neighbor*numCols) + x_neighbor];
        weighted_sum += neighbor_val*filter_val;
      }
    }
    outputChannel[(y*numCols) + x] = (unsigned char) (weighted_sum);
  }
}

// Split an interleaved uchar4 RGBA image into three single-channel arrays
// (alpha is discarded). One thread per pixel; out-of-image threads exit.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  int y = threadIdx.y+ blockIdx.y* blockDim.y;
  int x = threadIdx.x+ blockIdx.x* blockDim.x;
  if (y < numRows && x < numCols) {
    int index = numCols*y +x;
    uchar4 color = inputImageRGBA[index];
    redChannel[index] = color.x;
    greenChannel[index] = color.y;
    blueChannel[index] = color.z;
  }
}

// Recombine three single-channel arrays into one RGBA image with alpha
// forced to 255 (fully opaque). Provided by the course scaffolding.
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
                                        blockIdx.y * blockDim.y + threadIdx.y);

  const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;

  // Make sure we don't try and access memory outside the image
  // by having any threads mapped there return early.
  if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
    return;

  unsigned char red   = redChannel[thread_1D_pos];
  unsigned char green = greenChannel[thread_1D_pos];
  unsigned char blue  = blueChannel[thread_1D_pos];

  uchar4 outputPixel = make_uchar4(red, green, blue, 255);

  outputImageRGBA[thread_1D_pos] = outputPixel;
}

// Device-side buffers shared between allocateMemoryAndCopyToGPU(),
// your_gaussian_blur() and cleanup().
unsigned char *d_red, *d_green, *d_blue;
float         *d_filter;

// Allocate the per-channel device buffers and upload the filter weights.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  // One byte per pixel per channel.
  checkCudaErrors(cudaMalloc(&d_red,   sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(cudaMalloc(&d_blue,  sizeof(unsigned char) * numRowsImage * numColsImage));

  // filterWidth x filterWidth float weights.
  checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}

// Full blur pipeline: separate channels, blur each one, recombine.
// Synchronizes and checks for errors after every launch so a failing
// kernel is pinpointed immediately.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  // 32x32 = 1024 threads per block; one thread per pixel.
  int blockWidth = 32;
  const dim3 blockSize(blockWidth, blockWidth, 1);

  // NOTE(review): this rounding allocates one extra (idle) row/column of
  // blocks when the dimension divides evenly; harmless because every
  // kernel bounds-checks, but ceil-division would be tighter.
  int blocksX = numCols/blockWidth+1;
  int blocksY = numRows/blockWidth+1;
  const dim3 gridSize(blocksX,blocksY,1);

  separateChannels<<<gridSize,blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  // Blur each channel independently with the same filter.
  gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  // Recombine the blurred channels into the output RGBA image.
  recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
                                             d_greenBlurred,
                                             d_blueBlurred,
                                             d_outputImageRGBA,
                                             numRows,
                                             numCols);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}

// Free every device buffer allocated in allocateMemoryAndCopyToGPU().
void cleanup() {
  checkCudaErrors(cudaFree(d_red));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_blue));
  checkCudaErrors(cudaFree(d_filter));
}
e4bedcae623b84128d4af13b719c10f40a17809c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Naive Poisson-cloning baseline: paste every masked target pixel directly
// over the background at offset (oy, ox).
// Expects a 2D launch with one thread per target pixel; images are stored
// as interleaved 3-channel float rows (width * 3 floats per row).
// Target pixels whose destination falls outside the background are skipped.
__global__ void SimpleClone(
    const float *background,
    const float *target,
    const int *mask,
    float *output,
    const int wb, const int hb, const int wt, const int ht,
    const int oy, const int ox
)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;

    // Threads outside the target image, or on unmasked pixels, do nothing.
    if (row >= ht || col >= wt)
        return;
    const int srcIdx = wt * row + col;
    if (!mask[srcIdx])
        return;

    // Destination coordinates in the background; skip out-of-bounds pastes.
    const int dstRow = oy + row;
    const int dstCol = ox + col;
    if (dstRow < 0 || dstRow >= hb || dstCol < 0 || dstCol >= wb)
        return;

    const int dstIdx = wb * dstRow + dstCol;
    for (int ch = 0; ch < 3; ++ch)
        output[dstIdx * 3 + ch] = target[srcIdx * 3 + ch];
}
e4bedcae623b84128d4af13b719c10f40a17809c.cu
#include "includes.h"

// Naive Poisson-cloning baseline: paste every masked target pixel directly
// over the background at offset (oy, ox).
// Expects a 2D launch with one thread per target pixel; images are stored
// as interleaved 3-channel float rows (width * 3 floats per row).
// Target pixels whose destination falls outside the background are skipped.
__global__ void SimpleClone(
    const float *background,
    const float *target,
    const int *mask,
    float *output,
    const int wb, const int hb, const int wt, const int ht,
    const int oy, const int ox
)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;

    // Threads outside the target image, or on unmasked pixels, do nothing.
    if (row >= ht || col >= wt)
        return;
    const int srcIdx = wt * row + col;
    if (!mask[srcIdx])
        return;

    // Destination coordinates in the background; skip out-of-bounds pastes.
    const int dstRow = oy + row;
    const int dstCol = ox + col;
    if (dstRow < 0 || dstRow >= hb || dstCol < 0 || dstCol >= wb)
        return;

    const int dstIdx = wb * dstRow + dstCol;
    for (int ch = 0; ch < 3; ++ch)
        output[dstIdx * 3 + ch] = target[srcIdx * 3 + ch];
}
b598d0e8ab0e0b96ae01058dd73cdf60161972bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) typedef struct { int width; int height; float *elements; } Matrix; // Thread block size #define BLOCK_SIZE 16 // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { hipSetDevice(0); hipDeviceSynchronize(); size_t available, total; hipMemGetInfo(&available, &total); // printf("Mem total: %ld Bytes\nMem available: %ld Bytes\n", available, total); // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); // printf("size of A: %ld\n", size); hipMalloc(&d_A.elements, size); hipError_t error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr, "ERROR: allocation A %s\n", hipGetErrorString(error)); exit(-1); } hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); hipMalloc(&d_B.elements, size); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr, "ERROR: allocation B %s\n", hipGetErrorString(error)); exit(-1); } hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); error = hipGetLastError(); hipMalloc(&d_C.elements, size); if (error != hipSuccess) { fprintf(stderr, "ERROR: allocation C %s\n", hipGetErrorString(error)); exit(-1); } // Invoke kernel dim3 dimBlock(A.width * B.height,1,1); dim3 dimGrid(1, 1, 1); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 
0, d_A, d_B, d_C); hipDeviceSynchronize(); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr, "ERROR: calculation error %s\n", hipGetErrorString(error)); exit(-1); } // Read C from device memory hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost); if (error != hipSuccess) { fprintf(stderr, "ERROR: copying C %s\n", hipGetErrorString(error)); exit(-1); } // Free device memory hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; const unsigned int tid = threadIdx.x; const unsigned int col = tid % A.width; const unsigned int row = tid / A.width; for (int i = 0; i < A.width; i++){ Cvalue += A.elements[row * A.width + i] * B.elements[B.width * i + col]; } C.elements[tid] = Cvalue; } int myrand() { return rand() / (RAND_MAX / 10); } int main() { // A x B srand(0); Matrix A, B, C; A.height = 1 * BLOCK_SIZE; A.width = 1 * BLOCK_SIZE; // hB = wA B.height = A.width; B.width = 1 * BLOCK_SIZE; C.height = A.height; // hC = hA C.width = B.width; // wC = wB A.elements = (float *)malloc(A.height * A.width * sizeof(float)); B.elements = (float *)malloc(B.height * B.width * sizeof(float)); C.elements = (float *)malloc(C.height * C.width * sizeof(float)); printf("Content of A: \n"); for (int i = 0; i < A.height; i++) { for (int j = 0; j < A.width; j++) { A.elements[i * A.height + j] = myrand(); printf("%2d", (int)A.elements[i * A.height + j]); } printf("\n"); } printf("\n\nContent of B: \n"); for (int i = 0; i < B.height; i++) { for (int j = 0; j < B.width; j++) { B.elements[i * B.height + j] = myrand(); printf("%2d", (int)B.elements[i * B.height + j]); } printf("\n"); } MatMul(A, B, C); printf("\n\nContent of C: \n"); for (int i = 0; i < C.height; i++) { for (int j = 0; j < C.width; j++) { printf("%4d", (int)C.elements[i * 
C.height + j]); } printf("\n"); } return 0; }
b598d0e8ab0e0b96ae01058dd73cdf60161972bf.cu
#include <stdio.h> #include <stdlib.h> // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) typedef struct { int width; int height; float *elements; } Matrix; // Thread block size #define BLOCK_SIZE 16 // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { cudaSetDevice(0); cudaDeviceSynchronize(); size_t available, total; cudaMemGetInfo(&available, &total); // printf("Mem total: %ld Bytes\nMem available: %ld Bytes\n", available, total); // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); // printf("size of A: %ld\n", size); cudaMalloc(&d_A.elements, size); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr, "ERROR: allocation A %s\n", cudaGetErrorString(error)); exit(-1); } cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); cudaMalloc(&d_B.elements, size); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr, "ERROR: allocation B %s\n", cudaGetErrorString(error)); exit(-1); } cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); error = cudaGetLastError(); cudaMalloc(&d_C.elements, size); if (error != cudaSuccess) { fprintf(stderr, "ERROR: allocation C %s\n", cudaGetErrorString(error)); exit(-1); } // Invoke kernel dim3 dimBlock(A.width * B.height,1,1); dim3 dimGrid(1, 1, 1); MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); cudaDeviceSynchronize(); error = cudaGetLastError(); if (error != cudaSuccess) { 
fprintf(stderr, "ERROR: calculation error %s\n", cudaGetErrorString(error)); exit(-1); } // Read C from device memory cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { fprintf(stderr, "ERROR: copying C %s\n", cudaGetErrorString(error)); exit(-1); } // Free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; const unsigned int tid = threadIdx.x; const unsigned int col = tid % A.width; const unsigned int row = tid / A.width; for (int i = 0; i < A.width; i++){ Cvalue += A.elements[row * A.width + i] * B.elements[B.width * i + col]; } C.elements[tid] = Cvalue; } int myrand() { return rand() / (RAND_MAX / 10); } int main() { // A x B srand(0); Matrix A, B, C; A.height = 1 * BLOCK_SIZE; A.width = 1 * BLOCK_SIZE; // hB = wA B.height = A.width; B.width = 1 * BLOCK_SIZE; C.height = A.height; // hC = hA C.width = B.width; // wC = wB A.elements = (float *)malloc(A.height * A.width * sizeof(float)); B.elements = (float *)malloc(B.height * B.width * sizeof(float)); C.elements = (float *)malloc(C.height * C.width * sizeof(float)); printf("Content of A: \n"); for (int i = 0; i < A.height; i++) { for (int j = 0; j < A.width; j++) { A.elements[i * A.height + j] = myrand(); printf("%2d", (int)A.elements[i * A.height + j]); } printf("\n"); } printf("\n\nContent of B: \n"); for (int i = 0; i < B.height; i++) { for (int j = 0; j < B.width; j++) { B.elements[i * B.height + j] = myrand(); printf("%2d", (int)B.elements[i * B.height + j]); } printf("\n"); } MatMul(A, B, C); printf("\n\nContent of C: \n"); for (int i = 0; i < C.height; i++) { for (int j = 0; j < C.width; j++) { printf("%4d", (int)C.elements[i * C.height + j]); } printf("\n"); } return 0; }
0a2f6a759390e4d9c207879a2c4bf6ceedd05283.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void scan(float * input, float * output, int len) { //@@ Load a segment of the input vector into shared memory __shared__ float sh_input[2048]; int tx = threadIdx.x; int tx2= tx + blockDim.x; int bdimx = blockDim.x; int i = 2*blockIdx.x*blockDim.x + tx; int start = 2*blockIdx.x*blockDim.x; int Col1 = start + tx; int Col2 = start + bdimx + tx; if( Col2 < len) { // Collaborative loading of A sh_input[tx] = input[ Col1]; sh_input[tx2] = input[ Col2]; } else if ( Col1 < len) { // Control divergence at the edge sh_input[tx] = input[ Col1]; sh_input[tx2]= 0.0f; } else { // Control divergence at the edge sh_input[tx] = 0.0f; sh_input[tx2]= 0.0f; } __syncthreads(); //output[Col1] = sh_input[tx]; output[Col2] = sh_input[tx2]; unsigned int stride; int index; // @@ Traverse the reduction tree down for (stride = 1;stride <= 2*bdimx ; stride *= 2) { index = (tx +1)* stride*2 -1; if (index < 2*bdimx) sh_input[index] += sh_input[index-stride]; __syncthreads(); } //@@ Traverse the reduction tree up for ( stride = bdimx/2; stride > 0; stride/=2) { __syncthreads(); index = (tx +1)* stride*2 -1; if (index + stride < 2*bdimx) sh_input[index+stride] += sh_input[index]; } //@@ Write the computed sum of the block to the output vector at the //@@ correct index __syncthreads(); output[i] = sh_input[tx]; if ( i + bdimx < len) { output[i + bdimx] = sh_input[tx2]; } }
0a2f6a759390e4d9c207879a2c4bf6ceedd05283.cu
#include "includes.h" __global__ void scan(float * input, float * output, int len) { //@@ Load a segment of the input vector into shared memory __shared__ float sh_input[2048]; int tx = threadIdx.x; int tx2= tx + blockDim.x; int bdimx = blockDim.x; int i = 2*blockIdx.x*blockDim.x + tx; int start = 2*blockIdx.x*blockDim.x; int Col1 = start + tx; int Col2 = start + bdimx + tx; if( Col2 < len) { // Collaborative loading of A sh_input[tx] = input[ Col1]; sh_input[tx2] = input[ Col2]; } else if ( Col1 < len) { // Control divergence at the edge sh_input[tx] = input[ Col1]; sh_input[tx2]= 0.0f; } else { // Control divergence at the edge sh_input[tx] = 0.0f; sh_input[tx2]= 0.0f; } __syncthreads(); //output[Col1] = sh_input[tx]; output[Col2] = sh_input[tx2]; unsigned int stride; int index; // @@ Traverse the reduction tree down for (stride = 1;stride <= 2*bdimx ; stride *= 2) { index = (tx +1)* stride*2 -1; if (index < 2*bdimx) sh_input[index] += sh_input[index-stride]; __syncthreads(); } //@@ Traverse the reduction tree up for ( stride = bdimx/2; stride > 0; stride/=2) { __syncthreads(); index = (tx +1)* stride*2 -1; if (index + stride < 2*bdimx) sh_input[index+stride] += sh_input[index]; } //@@ Write the computed sum of the block to the output vector at the //@@ correct index __syncthreads(); output[i] = sh_input[tx]; if ( i + bdimx < len) { output[i + bdimx] = sh_input[tx2]; } }
4416120237e32b179d42e934f9a96633423da0ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/scale_blobs_op.h" namespace caffe2 { template <typename T> __global__ void ScaleBlobsCUDAKernel( const float scale, const int numBlobs, const int* sizeArr, T** X, T** Y) { for (size_t i = 0; i < numBlobs; ++i) { CUDA_1D_KERNEL_LOOP(j, sizeArr[i]) { Y[i][j] = X[i][j] * scale; } } } template <typename T> __global__ void ScaleBlobsCUDAKernelManyTensors( const float scale, const int* sizeArr, T** X, T** Y) { for (size_t i = threadIdx.x; i < sizeArr[blockIdx.x]; i += blockDim.x) { Y[blockIdx.x][i] = X[blockIdx.x][i] * scale; } } template <> template <typename T> bool ScaleBlobsOp<CUDAContext>::DoRunWithType() { const int numBlobs = InputSize(); ReinitializeTensor(&hostBlobSizes_, {numBlobs}, at::dtype<int>().device(CPU)); int* hostBlobSizesData = hostBlobSizes_.mutable_data<int>(); ReinitializeTensor(&hostInputs_, {numBlobs}, at::dtype<T*>().device(CPU)); T** hostInputsData = hostInputs_.mutable_data<T*>(); ReinitializeTensor(&hostOutputs_, {numBlobs}, at::dtype<T*>().device(CPU)); T** hostOutputsData = hostOutputs_.mutable_data<T*>(); int totalSize = 0; int maxSize = 0; for (int i = 0; i < numBlobs; ++i) { hostBlobSizesData[i] = Input(i).numel(); totalSize += hostBlobSizesData[i]; maxSize = max(maxSize, hostBlobSizesData[i]); hostInputsData[i] = Input(i).template data<T>(); hostOutputsData[i] = Output(i)->template mutable_data<T>(); } ReinitializeTensor(&inputs_, {numBlobs}, at::dtype<T*>().device(CUDA)); ReinitializeTensor(&outputs_, {numBlobs}, at::dtype<T*>().device(CUDA)); ReinitializeTensor(&blobSizes_, {numBlobs}, at::dtype<T*>().device(CUDA)); blobSizes_.CopyFrom(hostBlobSizes_); inputs_.CopyFrom(hostInputs_); outputs_.CopyFrom(hostOutputs_); // Select which kernel to launch based on the length of the tensors // The first one performs better when there are many tensors of short length // The second one is better 
when there are small number of long tensors if (numBlobs > CAFFE_GET_BLOCKS(maxSize)) { // Note: number of blocks has to be equal to the numBlobs hipLaunchKernelGGL(( ScaleBlobsCUDAKernelManyTensors<T>) , dim3(numBlobs), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), scale_, blobSizes_.data<int>(), inputs_.mutable_data<T*>(), outputs_.mutable_data<T*>()); } else { hipLaunchKernelGGL(( ScaleBlobsCUDAKernel<T>) , dim3(CAFFE_GET_BLOCKS(maxSize)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), scale_, numBlobs, blobSizes_.data<int>(), inputs_.mutable_data<T*>(), outputs_.mutable_data<T*>()); } return true; } template <> bool ScaleBlobsOp<CUDAContext>::RunOnDevice() { for (int i = 0; i < InputSize(); ++i) { auto& input = this->template Input<Tensor>(i, CUDA); auto* output = this->template Output<Tensor>(i, CUDA); output->ResizeLike(input); } return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0)); } REGISTER_CUDA_OPERATOR(ScaleBlobs, ScaleBlobsOp<CUDAContext>); /* * Implementation of a different version of the kernel * This balances the work per thread and could be useful * when there is a high imbalance between tensors * However the memory requirement is very high so it does * not perform well for common scenarios * * * Additional storage for the start pointers is required * for ScaleBlobsCUDAKernelBalanced setup * int threadsPerBlock = CAFFE_CUDA_NUM_THREADS; int coorArrSize = 2 * ((totalSize - 1) / threadsPerBlock + 1); int startCoorArr[coorArrSize]; int* dStartCoorArr; int j = 0, cur = 0, elemsLeftInRow = 0; for (int i = 0; i < numBlobs; ++i) { if (i == 0) { startCoorArr[cur++] = i; startCoorArr[cur++] = j; elemsLeftInRow = 0; } while (j < sizeArr[i]) { j += threadsPerBlock - elemsLeftInRow; if (j < sizeArr[i]) { startCoorArr[cur++] = i; startCoorArr[cur++] = j; elemsLeftInRow = 0; } else { elemsLeftInRow = sizeArr[i] - j + threadsPerBlock; j = 0; break; } } } hipMalloc(&dStartCoorArr, sizeof(int) * coorArrSize); 
hipMemcpy(dStartCoorArr, startCoorArr, sizeof(int) * coorArrSize, hipMemcpyHostToDevice); // ScaleBlobsCUDAKernelBalanced kernel launch ScaleBlobsCUDAKernelBalanced<T> <<<(totalSize-1)/CAFFE_CUDA_NUM_THREADS+1, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( scale_, numBlobs, coorArrSize, dStartCoorArr, dSizeArr, dInputArr, dOutputArr); hipFree(dStartCoorArr); */ template <typename T> __global__ void ScaleBlobsCUDAKernelBalanced( const float scale, const int numBlobs, const int coorArrSize, const int* coorArr, const int* sizeArr, T** X, T** Y) { int i = coorArr[2 * blockIdx.x + 1] + threadIdx.x; int curTen = coorArr[2 * blockIdx.x]; while (curTen < numBlobs && i >= sizeArr[curTen]) { i -= sizeArr[curTen++]; } if (curTen < numBlobs) { Y[curTen][i] = X[curTen][i] * scale; } } } // namespace caffe2
4416120237e32b179d42e934f9a96633423da0ce.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/scale_blobs_op.h" namespace caffe2 { template <typename T> __global__ void ScaleBlobsCUDAKernel( const float scale, const int numBlobs, const int* sizeArr, T** X, T** Y) { for (size_t i = 0; i < numBlobs; ++i) { CUDA_1D_KERNEL_LOOP(j, sizeArr[i]) { Y[i][j] = X[i][j] * scale; } } } template <typename T> __global__ void ScaleBlobsCUDAKernelManyTensors( const float scale, const int* sizeArr, T** X, T** Y) { for (size_t i = threadIdx.x; i < sizeArr[blockIdx.x]; i += blockDim.x) { Y[blockIdx.x][i] = X[blockIdx.x][i] * scale; } } template <> template <typename T> bool ScaleBlobsOp<CUDAContext>::DoRunWithType() { const int numBlobs = InputSize(); ReinitializeTensor(&hostBlobSizes_, {numBlobs}, at::dtype<int>().device(CPU)); int* hostBlobSizesData = hostBlobSizes_.mutable_data<int>(); ReinitializeTensor(&hostInputs_, {numBlobs}, at::dtype<T*>().device(CPU)); T** hostInputsData = hostInputs_.mutable_data<T*>(); ReinitializeTensor(&hostOutputs_, {numBlobs}, at::dtype<T*>().device(CPU)); T** hostOutputsData = hostOutputs_.mutable_data<T*>(); int totalSize = 0; int maxSize = 0; for (int i = 0; i < numBlobs; ++i) { hostBlobSizesData[i] = Input(i).numel(); totalSize += hostBlobSizesData[i]; maxSize = max(maxSize, hostBlobSizesData[i]); hostInputsData[i] = Input(i).template data<T>(); hostOutputsData[i] = Output(i)->template mutable_data<T>(); } ReinitializeTensor(&inputs_, {numBlobs}, at::dtype<T*>().device(CUDA)); ReinitializeTensor(&outputs_, {numBlobs}, at::dtype<T*>().device(CUDA)); ReinitializeTensor(&blobSizes_, {numBlobs}, at::dtype<T*>().device(CUDA)); blobSizes_.CopyFrom(hostBlobSizes_); inputs_.CopyFrom(hostInputs_); outputs_.CopyFrom(hostOutputs_); // Select which kernel to launch based on the length of the tensors // The first one performs better when there are many tensors of short length // The second one is better when there are small number of long tensors if (numBlobs > CAFFE_GET_BLOCKS(maxSize)) { 
// Note: number of blocks has to be equal to the numBlobs ScaleBlobsCUDAKernelManyTensors<T> <<<numBlobs, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( scale_, blobSizes_.data<int>(), inputs_.mutable_data<T*>(), outputs_.mutable_data<T*>()); } else { ScaleBlobsCUDAKernel<T> <<<CAFFE_GET_BLOCKS(maxSize), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( scale_, numBlobs, blobSizes_.data<int>(), inputs_.mutable_data<T*>(), outputs_.mutable_data<T*>()); } return true; } template <> bool ScaleBlobsOp<CUDAContext>::RunOnDevice() { for (int i = 0; i < InputSize(); ++i) { auto& input = this->template Input<Tensor>(i, CUDA); auto* output = this->template Output<Tensor>(i, CUDA); output->ResizeLike(input); } return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0)); } REGISTER_CUDA_OPERATOR(ScaleBlobs, ScaleBlobsOp<CUDAContext>); /* * Implementation of a different version of the kernel * This balances the work per thread and could be useful * when there is a high imbalance between tensors * However the memory requirement is very high so it does * not perform well for common scenarios * * * Additional storage for the start pointers is required * for ScaleBlobsCUDAKernelBalanced setup * int threadsPerBlock = CAFFE_CUDA_NUM_THREADS; int coorArrSize = 2 * ((totalSize - 1) / threadsPerBlock + 1); int startCoorArr[coorArrSize]; int* dStartCoorArr; int j = 0, cur = 0, elemsLeftInRow = 0; for (int i = 0; i < numBlobs; ++i) { if (i == 0) { startCoorArr[cur++] = i; startCoorArr[cur++] = j; elemsLeftInRow = 0; } while (j < sizeArr[i]) { j += threadsPerBlock - elemsLeftInRow; if (j < sizeArr[i]) { startCoorArr[cur++] = i; startCoorArr[cur++] = j; elemsLeftInRow = 0; } else { elemsLeftInRow = sizeArr[i] - j + threadsPerBlock; j = 0; break; } } } cudaMalloc(&dStartCoorArr, sizeof(int) * coorArrSize); cudaMemcpy(dStartCoorArr, startCoorArr, sizeof(int) * coorArrSize, cudaMemcpyHostToDevice); // ScaleBlobsCUDAKernelBalanced kernel launch 
ScaleBlobsCUDAKernelBalanced<T> <<<(totalSize-1)/CAFFE_CUDA_NUM_THREADS+1, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( scale_, numBlobs, coorArrSize, dStartCoorArr, dSizeArr, dInputArr, dOutputArr); cudaFree(dStartCoorArr); */ template <typename T> __global__ void ScaleBlobsCUDAKernelBalanced( const float scale, const int numBlobs, const int coorArrSize, const int* coorArr, const int* sizeArr, T** X, T** Y) { int i = coorArr[2 * blockIdx.x + 1] + threadIdx.x; int curTen = coorArr[2 * blockIdx.x]; while (curTen < numBlobs && i >= sizeArr[curTen]) { i -= sizeArr[curTen++]; } if (curTen < numBlobs) { Y[curTen][i] = X[curTen][i] * scale; } } } // namespace caffe2
a5861b308fb45d72eabfe4f9aa1fd6e54a147519.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void kMartixByMatrixElementwise(const int nThreads, const float *m1, const float *m2, float *output) { /* Computes the product of two arrays (elementwise multiplication). Inputs: m1: array m2: array output: array,the results of the multiplication are to be stored here */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = m1[i] * m2[i]; } } __device__ float* dMartixByMatrixElementwise(const float *m1, const float *m2, float *output, const int width, const int height) { hipLaunchKernelGGL(( kMartixByMatrixElementwise) , dim3(width), dim3(height) , 0, 0, width * height, m1, m2, output ); hipDeviceSynchronize(); return output; } __global__ void kMartixSubstractMatrix(const int nThreads, const float *m1, const float *m2, float *output) { /* Computes the (elementwise) difference between two arrays Inputs: m1: array m2: array output: array,the results of the computation are to be stored here */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = m1[i] - m2[i]; } } __device__ float* dMartixSubstractMatrix(const float *m1, const float *m2, float *output, const int width, const int height) { hipLaunchKernelGGL(( kMartixSubstractMatrix) , dim3(width), dim3(height) , 0, 0, width * height, m1, m2, output ); hipDeviceSynchronize(); return output; } __global__ void kSigmoid(const int nThreads, float const *input, float *output) { /* Computes the value of the sigmoid function f(x) = 1/(1 + e^-x). 
Inputs: input: array output: array, the results of the computation are to be stored here */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = 1.0 / (1.0 + ::exp(-input[i])); } } __device__ void dSigmoid(float const *input, float *output, const int height, const int width) { hipLaunchKernelGGL(( kSigmoid) , dim3(height), dim3(width) , 0, 0, height * width, input, output); hipDeviceSynchronize(); } __global__ void kSigmoid_d(const int nThreads, float const *input, float *output) { /* Computes the value of the sigmoid function derivative f'(x) = f(x)(1 f(x)), where f(x) is sigmoid function. Inputs: input: array output: array, the results of the computation are to be stored here: x(1 x) for every element of the input matrix m1. */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = input[i] * (1 - input[i]); } } __device__ float* dSigmoid_d(float const *input, float *output, const int rows, const int columns){ hipLaunchKernelGGL(( kSigmoid_d) , dim3(rows), dim3(columns) , 0, 0, rows*columns, input, output); hipDeviceSynchronize(); return output; } __global__ void kDot(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_columns ){ /* Computes the product of two matrices: m1 x m2. 
Inputs: m1: array, left matrix of size m1_rows x m1_columns m2: array, right matrix of size m1_columns x m2_columns (the number of rows in the right matrix must be equal to the number of the columns in the left one) output: array, the results of the computation are to be stored here: m1 * m2, product of two arrays m1 and m2, a matrix of size m1_rows x m2_columns m1_rows: int, number of rows in the left matrix m1 m1_columns: int, number of columns in the left matrix m1 m2_columns: int, number of columns in the right matrix m2 */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { int r = (int)i / m2_columns; int c = i % m2_columns; float t_output = 0.f; for( int k = 0; k < m1_columns; ++k ) { t_output += m1[ r * m1_columns + k ] * m2[ k * m2_columns + c]; } output[i] = t_output; } } __device__ float* dDot(const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_columns ) { hipLaunchKernelGGL(( kDot) , dim3(m1_rows), dim3(m2_columns) , 0, 0, m1_rows * m2_columns, m1, m2, output, m1_rows , m1_columns, m2_columns ); hipDeviceSynchronize(); return output; } __global__ void kDot_m1_m2T(const int nThreads, const float *m1, const float *m2, float *output, const int m1_columns, const int m2_rows ) { /* Updates the output matrix with the product of two matrices: m1 and m2 transposed. 
Inputs: m1: array, left matrix of size m1_rows x m1_columns m2: array, right matrix of size m2_rows x m1_columns (m2 transposed will be of size m1_columns x m2_rows) output: array, the results of the computation are to be stored here: m1 * m2, product of two arrays m1 and m2, a matrix of size m1_rows x m2_rows m1_columns: int, number of columns in the left matrix m1 m2_rows: int, number of rows in the left matrix m2 */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { int r = (int)i / m2_rows; int c = i % m2_rows; float t_output = 0.0; int id_T; for( int k = 0; k < m1_columns; ++k ) { id_T = c * m1_columns + k; t_output += m1[ r * m1_columns + k ] * m2[ id_T ]; } output[i] = t_output; } } __device__ float* dDot_m1_m2T(const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_rows ) { hipLaunchKernelGGL(( kDot_m1_m2T) , dim3(m1_rows), dim3(m2_rows) , 0, 0, m1_rows * m2_rows, m1, m2, output, m1_columns, m2_rows ); hipDeviceSynchronize(); return output; } __global__ void kDot_m1T_m2(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows, const int m1_columns, const int m2_columns ) { /* Increments the output matrix with the product of two matrices: m1 transposed and m2. 
Inputs: m1: array, left matrix of size m1_rows x m1_columns (m1 transposed will be of size m1_columns x m1_rows) m2: array, right matrix of size m1_rows x m2_columns output: array, the results of the computation are to be stored here: m1 * m2, product of two arrays m1 and m2, a matrix of size m1_columns x m2_columns m1_rows: int, number of rows in the left matrix m1 m1_columns: int, number of columns in the left matrix m1 m2_rows: int, number of rows in the left matrix m2 */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { int r = (int)i / m2_columns; int c = i % m2_columns; int id_T; float t_output = 0.0; for( int k = 0; k < m1_rows; ++k ) { id_T = k * m1_columns + r; t_output += m1[ id_T ] * m2[ k * m2_columns + c ]; } output[i] += t_output; } } __device__ void dDot_m1T_m2(const float *m1, const float *m2, float *output, const int m1_height , const int m1_width, const int m2_width ) { hipLaunchKernelGGL(( kDot_m1T_m2) , dim3(m1_width), dim3(m2_width) , 0, 0, m1_width * m2_width, m1, m2, output, m1_height, m1_width, m2_width ); hipDeviceSynchronize(); } __device__ void kPrintMatrix (const float* M, int h, int w) { /* Prints out the input array as h x w matrix. 
Inputs: m: vector, matrix of size n_rows x n_columns h: int, number of rows in the matrix M w: int, number of columns in the matrix M */ for (int i = 0; i < h; i++){ for (int j = 0; j < w; j++){ printf("%f ", M[i*w+j]); } printf("\n"); } printf("\n"); } __global__ void kFit( const float* X, const int X_w, const int X_h, const float* y, const int y_w, float* l1, const int l1_w, float* l_1_d, float* pred, float* pred_d, float* W0, float* W1, float* buffer ) { for (unsigned i = 0; i < 50; ++i) { dSigmoid(dDot(X, W0, l1, X_h, X_w, l1_w), l1, X_h, l1_w); dSigmoid(dDot(l1, W1, pred, X_h, l1_w, y_w), pred, X_h, y_w); dMartixByMatrixElementwise(dMartixSubstractMatrix(y, pred, pred_d, X_h, y_w), dSigmoid_d(pred, buffer, X_h, y_w), pred_d, X_h, y_w ); dMartixByMatrixElementwise(dDot_m1_m2T(pred_d, W1, l_1_d, X_h, y_w, l1_w), dSigmoid_d(l1, buffer, X_h, l1_w), l_1_d, X_h, l1_w); dDot_m1T_m2( l1, pred_d, W1, X_h, l1_w, y_w ); dDot_m1T_m2( X, l_1_d, W0, X_h, X_w, l1_w ); } } int main(void){ const int TRAINING_SIZE = 4; const int TRAINING_DIM = 4; const int L1_SIZE = 8; // X, the first 4 lines from Iris dataset float h_X[TRAINING_SIZE*TRAINING_DIM] = { 5.1, 3.5, 1.4, 0.2, 4.9, 3.0, 1.4, 0.2, 6.2, 3.4, 5.4, 2.3, 5.9, 3.0, 5.1, 1.8 }; const signed int X_size = sizeof(h_X); float *d_X; hipMalloc(&d_X, X_size); hipMemcpy(d_X, h_X, X_size, hipMemcpyHostToDevice); //WEIGHTS_0 const long signed int W0_size = L1_SIZE*TRAINING_DIM*sizeof(float); float *h_W0 = (float*)malloc(W0_size); for (int i = 0; i < L1_SIZE*TRAINING_DIM; i++){ h_W0[i] = 0.1 * (2.0*rand()/RAND_MAX-1.0); } float *d_W0; hipMalloc(&d_W0, W0_size); hipMemcpy(d_W0, h_W0, W0_size, hipMemcpyHostToDevice); //LAYER_1, LAYER_1_DELTA AND BUFFER OF LAYER 1 SIZE const long signed int L1_size = L1_SIZE*TRAINING_SIZE*sizeof(float); float* h_layer_1 = (float*)malloc(L1_size); float* h_layer_1_delta = (float*)malloc(L1_size); float* h_buffer = (float*)malloc(L1_size); for (int i = 0; i < L1_SIZE*TRAINING_SIZE; i++){ h_layer_1[i] = 
0.0; h_buffer[i] = 0.0; h_layer_1_delta[i] = 0.0; } float *d_layer_1; hipMalloc(&d_layer_1, L1_size); hipMemcpy(d_layer_1, h_layer_1, L1_size, hipMemcpyHostToDevice); float *d_buffer; hipMalloc(&d_buffer, L1_size); hipMemcpy(d_buffer, h_buffer, L1_size, hipMemcpyHostToDevice); float *d_layer_1_delta; hipMalloc(&d_layer_1_delta, L1_size); hipMemcpy(d_layer_1_delta, h_layer_1_delta, L1_size, hipMemcpyHostToDevice); //WEIGHTS_1 const long signed int W1_size = L1_SIZE*sizeof(float); float *h_W1 = (float*)malloc(W1_size); for (int i = 0; i < L1_SIZE; i++){ h_W1[i] = 0.1* (2.0*rand()/RAND_MAX-1.0); } float *d_W1; hipMalloc(&d_W1, W1_size); hipMemcpy(d_W1, h_W1, W1_size, hipMemcpyHostToDevice); //Y float h_y[4] = { 0, 0, 1, 1 }; const signed int y_size = sizeof(h_y); float *d_y; hipMalloc(&d_y, y_size); hipMemcpy(d_y, h_y, y_size, hipMemcpyHostToDevice); //PRED AND PRED_DELTA float* h_pred = (float*)malloc(y_size); float* h_pred_delta = (float*)malloc(y_size); for (int i = 0; i < TRAINING_SIZE; i++){ h_pred[i] = 0.0; h_pred_delta[i] = 0.0; } float *d_pred; hipMalloc(&d_pred, y_size); hipMemcpy(d_pred, h_pred, y_size, hipMemcpyHostToDevice); float *d_pred_delta; hipMalloc(&d_pred_delta, y_size); hipMemcpy(d_pred_delta, h_pred_delta, y_size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( kFit) , dim3(1), dim3(1) , 0, 0, d_X, TRAINING_DIM, TRAINING_SIZE, d_y, 1, d_layer_1, L1_SIZE, d_layer_1_delta, d_pred, d_pred_delta, d_W0, d_W1, d_buffer); hipMemcpy(h_pred, d_pred, y_size, hipMemcpyDeviceToHost); hipFree(d_pred); hipFree(d_X); hipFree(d_y); hipFree(d_layer_1_delta); hipFree(d_pred_delta); hipFree(d_W0); hipFree(d_W1); hipFree(d_buffer); free(h_layer_1_delta); free(h_pred_delta); free(h_W0); free(h_W1); free(h_buffer); for (int i = 0; i < TRAINING_SIZE; i++) { printf("Prediction[%i] : %f True Value[%i] : %f Error[%i] : %f\n", i, h_pred[i], i, h_y[i], i, h_pred[i] - h_y[i]); } free(h_pred); }
a5861b308fb45d72eabfe4f9aa1fd6e54a147519.cu
#include <stdio.h> __global__ void kMartixByMatrixElementwise(const int nThreads, const float *m1, const float *m2, float *output) { /* Computes the product of two arrays (elementwise multiplication). Inputs: m1: array m2: array output: array,the results of the multiplication are to be stored here */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = m1[i] * m2[i]; } } __device__ float* dMartixByMatrixElementwise(const float *m1, const float *m2, float *output, const int width, const int height) { kMartixByMatrixElementwise <<< width, height >>> ( width * height, m1, m2, output ); cudaDeviceSynchronize(); return output; } __global__ void kMartixSubstractMatrix(const int nThreads, const float *m1, const float *m2, float *output) { /* Computes the (elementwise) difference between two arrays Inputs: m1: array m2: array output: array,the results of the computation are to be stored here */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = m1[i] - m2[i]; } } __device__ float* dMartixSubstractMatrix(const float *m1, const float *m2, float *output, const int width, const int height) { kMartixSubstractMatrix <<< width, height >>> ( width * height, m1, m2, output ); cudaDeviceSynchronize(); return output; } __global__ void kSigmoid(const int nThreads, float const *input, float *output) { /* Computes the value of the sigmoid function f(x) = 1/(1 + e^-x). 
Inputs: input: array output: array, the results of the computation are to be stored here */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = 1.0 / (1.0 + std::exp(-input[i])); } } __device__ void dSigmoid(float const *input, float *output, const int height, const int width) { kSigmoid <<< height, width >>> (height * width, input, output); cudaDeviceSynchronize(); } __global__ void kSigmoid_d(const int nThreads, float const *input, float *output) { /* Computes the value of the sigmoid function derivative f'(x) = f(x)(1 – f(x)), where f(x) is sigmoid function. Inputs: input: array output: array, the results of the computation are to be stored here: x(1 – x) for every element of the input matrix m1. */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = input[i] * (1 - input[i]); } } __device__ float* dSigmoid_d(float const *input, float *output, const int rows, const int columns){ kSigmoid_d <<< rows, columns >>> (rows*columns, input, output); cudaDeviceSynchronize(); return output; } __global__ void kDot(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_columns ){ /* Computes the product of two matrices: m1 x m2. 
Inputs: m1: array, left matrix of size m1_rows x m1_columns m2: array, right matrix of size m1_columns x m2_columns (the number of rows in the right matrix must be equal to the number of the columns in the left one) output: array, the results of the computation are to be stored here: m1 * m2, product of two arrays m1 and m2, a matrix of size m1_rows x m2_columns m1_rows: int, number of rows in the left matrix m1 m1_columns: int, number of columns in the left matrix m1 m2_columns: int, number of columns in the right matrix m2 */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { int r = (int)i / m2_columns; int c = i % m2_columns; float t_output = 0.f; for( int k = 0; k < m1_columns; ++k ) { t_output += m1[ r * m1_columns + k ] * m2[ k * m2_columns + c]; } output[i] = t_output; } } __device__ float* dDot(const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_columns ) { kDot <<< m1_rows, m2_columns >>> (m1_rows * m2_columns, m1, m2, output, m1_rows , m1_columns, m2_columns ); cudaDeviceSynchronize(); return output; } __global__ void kDot_m1_m2T(const int nThreads, const float *m1, const float *m2, float *output, const int m1_columns, const int m2_rows ) { /* Updates the output matrix with the product of two matrices: m1 and m2 transposed. 
Inputs: m1: array, left matrix of size m1_rows x m1_columns m2: array, right matrix of size m2_rows x m1_columns (m2 transposed will be of size m1_columns x m2_rows) output: array, the results of the computation are to be stored here: m1 * m2, product of two arrays m1 and m2, a matrix of size m1_rows x m2_rows m1_columns: int, number of columns in the left matrix m1 m2_rows: int, number of rows in the left matrix m2 */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { int r = (int)i / m2_rows; int c = i % m2_rows; float t_output = 0.0; int id_T; for( int k = 0; k < m1_columns; ++k ) { id_T = c * m1_columns + k; t_output += m1[ r * m1_columns + k ] * m2[ id_T ]; } output[i] = t_output; } } __device__ float* dDot_m1_m2T(const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_rows ) { kDot_m1_m2T <<< m1_rows, m2_rows >>> ( m1_rows * m2_rows, m1, m2, output, m1_columns, m2_rows ); cudaDeviceSynchronize(); return output; } __global__ void kDot_m1T_m2(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows, const int m1_columns, const int m2_columns ) { /* Increments the output matrix with the product of two matrices: m1 transposed and m2. 
Inputs: m1: array, left matrix of size m1_rows x m1_columns (m1 transposed will be of size m1_columns x m1_rows) m2: array, right matrix of size m1_rows x m2_columns output: array, the results of the computation are to be stored here: m1 * m2, product of two arrays m1 and m2, a matrix of size m1_columns x m2_columns m1_rows: int, number of rows in the left matrix m1 m1_columns: int, number of columns in the left matrix m1 m2_rows: int, number of rows in the left matrix m2 */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { int r = (int)i / m2_columns; int c = i % m2_columns; int id_T; float t_output = 0.0; for( int k = 0; k < m1_rows; ++k ) { id_T = k * m1_columns + r; t_output += m1[ id_T ] * m2[ k * m2_columns + c ]; } output[i] += t_output; } } __device__ void dDot_m1T_m2(const float *m1, const float *m2, float *output, const int m1_height , const int m1_width, const int m2_width ) { kDot_m1T_m2 <<< m1_width, m2_width >>> (m1_width * m2_width, m1, m2, output, m1_height, m1_width, m2_width ); cudaDeviceSynchronize(); } __device__ void kPrintMatrix (const float* M, int h, int w) { /* Prints out the input array as h x w matrix. 
Inputs: m: vector, matrix of size n_rows x n_columns h: int, number of rows in the matrix M w: int, number of columns in the matrix M */ for (int i = 0; i < h; i++){ for (int j = 0; j < w; j++){ printf("%f ", M[i*w+j]); } printf("\n"); } printf("\n"); } __global__ void kFit( const float* X, const int X_w, const int X_h, const float* y, const int y_w, float* l1, const int l1_w, float* l_1_d, float* pred, float* pred_d, float* W0, float* W1, float* buffer ) { for (unsigned i = 0; i < 50; ++i) { dSigmoid(dDot(X, W0, l1, X_h, X_w, l1_w), l1, X_h, l1_w); dSigmoid(dDot(l1, W1, pred, X_h, l1_w, y_w), pred, X_h, y_w); dMartixByMatrixElementwise(dMartixSubstractMatrix(y, pred, pred_d, X_h, y_w), dSigmoid_d(pred, buffer, X_h, y_w), pred_d, X_h, y_w ); dMartixByMatrixElementwise(dDot_m1_m2T(pred_d, W1, l_1_d, X_h, y_w, l1_w), dSigmoid_d(l1, buffer, X_h, l1_w), l_1_d, X_h, l1_w); dDot_m1T_m2( l1, pred_d, W1, X_h, l1_w, y_w ); dDot_m1T_m2( X, l_1_d, W0, X_h, X_w, l1_w ); } } int main(void){ const int TRAINING_SIZE = 4; const int TRAINING_DIM = 4; const int L1_SIZE = 8; // X, the first 4 lines from Iris dataset float h_X[TRAINING_SIZE*TRAINING_DIM] = { 5.1, 3.5, 1.4, 0.2, 4.9, 3.0, 1.4, 0.2, 6.2, 3.4, 5.4, 2.3, 5.9, 3.0, 5.1, 1.8 }; const signed int X_size = sizeof(h_X); float *d_X; cudaMalloc(&d_X, X_size); cudaMemcpy(d_X, h_X, X_size, cudaMemcpyHostToDevice); //WEIGHTS_0 const long signed int W0_size = L1_SIZE*TRAINING_DIM*sizeof(float); float *h_W0 = (float*)malloc(W0_size); for (int i = 0; i < L1_SIZE*TRAINING_DIM; i++){ h_W0[i] = 0.1 * (2.0*rand()/RAND_MAX-1.0); } float *d_W0; cudaMalloc(&d_W0, W0_size); cudaMemcpy(d_W0, h_W0, W0_size, cudaMemcpyHostToDevice); //LAYER_1, LAYER_1_DELTA AND BUFFER OF LAYER 1 SIZE const long signed int L1_size = L1_SIZE*TRAINING_SIZE*sizeof(float); float* h_layer_1 = (float*)malloc(L1_size); float* h_layer_1_delta = (float*)malloc(L1_size); float* h_buffer = (float*)malloc(L1_size); for (int i = 0; i < L1_SIZE*TRAINING_SIZE; i++){ h_layer_1[i] 
= 0.0; h_buffer[i] = 0.0; h_layer_1_delta[i] = 0.0; } float *d_layer_1; cudaMalloc(&d_layer_1, L1_size); cudaMemcpy(d_layer_1, h_layer_1, L1_size, cudaMemcpyHostToDevice); float *d_buffer; cudaMalloc(&d_buffer, L1_size); cudaMemcpy(d_buffer, h_buffer, L1_size, cudaMemcpyHostToDevice); float *d_layer_1_delta; cudaMalloc(&d_layer_1_delta, L1_size); cudaMemcpy(d_layer_1_delta, h_layer_1_delta, L1_size, cudaMemcpyHostToDevice); //WEIGHTS_1 const long signed int W1_size = L1_SIZE*sizeof(float); float *h_W1 = (float*)malloc(W1_size); for (int i = 0; i < L1_SIZE; i++){ h_W1[i] = 0.1* (2.0*rand()/RAND_MAX-1.0); } float *d_W1; cudaMalloc(&d_W1, W1_size); cudaMemcpy(d_W1, h_W1, W1_size, cudaMemcpyHostToDevice); //Y float h_y[4] = { 0, 0, 1, 1 }; const signed int y_size = sizeof(h_y); float *d_y; cudaMalloc(&d_y, y_size); cudaMemcpy(d_y, h_y, y_size, cudaMemcpyHostToDevice); //PRED AND PRED_DELTA float* h_pred = (float*)malloc(y_size); float* h_pred_delta = (float*)malloc(y_size); for (int i = 0; i < TRAINING_SIZE; i++){ h_pred[i] = 0.0; h_pred_delta[i] = 0.0; } float *d_pred; cudaMalloc(&d_pred, y_size); cudaMemcpy(d_pred, h_pred, y_size, cudaMemcpyHostToDevice); float *d_pred_delta; cudaMalloc(&d_pred_delta, y_size); cudaMemcpy(d_pred_delta, h_pred_delta, y_size, cudaMemcpyHostToDevice); kFit <<< 1, 1 >>> ( d_X, TRAINING_DIM, TRAINING_SIZE, d_y, 1, d_layer_1, L1_SIZE, d_layer_1_delta, d_pred, d_pred_delta, d_W0, d_W1, d_buffer); cudaMemcpy(h_pred, d_pred, y_size, cudaMemcpyDeviceToHost); cudaFree(d_pred); cudaFree(d_X); cudaFree(d_y); cudaFree(d_layer_1_delta); cudaFree(d_pred_delta); cudaFree(d_W0); cudaFree(d_W1); cudaFree(d_buffer); free(h_layer_1_delta); free(h_pred_delta); free(h_W0); free(h_W1); free(h_buffer); for (int i = 0; i < TRAINING_SIZE; i++) { printf("Prediction[%i] : %f True Value[%i] : %f Error[%i] : %f\n", i, h_pred[i], i, h_y[i], i, h_pred[i] - h_y[i]); } free(h_pred); }
dd38221fdf87b6927fac6bba6f3a1bc33eb6d8fa.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <hip/hip_runtime.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include<limits> double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } struct NUM_ADD { short2 read_reference_number; int address_array; }; __global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result { int offset=blockIdx.x; __shared__ short2 read_reference_number; __shared__ char * read_base_array; __shared__ char4 * reference_base_array; __shared__ int mismatch; __shared__ int match; __shared__ int open; __shared__ int extend; __shared__ short2 * direction_index; while(offset<size) { if( threadIdx.x==0) { read_reference_number=num_add[offset].read_reference_number; read_base_array=(char *) (data+num_add[offset].address_array); reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128); direction_index=(short2 *) (direction+offset*640*1100); } __syncthreads(); __shared__ char reference_base_in_char[600]; int hh=(read_reference_number.y+4-1)/4; int tt=(hh+blockDim.x-1)/blockDim.x; for(int ii=0;ii<tt;ii++) { int aa=threadIdx.x+ii*blockDim.x; if(aa< hh) { char4 reference_base_in_thread; reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory reference_base_in_char[aa*4]=reference_base_in_thread.x; reference_base_in_char[aa*4+1]=reference_base_in_thread.y; reference_base_in_char[aa*4+2]=reference_base_in_thread.z; reference_base_in_char[aa*4+3]=reference_base_in_thread.w; } } __shared__ int MM[450]; __shared__ int gap_h[450]; //insertion __shared__ short2 gap_size_h[450]; 
//insertion __shared__ int result_col; __shared__ int result_row; __shared__ int result_col_index; __shared__ int result_row_index; //__shared__ char cigar_m[128]; //__shared__ int cigar_int_m[128]; //int final_result; //int final_i; //int final_j; if(threadIdx.x==0) { MM[0]=0; gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2; gap_size_h[0].x=0; gap_size_h[0].y=0; match=200; mismatch=-150; open=-260; extend=-11; result_col=-1000000000;//std::numeric_limits<int>::min()/2; result_row=-1000000000;//std::numeric_limits<int>::min()/2; // for(int i=0;i<read_reference_number.y;i++) // printf("%c",reference_base_in_char[i]); // printf("\n"); // for(int i=0;i<read_reference_number.x;i++) // printf("%c",read_base_array[i]); } __syncthreads(); int read_number=read_reference_number.x; { char read_base; read_base=read_base_array[threadIdx.x]; int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;; int gap_size_v=0; //Deletion int M=0; //now int step_right; //now int ki=0;//insertion h negetive //deletion v int MMM=0; short mt=0; short2 curmt; curmt.x=0; curmt.y=0; int current_reference_id=0; for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++) { int aa=j-threadIdx.x; if( aa>=0 && (current_reference_id<read_reference_number.y)) { int prev_gap=M+open; //M which is cacluated by last step in the same thread gap_v+=extend; if(prev_gap>gap_v) { gap_v=prev_gap; gap_size_v=1; } else gap_size_v++; char reference_base_each=reference_base_in_char[current_reference_id]; M=MMM+(read_base==reference_base_each? 
match:mismatch); prev_gap=MM[threadIdx.x]+open; step_right=gap_h[threadIdx.x]+extend; if(prev_gap>step_right) { step_right=prev_gap; ki=1; } else ki=gap_size_h[threadIdx.x].x+1; bool diag=(M>=gap_v)&&(M>=step_right); curmt.y=0; if(diag) { curmt.x=0; //if(threadIdx.x==0||current_reference_id==0) // curmt.y=0; // else curmt.y=mt+1; // curBtrack=0; } else if(step_right>=gap_v) { M=step_right; curmt.x=0-ki; // curBtrack=0-ki; } else { M=gap_v; curmt.x=gap_size_v; //curBtrack=gap_size_v; } MMM=MM[threadIdx.x]; mt=gap_size_h[threadIdx.x].y; direction_index[640*j+threadIdx.x]=curmt; //if(threadIdx.x==read_reference_number.x-3) //printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack); if(current_reference_id==read_reference_number.y-1) { if(M>=result_row) { result_row=M; result_row_index=threadIdx.x; // } //printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x); } if(threadIdx.x==read_reference_number.x-1) { if(M>=result_col) { result_col=M; result_col_index=current_reference_id; // +1 } } current_reference_id++; } __syncthreads(); //to make sure that the former value of MM[threadIdx.x+1] are used by other threads. MM[threadIdx.x+1]=M; gap_h[threadIdx.x+1]=step_right; gap_size_h[threadIdx.x+1].x=ki; gap_size_h[threadIdx.x+1].y=curmt.y; __syncthreads(); // there should be two synthreads(); // to make sure that all of MM[threadIdx.x+1] have get a new value before M,D and I changed. 
} } // char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion // __shared__ int cigar_index; // int segment_length; // short2 btr; // char new_state; // int step_length; int4 result4; if(threadIdx.x==read_reference_number.x-1) { //printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index); if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1)) { // final_result=result_row; result4.x=read_reference_number.y-1; result4.y=result_row_index; result4.z=read_reference_number.x-1-result_row_index; } else { // final_result=result_col; result4.x=result_col_index; result4.y=read_reference_number.x-1; result4.z=0; } //result[offset*3]=final_result; //printf("%d\n",final_result); //result4.x=fina_i; //result4.y=fina_j; //result4.z=segment_length; result[offset]=result4; } __syncthreads(); offset+=gridDim.x; } } __global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result { int offset=blockIdx.x; int4 result4;; short2 * direction_index; __shared__ char * cigar_store; __shared__ int *cigar_int_store; __shared__ char cigar_m[128]; __shared__ int cigar_int_m[128]; while(offset<size) { char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion __shared__ int cigar_index; int segment_length; short2 btr; char new_state; int step_length; if( threadIdx.x==0) { result4=result[offset]; direction_index=(short2 *) (direction+offset*640*1100); cigar_store=(char *) (cigar+offset*sizeof(char)*128); cigar_int_store=(int *) (cigar_int+offset*128); //printf("\n %d %d\n", final_i,final_j); cigar_index=0; if(result4.z>0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.z; cigar_index++; } segment_length=0; state='N'; do { btr=direction_index[(result4.x+result4.y)*640+result4.y]; if(btr.x>0) { new_state='D'; step_length=btr.x; result4.x-=step_length; } else if(btr.x<0) { new_state='I'; step_length=0-btr.x; 
result4.y-=step_length; } else { new_state='M'; step_length=btr.y; result4.x-=step_length; result4.y-=step_length; } if(state=='N') state=new_state; if(state==new_state) { segment_length+=step_length; } else { cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; segment_length=step_length; cigar_index++; state=new_state; } }while(result4.x>=0&&result4.y>=0); cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; cigar_index++; if(result4.y>=0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.y+1; cigar_index++; } result4.z=result4.x+1; result4.w=cigar_index; result[offset]=result4; /* for(int i=cigar_index-1;i>=0;i--) { printf("%d%c",cigar_int_m[i],cigar_m[i]); } */ } __syncthreads(); if(threadIdx.x<cigar_index && cigar_index<=blockDim.x) { // if(threadIdx.x==0) // printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]); cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x]; cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x]; // if(threadIdx.x==0) // printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]); } offset+=gridDim.x; } } struct InputData { char read_base[600]; char reference_base[600]; }; int main(int artc, char* args[]) { int total_size=0; FILE * file; file=fopen(args[1],"r"); int size; double computation_time=0;//total_time=0; timespec start,finish; /* char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[1]); strcpy(inputdata[index].read_base,data[1]); index++; } */ /* fscanf(file,"%d",&size); while(!feof(file)) { InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { fscanf(file,"%s ",inputdata[i].reference_base); 
fscanf(file,"%s ",inputdata[i].read_base); } */ char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[i]); strcpy(inputdata[index].read_base,data[j]); index++; } //data preparation. char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128); NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total); char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; //.thus we donot need to worry about align int data_size=0; char * data_d_total; hipMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4); int * result_h=(int *) malloc(sizeof(int)*size*4); char * cigar_h=(char *) malloc(sizeof(char)*size*128); int * cigar_int_h=(int *) malloc(sizeof(int)*size*128); for(int i=0;i<size;i++) { char4 reference_tep[150]; int read_len=strlen(inputdata[i].read_base); int ref_len=strlen(inputdata[i].reference_base); int new_len=(ref_len+4-1)/4; total_size+=ref_len*read_len; for(int j=0;j<new_len;j++) { reference_tep[j].x=inputdata[i].reference_base[j*4]; if(j*4+1<ref_len) reference_tep[j].y=inputdata[i].reference_base[j*4+1]; if(j*4+2<ref_len) reference_tep[j].z=inputdata[i].reference_base[j*4+2]; if(j*4+3<ref_len) reference_tep[j].w=inputdata[i].reference_base[j*4+3]; } data_num_add[i].read_reference_number.x=read_len; data_num_add[i].read_reference_number.y=ref_len; data_num_add[i].address_array=data_size; memcpy(data_h,inputdata[i].read_base,read_len); data_h+=(read_len+128-1)/128*128; data_size+=(read_len+128-1)/128*128; memcpy(data_h,reference_tep,sizeof(char4)* new_len); data_h+=(new_len*sizeof(char4)+127)/128*128; data_size+=(new_len*sizeof(char4)+127)/128*128; } int 
data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; hipMemcpy(data_d_total,data_h_total,data_size_to_copy,hipMemcpyHostToDevice); NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128; int4 * result_d=(int4 *) (data_d_total+data_size_to_copy); char * cigar; hipMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int))); int * cigar_int=(int *) (cigar+size*128*sizeof(char)); int * direction; hipMalloc( (int **) & direction, size * (640*1100* sizeof (int))); dim3 block(448); dim3 grid(size); clock_gettime(CLOCK_MONOTONIC_RAW,&start); hipLaunchKernelGGL(( calculate_cigar), dim3(grid),dim3(block), 0, 0, size,data_d,num_add_d,result_d,direction); //result // calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result hipMemcpy(result_h,result_d,size*sizeof(int)*4,hipMemcpyDeviceToHost); hipMemcpy(cigar_h,cigar,128*sizeof(char)*size, hipMemcpyDeviceToHost); hipMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,hipMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); computation_time+=diff(start,finish); /* for(int i=0;i<size;i++) { printf("%d\n",result_h[i*4+1]); printf("["); for(int j=0;j<result_h[i*4+3];j++) { if(j!=0) printf(", "); printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]); } printf("]\n"); } */ hipFree(direction); free(data_h_total); hipFree(data_d_total); free(inputdata); hipFree(cigar); free(cigar_int_h); free(cigar_h); // fscanf(file,"%d",&size); } // printf(" computation_time= %e total_time=%e \n",computation_time,0); printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000); return 0; }
dd38221fdf87b6927fac6bba6f3a1bc33eb6d8fa.cu
#include <iostream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <cuda.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include<limits> double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } struct NUM_ADD { short2 read_reference_number; int address_array; }; __global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result { int offset=blockIdx.x; __shared__ short2 read_reference_number; __shared__ char * read_base_array; __shared__ char4 * reference_base_array; __shared__ int mismatch; __shared__ int match; __shared__ int open; __shared__ int extend; __shared__ short2 * direction_index; while(offset<size) { if( threadIdx.x==0) { read_reference_number=num_add[offset].read_reference_number; read_base_array=(char *) (data+num_add[offset].address_array); reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128); direction_index=(short2 *) (direction+offset*640*1100); } __syncthreads(); __shared__ char reference_base_in_char[600]; int hh=(read_reference_number.y+4-1)/4; int tt=(hh+blockDim.x-1)/blockDim.x; for(int ii=0;ii<tt;ii++) { int aa=threadIdx.x+ii*blockDim.x; if(aa< hh) { char4 reference_base_in_thread; reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory reference_base_in_char[aa*4]=reference_base_in_thread.x; reference_base_in_char[aa*4+1]=reference_base_in_thread.y; reference_base_in_char[aa*4+2]=reference_base_in_thread.z; reference_base_in_char[aa*4+3]=reference_base_in_thread.w; } } __shared__ int MM[450]; __shared__ int gap_h[450]; //insertion __shared__ short2 gap_size_h[450]; //insertion __shared__ int result_col; __shared__ int result_row; 
__shared__ int result_col_index; __shared__ int result_row_index; //__shared__ char cigar_m[128]; //__shared__ int cigar_int_m[128]; //int final_result; //int final_i; //int final_j; if(threadIdx.x==0) { MM[0]=0; gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2; gap_size_h[0].x=0; gap_size_h[0].y=0; match=200; mismatch=-150; open=-260; extend=-11; result_col=-1000000000;//std::numeric_limits<int>::min()/2; result_row=-1000000000;//std::numeric_limits<int>::min()/2; // for(int i=0;i<read_reference_number.y;i++) // printf("%c",reference_base_in_char[i]); // printf("\n"); // for(int i=0;i<read_reference_number.x;i++) // printf("%c",read_base_array[i]); } __syncthreads(); int read_number=read_reference_number.x; { char read_base; read_base=read_base_array[threadIdx.x]; int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;; int gap_size_v=0; //Deletion int M=0; //now int step_right; //now int ki=0;//insertion h negetive //deletion v int MMM=0; short mt=0; short2 curmt; curmt.x=0; curmt.y=0; int current_reference_id=0; for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++) { int aa=j-threadIdx.x; if( aa>=0 && (current_reference_id<read_reference_number.y)) { int prev_gap=M+open; //M which is cacluated by last step in the same thread gap_v+=extend; if(prev_gap>gap_v) { gap_v=prev_gap; gap_size_v=1; } else gap_size_v++; char reference_base_each=reference_base_in_char[current_reference_id]; M=MMM+(read_base==reference_base_each? 
match:mismatch); prev_gap=MM[threadIdx.x]+open; step_right=gap_h[threadIdx.x]+extend; if(prev_gap>step_right) { step_right=prev_gap; ki=1; } else ki=gap_size_h[threadIdx.x].x+1; bool diag=(M>=gap_v)&&(M>=step_right); curmt.y=0; if(diag) { curmt.x=0; //if(threadIdx.x==0||current_reference_id==0) // curmt.y=0; // else curmt.y=mt+1; // curBtrack=0; } else if(step_right>=gap_v) { M=step_right; curmt.x=0-ki; // curBtrack=0-ki; } else { M=gap_v; curmt.x=gap_size_v; //curBtrack=gap_size_v; } MMM=MM[threadIdx.x]; mt=gap_size_h[threadIdx.x].y; direction_index[640*j+threadIdx.x]=curmt; //if(threadIdx.x==read_reference_number.x-3) //printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack); if(current_reference_id==read_reference_number.y-1) { if(M>=result_row) { result_row=M; result_row_index=threadIdx.x; // } //printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x); } if(threadIdx.x==read_reference_number.x-1) { if(M>=result_col) { result_col=M; result_col_index=current_reference_id; // +1 } } current_reference_id++; } __syncthreads(); //to make sure that the former value of MM[threadIdx.x+1] are used by other threads. MM[threadIdx.x+1]=M; gap_h[threadIdx.x+1]=step_right; gap_size_h[threadIdx.x+1].x=ki; gap_size_h[threadIdx.x+1].y=curmt.y; __syncthreads(); // there should be two synthreads(); // to make sure that all of MM[threadIdx.x+1] have get a new value before M,D and I changed. 
} } // char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion // __shared__ int cigar_index; // int segment_length; // short2 btr; // char new_state; // int step_length; int4 result4; if(threadIdx.x==read_reference_number.x-1) { //printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index); if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1)) { // final_result=result_row; result4.x=read_reference_number.y-1; result4.y=result_row_index; result4.z=read_reference_number.x-1-result_row_index; } else { // final_result=result_col; result4.x=result_col_index; result4.y=read_reference_number.x-1; result4.z=0; } //result[offset*3]=final_result; //printf("%d\n",final_result); //result4.x=fina_i; //result4.y=fina_j; //result4.z=segment_length; result[offset]=result4; } __syncthreads(); offset+=gridDim.x; } } __global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result { int offset=blockIdx.x; int4 result4;; short2 * direction_index; __shared__ char * cigar_store; __shared__ int *cigar_int_store; __shared__ char cigar_m[128]; __shared__ int cigar_int_m[128]; while(offset<size) { char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion __shared__ int cigar_index; int segment_length; short2 btr; char new_state; int step_length; if( threadIdx.x==0) { result4=result[offset]; direction_index=(short2 *) (direction+offset*640*1100); cigar_store=(char *) (cigar+offset*sizeof(char)*128); cigar_int_store=(int *) (cigar_int+offset*128); //printf("\n %d %d\n", final_i,final_j); cigar_index=0; if(result4.z>0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.z; cigar_index++; } segment_length=0; state='N'; do { btr=direction_index[(result4.x+result4.y)*640+result4.y]; if(btr.x>0) { new_state='D'; step_length=btr.x; result4.x-=step_length; } else if(btr.x<0) { new_state='I'; step_length=0-btr.x; 
result4.y-=step_length; } else { new_state='M'; step_length=btr.y; result4.x-=step_length; result4.y-=step_length; } if(state=='N') state=new_state; if(state==new_state) { segment_length+=step_length; } else { cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; segment_length=step_length; cigar_index++; state=new_state; } }while(result4.x>=0&&result4.y>=0); cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; cigar_index++; if(result4.y>=0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.y+1; cigar_index++; } result4.z=result4.x+1; result4.w=cigar_index; result[offset]=result4; /* for(int i=cigar_index-1;i>=0;i--) { printf("%d%c",cigar_int_m[i],cigar_m[i]); } */ } __syncthreads(); if(threadIdx.x<cigar_index && cigar_index<=blockDim.x) { // if(threadIdx.x==0) // printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]); cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x]; cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x]; // if(threadIdx.x==0) // printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]); } offset+=gridDim.x; } } struct InputData { char read_base[600]; char reference_base[600]; }; int main(int artc, char* args[]) { int total_size=0; FILE * file; file=fopen(args[1],"r"); int size; double computation_time=0;//total_time=0; timespec start,finish; /* char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[1]); strcpy(inputdata[index].read_base,data[1]); index++; } */ /* fscanf(file,"%d",&size); while(!feof(file)) { InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { fscanf(file,"%s ",inputdata[i].reference_base); 
fscanf(file,"%s ",inputdata[i].read_base); } */ char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[i]); strcpy(inputdata[index].read_base,data[j]); index++; } //data preparation. char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128); NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total); char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; //.thus we donot need to worry about align int data_size=0; char * data_d_total; cudaMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4); int * result_h=(int *) malloc(sizeof(int)*size*4); char * cigar_h=(char *) malloc(sizeof(char)*size*128); int * cigar_int_h=(int *) malloc(sizeof(int)*size*128); for(int i=0;i<size;i++) { char4 reference_tep[150]; int read_len=strlen(inputdata[i].read_base); int ref_len=strlen(inputdata[i].reference_base); int new_len=(ref_len+4-1)/4; total_size+=ref_len*read_len; for(int j=0;j<new_len;j++) { reference_tep[j].x=inputdata[i].reference_base[j*4]; if(j*4+1<ref_len) reference_tep[j].y=inputdata[i].reference_base[j*4+1]; if(j*4+2<ref_len) reference_tep[j].z=inputdata[i].reference_base[j*4+2]; if(j*4+3<ref_len) reference_tep[j].w=inputdata[i].reference_base[j*4+3]; } data_num_add[i].read_reference_number.x=read_len; data_num_add[i].read_reference_number.y=ref_len; data_num_add[i].address_array=data_size; memcpy(data_h,inputdata[i].read_base,read_len); data_h+=(read_len+128-1)/128*128; data_size+=(read_len+128-1)/128*128; memcpy(data_h,reference_tep,sizeof(char4)* new_len); data_h+=(new_len*sizeof(char4)+127)/128*128; data_size+=(new_len*sizeof(char4)+127)/128*128; } int 
data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; cudaMemcpy(data_d_total,data_h_total,data_size_to_copy,cudaMemcpyHostToDevice); NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128; int4 * result_d=(int4 *) (data_d_total+data_size_to_copy); char * cigar; cudaMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int))); int * cigar_int=(int *) (cigar+size*128*sizeof(char)); int * direction; cudaMalloc( (int **) & direction, size * (640*1100* sizeof (int))); dim3 block(448); dim3 grid(size); clock_gettime(CLOCK_MONOTONIC_RAW,&start); calculate_cigar<<<grid,block>>> (size,data_d,num_add_d,result_d,direction); //result // calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result cudaMemcpy(result_h,result_d,size*sizeof(int)*4,cudaMemcpyDeviceToHost); cudaMemcpy(cigar_h,cigar,128*sizeof(char)*size, cudaMemcpyDeviceToHost); cudaMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,cudaMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); computation_time+=diff(start,finish); /* for(int i=0;i<size;i++) { printf("%d\n",result_h[i*4+1]); printf("["); for(int j=0;j<result_h[i*4+3];j++) { if(j!=0) printf(", "); printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]); } printf("]\n"); } */ cudaFree(direction); free(data_h_total); cudaFree(data_d_total); free(inputdata); cudaFree(cigar); free(cigar_int_h); free(cigar_h); // fscanf(file,"%d",&size); } // printf(" computation_time= %e total_time=%e \n",computation_time,0); printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000); return 0; }
9731f5ee950be552be45a0749cf8f17e0fda3ea8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Parallel and Distributed Systems \file v2.c \brief Implementation for the Ising Model in CUDA One thread computing a block of moments \authors Ioannis Gonidelis Dimitra Karatza \AEMs 8794 8828 \date 2020-01-15 */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #define BLOCK_DIMENSION 11 #define GRID_DIMENSION 47 #define N 517 //dimention void validation(int n,int k,int *expected,int *G){ int flag=0; for(int v = 0; v < n*n; v++){ if(expected[v] != G[v]){ flag=-1; break; } } if(flag==0){ printf("\033[0;32m"); printf("k=%d: CORRECT ISING MODEL",k); printf("\033[0m \n"); }else{ printf("k=%d: WRONG ISING MODEL\n",k); } } __global__ void calc_moment(int *G, int* newG, double* w, int n){ //NOTE: gridDim.x = gridDim.y it's the same int fit = n/(gridDim.x*BLOCK_DIMENSION); //number of complete blocks that fit into G //Global G indices int ix=threadIdx.x+blockIdx.x*blockDim.x; int iy=threadIdx.y+blockIdx.y*blockDim.y; int x,y;//shared memory indices int thread_step_x= blockDim.x*gridDim.x; double infl; //influence of neighbors on current moment for(int i=0; i<(fit+1)*(fit+1); i++){ infl=0; if(ix<N && iy<N){ //for all the neighbors for(int c=0;c<5;c++){ for(int d=0;d<5;d++){ //Do not update if the next neighbor coincides with the current point if((c!=2) || (d!=2)){ //Windows centered on the edge lattice points wrap around to the other side y = ((c-2)+iy+n) % n; x = ((d-2)+ix+n) % n; //Influence of a neighbor is increased //Add to infl the weight*value of the previous neighbor infl += G[y*n+x] * w[c*5+d]; } } } //Next value of a moment is defined according to the value of infl if(infl>0.0001){ newG[iy*n+ix]=1; }else if(infl<-0.0001){ newG[iy*n+ix]=-1; }else{ newG[iy*n+ix]=G[iy*n+ix]; } } //update G coordinates - traverse horizontally though G map if((ix+thread_step_x)/n>=1){ iy=blockDim.y*gridDim.y+iy; }else{ iy=iy; } ix=(ix+thread_step_x)%n; } } void ising( 
int *G, double *w, int k, int n){ int *newG,*swapG; hipMallocManaged(&newG,n*n*sizeof(int)); //save previous G before changing it dim3 block(BLOCK_DIMENSION, BLOCK_DIMENSION); int grid_dimension = GRID_DIMENSION; //define it gloabaly or find a way to produce it dim3 grid(grid_dimension, grid_dimension); //for every iteration (k) for(int t=0;t<k;t++){ //Call kernel function hipLaunchKernelGGL(( calc_moment), dim3(grid),dim3(block), 0, 0, G, newG, w,n); // Synchronize threads before swapping the arrays hipError_t cudaerr = hipDeviceSynchronize(); if (cudaerr != hipSuccess) printf("kernel launch failed with error \"%s\".\n", hipGetErrorString(cudaerr)); //Swap arrays G and newG swapG=newG; newG=G; G=swapG; } //If last k is an odd number, then the returned G should be newG if(k % 2 == 1){ memcpy(newG, G, n*n*sizeof(int)); } } int main(void) { //k = number of iterations int k = 1; int n=N; // Array of weights double *weights; hipMallocManaged(&weights,5*5*sizeof(double)); double w[25] = {0.004, 0.016, 0.026, 0.016, 0.004, 0.016, 0.071, 0.117, 0.071, 0.016, 0.026, 0.117, 0, 0.117, 0.026, 0.016, 0.071, 0.117, 0.071, 0.016, 0.004, 0.016, 0.026, 0.016, 0.004}; memcpy(weights,w,sizeof(w)); // Get the moments of array G from the binary file FILE *fptr = fopen("conf-init.bin","rb"); if (fptr == NULL){ printf("Error: Cannnot open file"); exit(1); } int *G; hipMallocManaged(&G,n*n*sizeof(int)); fread(G, sizeof(int), n*n, fptr); fclose(fptr); //Save a copy of G to call again function ising() for different k //because ising() is changing the array G int *copyG; hipMallocManaged(&copyG,n*n*sizeof(int)); memcpy(copyG, G, n*n*sizeof(int)); //Call ising for k=1 ising(G, weights, k, n); // Check results by comparing with ready data for k=1 int *expected; hipMallocManaged(&expected,n*n*sizeof(int)); fptr = fopen("conf-1.bin","rb"); if (fptr == NULL){ printf("Error: Cannnot open file"); exit(1); } fread(expected, sizeof(int), n*n, fptr); fclose(fptr); validation(n,k,expected,G); //Call 
ising for k=4 k=4; memcpy(G, copyG, n*n*sizeof(int)); ising(G, weights, k, n); // Check for k = 4 fptr = fopen("conf-4.bin","rb"); if (fptr == NULL){ printf("Error: Cannnot open file"); exit(1); } fread(expected, sizeof(int), n*n, fptr); fclose(fptr); validation(n,k,expected,G); //Call ising for k=11; k=11; memcpy(G, copyG, n*n*sizeof(int)); ising(G, weights, k, n); // Check for k = 11 fptr = fopen("conf-11.bin","rb"); if (fptr == NULL){ printf("Error: Cannnot open file"); exit(1); } fread(expected, sizeof(int), n*n, fptr); fclose(fptr); validation(n,k,expected,G); return 0; }
9731f5ee950be552be45a0749cf8f17e0fda3ea8.cu
/* Parallel and Distributed Systems \file v2.c \brief Implementation for the Ising Model in CUDA One thread computing a block of moments \authors Ioannis Gonidelis Dimitra Karatza \AEMs 8794 8828 \date 2020-01-15 */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #define BLOCK_DIMENSION 11 #define GRID_DIMENSION 47 #define N 517 //dimention void validation(int n,int k,int *expected,int *G){ int flag=0; for(int v = 0; v < n*n; v++){ if(expected[v] != G[v]){ flag=-1; break; } } if(flag==0){ printf("\033[0;32m"); printf("k=%d: CORRECT ISING MODEL",k); printf("\033[0m \n"); }else{ printf("k=%d: WRONG ISING MODEL\n",k); } } __global__ void calc_moment(int *G, int* newG, double* w, int n){ //NOTE: gridDim.x = gridDim.y it's the same int fit = n/(gridDim.x*BLOCK_DIMENSION); //number of complete blocks that fit into G //Global G indices int ix=threadIdx.x+blockIdx.x*blockDim.x; int iy=threadIdx.y+blockIdx.y*blockDim.y; int x,y;//shared memory indices int thread_step_x= blockDim.x*gridDim.x; double infl; //influence of neighbors on current moment for(int i=0; i<(fit+1)*(fit+1); i++){ infl=0; if(ix<N && iy<N){ //for all the neighbors for(int c=0;c<5;c++){ for(int d=0;d<5;d++){ //Do not update if the next neighbor coincides with the current point if((c!=2) || (d!=2)){ //Windows centered on the edge lattice points wrap around to the other side y = ((c-2)+iy+n) % n; x = ((d-2)+ix+n) % n; //Influence of a neighbor is increased //Add to infl the weight*value of the previous neighbor infl += G[y*n+x] * w[c*5+d]; } } } //Next value of a moment is defined according to the value of infl if(infl>0.0001){ newG[iy*n+ix]=1; }else if(infl<-0.0001){ newG[iy*n+ix]=-1; }else{ newG[iy*n+ix]=G[iy*n+ix]; } } //update G coordinates - traverse horizontally though G map if((ix+thread_step_x)/n>=1){ iy=blockDim.y*gridDim.y+iy; }else{ iy=iy; } ix=(ix+thread_step_x)%n; } } void ising( int *G, double *w, int k, int n){ int *newG,*swapG; 
cudaMallocManaged(&newG,n*n*sizeof(int)); //save previous G before changing it dim3 block(BLOCK_DIMENSION, BLOCK_DIMENSION); int grid_dimension = GRID_DIMENSION; //define it gloabaly or find a way to produce it dim3 grid(grid_dimension, grid_dimension); //for every iteration (k) for(int t=0;t<k;t++){ //Call kernel function calc_moment<<<grid,block>>>(G, newG, w,n); // Synchronize threads before swapping the arrays cudaError_t cudaerr = cudaDeviceSynchronize(); if (cudaerr != cudaSuccess) printf("kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr)); //Swap arrays G and newG swapG=newG; newG=G; G=swapG; } //If last k is an odd number, then the returned G should be newG if(k % 2 == 1){ memcpy(newG, G, n*n*sizeof(int)); } } int main(void) { //k = number of iterations int k = 1; int n=N; // Array of weights double *weights; cudaMallocManaged(&weights,5*5*sizeof(double)); double w[25] = {0.004, 0.016, 0.026, 0.016, 0.004, 0.016, 0.071, 0.117, 0.071, 0.016, 0.026, 0.117, 0, 0.117, 0.026, 0.016, 0.071, 0.117, 0.071, 0.016, 0.004, 0.016, 0.026, 0.016, 0.004}; memcpy(weights,w,sizeof(w)); // Get the moments of array G from the binary file FILE *fptr = fopen("conf-init.bin","rb"); if (fptr == NULL){ printf("Error: Cannnot open file"); exit(1); } int *G; cudaMallocManaged(&G,n*n*sizeof(int)); fread(G, sizeof(int), n*n, fptr); fclose(fptr); //Save a copy of G to call again function ising() for different k //because ising() is changing the array G int *copyG; cudaMallocManaged(&copyG,n*n*sizeof(int)); memcpy(copyG, G, n*n*sizeof(int)); //Call ising for k=1 ising(G, weights, k, n); // Check results by comparing with ready data for k=1 int *expected; cudaMallocManaged(&expected,n*n*sizeof(int)); fptr = fopen("conf-1.bin","rb"); if (fptr == NULL){ printf("Error: Cannnot open file"); exit(1); } fread(expected, sizeof(int), n*n, fptr); fclose(fptr); validation(n,k,expected,G); //Call ising for k=4 k=4; memcpy(G, copyG, n*n*sizeof(int)); ising(G, weights, k, n); // 
Check for k = 4 fptr = fopen("conf-4.bin","rb"); if (fptr == NULL){ printf("Error: Cannnot open file"); exit(1); } fread(expected, sizeof(int), n*n, fptr); fclose(fptr); validation(n,k,expected,G); //Call ising for k=11; k=11; memcpy(G, copyG, n*n*sizeof(int)); ising(G, weights, k, n); // Check for k = 11 fptr = fopen("conf-11.bin","rb"); if (fptr == NULL){ printf("Error: Cannnot open file"); exit(1); } fread(expected, sizeof(int), n*n, fptr); fclose(fptr); validation(n,k,expected,G); return 0; }
5fcd25097166b9348b39069e0890c570ed6790dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _LISTRANK_KERNEL_H_ #define _LISTRANK_KERNEL_H_ typedef struct { int head; int scratch; int prefix; int succ; int succ2; } Sublist; __global__ void Kernel1(int *VAL, int *SUC, Sublist *S,int s_size, int size, int head,int tail) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; int split; //Set the Splitter - first element of block of logn elements //// printf("\n%d: ",index); //fflush(stdout); split=index*s_size; if(split==tail||split==head) split++; if(index==0) split=head; if(split>=size) return; //// printf("Index=%d\n",split); S[index].head=split; S[index].scratch=SUC[split]; SUC[split]=-(index); //printf("I: %d, Splitter=%d, SUC[i]=%d, Sublist.head=%d, Sublist.scratch=%d",index, split, SUC[split], S[index].head, S[index].scratch); } __global__ void Kernel1LB(int *VAL, int *SUC, Sublist *S,int s_size, int size, int head,int tail, int *Splitters, int numsplitters) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; int split; //Set the Splitter - first element of block of logn elements //printf("\n%d: ",index); //fflush(stdout); if(index>=numsplitters) return; split=Splitters[index]; if(split>=size) return; //// printf("Index=%d\n",split); S[index].head=split; S[index].scratch=SUC[split]; SUC[split]=-(index); //printf("I: %d, Splitter=%d, SUC[i]=%d, Sublist.head=%d, Sublist.scratch=%d",index, split, SUC[split], S[index].head, S[index].scratch); } __global__ void Kernel2(int *VAL, int *SUC, Sublist *S,int s_size, int size, int head, int tail) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; //int split=index*sublist_size; if(index*s_size>=size) return; int p=S[index].scratch; int prefix=0; int temp=p; int count=0; //Traverse and set sublist prefix and sucessor to point to sublist index while(p>=0) { //// printf("\nLoop1,%d:%d",index,p); //fflush(stdout); temp=p; p=SUC[p]; SUC[temp]=-(index); VAL[temp]=++prefix; count++; } #ifdef __DEVICE_EMULATION__ 
printf("\n%d:%d",index,count); #endif if(temp==tail) { VAL[temp]=prefix; S[index].succ=-1; S[index].succ2=1; // // printf("\nLevel 0 Tail: %d, prefix: %d",index,prefix); } else if(p<0) { //Store the next sublist index S[index].succ=-p; S[index].succ2=-p; // printf("%d ",S[index].succ); S[-p].prefix=prefix; //// printf(" Prefix %d",S[-p].prefix); } if(index==0) S[0].prefix=0; } __global__ void Kernel3(int *VAL, int *SUC, Sublist *S,int s_size, int size, int head) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; if(index==0) { int prefix=0; int p=S[0].succ; while(p!=-1) { S[p].prefix=S[p].prefix+prefix; prefix=S[p].prefix; //// printf("\n%d:%d",p, S[p].prefix); p=S[p].succ; } } } //No. of Threads increased for Kernel 4 __global__ void Kernel4(int *VAL, int *SUC, Sublist *S,int sublist_size, int size, int head) { int block=(blockIdx.y*gridDim.x)+blockIdx.x; int index=block*blockDim.x+threadIdx.x; if(index>=size) return; int suc=abs(SUC[index]); VAL[index]=VAL[index]+S[suc].prefix; } __global__ void reduce6(int *g_idata, int *g_odata, unsigned int n) { int blockSize=64; extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = 0; while (i < n) { sdata[tid] += g_idata[i] + g_idata[i+blockSize]; i += gridSize; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = 
sdata[0]; } __global__ void findtail(int *g_idata, int *g_odata, unsigned int n) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; if(index<n&&g_idata[index]==-1) { *g_odata=index; //// printf("Tail Index=%d",*g_odata); } } __global__ void findsublisttail(Sublist *S, int *g_odata, unsigned int n) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; if(index<n&&S[index].succ==-1) { *g_odata=index; //// printf("Tail Index=%d",*g_odata); } } /* __global__ void ListRankKernel2(int *LIST, int size) { int i=(blockIdx.x*256)+threadIdx.x; if(i<size) { int temp=0; int mask=0xFFFF; while((LIST[i]>>16)!=-1 && ((LIST[LIST[i]>>16]>>16)!=-1)) { //VAL[i]=VAL[i]+VAL[SUC[i]]; temp=LIST[i]&mask; temp+= LIST[(LIST[i]>>16)]&mask; //SUC[i]=SUC[SUC[i]]; temp+=(LIST[(LIST[i]>>16)]>>16)<<16; atomicExch(&LIST[i],temp); } } }*/ __global__ void SublistKernel1(Sublist *S1, Sublist *S,int s_size, int size, int head,int tail,int level) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; int split; //Set the Splitter - first element of block of logn elements //// printf("\nSublist Size: %d, size: %d",s_size,size); //fflush(stdout); split=index*s_size; if(split==tail||split==head) split++; if(index==0) split=head; if(split>=size) return; //// printf("\n%d: ",index); //fflush(stdout); //// printf("Index=%d\n",split); S[index].head=split; //// printf("\nDone"); //fflush(stdout); S[index].scratch=S1[split].succ; S1[split].succ2=S1[split].succ; S1[split].succ=-(index); // printf("\nI: %d, Splitter=%d, SUC[i]=%d, Sublist.head=%d, Sublist.scratch=%d",index, split, S1[split].succ, S[index].head, S[index].scratch); //fflush(stdout); } __global__ void SublistKernel2(Sublist *S1, Sublist *S,int s_size, int size, int head, int tail) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; int flag=0; //int split=index*sublist_size; if(index*s_size>=size) return; if(index==0) { // printf("\nPrefix Array in SK2 before compute:\n"); for(int i=0;i<3;i++) { // printf("%d ",S1[i].prefix); } } int prefix=0; int 
p=S[index].scratch; if(S1[p].succ<0) flag=1; if(!flag) prefix=S1[S[index].head].prefix; // printf("\nSK2: Index: %d, Head: %d, Prefix %d, Succ %d",index, S[index].head, prefix, S1[p].succ); int temp=p; //Traverse and set sublist prefix and sucessor to point to sublist index while(p>=0&&S1[p].succ>=0) { // printf("\nLoop1,%d:%d",index,p); //fflush(stdout); // printf(" old S1.succ= %d", S1[p].succ); temp=p; //p=SUC[p]; p=S1[p].succ; //SUC[temp]=-(index); S1[temp].succ2=S1[temp].succ; S1[temp].succ=(-index); //VAL[temp]=++prefix; // printf(" old prefix: %d",S1[temp].prefix); S1[temp].prefix+=prefix; // printf(" index: %d, new prefix: %d",temp, S1[temp].prefix); prefix=S1[temp].prefix; } if(p==tail) { //VAL[temp]=prefix; S1[p].prefix+=prefix; S1[p].succ2=S1[p].succ; S1[p].succ=(-index); S[index].succ=-1; // printf("\nIndex %d: Last item prefix: %d",index,S1[p].prefix); // printf("\nPrefix Array in SK2 before compute:\n"); for(int i=0;i<3;i++) { // printf("%d ",S[i].prefix); } } else if(S1[p].succ<0) { //Store the next sublist index int successor=-(S1[p].succ); S[index].succ=successor; //// printf("\nNext Sublist for Index %d, succ=%d, flag=%d",index, S[index].succ,flag); if(!flag) S[successor].prefix=prefix; else S[successor].prefix=S[index].prefix; //// printf(" Prefix %d",S[p].prefix); } if(index==head) { S[head].prefix=S1[head].prefix; //// printf("\nHead Prefix: %d",S[head].prefix); } } __global__ void SublistKernel3(Sublist *S, int size, int head) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; if(index==0) { int prefix=0; int p=S[0].succ; while(p!=-1) { S[p].prefix=S[p].prefix+prefix; prefix=S[p].prefix; //// printf("\n%d:%d",p, S[p].prefix); p=S[p].succ; } } } __global__ void SublistKernel4(Sublist *S1, Sublist *S,int sublist_size, int size, int head) { int index=(blockIdx.y*blockDim.x)+(blockIdx.x*blockDim.x)+threadIdx.x; if(index>=size) return; int suc=abs(S1[index].succ); //VAL[index]=VAL[index]+S[suc].prefix; S1[index].prefix+=S[suc].prefix; //Restore Old 
Successor Value S1[index].succ=S1[index].succ2; __syncthreads(); if(index==0) { // printf("\nPrefix array after addition:\n"); for(int i=0;i<size;i++) { // printf("%3d ",S1[i].prefix); } // printf("\n"); // for(int i=0;i<size;i++) // printf("%3d ",S1[i].succ); } } __global__ void SublistKernel25(Sublist *S1, Sublist *S,int s_size, int size, int head, int tail) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; //int flag=0; //int split=index*sublist_size; if(index*s_size>=size) return; /*if(index==0) { // printf("\nPrefix Array in SK2 before compute:\n"); for(int i=0;i<3;i++) { // printf("%d ",S1[i].prefix); } }*/ int p=S[index].scratch; int prefix=S1[S[index].head].prefix; // printf("\nStarting SK2 Sweep for Element %d, Prefix: %d",index,prefix); int temp=p; while(p>0) { //// printf(" S1[%d].prefix=%d, succ=%d",temp,prefix,S1[temp].succ); temp=p; S1[temp].succ2=S1[temp].succ; p=S1[temp].succ; if(p>0) { S1[temp].succ= -index; prefix+=S1[temp].prefix; S1[temp].prefix=prefix; } } if(temp==tail) { S1[temp].prefix+=prefix; S1[temp].succ2=-1; S1[temp].succ=-index; S[index].succ=-1; // printf("\nSK2 S Tail %d", index); } else if(p<0) { S[index].succ=-p; S[-p].prefix=prefix; // printf("\nNext Sublist for Index %d, succ=%d, prefix: %d",index, S[index].succ, S[S[index].succ].prefix); } if(index==0) { S[0].prefix=0; } } /* __global__ void Kernel2Ord(int *VAL, int *SUC, Sublist *S,int s_size, int size, int head, int tail) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; __shared__ int VALs[BLOCKDIM]; __shared__ int SUCCs[BLOCKDIM]; //int split=index*sublist_size; if(index*s_size>=size) return; int p=S[index].scratch; int prefix=0; int temp=p; //Traverse and set sublist prefix and sucessor to point to sublist index while(p>=0) { //// printf("\nLoop1,%d:%d",index,p); //fflush(stdout); temp=p; p=SUC[p]; SUC[temp]=-(index); VAL[temp]=++prefix; } if(temp==tail) { VAL[temp]=prefix; S[index].succ=-1; S[index].succ2=1; // // printf("\nLevel 0 Tail: %d, prefix: %d",index,prefix); } else 
if(p<0) { //Store the next sublist index S[index].succ=-p; S[index].succ2=-p; // printf("%d ",S[index].succ); S[-p].prefix=prefix; //// printf(" Prefix %d",S[-p].prefix); } if(index==0) S[0].prefix=0; } */ #endif // #ifndef _LISTRANK_KERNEL_H_
5fcd25097166b9348b39069e0890c570ed6790dc.cu
#ifndef _LISTRANK_KERNEL_H_ #define _LISTRANK_KERNEL_H_ typedef struct { int head; int scratch; int prefix; int succ; int succ2; } Sublist; __global__ void Kernel1(int *VAL, int *SUC, Sublist *S,int s_size, int size, int head,int tail) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; int split; //Set the Splitter - first element of block of logn elements //// printf("\n%d: ",index); //fflush(stdout); split=index*s_size; if(split==tail||split==head) split++; if(index==0) split=head; if(split>=size) return; //// printf("Index=%d\n",split); S[index].head=split; S[index].scratch=SUC[split]; SUC[split]=-(index); //printf("I: %d, Splitter=%d, SUC[i]=%d, Sublist.head=%d, Sublist.scratch=%d",index, split, SUC[split], S[index].head, S[index].scratch); } __global__ void Kernel1LB(int *VAL, int *SUC, Sublist *S,int s_size, int size, int head,int tail, int *Splitters, int numsplitters) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; int split; //Set the Splitter - first element of block of logn elements //printf("\n%d: ",index); //fflush(stdout); if(index>=numsplitters) return; split=Splitters[index]; if(split>=size) return; //// printf("Index=%d\n",split); S[index].head=split; S[index].scratch=SUC[split]; SUC[split]=-(index); //printf("I: %d, Splitter=%d, SUC[i]=%d, Sublist.head=%d, Sublist.scratch=%d",index, split, SUC[split], S[index].head, S[index].scratch); } __global__ void Kernel2(int *VAL, int *SUC, Sublist *S,int s_size, int size, int head, int tail) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; //int split=index*sublist_size; if(index*s_size>=size) return; int p=S[index].scratch; int prefix=0; int temp=p; int count=0; //Traverse and set sublist prefix and sucessor to point to sublist index while(p>=0) { //// printf("\nLoop1,%d:%d",index,p); //fflush(stdout); temp=p; p=SUC[p]; SUC[temp]=-(index); VAL[temp]=++prefix; count++; } #ifdef __DEVICE_EMULATION__ printf("\n%d:%d",index,count); #endif if(temp==tail) { VAL[temp]=prefix; S[index].succ=-1; S[index].succ2=1; // 
// printf("\nLevel 0 Tail: %d, prefix: %d",index,prefix); } else if(p<0) { //Store the next sublist index S[index].succ=-p; S[index].succ2=-p; // printf("%d ",S[index].succ); S[-p].prefix=prefix; //// printf(" Prefix %d",S[-p].prefix); } if(index==0) S[0].prefix=0; } __global__ void Kernel3(int *VAL, int *SUC, Sublist *S,int s_size, int size, int head) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; if(index==0) { int prefix=0; int p=S[0].succ; while(p!=-1) { S[p].prefix=S[p].prefix+prefix; prefix=S[p].prefix; //// printf("\n%d:%d",p, S[p].prefix); p=S[p].succ; } } } //No. of Threads increased for Kernel 4 __global__ void Kernel4(int *VAL, int *SUC, Sublist *S,int sublist_size, int size, int head) { int block=(blockIdx.y*gridDim.x)+blockIdx.x; int index=block*blockDim.x+threadIdx.x; if(index>=size) return; int suc=abs(SUC[index]); VAL[index]=VAL[index]+S[suc].prefix; } __global__ void reduce6(int *g_idata, int *g_odata, unsigned int n) { int blockSize=64; extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = 0; while (i < n) { sdata[tid] += g_idata[i] + g_idata[i+blockSize]; i += gridSize; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void findtail(int *g_idata, int *g_odata, unsigned int n) { int 
index=(blockIdx.x*blockDim.x)+threadIdx.x; if(index<n&&g_idata[index]==-1) { *g_odata=index; //// printf("Tail Index=%d",*g_odata); } } __global__ void findsublisttail(Sublist *S, int *g_odata, unsigned int n) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; if(index<n&&S[index].succ==-1) { *g_odata=index; //// printf("Tail Index=%d",*g_odata); } } /* __global__ void ListRankKernel2(int *LIST, int size) { int i=(blockIdx.x*256)+threadIdx.x; if(i<size) { int temp=0; int mask=0xFFFF; while((LIST[i]>>16)!=-1 && ((LIST[LIST[i]>>16]>>16)!=-1)) { //VAL[i]=VAL[i]+VAL[SUC[i]]; temp=LIST[i]&mask; temp+= LIST[(LIST[i]>>16)]&mask; //SUC[i]=SUC[SUC[i]]; temp+=(LIST[(LIST[i]>>16)]>>16)<<16; atomicExch(&LIST[i],temp); } } }*/ __global__ void SublistKernel1(Sublist *S1, Sublist *S,int s_size, int size, int head,int tail,int level) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; int split; //Set the Splitter - first element of block of logn elements //// printf("\nSublist Size: %d, size: %d",s_size,size); //fflush(stdout); split=index*s_size; if(split==tail||split==head) split++; if(index==0) split=head; if(split>=size) return; //// printf("\n%d: ",index); //fflush(stdout); //// printf("Index=%d\n",split); S[index].head=split; //// printf("\nDone"); //fflush(stdout); S[index].scratch=S1[split].succ; S1[split].succ2=S1[split].succ; S1[split].succ=-(index); // printf("\nI: %d, Splitter=%d, SUC[i]=%d, Sublist.head=%d, Sublist.scratch=%d",index, split, S1[split].succ, S[index].head, S[index].scratch); //fflush(stdout); } __global__ void SublistKernel2(Sublist *S1, Sublist *S,int s_size, int size, int head, int tail) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; int flag=0; //int split=index*sublist_size; if(index*s_size>=size) return; if(index==0) { // printf("\nPrefix Array in SK2 before compute:\n"); for(int i=0;i<3;i++) { // printf("%d ",S1[i].prefix); } } int prefix=0; int p=S[index].scratch; if(S1[p].succ<0) flag=1; if(!flag) prefix=S1[S[index].head].prefix; // printf("\nSK2: 
Index: %d, Head: %d, Prefix %d, Succ %d",index, S[index].head, prefix, S1[p].succ); int temp=p; //Traverse and set sublist prefix and sucessor to point to sublist index while(p>=0&&S1[p].succ>=0) { // printf("\nLoop1,%d:%d",index,p); //fflush(stdout); // printf(" old S1.succ= %d", S1[p].succ); temp=p; //p=SUC[p]; p=S1[p].succ; //SUC[temp]=-(index); S1[temp].succ2=S1[temp].succ; S1[temp].succ=(-index); //VAL[temp]=++prefix; // printf(" old prefix: %d",S1[temp].prefix); S1[temp].prefix+=prefix; // printf(" index: %d, new prefix: %d",temp, S1[temp].prefix); prefix=S1[temp].prefix; } if(p==tail) { //VAL[temp]=prefix; S1[p].prefix+=prefix; S1[p].succ2=S1[p].succ; S1[p].succ=(-index); S[index].succ=-1; // printf("\nIndex %d: Last item prefix: %d",index,S1[p].prefix); // printf("\nPrefix Array in SK2 before compute:\n"); for(int i=0;i<3;i++) { // printf("%d ",S[i].prefix); } } else if(S1[p].succ<0) { //Store the next sublist index int successor=-(S1[p].succ); S[index].succ=successor; //// printf("\nNext Sublist for Index %d, succ=%d, flag=%d",index, S[index].succ,flag); if(!flag) S[successor].prefix=prefix; else S[successor].prefix=S[index].prefix; //// printf(" Prefix %d",S[p].prefix); } if(index==head) { S[head].prefix=S1[head].prefix; //// printf("\nHead Prefix: %d",S[head].prefix); } } __global__ void SublistKernel3(Sublist *S, int size, int head) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; if(index==0) { int prefix=0; int p=S[0].succ; while(p!=-1) { S[p].prefix=S[p].prefix+prefix; prefix=S[p].prefix; //// printf("\n%d:%d",p, S[p].prefix); p=S[p].succ; } } } __global__ void SublistKernel4(Sublist *S1, Sublist *S,int sublist_size, int size, int head) { int index=(blockIdx.y*blockDim.x)+(blockIdx.x*blockDim.x)+threadIdx.x; if(index>=size) return; int suc=abs(S1[index].succ); //VAL[index]=VAL[index]+S[suc].prefix; S1[index].prefix+=S[suc].prefix; //Restore Old Successor Value S1[index].succ=S1[index].succ2; __syncthreads(); if(index==0) { // printf("\nPrefix array 
after addition:\n"); for(int i=0;i<size;i++) { // printf("%3d ",S1[i].prefix); } // printf("\n"); // for(int i=0;i<size;i++) // printf("%3d ",S1[i].succ); } } __global__ void SublistKernel25(Sublist *S1, Sublist *S,int s_size, int size, int head, int tail) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; //int flag=0; //int split=index*sublist_size; if(index*s_size>=size) return; /*if(index==0) { // printf("\nPrefix Array in SK2 before compute:\n"); for(int i=0;i<3;i++) { // printf("%d ",S1[i].prefix); } }*/ int p=S[index].scratch; int prefix=S1[S[index].head].prefix; // printf("\nStarting SK2 Sweep for Element %d, Prefix: %d",index,prefix); int temp=p; while(p>0) { //// printf(" S1[%d].prefix=%d, succ=%d",temp,prefix,S1[temp].succ); temp=p; S1[temp].succ2=S1[temp].succ; p=S1[temp].succ; if(p>0) { S1[temp].succ= -index; prefix+=S1[temp].prefix; S1[temp].prefix=prefix; } } if(temp==tail) { S1[temp].prefix+=prefix; S1[temp].succ2=-1; S1[temp].succ=-index; S[index].succ=-1; // printf("\nSK2 S Tail %d", index); } else if(p<0) { S[index].succ=-p; S[-p].prefix=prefix; // printf("\nNext Sublist for Index %d, succ=%d, prefix: %d",index, S[index].succ, S[S[index].succ].prefix); } if(index==0) { S[0].prefix=0; } } /* __global__ void Kernel2Ord(int *VAL, int *SUC, Sublist *S,int s_size, int size, int head, int tail) { int index=(blockIdx.x*blockDim.x)+threadIdx.x; __shared__ int VALs[BLOCKDIM]; __shared__ int SUCCs[BLOCKDIM]; //int split=index*sublist_size; if(index*s_size>=size) return; int p=S[index].scratch; int prefix=0; int temp=p; //Traverse and set sublist prefix and sucessor to point to sublist index while(p>=0) { //// printf("\nLoop1,%d:%d",index,p); //fflush(stdout); temp=p; p=SUC[p]; SUC[temp]=-(index); VAL[temp]=++prefix; } if(temp==tail) { VAL[temp]=prefix; S[index].succ=-1; S[index].succ2=1; // // printf("\nLevel 0 Tail: %d, prefix: %d",index,prefix); } else if(p<0) { //Store the next sublist index S[index].succ=-p; S[index].succ2=-p; // printf("%d 
",S[index].succ); S[-p].prefix=prefix; //// printf(" Prefix %d",S[-p].prefix); } if(index==0) S[0].prefix=0; } */ #endif // #ifndef _LISTRANK_KERNEL_H_
11aa2baa64dd5ccc6162f0f7c178c62a625385b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*############################################################################# ****************************************************************************** * <name> coproc_transpose </name> ****************************************************************************** * * <purpose> * This file provides routines for transposing multi-dimensional arrays. * </purpose> * *############################################################################# */ #include <stdint.h> #include <iostream> #include <algorithm> #include "coproc_core.h" #include "coproc_transpose.h" using namespace std; /*############################################################################# * Transpose two-dimensional arrays in host memory *############################################################################# */ /****************************************************************************** * 2D-transpose in host memory: * Each element is assumed to have a single data item ******************************************************************************/ template<typename T> void coproc_transposeOnHost2d(const void *__restrict__ h_ptrSrc, void *__restrict__ h_ptrDest, int nelmt1, int nelmt2) { T *src = (T*)(h_ptrSrc); T *dest = (T*)(h_ptrDest); for (int i2=0; i2<nelmt2; ++i2) { for (int i1=0; i1<nelmt1; ++i1) { dest[nelmt2*i1+i2] = src[nelmt1*i2+i1]; } } } /****************************************************************************** * 2D-transpose in host memory: * Each element is assumed to have num_elmt data items given at run-time ******************************************************************************/ template<typename T> void coproc_transposeOnHost2d(int num_elmt, const void *__restrict__ h_ptrSrc, void *__restrict__ h_ptrDest, int nelmt1, int nelmt2) { T *src = (T*)(h_ptrSrc); T *dest = (T*)(h_ptrDest); for (int i2=0; i2<nelmt2; ++i2) { for (int i1=0; i1<nelmt1; ++i1) { for (int 
ielmt=0; ielmt<num_elmt; ++ielmt) {
        dest[num_elmt*(nelmt2*i1+i2)+ielmt] = src[num_elmt*(nelmt1*i2+i1)+ielmt];
      }
    }
  }
}

/******************************************************************************
 * 2D-transpose in host memory:
 * Each element is assumed to have num_elmt data items known at compile-time
 ******************************************************************************/
template<typename T, int num_elmt>
void coproc_transposeOnHost2d(const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int nelmt1, int nelmt2)
{
  // src holds an nelmt1 x nelmt2 array (first index fastest); dest receives
  // the transposed nelmt2 x nelmt1 array. One logical element consists of
  // num_elmt consecutive items of type T which are copied as a group.
  T *src  = (T*)(h_ptrSrc);
  T *dest = (T*)(h_ptrDest);

  for (int i2=0; i2<nelmt2; ++i2) {
    for (int i1=0; i1<nelmt1; ++i1) {
#pragma unroll
      for (int item=0; item<num_elmt; ++item) {
        dest[num_elmt*(nelmt2*i1+i2)+item] = src[num_elmt*(nelmt1*i2+i1)+item];
      }
    }
  }
}

/******************************************************************************
 * Wrapper routine in C++
 ******************************************************************************/
void coproc_transposeOnHost2d(const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int bytes_per_elmt, int nelmt1, int nelmt2)
{
  // Dispatch on the element size in bytes: exact power-of-two sizes go to a
  // compile-time specialisation, everything else to the run-time variant
  // working in the widest integer type that divides the element size.
  switch (bytes_per_elmt) {

  //
  // Check for 1,2,4,8 bytes
  //
  case sizeof(uint8_t):   // 1 byte
    coproc_transposeOnHost2d<uint8_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;
  case sizeof(uint16_t):  // 2 byte
    coproc_transposeOnHost2d<uint16_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;
  case sizeof(uint32_t):  // 4 byte
    coproc_transposeOnHost2d<uint32_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;
  case sizeof(uint64_t):  // 8 byte
    coproc_transposeOnHost2d<uint64_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  //
  // Check for special cases bytes=2^n
  //
  case 16:
    coproc_transposeOnHost2d<uint64_t,2>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;
  case 32:
    coproc_transposeOnHost2d<uint64_t,4>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;
  case 64:
    coproc_transposeOnHost2d<uint64_t,8>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;
  case 128:
    coproc_transposeOnHost2d<uint64_t,16>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;
  case 256:
    coproc_transposeOnHost2d<uint64_t,32>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;
  case 512:
    coproc_transposeOnHost2d<uint64_t,64>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;
  case 1024:
    coproc_transposeOnHost2d<uint64_t,128>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  default:
    //
    // Default: check if we have a multiple of 8,4,2 bytes
    //
    if ((bytes_per_elmt&7) == 0) {
      coproc_transposeOnHost2d<uint64_t>((bytes_per_elmt>>3),
                                         h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    }
    else if ((bytes_per_elmt&3) == 0) {
      coproc_transposeOnHost2d<uint32_t>((bytes_per_elmt>>2),
                                         h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    }
    else if ((bytes_per_elmt&1) == 0) {
      coproc_transposeOnHost2d<uint16_t>((bytes_per_elmt>>1),
                                         h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    }
    else {
      coproc_transposeOnHost2d<uint8_t>(bytes_per_elmt,
                                        h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    }
    break;
  }
  coproc_checkError("coproc_transposeOnHost2d");
}

/******************************************************************************
 * Wrapper routines to be called from Fortran
 ******************************************************************************/
extern "C" {
  void FNAME(coproc_transposeonhost2d)(const void *__restrict__ h_ptrSrc,
                                       void *__restrict__ h_ptrDest,
                                       __INT *bytes_per_elmt,
                                       __INT *nelmt1, __INT *nelmt2)
  {
    coproc_transposeOnHost2d(h_ptrSrc, h_ptrDest,
                             *bytes_per_elmt, *nelmt1, *nelmt2);
  }
};

/*#############################################################################
 * Transpose three-dimensional arrays in host memory
 *#############################################################################
 */

/******************************************************************************
 * Each element is assumed to have a single data item
 ******************************************************************************/
template<typename T>
void coproc_transposeOnHost3d(const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int nelmt1, int nelmt2, int nelmt3)
{
  // Full index reversal: src(i1,i2,i3) -> dest(i3,i2,i1).
  T *src  = (T*)(h_ptrSrc);
  T *dest = (T*)(h_ptrDest);

  for (int i3=0; i3<nelmt3; ++i3) {
    for (int i2=0; i2<nelmt2; ++i2) {
      for (int i1=0; i1<nelmt1; ++i1) {
        dest[nelmt2*nelmt3*i1+nelmt3*i2+i3] =
          src[nelmt1*nelmt2*i3+nelmt1*i2+i1];
      }
    }
  }
}

/******************************************************************************
 * Each element is assumed to have num_elmt data items given at run-time
 ******************************************************************************/
template<typename T>
void coproc_transposeOnHost3d(int num_elmt,
                              const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int nelmt1, int nelmt2, int nelmt3)
{
  T *src  = (T*)(h_ptrSrc);
  T *dest = (T*)(h_ptrDest);

  for (int i3=0; i3<nelmt3; ++i3) {
    for (int i2=0; i2<nelmt2; ++i2) {
      for (int i1=0; i1<nelmt1; ++i1) {
        for (int item=0; item<num_elmt; ++item) {
          dest[num_elmt*(nelmt2*nelmt3*i1+nelmt3*i2+i3)+item] =
            src[num_elmt*(nelmt1*nelmt2*i3+nelmt1*i2+i1)+item];
        }
      }
    }
  }
}

/******************************************************************************
 * Each element is assumed to have num_elmt data items known at compile-time
 ******************************************************************************/
template<typename T, int num_elmt>
void coproc_transposeOnHost3d(const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int nelmt1, int nelmt2, int nelmt3)
{
  T *src  = (T*)(h_ptrSrc);
  T *dest = (T*)(h_ptrDest);

  for (int i3=0; i3<nelmt3; ++i3) {
    for (int i2=0; i2<nelmt2; ++i2) {
      for (int i1=0; i1<nelmt1; ++i1) {
#pragma unroll
        for (int item=0; item<num_elmt; ++item) {
          dest[num_elmt*(nelmt2*nelmt3*i1+nelmt3*i2+i3)+item] =
            src[num_elmt*(nelmt1*nelmt2*i3+nelmt1*i2+i1)+item];
        }
      }
    }
  }
}

/******************************************************************************
 * Wrapper routine in C++
 ******************************************************************************/
void coproc_transposeOnHost3d(const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int bytes_per_elmt,
                              int nelmt1, int nelmt2, int nelmt3)
{
  // Same dispatch strategy as the 2D wrapper above.
  switch (bytes_per_elmt) {

  //
  // Check for 1,2,4,8 bytes
  //
  case sizeof(uint8_t):   // 1 byte
    coproc_transposeOnHost3d<uint8_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    break;
  case sizeof(uint16_t):  // 2 byte
    coproc_transposeOnHost3d<uint16_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    break;
  case sizeof(uint32_t):  // 4 byte
    coproc_transposeOnHost3d<uint32_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    break;
  case sizeof(uint64_t):  // 8 byte
    coproc_transposeOnHost3d<uint64_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    break;

  //
  // Check for special cases bytes=2^n
  //
  case 16:
    coproc_transposeOnHost3d<uint64_t,2>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    break;
  case 32:
    coproc_transposeOnHost3d<uint64_t,4>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    break;
  case 64:
    coproc_transposeOnHost3d<uint64_t,8>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    break;
  case 128:
    coproc_transposeOnHost3d<uint64_t,16>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    break;
  case 256:
    coproc_transposeOnHost3d<uint64_t,32>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    break;
  case 512:
    coproc_transposeOnHost3d<uint64_t,64>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    break;
  case 1024:
    coproc_transposeOnHost3d<uint64_t,128>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    break;

  default:
    //
    // Default: check if we have a multiple of 8,4,2 bytes
    //
    if ((bytes_per_elmt&7) == 0) {
      coproc_transposeOnHost3d<uint64_t>((bytes_per_elmt>>3),
                                         h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    }
    else if ((bytes_per_elmt&3) == 0) {
      coproc_transposeOnHost3d<uint32_t>((bytes_per_elmt>>2),
                                         h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    }
    else if ((bytes_per_elmt&1) == 0) {
      coproc_transposeOnHost3d<uint16_t>((bytes_per_elmt>>1),
                                         h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    }
    else {
      coproc_transposeOnHost3d<uint8_t>(bytes_per_elmt,
                                        h_ptrSrc, h_ptrDest, nelmt1, nelmt2, nelmt3);
    }
    break;
  }
  coproc_checkError("coproc_transposeOnHost3d");
}
/******************************************************************************
 * Wrapper routine to be called from Fortran
 ******************************************************************************/
extern "C" {
  void FNAME(coproc_transposeonhost3d)(const void *__restrict__ h_ptrSrc,
                                       void *__restrict__ h_ptrDest,
                                       __INT *bytes_per_elmt,
                                       __INT *nelmt1, __INT *nelmt2, __INT *nelmt3)
  {
    coproc_transposeOnHost3d(h_ptrSrc, h_ptrDest,
                             *bytes_per_elmt, *nelmt1, *nelmt2, *nelmt3);
  }
};

/*#############################################################################
 * Transpose two-dimensional arrays in device memory
 *#############################################################################
 */

/******************************************************************************
 * Naive 2D-transpose CUDA kernel:
 * Each element is assumed to have a single data item.
 ******************************************************************************/
template<int TILE_DIM, int BLOCK_ROWS, class T>
__global__ void transpose2d_naive_knl(T *Src, T *Dest, int nelmt1, int nelmt2)
{
  // Each thread walks down one column of a TILE_DIM x TILE_DIM tile,
  // advancing BLOCK_ROWS rows per iteration.
  const int col = blockIdx.x * TILE_DIM + threadIdx.x;
  const int row = blockIdx.y * TILE_DIM + threadIdx.y;
  const int idxIn  = col + nelmt1 * row;
  const int idxOut = row + nelmt2 * col;

  for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
    if (idxOut+i < nelmt1*nelmt2)
      Dest[idxOut+i] = Src[idxIn+i*nelmt1];
  }
}

/******************************************************************************
 * Naive 2D-transpose CUDA kernel:
 * Each element is assumed to have a single data item.
 * Remark: same as before but this kernel can handle multiple items per thread.
 ******************************************************************************/
template<int TILE_DIM, int BLOCK_ROWS, class T>
__global__ void transpose2d_naive_mult_knl(T *Src, T *Dest,
                                           int nelmt1, int nelmt2,
                                           int nitem1, int nitem2)
{
  // The grid is tiled nitem1 x nitem2 times over the array so that very
  // large problems fit into the hardware grid-size limits.
  for (int k1=0; k1<nitem1; k1++) {
    for (int k2=0; k2<nitem2; k2++) {
      const int col = (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.x;
      const int row = (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.y;
      const int idxIn  = col + nelmt1 * row;
      const int idxOut = row + nelmt2 * col;

      for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
        if (idxOut+i < nelmt1*nelmt2)
          Dest[idxOut+i] = Src[idxIn+i*nelmt1];
      }
    }
  }
}

/******************************************************************************
 * Coalesced 2D-transpose CUDA kernel:
 * Each element is assumed to have a single data item.
 ******************************************************************************/
template<int TILE_DIM, int BLOCK_ROWS, class T>
__global__ void transpose2d_knl(T *Src, T *Dest, int nelmt1, int nelmt2)
{
  // Staging tile in shared memory; +1 column of padding avoids bank
  // conflicts on the transposed (column-wise) read below.
  __shared__ T tile[TILE_DIM][TILE_DIM+1];

  int col = blockIdx.x * TILE_DIM + threadIdx.x;
  int row = blockIdx.y * TILE_DIM + threadIdx.y;
  int idx = col + row * nelmt1;

  for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
    tile[threadIdx.y+i][threadIdx.x] = Src[idx+i*nelmt1];

  col = blockIdx.y * TILE_DIM + threadIdx.x;
  row = blockIdx.x * TILE_DIM + threadIdx.y;
  idx = col + row * nelmt2;

  __syncthreads();

  for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
    Dest[idx+i*nelmt2] = tile[threadIdx.x][threadIdx.y+i];
}

/******************************************************************************
 * Coalesced 2D-transpose CUDA kernel:
 * Each element is assumed to have a single data item.
 * Remark: same as before but this kernel can handle multiple items per thread.
******************************************************************************/ template<int TILE_DIM, int BLOCK_ROWS, class T> __global__ void transpose2d_mult_knl(T *Src, T *Dest, int nelmt1, int nelmt2, int nitem1, int nitem2) { __shared__ T tile[TILE_DIM][TILE_DIM+1]; for (int k1=0; k1<nitem1; k1++) { for (int k2=0; k2<nitem2; k2++) { int xIndex = (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.x; int yIndex = (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.y; int index = xIndex + yIndex * nelmt1; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) tile[threadIdx.y+i][threadIdx.x] = Src[index+i*nelmt1]; xIndex = (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.x; yIndex = (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.y; index = xIndex + yIndex * nelmt2; __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) Dest[index+i*nelmt2] = tile[threadIdx.x][threadIdx.y+i]; __syncthreads(); } } } /****************************************************************************** * Coalesced 2D-transpose CUDA kernel with offset and bound checking: * Each element is assumed to have a single data item. 
******************************************************************************/ template<int TILE_DIM, int BLOCK_ROWS, class T> __global__ void transpose2d_offset_knl(T *Src, T *Dest, int nelmt1, int nelmt2, int offset1, int offset2) { __shared__ T tile[TILE_DIM][TILE_DIM+1]; int xIndex = offset1 + blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = offset2 + blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + yIndex * nelmt1; if (xIndex < nelmt1) { for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (index+i*nelmt1 < nelmt1*nelmt2) { tile[threadIdx.y+i][threadIdx.x] = Src[index+i*nelmt1]; } } } xIndex = offset2 + blockIdx.y * TILE_DIM + threadIdx.x; yIndex = offset1 + blockIdx.x * TILE_DIM + threadIdx.y; index = xIndex + yIndex * nelmt2; __syncthreads(); if (xIndex < nelmt2) { for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (index+i*nelmt2 < nelmt1*nelmt2) { Dest[index+i*nelmt2] = tile[threadIdx.x][threadIdx.y+i]; } } } } /****************************************************************************** * Coalesced 2D-transpose CUDA kernel with offset and bound checking: * Each element is assumed to have a single data item. * Remark: same as before but this kernel can handle multiple items per thread. 
******************************************************************************/ template<int TILE_DIM, int BLOCK_ROWS, class T> __global__ void transpose2d_mult_offset_knl(T *Src, T *Dest, int nelmt1, int nelmt2, int nitem1, int nitem2, int offset1, int offset2) { __shared__ T tile[TILE_DIM][TILE_DIM+1]; for (int k1=0; k1<nitem1; k1++) { for (int k2=0; k2<nitem2; k2++) { int xIndex = offset1 + (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.x; int yIndex = offset2 + (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.y; int index = xIndex + yIndex * nelmt1; if (xIndex < nelmt1) { for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (index+i*nelmt1 < nelmt1*nelmt2) { tile[threadIdx.y+i][threadIdx.x] = Src[index+i*nelmt1]; } } } xIndex = offset2 + (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.x; yIndex = offset1 + (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.y; index = xIndex + yIndex * nelmt2; __syncthreads(); if (xIndex < nelmt2) { for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (index+i*nelmt2 < nelmt1*nelmt2) { Dest[index+i*nelmt2] = tile[threadIdx.x][threadIdx.y+i]; } } } __syncthreads(); } } } /****************************************************************************** * Coalesced 2D-transpose CUDA kernel: * Each element is assumed to have a single data item ******************************************************************************/ template<int TILE_DIM, int FIXED_DIM, class T> __global__ void transpose2d_rect_knl(T *Src, T *Dest, int nelmt1, int nelmt2) { if (FIXED_DIM == 1) { // First dimension is fixed, i.e. 
not larger than TILE_DIM __shared__ T tile[TILE_DIM*(TILE_DIM-1)]; // All threads read nelmt1*TILE_DIM data items from the source // array from contiguous memory positions int index = blockIdx.x * TILE_DIM * nelmt1; for (int i=0; i<nelmt1; i++) { int tid = threadIdx.x + i * TILE_DIM; tile[tid] = Src[index+tid]; } __syncthreads(); // All threads write TILE_DIM contiguous items from the same // column of the shared memory block and store them in the same // row of the destination array; Repeat until all nelmt1<TILE_DIM // rows have been processed. index = blockIdx.x * TILE_DIM + threadIdx.x; for (int i=0; i<nelmt1; i++) Dest[index + i*nelmt2] = tile[threadIdx.x*nelmt2+i]; } else if (FIXED_DIM == 2) { // Second dimension is fixed, i.e. not larger than TILE_DIM __shared__ T tile[TILE_DIM*(TILE_DIM-1)]; // All threads read TILE_DIM contiguous items from the same row // and store them in the same column of the shared memory block; // Repeat until all nelmt2<TILE_DIM rows have been processed. int index = blockIdx.x * TILE_DIM + threadIdx.x; for (int i=0; i<nelmt2; i++) tile[threadIdx.x*nelmt2+i] = Src[index + i*nelmt1]; __syncthreads(); // All threads write nelmt2*TILE_DIM data items from the shared // memory block into the destination array at contiguous positions index = blockIdx.x * TILE_DIM * nelmt2; for (int i=0; i<nelmt2; i++) { int tid = threadIdx.x + i * TILE_DIM; Dest[index+tid] = tile[tid]; } } } /****************************************************************************** * 2D-transpose in device memory: * Each element is assumed to have a single data item ******************************************************************************/ template<typename T> void coproc_transposeOnDevice2d(const void *__restrict__ d_ptrSrc, void *__restrict__ d_ptrDest, int nelmt1, int nelmt2, hipStream_t stream) { T *ptrSrc = (T*)(d_ptrSrc); T *ptrDest = (T*)(d_ptrDest); const hipDeviceProp_t *devProp = coproc_getCurrentDeviceProp(); const int TILE_DIM = 32; const int 
BLOCK_ROWS = 2; if (nelmt1 >= TILE_DIM && nelmt2 >= TILE_DIM) { // // Transpose matrix using TILE_DIM x TILE_DIM tiles // // Compute number of tiles and grid size const int n1 = nelmt1/TILE_DIM; const int n2 = nelmt2/TILE_DIM; const int m1 = (n1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]); const int m2 = (n2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]); // Coalesced transpose using quadratic tiles dim3 grid(n1/m1, n2/m2); dim3 threads(TILE_DIM,BLOCK_ROWS); if (m1*m2 == 1) { hipLaunchKernelGGL(( transpose2d_knl<TILE_DIM,BLOCK_ROWS,T>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1, nelmt2); } else { hipLaunchKernelGGL(( transpose2d_mult_knl<TILE_DIM,BLOCK_ROWS,T>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1, nelmt2, m1, m2); } const int l1 = m1*grid.x*TILE_DIM; const int l2 = m2*grid.y*TILE_DIM; if (nelmt1 > l1) { const int nn1 = (nelmt1-l1+TILE_DIM-1)/TILE_DIM; const int nn2 = (nelmt2+TILE_DIM-1)/TILE_DIM; const int mm1 = (nn1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]); const int mm2 = (nn2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]); // Coalesced transpose of last few (less than TILE_DIM) rows grid = dim3((nn1+mm1-1)/mm1, (nn2+mm2-1)/mm2); if (mm1*mm2 == 1) { hipLaunchKernelGGL(( transpose2d_offset_knl<TILE_DIM,BLOCK_ROWS,T>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1, nelmt2, l1, 0); } else { hipLaunchKernelGGL(( transpose2d_mult_offset_knl<TILE_DIM,BLOCK_ROWS,T>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1, nelmt2, mm1, mm2, l1, 0); } } if (nelmt2 > l2) { const int nn1 = (nelmt1+TILE_DIM-1)/TILE_DIM; const int nn2 = (nelmt2-l2+TILE_DIM-1)/TILE_DIM; const int mm1 = (nn1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]); const int mm2 = (nn2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]); // Coalesced transpose of last few (less than TILE_DIM) columns grid = dim3((nn1+mm1-1)/mm1, (nn2+mm2-1)/mm2); if (mm1*mm2 == 1) { hipLaunchKernelGGL(( 
transpose2d_offset_knl<TILE_DIM,BLOCK_ROWS,T>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1, nelmt2, 0, l2); } else { hipLaunchKernelGGL(( transpose2d_mult_offset_knl<TILE_DIM,BLOCK_ROWS,T>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1, nelmt2, mm1, mm2, 0, l2); } } } else if (nelmt1 > TILE_DIM) { // // Transpose array with first dimension larger than TILE_DIM // // Compute number of tiles and grid size const int n1 = nelmt1/TILE_DIM; const int m1 = (n1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]); dim3 grid(n1/m1,1); dim3 threads(TILE_DIM,1); hipLaunchKernelGGL(( transpose2d_rect_knl<TILE_DIM,2>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1, nelmt2); const int l1 = m1*grid.x*TILE_DIM; if (nelmt1 > l1) { const int nn1 = (nelmt1-l1+TILE_DIM-1)/TILE_DIM; const int mm1 = (nn1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]); // Coalesced transpose of last few (less than TILE_DIM) rows grid = dim3((nn1+mm1-1)/mm1,1); dim3 threads(TILE_DIM,BLOCK_ROWS); hipLaunchKernelGGL(( transpose2d_offset_knl<TILE_DIM,BLOCK_ROWS,T>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1, nelmt2, l1, 0); } } else if (nelmt2 > TILE_DIM) { // // Transpose array with second dimension larger than TILE_DIM // // Compute number of tiles and grid size const int n2 = nelmt2/TILE_DIM; const int m2 = (n2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]); dim3 grid(1,n2/m2); dim3 threads(TILE_DIM,1); hipLaunchKernelGGL(( transpose2d_rect_knl<TILE_DIM,1>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1, nelmt2); const int l2 = m2*grid.y*TILE_DIM; if (nelmt2 > l2) { const int nn2 = (nelmt2-l2+TILE_DIM-1)/TILE_DIM; const int mm2 = (nn2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]); // Coalesced transpose of last few (less than TILE_DIM) rows grid = dim3(1,(nn2+mm2-1)/mm2); dim3 threads(TILE_DIM,BLOCK_ROWS); hipLaunchKernelGGL(( transpose2d_offset_knl<TILE_DIM,BLOCK_ROWS,T>), dim3(grid), dim3(threads), 
0, stream, ptrSrc, ptrDest, nelmt1, nelmt2, 0, l2); } } else { // // Transpose array with both dimensions smaller than TILE_DIM // dim3 grid(1,1); dim3 threads(TILE_DIM,BLOCK_ROWS); hipLaunchKernelGGL(( transpose2d_offset_knl<TILE_DIM,BLOCK_ROWS>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1, nelmt2, 0, 0); } } /****************************************************************************** * Naive 2D-transpose CUDA kernel: * Each element is assumed to have num_elmt data items given at run-time. ******************************************************************************/ template<int TILE_DIM, int BLOCK_ROWS, class T> __global__ void transpose2d_naive_knl(int num_elmt, T *Src, T *Dest, int nelmt1, int nelmt2) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + nelmt1 * yIndex; //int index_out = yIndex + nelmt2 * (xIndex); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (index_in+i*nelmt1 < nelmt1*nelmt2) { int xIdx = index_in/nelmt1; int yIdx = index_in-xIdx*nelmt1; xIndex = xIdx/num_elmt; yIndex = yIndex*num_elmt + xIdx%num_elmt; int index_out = yIndex + nelmt2 * xIndex; Dest[index_out] = Src[index_in+i*nelmt1]; } } } /****************************************************************************** * Naive 2D-transpose CUDA kernel: * Each element is assumed to have num_elmt data items given at run-time. * Remark: same as before but this kernel can handle multiple items per thread. 
******************************************************************************/ template<int TILE_DIM, int BLOCK_ROWS, class T> __global__ void transpose2d_naive_mult_knl(int num_elmt, T *Src, T *Dest, int nelmt1, int nelmt2, int nitem1, int nitem2) { for (int k1=0; k1<nitem1; k1++) { for (int k2=0; k2<nitem2; k2++) { int xIndex = (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.x; int yIndex = (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.y; int index_in = xIndex + nelmt1 * yIndex; int index_out = yIndex + nelmt2 * xIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (index_out+i < nelmt1*nelmt2) Dest[index_out+i] = Src[index_in+i*nelmt1]; } } } } /****************************************************************************** * 2D-transpose in device memory: * Each element is assumed to have num_elmt data items given at run-time ******************************************************************************/ template<typename T> void coproc_transposeOnDevice2d(int num_elmt, const void *__restrict__ d_ptrSrc, void *__restrict__ d_ptrDest, int nelmt1, int nelmt2, hipStream_t stream) { T *ptrSrc = (T*)(d_ptrSrc); T *ptrDest = (T*)(d_ptrDest); const hipDeviceProp_t *devProp = coproc_getCurrentDeviceProp(); const int TILE_DIM = 32; const int BLOCK_ROWS = 2; // Compute number of tiles and grid size const int n1 = (nelmt1+TILE_DIM-1)/TILE_DIM; const int n2 = (nelmt2+TILE_DIM-1)/TILE_DIM; const int m1 = (n1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]); const int m2 = (n2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]); dim3 grid((n1+m1-1)/m1, (n2+m2-1)/m2); dim3 threads(TILE_DIM,BLOCK_ROWS); // Naive transpose of rectangular arrays if (m1*m2 == 1) { hipLaunchKernelGGL(( transpose2d_naive_knl<TILE_DIM,BLOCK_ROWS,T>), dim3(grid), dim3(threads), 0, stream, num_elmt, ptrSrc, ptrDest, nelmt1, nelmt2); } else { hipLaunchKernelGGL(( transpose2d_naive_mult_knl<TILE_DIM,BLOCK_ROWS,T>), dim3(grid), dim3(threads), 0, stream, num_elmt, ptrSrc, ptrDest, nelmt1, 
nelmt2, m1, m2); } } /****************************************************************************** * Naive 2D-transpose CUDA kernel: * Each element is assumed to have num_elmt data items given at run-time. ******************************************************************************/ template<int TILE_DIM, int BLOCK_ROWS, int num_elmt, class T> __global__ void transpose2d_naive_knl(T *Src, T *Dest, int nelmt1, int nelmt2) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + nelmt1 * yIndex; //int index_out = yIndex + nelmt2 * (xIndex); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { int index = index_in+i*nelmt1; if (index < nelmt1*nelmt2) { int yIdx = index/nelmt1; int xIdx = index-yIdx*nelmt1; xIndex = xIdx/num_elmt; yIndex = yIndex*num_elmt + xIdx%num_elmt; int index_out = yIdx + nelmt2 * xIdx; Dest[index_out] = Src[index]; } } } /****************************************************************************** * Naive 2D-transpose CUDA kernel: * Each element is assumed to have num_elmt data items given at run-time. * Remark: same as before but this kernel can handle multiple items per thread. 
******************************************************************************/ template<int TILE_DIM, int BLOCK_ROWS, int num_elmt, class T> __global__ void transpose2d_naive_mult_knl(T *Src, T *Dest, int nelmt1, int nelmt2, int nitem1, int nitem2) { for (int k1=0; k1<nitem1; k1++) { for (int k2=0; k2<nitem2; k2++) { int xIndex = (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.x; int yIndex = (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.y; int index_in = xIndex + nelmt1 * yIndex; int index_out = yIndex + nelmt2 * xIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if ((index_out+i < nelmt1*nelmt2) && (index_in+i*nelmt1 < nelmt1*nelmt2)) Dest[index_out+i] = Src[index_in+i*nelmt1]; } } } } /****************************************************************************** * 2D-transpose in device memory: * Each element is assumed to have num_elmt data items known at compile-time ******************************************************************************/ template<typename T, int num_elmt> void coproc_transposeOnDevice2d(const void *__restrict__ d_ptrSrc, void *__restrict__ d_ptrDest, int nelmt1, int nelmt2, hipStream_t stream) { T *ptrSrc = (T*)(d_ptrSrc); T *ptrDest = (T*)(d_ptrDest); const hipDeviceProp_t *devProp = coproc_getCurrentDeviceProp(); const int TILE_DIM = 32; const int BLOCK_ROWS = 2; // Compute number of tiles and grid size const int n1 = (nelmt1*num_elmt+TILE_DIM-1)/TILE_DIM; const int n2 = (nelmt2+TILE_DIM-1)/TILE_DIM; const int m1 = (n1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]); const int m2 = (n2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]); dim3 grid((n1+m1-1)/m1, (n2+m2-1)/m2); dim3 threads(TILE_DIM,BLOCK_ROWS); cout << nelmt1 << "," << num_elmt << ":" << nelmt2 << endl; // Naive transpose of rectangular arrays if (m1*m2 == 1) { hipLaunchKernelGGL(( transpose2d_naive_knl<TILE_DIM,BLOCK_ROWS,num_elmt,T>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1*num_elmt, nelmt2); } else { hipLaunchKernelGGL(( 
transpose2d_naive_mult_knl<TILE_DIM,BLOCK_ROWS,num_elmt,T>), dim3(grid), dim3(threads), 0, stream, ptrSrc, ptrDest, nelmt1*num_elmt, nelmt2, m1, m2); } } /****************************************************************************** * Wrapper routine in C++ ******************************************************************************/ void coproc_transposeOnDevice2d(const void *__restrict__ d_ptrSrc, void *__restrict__ d_ptrDest, int bytes_per_elmt, int nelmt1, int nelmt2, hipStream_t stream) { hipEvent_t start,stop; float inTime; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,stream); switch (bytes_per_elmt) { // // Check for 1,2,4,8 bytes // case sizeof(uchar1): // 1 byte coproc_transposeOnDevice2d<uchar1>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(uchar2): // 2 byte coproc_transposeOnDevice2d<uchar2>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(uchar3): // 3 byte coproc_transposeOnDevice2d<uchar3>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(float1): // 4 byte coproc_transposeOnDevice2d<float1>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(float2): // 8 byte coproc_transposeOnDevice2d<float2>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(float3): // 12 byte coproc_transposeOnDevice2d<float3>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(float4): // 16 byte coproc_transposeOnDevice2d<float4>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; // // Multiples of float3 : n*12 byte // case 24: cout << "THIS IS IT" << endl; coproc_transposeOnDevice2d<float3,2>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 60: coproc_transposeOnDevice2d<float3,5>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 72: coproc_transposeOnDevice2d<float3,6>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 84: coproc_transposeOnDevice2d<float3,7>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 108: 
coproc_transposeOnDevice2d<float3,9>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 120: coproc_transposeOnDevice2d<float3,10>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 132: coproc_transposeOnDevice2d<float3,11>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 156: coproc_transposeOnDevice2d<float3,13>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 168: coproc_transposeOnDevice2d<float3,14>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 180: coproc_transposeOnDevice2d<float3,15>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 204: coproc_transposeOnDevice2d<float3,17>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 216: coproc_transposeOnDevice2d<float3,18>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 228: coproc_transposeOnDevice2d<float3,19>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 252: coproc_transposeOnDevice2d<float3,21>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; // // Multiples of float4 : n * 16 byte // case 32: coproc_transposeOnDevice2d<float4,2>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 48: coproc_transposeOnDevice2d<float4,3>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 64: coproc_transposeOnDevice2d<float4,4>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 80: coproc_transposeOnDevice2d<float4,5>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 96: coproc_transposeOnDevice2d<float4,6>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 112: coproc_transposeOnDevice2d<float4,7>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 128: coproc_transposeOnDevice2d<float4,8>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 144: coproc_transposeOnDevice2d<float4,9>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 160: coproc_transposeOnDevice2d<float4,10>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 176: coproc_transposeOnDevice2d<float4,11>(d_ptrSrc, 
d_ptrDest, nelmt1, nelmt2, stream); break; case 192: coproc_transposeOnDevice2d<float4,12>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 208: coproc_transposeOnDevice2d<float4,13>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 224: coproc_transposeOnDevice2d<float4,14>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 240: coproc_transposeOnDevice2d<float4,15>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 256: coproc_transposeOnDevice2d<float4,16>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 512: coproc_transposeOnDevice2d<float4,32>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 1024: coproc_transposeOnDevice2d<float4,64>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; default: // // Default: check if we have a multiple of 16,8,4,2 bytes // if ((bytes_per_elmt&15) == 0) { coproc_transposeOnDevice2d<float4>((bytes_per_elmt>>4), d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); } if ((bytes_per_elmt&7) == 0) { coproc_transposeOnDevice2d<float2>((bytes_per_elmt>>3), d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); } else if ((bytes_per_elmt&3) == 0) { coproc_transposeOnDevice2d<float1>((bytes_per_elmt>>2), d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); } else if ((bytes_per_elmt&1) == 0) { coproc_transposeOnDevice2d<char2>((bytes_per_elmt>>1), d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); } else { coproc_transposeOnDevice2d<char1>(bytes_per_elmt, d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); } break; } coproc_checkError("coproc_transposeOnDevice2d"); hipEventRecord(stop,stream); hipEventSynchronize(stop); hipEventElapsedTime(&inTime, start, stop); cout << "Bandwidth: " << 2.0f*1000.0f*nelmt1*nelmt2*bytes_per_elmt/(1024*1024*1024)/(inTime) << "GB/s" << endl; } /****************************************************************************** * Wrapper routines to be called from Fortran ******************************************************************************/ extern "C" { void 
FNAME(coproc_transposeondevice2d)(const void *__restrict__ d_ptrSrc, void *__restrict__ d_ptrDest, __INT *bytes_per_elmt, __INT *nelmt1, __INT *nelmt2, __I64 *stream) { coproc_transposeOnDevice2d(d_ptrSrc, d_ptrDest, *bytes_per_elmt, *nelmt1, *nelmt2, (hipStream_t)(*stream)); } }; /*############################################################################# * Transpose three-dimensional arrays in device memory *############################################################################# */ /****************************************************************************** * Naive 3D-transpose CUDA kernel: * Each element is assumed to have a single data item ******************************************************************************/ template<class T> __global__ void transpose3d_knl(T *Src, T *Dest, int nelmt1, int nelmt2, int nelmt3, int offset1=0, int offset2=0, int offset3=0) { int xIndex = offset1 + blockIdx.x + threadIdx.x; int yIndex = offset2 + blockIdx.y + threadIdx.y; int zIndex = offset3 + blockIdx.z + threadIdx.z; int index_in = xIndex + nelmt1 * yIndex + nelmt1 * nelmt2 * zIndex; int index_out = zIndex + nelmt3 * yIndex + nelmt2 * nelmt3 * xIndex; Dest[index_out] = Src[index_in]; } /****************************************************************************** * Naive 3D-transpose CUDA kernel: * Each element is assumed to have num_elmt data items given at run-time ******************************************************************************/ template<class T> __global__ void transpose3d_knl(int num_elmt, T *Src, T *Dest, int nelmt1, int nelmt2, int nelmt3, int offset1=0, int offset2=0, int offset3=0) { int xIndex = offset1 + blockIdx.x + threadIdx.x; int yIndex = offset2 + blockIdx.y + threadIdx.y; int zIndex = offset3 + blockIdx.z + threadIdx.z; int index_in = (xIndex + nelmt1 * yIndex + nelmt1 * nelmt2 * zIndex) * num_elmt; int index_out = (zIndex + nelmt3 * yIndex + nelmt2 * nelmt3 * xIndex) * num_elmt; for (int ielmt=0; ielmt<num_elmt; ++ielmt) 
Dest[index_out+ielmt] = Src[index_in+ielmt]; } /****************************************************************************** * Naive 3D-transpose CUDA kernel: * Each element is assumed to have num_elmt data items known at compile-time ******************************************************************************/ template<class T, int num_elmt> __global__ void transpose3d_knl(T *Src, T *Dest, int nelmt1, int nelmt2, int nelmt3, int offset1=0, int offset2=0, int offset3=0) { int xIndex = offset1 + blockIdx.x + threadIdx.x; int yIndex = offset2 + blockIdx.y + threadIdx.y; int zIndex = offset3 + blockIdx.z + threadIdx.z; int index_in = (xIndex + nelmt1 * yIndex + nelmt1 * nelmt2 * zIndex) * num_elmt; int index_out = (zIndex + nelmt3 * yIndex + nelmt2 * nelmt3 * xIndex) * num_elmt; for (int ielmt=0; ielmt<num_elmt; ++ielmt) Dest[index_out+ielmt] = Src[index_in+ielmt]; }
11aa2baa64dd5ccc6162f0f7c178c62a625385b2.cu
/*#############################################################################
 ******************************************************************************
 * <name> coproc_transpose </name>
 ******************************************************************************
 *
 * <purpose>
 * This file provides routines for transposing multi-dimensional arrays.
 * </purpose>
 *
 *#############################################################################
 */

#include <stdint.h>
#include <iostream>
#include <algorithm>

#include "coproc_core.h"
#include "coproc_transpose.h"

using namespace std;

/*#############################################################################
 * Transpose two-dimensional arrays in host memory
 *#############################################################################
 */

/******************************************************************************
 * 2D-transpose in host memory:
 * Each element is assumed to have a single data item.
 * Maps src(i1,i2) -> dest(i2,i1); src is nelmt1 x nelmt2 with the first
 * index fastest (column-major, matching the Fortran callers below).
 ******************************************************************************/
template<typename T>
void coproc_transposeOnHost2d(const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int nelmt1, int nelmt2)
{
  T *src  = (T*)(h_ptrSrc);
  T *dest = (T*)(h_ptrDest);

  for (int i2=0; i2<nelmt2; ++i2) {
    for (int i1=0; i1<nelmt1; ++i1) {
      dest[nelmt2*i1+i2] = src[nelmt1*i2+i1];
    }
  }
}

/******************************************************************************
 * 2D-transpose in host memory:
 * Each element is assumed to have num_elmt data items given at run-time;
 * each logical element is a contiguous group of num_elmt items.
 ******************************************************************************/
template<typename T>
void coproc_transposeOnHost2d(int num_elmt,
                              const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int nelmt1, int nelmt2)
{
  T *src  = (T*)(h_ptrSrc);
  T *dest = (T*)(h_ptrDest);

  for (int i2=0; i2<nelmt2; ++i2) {
    for (int i1=0; i1<nelmt1; ++i1) {
      for (int ielmt=0; ielmt<num_elmt; ++ielmt) {
        dest[num_elmt*(nelmt2*i1+i2)+ielmt] =
          src[num_elmt*(nelmt1*i2+i1)+ielmt];
      }
    }
  }
}

/******************************************************************************
 * 2D-transpose in host memory:
 * Each element is assumed to have num_elmt data items known at compile-time,
 * so the innermost copy loop can be unrolled.
 ******************************************************************************/
template<typename T, int num_elmt>
void coproc_transposeOnHost2d(const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int nelmt1, int nelmt2)
{
  T *src  = (T*)(h_ptrSrc);
  T *dest = (T*)(h_ptrDest);

  for (int i2=0; i2<nelmt2; ++i2) {
    for (int i1=0; i1<nelmt1; ++i1) {
#pragma unroll
      for (int ielmt=0; ielmt<num_elmt; ++ielmt) {
        dest[num_elmt*(nelmt2*i1+i2)+ielmt] =
          src[num_elmt*(nelmt1*i2+i1)+ielmt];
      }
    }
  }
}

/******************************************************************************
 * Wrapper routine in C++.
 * Dispatches on the element size in bytes: native integer widths and
 * power-of-two multiples of 8 bytes go to the compile-time variant;
 * everything else falls back to the run-time variant with the widest
 * integer type that divides the element size.
 ******************************************************************************/
void coproc_transposeOnHost2d(const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int bytes_per_elmt,
                              int nelmt1, int nelmt2)
{
  switch (bytes_per_elmt) {

    //
    // Check for 1,2,4,8 bytes
    //
  case sizeof(uint8_t): // 1 byte
    coproc_transposeOnHost2d<uint8_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  case sizeof(uint16_t): // 2 byte
    coproc_transposeOnHost2d<uint16_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  case sizeof(uint32_t): // 4 byte
    coproc_transposeOnHost2d<uint32_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  case sizeof(uint64_t): // 8 byte
    coproc_transposeOnHost2d<uint64_t>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

    //
    // Check for special cases bytes=2^n
    //
  case 16:
    coproc_transposeOnHost2d<uint64_t,2>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  case 32:
    coproc_transposeOnHost2d<uint64_t,4>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  case 64:
    coproc_transposeOnHost2d<uint64_t,8>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  case 128:
    coproc_transposeOnHost2d<uint64_t,16>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  case 256:
    coproc_transposeOnHost2d<uint64_t,32>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  case 512:
    coproc_transposeOnHost2d<uint64_t,64>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  case 1024:
    coproc_transposeOnHost2d<uint64_t,128>(h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    break;

  default:
    //
    // Default: check if we have a multiple of 8,4,2 bytes
    //
    if ((bytes_per_elmt&7) == 0) {
      coproc_transposeOnHost2d<uint64_t>((bytes_per_elmt>>3),
                                         h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    }
    else if ((bytes_per_elmt&3) == 0) {
      coproc_transposeOnHost2d<uint32_t>((bytes_per_elmt>>2),
                                         h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    }
    else if ((bytes_per_elmt&1) == 0) {
      coproc_transposeOnHost2d<uint16_t>((bytes_per_elmt>>1),
                                         h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    }
    else {
      coproc_transposeOnHost2d<uint8_t>(bytes_per_elmt,
                                        h_ptrSrc, h_ptrDest, nelmt1, nelmt2);
    }
    break;
  }
  coproc_checkError("coproc_transposeOnHost2d");
}

/******************************************************************************
 * Wrapper routines to be called from Fortran.
 * Fortran passes everything by reference, hence the pointer dereferences.
 ******************************************************************************/
extern "C" {
  void FNAME(coproc_transposeonhost2d)(const void *__restrict__ h_ptrSrc,
                                       void *__restrict__ h_ptrDest,
                                       __INT *bytes_per_elmt,
                                       __INT *nelmt1, __INT *nelmt2)
  {
    coproc_transposeOnHost2d(h_ptrSrc, h_ptrDest,
                             *bytes_per_elmt, *nelmt1, *nelmt2);
  }
};

/*#############################################################################
 * Transpose three-dimensional arrays in host memory
 *#############################################################################
 */

/******************************************************************************
 * Each element is assumed to have a single data item.
 * Maps src(i1,i2,i3) -> dest(i3,i2,i1): first and third dimensions swapped
 * for i1-fastest (column-major) storage.
 ******************************************************************************/
template<typename T>
void coproc_transposeOnHost3d(const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int nelmt1, int nelmt2, int nelmt3)
{
  T *src  = (T*)(h_ptrSrc);
  T *dest = (T*)(h_ptrDest);

  for (int i3=0; i3<nelmt3; ++i3) {
    for (int i2=0; i2<nelmt2; ++i2) {
      for (int i1=0; i1<nelmt1; ++i1) {
        dest[nelmt2*nelmt3*i1+nelmt3*i2+i3] =
          src[nelmt1*nelmt2*i3+nelmt1*i2+i1];
      }
    }
  }
}

/******************************************************************************
 * Each element is assumed to have num_elmt data items given at run-time.
 ******************************************************************************/
template<typename T>
void coproc_transposeOnHost3d(int num_elmt,
                              const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int nelmt1, int nelmt2, int nelmt3)
{
  T *src  = (T*)(h_ptrSrc);
  T *dest = (T*)(h_ptrDest);

  for (int i3=0; i3<nelmt3; ++i3) {
    for (int i2=0; i2<nelmt2; ++i2) {
      for (int i1=0; i1<nelmt1; ++i1) {
        for (int ielmt=0; ielmt<num_elmt; ++ielmt) {
          dest[num_elmt*(nelmt2*nelmt3*i1+nelmt3*i2+i3)+ielmt] =
            src[num_elmt*(nelmt1*nelmt2*i3+nelmt1*i2+i1)+ielmt];
        }
      }
    }
  }
}

/******************************************************************************
 * Each element is assumed to have num_elmt data items known at compile-time,
 * so the innermost copy loop can be unrolled.
 ******************************************************************************/
template<typename T, int num_elmt>
void coproc_transposeOnHost3d(const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int nelmt1, int nelmt2, int nelmt3)
{
  T *src  = (T*)(h_ptrSrc);
  T *dest = (T*)(h_ptrDest);

  for (int i3=0; i3<nelmt3; ++i3) {
    for (int i2=0; i2<nelmt2; ++i2) {
      for (int i1=0; i1<nelmt1; ++i1) {
#pragma unroll
        for (int ielmt=0; ielmt<num_elmt; ++ielmt) {
          dest[num_elmt*(nelmt2*nelmt3*i1+nelmt3*i2+i3)+ielmt] =
            src[num_elmt*(nelmt1*nelmt2*i3+nelmt1*i2+i1)+ielmt];
        }
      }
    }
  }
}

/******************************************************************************
 * Wrapper routine in C++.
 * Same byte-size dispatch strategy as the 2D host wrapper above.
 ******************************************************************************/
void coproc_transposeOnHost3d(const void *__restrict__ h_ptrSrc,
                              void *__restrict__ h_ptrDest,
                              int bytes_per_elmt,
                              int nelmt1, int nelmt2, int nelmt3)
{
  switch (bytes_per_elmt) {

    //
    // Check for 1,2,4,8 bytes
    //
  case sizeof(uint8_t): // 1 byte
    coproc_transposeOnHost3d<uint8_t>(h_ptrSrc, h_ptrDest,
                                      nelmt1, nelmt2, nelmt3);
    break;

  case sizeof(uint16_t): // 2 byte
    coproc_transposeOnHost3d<uint16_t>(h_ptrSrc, h_ptrDest,
                                       nelmt1, nelmt2, nelmt3);
    break;

  case sizeof(uint32_t): // 4 byte
    coproc_transposeOnHost3d<uint32_t>(h_ptrSrc, h_ptrDest,
                                       nelmt1, nelmt2, nelmt3);
    break;

  case sizeof(uint64_t): // 8 byte
    coproc_transposeOnHost3d<uint64_t>(h_ptrSrc, h_ptrDest,
                                       nelmt1, nelmt2, nelmt3);
    break;

    //
    // Check for special cases bytes=2^n
    //
  case 16:
    coproc_transposeOnHost3d<uint64_t,2>(h_ptrSrc, h_ptrDest,
                                         nelmt1, nelmt2, nelmt3);
    break;

  case 32:
    coproc_transposeOnHost3d<uint64_t,4>(h_ptrSrc, h_ptrDest,
                                         nelmt1, nelmt2, nelmt3);
    break;

  case 64:
    coproc_transposeOnHost3d<uint64_t,8>(h_ptrSrc, h_ptrDest,
                                         nelmt1, nelmt2, nelmt3);
    break;

  case 128:
    coproc_transposeOnHost3d<uint64_t,16>(h_ptrSrc, h_ptrDest,
                                          nelmt1, nelmt2, nelmt3);
    break;

  case 256:
    coproc_transposeOnHost3d<uint64_t,32>(h_ptrSrc, h_ptrDest,
                                          nelmt1, nelmt2, nelmt3);
    break;

  case 512:
    coproc_transposeOnHost3d<uint64_t,64>(h_ptrSrc, h_ptrDest,
                                          nelmt1, nelmt2, nelmt3);
    break;

  case 1024:
    coproc_transposeOnHost3d<uint64_t,128>(h_ptrSrc, h_ptrDest,
                                           nelmt1, nelmt2, nelmt3);
    break;

  default:
    //
    // Default: check if we have a multiple of 8,4,2 bytes
    //
    if ((bytes_per_elmt&7) == 0) {
      coproc_transposeOnHost3d<uint64_t>((bytes_per_elmt>>3),
                                         h_ptrSrc, h_ptrDest,
                                         nelmt1, nelmt2, nelmt3);
    }
    else if ((bytes_per_elmt&3) == 0) {
      coproc_transposeOnHost3d<uint32_t>((bytes_per_elmt>>2),
                                         h_ptrSrc, h_ptrDest,
                                         nelmt1, nelmt2, nelmt3);
    }
    else if ((bytes_per_elmt&1) == 0) {
      coproc_transposeOnHost3d<uint16_t>((bytes_per_elmt>>1),
                                         h_ptrSrc, h_ptrDest,
                                         nelmt1, nelmt2, nelmt3);
    }
    else {
      coproc_transposeOnHost3d<uint8_t>(bytes_per_elmt,
                                        h_ptrSrc, h_ptrDest,
                                        nelmt1, nelmt2, nelmt3);
    }
    break;
  }
  coproc_checkError("coproc_transposeOnHost3d");
}

/******************************************************************************
 * Wrapper routine to be called from Fortran.
 ******************************************************************************/
extern "C" {
  void FNAME(coproc_transposeonhost3d)(const void *__restrict__ h_ptrSrc,
                                       void *__restrict__ h_ptrDest,
                                       __INT *bytes_per_elmt,
                                       __INT *nelmt1, __INT *nelmt2,
                                       __INT *nelmt3)
  {
    coproc_transposeOnHost3d(h_ptrSrc, h_ptrDest,
                             *bytes_per_elmt, *nelmt1, *nelmt2, *nelmt3);
  }
};

/*#############################################################################
 * Transpose two-dimensional arrays in device memory
 *#############################################################################
 */

/******************************************************************************
 * Naive 2D-transpose CUDA kernel:
 * Each element is assumed to have a single data item.
 * Expects a TILE_DIM x BLOCK_ROWS thread block; each thread copies
 * TILE_DIM/BLOCK_ROWS elements of its tile column.
 * NOTE(review): only index_out is bounds-checked; the read at
 * index_in+i*nelmt1 is not guarded — confirm callers size the grid so the
 * source access stays in range.
 ******************************************************************************/
template<int TILE_DIM, int BLOCK_ROWS, class T>
__global__ void transpose2d_naive_knl(T *Src, T *Dest,
                                      int nelmt1, int nelmt2)
{
  int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
  int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;

  int index_in  = xIndex + nelmt1 * yIndex;
  int index_out = yIndex + nelmt2 * xIndex;

  for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
    if (index_out+i < nelmt1*nelmt2)
      Dest[index_out+i] = Src[index_in+i*nelmt1];
  }
}

/******************************************************************************
 * Naive 2D-transpose CUDA kernel:
 * Each element is assumed to have a single data item.
 * Remark: same as before but this kernel can handle multiple items per
 * thread — nitem1 x nitem2 virtual grid replications are processed by
 * striding the block indices with gridDim.
 ******************************************************************************/
template<int TILE_DIM, int BLOCK_ROWS, class T>
__global__ void transpose2d_naive_mult_knl(T *Src, T *Dest,
                                           int nelmt1, int nelmt2,
                                           int nitem1, int nitem2)
{
  for (int k1=0; k1<nitem1; k1++) {
    for (int k2=0; k2<nitem2; k2++) {

      int xIndex = (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.x;
      int yIndex = (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.y;

      int index_in  = xIndex + nelmt1 * yIndex;
      int index_out = yIndex + nelmt2 * xIndex;

      for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
        if (index_out+i < nelmt1*nelmt2)
          Dest[index_out+i] = Src[index_in+i*nelmt1];
      }
    }
  }
}

/******************************************************************************
 * Coalesced 2D-transpose CUDA kernel:
 * Each element is assumed to have a single data item.
 * Stages a TILE_DIM x TILE_DIM tile through shared memory so both the
 * global read and the global write are contiguous; the +1 column pad
 * avoids shared-memory bank conflicts on the transposed read.
 * No bounds checks: both dimensions must be covered by full tiles.
 ******************************************************************************/
template<int TILE_DIM, int BLOCK_ROWS, class T>
__global__ void transpose2d_knl(T *Src, T *Dest,
                                int nelmt1, int nelmt2)
{
  __shared__ T tile[TILE_DIM][TILE_DIM+1];

  int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
  int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
  int index  = xIndex + yIndex * nelmt1;

  // Load the tile row-wise from the source.
  for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
    tile[threadIdx.y+i][threadIdx.x] = Src[index+i*nelmt1];

  // Recompute the (mirrored) block position in the destination.
  xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
  yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
  index  = xIndex + yIndex * nelmt2;

  // Barrier between the shared-memory writes and the transposed reads.
  __syncthreads();

  for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
    Dest[index+i*nelmt2] = tile[threadIdx.x][threadIdx.y+i];
}

/******************************************************************************
 * Coalesced 2D-transpose CUDA kernel:
 * Each element is assumed to have a single data item.
 * Remark: same as before but this kernel can handle multiple items per
 * thread; the trailing __syncthreads() keeps the shared tile from being
 * overwritten before all threads have drained it.
 ******************************************************************************/
template<int TILE_DIM, int BLOCK_ROWS, class T>
__global__ void transpose2d_mult_knl(T *Src, T *Dest,
                                     int nelmt1, int nelmt2,
                                     int nitem1, int nitem2)
{
  __shared__ T tile[TILE_DIM][TILE_DIM+1];

  for (int k1=0; k1<nitem1; k1++) {
    for (int k2=0; k2<nitem2; k2++) {

      int xIndex = (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.x;
      int yIndex = (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.y;
      int index  = xIndex + yIndex * nelmt1;

      for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
        tile[threadIdx.y+i][threadIdx.x] = Src[index+i*nelmt1];

      xIndex = (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.x;
      yIndex = (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.y;
      index  = xIndex + yIndex * nelmt2;

      __syncthreads();

      for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
        Dest[index+i*nelmt2] = tile[threadIdx.x][threadIdx.y+i];

      // Protect the tile before the next k1/k2 iteration reloads it.
      __syncthreads();
    }
  }
}

/******************************************************************************
 * Coalesced 2D-transpose CUDA kernel with offset and bound checking:
 * Each element is assumed to have a single data item.
 * Used for the leftover strips of arrays that are not tile-multiples;
 * offset1/offset2 shift the processed window. The __syncthreads() sits
 * outside the divergent guards, so all threads reach it.
 ******************************************************************************/
template<int TILE_DIM, int BLOCK_ROWS, class T>
__global__ void transpose2d_offset_knl(T *Src, T *Dest,
                                       int nelmt1, int nelmt2,
                                       int offset1, int offset2)
{
  __shared__ T tile[TILE_DIM][TILE_DIM+1];

  int xIndex = offset1 + blockIdx.x * TILE_DIM + threadIdx.x;
  int yIndex = offset2 + blockIdx.y * TILE_DIM + threadIdx.y;
  int index  = xIndex + yIndex * nelmt1;

  if (xIndex < nelmt1) {
    for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
      if (index+i*nelmt1 < nelmt1*nelmt2) {
        tile[threadIdx.y+i][threadIdx.x] = Src[index+i*nelmt1];
      }
    }
  }

  xIndex = offset2 + blockIdx.y * TILE_DIM + threadIdx.x;
  yIndex = offset1 + blockIdx.x * TILE_DIM + threadIdx.y;
  index  = xIndex + yIndex * nelmt2;

  __syncthreads();

  if (xIndex < nelmt2) {
    for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
      if (index+i*nelmt2 < nelmt1*nelmt2) {
        Dest[index+i*nelmt2] = tile[threadIdx.x][threadIdx.y+i];
      }
    }
  }
}

/******************************************************************************
 * Coalesced 2D-transpose CUDA kernel with offset and bound checking:
 * Each element is assumed to have a single data item.
 * Remark: same as before but this kernel can handle multiple items per
 * thread.
 ******************************************************************************/
template<int TILE_DIM, int BLOCK_ROWS, class T>
__global__ void transpose2d_mult_offset_knl(T *Src, T *Dest,
                                            int nelmt1, int nelmt2,
                                            int nitem1, int nitem2,
                                            int offset1, int offset2)
{
  __shared__ T tile[TILE_DIM][TILE_DIM+1];

  for (int k1=0; k1<nitem1; k1++) {
    for (int k2=0; k2<nitem2; k2++) {

      int xIndex = offset1 + (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.x;
      int yIndex = offset2 + (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.y;
      int index  = xIndex + yIndex * nelmt1;

      if (xIndex < nelmt1) {
        for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
          if (index+i*nelmt1 < nelmt1*nelmt2) {
            tile[threadIdx.y+i][threadIdx.x] = Src[index+i*nelmt1];
          }
        }
      }

      xIndex = offset2 + (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.x;
      yIndex = offset1 + (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.y;
      index  = xIndex + yIndex * nelmt2;

      __syncthreads();

      if (xIndex < nelmt2) {
        for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
          if (index+i*nelmt2 < nelmt1*nelmt2) {
            Dest[index+i*nelmt2] = tile[threadIdx.x][threadIdx.y+i];
          }
        }
      }

      __syncthreads();
    }
  }
}

/******************************************************************************
 * Coalesced 2D-transpose CUDA kernel:
 * Each element is assumed to have a single data item.
 * Handles strongly rectangular arrays where one dimension (FIXED_DIM) does
 * not exceed TILE_DIM; launched with TILE_DIM x 1 thread blocks.
 * NOTE(review): the shared tile is sized TILE_DIM*(TILE_DIM-1), and the
 * FIXED_DIM==1 branch reads the tile with stride nelmt2 while it was filled
 * with row length nelmt1 — both look inconsistent and should be verified
 * against a host reference before this kernel is trusted.
 ******************************************************************************/
template<int TILE_DIM, int FIXED_DIM, class T>
__global__ void transpose2d_rect_knl(T *Src, T *Dest,
                                     int nelmt1, int nelmt2)
{
  if (FIXED_DIM == 1) {
    // First dimension is fixed, i.e. not larger than TILE_DIM
    __shared__ T tile[TILE_DIM*(TILE_DIM-1)];

    // All threads read nelmt1*TILE_DIM data items from the source
    // array from contiguous memory positions
    int index = blockIdx.x * TILE_DIM * nelmt1;
    for (int i=0; i<nelmt1; i++) {
      int tid = threadIdx.x + i * TILE_DIM;
      tile[tid] = Src[index+tid];
    }

    __syncthreads();

    // All threads write TILE_DIM contiguous items from the same
    // column of the shared memory block and store them in the same
    // row of the destination array; Repeat until all nelmt1<TILE_DIM
    // rows have been processed.
    index = blockIdx.x * TILE_DIM + threadIdx.x;
    for (int i=0; i<nelmt1; i++)
      Dest[index + i*nelmt2] = tile[threadIdx.x*nelmt2+i];
  }
  else if (FIXED_DIM == 2) {
    // Second dimension is fixed, i.e. not larger than TILE_DIM
    __shared__ T tile[TILE_DIM*(TILE_DIM-1)];

    // All threads read TILE_DIM contiguous items from the same row
    // and store them in the same column of the shared memory block;
    // Repeat until all nelmt2<TILE_DIM rows have been processed.
    int index = blockIdx.x * TILE_DIM + threadIdx.x;
    for (int i=0; i<nelmt2; i++)
      tile[threadIdx.x*nelmt2+i] = Src[index + i*nelmt1];

    __syncthreads();

    // All threads write nelmt2*TILE_DIM data items from the shared
    // memory block into the destination array at contiguous positions
    index = blockIdx.x * TILE_DIM * nelmt2;
    for (int i=0; i<nelmt2; i++) {
      int tid = threadIdx.x + i * TILE_DIM;
      Dest[index+tid] = tile[tid];
    }
  }
}

/******************************************************************************
 * 2D-transpose in device memory:
 * Each element is assumed to have a single data item.
 *
 * Strategy: the tile-aligned bulk of the array is transposed with the
 * coalesced kernel (or its "mult" variant when the tile grid exceeds the
 * device's maxGridSize); the remaining strips of fewer than TILE_DIM rows
 * and/or columns are handled by the bounds-checked offset kernels.
 * Arrays with one dimension below TILE_DIM use the rectangular kernel.
 ******************************************************************************/
template<typename T>
void coproc_transposeOnDevice2d(const void *__restrict__ d_ptrSrc,
                                void *__restrict__ d_ptrDest,
                                int nelmt1, int nelmt2,
                                cudaStream_t stream)
{
  T *ptrSrc  = (T*)(d_ptrSrc);
  T *ptrDest = (T*)(d_ptrDest);

  const cudaDeviceProp *devProp = coproc_getCurrentDeviceProp();

  const int TILE_DIM   = 32;
  const int BLOCK_ROWS = 2;

  if (nelmt1 >= TILE_DIM && nelmt2 >= TILE_DIM) {
    //
    // Transpose matrix using TILE_DIM x TILE_DIM tiles
    //

    // Compute number of tiles and grid size; m1/m2 are the per-thread
    // replication factors needed when the tile count exceeds maxGridSize.
    const int n1 = nelmt1/TILE_DIM;
    const int n2 = nelmt2/TILE_DIM;
    const int m1 = (n1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]);
    const int m2 = (n2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]);

    // Coalesced transpose using quadratic tiles
    dim3 grid(n1/m1, n2/m2);
    dim3 threads(TILE_DIM,BLOCK_ROWS);

    if (m1*m2 == 1) {
      transpose2d_knl<TILE_DIM,BLOCK_ROWS,T><<<grid, threads, 0, stream>>>
        (ptrSrc, ptrDest, nelmt1, nelmt2);
    }
    else {
      transpose2d_mult_knl<TILE_DIM,BLOCK_ROWS,T><<<grid, threads, 0, stream>>>
        (ptrSrc, ptrDest, nelmt1, nelmt2, m1, m2);
    }

    // Extent actually covered by the bulk pass in each dimension.
    const int l1 = m1*grid.x*TILE_DIM;
    const int l2 = m2*grid.y*TILE_DIM;

    if (nelmt1 > l1) {
      const int nn1 = (nelmt1-l1+TILE_DIM-1)/TILE_DIM;
      const int nn2 = (nelmt2+TILE_DIM-1)/TILE_DIM;
      const int mm1 = (nn1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]);
      const int mm2 = (nn2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]);

      // Coalesced transpose of last few (less than TILE_DIM) rows
      grid = dim3((nn1+mm1-1)/mm1, (nn2+mm2-1)/mm2);

      if (mm1*mm2 == 1) {
        transpose2d_offset_knl<TILE_DIM,BLOCK_ROWS,T><<<grid, threads, 0, stream>>>
          (ptrSrc, ptrDest, nelmt1, nelmt2, l1, 0);
      }
      else {
        transpose2d_mult_offset_knl<TILE_DIM,BLOCK_ROWS,T><<<grid, threads, 0, stream>>>
          (ptrSrc, ptrDest, nelmt1, nelmt2, mm1, mm2, l1, 0);
      }
    }

    if (nelmt2 > l2) {
      const int nn1 = (nelmt1+TILE_DIM-1)/TILE_DIM;
      const int nn2 = (nelmt2-l2+TILE_DIM-1)/TILE_DIM;
      const int mm1 = (nn1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]);
      const int mm2 = (nn2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]);

      // Coalesced transpose of last few (less than TILE_DIM) columns
      grid = dim3((nn1+mm1-1)/mm1, (nn2+mm2-1)/mm2);

      if (mm1*mm2 == 1) {
        transpose2d_offset_knl<TILE_DIM,BLOCK_ROWS,T><<<grid, threads, 0, stream>>>
          (ptrSrc, ptrDest, nelmt1, nelmt2, 0, l2);
      }
      else {
        transpose2d_mult_offset_knl<TILE_DIM,BLOCK_ROWS,T><<<grid, threads, 0, stream>>>
          (ptrSrc, ptrDest, nelmt1, nelmt2, mm1, mm2, 0, l2);
      }
    }
  }
  else if (nelmt1 > TILE_DIM) {
    //
    // Transpose array with first dimension larger than TILE_DIM
    // (so the second dimension is below TILE_DIM -> rect kernel).
    //

    // Compute number of tiles and grid size
    const int n1 = nelmt1/TILE_DIM;
    const int m1 = (n1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]);

    dim3 grid(n1/m1,1);
    dim3 threads(TILE_DIM,1);

    transpose2d_rect_knl<TILE_DIM,2><<<grid, threads, 0, stream>>>
      (ptrSrc, ptrDest, nelmt1, nelmt2);

    const int l1 = m1*grid.x*TILE_DIM;

    if (nelmt1 > l1) {
      const int nn1 = (nelmt1-l1+TILE_DIM-1)/TILE_DIM;
      const int mm1 = (nn1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]);

      // Coalesced transpose of last few (less than TILE_DIM) rows.
      // Note: this inner `threads` intentionally shadows the outer one
      // (TILE_DIM x BLOCK_ROWS instead of TILE_DIM x 1).
      grid = dim3((nn1+mm1-1)/mm1,1);
      dim3 threads(TILE_DIM,BLOCK_ROWS);
      transpose2d_offset_knl<TILE_DIM,BLOCK_ROWS,T><<<grid, threads, 0, stream>>>
        (ptrSrc, ptrDest, nelmt1, nelmt2, l1, 0);
    }
  }
  else if (nelmt2 > TILE_DIM) {
    //
    // Transpose array with second dimension larger than TILE_DIM
    //

    // Compute number of tiles and grid size
    const int n2 = nelmt2/TILE_DIM;
    const int m2 = (n2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]);

    dim3 grid(1,n2/m2);
    dim3 threads(TILE_DIM,1);

    transpose2d_rect_knl<TILE_DIM,1><<<grid, threads, 0, stream>>>
      (ptrSrc, ptrDest, nelmt1, nelmt2);

    const int l2 = m2*grid.y*TILE_DIM;

    if (nelmt2 > l2) {
      const int nn2 = (nelmt2-l2+TILE_DIM-1)/TILE_DIM;
      const int mm2 = (nn2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]);

      // Coalesced transpose of last few (less than TILE_DIM) rows
      grid = dim3(1,(nn2+mm2-1)/mm2);
      dim3 threads(TILE_DIM,BLOCK_ROWS);
      transpose2d_offset_knl<TILE_DIM,BLOCK_ROWS,T><<<grid, threads, 0, stream>>>
        (ptrSrc, ptrDest, nelmt1, nelmt2, 0, l2);
    }
  }
  else {
    //
    // Transpose array with both dimensions smaller than TILE_DIM:
    // a single bounds-checked block suffices.
    //
    dim3 grid(1,1);
    dim3 threads(TILE_DIM,BLOCK_ROWS);
    transpose2d_offset_knl<TILE_DIM,BLOCK_ROWS><<<grid, threads, 0, stream>>>
      (ptrSrc, ptrDest, nelmt1, nelmt2, 0, 0);
  }
}

/******************************************************************************
 * Naive 2D-transpose CUDA kernel:
 * Each element is assumed to have num_elmt data items given at run-time.
 * nelmt1 is expected to arrive pre-multiplied by num_elmt (see the host
 * wrapper below); each item's logical element/sub-item position is
 * reconstructed from the flat index.
 * NOTE(review): `index_out` is computed from loop-invariant values while
 * the guarded read uses index_in+i*nelmt1 — the output position does not
 * advance with i; verify against the host transpose before relying on it.
 ******************************************************************************/
template<int TILE_DIM, int BLOCK_ROWS, class T>
__global__ void transpose2d_naive_knl(int num_elmt, T *Src, T *Dest,
                                      int nelmt1, int nelmt2)
{
  int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
  int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;

  int index_in = xIndex + nelmt1 * yIndex;
  //int index_out = yIndex + nelmt2 * (xIndex);

  for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
    if (index_in+i*nelmt1 < nelmt1*nelmt2) {
      int xIdx = index_in/nelmt1;
      int yIdx = index_in-xIdx*nelmt1;

      xIndex = xIdx/num_elmt;
      yIndex = yIndex*num_elmt + xIdx%num_elmt;

      int index_out = yIndex + nelmt2 * xIndex;
      Dest[index_out] = Src[index_in+i*nelmt1];
    }
  }
}

/******************************************************************************
 * Naive 2D-transpose CUDA kernel:
 * Each element is assumed to have num_elmt data items given at run-time.
 * Remark: same as before but this kernel can handle multiple items per
 * thread.
 * NOTE(review): this variant never uses `num_elmt` — its body is the
 * single-item naive transpose; confirm whether multi-item support was
 * intended here.
 ******************************************************************************/
template<int TILE_DIM, int BLOCK_ROWS, class T>
__global__ void transpose2d_naive_mult_knl(int num_elmt, T *Src, T *Dest,
                                           int nelmt1, int nelmt2,
                                           int nitem1, int nitem2)
{
  for (int k1=0; k1<nitem1; k1++) {
    for (int k2=0; k2<nitem2; k2++) {

      int xIndex = (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.x;
      int yIndex = (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.y;

      int index_in  = xIndex + nelmt1 * yIndex;
      int index_out = yIndex + nelmt2 * xIndex;

      for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
        if (index_out+i < nelmt1*nelmt2)
          Dest[index_out+i] = Src[index_in+i*nelmt1];
      }
    }
  }
}

/******************************************************************************
 * 2D-transpose in device memory:
 * Each element is assumed to have num_elmt data items given at run-time.
 * Sizes a naive (non-tiled) kernel over the full array, replicating work
 * per thread when the tile grid exceeds the device's maxGridSize.
 ******************************************************************************/
template<typename T>
void coproc_transposeOnDevice2d(int num_elmt,
                                const void *__restrict__ d_ptrSrc,
                                void *__restrict__ d_ptrDest,
                                int nelmt1, int nelmt2,
                                cudaStream_t stream)
{
  T *ptrSrc  = (T*)(d_ptrSrc);
  T *ptrDest = (T*)(d_ptrDest);

  const cudaDeviceProp *devProp = coproc_getCurrentDeviceProp();

  const int TILE_DIM   = 32;
  const int BLOCK_ROWS = 2;

  // Compute number of tiles and grid size
  const int n1 = (nelmt1+TILE_DIM-1)/TILE_DIM;
  const int n2 = (nelmt2+TILE_DIM-1)/TILE_DIM;
  const int m1 = (n1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]);
  const int m2 = (n2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]);

  dim3 grid((n1+m1-1)/m1, (n2+m2-1)/m2);
  dim3 threads(TILE_DIM,BLOCK_ROWS);

  // Naive transpose of rectangular arrays
  if (m1*m2 == 1) {
    transpose2d_naive_knl<TILE_DIM,BLOCK_ROWS,T><<<grid, threads, 0, stream>>>
      (num_elmt, ptrSrc, ptrDest, nelmt1, nelmt2);
  }
  else {
    transpose2d_naive_mult_knl<TILE_DIM,BLOCK_ROWS,T><<<grid, threads, 0, stream>>>
      (num_elmt, ptrSrc, ptrDest, nelmt1, nelmt2, m1, m2);
  }
}
/****************************************************************************** * Naive 2D-transpose CUDA kernel: * Each element is assumed to have num_elmt data items given at run-time. ******************************************************************************/ template<int TILE_DIM, int BLOCK_ROWS, int num_elmt, class T> __global__ void transpose2d_naive_knl(T *Src, T *Dest, int nelmt1, int nelmt2) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + nelmt1 * yIndex; //int index_out = yIndex + nelmt2 * (xIndex); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { int index = index_in+i*nelmt1; if (index < nelmt1*nelmt2) { int yIdx = index/nelmt1; int xIdx = index-yIdx*nelmt1; xIndex = xIdx/num_elmt; yIndex = yIndex*num_elmt + xIdx%num_elmt; int index_out = yIdx + nelmt2 * xIdx; Dest[index_out] = Src[index]; } } } /****************************************************************************** * Naive 2D-transpose CUDA kernel: * Each element is assumed to have num_elmt data items given at run-time. * Remark: same as before but this kernel can handle multiple items per thread. 
******************************************************************************/ template<int TILE_DIM, int BLOCK_ROWS, int num_elmt, class T> __global__ void transpose2d_naive_mult_knl(T *Src, T *Dest, int nelmt1, int nelmt2, int nitem1, int nitem2) { for (int k1=0; k1<nitem1; k1++) { for (int k2=0; k2<nitem2; k2++) { int xIndex = (k1*gridDim.x + blockIdx.x) * TILE_DIM + threadIdx.x; int yIndex = (k2*gridDim.y + blockIdx.y) * TILE_DIM + threadIdx.y; int index_in = xIndex + nelmt1 * yIndex; int index_out = yIndex + nelmt2 * xIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if ((index_out+i < nelmt1*nelmt2) && (index_in+i*nelmt1 < nelmt1*nelmt2)) Dest[index_out+i] = Src[index_in+i*nelmt1]; } } } } /****************************************************************************** * 2D-transpose in device memory: * Each element is assumed to have num_elmt data items known at compile-time ******************************************************************************/ template<typename T, int num_elmt> void coproc_transposeOnDevice2d(const void *__restrict__ d_ptrSrc, void *__restrict__ d_ptrDest, int nelmt1, int nelmt2, cudaStream_t stream) { T *ptrSrc = (T*)(d_ptrSrc); T *ptrDest = (T*)(d_ptrDest); const cudaDeviceProp *devProp = coproc_getCurrentDeviceProp(); const int TILE_DIM = 32; const int BLOCK_ROWS = 2; // Compute number of tiles and grid size const int n1 = (nelmt1*num_elmt+TILE_DIM-1)/TILE_DIM; const int n2 = (nelmt2+TILE_DIM-1)/TILE_DIM; const int m1 = (n1+devProp->maxGridSize[0]-1)/(devProp->maxGridSize[0]); const int m2 = (n2+devProp->maxGridSize[1]-1)/(devProp->maxGridSize[1]); dim3 grid((n1+m1-1)/m1, (n2+m2-1)/m2); dim3 threads(TILE_DIM,BLOCK_ROWS); cout << nelmt1 << "," << num_elmt << ":" << nelmt2 << endl; // Naive transpose of rectangular arrays if (m1*m2 == 1) { transpose2d_naive_knl<TILE_DIM,BLOCK_ROWS,num_elmt,T><<<grid, threads, 0, stream>>> (ptrSrc, ptrDest, nelmt1*num_elmt, nelmt2); } else { 
transpose2d_naive_mult_knl<TILE_DIM,BLOCK_ROWS,num_elmt,T><<<grid, threads, 0, stream>>> (ptrSrc, ptrDest, nelmt1*num_elmt, nelmt2, m1, m2); } } /****************************************************************************** * Wrapper routine in C++ ******************************************************************************/ void coproc_transposeOnDevice2d(const void *__restrict__ d_ptrSrc, void *__restrict__ d_ptrDest, int bytes_per_elmt, int nelmt1, int nelmt2, cudaStream_t stream) { cudaEvent_t start,stop; float inTime; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,stream); switch (bytes_per_elmt) { // // Check for 1,2,4,8 bytes // case sizeof(uchar1): // 1 byte coproc_transposeOnDevice2d<uchar1>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(uchar2): // 2 byte coproc_transposeOnDevice2d<uchar2>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(uchar3): // 3 byte coproc_transposeOnDevice2d<uchar3>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(float1): // 4 byte coproc_transposeOnDevice2d<float1>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(float2): // 8 byte coproc_transposeOnDevice2d<float2>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(float3): // 12 byte coproc_transposeOnDevice2d<float3>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case sizeof(float4): // 16 byte coproc_transposeOnDevice2d<float4>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; // // Multiples of float3 : n*12 byte // case 24: cout << "THIS IS IT" << endl; coproc_transposeOnDevice2d<float3,2>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 60: coproc_transposeOnDevice2d<float3,5>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 72: coproc_transposeOnDevice2d<float3,6>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 84: coproc_transposeOnDevice2d<float3,7>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 108: 
coproc_transposeOnDevice2d<float3,9>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 120: coproc_transposeOnDevice2d<float3,10>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 132: coproc_transposeOnDevice2d<float3,11>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 156: coproc_transposeOnDevice2d<float3,13>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 168: coproc_transposeOnDevice2d<float3,14>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 180: coproc_transposeOnDevice2d<float3,15>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 204: coproc_transposeOnDevice2d<float3,17>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 216: coproc_transposeOnDevice2d<float3,18>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 228: coproc_transposeOnDevice2d<float3,19>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 252: coproc_transposeOnDevice2d<float3,21>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; // // Multiples of float4 : n * 16 byte // case 32: coproc_transposeOnDevice2d<float4,2>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 48: coproc_transposeOnDevice2d<float4,3>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 64: coproc_transposeOnDevice2d<float4,4>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 80: coproc_transposeOnDevice2d<float4,5>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 96: coproc_transposeOnDevice2d<float4,6>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 112: coproc_transposeOnDevice2d<float4,7>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 128: coproc_transposeOnDevice2d<float4,8>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 144: coproc_transposeOnDevice2d<float4,9>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 160: coproc_transposeOnDevice2d<float4,10>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 176: coproc_transposeOnDevice2d<float4,11>(d_ptrSrc, 
d_ptrDest, nelmt1, nelmt2, stream); break; case 192: coproc_transposeOnDevice2d<float4,12>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 208: coproc_transposeOnDevice2d<float4,13>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 224: coproc_transposeOnDevice2d<float4,14>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 240: coproc_transposeOnDevice2d<float4,15>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 256: coproc_transposeOnDevice2d<float4,16>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 512: coproc_transposeOnDevice2d<float4,32>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; case 1024: coproc_transposeOnDevice2d<float4,64>(d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); break; default: // // Default: check if we have a multiple of 16,8,4,2 bytes // if ((bytes_per_elmt&15) == 0) { coproc_transposeOnDevice2d<float4>((bytes_per_elmt>>4), d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); } if ((bytes_per_elmt&7) == 0) { coproc_transposeOnDevice2d<float2>((bytes_per_elmt>>3), d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); } else if ((bytes_per_elmt&3) == 0) { coproc_transposeOnDevice2d<float1>((bytes_per_elmt>>2), d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); } else if ((bytes_per_elmt&1) == 0) { coproc_transposeOnDevice2d<char2>((bytes_per_elmt>>1), d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); } else { coproc_transposeOnDevice2d<char1>(bytes_per_elmt, d_ptrSrc, d_ptrDest, nelmt1, nelmt2, stream); } break; } coproc_checkError("coproc_transposeOnDevice2d"); cudaEventRecord(stop,stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&inTime, start, stop); cout << "Bandwidth: " << 2.0f*1000.0f*nelmt1*nelmt2*bytes_per_elmt/(1024*1024*1024)/(inTime) << "GB/s" << endl; } /****************************************************************************** * Wrapper routines to be called from Fortran ******************************************************************************/ extern "C" { void 
FNAME(coproc_transposeondevice2d)(const void *__restrict__ d_ptrSrc, void *__restrict__ d_ptrDest, __INT *bytes_per_elmt, __INT *nelmt1, __INT *nelmt2, __I64 *stream) { coproc_transposeOnDevice2d(d_ptrSrc, d_ptrDest, *bytes_per_elmt, *nelmt1, *nelmt2, (cudaStream_t)(*stream)); } }; /*############################################################################# * Transpose three-dimensional arrays in device memory *############################################################################# */ /****************************************************************************** * Naive 3D-transpose CUDA kernel: * Each element is assumed to have a single data item ******************************************************************************/ template<class T> __global__ void transpose3d_knl(T *Src, T *Dest, int nelmt1, int nelmt2, int nelmt3, int offset1=0, int offset2=0, int offset3=0) { int xIndex = offset1 + blockIdx.x + threadIdx.x; int yIndex = offset2 + blockIdx.y + threadIdx.y; int zIndex = offset3 + blockIdx.z + threadIdx.z; int index_in = xIndex + nelmt1 * yIndex + nelmt1 * nelmt2 * zIndex; int index_out = zIndex + nelmt3 * yIndex + nelmt2 * nelmt3 * xIndex; Dest[index_out] = Src[index_in]; } /****************************************************************************** * Naive 3D-transpose CUDA kernel: * Each element is assumed to have num_elmt data items given at run-time ******************************************************************************/ template<class T> __global__ void transpose3d_knl(int num_elmt, T *Src, T *Dest, int nelmt1, int nelmt2, int nelmt3, int offset1=0, int offset2=0, int offset3=0) { int xIndex = offset1 + blockIdx.x + threadIdx.x; int yIndex = offset2 + blockIdx.y + threadIdx.y; int zIndex = offset3 + blockIdx.z + threadIdx.z; int index_in = (xIndex + nelmt1 * yIndex + nelmt1 * nelmt2 * zIndex) * num_elmt; int index_out = (zIndex + nelmt3 * yIndex + nelmt2 * nelmt3 * xIndex) * num_elmt; for (int ielmt=0; ielmt<num_elmt; ++ielmt) 
Dest[index_out+ielmt] = Src[index_in+ielmt]; } /****************************************************************************** * Naive 3D-transpose CUDA kernel: * Each element is assumed to have num_elmt data items known at compile-time ******************************************************************************/ template<class T, int num_elmt> __global__ void transpose3d_knl(T *Src, T *Dest, int nelmt1, int nelmt2, int nelmt3, int offset1=0, int offset2=0, int offset3=0) { int xIndex = offset1 + blockIdx.x + threadIdx.x; int yIndex = offset2 + blockIdx.y + threadIdx.y; int zIndex = offset3 + blockIdx.z + threadIdx.z; int index_in = (xIndex + nelmt1 * yIndex + nelmt1 * nelmt2 * zIndex) * num_elmt; int index_out = (zIndex + nelmt3 * yIndex + nelmt2 * nelmt3 * xIndex) * num_elmt; for (int ielmt=0; ielmt<num_elmt; ++ielmt) Dest[index_out+ielmt] = Src[index_in+ielmt]; }
afecdf304850c558a862901a1b4c08e824c4c49f.hip
// !!! This is a file automatically generated by hipify!!! // cd /home/hork/cuda-workspace/CudaSHA256/Debug/files // time ~/Dropbox/FIIT/APS/Projekt/CpuSHA256/a.out -f ../file-list // time ../CudaSHA256 -f ../file-list #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <hip/hip_runtime.h> #include "scrypt.h" #include <dirent.h> #include <ctype.h> #include <sys/time.h> #define N 16384 #define M 1000000 #define MAXLOOP M/N //#define N 6 #define checkCudaErrors(x) \ { \ hipGetLastError(); \ x; \ hipError_t err = hipGetLastError(); \ if (err != hipSuccess) \ printf("GPU: hipError_t %d (%s)\n", err, hipGetErrorString(err)); \ } __device__ scrypt_cuda(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP1, int N, int dklenP2, WORD* hash_out){ hash_out = scrypt(ctx, block, block_len, dklenP1, dklenP2); } __global__ void scrypt_top_cuda(uint32_t max_loop) { uint32_t index = blockIdx.x * blockDim.x + threadIdx.x; SHA256_CTX *ctx = new SHA256_CTX(); char ver[]="20000000"; char prev_block[]="48f4bdc6cbabf6e59d5714adc7caa1af293bc49c75d447c2fdc1843694d1ef56"; char mrkl_root[]="f03a2314e267c0e67627a51aa8c7bcdd99a2d173deec41ab96945eb4c7e43dee"; char time[9]; char bits[9]; little_endian(ver, sizeof(ver) - 1); little_endian(prev_block, sizeof(prev_block) - 1); little_endian(mrkl_root, sizeof(mrkl_root) - 1); // Get time struct tm t; time_t t_of_day; t.tm_year = 2019-1900; // Year - 1900 t.tm_mon = 3-1; // Month, where 1 = jan t.tm_mday = 13; // Day of the month t.tm_hour = 7+9; t.tm_min = 51; t.tm_sec = 51; t.tm_isdst = -1; // Is DST on? 
1 = yes, 0 = no, -1 = unknown t_of_day = mktime(&t); WORD *wtime = new WORD(t_of_day); endian_cvt(wtime); word_to_hex_eight(*wtime, time, 8); word_to_hex_eight(436330391, bits, 8); // bits -- input little_endian(bits, 8); char test_scrypt_in[153]; int in_index = 0; WORD i; for( i = 0; i < sizeof(ver)-1; i++){ test_scrypt_in[i]=ver[i]; } in_index += sizeof(ver)-1; for( i = 0; i < sizeof(prev_block); i++){ test_scrypt_in[in_index+i] = prev_block[i]; } in_index += sizeof(prev_block)-1; for( i = 0; i < sizeof(mrkl_root); i++){ test_scrypt_in[in_index+i] = mrkl_root[i]; } in_index += sizeof(mrkl_root)-1; for( i = 0; i < sizeof(time); i++){ test_scrypt_in[in_index+i] = time[i]; } in_index += sizeof(time)-1; for( i = 0; i < sizeof(bits); i++){ test_scrypt_in[in_index+i] = bits[i]; } WORD *nonce = new WORD(data->i*THREAD_NO_NONCE); endian_cvt(nonce); WORD *test_scrypt_out_w = new WORD[8](); char *test_scrypt_out = new char[32*8](); WORD test_scrypt_in_w[20]; for (i = index*max_loop; i<(index+1)*max_loop; i++){ hex_string_to_words(test_scrypt_in, sizeof(test_scrypt_in), test_scrypt_in_w); test_scrypt_in_w[19] = i; endian_cvt(&test_scrypt_in_w[19]); test_scrypt_out_w = scrypt_cuda(ctx, test_scrypt_in_w, 20, 256, 1024, 1024); if(i==(index+1)*max_loop-1){ printf("\nThread id: %d, nonce: %d\n", data->i, *nonce); } } } int main(int argc)) { int GPU_N; checkCudaErrors(hipGetDeviceCount(&GPU_N)); printf("CUDA-capable device count: %d\n", GPU_N); checkCudaErrors(hipSetDevice(GPU_N-1)); uint32_t blockSize = 256; uint32_t numBlocks = (N + blockSize - 1) / blockSize; // uint32_t *max_loop_cpu = (uint32_t *)malloc(sizeof(uint32_t)); // *max_loop_cpu = M; // checkCudaErrors(hipMallocManaged(&max_loop_gpu, sizeof(uint32_t))); // hipMemcpy(max_loop_gpu, max_loop_cpu, hipMemcpyHostToDevice); hipLaunchKernelGGL(( scrypt_cuda) , dim3(numBlocks), dim3(blockSize), 0, 0, MAXLOOP); hipDeviceReset(); return argc - 1; }
afecdf304850c558a862901a1b4c08e824c4c49f.cu
// cd /home/hork/cuda-workspace/CudaSHA256/Debug/files // time ~/Dropbox/FIIT/APS/Projekt/CpuSHA256/a.out -f ../file-list // time ../CudaSHA256 -f ../file-list #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <cuda.h> #include "scrypt.h" #include <dirent.h> #include <ctype.h> #include <sys/time.h> #define N 16384 #define M 1000000 #define MAXLOOP M/N //#define N 6 #define checkCudaErrors(x) \ { \ cudaGetLastError(); \ x; \ cudaError_t err = cudaGetLastError(); \ if (err != cudaSuccess) \ printf("GPU: cudaError %d (%s)\n", err, cudaGetErrorString(err)); \ } __device__ scrypt_cuda(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP1, int N, int dklenP2, WORD* hash_out){ hash_out = scrypt(ctx, block, block_len, dklenP1, dklenP2); } __global__ void scrypt_top_cuda(uint32_t max_loop) { uint32_t index = blockIdx.x * blockDim.x + threadIdx.x; SHA256_CTX *ctx = new SHA256_CTX(); char ver[]="20000000"; char prev_block[]="48f4bdc6cbabf6e59d5714adc7caa1af293bc49c75d447c2fdc1843694d1ef56"; char mrkl_root[]="f03a2314e267c0e67627a51aa8c7bcdd99a2d173deec41ab96945eb4c7e43dee"; char time[9]; char bits[9]; little_endian(ver, sizeof(ver) - 1); little_endian(prev_block, sizeof(prev_block) - 1); little_endian(mrkl_root, sizeof(mrkl_root) - 1); // Get time struct tm t; time_t t_of_day; t.tm_year = 2019-1900; // Year - 1900 t.tm_mon = 3-1; // Month, where 1 = jan t.tm_mday = 13; // Day of the month t.tm_hour = 7+9; t.tm_min = 51; t.tm_sec = 51; t.tm_isdst = -1; // Is DST on? 
1 = yes, 0 = no, -1 = unknown t_of_day = mktime(&t); WORD *wtime = new WORD(t_of_day); endian_cvt(wtime); word_to_hex_eight(*wtime, time, 8); word_to_hex_eight(436330391, bits, 8); // bits -- input little_endian(bits, 8); char test_scrypt_in[153]; int in_index = 0; WORD i; for( i = 0; i < sizeof(ver)-1; i++){ test_scrypt_in[i]=ver[i]; } in_index += sizeof(ver)-1; for( i = 0; i < sizeof(prev_block); i++){ test_scrypt_in[in_index+i] = prev_block[i]; } in_index += sizeof(prev_block)-1; for( i = 0; i < sizeof(mrkl_root); i++){ test_scrypt_in[in_index+i] = mrkl_root[i]; } in_index += sizeof(mrkl_root)-1; for( i = 0; i < sizeof(time); i++){ test_scrypt_in[in_index+i] = time[i]; } in_index += sizeof(time)-1; for( i = 0; i < sizeof(bits); i++){ test_scrypt_in[in_index+i] = bits[i]; } WORD *nonce = new WORD(data->i*THREAD_NO_NONCE); endian_cvt(nonce); WORD *test_scrypt_out_w = new WORD[8](); char *test_scrypt_out = new char[32*8](); WORD test_scrypt_in_w[20]; for (i = index*max_loop; i<(index+1)*max_loop; i++){ hex_string_to_words(test_scrypt_in, sizeof(test_scrypt_in), test_scrypt_in_w); test_scrypt_in_w[19] = i; endian_cvt(&test_scrypt_in_w[19]); test_scrypt_out_w = scrypt_cuda(ctx, test_scrypt_in_w, 20, 256, 1024, 1024); if(i==(index+1)*max_loop-1){ printf("\nThread id: %d, nonce: %d\n", data->i, *nonce); } } } int main(int argc)) { int GPU_N; checkCudaErrors(cudaGetDeviceCount(&GPU_N)); printf("CUDA-capable device count: %d\n", GPU_N); checkCudaErrors(cudaSetDevice(GPU_N-1)); uint32_t blockSize = 256; uint32_t numBlocks = (N + blockSize - 1) / blockSize; // uint32_t *max_loop_cpu = (uint32_t *)malloc(sizeof(uint32_t)); // *max_loop_cpu = M; // checkCudaErrors(cudaMallocManaged(&max_loop_gpu, sizeof(uint32_t))); // cudaMemcpy(max_loop_gpu, max_loop_cpu, cudaMemcpyHostToDevice); scrypt_cuda <<<numBlocks, blockSize>>> (MAXLOOP); cudaDeviceReset(); return argc - 1; }
91a04f43bae37b708e9d4f227871de18a6691a45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* https://devblogs.nvidia.com/even-easier-introduction-cuda/ */ #include <iostream> #include <math.h> // __global__: indica que a funo add dever ser executada na __global__ void add(int n, float *x, float *y){ int index = threadIdx.x; int stride = blockDim.x; for(int i = index; i<n ; i=i+stride){ y[i] = x[i]+y[i]; } } int main(void){ int N = 1<<20; /* Alocao em C++ puro float *x = new float[N]; float *y = new float[N]; */ /* Alocao em CUDA */ float *x,*y; hipMallocManaged(&x, N*sizeof(float)); hipMallocManaged(&y, N*sizeof(float)); for (int i=0;i<N;i++){ x[i]=1.0f; y[i]=2.0f; } // Run kernel on 1M elements on the GPU //Utilizando um thread block com um 256 threads hipLaunchKernelGGL(( add), dim3(1),dim3(256), 0, 0, N,x,y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); float maxError = 0.0f; for(int i=0; i<N ; i++) maxError = fmax(maxError,fabs(y[i]-3.0f)); std::cout <<"Max error: "<<maxError<<std::endl; //Free memory hipFree(x); hipFree(y); return 0; }
91a04f43bae37b708e9d4f227871de18a6691a45.cu
/* https://devblogs.nvidia.com/even-easier-introduction-cuda/ */ #include <iostream> #include <math.h> // __global__: indica que a função add deverá ser executada na __global__ void add(int n, float *x, float *y){ int index = threadIdx.x; int stride = blockDim.x; for(int i = index; i<n ; i=i+stride){ y[i] = x[i]+y[i]; } } int main(void){ int N = 1<<20; /* Alocação em C++ puro float *x = new float[N]; float *y = new float[N]; */ /* Alocação em CUDA */ float *x,*y; cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); for (int i=0;i<N;i++){ x[i]=1.0f; y[i]=2.0f; } // Run kernel on 1M elements on the GPU //Utilizando um thread block com um 256 threads add<<<1,256>>>(N,x,y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); float maxError = 0.0f; for(int i=0; i<N ; i++) maxError = fmax(maxError,fabs(y[i]-3.0f)); std::cout <<"Max error: "<<maxError<<std::endl; //Free memory cudaFree(x); cudaFree(y); return 0; }
14f038f00d7c3f99bc8eb1b79680a2d6815b6309.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_fp16.h> #include <algorithm> #include "paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { SplitPlugin* CreateSplitPluginDeserialize(const void* buffer, size_t length) { return new SplitPlugin(buffer, length); } REGISTER_TRT_PLUGIN("split_plugin", CreateSplitPluginDeserialize); // copied from operators::math::SplitFunctor template <typename T> __global__ void SplitKernel(const T* input_data, const int in_row, const int in_col, const int* out_cols, int out_cols_size, T** outputs_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; int curr_segment = 0; int curr_offset = out_cols[0]; for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) { int curr_col_offset = out_cols[curr_segment + 1]; while (curr_col_offset <= tid_x) { curr_offset = curr_col_offset; ++curr_segment; curr_col_offset = out_cols[curr_segment + 1]; } int local_col = tid_x - curr_offset; int segment_width = curr_col_offset - curr_offset; T* output_ptr = outputs_data[curr_segment]; if (output_ptr != nullptr) { int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) 
output_ptr[tid_y * segment_width + local_col] = input_data[tid_y * in_col + tid_x]; } } } template <typename T> __global__ void SplitKernel(const T* input_data, const int in_row, const int in_col, const int fixed_out_col, T** outputs_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) { int split = tid_x / fixed_out_col; int in_offset = tid_x - split * fixed_out_col; T* output_ptr = outputs_data[split]; if (output_ptr != nullptr) { int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) output_ptr[tid_y * fixed_out_col + in_offset] = input_data[tid_y * in_col + tid_x]; } } } nvinfer1::Dims SplitPlugin::getOutputDimensions( int index, const nvinfer1::Dims* input_dims, int num_inputs) { PADDLE_ENFORCE_EQ(num_inputs, 1); PADDLE_ENFORCE_LT(index, this->getNbOutputs()); nvinfer1::Dims output_dims = input_dims[0]; output_dims.d[axis_] = output_length_.at(index); return output_dims; } int SplitPlugin::initialize() { PADDLE_ENFORCE_LE(axis_, nvinfer1::Dims::MAX_DIMS); // notice input dims is [C, H, W] nvinfer1::Dims dims = this->getInputDims(0); outer_rows_ = 1; inner_cols_ = 1; for (int i = 0; i < axis_; ++i) { outer_rows_ *= dims.d[i]; } for (int i = axis_ + 1; i < dims.nbDims; ++i) { inner_cols_ *= dims.d[i]; } same_shape_ = true; std::vector<int> segment_offsets(1, 0); for (int i = 0; i < this->getNbOutputs(); ++i) { if (output_length_[i] != output_length_[0]) { same_shape_ = false; } segment_offsets.push_back(segment_offsets.back() + output_length_[i] * inner_cols_); } inner_cols_ *= dims.d[axis_]; d_segment_offsets_ = segment_offsets; segment_offsets_ = std::move(segment_offsets); d_output_ptrs_.resize(this->getNbOutputs(), nullptr); return 0; } template <typename T> inline void Split(hipStream_t stream, const bool same_shape, const int outer_rows, const int inner_cols, const std::vector<int>& segment_offsets, const int* d_segment_offsets, const T* input, 
T** outputs) { const int kThreadsPerBlock = 1024; const int kMaxBlocks = 65535; int block_cols = kThreadsPerBlock; if (inner_cols < kThreadsPerBlock) { // block_cols is aligned by 32. block_cols = ((inner_cols + 31) >> 5) << 5; } int block_rows = kThreadsPerBlock / block_cols; dim3 block_size = dim3(block_cols, block_rows, 1); int grid_cols = ::min((inner_cols + block_cols - 1) / block_cols, kMaxBlocks); int grid_rows = ::min(kMaxBlocks / grid_cols, ::max(outer_rows / block_rows, 1)); dim3 grid_size = dim3(grid_cols, grid_rows, 1); if (same_shape) { hipLaunchKernelGGL(( SplitKernel), dim3(grid_size), dim3(block_size), 0, stream, input, outer_rows, inner_cols, segment_offsets[1], outputs); } else { hipLaunchKernelGGL(( SplitKernel), dim3(grid_size), dim3(block_size), 0, stream, input, outer_rows, inner_cols, d_segment_offsets, static_cast<int>(segment_offsets.size()), outputs); } } int SplitPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream) { float const* input_ptr = reinterpret_cast<float const*>(inputs[0]); if (((batchSize == 1 && axis_ == 0) || axis_ == -1) && this->getNbOutputs() < 10) { float** output_ptrs = reinterpret_cast<float**>(outputs); int data_type_size = (this->getDataType() == nvinfer1::DataType::kFLOAT) ? 
sizeof(float) : sizeof(__half); for (int i = 0; i < this->getNbOutputs(); ++i) { PADDLE_ENFORCE( hipMemcpyAsync( output_ptrs[i], input_ptr + segment_offsets_[i], (segment_offsets_[i + 1] - segment_offsets_[i]) * data_type_size, hipMemcpyDeviceToDevice, stream) == hipSuccess); } } else { outer_rows_ *= batchSize; const int* d_segment_offsets_ptr = thrust::raw_pointer_cast(&d_segment_offsets_[0]); float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs_[0]); PADDLE_ENFORCE(hipMemcpyAsync(output_ptrs, outputs, this->getNbOutputs() * sizeof(float*), hipMemcpyHostToDevice, stream) == hipSuccess); if (this->getDataType() == nvinfer1::DataType::kFLOAT) { Split(stream, same_shape_, outer_rows_, inner_cols_, segment_offsets_, d_segment_offsets_ptr, input_ptr, output_ptrs); } else { Split(stream, same_shape_, outer_rows_, inner_cols_, segment_offsets_, d_segment_offsets_ptr, (__half*)input_ptr, // NOLINT (__half**)output_ptrs); // NOLINT } } return hipGetLastError() != hipSuccess; } } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
14f038f00d7c3f99bc8eb1b79680a2d6815b6309.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda_fp16.h> #include <algorithm> #include "paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { SplitPlugin* CreateSplitPluginDeserialize(const void* buffer, size_t length) { return new SplitPlugin(buffer, length); } REGISTER_TRT_PLUGIN("split_plugin", CreateSplitPluginDeserialize); // copied from operators::math::SplitFunctor template <typename T> __global__ void SplitKernel(const T* input_data, const int in_row, const int in_col, const int* out_cols, int out_cols_size, T** outputs_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; int curr_segment = 0; int curr_offset = out_cols[0]; for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) { int curr_col_offset = out_cols[curr_segment + 1]; while (curr_col_offset <= tid_x) { curr_offset = curr_col_offset; ++curr_segment; curr_col_offset = out_cols[curr_segment + 1]; } int local_col = tid_x - curr_offset; int segment_width = curr_col_offset - curr_offset; T* output_ptr = outputs_data[curr_segment]; if (output_ptr != nullptr) { int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) output_ptr[tid_y * segment_width + local_col] = input_data[tid_y * in_col + tid_x]; } } } template 
<typename T> __global__ void SplitKernel(const T* input_data, const int in_row, const int in_col, const int fixed_out_col, T** outputs_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) { int split = tid_x / fixed_out_col; int in_offset = tid_x - split * fixed_out_col; T* output_ptr = outputs_data[split]; if (output_ptr != nullptr) { int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) output_ptr[tid_y * fixed_out_col + in_offset] = input_data[tid_y * in_col + tid_x]; } } } nvinfer1::Dims SplitPlugin::getOutputDimensions( int index, const nvinfer1::Dims* input_dims, int num_inputs) { PADDLE_ENFORCE_EQ(num_inputs, 1); PADDLE_ENFORCE_LT(index, this->getNbOutputs()); nvinfer1::Dims output_dims = input_dims[0]; output_dims.d[axis_] = output_length_.at(index); return output_dims; } int SplitPlugin::initialize() { PADDLE_ENFORCE_LE(axis_, nvinfer1::Dims::MAX_DIMS); // notice input dims is [C, H, W] nvinfer1::Dims dims = this->getInputDims(0); outer_rows_ = 1; inner_cols_ = 1; for (int i = 0; i < axis_; ++i) { outer_rows_ *= dims.d[i]; } for (int i = axis_ + 1; i < dims.nbDims; ++i) { inner_cols_ *= dims.d[i]; } same_shape_ = true; std::vector<int> segment_offsets(1, 0); for (int i = 0; i < this->getNbOutputs(); ++i) { if (output_length_[i] != output_length_[0]) { same_shape_ = false; } segment_offsets.push_back(segment_offsets.back() + output_length_[i] * inner_cols_); } inner_cols_ *= dims.d[axis_]; d_segment_offsets_ = segment_offsets; segment_offsets_ = std::move(segment_offsets); d_output_ptrs_.resize(this->getNbOutputs(), nullptr); return 0; } template <typename T> inline void Split(cudaStream_t stream, const bool same_shape, const int outer_rows, const int inner_cols, const std::vector<int>& segment_offsets, const int* d_segment_offsets, const T* input, T** outputs) { const int kThreadsPerBlock = 1024; const int kMaxBlocks = 65535; int block_cols = 
kThreadsPerBlock; if (inner_cols < kThreadsPerBlock) { // block_cols is aligned by 32. block_cols = ((inner_cols + 31) >> 5) << 5; } int block_rows = kThreadsPerBlock / block_cols; dim3 block_size = dim3(block_cols, block_rows, 1); int grid_cols = std::min((inner_cols + block_cols - 1) / block_cols, kMaxBlocks); int grid_rows = std::min(kMaxBlocks / grid_cols, std::max(outer_rows / block_rows, 1)); dim3 grid_size = dim3(grid_cols, grid_rows, 1); if (same_shape) { SplitKernel<<<grid_size, block_size, 0, stream>>>( input, outer_rows, inner_cols, segment_offsets[1], outputs); } else { SplitKernel<<<grid_size, block_size, 0, stream>>>( input, outer_rows, inner_cols, d_segment_offsets, static_cast<int>(segment_offsets.size()), outputs); } } int SplitPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) { float const* input_ptr = reinterpret_cast<float const*>(inputs[0]); if (((batchSize == 1 && axis_ == 0) || axis_ == -1) && this->getNbOutputs() < 10) { float** output_ptrs = reinterpret_cast<float**>(outputs); int data_type_size = (this->getDataType() == nvinfer1::DataType::kFLOAT) ? 
sizeof(float) : sizeof(__half); for (int i = 0; i < this->getNbOutputs(); ++i) { PADDLE_ENFORCE( cudaMemcpyAsync( output_ptrs[i], input_ptr + segment_offsets_[i], (segment_offsets_[i + 1] - segment_offsets_[i]) * data_type_size, cudaMemcpyDeviceToDevice, stream) == cudaSuccess); } } else { outer_rows_ *= batchSize; const int* d_segment_offsets_ptr = thrust::raw_pointer_cast(&d_segment_offsets_[0]); float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs_[0]); PADDLE_ENFORCE(cudaMemcpyAsync(output_ptrs, outputs, this->getNbOutputs() * sizeof(float*), cudaMemcpyHostToDevice, stream) == cudaSuccess); if (this->getDataType() == nvinfer1::DataType::kFLOAT) { Split(stream, same_shape_, outer_rows_, inner_cols_, segment_offsets_, d_segment_offsets_ptr, input_ptr, output_ptrs); } else { Split(stream, same_shape_, outer_rows_, inner_cols_, segment_offsets_, d_segment_offsets_ptr, (__half*)input_ptr, // NOLINT (__half**)output_ptrs); // NOLINT } } return cudaGetLastError() != cudaSuccess; } } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
86306d97471ff0de9e37ac0d01d9567bfe4d0feb.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h> #include "hipcub/hipcub.hpp" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/asan.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" C10_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html C10_DEFINE_int( caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); C10_DEFINE_int( caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); C10_DEFINE_int( caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); C10_DEFINE_int( caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); C10_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); C10_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); C10_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace at { REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext); } // namespace at namespace caffe2 { // Generic implementation - CUDA 
will handle the right function to call for us void CUDAContext::CopyBytesAsync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // TODO: verify that the CUDA handles copy from device to device correctly // even without SetDevice() // TODO: verify whether source or dest device should be a priority in picking // the stream // NB: right now the cross-device copy logic is invoked only in the contexts // when surrounding code explicitly manages data dependencies and sets up // events, so it's fine. In order to make it a standalone function proper // synchronization between stream is required int gpu_id = 0; if (dst_device.type() == DeviceType::CUDA) { gpu_id = dst_device.index(); } else if (src_device.type() == DeviceType::CUDA) { gpu_id = src_device.index(); } else { LOG(FATAL) << "shouldn't be called with non-cuda device"; } CUDA_ENFORCE(hipMemcpyAsync( dst, src, nbytes, hipMemcpyDefault, CUDAContext::getCudaObjects().GetStream(gpu_id))); } void CUDAContext::CopyBytesSync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // This emulates Caffe2 original behavior where sync copy doesn't change the // device. It's probably better for clarity to switch to the target device // explicitly here, but in the worst case CUDA would sync for us. // TODO: change it to HIPGuardMasqueradingAsCUDA CUDAContext context(-1); // take current device CUDA_ENFORCE(hipMemcpyAsync( dst, src, nbytes, hipMemcpyDefault, context.cuda_stream())); // destructor of context synchronizes } // For the CPU context, we also allow a (probably expensive) function // to copy the data from a cuda context. Inside the function, we create // a temporary CUDAContext object to carry out the copy. From the caller's // side, these functions are synchronous with respect to the host, similar // to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call. 
template <> inline void CPUContext::CopyBytes<CUDAContext, CPUContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(src)); context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst); } template <> inline void CPUContext::CopyBytes<CPUContext, CUDAContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(dst)); context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst); } } // namespace caffe2 namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; return cuda_objects_; } // TODO(jiayq): these variables shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is guarded by the CUDAContext::mutex. 
static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } C10_LOG_API_USAGE_ONCE("caffe2.init.cuda"); // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), C10_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", C10_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile."); for (DeviceIndex i = 0; i < NumCudaDevices(); ++i) { HIPGuardMasqueradingAsCUDA g(i); // Enable peer access. 
const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = ::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(hipDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for hipDeviceEnablePeerAccess that should always be // zero currently. // It is ok if peer access is already enabled... hipError_t err = hipDeviceEnablePeerAccess(j, 0); if ((err != hipErrorPeerAccessAlreadyEnabled) && (err != hipSuccess)) { CAFFE_THROW(hipGetErrorString(err)); } hipGetLastError(); // reset cuda error code } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new hipcub::CachingDeviceAllocator( FLAGS_caffe2_cub_bin_growth, FLAGS_caffe2_cub_min_bin, FLAGS_caffe2_cub_max_bin, size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (FLAGS_caffe2_cuda_memory_pool == "" || FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. 
" "This error message may go away in the future."); } else if (FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; } else { CAFFE_THROW( "Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool); } } /** * An allocator that does the CPU memory allocation with pinned memory. * * This is needed because if we want to do any asynchronous cuda memcpy, * the underlying CPU memory also needs to be allocated into pinned memory * space. As a result, whenever Caffe2 is built with GPU and there is * GPU present during runtime, at global initialization time we will set * the CPU memory allocator to allocate pinned memory. * * NB: This behavior is probably too aggressive. We should consider asking users * to do on-demand memory pinning (like exposed in PyTorch APIs) instead. */ struct CAFFE2_CUDA_API PinnedCPUAllocator final : public at::Allocator { PinnedCPUAllocator() { baseAllocator_ = GetDefaultCPUAllocator(); } ~PinnedCPUAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { if (nbytes == 0) { // replicate c10::alloc_cpu behavior - return nullptr return {nullptr, nullptr, &Delete, at::Device(CPU)}; } void* data; at::DataPtr data_ptr; std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { at::DeleterFnPtr expected_deleter = baseAllocator_->raw_deleter(); data_ptr = baseAllocator_->allocate(nbytes); data = data_ptr.get(); CAFFE_ENFORCE(data); CUDA_ENFORCE(hipHostRegister(data, nbytes, hipHostRegisterDefault)); CAFFE_ENFORCE( data_ptr.compare_exchange_deleter(expected_deleter, &Delete), "Failed to swap deleter (already swapped?)"); } else { CUDA_ENFORCE(hipHostMalloc(&data, nbytes)); data_ptr = {data, data, &Delete, at::Device(CPU)}; } memset(data, 0, nbytes); return data_ptr; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } 
private: static void Delete(void* data) { if (!data) { return; } // Caffe2 uses a lazy way to figure out if one is actually going to use GPUs // or not. If a CUDAContext::New() call is made, inside the CUDAContext // function we will switch the cpu side allocator to a PinnedCPUAllocator. // But, if one calls CPUContext::New() before any cuda allocations, // PinnedCPUAllocator can still delete the corresponding memory. std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { CUDA_ENFORCE(hipHostUnregister(data)); GetDefaultCPUAllocator()->raw_deleter()(data); } else { hipError_t err = hipHostFree(data); if (err == hipErrorInvalidValue) { free(data); // Calling hipGetLastError will reset the cuda error. hipError_t _err = hipGetLastError(); } else { // For all other errors, still do a cuda check. CUDA_ENFORCE(err); } } } at::Allocator* baseAllocator_; }; static PinnedCPUAllocator g_pinned_cpu_alloc; // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if CAFFE2_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "hipHostMalloc. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; SetCPUAllocator(&g_pinned_cpu_alloc); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. 
namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline DeviceIndex RectifyGPUID(DeviceIndex gpu_id) { return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(DeviceIndex gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_device_id() ? RectifyGPUID(option.device_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? 
option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), PROTO_CUDA); } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = ::max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } VLOG(1) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } struct DefaultCUDAAllocator final : public at::Allocator { DefaultCUDAAllocator() {} ~DefaultCUDAAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. 
static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: if (nbytes != 0) { CUDA_ENFORCE(hipMalloc(&ptr, nbytes)); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::CUB: if (nbytes != 0) { CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); } g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::THC: { // The reason we have this stream guard here is to preserve // the historical behavior of the 'thc' allocator in Caffe2, // which is to put all allocations on the same (default) // stream. This behavior is morally wrong (since passing // allocations between streams allows for the possibility // of you handing out some memory that an old stream // is still working on), but it doesn't seem to cause issues // in Caffe2 today. Our hypothesis for why this is the case // is that Caffe2 doesn't really do very many allocations // on the fly; instead they allocate once and then reuse // the allocations for the whole program. In this case, // the hazard is avoided. // // We intend to remove this stream guard, but the benefit // to putting all allocations on the same stream is it // reduces per-stream fragmentation, and this helps // some models that are currently running with the thc // allocator fit in memory. We will need to find some // way of resolving this problem. 
hip::HIPStreamGuardMasqueradingAsCUDA g( Stream( Stream::DEFAULT, Device(kCUDA, CaffeCudaGetDevice()) )); ptr = hip::HIPCachingAllocator::raw_alloc(nbytes); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple hipFree. hipError_t error = hipFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is // exiting anyway, we will not need to worry about memory leak, so we // basically ignore it. This is definitely not ideal but works for now. 
if (error != hipSuccess && error != hipErrorDeinitialized) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << hipGetErrorString(error); } if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { hip::HIPCachingAllocator::raw_delete(ptr); if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } }; static DefaultCUDAAllocator g_cuda_alloc; REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc); } // namespace caffe2 namespace at { REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CPU, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CPU, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); } // namespace at
86306d97471ff0de9e37ac0d01d9567bfe4d0feb.cu
#include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include <c10/cuda/CUDACachingAllocator.h> #include "cub/util_allocator.cuh" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/asan.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" C10_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html C10_DEFINE_int( caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); C10_DEFINE_int( caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); C10_DEFINE_int( caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); C10_DEFINE_int( caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); C10_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); C10_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); C10_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace at { REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext); } // namespace at namespace caffe2 { // Generic implementation - CUDA will handle the right function to call for us void 
CUDAContext::CopyBytesAsync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // TODO: verify that the CUDA handles copy from device to device correctly // even without SetDevice() // TODO: verify whether source or dest device should be a priority in picking // the stream // NB: right now the cross-device copy logic is invoked only in the contexts // when surrounding code explicitly manages data dependencies and sets up // events, so it's fine. In order to make it a standalone function proper // synchronization between stream is required int gpu_id = 0; if (dst_device.type() == DeviceType::CUDA) { gpu_id = dst_device.index(); } else if (src_device.type() == DeviceType::CUDA) { gpu_id = src_device.index(); } else { LOG(FATAL) << "shouldn't be called with non-cuda device"; } CUDA_ENFORCE(cudaMemcpyAsync( dst, src, nbytes, cudaMemcpyDefault, CUDAContext::getCudaObjects().GetStream(gpu_id))); } void CUDAContext::CopyBytesSync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // This emulates Caffe2 original behavior where sync copy doesn't change the // device. It's probably better for clarity to switch to the target device // explicitly here, but in the worst case CUDA would sync for us. // TODO: change it to CUDAGuard CUDAContext context(-1); // take current device CUDA_ENFORCE(cudaMemcpyAsync( dst, src, nbytes, cudaMemcpyDefault, context.cuda_stream())); // destructor of context synchronizes } // For the CPU context, we also allow a (probably expensive) function // to copy the data from a cuda context. Inside the function, we create // a temporary CUDAContext object to carry out the copy. From the caller's // side, these functions are synchronous with respect to the host, similar // to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call. 
template <> inline void CPUContext::CopyBytes<CUDAContext, CPUContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(src)); context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst); } template <> inline void CPUContext::CopyBytes<CPUContext, CUDAContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(dst)); context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst); } } // namespace caffe2 namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; return cuda_objects_; } // TODO(jiayq): these variables shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is guarded by the CUDAContext::mutex. 
static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } C10_LOG_API_USAGE_ONCE("caffe2.init.cuda"); // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), C10_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", C10_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile."); for (DeviceIndex i = 0; i < NumCudaDevices(); ++i) { CUDAGuard g(i); // Enable peer access. 
const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = std::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(cudaDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for cudaDeviceEnablePeerAccess that should always be // zero currently. // It is ok if peer access is already enabled... cudaError_t err = cudaDeviceEnablePeerAccess(j, 0); if ((err != cudaErrorPeerAccessAlreadyEnabled) && (err != cudaSuccess)) { CAFFE_THROW(cudaGetErrorString(err)); } cudaGetLastError(); // reset cuda error code } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new cub::CachingDeviceAllocator( FLAGS_caffe2_cub_bin_growth, FLAGS_caffe2_cub_min_bin, FLAGS_caffe2_cub_max_bin, size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (FLAGS_caffe2_cuda_memory_pool == "" || FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. 
" "This error message may go away in the future."); } else if (FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; } else { CAFFE_THROW( "Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool); } } /** * An allocator that does the CPU memory allocation with pinned memory. * * This is needed because if we want to do any asynchronous cuda memcpy, * the underlying CPU memory also needs to be allocated into pinned memory * space. As a result, whenever Caffe2 is built with GPU and there is * GPU present during runtime, at global initialization time we will set * the CPU memory allocator to allocate pinned memory. * * NB: This behavior is probably too aggressive. We should consider asking users * to do on-demand memory pinning (like exposed in PyTorch APIs) instead. */ struct CAFFE2_CUDA_API PinnedCPUAllocator final : public at::Allocator { PinnedCPUAllocator() { baseAllocator_ = GetDefaultCPUAllocator(); } ~PinnedCPUAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { if (nbytes == 0) { // replicate c10::alloc_cpu behavior - return nullptr return {nullptr, nullptr, &Delete, at::Device(CPU)}; } void* data; at::DataPtr data_ptr; std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { at::DeleterFnPtr expected_deleter = baseAllocator_->raw_deleter(); data_ptr = baseAllocator_->allocate(nbytes); data = data_ptr.get(); CAFFE_ENFORCE(data); CUDA_ENFORCE(cudaHostRegister(data, nbytes, cudaHostRegisterDefault)); CAFFE_ENFORCE( data_ptr.compare_exchange_deleter(expected_deleter, &Delete), "Failed to swap deleter (already swapped?)"); } else { CUDA_ENFORCE(cudaMallocHost(&data, nbytes)); data_ptr = {data, data, &Delete, at::Device(CPU)}; } memset(data, 0, nbytes); return data_ptr; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } 
private: static void Delete(void* data) { if (!data) { return; } // Caffe2 uses a lazy way to figure out if one is actually going to use GPUs // or not. If a CUDAContext::New() call is made, inside the CUDAContext // function we will switch the cpu side allocator to a PinnedCPUAllocator. // But, if one calls CPUContext::New() before any cuda allocations, // PinnedCPUAllocator can still delete the corresponding memory. std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { CUDA_ENFORCE(cudaHostUnregister(data)); GetDefaultCPUAllocator()->raw_deleter()(data); } else { cudaError_t err = cudaFreeHost(data); if (err == cudaErrorInvalidValue) { free(data); // Calling cudaGetLastError will reset the cuda error. cudaError_t _err = cudaGetLastError(); } else { // For all other errors, still do a cuda check. CUDA_ENFORCE(err); } } } at::Allocator* baseAllocator_; }; static PinnedCPUAllocator g_pinned_cpu_alloc; // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if CAFFE2_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "cudaMallocHost. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; SetCPUAllocator(&g_pinned_cpu_alloc); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. 
namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline DeviceIndex RectifyGPUID(DeviceIndex gpu_id) { return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(DeviceIndex gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_device_id() ? RectifyGPUID(option.device_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? 
option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), PROTO_CUDA); } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = std::max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } VLOG(1) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } struct DefaultCUDAAllocator final : public at::Allocator { DefaultCUDAAllocator() {} ~DefaultCUDAAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. 
static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: if (nbytes != 0) { CUDA_ENFORCE(cudaMalloc(&ptr, nbytes)); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::CUB: if (nbytes != 0) { CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); } g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::THC: { // The reason we have this stream guard here is to preserve // the historical behavior of the 'thc' allocator in Caffe2, // which is to put all allocations on the same (default) // stream. This behavior is morally wrong (since passing // allocations between streams allows for the possibility // of you handing out some memory that an old stream // is still working on), but it doesn't seem to cause issues // in Caffe2 today. Our hypothesis for why this is the case // is that Caffe2 doesn't really do very many allocations // on the fly; instead they allocate once and then reuse // the allocations for the whole program. In this case, // the hazard is avoided. // // We intend to remove this stream guard, but the benefit // to putting all allocations on the same stream is it // reduces per-stream fragmentation, and this helps // some models that are currently running with the thc // allocator fit in memory. We will need to find some // way of resolving this problem. 
cuda::CUDAStreamGuard g( Stream( Stream::DEFAULT, Device(kCUDA, CaffeCudaGetDevice()) )); ptr = cuda::CUDACachingAllocator::raw_alloc(nbytes); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple cudaFree. cudaError_t error = cudaFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is // exiting anyway, we will not need to worry about memory leak, so we // basically ignore it. This is definitely not ideal but works for now. 
if (error != cudaSuccess && error != cudaErrorCudartUnloading) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << cudaGetErrorString(error); } if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { cuda::CUDACachingAllocator::raw_delete(ptr); if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } }; static DefaultCUDAAllocator g_cuda_alloc; REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc); } // namespace caffe2 namespace at { REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CPU, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CPU, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); } // namespace at
00912c9469bf4a1af0ec5403a082854a83d6b372.hip
// !!! This is a file automatically generated by hipify!!! /*$Id: main.cu 738 2009-11-13 16:08:10Z wenbinor $*/ /** *This is the source code for Mars, a MapReduce framework on graphics *processors. *Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia) *Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com). *If you have any question on the code, please contact us at * wenbin@cse.ust.hk or savenhe@microsoft.com * *The license is a free non-exclusive, non-transferable license to reproduce, *use, modify and display the source code version of the Software, with or *without modifications solely for non-commercial research, educational or *evaluation purposes. The license does not entitle Licensee to technical support, *telephone assistance, enhancements or updates to the Software. All rights, title *to and ownership interest in Mars, including all intellectual property rights *therein shall remain in HKUST. */ /*********************************************************************** *Page View Count (PVC): It obtains the number of *distinct page views from the web logs. Each entry in the *web log is represented as <URL, IP, Cookie>, where *URL is the URL of the accessed page; IP is the IP *address that accesses the page; Cookie is the cookie *information generated when the page is accessed. This *application has two executions of MapReduce. The first *one removes the duplicate entries in the web logs. The *second one counts the number of page views. In the *first MapReduce, each Map takes the pair of an entry as *the key and the size of the entry as value. The sort is to *eliminate the redundancy in the web log. Specifically, if *more than one log entries have the same information, *we keep only one of them. The first MapReduce *outputs the result pair of the log entry as key and the *size of the line as value. The second MapReduce *processes the key/value pairs generated from the first *MapReduce. 
The Map outputs the URL as the key and *the IP as the value. The Reduce computes the number *of IPs for each URL. ***********************************************************************/ #include "MarsInc.h" #include "global.h" #define __OUTPUT__ void validate(Spec_t* spec, char* h_filebuf, char* d_filebuf, int fileSize, int num) { if (num > spec->outputDiffKeyCount) num = spec->outputDiffKeyCount; CUDA_SAFE_CALL(hipMemcpy(h_filebuf, d_filebuf, fileSize, hipMemcpyDeviceToHost)); //printf("num = %d",num); for (int i = 0; i < num; i++) { int2 groupInfo = spec->outputKeyListRange[i]; PVC_KEY_T* keys = (PVC_KEY_T*)(spec->outputKeys + spec->outputOffsetSizes[groupInfo.x].x); int* ip_offsets = (int*)(spec->outputVals + spec->outputOffsetSizes[groupInfo.x].z); int groupSize = groupInfo.y - groupInfo.x; printf("===========URL: %s - %d unique ip accesses===========\n", h_filebuf + keys->entry_offset, groupSize); for (int j = 0; j < groupSize; j++) { printf("IP: %s\n", h_filebuf + ip_offsets[j]); } } } //----------------------------------------------------------------- //usage: PageViewCount datafile //param: datafile //----------------------------------------------------------------- int main( int argc, char** argv) { if (argc != 2) { printf("usage: %s datafile\n", argv[0]); exit(-1); } Spec_t *spec = GetDefaultSpec(); TimeVal_t allTimer; startTimer(&allTimer); //------------------------------------------------------------------ //prepare input //------------------------------------------------------------------ TimeVal_t preTimer; startTimer(&preTimer); FILE *fp = fopen(argv[1], "r"); fseek(fp, 0, SEEK_END); int fileSize = ftell(fp) + 1; rewind(fp); char *h_filebuf = (char*)malloc(fileSize); fread(h_filebuf, fileSize, 1, fp); h_filebuf[fileSize-1] = '\n'; fclose(fp); char* d_filebuf = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_filebuf, fileSize)); char* p = h_filebuf; char* start = h_filebuf; int cur = 0; PVC_KEY_T key; key.file_buf = d_filebuf; PVC_VAL_T val; val.phase = 
0; // int urlsize = 0; // char * pt = start; // for(;*pt!= 32;++pt,++urlsize){ // printf("%c",*pt); // } // printf("\n"); while (1) { for (; *p != '\n' && *p != '\r' && *p != '\0'; ++p); *p = '\0'; p++; key.entry_offset = cur; val.entry_size = p - start; cur += val.entry_size; if (cur >fileSize) break; AddMapInputRecord(spec, &key, &val, sizeof(PVC_KEY_T), sizeof(PVC_VAL_T)); start = p; } CUDA_SAFE_CALL(hipMemcpy(d_filebuf, h_filebuf, fileSize, hipMemcpyHostToDevice)); endTimer("preprocess", &preTimer); //------------------------------------------------------------------ //the first MapReduce //------------------------------------------------------------------ spec->workflow = MAP_REDUCE; MapReduce(spec); //------------------------------------------------------------------ //the second MapReduce //------------------------------------------------------------------ CUDA_SAFE_CALL(hipMemcpy(spec->inputKeys, spec->outputKeys, spec->outputAllKeySize, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(spec->inputVals, spec->outputVals, spec->outputAllValSize, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(spec->inputOffsetSizes, spec->outputOffsetSizes, spec->outputRecordCount * sizeof(int4), hipMemcpyDeviceToHost)); spec->inputRecordCount = spec->outputRecordCount; CUDA_SAFE_CALL(hipFree(spec->outputKeys)); CUDA_SAFE_CALL(hipFree(spec->outputVals)); CUDA_SAFE_CALL(hipFree(spec->outputOffsetSizes)); spec->workflow = MAP_GROUP; #ifdef __OUTPUT__ spec->outputToHost = 1; #endif MapReduce(spec); endTimer("all", &allTimer); //------------------------------------------------------------------ //Further processing //------------------------------------------------------------------ #ifdef __OUTPUT__ validate(spec, h_filebuf, d_filebuf, fileSize, 200); #endif //------------------------------------------------------------------ //Complete //------------------------------------------------------------------ FinishMapReduce(spec); free(h_filebuf); return 0; }
00912c9469bf4a1af0ec5403a082854a83d6b372.cu
/*$Id: main.cu 738 2009-11-13 16:08:10Z wenbinor $*/ /** *This is the source code for Mars, a MapReduce framework on graphics *processors. *Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia) *Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com). *If you have any question on the code, please contact us at * wenbin@cse.ust.hk or savenhe@microsoft.com * *The license is a free non-exclusive, non-transferable license to reproduce, *use, modify and display the source code version of the Software, with or *without modifications solely for non-commercial research, educational or *evaluation purposes. The license does not entitle Licensee to technical support, *telephone assistance, enhancements or updates to the Software. All rights, title *to and ownership interest in Mars, including all intellectual property rights *therein shall remain in HKUST. */ /*********************************************************************** *Page View Count (PVC): It obtains the number of *distinct page views from the web logs. Each entry in the *web log is represented as <URL, IP, Cookie>, where *URL is the URL of the accessed page; IP is the IP *address that accesses the page; Cookie is the cookie *information generated when the page is accessed. This *application has two executions of MapReduce. The first *one removes the duplicate entries in the web logs. The *second one counts the number of page views. In the *first MapReduce, each Map takes the pair of an entry as *the key and the size of the entry as value. The sort is to *eliminate the redundancy in the web log. Specifically, if *more than one log entries have the same information, *we keep only one of them. The first MapReduce *outputs the result pair of the log entry as key and the *size of the line as value. The second MapReduce *processes the key/value pairs generated from the first *MapReduce. The Map outputs the URL as the key and *the IP as the value. 
The Reduce computes the number *of IPs for each URL. ***********************************************************************/ #include "MarsInc.h" #include "global.h" #define __OUTPUT__ void validate(Spec_t* spec, char* h_filebuf, char* d_filebuf, int fileSize, int num) { if (num > spec->outputDiffKeyCount) num = spec->outputDiffKeyCount; CUDA_SAFE_CALL(cudaMemcpy(h_filebuf, d_filebuf, fileSize, cudaMemcpyDeviceToHost)); //printf("num = %d",num); for (int i = 0; i < num; i++) { int2 groupInfo = spec->outputKeyListRange[i]; PVC_KEY_T* keys = (PVC_KEY_T*)(spec->outputKeys + spec->outputOffsetSizes[groupInfo.x].x); int* ip_offsets = (int*)(spec->outputVals + spec->outputOffsetSizes[groupInfo.x].z); int groupSize = groupInfo.y - groupInfo.x; printf("===========URL: %s - %d unique ip accesses===========\n", h_filebuf + keys->entry_offset, groupSize); for (int j = 0; j < groupSize; j++) { printf("IP: %s\n", h_filebuf + ip_offsets[j]); } } } //----------------------------------------------------------------- //usage: PageViewCount datafile //param: datafile //----------------------------------------------------------------- int main( int argc, char** argv) { if (argc != 2) { printf("usage: %s datafile\n", argv[0]); exit(-1); } Spec_t *spec = GetDefaultSpec(); TimeVal_t allTimer; startTimer(&allTimer); //------------------------------------------------------------------ //prepare input //------------------------------------------------------------------ TimeVal_t preTimer; startTimer(&preTimer); FILE *fp = fopen(argv[1], "r"); fseek(fp, 0, SEEK_END); int fileSize = ftell(fp) + 1; rewind(fp); char *h_filebuf = (char*)malloc(fileSize); fread(h_filebuf, fileSize, 1, fp); h_filebuf[fileSize-1] = '\n'; fclose(fp); char* d_filebuf = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_filebuf, fileSize)); char* p = h_filebuf; char* start = h_filebuf; int cur = 0; PVC_KEY_T key; key.file_buf = d_filebuf; PVC_VAL_T val; val.phase = 0; // int urlsize = 0; // char * pt = start; // for(;*pt!= 
32;++pt,++urlsize){ // printf("%c",*pt); // } // printf("\n"); while (1) { for (; *p != '\n' && *p != '\r' && *p != '\0'; ++p); *p = '\0'; p++; key.entry_offset = cur; val.entry_size = p - start; cur += val.entry_size; if (cur >fileSize) break; AddMapInputRecord(spec, &key, &val, sizeof(PVC_KEY_T), sizeof(PVC_VAL_T)); start = p; } CUDA_SAFE_CALL(cudaMemcpy(d_filebuf, h_filebuf, fileSize, cudaMemcpyHostToDevice)); endTimer("preprocess", &preTimer); //------------------------------------------------------------------ //the first MapReduce //------------------------------------------------------------------ spec->workflow = MAP_REDUCE; MapReduce(spec); //------------------------------------------------------------------ //the second MapReduce //------------------------------------------------------------------ CUDA_SAFE_CALL(cudaMemcpy(spec->inputKeys, spec->outputKeys, spec->outputAllKeySize, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(spec->inputVals, spec->outputVals, spec->outputAllValSize, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(spec->inputOffsetSizes, spec->outputOffsetSizes, spec->outputRecordCount * sizeof(int4), cudaMemcpyDeviceToHost)); spec->inputRecordCount = spec->outputRecordCount; CUDA_SAFE_CALL(cudaFree(spec->outputKeys)); CUDA_SAFE_CALL(cudaFree(spec->outputVals)); CUDA_SAFE_CALL(cudaFree(spec->outputOffsetSizes)); spec->workflow = MAP_GROUP; #ifdef __OUTPUT__ spec->outputToHost = 1; #endif MapReduce(spec); endTimer("all", &allTimer); //------------------------------------------------------------------ //Further processing //------------------------------------------------------------------ #ifdef __OUTPUT__ validate(spec, h_filebuf, d_filebuf, fileSize, 200); #endif //------------------------------------------------------------------ //Complete //------------------------------------------------------------------ FinishMapReduce(spec); free(h_filebuf); return 0; }
79789eb01db79f09565c52759a578b33431ce9ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <array> #include "plugin.h" #include "kernel.h" #include "hip/hip_fp16.h" inline __device__ __half minus_fb(const __half & a, const __half & b) { #if __CUDA_ARCH__ >= 530 return a - b; #else return __float2half(__half2float(a) - __half2float(b)); #endif } inline __device__ float minus_fb(const float & a, const float & b) { return a - b; } template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void gatherTopDetections_kernel( const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const int* indices, const T_SCORE* scores, const T_BBOX* bboxData, int* keepCount, T_BBOX* topDetections, const T_SCORE score_shift) { if (keepTopK > topK) return; for (int i = blockIdx.x * nthds_per_cta + threadIdx.x; i < numImages * keepTopK; i += gridDim.x * nthds_per_cta) { const int imgId = i / keepTopK; const int detId = i % keepTopK; const int offset = imgId * numClasses * topK; const int index = indices[offset + detId]; const T_SCORE score = scores[offset + detId]; /* * It is also likely that there is "bad bounding boxes" in the keepTopK bounding boxes. * We set the bounding boxes parameters as the parameters shown below. 
* These data will only show up at the end of keepTopK bounding boxes since the bounding boxes were sorted previously. * It is also not going to affect the count of valid bounding boxes (keepCount). * These data will probably never be used (because we have keepCount). */ if (index == -1) { topDetections[i * 7] = imgId; // image id topDetections[i * 7 + 1] = -1; // label topDetections[i * 7 + 2] = 0; // confidence score // score==0 will not pass the VisualizeBBox check topDetections[i * 7 + 3] = 0; // bbox xmin topDetections[i * 7 + 4] = 0; // bbox ymin topDetections[i * 7 + 5] = 0; // bbox xmax topDetections[i * 7 + 6] = 0; // bbox ymax } else { const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass)); const int bboxId = ((shareLocation ? (index % numPredsPerClass) : index % (numClasses * numPredsPerClass)) + bboxOffset) * 4; topDetections[i * 7] = imgId; // image id topDetections[i * 7 + 1] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label topDetections[i * 7 + 2] = score; // confidence score // subtract 1.0 score shift we added in sortScorePerClass topDetections[i * 7 + 2] = minus_fb(topDetections[i * 7 + 2], score_shift); const T_BBOX xMin = bboxData[bboxId]; const T_BBOX yMin = bboxData[bboxId + 1]; const T_BBOX xMax = bboxData[bboxId + 2]; const T_BBOX yMax = bboxData[bboxId + 3]; // clipped bbox xmin topDetections[i * 7 + 3] = saturate(xMin); // clipped bbox ymin topDetections[i * 7 + 4] = saturate(yMin); // clipped bbox xmax topDetections[i * 7 + 5] = saturate(xMax); // clipped bbox ymax topDetections[i * 7 + 6] = saturate(yMax); // Atomic add to increase the count of valid keepTopK bounding boxes // Without having to do manual sync. 
atomicAdd(&keepCount[i / keepTopK], 1); } } } template <typename T_BBOX, typename T_SCORE> pluginStatus_t gatherTopDetections_gpu( hipStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const void* indices, const void* scores, const void* bboxData, void* keepCount, void* topDetections, const float score_shift ) { hipMemsetAsync(keepCount, 0, numImages * sizeof(int), stream); const int BS = 32; const int GS = 32; hipLaunchKernelGGL(( gatherTopDetections_kernel<T_BBOX, T_SCORE, BS>), dim3(GS), dim3(BS), 0, stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, (int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData, (int*) keepCount, (T_BBOX*) topDetections, T_SCORE(score_shift)); CSC(hipGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } // gatherTopDetections LAUNCH CONFIG typedef pluginStatus_t (*gtdFunc)(hipStream_t, const bool, const int, const int, const int, const int, const int, const void*, const void*, const void*, void*, void*, const float); struct gtdLaunchConfig { DataType t_bbox; DataType t_score; gtdFunc function; gtdLaunchConfig(DataType t_bbox, DataType t_score) : t_bbox(t_bbox) , t_score(t_score) { } gtdLaunchConfig(DataType t_bbox, DataType t_score, gtdFunc function) : t_bbox(t_bbox) , t_score(t_score) , function(function) { } bool operator==(const gtdLaunchConfig& other) { return t_bbox == other.t_bbox && t_score == other.t_score; } }; using nvinfer1::DataType; static std::array<gtdLaunchConfig, 2> gtdLCOptions = { gtdLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, gatherTopDetections_gpu<float, float>), gtdLaunchConfig(DataType::kHALF, DataType::kHALF, gatherTopDetections_gpu<__half, __half>) }; pluginStatus_t gatherTopDetections( hipStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const DataType DT_BBOX, const DataType 
DT_SCORE, const void* indices, const void* scores, const void* bboxData, void* keepCount, void* topDetections, const float score_shift) { gtdLaunchConfig lc = gtdLaunchConfig(DT_BBOX, DT_SCORE); for (unsigned i = 0; i < gtdLCOptions.size(); ++i) { if (lc == gtdLCOptions[i]) { DEBUG_PRINTF("gatherTopDetections kernel %d\n", i); return gtdLCOptions[i].function(stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, indices, scores, bboxData, keepCount, topDetections, score_shift); } } return STATUS_BAD_PARAM; }
79789eb01db79f09565c52759a578b33431ce9ae.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <array> #include "plugin.h" #include "kernel.h" #include "cuda_fp16.h" inline __device__ __half minus_fb(const __half & a, const __half & b) { #if __CUDA_ARCH__ >= 530 return a - b; #else return __float2half(__half2float(a) - __half2float(b)); #endif } inline __device__ float minus_fb(const float & a, const float & b) { return a - b; } template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void gatherTopDetections_kernel( const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const int* indices, const T_SCORE* scores, const T_BBOX* bboxData, int* keepCount, T_BBOX* topDetections, const T_SCORE score_shift) { if (keepTopK > topK) return; for (int i = blockIdx.x * nthds_per_cta + threadIdx.x; i < numImages * keepTopK; i += gridDim.x * nthds_per_cta) { const int imgId = i / keepTopK; const int detId = i % keepTopK; const int offset = imgId * numClasses * topK; const int index = indices[offset + detId]; const T_SCORE score = scores[offset + detId]; /* * It is also likely that there is "bad bounding boxes" in the keepTopK bounding boxes. * We set the bounding boxes parameters as the parameters shown below. * These data will only show up at the end of keepTopK bounding boxes since the bounding boxes were sorted previously. 
* It is also not going to affect the count of valid bounding boxes (keepCount). * These data will probably never be used (because we have keepCount). */ if (index == -1) { topDetections[i * 7] = imgId; // image id topDetections[i * 7 + 1] = -1; // label topDetections[i * 7 + 2] = 0; // confidence score // score==0 will not pass the VisualizeBBox check topDetections[i * 7 + 3] = 0; // bbox xmin topDetections[i * 7 + 4] = 0; // bbox ymin topDetections[i * 7 + 5] = 0; // bbox xmax topDetections[i * 7 + 6] = 0; // bbox ymax } else { const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass)); const int bboxId = ((shareLocation ? (index % numPredsPerClass) : index % (numClasses * numPredsPerClass)) + bboxOffset) * 4; topDetections[i * 7] = imgId; // image id topDetections[i * 7 + 1] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label topDetections[i * 7 + 2] = score; // confidence score // subtract 1.0 score shift we added in sortScorePerClass topDetections[i * 7 + 2] = minus_fb(topDetections[i * 7 + 2], score_shift); const T_BBOX xMin = bboxData[bboxId]; const T_BBOX yMin = bboxData[bboxId + 1]; const T_BBOX xMax = bboxData[bboxId + 2]; const T_BBOX yMax = bboxData[bboxId + 3]; // clipped bbox xmin topDetections[i * 7 + 3] = saturate(xMin); // clipped bbox ymin topDetections[i * 7 + 4] = saturate(yMin); // clipped bbox xmax topDetections[i * 7 + 5] = saturate(xMax); // clipped bbox ymax topDetections[i * 7 + 6] = saturate(yMax); // Atomic add to increase the count of valid keepTopK bounding boxes // Without having to do manual sync. 
atomicAdd(&keepCount[i / keepTopK], 1); } } } template <typename T_BBOX, typename T_SCORE> pluginStatus_t gatherTopDetections_gpu( cudaStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const void* indices, const void* scores, const void* bboxData, void* keepCount, void* topDetections, const float score_shift ) { cudaMemsetAsync(keepCount, 0, numImages * sizeof(int), stream); const int BS = 32; const int GS = 32; gatherTopDetections_kernel<T_BBOX, T_SCORE, BS><<<GS, BS, 0, stream>>>(shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, (int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData, (int*) keepCount, (T_BBOX*) topDetections, T_SCORE(score_shift)); CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } // gatherTopDetections LAUNCH CONFIG typedef pluginStatus_t (*gtdFunc)(cudaStream_t, const bool, const int, const int, const int, const int, const int, const void*, const void*, const void*, void*, void*, const float); struct gtdLaunchConfig { DataType t_bbox; DataType t_score; gtdFunc function; gtdLaunchConfig(DataType t_bbox, DataType t_score) : t_bbox(t_bbox) , t_score(t_score) { } gtdLaunchConfig(DataType t_bbox, DataType t_score, gtdFunc function) : t_bbox(t_bbox) , t_score(t_score) , function(function) { } bool operator==(const gtdLaunchConfig& other) { return t_bbox == other.t_bbox && t_score == other.t_score; } }; using nvinfer1::DataType; static std::array<gtdLaunchConfig, 2> gtdLCOptions = { gtdLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, gatherTopDetections_gpu<float, float>), gtdLaunchConfig(DataType::kHALF, DataType::kHALF, gatherTopDetections_gpu<__half, __half>) }; pluginStatus_t gatherTopDetections( cudaStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const DataType DT_BBOX, const DataType DT_SCORE, const void* 
indices, const void* scores, const void* bboxData, void* keepCount, void* topDetections, const float score_shift) { gtdLaunchConfig lc = gtdLaunchConfig(DT_BBOX, DT_SCORE); for (unsigned i = 0; i < gtdLCOptions.size(); ++i) { if (lc == gtdLCOptions[i]) { DEBUG_PRINTF("gatherTopDetections kernel %d\n", i); return gtdLCOptions[i].function(stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, indices, scores, bboxData, keepCount, topDetections, score_shift); } } return STATUS_BAD_PARAM; }
60b0dd27dc4321f6e4dd1f7c03d0ddff784491e3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "render_init.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int max_x = 1; int max_y = 1; hiprandState_t *rand_state = NULL; hipMalloc(&rand_state, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( render_init), dim3(gridBlock),dim3(threadBlock), 0, 0, max_x,max_y,rand_state); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( render_init), dim3(gridBlock),dim3(threadBlock), 0, 0, max_x,max_y,rand_state); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( render_init), dim3(gridBlock),dim3(threadBlock), 0, 0, max_x,max_y,rand_state); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
60b0dd27dc4321f6e4dd1f7c03d0ddff784491e3.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "render_init.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int max_x = 1; int max_y = 1; curandState *rand_state = NULL; cudaMalloc(&rand_state, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); render_init<<<gridBlock,threadBlock>>>(max_x,max_y,rand_state); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { render_init<<<gridBlock,threadBlock>>>(max_x,max_y,rand_state); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { render_init<<<gridBlock,threadBlock>>>(max_x,max_y,rand_state); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
afbe709bb24c4e7e2aa26a81f78d7fb4885a4e05.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> // CUDA includes #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/device_functions.h> const int ARRAY_SIZE = 30; __device__ int getIndex(int x, int y) { return ((y * ARRAY_SIZE) + x); } __device__ int mod(int num, int mod) { int ret = a % b; if(ret < 0) ret+=b; return ret; } __device__ int isCellAlive(int *d_read) { int up, down, left, right; int sm1 = ARRAY_SIZE - 1; // size-1 up = mod(threadIdx.y - 1, sm1); down = mod(threadIdx.y + 1, sm1); left = mod(threadIdx.x - 1, sm1); right = mod(threadIdx.x + 1, sm1); int count = d_read[getIndex(left, up)] + d_read[getIndex(threadIdx.x, up)] + d_read[getIndex(right, up)] + d_read[getIndex(left, threadIdx.y)] + d_read[getIndex(right, threadIdx.y)] + d_read[getIndex(left, down)] + d_read[getIndex(threadIdx.x, down)] + d_read[getIndex(right, down)]; // check rules of the game int rule1,rule2; rule1 = ((count == 2) || (count == 3)) && (d_read[getIndex(threadIdx.x, threadIdx.y)]); // count == 2 or 3 && the current cell == 1 rule2 = (count == 3) && (d_read[getIndex(threadIdx.x, threadIdx.y)] == 0); // count == 3 && the current cell == 0 return (rule1 || rule2); } __global__ void simulate(int *d_read, int *d_write) { int i = getIndex(threadIdx.x, threadIdx.y); d_write[i] = isCellAlive(d_read); // swap values then reset the write buffer d_read[i] = d_write[i]; d_write[i] = 0; } void printGrid(int grid[ARRAY_SIZE][ARRAY_SIZE]) { for (int y = 0; y < ARRAY_SIZE; y++){ for (int x = 0; x < ARRAY_SIZE; x++) { if (grid[x][y] == 1) { std::cout << " #"; } else { std::cout << " ."; } } std::cout << std::endl; } std::cout << "\n\n"; } int main() { // create the host grid used for printing int h_read[ARRAY_SIZE][ARRAY_SIZE] = { 0 }; // glider for testing h_read[3][2] = 1; h_read[4][3] = 1; h_read[2][4] = 1; h_read[3][4] = 1; h_read[4][4] = 1; printGrid(h_read); // declate GPU grid 
pointers int *d_read; int *d_write; // allocate memory in the device's memory space hipMalloc(&d_read, sizeof(int) * (ARRAY_SIZE * ARRAY_SIZE)); hipMalloc(&d_write, sizeof(int) * (ARRAY_SIZE * ARRAY_SIZE)); // copy the input data from the host's memory space to the device's memory space hipMemcpy(d_read, h_read, (sizeof(int) * (ARRAY_SIZE * ARRAY_SIZE)), hipMemcpyHostToDevice); memset(h_read, 0, sizeof(h_read)); hipMemcpy(d_write, h_read, (sizeof(int) * (ARRAY_SIZE * ARRAY_SIZE)), hipMemcpyHostToDevice); // run the kernel int gen = 10; for (int _gen = 0; _gen < gen; _gen++) { simulate << <1, dim3(ARRAY_SIZE, ARRAY_SIZE) >> >(d_read, d_write); } // copy the input data from the device's memory space to the host's memory space hipMemcpy(h_read, d_read, (sizeof(int) * (ARRAY_SIZE * ARRAY_SIZE)), hipMemcpyDeviceToHost); printGrid(h_read); hipFree(d_read); hipFree(d_write); //std::cin.get(); // stop console from automatically closing (for testing) return 0; }
afbe709bb24c4e7e2aa26a81f78d7fb4885a4e05.cu
#include <stdio.h> #include <iostream> // CUDA includes #include <cuda.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <device_functions.h> const int ARRAY_SIZE = 30; __device__ int getIndex(int x, int y) { return ((y * ARRAY_SIZE) + x); } __device__ int mod(int num, int mod) { int ret = a % b; if(ret < 0) ret+=b; return ret; } __device__ int isCellAlive(int *d_read) { int up, down, left, right; int sm1 = ARRAY_SIZE - 1; // size-1 up = mod(threadIdx.y - 1, sm1); down = mod(threadIdx.y + 1, sm1); left = mod(threadIdx.x - 1, sm1); right = mod(threadIdx.x + 1, sm1); int count = d_read[getIndex(left, up)] + d_read[getIndex(threadIdx.x, up)] + d_read[getIndex(right, up)] + d_read[getIndex(left, threadIdx.y)] + d_read[getIndex(right, threadIdx.y)] + d_read[getIndex(left, down)] + d_read[getIndex(threadIdx.x, down)] + d_read[getIndex(right, down)]; // check rules of the game int rule1,rule2; rule1 = ((count == 2) || (count == 3)) && (d_read[getIndex(threadIdx.x, threadIdx.y)]); // count == 2 or 3 && the current cell == 1 rule2 = (count == 3) && (d_read[getIndex(threadIdx.x, threadIdx.y)] == 0); // count == 3 && the current cell == 0 return (rule1 || rule2); } __global__ void simulate(int *d_read, int *d_write) { int i = getIndex(threadIdx.x, threadIdx.y); d_write[i] = isCellAlive(d_read); // swap values then reset the write buffer d_read[i] = d_write[i]; d_write[i] = 0; } void printGrid(int grid[ARRAY_SIZE][ARRAY_SIZE]) { for (int y = 0; y < ARRAY_SIZE; y++){ for (int x = 0; x < ARRAY_SIZE; x++) { if (grid[x][y] == 1) { std::cout << " #"; } else { std::cout << " ."; } } std::cout << std::endl; } std::cout << "\n\n"; } int main() { // create the host grid used for printing int h_read[ARRAY_SIZE][ARRAY_SIZE] = { 0 }; // glider for testing h_read[3][2] = 1; h_read[4][3] = 1; h_read[2][4] = 1; h_read[3][4] = 1; h_read[4][4] = 1; printGrid(h_read); // declate GPU grid pointers int *d_read; int *d_write; // allocate memory in the device's memory 
space cudaMalloc(&d_read, sizeof(int) * (ARRAY_SIZE * ARRAY_SIZE)); cudaMalloc(&d_write, sizeof(int) * (ARRAY_SIZE * ARRAY_SIZE)); // copy the input data from the host's memory space to the device's memory space cudaMemcpy(d_read, h_read, (sizeof(int) * (ARRAY_SIZE * ARRAY_SIZE)), cudaMemcpyHostToDevice); memset(h_read, 0, sizeof(h_read)); cudaMemcpy(d_write, h_read, (sizeof(int) * (ARRAY_SIZE * ARRAY_SIZE)), cudaMemcpyHostToDevice); // run the kernel int gen = 10; for (int _gen = 0; _gen < gen; _gen++) { simulate << <1, dim3(ARRAY_SIZE, ARRAY_SIZE) >> >(d_read, d_write); } // copy the input data from the device's memory space to the host's memory space cudaMemcpy(h_read, d_read, (sizeof(int) * (ARRAY_SIZE * ARRAY_SIZE)), cudaMemcpyDeviceToHost); printGrid(h_read); cudaFree(d_read); cudaFree(d_write); //std::cin.get(); // stop console from automatically closing (for testing) return 0; }
86ce69cb617d4bb2a9d1b50a4429415818166fc2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************************** realize_mod.c Takes a struct mod_t model and "realizes" its components as polyhedral solids made up of triangular facets. Modified 2016 July 9 by Matthias Engels: Adapted for use with shape-cuda. ------------------------------------------------------------------------------------------ Modified 2014 April 26 by CM: Increase the minimum permitted value of the highest-order coefficient in the cubic equation that locates an ovoid vertex: if the coefficient is smaller than this minimum, treat it as if it's zero and solve a quadratic equation instead Modified 2014 March 22 by CM: Relax the tolerance for finding a valid ovoid vertex position Modified 2014 March 10 by CM: Guard against roundoff problems when computing vertex positions for ovoid components with very small |k| Modified 2014 February 10 by CM: Implement multiple radar and optical scattering laws Modified 2013 August 28 by CM: Set the bad diameter flag for harmonic components with tiny or negative vertex displacements, and for harmonic and vertex components with tiny or negative "scale factor" values Modified 2013 June 2 by CM: In the cubic_realroot routine, initialize nrealroots to avoid compilation warning Fix a comment Modified 2013 May 20 by CM: Implement ovoid shape components Modified 2012 July 4 by CM: Add test in "realize_coordinates" routine to avoid compilation warning Modified 2011 September 2 by CM: Bug fix: the "check_surface" routine makes use of facet normals when identifying active vs. 
inactive vertices and facets, but facet normals weren't being computed until *after* check_surface was called Make the code more modular (and address the above bug) by introducing the "realize_coordinates" and "compute_moments" routines, as per the version of realize_mod in the SHERMAN package Store the area and the centroid coordinates of each facet Add "harmlambert" optical scattering law (compute facet angular coordinates) Modified 2010 September 1 by CM: Add "facetnorm" argument to the rayfacint routine Modified 2010 June 1 by CM: Change "scalefactor" parameter from a scalar to a 3-component vector Modified 2010 March 19 by CM: Implement '=' state for vertex deviations Modified 2009 November 15 by CM: In the "check_surface" routine, eliminate an unused variable and fix a couple of ambiguous nested if-then-else statements Modified 2009 August 3 by CM: For the "harmlommel" "harmhapke" "harmkaas" and "harmcosine_diff" inhomogeneous scattering laws, compute the spherical coordinates (theta and phi) of each facet after each component's rotational and translational offsets have been applied rather than before, so that these laws can be used for multiple-component models For multiple-component models, use a more careful method (already used for facets) to determine which vertices are on the model's surface; also, for both vertices and facets, allow for a bit of roundoff error in this determination by adding a tolerance argument to the "rayfacint" routine For multiple-component models, determine the new "act" (active) flag for each model side For multiple-component models, fix a bug in computing the center of mass for individual components Modified 2009 July 5 by CM: Turn each component's rotational offsets into a rotation matrix here rather than in the "read_mod" routine, in case the offsets are being allowed to float Modified 2009 July 1 by CM: Add "check_surface" routine that determines which facets of a multiple-component model lie on the model's surface rather 
than interior to the model For multiple-component models, when computing the area and the moments of the overall model, ignore facets that lie interior to the model Modified 2009 April 3 by CM: Fix slight bug in defining function a[i] = 1/radius^2 when a/b or b/c is tiny or negative for ellipsoid components Initialize the "baddiam_logfactor" parameter and set its value when 2a, a/b, or b/c is tiny or negative for ellipsoid components Modified 2007 August 10 by CM: Eliminate unused variable Modified 2007 January 8 by CM: Define "scalefactor" state for vertex realizations of ellipsoid and harmonic components, not just its value Modified 2006 October 1 by CM: Add "scalefactor" to harmonic and vertex shape structures Replace ellipsoid diameters D with two_a, a_over_b, b_over_c Modified 2005 September 6 by CM: Add computation of facet angular coordinates for use with harmonic scattering laws Modified 2005 August 17 by CM: Move computation of spherical harmonic functions afactor and bfactor from here to read_mod.c, so that it can be done just once per fit Modified 2005 February 28 by CM: Initialize the "baddiam" parameter (flag indicating tiny or negative ellipsoid diameters) to 0 here rather than in bestfit.c so that it can be used for actions other than "fit" Modified 2004 August 23 by CM: Eliminated newtheta and oldcostheta variables and THETATOL constant, since they weren't actually being used (i.e., the test in which they were included was always true) Modified 2003 April 17 by CM: Added computation of component and model moments; this used to be done in function penalties (but wasn't always being done) Added code to cope with tiny or negative ellipsoid diameters; as a result, must now pass the model's parameter structure as an argument to realize_mod Added surface area computation for components and for the full model *****************************************************************************************/ extern "C" { #include "../shape/head.h" } #define 
HAIRWIDTH 1.0e-7 #define SMALLRATIO 0.01 #define SMALLOVOIDK1 0.01 #define SMALLOVOIDK2 1.0e-6 #define OVOIDTOL 1.0e-6 #define MAXEDGE 100 #define EDGETOL 1.0e-14 #define RTOL 1000*EDGETOL #define SMALLCOEFF3 1.0e-5 /* These 2 device variables are to get nf and nv from the GPU-located dmod file */ __device__ int dnv, dnf, dns; __device__ double d_a[3]; __device__ double a_radius, a_over_b, b_over_c, k_asym, x0term, numer, denom, x0; __device__ int harmonic_scatlaw, cm_nf; __device__ float rm_area=0.0, rm_ifarea=0.0, rm_vol=0.0, rm_ifvol=0.0, rm_dcom[3], rm_ifdcom[3], rm_dI[3][3], rm_ifdI[3][3]; static int nv, nf, ns; static dim3 nvBLK,nvTHD,nfBLK,nfTHD,nsBLK,nsTHD; __host__ void realize_coordinates_cuda(struct par_t *dpar, struct mod_t *dmod, unsigned char type); __host__ void check_surface_cuda(struct mod_t *dmod); __host__ void compute_moments_cuda(struct mod_t *dmod); __global__ void set_diam_krnl(struct par_t *dpar, struct mod_t *dmod){ /* This is a single-thread kernel */ if (threadIdx.x == 0) { dpar->baddiam = 0; dpar->baddiam_logfactor = 0; dnv = dmod->shape.comp[0].real.nv; dnf = dmod->shape.comp[0].real.nf; dns = dmod->shape.comp[0].real.ns; } __syncthreads(); } __global__ void ellipse_diameter_krnl(struct par_t *dpar, struct mod_t *dmod) { /* This is a single-thread kernel */ double diam, diamratio; if (threadIdx.x == 0) { diam = dmod->shape.comp[0].desc.ell.two_a.val; if (diam > HAIRWIDTH) { d_a[0] = 2.0/diam; /* 1/radii */ } else { d_a[0] = (2.0/HAIRWIDTH) * (1 + HAIRWIDTH - diam); dpar->baddiam = 1; dpar->baddiam_logfactor += log(1 + HAIRWIDTH - diam); } diam = (2.0/d_a[0]); diamratio = dmod->shape.comp[0].desc.ell.a_over_b.val; if (diamratio > SMALLRATIO) { d_a[1] = 2.0/(diam/diamratio); } else { d_a[1] = (2.0/(diam/SMALLRATIO)) / (1 + SMALLRATIO - diamratio); dpar->baddiam = 1; dpar->baddiam_logfactor += log(1 + SMALLRATIO - diamratio); } diam = (2.0/d_a[1]); diamratio = dmod->shape.comp[0].desc.ell.b_over_c.val; if (diamratio > SMALLRATIO) { d_a[2] 
= 2.0/(diam/diamratio); } else { d_a[2] = (2.0/(diam/SMALLRATIO)) / (1 + SMALLRATIO - diamratio); dpar->baddiam = 1; dpar->baddiam_logfactor += log(1 + SMALLRATIO - diamratio); } d_a[0] *= d_a[0]; d_a[1] *= d_a[1]; d_a[2] *= d_a[2]; } } __global__ void ellipse_distance_krnl(struct par_t *dpar, struct mod_t *dmod) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int j; double den; if (offset < dmod->shape.comp[0].real.nv) { /* Routine setuprealver (called by setupreal, which was called by * read_mod) already created as many ellipsoid vertices as were needed * for specified value of theta_steps, and initialized direction * cosines u[j] for each vertex to be * sin(theta)cos(phi), sin(theta)sin(phi), and cos(theta) for * j=0, 1, and 2, respectively. * * These values are x/r, y/r, and z/r, where r is distance from origin * to ellipsoid surface along direction (theta, phi) for given vertex. * Since an ellipsoid has (x/a)^2 + (y/b)^2 + (z/c)^2 = 1, quantity * "den" in code below is equal to 1/(r^2) for vertex i. * * Note that setuprealver initialized all vertex "base points" a[j] to * be zero for ellipsoid components; hence "deviation" r is in fact the * entire thing. 
*/ den = 0.0; for (j=0; j<=2; j++) den += d_a[j]*( dmod->shape.comp[0].real.v[offset].u[j] * dmod->shape.comp[0].real.v[offset].u[j] ); dmod->shape.comp[0].real.v[offset].r.val = 1/sqrt(den); } } __global__ void ellipse_scalefactor_krnl(struct mod_t *dmod) { /* Single-threaded kernel */ int j; if (threadIdx.x == 0) { dmod->shape.comp[0].real.scalefactor[0].state = dmod->shape.comp[0].desc.ell.two_a.state; dmod->shape.comp[0].real.scalefactor[1].state = dmod->shape.comp[0].desc.ell.a_over_b.state; dmod->shape.comp[0].real.scalefactor[2].state = dmod->shape.comp[0].desc.ell.b_over_c.state; for (j=0; j<=2; j++) dmod->shape.comp[0].real.scalefactor[j].val = 1.0; } } __global__ void set_ovoid_parameters_krnl(struct par_t *dpar, struct mod_t *dmod) { //, double a_radius, double a_over_b, double b_over_c, double // k_asym, double x0term, double numer, double denom, double x0) { /* Single-threaded kernel */ if (threadIdx.x == 0) { /* Determine all shape parameters, making sure that none are out of bounds */ a_radius = dmod->shape.comp[0].desc.ovoid.two_a.val / 2; if (a_radius <= HAIRWIDTH/2) { dpar->baddiam = 1; dpar->baddiam_logfactor += log(1 + HAIRWIDTH - 2*a_radius); a_radius = (HAIRWIDTH/2) / (1 + HAIRWIDTH - 2*a_radius); } a_over_b = dmod->shape.comp[0].desc.ovoid.a_over_b.val; if (a_over_b <= SMALLRATIO) { dpar->baddiam = 1; dpar->baddiam_logfactor += log(1 + SMALLRATIO - a_over_b); a_over_b = SMALLRATIO / (1 + SMALLRATIO - a_over_b); } b_over_c = dmod->shape.comp[0].desc.ovoid.b_over_c.val; if (b_over_c <= SMALLRATIO) { dpar->baddiam = 1; dpar->baddiam_logfactor += log(1 + SMALLRATIO - b_over_c); b_over_c = SMALLRATIO / (1 + SMALLRATIO - b_over_c); } k_asym = dmod->shape.comp[0].desc.ovoid.k.val; if (fabs(k_asym) > 1 - SMALLVAL) { dpar->baddiam = 1; dpar->baddiam_logfactor += log(fabs(k_asym) + SMALLVAL); if (k_asym > 0.0) k_asym = 1 - SMALLVAL*(1 - SMALLVAL)/k_asym; else k_asym = -1 - SMALLVAL*(1 - SMALLVAL)/k_asym; } /* Compute x0, the x-offset that places the 
ovoid's center of mass at the * origin; for small |k|, use an analytical approximation to avoid * roundoff problems */ if (fabs(k_asym) > SMALLOVOIDK1) { x0term = 3*(1 - k_asym*k_asym)*log((1 + k_asym)/(1 - k_asym)); numer = 2.0*k_asym*(3 - 2*k_asym*k_asym) - x0term; denom = 2.0*k_asym*(3 - k_asym*k_asym) - x0term; x0 = (a_radius/k_asym)*(numer/denom); } else { x0 = 0.4*k_asym*a_radius; } } } __global__ void ovoid_distance_krnl(struct par_t *dpar, struct mod_t *dmod) //double d_a[3], double a_radius, double a_over_b, double b_over_c, double //k_asym, double x0term, double numer, double denom, double x0) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j, k, nrealroots; double a_over_c, h, alpha0, u_x, coeff[4], goodroot, realroot[3], x_over_a; if (i < dmod->shape.comp[0].real.nv) { a_over_c = a_over_b*b_over_c; h = a_over_b*a_over_b*dmod->shape.comp[0].real.v[i].u[1] *dmod->shape.comp[0].real.v[i].u[1] + a_over_c*a_over_c *dmod->shape.comp[0].real.v[i].u[2]*dmod->shape.comp[0].real.v[i].u[2]; alpha0 = x0/a_radius; u_x = dmod->shape.comp[0].real.v[i].u[0]; coeff[3] = (h - u_x*u_x)*k_asym*u_x; coeff[2] = (1 + 3*k_asym*alpha0)*u_x*u_x + h*(1 - k_asym*alpha0); coeff[1] = (k_asym - (2 + 3*k_asym*alpha0)*alpha0)*u_x; coeff[0] = -(1 - alpha0*alpha0)*(1 + k_asym*alpha0); if (fabs(k_asym) <= SMALLOVOIDK2) { /* |k| is very small, so guard against roundoff error by * computing the vertex position for an ellipsoid (k = 0) and then * applying a first-order correction for nonzero k */ goodroot = 1/sqrt(u_x*u_x + h); goodroot -= (coeff[3]*goodroot*goodroot*goodroot + coeff[1]*goodroot) / (3*coeff[3]*goodroot*goodroot + 2*coeff[2]*goodroot + coeff[1]); } else { /* |k| isn't very small, so solve the cubic equation */ nrealroots = cubic_realroots_cuda( coeff, realroot); goodroot = -HUGENUMBER; for (k=0; k<nrealroots; k++) if (realroot[k] >= 0.0) { x_over_a = realroot[k]*u_x; if (fabs(x_over_a - alpha0) - 1 < OVOIDTOL) goodroot = MAX( goodroot, realroot[k]); } } if (goodroot < 
0.0) printf("Can't compute vertex displacement for ovoid vertex %d\n", i); dmod->shape.comp[0].real.v[i].r.val = goodroot*a_radius; /* Assign scalefactor values */ dmod->shape.comp[0].real.scalefactor[0].state = dmod->shape.comp[0].desc.ovoid.two_a.state; dmod->shape.comp[0].real.scalefactor[1].state = dmod->shape.comp[0].desc.ovoid.a_over_b.state; dmod->shape.comp[0].real.scalefactor[2].state = dmod->shape.comp[0].desc.ovoid.b_over_c.state; for (j=0; j<=2; j++) dmod->shape.comp[0].real.scalefactor[j].val = 1.0; } } __global__ void harmonic_krnl(struct par_t *dpar, struct mod_t *dmod) { int i = blockIdx.x * blockDim.x + threadIdx.x; int L, l, m; double r; if (i < dmod->shape.comp[0].real.nv) { L = dmod->shape.comp[0].desc.har.nhar; r = 0.0; for (l=0; l<=L; l++) { r += dmod->shape.comp[0].desc.har.a[l][0].val * dmod->shape.comp[0].real.v[i].afactor[l][0]; for (m=1; m<=l; m++) r += dmod->shape.comp[0].desc.har.a[l][m].val * dmod->shape.comp[0].real.v[i].afactor[l][m] + dmod->shape.comp[0].desc.har.b[l][m].val * dmod->shape.comp[0].real.v[i].bfactor[l][m]; } if (r > HAIRWIDTH/2) { dmod->shape.comp[0].real.v[i].r.val = r; } else { dpar->baddiam = 1; dpar->baddiam_logfactor += log(1 + HAIRWIDTH - 2*r) / ((L+1)*(L+1)); dmod->shape.comp[0].real.v[i].r.val = (HAIRWIDTH/2) / (1 + HAIRWIDTH - 2*r); } } } __global__ void harmonic_scalefactor_krnl(struct par_t *dpar, struct mod_t *dmod) { // This is a 3-thread single thread kernel int j = threadIdx.x; if (j < 3){ if (j > 0 && dmod->shape.comp[0].desc.har.scalefactor[j].state == '=') dmod->shape.comp[0].desc.har.scalefactor[j].val = dmod->shape.comp[0].desc.har.scalefactor[j-1].val; dmod->shape.comp[0].real.scalefactor[j].state = dmod->shape.comp[0].desc.har.scalefactor[j].state; dmod->shape.comp[0].real.scalefactor[j].val = dmod->shape.comp[0].desc.har.scalefactor[j].val; if (dmod->shape.comp[0].real.scalefactor[j].val <= SMALLRATIO) { dpar->baddiam = 1; dpar->baddiam_logfactor += log(1 + SMALLRATIO - 
dmod->shape.comp[0].real.scalefactor[j].val); dmod->shape.comp[0].real.scalefactor[j].val = SMALLRATIO / (1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val); } } } __global__ void vertex_update_dev_krnl(struct par_t *dpar, struct mod_t *dmod) { int i = blockIdx.x * blockDim.x + threadIdx.x; int v_mirror; if (i < dmod->shape.comp[0].real.nv) { if (dmod->shape.comp[0].real.v[i].r.state == '=') { v_mirror = dmod->shape.comp[0].real.v[i].v_mirror; dmod->shape.comp[0].real.v[i].r.val = dmod->shape.comp[0].real.v[v_mirror].r.val; } } } __global__ void vertex_scalefactor_krnl(struct par_t *dpar, struct mod_t *dmod) { // This is a 3-thread single thread kernel int j = threadIdx.x; if (j < 2) { if (j > 0 && dmod->shape.comp[0].desc.ver.scalefactor[j].state == '=') dmod->shape.comp[0].desc.ver.scalefactor[j].val = dmod->shape.comp[0].desc.ver.scalefactor[j-1].val; dmod->shape.comp[0].real.scalefactor[j].val = dmod->shape.comp[0].desc.ver.scalefactor[j].val; if (dmod->shape.comp[0].real.scalefactor[j].val <= SMALLRATIO) { dpar->baddiam = 1; dpar->baddiam_logfactor += log(1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val); dmod->shape.comp[0].real.scalefactor[j].val = SMALLRATIO / (1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val); } } } __global__ void calc_vertex_co_krnl(struct par_t *dpar, struct mod_t *dmod) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j; if (i < dmod->shape.comp[0].real.nv){ for (j=0; j<=2; j++) dmod->shape.comp[0].real.v[i].x[j] = dmod->shape.comp[0].real.scalefactor[j].val * (dmod->shape.comp[0].real.v[i].u[j] * dmod->shape.comp[0].real.v[i].r.val + dmod->shape.comp[0].real.v[i].a[j]); } } __global__ void perform_rotation_krnl(struct par_t *dpar, struct mod_t *dmod) { /* Single-threaded kernel */ int i = blockIdx.x * blockDim.x + threadIdx.x; double x[3]; int j, k; if (threadIdx.x == 0) { if (!(dmod->shape.comp[0].rot[0].val == 0 && dmod->shape.comp[0].rot[1].val == 0 && dmod->shape.comp[0].rot[2].val == 0 )) { 
if (i <dmod->shape.comp[0].real.nv){ for (j=0; j<=2; j++) { x[j] = 0.0; for (k=0; k<=2; k++) x[j] += dmod->shape.comp[0].m[j][k] * dmod->shape.comp[0].real.v[i].x[k]; } for (j=0; j<=2; j++) dmod->shape.comp[0].real.v[i].x[j] = x[j]; } } } } __global__ void perform_translation_krnl(struct par_t *dpar, struct mod_t *dmod) { /* Single-threaded kernel */ int i = blockIdx.x * blockDim.x + threadIdx.x; int j; if (threadIdx.x == 0) { if (!(dmod->shape.comp[0].off[0].val == 0.0 && dmod->shape.comp[0].off[1].val == 0.0 && dmod->shape.comp[0].off[2].val == 0.0 )) { if (i <dmod->shape.comp[0].real.nv){ for (j=0; j<=2; j++) dmod->shape.comp[0].real.v[i].x[j] += dmod->shape.comp[0].off[j].val; } } } } __global__ void set_optical_params_krnl(struct par_t *dpar, struct mod_t *dmod) { /* Single-thread kernel */ int ilaw; harmonic_scatlaw = 0; if (threadIdx.x == 0) { for (ilaw=0; ilaw<dmod->photo.noptlaws; ilaw++) if (dmod->photo.opttype[ilaw] == HARMLAMBERT || dmod->photo.opttype[ilaw] == HARMLOMMEL || dmod->photo.opttype[ilaw] == HARMHAPKE || dmod->photo.opttype[ilaw] == HARMKAAS) harmonic_scatlaw = 1; for (ilaw=0; ilaw<dmod->photo.nradlaws; ilaw++) if (dmod->photo.radtype[ilaw] == HARMCOSINE_DIFF) harmonic_scatlaw = 1; } } __global__ void dbg_vertex_nrmls_krnl(struct mod_t *dmod, int *nafnas) { /* nv-threaded kernel */ int v = blockIdx.x * blockDim.x + threadIdx.x; if (v == 0) { nafnas[0] = 0; nafnas[1] = 0; } __syncthreads(); if (v < dmod->shape.comp[0].real.nv) { atomicMax(&nafnas[0], dmod->shape.comp[0].real.v[v].naf); atomicMax(&nafnas[1], dmod->shape.comp[0].real.v[v].nas); // dmod->shape.comp[0].real.f[f].n[0] = 0.0; // dmod->shape.comp[0].real.f[f].n[1] = 0.0; // dmod->shape.comp[0].real.f[f].n[2] = 0.0; } } __global__ void calc_vertex_nrmls_krnl(struct mod_t *dmod) { /* nv-threaded kernel */ int i = blockIdx.x * blockDim.x + threadIdx.x; double n[3]; int j, k, naf, f; if (i < dmod->shape.comp[0].real.nv){ n[0] = n[1] = n[2] = 0.0; naf = dmod->shape.comp[0].real.v[i].naf; 
for (j=0; j<naf; j++) { f = dmod->shape.comp[0].real.v[i].af[j]; n[0] += dmod->shape.comp[0].real.f[f].n[0]; n[1] += dmod->shape.comp[0].real.f[f].n[1]; n[2] += dmod->shape.comp[0].real.f[f].n[2]; //for (k=0; k<=2; k++) { // n[k] += dmod->shape.comp[0].real.f[f].n[k]; // printf("f[%i].n[%i]: %g\n", f, k, dmod->shape.comp[0].real.f[f].n[k]); //} } dev_normalize( n); for (k=0; k<=2; k++) dmod->shape.comp[0].real.v[i].n[k] = n[k]; } } __global__ void facet_krnl(struct par_t *dpar, struct mod_t *dmod) { /* For each facet of this component, compute the outward unit normal, * the area, the mean coordinates of the three corner vertices, and * the corresponding angular coordinates (for some scattering laws) */ /* nf-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; int j; if (f < dmod->shape.comp[0].real.nf) { dmod->shape.comp[0].real.f[f].area = dev_facnrm(dmod->shape.comp[0].real, f); for (j=0; j<=2; j++) dmod->shape.comp[0].real.f[f].x[j] = (dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x[j] + dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x[j] + dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x[j] )/3; if (harmonic_scatlaw) { dmod->shape.comp[0].real.f[f].theta = atan2( sqrt(dmod->shape.comp[0].real.f[f].x[0]*dmod->shape.comp[0].real.f[f].x[0] + dmod->shape.comp[0].real.f[f].x[1]*dmod->shape.comp[0].real.f[f].x[1] ), dmod->shape.comp[0].real.f[f].x[2]); dmod->shape.comp[0].real.f[f].phi = atan2( dmod->shape.comp[0].real.f[f].x[1], dmod->shape.comp[0].real.f[f].x[0]); } } } __global__ void set_real_active_vert_krnl(struct mod_t *dmod) { /* nv-threaded kernel */ int v = blockIdx.x * blockDim.x + threadIdx.x; if (v < dnv) //dmod->shape.comp[0].real.nv) dmod->shape.comp[0].real.v[v].act = 1; } __global__ void set_real_active_facet_krnl(struct mod_t *dmod) { /* nf-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; if (f < dmod->shape.comp[0].real.nf) dmod->shape.comp[0].real.f[f].act = 1; } 
__global__ void set_real_active_side_krnl(struct mod_t *dmod) { /* ns-threaded kernel */ int k = blockIdx.x * blockDim.x + threadIdx.x; if (k < dmod->shape.comp[0].real.ns) dmod->shape.comp[0].real.s[k].act = 1; } __host__ void realize_mod_cuda( struct par_t *dpar, struct mod_t *dmod, unsigned char type) { /* We need to realize each model component as a polyhedral solid with triangular facets. The first step is to call realize_coordinates, which computes the displacement of each vertex in this realization, represented as a base displacement plus a vertex deviation (either positive or negative) along a specified set of direction cosines. Additionally, for each facet it computes the outward unit normal, the area, the mean coordinates of the corner vertices, and (for some scattering laws) the corresponding angular coordinates. */ realize_coordinates_cuda(dpar, dmod, type); /* For multiple-component models, figure out which facets lie on the model's surface and which fall within some other component; such facets will have their "act" (active) flag reset to zero. */ check_surface_cuda(dmod); /* Compute the area and moments (volume, center of mass, and inertia tensor) of each component and of the overall model */ compute_moments_cuda(dmod); } /* Compute the vertex coordinates and (if necessary) facet angular coordinates for each component of the model's vertex realization */ __host__ void realize_coordinates_cuda( struct par_t *dpar, struct mod_t *dmod, unsigned char type) { dim3 BLK, THD; /* Loop over all model components, realizing each one as a polyhedral solid * with triangular facets. 
Compute displacement of each vertex in this * realization, represented as a base displacement plus a vertex deviation * (positive or negative) along a specified set of direction cosines*/ /* Call Kernel to initialize flag for tiny/negative ellipsoid diameters */ hipLaunchKernelGGL(( set_diam_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod);//, dnv, dnf); checkErrorAfterKernelLaunch("set_diam_krnl, line 563"); /* Note: The CUDA-code assumes a single-component model for now. */ /* Loop over all model components, realizing each one as a polyhedral solid * with triangular facets. Compute the displacement of each vertex in this * realization, represented as a base displacement plus a vertex deviation * (positive or negative) along a specified set of direction cosines. */ /* Copy nf and nv back from device copies dnf and dnv; used as launch * parameters below */ gpuErrchk(hipMemcpyFromSymbol(&nv, dnv, sizeof(nv), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&nf, dnf, sizeof(nv), 0, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpyFromSymbol(&ns, dns, sizeof(nv), 0, hipMemcpyDeviceToHost)); /* Calculate launch parameters for all kernels going over all vertices */ nvBLK.x = floor((maxThreadsPerBlock - 1 + nv) / maxThreadsPerBlock); nvTHD.x = maxThreadsPerBlock; // Thread block dimensions /* Calculate launch parameters for all kernels going over all facets */ nfBLK.x = floor((maxThreadsPerBlock - 1 + nf) / maxThreadsPerBlock); nfTHD.x = maxThreadsPerBlock; // Thread block dimensions /* Check component type & create corresponding vertex realization. */ switch (type) { case ELLIPSE: /* To avoid negative diameters/very small positive diameters, * adjust the function a[i] = 1/radius[i]^2 so it monotonically * increases as diameter[i] decreases through zero and beyond, * rather than being symmetric about zero diameter. Also set flag * "baddiam" when any diameter is very small or negative, so that * extra penalties can later be applied to this model. 
*/ /* Launch ellipse diameter kernel */ hipLaunchKernelGGL(( ellipse_diameter_krnl), dim3(BLK),dim3(THD), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("ellipse_diameter_krnl, line 594"); /* Kernel finds distance of each vertex to ellipsoid's center */ hipLaunchKernelGGL(( ellipse_distance_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("ellipse_distance_krnl, line 598"); /* Launch kernel to set real->scalefactor */ hipLaunchKernelGGL(( ellipse_scalefactor_krnl), dim3(1),dim3(1), 0, 0, dmod); checkErrorAfterKernelLaunch("ellipse_scalefactor_krnl, line "); break; case OVOID: /* Determine all shape parameters, making sure that none are out of bounds */ hipLaunchKernelGGL(( set_ovoid_parameters_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("set_ovoid_parameters_krnl, line 603"); /* Kernel finds distance of each vertex to ovoid's center */ hipLaunchKernelGGL(( ovoid_distance_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("ovoid_distance_krnl, line 608"); break; case HARMONIC: /* Kernel sets parameters associated with harmonic model */ hipLaunchKernelGGL(( harmonic_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("harmonic_krnl, line 614"); BLK.x = 1; THD.x = 3; hipLaunchKernelGGL(( harmonic_scalefactor_krnl), dim3(BLK),dim3(THD), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("harmonic_scalefactor_krnl, line 618"); break; case VERTEX: /* The vertex type is its own realization, but we still need to update * the values of the "scale factor" parameters and update any vertex * deviations that have the '=' state */ hipLaunchKernelGGL(( vertex_update_dev_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("vertex_update_dev_kernel, line 625"); BLK.x = 1; THD.x = 3; hipLaunchKernelGGL(( vertex_scalefactor_krnl), dim3(BLK),dim3(THD), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("vertex_scalefactor_krnl, line 629"); break; default: 
printf("realize_mod.c: don't know that component type\n"); } /* end of switch statement for component type */ /* Calculate vertex coordinates for this component */ hipLaunchKernelGGL(( calc_vertex_co_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("calc_vertex_co_krnl, line 637"); /* Use this component's rotational offset angles to create comp[c].m, the * rotation matrix that will be applied to the vertex coordinates */ hipLaunchKernelGGL(( euler2mat_realize_mod_krnl), dim3(1),dim3(1), 0, 0, dmod); checkErrorAfterKernelLaunch("dev_euler2mat, line 642"); /* If needed, perform rotation on this component */ hipLaunchKernelGGL(( perform_rotation_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("perform_rotation_krnl, line 647"); /* If needed, perform translation on this component */ hipLaunchKernelGGL(( perform_translation_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("perform_translation_krnl, line 651"); /* Figure out if optical/radar harmonic scattering laws are in use * * and set the flag harmonic_scatlaw accordingly */ hipLaunchKernelGGL(( set_optical_params_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("set_optical_params_krnl, line 656"); /* For each facet of this component, compute outward unit normal, area, * mean coordinates of the three corner vertices, and corresponding angular * coordinates (for some scattering laws) */ hipLaunchKernelGGL(( facet_krnl), dim3(nfBLK),dim3(nfTHD), 0, 0, dpar, dmod); checkErrorAfterKernelLaunch("facet_krnl, line 662"); /* Calculate vertex normals for this component as normalized sums of the * facet normals for all facets attached to each vertex */ /// // int *nafnas; // cudaCalloc((void**)&nafnas, sizeof(int), 2); // dbg_vertex_nrmls_krnl<<<nvBLK,nvTHD>>>(dmod, nafnas); hipLaunchKernelGGL(( calc_vertex_nrmls_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dmod); checkErrorAfterKernelLaunch("calc_vertex_nrmls, line 
667"); // deviceSyncAfterKernelLaunch("dbg"); // printf("max naf: %i\n", nafnas[0]); // printf("max nas: %i\n", nafnas[1]); // printf("\n"); } /*.....................................................................................*/ /* Determine which vertices, facets, and sides of a multiple-component model lie interior to the model rather than on the model's surface, and reset their "act" (active) flags to zero */ __host__ void check_surface_cuda(struct mod_t *dmod) { /* Calculate launch parameters for all kernels going over all vertices */ nvBLK.x = floor((maxThreadsPerBlock - 1 + nv) / maxThreadsPerBlock); nvTHD.x = maxThreadsPerBlock; // Thread block dimensions /* Calculate launch parameters for all kernels going over all facets */ nfBLK.x = floor((maxThreadsPerBlock - 1 + nf) / maxThreadsPerBlock); nfTHD.x = maxThreadsPerBlock; // Thread block dimensions /* Calculate launch parameters for all kernels going over all facets */ nsBLK.x = floor((maxThreadsPerBlock - 1 + ns) / maxThreadsPerBlock); nsTHD.x = maxThreadsPerBlock; // Thread block dimensions /* 1-component model: flag all vertices and facets as active, then return */ hipLaunchKernelGGL(( set_real_active_vert_krnl), dim3(nvBLK),dim3(nvTHD), 0, 0, dmod); checkErrorAfterKernelLaunch("set_real_active_vert_krnl, line 690"); hipLaunchKernelGGL(( set_real_active_facet_krnl), dim3(nfBLK),dim3(nfTHD), 0, 0, dmod); checkErrorAfterKernelLaunch("set_real_active_vert_krnl, line 694"); hipLaunchKernelGGL(( set_real_active_side_krnl), dim3(nsBLK),dim3(nsTHD), 0, 0, dmod); checkErrorAfterKernelLaunch("set_real_active_side_krnl, line 696"); return; // a[0] = a[1] = a[2] = 0.0; /* vertex base displacement */ // r_edge = vector( 0, MAXEDGE-1); /* Only one-component models for CUDA right now. 
*/ //for (c=0; c<dmod->shape.ncomp; c++) { /* Check this component's vertices */ // for (v=0; v<nv; v++) { // // /* Check whether vertex v of component c lies interior to // any other component c2 */ // // /* Start by considering a ray that starts at the origin and passes through // vertex v: the displacement vector for this vertex. Vector u below // holds the direction cosines of this ray, while dist is the magnitude // of the displacement. (The existing direction cosines dmod->shape.comp[0].real.v[v].u // may not point directly away from the origin, so we compute from scratch.) */ // // for (i=0; i<=2; i++) // u[i] = dmod->shape.comp[0].real.v[v].x[i]; // dist = normalize( u); // // /* Now, for each other component c2, loop through all facets f2 to find // the ones that are intersected by the ray defined above. Count up all // such facets of c2 for which the intersection point lies further from // the origin than vertex v. If this number is ODD, vertex v lies // interior to component c2, so we mark it as inactive. */ // // dmod->shape.comp[0].real.v[v].act = 1; // // c2 = (c == 0) ? 1 : 0; // do { // real2 = &dmod->shape.comp[c2].real; // n_intersections = 0; // n_edge = 0; // for (f2=0; f2<(*real2).nf; f2++) { // if (rayfacint( &r, &s, &t, u, a, // (*real2).v[ (*real2).f[f2].v[0] ].x, // (*real2).v[ (*real2).f[f2].v[1] ].x, // (*real2).v[ (*real2).f[f2].v[2] ].x, // (*real2).f[f2].n, EDGETOL)) // if (r > dist + RTOL) { // if (fabs(s) < EDGETOL || fabs(s - 1.0) < EDGETOL // || fabs(t) < EDGETOL || fabs(t - s) < EDGETOL) { // // /* The ray intersects facet f2 at its edge or corner, give or take // a bit of roundoff error. (Absent roundoff error, we would have // s = 0.0 or 1.0, or t = 0.0 or s.) We need to make sure that we // count only one intersection for this edge, rather than counting // both facets that adjoin the edge. 
Thus we check the distance r // from vertex v to the intersection point against the values of r // obtained for all previous edge intersections found for this // vertex. If the current r value is the same (to within a small // tolerance) as a previous one, we've already counted this // intersection, so don't count it again. */ // // new_edge = 1; // if (n_edge > 0) // for (n=0; n<n_edge; n++) // if (fabs(r - r_edge[n]) < RTOL) // new_edge = 0; // if (new_edge) { // if (n_edge == MAXEDGE) // bailout("realize_mod.c: need to increase MAXEDGE\n"); // r_edge[n_edge] = r; // n_edge++; // n_intersections++; // } // // } else { // // /* The ray intersects the interior of facet f2, not the edge */ // // n_intersections++; // } // } // } // if (n_intersections % 2 == 1) // dmod->shape.comp[0].real.v[v].act = 0; // c2 = (c2 == c-1) ? c2 + 2 : c2 + 1; // } while (dmod->shape.comp[0].real.v[v].act && c2 < dmod->shape.ncomp); // } // // /* Check this component's facets, doing exactly what we just did for vertices // but this time for the *mean displacement* of each facet's three vertices */ // // for (f=0; f<nf; f++) { // // for (i=0; i<=2; i++) // u[i] = dmod->shape.comp[0].real.f[f].x[i]; // dist = normalize( u); // // dmod->shape.comp[0].real.f[f].act = 1; // // c2 = (c == 0) ? 
1 : 0; // do { // real2 = &dmod->shape.comp[c2].real; // n_intersections = 0; // n_edge = 0; // for (f2=0; f2<(*real2).nf; f2++) // if (rayfacint( &r, &s, &t, u, a, // (*real2).v[ (*real2).f[f2].v[0] ].x, // (*real2).v[ (*real2).f[f2].v[1] ].x, // (*real2).v[ (*real2).f[f2].v[2] ].x, // (*real2).f[f2].n, EDGETOL)) // if (r > dist + RTOL) { // if (fabs(s) < EDGETOL || fabs(s - 1.0) < EDGETOL // || fabs(t) < EDGETOL || fabs(t - s) < EDGETOL) { // new_edge = 1; // if (n_edge > 0) // for (n=0; n<n_edge; n++) // if (fabs(r - r_edge[n]) < RTOL) // new_edge = 0; // if (new_edge) { // if (n_edge == MAXEDGE) // bailout("realize_mod.c: need to increase MAXEDGE\n"); // r_edge[n_edge] = r; // n_edge++; // n_intersections++; // } // } else { // n_intersections++; // } // } // if (n_intersections % 2 == 1) // dmod->shape.comp[0].real.f[f].act = 0; // c2 = (c2 == c-1) ? c2 + 2 : c2 + 1; // } while (dmod->shape.comp[0].real.f[f].act && c2 < dmod->shape.ncomp); // } // // /* Check this component's sides: // a side is active IFF both of its end vertices are active */ // // for (k=0; k<ns; k++) { // v1 = dmod->shape.comp[0].real.s[k].v[0]; // v2 = dmod->shape.comp[0].real.s[k].v[1]; // if (dmod->shape.comp[0].real.v[v1].act && dmod->shape.comp[0].real.v[v2].act) // dmod->shape.comp[0].real.s[k].act = 1; // else // dmod->shape.comp[0].real.s[k].act = 0; // } // // } /* end loop over all components */ // // free_vector( r_edge, 0, MAXEDGE-1); } __global__ void comp_moments_1stinit_krnl(struct mod_t *dmod, int c) { /* Single-thread kernel */ int j, k; if (threadIdx.x == 0) { dmod->shape.area = 0.0; dmod->shape.volume = 0.0; for (k=0; k<=2; k++) { dmod->shape.com[k] = 0.0; for (j=0; j<=2; j++) dmod->shape.inertia[k][j] = 0.0; } cm_nf = dmod->shape.comp[c].real.nf; } } __global__ void comp_moments_2ndinit_krnl(struct mod_t *dmod, float area1, float area2, int c) { /* Single-threaded kernel - meant to initialize the individual component * com and inertia arrays */ if (threadIdx.x == 0) { 
int j, k; dmod->shape.comp[c].area = area1; dmod->shape.area = area2; dmod->shape.comp[0].volume = 0.0; for (k=0; k<=2; k++) { dmod->shape.comp[0].com[k] = 0.0; for (j=0; j<=2; j++) dmod->shape.comp[0].inertia[k][j] = 0.0; } dmod->shape.comp[0].area = 0.0; // actually 1st step in calculating surface area } } __global__ void comp_moments_facet_krnl(struct mod_t *dmod, int c, float *dvarr, float *dcom0, float *dcom1, float *dcom2, float *dI00, float *dI01, float *dI02, float *dI10, float *dI11, float *dI12, float *dI20, float *dI21, float *dI22) { /* nf-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; double dI[3][3], dcom[3], dv; if (f < dmod->shape.comp[0].real.nf) { dev_facmom(dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x, dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x, dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x, dmod->shape.comp[c].real.f[f].n, &dv, dcom, dI); /* Assign calculated dv, dcom, dI to each facet for later parallel reduction */ dvarr[f] = (float) dv; dcom0[f] = (float)dcom[0]; dcom1[f] = (float)dcom[1]; dcom2[f] = (float)dcom[2]; dI00[f] = (float)dI[0][0]; dI01[f] = (float)dI[0][1]; dI02[f] = (float)dI[0][2]; dI10[f] = (float)dI[1][0]; dI11[f] = (float)dI[1][1]; dI12[f] = (float)dI[1][2]; dI20[f] = (float)dI[2][0]; dI21[f] = (float)dI[2][1]; dI22[f] = (float)dI[2][2]; } } __global__ void comp_moments_facets_old_krnl(struct mod_t *dmod) { /* nf-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; int j, k; double dI[3][3], dcom[3], dv; if (f < dmod->shape.comp[0].real.nf) { /* Calculate surface area for this component; for active facets, also add * the contributions to the area of the overall model */ dmod->shape.comp[0].area += dmod->shape.comp[0].real.f[f].area; if (dmod->shape.comp[0].real.f[f].act) dmod->shape.area += dmod->shape.comp[0].real.f[f].area; dev_facmom( dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x, 
dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x, dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x, dmod->shape.comp[0].real.f[f].n, &dv, dcom, dI); dmod->shape.comp[0].volume += dv; for (j=0; j<=2; j++) { dmod->shape.comp[0].com[j] += dcom[j]; for (k=0; k<=2; k++) dmod->shape.comp[0].inertia[j][k] += dI[j][k]; } if (dmod->shape.comp[0].real.f[f].act) { dmod->shape.volume += dv; for (j=0; j<=2; j++) { dmod->shape.com[j] += dcom[j]; for (k=0; k<=2; k++) dmod->shape.inertia[j][k] += dI[j][k]; } } } } __global__ void comp_moments_facets_atomics_krnl(struct mod_t *dmod) { /* nf-threaded kernel */ int f = blockIdx.x * blockDim.x + threadIdx.x; int j, k; double dI[3][3], dcom[3], dv; if (f < dmod->shape.comp[0].real.nf) { /* Calculate surface area for this component; for active facets, also add * the contributions to the area of the overall model */ atomicAdd(&rm_area, (float)dmod->shape.comp[0].real.f[f].area); if (dmod->shape.comp[0].real.f[f].act) atomicAdd(&rm_ifarea, (float)dmod->shape.comp[0].real.f[f].area); dev_facmom( dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x, dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x, dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x, dmod->shape.comp[0].real.f[f].n, &dv, dcom, dI); atomicAdd(&rm_vol, (float)dv); for (j=0; j<=2; j++) { atomicAdd(&rm_dcom[j], (float)dcom[j]); for (k=0; k<=2; k++) atomicAdd(&rm_dI[j][k], (float)dI[j][k]); dmod->shape.comp[0].inertia[j][k] += dI[j][k]; } if (dmod->shape.comp[0].real.f[f].act) { atomicAdd(&rm_vol, (float)dv); for (j=0; j<=2; j++) { atomicAdd(&rm_ifdcom[j], dcom[j]); for (k=0; k<=2; k++) atomicAdd(&rm_ifdI[j][k], (float)dI[j][k]); } } } } __global__ void comp_moments_facets_at2_krnl(struct mod_t *dmod) { /* Single-threaded kernel */ if (threadIdx.x ==0) { int i, j; dmod->shape.comp[0].area = (double)rm_area; dmod->shape.area = (double)rm_ifarea; dmod->shape.comp[0].volume = (double)rm_vol; 
dmod->shape.volume = (double)rm_ifvol; for (i=0; i<3; i++) { dmod->shape.comp[0].com[i] = rm_dcom[i]; dmod->shape.com[i] = rm_ifdcom[i]; for (j=0; j<3; j++) { dmod->shape.comp[0].inertia[i][j] = rm_dI[i][j]; dmod->shape.inertia[i][j] = rm_ifdI[i][j]; } } } } __global__ void comp_moments_com_krnl(struct mod_t *dmod) { /* Single-thread kernel */ if (threadIdx.x == 0) { int j; for (j=0; j<=2; j++) { dmod->shape.comp[0].com[j] /= dmod->shape.comp[0].volume; dmod->shape.com[j] /= dmod->shape.volume; } j = 2; } } /* Compute the area and the 0,1,2-order moments (volume, center of mass, and inertia tensor) of each component and of the overall model, assuming uniform density and ignoring interior facets' contributions to the overall model */ __host__ void compute_moments_cuda( struct mod_t *dmod) { float area1=0.0, area2=0.0, *dv, *dcom0, *dcom1, *dcom2, *dI00, *dI01, *dI02, *dI10, *dI11, *dI12, *dI20, *dI21, *dI22; int c=0, size; /* Initialize the model's surface area, volume, center-of-mass (COM) * displacement, and inertia tensor */ hipLaunchKernelGGL(( comp_moments_1stinit_krnl), dim3(1),dim3(1), 0, 0, dmod, c); checkErrorAfterKernelLaunch("comp_moments_init_krnl, line 945"); gpuErrchk(hipMemcpyFromSymbol(&size, cm_nf, sizeof(int), 0, hipMemcpyDeviceToHost)); /* CUDA note: Only single-component models for now. * Loop over all model components, computing areas and moments (volume, * center of mass, and inertia tensor); COM and inertia tensor are computed * assuming uniform density. For multiple-component models, when computing * the area and the moments for overall model, ignore facets interior to * the model (i.e., that are inside some other component). */ /* Note that area2 (area of active facets summed up) is not currently * implemented. 
A single-component model is assumed, in which case every * facet is active and area1=area2 */ // for (c=0; c<dmod->shape.ncomp; c++) { area1 = compute_model_area(dmod, c, size); /*area2 = compute_model_area2(dmod, c, size);*/ area2 = area1; /* Allocate temporary dv, dcom, dI pointers */ cudaCalloc((void**)&dv, sizeof(float), size); cudaCalloc((void**)&dcom0, sizeof(float), size); cudaCalloc((void**)&dcom1, sizeof(float), size); cudaCalloc((void**)&dcom2, sizeof(float), size); cudaCalloc((void**)&dI00, sizeof(float), size); cudaCalloc((void**)&dI01, sizeof(float), size); cudaCalloc((void**)&dI02, sizeof(float), size); cudaCalloc((void**)&dI10, sizeof(float), size); cudaCalloc((void**)&dI11, sizeof(float), size); cudaCalloc((void**)&dI12, sizeof(float), size); cudaCalloc((void**)&dI20, sizeof(float), size); cudaCalloc((void**)&dI21, sizeof(float), size); cudaCalloc((void**)&dI22, sizeof(float), size); /* Set area and initialize per-component COM and Inertia arrays */ hipLaunchKernelGGL(( comp_moments_2ndinit_krnl), dim3(1),dim3(1), 0, 0, dmod, area1, area2, c); checkErrorAfterKernelLaunch("comp_moments_2ndinit_krnl in realize_mod_cuda"); /* Load the temporary arrays with data */ hipLaunchKernelGGL(( comp_moments_facet_krnl), dim3(nfBLK),dim3(nfTHD), 0, 0, dmod, c, dv, dcom0, dcom1, dcom2, dI00, dI01, dI02, dI10, dI11, dI12, dI20, dI21, dI22); checkErrorAfterKernelLaunch("comp_moments_facets_krnl in compute_moments_cuda"); /* Calculate surface area for this component; for active facets, also add * the contributions to the area of the overall model */ dvdI_reduce_single(dmod, dv, dcom0, dcom1, dcom2, dI00, dI01, dI02, dI10, dI11, dI12, dI20, dI21, dI22, size, c); // comp_moments_facets_at2_krnl<<<1,1>>>(dmod); // checkErrorAfterKernelLaunch("comp_moments_facets_at2_krnl, line 959"); /* This kernel computes the overall COM vector */ hipLaunchKernelGGL(( comp_moments_com_krnl), dim3(1),dim3(1), 0, 0, dmod); checkErrorAfterKernelLaunch("comp_moments_facets_krnl, line 
963"); /* Free up the temporary arrays */ hipFree(dv); hipFree(dcom0); hipFree(dcom1); hipFree(dcom2); hipFree(dI00); hipFree(dI01); hipFree(dI02); hipFree(dI10); hipFree(dI11); hipFree(dI12); hipFree(dI20); hipFree(dI21); hipFree(dI22); } /* Find all real roots of a cubic equation, using methods given in section 5.6 of Numerical Recipes in C. Element 3 of the input coeff vector is the cubic coefficient while element 0 is the constant term. Up to three real roots are stored in the output realroot vector, with any unused elements set to a large negative dummy value. The return value is the number of real roots found. The routine includes several tests for coefficients that are equal to zero; those tests assume that nonzero coefficients are of order unity. */ __device__ int cubic_realroots_cuda( double *coeff, double *realroot) { int nrealroots, bsign; double a, b, c, discriminant, q, qsqrt, r, r2minusq3, rsign, s, t, theta; nrealroots = 0; realroot[0] = realroot[1] = realroot[2] = -HUGENUMBER; if (fabs(coeff[3]) < SMALLCOEFF3) { /* cubic term is zero */ a = coeff[2]; b = coeff[1]; c = coeff[0]; if (fabs(a) < SMALLVAL) { if (fabs(b) < SMALLVAL) { /* Error: the cubic, quadratic, and linear terms are zero */ if (fabs(c) < SMALLVAL) printf("cubic_realroots in realize_mod.c: all four coefficients are zero\n"); else printf("cubic_realroots in realize_mod.c: only the constant term is nonzero\n"); } else { /* linear equation */ realroot[0] = -c/b; nrealroots = 1; } } else { /* quadratic equation */ discriminant = b*b - 4*a*c; if (discriminant < 0.0) printf("cubic_realroots in realize_mod.c: quadratic equation has no real roots\n"); if (fabs(b) < SMALLVAL) { realroot[0] = sqrt(discriminant)/(2*a); realroot[1] = -realroot[0]; } else { bsign = (b < 0.0) ? 
-1 : 1; q = -0.5*(b + bsign*sqrt(discriminant)); realroot[0] = q/a; realroot[1] = c/q; } nrealroots = 2; } } else { /* cubic term is nonzero: scale to standard form x^3 + ax^2 + b^x + c = 0 */ a = coeff[2]/coeff[3]; b = coeff[1]/coeff[3]; c = coeff[0]/coeff[3]; /* Check if there is one real root or three. Write out test quantity * r^2 - q^3 explicitly in terms of coefficients a, b, and c in order * to cancel high-order terms and thus reduce the likelihood of * roundoff problems */ q = (a*a - 3*b)/9; r = (2*a*a*a - 9*a*b + 27*c)/54; r2minusq3 = (4*a*a*a*c - a*a*b*b - 18*a*b*c + 27*c*c + 4*b*b*b)/108; if (r2minusq3 >= 0.0) { /* one real root */ rsign = (r < 0.0) ? -1 : 1; s = -rsign*pow( fabs(r) + sqrt(r2minusq3), 1.0/3); t = (fabs(s) >= SMALLVAL) ? q/s : 0.0; realroot[0] = s + t - a/3; nrealroots = 1; } else { /* three real roots */ qsqrt = sqrt(q); theta = acos(r/(q*qsqrt)); realroot[0] = -2*qsqrt*cos(theta/3) - a/3; realroot[1] = -2*qsqrt*cos((theta + 2*PIE)/3) - a/3; realroot[2] = -2*qsqrt*cos((theta - 2*PIE)/3) - a/3; nrealroots = 3; } } return nrealroots; } #undef HAIRWIDTH #undef SMALLRATIO #undef SMALLOVOIDK1 #undef SMALLOVOIDK2 #undef OVOIDTOL #undef MAXEDGE #undef EDGETOL #undef RTOL #undef SMALLCOEFF3 __device__ double dev_facnrm( struct vertices_t verts, int fi) { int i; double a[3], b[3], area; for (i=0; i<=2; i++) { a[i] = verts.v[verts.f[fi].v[1]].x[i] - verts.v[verts.f[fi].v[0]].x[i]; b[i] = verts.v[verts.f[fi].v[2]].x[i] - verts.v[verts.f[fi].v[1]].x[i]; } area = 0.5*dev_cross( verts.f[fi].n, a, b); dev_normalize( verts.f[fi].n); return area; } __device__ double dev_cross( double z[3], double x[3], double y[3]) { double zz[3]; zz[0] = x[1]*y[2]-x[2]*y[1]; zz[1] = x[2]*y[0]-x[0]*y[2]; zz[2] = x[0]*y[1]-x[1]*y[0]; z[0] = zz[0]; z[1] = zz[1]; z[2] = zz[2]; return sqrt(z[0]*z[0]+z[1]*z[1]+z[2]*z[2]); }
86ce69cb617d4bb2a9d1b50a4429415818166fc2.cu
/***************************************************************************************** realize_mod.c Takes a struct mod_t model and "realizes" its components as polyhedral solids made up of triangular facets. Modified 2016 July 9 by Matthias Engels: Adapted for use with shape-cuda. ------------------------------------------------------------------------------------------ Modified 2014 April 26 by CM: Increase the minimum permitted value of the highest-order coefficient in the cubic equation that locates an ovoid vertex: if the coefficient is smaller than this minimum, treat it as if it's zero and solve a quadratic equation instead Modified 2014 March 22 by CM: Relax the tolerance for finding a valid ovoid vertex position Modified 2014 March 10 by CM: Guard against roundoff problems when computing vertex positions for ovoid components with very small |k| Modified 2014 February 10 by CM: Implement multiple radar and optical scattering laws Modified 2013 August 28 by CM: Set the bad diameter flag for harmonic components with tiny or negative vertex displacements, and for harmonic and vertex components with tiny or negative "scale factor" values Modified 2013 June 2 by CM: In the cubic_realroot routine, initialize nrealroots to avoid compilation warning Fix a comment Modified 2013 May 20 by CM: Implement ovoid shape components Modified 2012 July 4 by CM: Add test in "realize_coordinates" routine to avoid compilation warning Modified 2011 September 2 by CM: Bug fix: the "check_surface" routine makes use of facet normals when identifying active vs. 
inactive vertices and facets, but facet normals weren't being computed until *after* check_surface was called Make the code more modular (and address the above bug) by introducing the "realize_coordinates" and "compute_moments" routines, as per the version of realize_mod in the SHERMAN package Store the area and the centroid coordinates of each facet Add "harmlambert" optical scattering law (compute facet angular coordinates) Modified 2010 September 1 by CM: Add "facetnorm" argument to the rayfacint routine Modified 2010 June 1 by CM: Change "scalefactor" parameter from a scalar to a 3-component vector Modified 2010 March 19 by CM: Implement '=' state for vertex deviations Modified 2009 November 15 by CM: In the "check_surface" routine, eliminate an unused variable and fix a couple of ambiguous nested if-then-else statements Modified 2009 August 3 by CM: For the "harmlommel" "harmhapke" "harmkaas" and "harmcosine_diff" inhomogeneous scattering laws, compute the spherical coordinates (theta and phi) of each facet after each component's rotational and translational offsets have been applied rather than before, so that these laws can be used for multiple-component models For multiple-component models, use a more careful method (already used for facets) to determine which vertices are on the model's surface; also, for both vertices and facets, allow for a bit of roundoff error in this determination by adding a tolerance argument to the "rayfacint" routine For multiple-component models, determine the new "act" (active) flag for each model side For multiple-component models, fix a bug in computing the center of mass for individual components Modified 2009 July 5 by CM: Turn each component's rotational offsets into a rotation matrix here rather than in the "read_mod" routine, in case the offsets are being allowed to float Modified 2009 July 1 by CM: Add "check_surface" routine that determines which facets of a multiple-component model lie on the model's surface rather 
than interior to the model For multiple-component models, when computing the area and the moments of the overall model, ignore facets that lie interior to the model Modified 2009 April 3 by CM: Fix slight bug in defining function a[i] = 1/radius^2 when a/b or b/c is tiny or negative for ellipsoid components Initialize the "baddiam_logfactor" parameter and set its value when 2a, a/b, or b/c is tiny or negative for ellipsoid components Modified 2007 August 10 by CM: Eliminate unused variable Modified 2007 January 8 by CM: Define "scalefactor" state for vertex realizations of ellipsoid and harmonic components, not just its value Modified 2006 October 1 by CM: Add "scalefactor" to harmonic and vertex shape structures Replace ellipsoid diameters D with two_a, a_over_b, b_over_c Modified 2005 September 6 by CM: Add computation of facet angular coordinates for use with harmonic scattering laws Modified 2005 August 17 by CM: Move computation of spherical harmonic functions afactor and bfactor from here to read_mod.c, so that it can be done just once per fit Modified 2005 February 28 by CM: Initialize the "baddiam" parameter (flag indicating tiny or negative ellipsoid diameters) to 0 here rather than in bestfit.c so that it can be used for actions other than "fit" Modified 2004 August 23 by CM: Eliminated newtheta and oldcostheta variables and THETATOL constant, since they weren't actually being used (i.e., the test in which they were included was always true) Modified 2003 April 17 by CM: Added computation of component and model moments; this used to be done in function penalties (but wasn't always being done) Added code to cope with tiny or negative ellipsoid diameters; as a result, must now pass the model's parameter structure as an argument to realize_mod Added surface area computation for components and for the full model *****************************************************************************************/ extern "C" { #include "../shape/head.h" } #define 
HAIRWIDTH 1.0e-7 #define SMALLRATIO 0.01 #define SMALLOVOIDK1 0.01 #define SMALLOVOIDK2 1.0e-6 #define OVOIDTOL 1.0e-6 #define MAXEDGE 100 #define EDGETOL 1.0e-14 #define RTOL 1000*EDGETOL #define SMALLCOEFF3 1.0e-5 /* These 2 device variables are to get nf and nv from the GPU-located dmod file */ __device__ int dnv, dnf, dns; __device__ double d_a[3]; __device__ double a_radius, a_over_b, b_over_c, k_asym, x0term, numer, denom, x0; __device__ int harmonic_scatlaw, cm_nf; __device__ float rm_area=0.0, rm_ifarea=0.0, rm_vol=0.0, rm_ifvol=0.0, rm_dcom[3], rm_ifdcom[3], rm_dI[3][3], rm_ifdI[3][3]; static int nv, nf, ns; static dim3 nvBLK,nvTHD,nfBLK,nfTHD,nsBLK,nsTHD; __host__ void realize_coordinates_cuda(struct par_t *dpar, struct mod_t *dmod, unsigned char type); __host__ void check_surface_cuda(struct mod_t *dmod); __host__ void compute_moments_cuda(struct mod_t *dmod); __global__ void set_diam_krnl(struct par_t *dpar, struct mod_t *dmod){ /* This is a single-thread kernel */ if (threadIdx.x == 0) { dpar->baddiam = 0; dpar->baddiam_logfactor = 0; dnv = dmod->shape.comp[0].real.nv; dnf = dmod->shape.comp[0].real.nf; dns = dmod->shape.comp[0].real.ns; } __syncthreads(); } __global__ void ellipse_diameter_krnl(struct par_t *dpar, struct mod_t *dmod) { /* This is a single-thread kernel */ double diam, diamratio; if (threadIdx.x == 0) { diam = dmod->shape.comp[0].desc.ell.two_a.val; if (diam > HAIRWIDTH) { d_a[0] = 2.0/diam; /* 1/radii */ } else { d_a[0] = (2.0/HAIRWIDTH) * (1 + HAIRWIDTH - diam); dpar->baddiam = 1; dpar->baddiam_logfactor += log(1 + HAIRWIDTH - diam); } diam = (2.0/d_a[0]); diamratio = dmod->shape.comp[0].desc.ell.a_over_b.val; if (diamratio > SMALLRATIO) { d_a[1] = 2.0/(diam/diamratio); } else { d_a[1] = (2.0/(diam/SMALLRATIO)) / (1 + SMALLRATIO - diamratio); dpar->baddiam = 1; dpar->baddiam_logfactor += log(1 + SMALLRATIO - diamratio); } diam = (2.0/d_a[1]); diamratio = dmod->shape.comp[0].desc.ell.b_over_c.val; if (diamratio > SMALLRATIO) { d_a[2] 
= 2.0/(diam/diamratio);
	} else {
		/* Diameter at/below the floor: substitute a monotonically varying
		 * surrogate and flag the model so extra penalties can be applied. */
		d_a[2] = (2.0/(diam/SMALLRATIO)) / (1 + SMALLRATIO - diamratio);
		dpar->baddiam = 1;
		dpar->baddiam_logfactor += log(1 + SMALLRATIO - diamratio);
	}
	/* d_a[j] held 1/radius; square it so d_a[j] = 1/radius^2 below */
	d_a[0] *= d_a[0];
	d_a[1] *= d_a[1];
	d_a[2] *= d_a[2];
	}
}

/* nv-threaded kernel: one thread per vertex of the (single) model component.
 * Computes each vertex's radial deviation r for an ellipsoid component from
 * the axis coefficients d_a[j] = 1/radius[j]^2 set by the diameter kernel. */
__global__ void ellipse_distance_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	int offset = blockIdx.x * blockDim.x + threadIdx.x;
	int j;
	double den;

	if (offset < dmod->shape.comp[0].real.nv) {
		/* Routine setuprealver (called by setupreal, which was called by
		 * read_mod) already created as many ellipsoid vertices as were needed
		 * for the specified value of theta_steps, and initialized direction
		 * cosines u[j] for each vertex to be sin(theta)cos(phi),
		 * sin(theta)sin(phi), and cos(theta) for j = 0, 1, 2 respectively.
		 *
		 * These values are x/r, y/r, and z/r, where r is the distance from
		 * the origin to the ellipsoid surface along direction (theta, phi)
		 * for the given vertex. Since an ellipsoid has
		 * (x/a)^2 + (y/b)^2 + (z/c)^2 = 1, quantity "den" below equals
		 * 1/(r^2) for this vertex.
		 *
		 * Note that setuprealver initialized all vertex "base points" a[j] to
		 * zero for ellipsoid components; hence "deviation" r is in fact the
		 * entire displacement. */
		den = 0.0;
		for (j=0; j<=2; j++)
			den += d_a[j]*( dmod->shape.comp[0].real.v[offset].u[j] *
			                dmod->shape.comp[0].real.v[offset].u[j] );
		dmod->shape.comp[0].real.v[offset].r.val = 1/sqrt(den);
	}
}

/* Single-threaded kernel: copy the ellipsoid shape-parameter states into the
 * realization's scalefactor states and reset all scalefactor values to 1. */
__global__ void ellipse_scalefactor_krnl(struct mod_t *dmod)
{
	int j;

	if (threadIdx.x == 0) {
		dmod->shape.comp[0].real.scalefactor[0].state =
				dmod->shape.comp[0].desc.ell.two_a.state;
		dmod->shape.comp[0].real.scalefactor[1].state =
				dmod->shape.comp[0].desc.ell.a_over_b.state;
		dmod->shape.comp[0].real.scalefactor[2].state =
				dmod->shape.comp[0].desc.ell.b_over_c.state;
		for (j=0; j<=2; j++)
			dmod->shape.comp[0].real.scalefactor[j].val = 1.0;
	}
}

/* Single-threaded kernel: pull the ovoid's shape parameters out of the model,
 * clamping tiny/negative values (and flagging baddiam) so later math is safe.
 * NOTE(review): a_radius, a_over_b, b_over_c, k_asym, x0term, numer, denom,
 * and x0 are file-scope __device__ variables declared earlier in this file;
 * they carry state into ovoid_distance_krnl — confirm they are not reused
 * concurrently by another stream. */
__global__ void set_ovoid_parameters_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	if (threadIdx.x == 0) {
		/* Determine all shape parameters, making sure none are out of bounds */
		a_radius = dmod->shape.comp[0].desc.ovoid.two_a.val / 2;
		if (a_radius <= HAIRWIDTH/2) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + HAIRWIDTH - 2*a_radius);
			a_radius = (HAIRWIDTH/2) / (1 + HAIRWIDTH - 2*a_radius);
		}
		a_over_b = dmod->shape.comp[0].desc.ovoid.a_over_b.val;
		if (a_over_b <= SMALLRATIO) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO - a_over_b);
			a_over_b = SMALLRATIO / (1 + SMALLRATIO - a_over_b);
		}
		b_over_c = dmod->shape.comp[0].desc.ovoid.b_over_c.val;
		if (b_over_c <= SMALLRATIO) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO - b_over_c);
			b_over_c = SMALLRATIO / (1 + SMALLRATIO - b_over_c);
		}
		k_asym = dmod->shape.comp[0].desc.ovoid.k.val;
		if (fabs(k_asym) > 1 - SMALLVAL) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(fabs(k_asym) + SMALLVAL);
			if (k_asym > 0.0)
				k_asym =  1 - SMALLVAL*(1 - SMALLVAL)/k_asym;
			else
				k_asym = -1 - SMALLVAL*(1 - SMALLVAL)/k_asym;
		}

		/* Compute x0, the x-offset that places the ovoid's center of mass at
		 * the origin; for small |k|, use an analytical approximation to avoid
		 * roundoff problems */
		if (fabs(k_asym) > SMALLOVOIDK1) {
			x0term = 3*(1 - k_asym*k_asym)*log((1 + k_asym)/(1 - k_asym));
			numer = 2.0*k_asym*(3 - 2*k_asym*k_asym) - x0term;
			denom = 2.0*k_asym*(3 - k_asym*k_asym) - x0term;
			x0 = (a_radius/k_asym)*(numer/denom);
		} else {
			x0 = 0.4*k_asym*a_radius;
		}
	}
}

/* nv-threaded kernel: solve for each vertex's radial deviation r on an ovoid
 * component, using the parameters staged by set_ovoid_parameters_krnl. For
 * very small |k| a first-order correction to the ellipsoid solution is used;
 * otherwise the cubic in r/a is solved and the admissible root selected. */
__global__ void ovoid_distance_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j, k, nrealroots;
	double a_over_c, h, alpha0, u_x, coeff[4], goodroot, realroot[3], x_over_a;

	if (i < dmod->shape.comp[0].real.nv) {
		a_over_c = a_over_b*b_over_c;
		h = a_over_b*a_over_b*dmod->shape.comp[0].real.v[i].u[1]
				*dmod->shape.comp[0].real.v[i].u[1]
				+ a_over_c*a_over_c
				*dmod->shape.comp[0].real.v[i].u[2]*dmod->shape.comp[0].real.v[i].u[2];
		alpha0 = x0/a_radius;
		u_x = dmod->shape.comp[0].real.v[i].u[0];
		coeff[3] = (h - u_x*u_x)*k_asym*u_x;
		coeff[2] = (1 + 3*k_asym*alpha0)*u_x*u_x + h*(1 - k_asym*alpha0);
		coeff[1] = (k_asym - (2 + 3*k_asym*alpha0)*alpha0)*u_x;
		coeff[0] = -(1 - alpha0*alpha0)*(1 + k_asym*alpha0);
		if (fabs(k_asym) <= SMALLOVOIDK2) {

			/* |k| is very small, so guard against roundoff error by computing
			 * the vertex position for an ellipsoid (k = 0) and then applying
			 * a first-order correction for nonzero k */
			goodroot = 1/sqrt(u_x*u_x + h);
			goodroot -= (coeff[3]*goodroot*goodroot*goodroot + coeff[1]*goodroot)
					/ (3*coeff[3]*goodroot*goodroot + 2*coeff[2]*goodroot + coeff[1]);
		} else {

			/* |k| isn't very small, so solve the cubic equation */
			nrealroots = cubic_realroots_cuda( coeff, realroot);
			goodroot = -HUGENUMBER;
			for (k=0; k<nrealroots; k++)
				if (realroot[k] >= 0.0) {
					x_over_a = realroot[k]*u_x;
					if (fabs(x_over_a - alpha0) - 1 < OVOIDTOL)
						goodroot = MAX( goodroot, realroot[k]);
				}
		}
		if (goodroot < 0.0)
			printf("Can't compute vertex displacement for ovoid vertex %d\n", i);
		dmod->shape.comp[0].real.v[i].r.val = goodroot*a_radius;

		/* Assign scalefactor values */
		dmod->shape.comp[0].real.scalefactor[0].state =
				dmod->shape.comp[0].desc.ovoid.two_a.state;
		dmod->shape.comp[0].real.scalefactor[1].state =
				dmod->shape.comp[0].desc.ovoid.a_over_b.state;
		dmod->shape.comp[0].real.scalefactor[2].state =
				dmod->shape.comp[0].desc.ovoid.b_over_c.state;
		for (j=0; j<=2; j++)
			dmod->shape.comp[0].real.scalefactor[j].val = 1.0;
	}
}

/* nv-threaded kernel: evaluate the spherical-harmonic expansion for each
 * vertex's radial deviation; clamp very small/negative radii and flag the
 * model (baddiam) so penalties can be applied. */
__global__ void harmonic_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int L, l, m;
	double r;

	if (i < dmod->shape.comp[0].real.nv) {
		L = dmod->shape.comp[0].desc.har.nhar;
		r = 0.0;
		for (l=0; l<=L; l++) {
			r += dmod->shape.comp[0].desc.har.a[l][0].val
					* dmod->shape.comp[0].real.v[i].afactor[l][0];
			for (m=1; m<=l; m++)
				r += dmod->shape.comp[0].desc.har.a[l][m].val
						* dmod->shape.comp[0].real.v[i].afactor[l][m]
						+ dmod->shape.comp[0].desc.har.b[l][m].val
						* dmod->shape.comp[0].real.v[i].bfactor[l][m];
		}
		if (r > HAIRWIDTH/2) {
			dmod->shape.comp[0].real.v[i].r.val = r;
		} else {
			dpar->baddiam = 1;
			/* Spread the penalty over the (L+1)^2 harmonic coefficients */
			dpar->baddiam_logfactor += log(1 + HAIRWIDTH - 2*r) / ((L+1)*(L+1));
			dmod->shape.comp[0].real.v[i].r.val = (HAIRWIDTH/2) / (1 + HAIRWIDTH - 2*r);
		}
	}
}

/* 3-threaded kernel (one thread per scalefactor component): resolve '='
 * states, copy harmonic scalefactors into the realization, and clamp tiny
 * ratios (flagging baddiam). */
__global__ void harmonic_scalefactor_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	int j = threadIdx.x;

	if (j < 3){
		if (j > 0 && dmod->shape.comp[0].desc.har.scalefactor[j].state == '=')
			dmod->shape.comp[0].desc.har.scalefactor[j].val =
					dmod->shape.comp[0].desc.har.scalefactor[j-1].val;
		dmod->shape.comp[0].real.scalefactor[j].state =
				dmod->shape.comp[0].desc.har.scalefactor[j].state;
		dmod->shape.comp[0].real.scalefactor[j].val =
				dmod->shape.comp[0].desc.har.scalefactor[j].val;
		if (dmod->shape.comp[0].real.scalefactor[j].val <= SMALLRATIO) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO -
					dmod->shape.comp[0].real.scalefactor[j].val);
			dmod->shape.comp[0].real.scalefactor[j].val = SMALLRATIO /
					(1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val);
		}
	}
}

/* nv-threaded kernel: for vertex-type components, propagate deviations whose
 * state is '=' from their mirror vertex. */
__global__ void vertex_update_dev_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int v_mirror;

	if (i < dmod->shape.comp[0].real.nv) {
		if (dmod->shape.comp[0].real.v[i].r.state == '=') {
			v_mirror = dmod->shape.comp[0].real.v[i].v_mirror;
			dmod->shape.comp[0].real.v[i].r.val =
					dmod->shape.comp[0].real.v[v_mirror].r.val;
		}
	}
}

/* 3-threaded kernel (one thread per scalefactor component) for vertex-type
 * components: resolve '=' states and clamp tiny scalefactors.
 * FIX: the guard was `j < 2`, so scalefactor[2] was never copied or clamped
 * even though the kernel is documented (and launched) as 3-threaded and the
 * parallel harmonic_scalefactor_krnl uses `j < 3`.
 * NOTE(review): unlike the harmonic variant this kernel does not copy
 * .state into real.scalefactor[j] — confirm whether that is intentional. */
__global__ void vertex_scalefactor_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	int j = threadIdx.x;

	if (j < 3) {
		if (j > 0 && dmod->shape.comp[0].desc.ver.scalefactor[j].state == '=')
			dmod->shape.comp[0].desc.ver.scalefactor[j].val =
					dmod->shape.comp[0].desc.ver.scalefactor[j-1].val;
		dmod->shape.comp[0].real.scalefactor[j].val =
				dmod->shape.comp[0].desc.ver.scalefactor[j].val;
		if (dmod->shape.comp[0].real.scalefactor[j].val <= SMALLRATIO) {
			dpar->baddiam = 1;
			dpar->baddiam_logfactor += log(1 + SMALLRATIO -
					dmod->shape.comp[0].real.scalefactor[j].val);
			dmod->shape.comp[0].real.scalefactor[j].val = SMALLRATIO /
					(1 + SMALLRATIO - dmod->shape.comp[0].real.scalefactor[j].val);
		}
	}
}

/* nv-threaded kernel: form each vertex's Cartesian coordinates from its base
 * point a[j], direction cosines u[j], deviation r, and scalefactors. */
__global__ void calc_vertex_co_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j;

	if (i < dmod->shape.comp[0].real.nv){
		for (j=0; j<=2; j++)
			dmod->shape.comp[0].real.v[i].x[j] =
					dmod->shape.comp[0].real.scalefactor[j].val *
					(dmod->shape.comp[0].real.v[i].u[j] *
					 dmod->shape.comp[0].real.v[i].r.val +
					 dmod->shape.comp[0].real.v[i].a[j]);
	}
}

/* nv-threaded kernel: apply the component rotation matrix m to each vertex.
 * FIX: the original guarded the per-vertex work with (threadIdx.x == 0), so
 * although the kernel is launched with nv threads only thread 0 of each block
 * (vertices at multiples of blockDim.x) was ever rotated. Every vertex is now
 * handled; the all-angles-zero short-circuit is evaluated per thread (same
 * data, same result). */
__global__ void perform_rotation_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	double x[3];
	int j, k;

	if (i < dmod->shape.comp[0].real.nv) {
		if (!(dmod->shape.comp[0].rot[0].val == 0 &&
		      dmod->shape.comp[0].rot[1].val == 0 &&
		      dmod->shape.comp[0].rot[2].val == 0 )) {
			for (j=0; j<=2; j++) {
				x[j] = 0.0;
				for (k=0; k<=2; k++)
					x[j] += dmod->shape.comp[0].m[j][k] *
							dmod->shape.comp[0].real.v[i].x[k];
			}
			for (j=0; j<=2; j++)
				dmod->shape.comp[0].real.v[i].x[j] = x[j];
		}
	}
}

/* nv-threaded kernel: apply the component translation offsets to each vertex.
 * FIX: same (threadIdx.x == 0) guard bug as perform_rotation_krnl — only one
 * vertex per block was translated. Every vertex is now handled. */
__global__ void perform_translation_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j;

	if (i < dmod->shape.comp[0].real.nv) {
		if (!(dmod->shape.comp[0].off[0].val == 0.0 &&
		      dmod->shape.comp[0].off[1].val == 0.0 &&
		      dmod->shape.comp[0].off[2].val == 0.0 )) {
			for (j=0; j<=2; j++)
				dmod->shape.comp[0].real.v[i].x[j] += dmod->shape.comp[0].off[j].val;
		}
	}
}

/* Single-threaded kernel: scan the optical and radar scattering laws and set
 * the file-scope flag harmonic_scatlaw when any harmonic law is in use. */
__global__ void set_optical_params_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	int ilaw;

	if (threadIdx.x == 0) {
		harmonic_scatlaw = 0;
		for (ilaw=0; ilaw<dmod->photo.noptlaws; ilaw++)
			if (dmod->photo.opttype[ilaw] == HARMLAMBERT ||
			    dmod->photo.opttype[ilaw] == HARMLOMMEL  ||
			    dmod->photo.opttype[ilaw] == HARMHAPKE   ||
			    dmod->photo.opttype[ilaw] == HARMKAAS)
				harmonic_scatlaw = 1;
		for (ilaw=0; ilaw<dmod->photo.nradlaws; ilaw++)
			if (dmod->photo.radtype[ilaw] == HARMCOSINE_DIFF)
				harmonic_scatlaw = 1;
	}
}

/* nv-threaded debug kernel: record the maximum attached-facet and
 * attached-side counts over all vertices into nafnas[0..1].
 * NOTE(review): the zero-initialization by thread v==0 is only ordered with
 * respect to its own block (__syncthreads is block-scoped); with multiple
 * blocks the atomicMax calls of other blocks can race the init. Debug use
 * only — zero nafnas from the host before launch to be safe. */
__global__ void dbg_vertex_nrmls_krnl(struct mod_t *dmod, int *nafnas)
{
	int v = blockIdx.x * blockDim.x + threadIdx.x;

	if (v == 0) {
		nafnas[0] = 0;
		nafnas[1] = 0;
	}
	__syncthreads();
	if (v < dmod->shape.comp[0].real.nv) {
		atomicMax(&nafnas[0], dmod->shape.comp[0].real.v[v].naf);
		atomicMax(&nafnas[1], dmod->shape.comp[0].real.v[v].nas);
	}
}

/* nv-threaded kernel: set each vertex normal to the normalized sum of the
 * normals of all facets attached to that vertex. Assumes facet normals have
 * already been computed (facet_krnl). */
__global__ void calc_vertex_nrmls_krnl(struct mod_t *dmod)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	double n[3];
	int j, k, naf, f;

	if (i < dmod->shape.comp[0].real.nv){
		n[0] = n[1] = n[2] = 0.0;
		naf = dmod->shape.comp[0].real.v[i].naf;
		for (j=0; j<naf; j++) {
			f = dmod->shape.comp[0].real.v[i].af[j];
			n[0] += dmod->shape.comp[0].real.f[f].n[0];
			n[1] += dmod->shape.comp[0].real.f[f].n[1];
			n[2] += dmod->shape.comp[0].real.f[f].n[2];
		}
		dev_normalize( n);
		for (k=0; k<=2; k++)
			dmod->shape.comp[0].real.v[i].n[k] = n[k];
	}
}

/* nf-threaded kernel: for each facet compute the outward unit normal and
 * area (dev_facnrm), the centroid of its three corner vertices, and — when a
 * harmonic scattering law is active — the centroid's angular coordinates. */
__global__ void facet_krnl(struct par_t *dpar, struct mod_t *dmod)
{
	int f = blockIdx.x * blockDim.x + threadIdx.x;
	int j;

	if (f < dmod->shape.comp[0].real.nf) {
		dmod->shape.comp[0].real.f[f].area = dev_facnrm(dmod->shape.comp[0].real, f);
		for (j=0; j<=2; j++)
			dmod->shape.comp[0].real.f[f].x[j] =
					(dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x[j]
					+ dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x[j]
					+ dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x[j] )/3;
		if (harmonic_scatlaw) {
			dmod->shape.comp[0].real.f[f].theta = atan2(
					sqrt(dmod->shape.comp[0].real.f[f].x[0]*dmod->shape.comp[0].real.f[f].x[0]
					   + dmod->shape.comp[0].real.f[f].x[1]*dmod->shape.comp[0].real.f[f].x[1] ),
					dmod->shape.comp[0].real.f[f].x[2]);
			dmod->shape.comp[0].real.f[f].phi = atan2( dmod->shape.comp[0].real.f[f].x[1],
					dmod->shape.comp[0].real.f[f].x[0]);
		}
	}
}

/* nv-threaded kernel: mark every vertex active (single-component models).
 * Uses the device symbol dnv (device-side copy of nv). */
__global__ void set_real_active_vert_krnl(struct mod_t *dmod)
{
	int v = blockIdx.x * blockDim.x + threadIdx.x;

	if (v < dnv)
		dmod->shape.comp[0].real.v[v].act = 1;
}

/* nf-threaded kernel: mark every facet active (single-component models). */
__global__ void set_real_active_facet_krnl(struct mod_t *dmod)
{
	int f = blockIdx.x * blockDim.x + threadIdx.x;

	if (f < dmod->shape.comp[0].real.nf)
		dmod->shape.comp[0].real.f[f].act = 1;
}
/* ns-threaded kernel: mark every side active (single-component models). */
__global__ void set_real_active_side_krnl(struct mod_t *dmod)
{
	int k = blockIdx.x * blockDim.x + threadIdx.x;

	if (k < dmod->shape.comp[0].real.ns)
		dmod->shape.comp[0].real.s[k].act = 1;
}

/* Realize each model component as a polyhedral solid with triangular facets:
 * 1. realize_coordinates_cuda - per-vertex displacements (base point plus a
 *    signed deviation along fixed direction cosines) and per-facet normals,
 *    areas, centroids, and (for some scattering laws) angular coordinates.
 * 2. check_surface_cuda - for multi-component models, deactivate facets that
 *    fall inside another component (currently single-component only).
 * 3. compute_moments_cuda - area, volume, COM, and inertia tensor of each
 *    component and of the overall model. */
__host__ void realize_mod_cuda( struct par_t *dpar, struct mod_t *dmod,
		unsigned char type)
{
	realize_coordinates_cuda(dpar, dmod, type);
	check_surface_cuda(dmod);
	compute_moments_cuda(dmod);
}

/* Compute the vertex coordinates and (if necessary) facet angular coordinates
 * for the model's vertex realization. The CUDA port assumes a single-component
 * model. Side effect: copies the device symbols dnv/dnf/dns into the host
 * globals nv/nf/ns and fills the launch-config globals nvBLK/nvTHD/nfBLK/nfTHD
 * used by later kernels (including check_surface_cuda). */
__host__ void realize_coordinates_cuda( struct par_t *dpar, struct mod_t *dmod,
		unsigned char type)
{
	dim3 BLK, THD;

	/* Initialize flag for tiny/negative ellipsoid diameters */
	set_diam_krnl<<<1,1>>>(dpar, dmod);
	checkErrorAfterKernelLaunch("set_diam_krnl, line 563");

	/* Copy nv, nf, ns back from the device symbols; used as launch parameters.
	 * FIX: the nf and ns copies used sizeof(nv); use each variable's own size. */
	gpuErrchk(cudaMemcpyFromSymbol(&nv, dnv, sizeof(nv), 0, cudaMemcpyDeviceToHost));
	gpuErrchk(cudaMemcpyFromSymbol(&nf, dnf, sizeof(nf), 0, cudaMemcpyDeviceToHost));
	gpuErrchk(cudaMemcpyFromSymbol(&ns, dns, sizeof(ns), 0, cudaMemcpyDeviceToHost));

	/* Ceiling-divide launch parameters for all-vertex kernels */
	nvBLK.x = floor((maxThreadsPerBlock - 1 + nv) / maxThreadsPerBlock);
	nvTHD.x = maxThreadsPerBlock;	// Thread block dimensions

	/* Ceiling-divide launch parameters for all-facet kernels */
	nfBLK.x = floor((maxThreadsPerBlock - 1 + nf) / maxThreadsPerBlock);
	nfTHD.x = maxThreadsPerBlock;	// Thread block dimensions

	/* Check component type & create corresponding vertex realization. */
	switch (type) {
	case ELLIPSE:
		/* To avoid negative/very small diameters, adjust a[i] = 1/radius[i]^2
		 * so it monotonically increases as diameter[i] decreases through zero
		 * and beyond; flag "baddiam" for very small/negative diameters so
		 * penalties can be applied later.
		 * FIX: was launched <<<BLK,THD>>> with BLK/THD never assigned (worked
		 * only via dim3's default (1,1,1)); the single-thread launch is now
		 * explicit. */
		ellipse_diameter_krnl<<<1,1>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("ellipse_diameter_krnl, line 594");

		/* Kernel finds distance of each vertex to ellipsoid's center */
		ellipse_distance_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("ellipse_distance_krnl, line 598");

		/* Launch kernel to set real->scalefactor */
		ellipse_scalefactor_krnl<<<1,1>>>(dmod);
		checkErrorAfterKernelLaunch("ellipse_scalefactor_krnl, line ");
		break;
	case OVOID:
		/* Determine all shape parameters, making sure none are out of bounds */
		set_ovoid_parameters_krnl<<<1,1>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("set_ovoid_parameters_krnl, line 603");

		/* Kernel finds distance of each vertex to ovoid's center */
		ovoid_distance_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("ovoid_distance_krnl, line 608");
		break;
	case HARMONIC:
		/* Kernel sets parameters associated with harmonic model */
		harmonic_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("harmonic_krnl, line 614");

		BLK.x = 1; THD.x = 3;
		harmonic_scalefactor_krnl<<<BLK,THD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("harmonic_scalefactor_krnl, line 618");
		break;
	case VERTEX:
		/* The vertex type is its own realization, but we still need to update
		 * the "scale factor" parameters and any vertex deviations that have
		 * the '=' state */
		vertex_update_dev_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("vertex_update_dev_kernel, line 625");

		BLK.x = 1; THD.x = 3;
		vertex_scalefactor_krnl<<<BLK,THD>>>(dpar, dmod);
		checkErrorAfterKernelLaunch("vertex_scalefactor_krnl, line 629");
		break;
	default:
		printf("realize_mod.c: don't know that component type\n");
	}	/* end of switch statement for component type */

	/* Calculate vertex coordinates for this component */
	calc_vertex_co_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
	checkErrorAfterKernelLaunch("calc_vertex_co_krnl, line 637");

	/* Use this component's rotational offset angles to create comp[c].m, the
	 * rotation matrix applied to the vertex coordinates */
	euler2mat_realize_mod_krnl<<<1,1>>>(dmod);
	checkErrorAfterKernelLaunch("dev_euler2mat, line 642");

	/* If needed, perform rotation on this component */
	perform_rotation_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
	checkErrorAfterKernelLaunch("perform_rotation_krnl, line 647");

	/* If needed, perform translation on this component */
	perform_translation_krnl<<<nvBLK,nvTHD>>>(dpar, dmod);
	checkErrorAfterKernelLaunch("perform_translation_krnl, line 651");

	/* Figure out if optical/radar harmonic scattering laws are in use and set
	 * the flag harmonic_scatlaw accordingly */
	set_optical_params_krnl<<<1,1>>>(dpar, dmod);
	checkErrorAfterKernelLaunch("set_optical_params_krnl, line 656");

	/* For each facet, compute outward unit normal, area, centroid of the three
	 * corner vertices, and corresponding angular coordinates */
	facet_krnl<<<nfBLK,nfTHD>>>(dpar, dmod);
	checkErrorAfterKernelLaunch("facet_krnl, line 662");

	/* Vertex normals: normalized sums of the facet normals of all facets
	 * attached to each vertex */
	calc_vertex_nrmls_krnl<<<nvBLK,nvTHD>>>(dmod);
	checkErrorAfterKernelLaunch("calc_vertex_nrmls, line 667");
}

/*.....................................................................................*/

/* Determine which vertices, facets, and sides of a multiple-component model
 * lie interior to the model rather than on its surface, and reset their "act"
 * flags to zero. The CUDA port supports only single-component models, so all
 * vertices/facets/sides are simply flagged active. The serial multi-component
 * ray-intersection test (see realize_mod.c, rayfacint) has not been ported;
 * the large commented-out transcription of it was removed from here. Relies
 * on nv/nf/ns having been set by realize_coordinates_cuda. */
__host__ void check_surface_cuda(struct mod_t *dmod)
{
	/* Launch parameters for all-vertex kernels (ceiling divide) */
	nvBLK.x = floor((maxThreadsPerBlock - 1 + nv) / maxThreadsPerBlock);
	nvTHD.x = maxThreadsPerBlock;	// Thread block dimensions

	/* Launch parameters for all-facet kernels */
	nfBLK.x = floor((maxThreadsPerBlock - 1 + nf) / maxThreadsPerBlock);
	nfTHD.x = maxThreadsPerBlock;	// Thread block dimensions

	/* Launch parameters for all-side kernels */
	nsBLK.x = floor((maxThreadsPerBlock - 1 + ns) / maxThreadsPerBlock);
	nsTHD.x = maxThreadsPerBlock;	// Thread block dimensions

	/* 1-component model: flag all vertices, facets, and sides as active */
	set_real_active_vert_krnl<<<nvBLK,nvTHD>>>(dmod);
	checkErrorAfterKernelLaunch("set_real_active_vert_krnl, line 690");

	set_real_active_facet_krnl<<<nfBLK,nfTHD>>>(dmod);
	checkErrorAfterKernelLaunch("set_real_active_vert_krnl, line 694");

	set_real_active_side_krnl<<<nsBLK,nsTHD>>>(dmod);
	checkErrorAfterKernelLaunch("set_real_active_side_krnl, line 696");

	return;
}

/* Single-threaded kernel: zero the whole-model area/volume/COM/inertia and
 * stash component c's facet count in the device symbol cm_nf for the host. */
__global__ void comp_moments_1stinit_krnl(struct mod_t *dmod, int c)
{
	int j, k;

	if (threadIdx.x == 0) {
		dmod->shape.area = 0.0;
		dmod->shape.volume = 0.0;
		for (k=0; k<=2; k++) {
			dmod->shape.com[k] = 0.0;
			for (j=0; j<=2; j++)
				dmod->shape.inertia[k][j] = 0.0;
		}
		cm_nf = dmod->shape.comp[c].real.nf;
	}
}

/* Single-threaded kernel: store the precomputed areas and zero component 0's
 * volume/COM/inertia accumulators ahead of the facet pass. */
__global__ void comp_moments_2ndinit_krnl(struct mod_t *dmod, float area1,
		float area2, int c)
{
	if (threadIdx.x == 0) {
		int j, k;
		dmod->shape.comp[c].area = area1;
		dmod->shape.area = area2;
		dmod->shape.comp[0].volume = 0.0;
		for (k=0; k<=2; k++) {
			dmod->shape.comp[0].com[k] = 0.0;
			for (j=0; j<=2; j++)
				dmod->shape.comp[0].inertia[k][j] = 0.0;
		}
		dmod->shape.comp[0].area = 0.0;	// actually 1st step in calculating surface area
	}
}

/* nf-threaded kernel: per-facet volume/COM/inertia contributions (dev_facmom)
 * written into flat float arrays for a subsequent parallel reduction.
 * NOTE(review): vertex/facet accesses mix comp[c] and comp[0] indexing; they
 * coincide only because the port assumes c == 0 — confirm before extending to
 * multi-component models. */
__global__ void comp_moments_facet_krnl(struct mod_t *dmod, int c, float *dvarr,
		float *dcom0, float *dcom1, float *dcom2, float *dI00, float *dI01,
		float *dI02, float *dI10, float *dI11, float *dI12, float *dI20,
		float *dI21, float *dI22)
{
	int f = blockIdx.x * blockDim.x + threadIdx.x;
	double dI[3][3], dcom[3], dv;

	if (f < dmod->shape.comp[0].real.nf) {
		dev_facmom(dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x,
		           dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x,
		           dmod->shape.comp[c].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x,
		           dmod->shape.comp[c].real.f[f].n, &dv, dcom, dI);

		/* Stash per-facet results for the later parallel reduction */
		dvarr[f] = (float) dv;
		dcom0[f] = (float)dcom[0];
		dcom1[f] = (float)dcom[1];
		dcom2[f] = (float)dcom[2];
		dI00[f] = (float)dI[0][0];
		dI01[f] = (float)dI[0][1];
		dI02[f] = (float)dI[0][2];
		dI10[f] = (float)dI[1][0];
		dI11[f] = (float)dI[1][1];
		dI12[f] = (float)dI[1][2];
		dI20[f] = (float)dI[2][0];
		dI21[f] = (float)dI[2][1];
		dI22[f] = (float)dI[2][2];
	}
}

/* nf-threaded kernel (deprecated path): accumulate moments directly into the
 * model with plain "+=".
 * WARNING(review): these unsynchronized read-modify-writes race across
 * threads/blocks; results are only correct for a single-thread launch. Kept
 * for reference — the reduce path (comp_moments_facet_krnl +
 * dvdI_reduce_single) is the one in use. */
__global__ void comp_moments_facets_old_krnl(struct mod_t *dmod)
{
	int f = blockIdx.x * blockDim.x + threadIdx.x;
	int j, k;
	double dI[3][3], dcom[3], dv;

	if (f < dmod->shape.comp[0].real.nf) {
		/* Surface area for this component; active facets also contribute to
		 * the overall model's area */
		dmod->shape.comp[0].area += dmod->shape.comp[0].real.f[f].area;
		if (dmod->shape.comp[0].real.f[f].act)
			dmod->shape.area += dmod->shape.comp[0].real.f[f].area;

		dev_facmom( dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x,
		            dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x,
		            dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x,
		            dmod->shape.comp[0].real.f[f].n, &dv, dcom, dI);

		dmod->shape.comp[0].volume += dv;
		for (j=0; j<=2; j++) {
			dmod->shape.comp[0].com[j] += dcom[j];
			for (k=0; k<=2; k++)
				dmod->shape.comp[0].inertia[j][k] += dI[j][k];
		}
		if (dmod->shape.comp[0].real.f[f].act) {
			dmod->shape.volume += dv;
			for (j=0; j<=2; j++) {
				dmod->shape.com[j] += dcom[j];
				for (k=0; k<=2; k++)
					dmod->shape.inertia[j][k] += dI[j][k];
			}
		}
	}
}

/* nf-threaded kernel: accumulate per-facet moments into the file-scope
 * __device__ float accumulators (rm_*) with atomics; the companion
 * comp_moments_facets_at2_krnl copies the totals into the model. */
__global__ void comp_moments_facets_atomics_krnl(struct mod_t *dmod)
{
	int f = blockIdx.x * blockDim.x + threadIdx.x;
	int j, k;
	double dI[3][3], dcom[3], dv;

	if (f < dmod->shape.comp[0].real.nf) {
		/* Surface area for this component; active facets also contribute to
		 * the overall model's area */
		atomicAdd(&rm_area, (float)dmod->shape.comp[0].real.f[f].area);
		if (dmod->shape.comp[0].real.f[f].act)
			atomicAdd(&rm_ifarea, (float)dmod->shape.comp[0].real.f[f].area);

		dev_facmom( dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[0] ].x,
		            dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[1] ].x,
		            dmod->shape.comp[0].real.v[ dmod->shape.comp[0].real.f[f].v[2] ].x,
		            dmod->shape.comp[0].real.f[f].n, &dv, dcom, dI);

		atomicAdd(&rm_vol, (float)dv);
		for (j=0; j<=2; j++) {
			atomicAdd(&rm_dcom[j], (float)dcom[j]);
			for (k=0; k<=2; k++)
				atomicAdd(&rm_dI[j][k], (float)dI[j][k]);
			/* FIX: removed stray `dmod->shape.comp[0].inertia[j][k] += dI[j][k];`
			 * that sat outside the braceless k-loop — it ran with k == 3
			 * (out-of-bounds write) and raced across threads; the at2 kernel
			 * copies rm_dI into inertia afterwards anyway. */
		}
		if (dmod->shape.comp[0].real.f[f].act) {
			/* FIX: active-facet volume was added to rm_vol a second time while
			 * rm_ifvol (read by comp_moments_facets_at2_krnl) was never
			 * written; accumulate into rm_ifvol instead. */
			atomicAdd(&rm_ifvol, (float)dv);
			for (j=0; j<=2; j++) {
				/* FIX: added missing (float) cast — dcom is double and the
				 * accumulator is float, so atomicAdd needs a float argument */
				atomicAdd(&rm_ifdcom[j], (float)dcom[j]);
				for (k=0; k<=2; k++)
					atomicAdd(&rm_ifdI[j][k], (float)dI[j][k]);
			}
		}
	}
}

/* Single-threaded kernel: copy the atomically-accumulated totals (rm_*) into
 * the model's per-component and whole-model moment fields. */
__global__ void comp_moments_facets_at2_krnl(struct mod_t *dmod)
{
	if (threadIdx.x ==0) {
		int i, j;
		dmod->shape.comp[0].area = (double)rm_area;
		dmod->shape.area = (double)rm_ifarea;
		dmod->shape.comp[0].volume = (double)rm_vol;
		dmod->shape.volume = (double)rm_ifvol;
		for (i=0; i<3; i++) {
			dmod->shape.comp[0].com[i] = rm_dcom[i];
			dmod->shape.com[i] = rm_ifdcom[i];
			for (j=0; j<3; j++) {
				dmod->shape.comp[0].inertia[i][j] = rm_dI[i][j];
				dmod->shape.inertia[i][j] = rm_ifdI[i][j];
			}
		}
	}
}

/* Single-threaded kernel: convert the accumulated first moments into the COM
 * vectors by dividing by the respective volumes. */
__global__ void comp_moments_com_krnl(struct mod_t *dmod)
{
	if (threadIdx.x == 0) {
		int j;
		for (j=0; j<=2; j++) {
			dmod->shape.comp[0].com[j] /= dmod->shape.comp[0].volume;
			dmod->shape.com[j] /= dmod->shape.volume;
		}
		/* FIX: removed dead store `j = 2;` left over from debugging */
	}
}

/* Compute the area and the 0,1,2-order moments (volume, center of mass, and
 * inertia tensor) of each component and of the overall model, assuming
 * uniform density and ignoring interior facets' contributions to the overall
 * model. CUDA port: single-component models only (c = 0), so the active-facet
 * area equals the total area (area2 = area1). */
__host__ void compute_moments_cuda( struct mod_t *dmod)
{
	float area1=0.0, area2=0.0, *dv, *dcom0, *dcom1, *dcom2, *dI00, *dI01,
			*dI02, *dI10, *dI11, *dI12, *dI20, *dI21, *dI22;
	int c=0, size;

	/* Initialize the model's surface area, volume, COM displacement, and
	 * inertia tensor; also publishes the facet count into cm_nf */
	comp_moments_1stinit_krnl<<<1,1>>>(dmod, c);
	checkErrorAfterKernelLaunch("comp_moments_init_krnl, line 945");
	gpuErrchk(cudaMemcpyFromSymbol(&size, cm_nf, sizeof(int), 0,
			cudaMemcpyDeviceToHost));

	area1 = compute_model_area(dmod, c, size);
	area2 = area1;

	/* Temporary per-facet arrays consumed by the parallel reduction */
	cudaCalloc((void**)&dv,    sizeof(float), size);
	cudaCalloc((void**)&dcom0, sizeof(float), size);
	cudaCalloc((void**)&dcom1, sizeof(float), size);
	cudaCalloc((void**)&dcom2, sizeof(float), size);
	cudaCalloc((void**)&dI00,  sizeof(float), size);
	cudaCalloc((void**)&dI01,  sizeof(float), size);
	cudaCalloc((void**)&dI02,  sizeof(float), size);
	cudaCalloc((void**)&dI10,  sizeof(float), size);
	cudaCalloc((void**)&dI11,  sizeof(float), size);
	cudaCalloc((void**)&dI12,  sizeof(float), size);
	cudaCalloc((void**)&dI20,  sizeof(float), size);
	cudaCalloc((void**)&dI21,  sizeof(float), size);
	cudaCalloc((void**)&dI22,  sizeof(float), size);

	/* Set area and initialize per-component COM and inertia arrays */
	comp_moments_2ndinit_krnl<<<1,1>>>(dmod, area1, area2, c);
	checkErrorAfterKernelLaunch("comp_moments_2ndinit_krnl in realize_mod_cuda");

	/* Load the temporary arrays with per-facet data */
	comp_moments_facet_krnl<<<nfBLK,nfTHD>>>(dmod, c, dv, dcom0, dcom1, dcom2,
			dI00, dI01, dI02, dI10, dI11, dI12, dI20, dI21, dI22);
	checkErrorAfterKernelLaunch("comp_moments_facets_krnl in compute_moments_cuda");

	/* Reduce the per-facet contributions into the model */
	dvdI_reduce_single(dmod, dv, dcom0, dcom1, dcom2, dI00, dI01, dI02, dI10,
			dI11, dI12, dI20, dI21, dI22, size, c);

	/* This kernel computes the overall COM vector */
	comp_moments_com_krnl<<<1,1>>>(dmod);
	checkErrorAfterKernelLaunch("comp_moments_facets_krnl, line 963");

	/* Free up the temporary arrays */
	cudaFree(dv);
	cudaFree(dcom0);	cudaFree(dcom1);	cudaFree(dcom2);
	cudaFree(dI00);		cudaFree(dI01);		cudaFree(dI02);
	cudaFree(dI10);		cudaFree(dI11);		cudaFree(dI12);
	cudaFree(dI20);		cudaFree(dI21);		cudaFree(dI22);
}

/* Find all real roots of a cubic equation, using methods given in section 5.6
 * of Numerical Recipes in C. Element 3 of the input coeff vector is the cubic
 * coefficient while element 0 is the constant term. Up to three real roots
 * are stored in the output realroot vector, with any unused elements set to a
 * large negative dummy value (-HUGENUMBER). Returns the number of real roots
 * found. The zero-coefficient tests assume nonzero coefficients are of order
 * unity.
 * NOTE(review): in the degenerate quadratic case with discriminant < 0 an
 * error is printed but execution continues into sqrt(negative) → NaN roots;
 * callers (ovoid_distance_krnl) reject them via goodroot < 0, but confirm. */
__device__ int cubic_realroots_cuda( double *coeff, double *realroot)
{
	int nrealroots, bsign;
	double a, b, c, discriminant, q, qsqrt, r, r2minusq3, rsign, s, t, theta;

	nrealroots = 0;
	realroot[0] = realroot[1] = realroot[2] = -HUGENUMBER;

	if (fabs(coeff[3]) < SMALLCOEFF3) {

		/* cubic term is zero */
		a = coeff[2];
		b = coeff[1];
		c = coeff[0];
		if (fabs(a) < SMALLVAL) {
			if (fabs(b) < SMALLVAL) {

				/* Error: the cubic, quadratic, and linear terms are zero */
				if (fabs(c) < SMALLVAL)
					printf("cubic_realroots in realize_mod.c: all four coefficients are zero\n");
				else
					printf("cubic_realroots in realize_mod.c: only the constant term is nonzero\n");
			} else {

				/* linear equation */
				realroot[0] = -c/b;
				nrealroots = 1;
			}
		} else {

			/* quadratic equation */
			discriminant = b*b - 4*a*c;
			if (discriminant < 0.0)
				printf("cubic_realroots in realize_mod.c: quadratic equation has no real roots\n");
			if (fabs(b) < SMALLVAL) {
				realroot[0] = sqrt(discriminant)/(2*a);
				realroot[1] = -realroot[0];
			} else {
				/* Numerically stable quadratic formula (avoid cancellation) */
				bsign = (b < 0.0) ? -1 : 1;
				q = -0.5*(b + bsign*sqrt(discriminant));
				realroot[0] = q/a;
				realroot[1] = c/q;
			}
			nrealroots = 2;
		}
	} else {

		/* cubic term is nonzero: scale to standard form x^3 + ax^2 + bx + c = 0 */
		a = coeff[2]/coeff[3];
		b = coeff[1]/coeff[3];
		c = coeff[0]/coeff[3];

		/* Check if there is one real root or three. Write out test quantity
		 * r^2 - q^3 explicitly in terms of a, b, c so high-order terms cancel
		 * and roundoff problems are less likely */
		q = (a*a - 3*b)/9;
		r = (2*a*a*a - 9*a*b + 27*c)/54;
		r2minusq3 = (4*a*a*a*c - a*a*b*b - 18*a*b*c + 27*c*c + 4*b*b*b)/108;
		if (r2minusq3 >= 0.0) {

			/* one real root */
			rsign = (r < 0.0) ? -1 : 1;
			s = -rsign*pow( fabs(r) + sqrt(r2minusq3), 1.0/3);
			t = (fabs(s) >= SMALLVAL) ? q/s : 0.0;
			realroot[0] = s + t - a/3;
			nrealroots = 1;
		} else {

			/* three real roots */
			qsqrt = sqrt(q);
			theta = acos(r/(q*qsqrt));
			realroot[0] = -2*qsqrt*cos(theta/3) - a/3;
			realroot[1] = -2*qsqrt*cos((theta + 2*PIE)/3) - a/3;
			realroot[2] = -2*qsqrt*cos((theta - 2*PIE)/3) - a/3;
			nrealroots = 3;
		}
	}
	return nrealroots;
}

#undef HAIRWIDTH
#undef SMALLRATIO
#undef SMALLOVOIDK1
#undef SMALLOVOIDK2
#undef OVOIDTOL
#undef MAXEDGE
#undef EDGETOL
#undef RTOL
#undef SMALLCOEFF3

/* Compute facet fi's outward unit normal (written into verts.f[fi].n) and
 * return its area: half the magnitude of the edge cross product. */
__device__ double dev_facnrm( struct vertices_t verts, int fi)
{
	int i;
	double a[3], b[3], area;

	for (i=0; i<=2; i++) {
		a[i] = verts.v[verts.f[fi].v[1]].x[i] - verts.v[verts.f[fi].v[0]].x[i];
		b[i] = verts.v[verts.f[fi].v[2]].x[i] - verts.v[verts.f[fi].v[1]].x[i];
	}
	area = 0.5*dev_cross( verts.f[fi].n, a, b);
	dev_normalize( verts.f[fi].n);
	return area;
}

/* z = x cross y; returns |z|. Uses a temporary so z may alias x or y. */
__device__ double dev_cross( double z[3], double x[3], double y[3])
{
	double zz[3];
	zz[0] = x[1]*y[2]-x[2]*y[1];
	zz[1] = x[2]*y[0]-x[0]*y[2];
	zz[2] = x[0]*y[1]-x[1]*y[0];
	z[0] = zz[0];
	z[1] = zz[1];
	z[2] = zz[2];
	return sqrt(z[0]*z[0]+z[1]*z[1]+z[2]*z[2]);
}
a6b22acd5c852077a833458f02d51406b89eb5d2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <float.h>
#include <iostream>
#include <tuple>

#include "utils/dispatch.cuh"
#include "utils/mink.cuh"

// A chunk of work is blocksize-many points of P1.
// The number of potential chunks to do is N*(1+(P1-1)/blocksize)
// call (1+(P1-1)/blocksize) chunks_per_cloud
// These chunks are divided among the gridSize-many blocks.
// In block b, we work on chunks b, b+gridSize, b+2*gridSize etc .
// In chunk i, we work on cloud i/chunks_per_cloud on points starting from
// blocksize*(i%chunks_per_cloud).

// Brute-force KNN, version 0: one thread per query point, chunk-strided over
// the grid.  The running best-K (dists, idxs) live in global memory via MinK,
// so this version works for any D and K.  norm selects L2 (squared) when 2,
// otherwise L1 (abs).
template <typename scalar_t>
__global__ void KNearestNeighborKernelV0(
    const scalar_t* __restrict__ points1,
    const scalar_t* __restrict__ points2,
    const int64_t* __restrict__ lengths1,
    const int64_t* __restrict__ lengths2,
    scalar_t* __restrict__ dists,
    int64_t* __restrict__ idxs,
    const size_t N,
    const size_t P1,
    const size_t P2,
    const size_t D,
    const size_t K,
    const size_t norm) {
  // Store both dists and indices for knn in global memory.
  const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x);
  const int64_t chunks_to_do = N * chunks_per_cloud;
  for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
    const int64_t n = chunk / chunks_per_cloud;
    const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
    int64_t p1 = start_point + threadIdx.x;
    if (p1 >= lengths1[n])
      continue;
    // NOTE(review): int offset may overflow when N*P1*K exceeds INT_MAX;
    // confirm expected problem sizes upstream.
    int offset = n * P1 * K + p1 * K;
    int64_t length2 = lengths2[n];
    MinK<scalar_t, int64_t> mink(dists + offset, idxs + offset, K);
    for (int p2 = 0; p2 < length2; ++p2) {
      // Find the distance between points1[n, p1] and points[n, p2]
      scalar_t dist = 0;
      for (int d = 0; d < D; ++d) {
        scalar_t coord1 = points1[n * P1 * D + p1 * D + d];
        scalar_t coord2 = points2[n * P2 * D + p2 * D + d];
        scalar_t diff = coord1 - coord2;
        scalar_t norm_diff = (norm == 2) ? (diff * diff) : abs(diff);
        dist += norm_diff;
      }
      mink.add(dist, p2);
    }
  }
}

// Version 1: D is a template parameter so the query point can be cached in a
// thread-local array; best-K results still live in global memory.
template <typename scalar_t, int64_t D>
__global__ void KNearestNeighborKernelV1(
    const scalar_t* __restrict__ points1,
    const scalar_t* __restrict__ points2,
    const int64_t* __restrict__ lengths1,
    const int64_t* __restrict__ lengths2,
    scalar_t* __restrict__ dists,
    int64_t* __restrict__ idxs,
    const size_t N,
    const size_t P1,
    const size_t P2,
    const size_t K,
    const size_t norm) {
  // Same idea as the previous version, but hoist D into a template argument
  // so we can cache the current point in a thread-local array. We still store
  // the current best K dists and indices in global memory, so this should work
  // for very large K and fairly large D.
  scalar_t cur_point[D];
  const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x);
  const int64_t chunks_to_do = N * chunks_per_cloud;
  for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
    const int64_t n = chunk / chunks_per_cloud;
    const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
    int64_t p1 = start_point + threadIdx.x;
    if (p1 >= lengths1[n])
      continue;
    for (int d = 0; d < D; ++d) {
      cur_point[d] = points1[n * P1 * D + p1 * D + d];
    }
    int offset = n * P1 * K + p1 * K;
    int64_t length2 = lengths2[n];
    MinK<scalar_t, int64_t> mink(dists + offset, idxs + offset, K);
    for (int p2 = 0; p2 < length2; ++p2) {
      // Find the distance between cur_point and points[n, p2]
      scalar_t dist = 0;
      for (int d = 0; d < D; ++d) {
        scalar_t diff = cur_point[d] - points2[n * P2 * D + p2 * D + d];
        scalar_t norm_diff = (norm == 2) ? (diff * diff) : abs(diff);
        dist += norm_diff;
      }
      mink.add(dist, p2);
    }
  }
}

// This is a shim functor to allow us to dispatch using DispatchKernel1D
template <typename scalar_t, int64_t D>
struct KNearestNeighborV1Functor {
  static void run(
      size_t blocks,
      size_t threads,
      const scalar_t* __restrict__ points1,
      const scalar_t* __restrict__ points2,
      const int64_t* __restrict__ lengths1,
      const int64_t* __restrict__ lengths2,
      scalar_t* __restrict__ dists,
      int64_t* __restrict__ idxs,
      const size_t N,
      const size_t P1,
      const size_t P2,
      const size_t K,
      const size_t norm) {
    hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    hipLaunchKernelGGL(( KNearestNeighborKernelV1<scalar_t, D>), dim3(blocks), dim3(threads), 0, stream, 
        points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2, K, norm);
  }
};

// Version 2: both D and K are template parameters, so the best-K scratch
// arrays are thread-local as well; results are copied to global memory at the
// end of each chunk.
template <typename scalar_t, int64_t D, int64_t K>
__global__ void KNearestNeighborKernelV2(
    const scalar_t* __restrict__ points1,
    const scalar_t* __restrict__ points2,
    const int64_t* __restrict__ lengths1,
    const int64_t* __restrict__ lengths2,
    scalar_t* __restrict__ dists,
    int64_t* __restrict__ idxs,
    const int64_t N,
    const int64_t P1,
    const int64_t P2,
    const size_t norm) {
  // Same general implementation as V2, but also hoist K into a template arg.
  scalar_t cur_point[D];
  scalar_t min_dists[K];
  int min_idxs[K];
  const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x);
  const int64_t chunks_to_do = N * chunks_per_cloud;
  for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
    const int64_t n = chunk / chunks_per_cloud;
    const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
    int64_t p1 = start_point + threadIdx.x;
    if (p1 >= lengths1[n])
      continue;
    for (int d = 0; d < D; ++d) {
      cur_point[d] = points1[n * P1 * D + p1 * D + d];
    }
    int64_t length2 = lengths2[n];
    MinK<scalar_t, int> mink(min_dists, min_idxs, K);
    for (int p2 = 0; p2 < length2; ++p2) {
      scalar_t dist = 0;
      for (int d = 0; d < D; ++d) {
        int offset = n * P2 * D + p2 * D + d;
        scalar_t diff = cur_point[d] - points2[offset];
        scalar_t norm_diff = (norm == 2) ? (diff * diff) : abs(diff);
        dist += norm_diff;
      }
      mink.add(dist, p2);
    }
    // Flush thread-local best-K to global output.
    for (int k = 0; k < mink.size(); ++k) {
      idxs[n * P1 * K + p1 * K + k] = min_idxs[k];
      dists[n * P1 * K + p1 * K + k] = min_dists[k];
    }
  }
}

// This is a shim so we can dispatch using DispatchKernel2D
template <typename scalar_t, int64_t D, int64_t K>
struct KNearestNeighborKernelV2Functor {
  static void run(
      size_t blocks,
      size_t threads,
      const scalar_t* __restrict__ points1,
      const scalar_t* __restrict__ points2,
      const int64_t* __restrict__ lengths1,
      const int64_t* __restrict__ lengths2,
      scalar_t* __restrict__ dists,
      int64_t* __restrict__ idxs,
      const int64_t N,
      const int64_t P1,
      const int64_t P2,
      const size_t norm) {
    hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    hipLaunchKernelGGL(( KNearestNeighborKernelV2<scalar_t, D, K>), dim3(blocks), dim3(threads), 0, stream, 
        points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2, norm);
  }
};

template <typename scalar_t, int D, int K>
__global__ void KNearestNeighborKernelV3(
    const scalar_t* __restrict__ points1,
    const scalar_t* __restrict__ points2,
    const int64_t* __restrict__ lengths1,
    const int64_t* __restrict__ lengths2,
    scalar_t* __restrict__ dists,
    int64_t* __restrict__ idxs,
    const size_t N,
    const size_t P1,
    const size_t P2,
    const size_t norm) {
  // Same idea as V2, but use register indexing for thread-local arrays.
  // Enabling sorting for this version leads to huge slowdowns; I suspect
  // that it forces min_dists into local memory rather than registers.
  // As a result this version is always unsorted.
  scalar_t cur_point[D];
  scalar_t min_dists[K];
  int min_idxs[K];
  const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x);
  const int64_t chunks_to_do = N * chunks_per_cloud;
  for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
    const int64_t n = chunk / chunks_per_cloud;
    const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
    int64_t p1 = start_point + threadIdx.x;
    if (p1 >= lengths1[n])
      continue;
    for (int d = 0; d < D; ++d) {
      cur_point[d] = points1[n * P1 * D + p1 * D + d];
    }
    int64_t length2 = lengths2[n];
    RegisterMinK<scalar_t, int, K> mink(min_dists, min_idxs);
    for (int p2 = 0; p2 < length2; ++p2) {
      scalar_t dist = 0;
      for (int d = 0; d < D; ++d) {
        int offset = n * P2 * D + p2 * D + d;
        scalar_t diff = cur_point[d] - points2[offset];
        scalar_t norm_diff = (norm == 2) ? (diff * diff) : abs(diff);
        dist += norm_diff;
      }
      mink.add(dist, p2);
    }
    for (int k = 0; k < mink.size(); ++k) {
      idxs[n * P1 * K + p1 * K + k] = min_idxs[k];
      dists[n * P1 * K + p1 * K + k] = min_dists[k];
    }
  }
}

// This is a shim so we can dispatch using DispatchKernel2D
template <typename scalar_t, int64_t D, int64_t K>
struct KNearestNeighborKernelV3Functor {
  static void run(
      size_t blocks,
      size_t threads,
      const scalar_t* __restrict__ points1,
      const scalar_t* __restrict__ points2,
      const int64_t* __restrict__ lengths1,
      const int64_t* __restrict__ lengths2,
      scalar_t* __restrict__ dists,
      int64_t* __restrict__ idxs,
      const size_t N,
      const size_t P1,
      const size_t P2,
      const size_t norm) {
    hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    hipLaunchKernelGGL(( KNearestNeighborKernelV3<scalar_t, D, K>), dim3(blocks), dim3(threads), 0, stream, 
        points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2, norm);
  }
};

// Template-instantiation bounds for each kernel version; a (D, K) pair outside
// these ranges falls back to a lower version (ultimately V0, which is fully
// dynamic).
constexpr int V1_MIN_D = 1;
constexpr int V1_MAX_D = 32;
constexpr int V2_MIN_D = 1;
constexpr int V2_MAX_D = 8;
constexpr int V2_MIN_K = 1;
constexpr int V2_MAX_K = 32;
constexpr int V3_MIN_D = 1;
constexpr int V3_MAX_D = 8;
constexpr int V3_MIN_K = 1;
constexpr int V3_MAX_K = 4;

// True when min <= x <= max (inclusive on both ends).
bool InBounds(const int64_t min, const int64_t x, const int64_t max) {
  return min <= x && x <= max;
}

// True when the given kernel version can handle this (D, K) combination.
bool KnnCheckVersion(int version, const int64_t D, const int64_t K) {
  if (version == 0) {
    return true;
  } else if (version == 1) {
    return InBounds(V1_MIN_D, D, V1_MAX_D);
  } else if (version == 2) {
    return InBounds(V2_MIN_D, D, V2_MAX_D) && InBounds(V2_MIN_K, K, V2_MAX_K);
  } else if (version == 3) {
    return InBounds(V3_MIN_D, D, V3_MAX_D) && InBounds(V3_MIN_K, K, V3_MAX_K);
  }
  return false;
}

// Pick the highest (most specialized) version compatible with (D, K).
int ChooseVersion(const int64_t D, const int64_t K) {
  for (int version = 3; version >= 1; version--) {
    if (KnnCheckVersion(version, D, K)) {
      return version;
    }
  }
  return 0;
}

// Host entry point: validates inputs, allocates (N, P1, K) outputs, selects a
// kernel version, and dispatches over floating dtypes.  Returns (idxs, dists).
std::tuple<at::Tensor, at::Tensor> KNearestNeighborIdxCuda(
    const at::Tensor& p1,
    const at::Tensor& p2,
    const at::Tensor& lengths1,
    const at::Tensor& lengths2,
    const int norm,
    const int K,
    int version) {
  // Check inputs are on the same device
  at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2},
      lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4};
  at::CheckedFrom c = "KNearestNeighborIdxCuda";
  at::checkAllSameGPU(c, {p1_t, p2_t, lengths1_t, lengths2_t});
  at::checkAllSameType(c, {p1_t, p2_t});

  // Set the device for the kernel launch based on the device of the input
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(p1.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  const auto N = p1.size(0);
  const auto P1 = p1.size(1);
  const auto P2 = p2.size(1);
  const auto D = p2.size(2);
  const int64_t K_64 = K;

  TORCH_CHECK((norm == 1) || (norm == 2), "Norm must be 1 or 2.");

  TORCH_CHECK(p2.size(2) == D, "Point sets must have the same last dimension");
  auto long_dtype = lengths1.options().dtype(at::kLong);
  auto idxs = at::zeros({N, P1, K}, long_dtype);
  auto dists = at::zeros({N, P1, K}, p1.options());

  // Empty output: nothing to launch.
  if (idxs.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return std::make_tuple(idxs, dists);
  }

  if (version < 0) {
    version = ChooseVersion(D, K);
  } else if (!KnnCheckVersion(version, D, K)) {
    int new_version = ChooseVersion(D, K);
    std::cout << "WARNING: Requested KNN version " << version
              << " is not compatible with D = " << D << "; K = " << K
              << ". Falling back to version = " << new_version << std::endl;
    version = new_version;
  }

  // At this point we should have a valid version no matter what data the user
  // gave us. But we can check once more to be sure; however this time
  // assert fail since failing at this point means we have a bug in our version
  // selection or checking code.
  AT_ASSERTM(KnnCheckVersion(version, D, K), "Invalid version");

  const size_t threads = 256;
  const size_t blocks = 256;
  if (version == 0) {
    AT_DISPATCH_FLOATING_TYPES(
        p1.scalar_type(), "knn_kernel_cuda", ([&] {
          hipLaunchKernelGGL(( KNearestNeighborKernelV0<scalar_t>), dim3(blocks), dim3(threads), 0, stream, 
              p1.contiguous().data_ptr<scalar_t>(),
              p2.contiguous().data_ptr<scalar_t>(),
              lengths1.contiguous().data_ptr<int64_t>(),
              lengths2.contiguous().data_ptr<int64_t>(),
              dists.data_ptr<scalar_t>(),
              idxs.data_ptr<int64_t>(),
              N,
              P1,
              P2,
              D,
              K,
              norm);
        }));
  } else if (version == 1) {
    AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] {
                                 DispatchKernel1D<
                                     KNearestNeighborV1Functor,
                                     scalar_t,
                                     V1_MIN_D,
                                     V1_MAX_D>(
                                     D,
                                     blocks,
                                     threads,
                                     p1.contiguous().data_ptr<scalar_t>(),
                                     p2.contiguous().data_ptr<scalar_t>(),
                                     lengths1.contiguous().data_ptr<int64_t>(),
                                     lengths2.contiguous().data_ptr<int64_t>(),
                                     dists.data_ptr<scalar_t>(),
                                     idxs.data_ptr<int64_t>(),
                                     N,
                                     P1,
                                     P2,
                                     K,
                                     norm);
                               }));
  } else if (version == 2) {
    AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] {
                                 DispatchKernel2D<
                                     KNearestNeighborKernelV2Functor,
                                     scalar_t,
                                     V2_MIN_D,
                                     V2_MAX_D,
                                     V2_MIN_K,
                                     V2_MAX_K>(
                                     D,
                                     K_64,
                                     blocks,
                                     threads,
                                     p1.contiguous().data_ptr<scalar_t>(),
                                     p2.contiguous().data_ptr<scalar_t>(),
                                     lengths1.contiguous().data_ptr<int64_t>(),
                                     lengths2.contiguous().data_ptr<int64_t>(),
                                     dists.data_ptr<scalar_t>(),
                                     idxs.data_ptr<int64_t>(),
                                     N,
                                     P1,
                                     P2,
                                     norm);
                               }));
  } else if (version == 3) {
    AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] {
                                 DispatchKernel2D<
                                     KNearestNeighborKernelV3Functor,
                                     scalar_t,
                                     V3_MIN_D,
                                     V3_MAX_D,
                                     V3_MIN_K,
                                     V3_MAX_K>(
                                     D,
                                     K_64,
                                     blocks,
                                     threads,
                                     p1.contiguous().data_ptr<scalar_t>(),
                                     p2.contiguous().data_ptr<scalar_t>(),
                                     lengths1.contiguous().data_ptr<int64_t>(),
                                     lengths2.contiguous().data_ptr<int64_t>(),
                                     dists.data_ptr<scalar_t>(),
                                     idxs.data_ptr<int64_t>(),
                                     N,
                                     P1,
                                     P2,
                                     norm);
                               }));
  }
  AT_CUDA_CHECK(hipGetLastError());
  return std::make_tuple(idxs, dists);
}

// ------------------------------------------------------------- //
//                   Backward Operators                          //
// ------------------------------------------------------------- //

// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.

// Backward pass: one (point, neighbor, dim) element per grid-stride iteration.
// Gradients are scattered into grad_p1 / grad_p2 with atomicAdd because
// multiple neighbors can touch the same p2 point.
__global__ void KNearestNeighborBackwardKernel(
    const float* __restrict__ p1, // (N, P1, D)
    const float* __restrict__ p2, // (N, P2, D)
    const int64_t* __restrict__ lengths1, // (N,)
    const int64_t* __restrict__ lengths2, // (N,)
    const int64_t* __restrict__ idxs, // (N, P1, K)
    const float* __restrict__ grad_dists, // (N, P1, K)
    float* __restrict__ grad_p1, // (N, P1, D)
    float* __restrict__ grad_p2, // (N, P2, D)
    const size_t N,
    const size_t P1,
    const size_t P2,
    const size_t K,
    const size_t D,
    const size_t norm) {
  const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  const size_t stride = gridDim.x * blockDim.x;

  for (size_t i = tid; i < N * P1 * K * D; i += stride) {
    const size_t n = i / (P1 * K * D); // batch index
    size_t rem = i % (P1 * K * D);
    const size_t p1_idx = rem / (K * D); // index of point in p1
    rem = rem % (K * D);
    const size_t k = rem / D; // k-th nearest neighbor
    const size_t d = rem % D; // d-th dimension in the feature vector

    const size_t num1 = lengths1[n]; // number of valid points in p1 in batch
    const size_t num2 = lengths2[n]; // number of valid points in p2 in batch
    if ((p1_idx < num1) && (k < num2)) {
      const float grad_dist = grad_dists[n * P1 * K + p1_idx * K + k];
      // index of point in p2 corresponding to the k-th nearest neighbor
      const size_t p2_idx = idxs[n * P1 * K + p1_idx * K + k];
      // If the index is the pad value of -1 then ignore it
      if (p2_idx == -1) {
        continue;
      }
      float diff = 0.0;
      if (norm == 1) {
        // L1: gradient is +/- grad_dist depending on the sign of (p1 - p2).
        float sign = (p1[n * P1 * D + p1_idx * D + d] >
                      p2[n * P2 * D + p2_idx * D + d])
            ? 1.0
            : -1.0;
        diff = grad_dist * sign;
      } else { // norm is 2
        diff = 2.0 * grad_dist *
            (p1[n * P1 * D + p1_idx * D + d] - p2[n * P2 * D + p2_idx * D + d]);
      }
      atomicAdd(grad_p1 + n * P1 * D + p1_idx * D + d, diff);
      atomicAdd(grad_p2 + n * P2 * D + p2_idx * D + d, -1.0f * diff);
    }
  }
}

// Host entry point for the backward pass.  Validates shapes, zero-initializes
// (N, P1, D) and (N, P2, D) gradient tensors, and launches the backward
// kernel.  Returns (grad_p1, grad_p2).  Float-only (see TODO above).
std::tuple<at::Tensor, at::Tensor> KNearestNeighborBackwardCuda(
    const at::Tensor& p1,
    const at::Tensor& p2,
    const at::Tensor& lengths1,
    const at::Tensor& lengths2,
    const at::Tensor& idxs,
    int norm,
    const at::Tensor& grad_dists) {
  // Check inputs are on the same device
  at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2},
      lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4},
      idxs_t{idxs, "idxs", 5}, grad_dists_t{grad_dists, "grad_dists", 6};
  at::CheckedFrom c = "KNearestNeighborBackwardCuda";
  at::checkAllSameGPU(
      c, {p1_t, p2_t, lengths1_t, lengths2_t, idxs_t, grad_dists_t});
  at::checkAllSameType(c, {p1_t, p2_t, grad_dists_t});

  // Set the device for the kernel launch based on the device of the input
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(p1.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  const auto N = p1.size(0);
  const auto P1 = p1.size(1);
  const auto P2 = p2.size(1);
  const auto D = p2.size(2);
  const auto K = idxs.size(2);

  TORCH_CHECK(p1.size(2) == D, "Point sets must have the same last dimension");
  TORCH_CHECK(idxs.size(0) == N, "KNN idxs must have the same batch dimension");
  TORCH_CHECK(
      idxs.size(1) == P1, "KNN idxs must have the same point dimension as p1");
  TORCH_CHECK(grad_dists.size(0) == N);
  TORCH_CHECK(grad_dists.size(1) == P1);
  TORCH_CHECK(grad_dists.size(2) == K);

  auto grad_p1 = at::zeros({N, P1, D}, p1.options());
  auto grad_p2 = at::zeros({N, P2, D}, p2.options());

  // Empty gradients: nothing to launch.
  if (grad_p1.numel() == 0 || grad_p2.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return std::make_tuple(grad_p1, grad_p2);
  }

  const int blocks = 64;
  const int threads = 512;

  hipLaunchKernelGGL(( KNearestNeighborBackwardKernel), dim3(blocks), dim3(threads), 0, stream, 
      p1.contiguous().data_ptr<float>(),
      p2.contiguous().data_ptr<float>(),
      lengths1.contiguous().data_ptr<int64_t>(),
      lengths2.contiguous().data_ptr<int64_t>(),
      idxs.contiguous().data_ptr<int64_t>(),
      grad_dists.contiguous().data_ptr<float>(),
      grad_p1.data_ptr<float>(),
      grad_p2.data_ptr<float>(),
      N,
      P1,
      P2,
      K,
      D,
      norm);

  AT_CUDA_CHECK(hipGetLastError());
  return std::make_tuple(grad_p1, grad_p2);
}
a6b22acd5c852077a833458f02d51406b89eb5d2.cu
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <float.h>
#include <iostream>
#include <tuple>

#include "utils/dispatch.cuh"
#include "utils/mink.cuh"

// A chunk of work is blocksize-many points of P1.
// The number of potential chunks to do is N*(1+(P1-1)/blocksize)
// call (1+(P1-1)/blocksize) chunks_per_cloud
// These chunks are divided among the gridSize-many blocks.
// In block b, we work on chunks b, b+gridSize, b+2*gridSize etc .
// In chunk i, we work on cloud i/chunks_per_cloud on points starting from
// blocksize*(i%chunks_per_cloud).

// Brute-force KNN, version 0: one thread per query point, chunk-strided over
// the grid.  The running best-K (dists, idxs) live in global memory via MinK,
// so this version works for any D and K.  norm selects L2 (squared) when 2,
// otherwise L1 (abs).
template <typename scalar_t>
__global__ void KNearestNeighborKernelV0(
    const scalar_t* __restrict__ points1,
    const scalar_t* __restrict__ points2,
    const int64_t* __restrict__ lengths1,
    const int64_t* __restrict__ lengths2,
    scalar_t* __restrict__ dists,
    int64_t* __restrict__ idxs,
    const size_t N,
    const size_t P1,
    const size_t P2,
    const size_t D,
    const size_t K,
    const size_t norm) {
  // Store both dists and indices for knn in global memory.
  const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x);
  const int64_t chunks_to_do = N * chunks_per_cloud;
  for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
    const int64_t n = chunk / chunks_per_cloud;
    const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
    int64_t p1 = start_point + threadIdx.x;
    if (p1 >= lengths1[n])
      continue;
    // NOTE(review): int offset may overflow when N*P1*K exceeds INT_MAX;
    // confirm expected problem sizes upstream.
    int offset = n * P1 * K + p1 * K;
    int64_t length2 = lengths2[n];
    MinK<scalar_t, int64_t> mink(dists + offset, idxs + offset, K);
    for (int p2 = 0; p2 < length2; ++p2) {
      // Find the distance between points1[n, p1] and points[n, p2]
      scalar_t dist = 0;
      for (int d = 0; d < D; ++d) {
        scalar_t coord1 = points1[n * P1 * D + p1 * D + d];
        scalar_t coord2 = points2[n * P2 * D + p2 * D + d];
        scalar_t diff = coord1 - coord2;
        scalar_t norm_diff = (norm == 2) ? (diff * diff) : abs(diff);
        dist += norm_diff;
      }
      mink.add(dist, p2);
    }
  }
}

// Version 1: D is a template parameter so the query point can be cached in a
// thread-local array; best-K results still live in global memory.
template <typename scalar_t, int64_t D>
__global__ void KNearestNeighborKernelV1(
    const scalar_t* __restrict__ points1,
    const scalar_t* __restrict__ points2,
    const int64_t* __restrict__ lengths1,
    const int64_t* __restrict__ lengths2,
    scalar_t* __restrict__ dists,
    int64_t* __restrict__ idxs,
    const size_t N,
    const size_t P1,
    const size_t P2,
    const size_t K,
    const size_t norm) {
  // Same idea as the previous version, but hoist D into a template argument
  // so we can cache the current point in a thread-local array. We still store
  // the current best K dists and indices in global memory, so this should work
  // for very large K and fairly large D.
  scalar_t cur_point[D];
  const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x);
  const int64_t chunks_to_do = N * chunks_per_cloud;
  for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
    const int64_t n = chunk / chunks_per_cloud;
    const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
    int64_t p1 = start_point + threadIdx.x;
    if (p1 >= lengths1[n])
      continue;
    for (int d = 0; d < D; ++d) {
      cur_point[d] = points1[n * P1 * D + p1 * D + d];
    }
    int offset = n * P1 * K + p1 * K;
    int64_t length2 = lengths2[n];
    MinK<scalar_t, int64_t> mink(dists + offset, idxs + offset, K);
    for (int p2 = 0; p2 < length2; ++p2) {
      // Find the distance between cur_point and points[n, p2]
      scalar_t dist = 0;
      for (int d = 0; d < D; ++d) {
        scalar_t diff = cur_point[d] - points2[n * P2 * D + p2 * D + d];
        scalar_t norm_diff = (norm == 2) ? (diff * diff) : abs(diff);
        dist += norm_diff;
      }
      mink.add(dist, p2);
    }
  }
}

// This is a shim functor to allow us to dispatch using DispatchKernel1D
template <typename scalar_t, int64_t D>
struct KNearestNeighborV1Functor {
  static void run(
      size_t blocks,
      size_t threads,
      const scalar_t* __restrict__ points1,
      const scalar_t* __restrict__ points2,
      const int64_t* __restrict__ lengths1,
      const int64_t* __restrict__ lengths2,
      scalar_t* __restrict__ dists,
      int64_t* __restrict__ idxs,
      const size_t N,
      const size_t P1,
      const size_t P2,
      const size_t K,
      const size_t norm) {
    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    KNearestNeighborKernelV1<scalar_t, D><<<blocks, threads, 0, stream>>>(
        points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2, K, norm);
  }
};

// Version 2: both D and K are template parameters, so the best-K scratch
// arrays are thread-local as well; results are copied to global memory at the
// end of each chunk.
template <typename scalar_t, int64_t D, int64_t K>
__global__ void KNearestNeighborKernelV2(
    const scalar_t* __restrict__ points1,
    const scalar_t* __restrict__ points2,
    const int64_t* __restrict__ lengths1,
    const int64_t* __restrict__ lengths2,
    scalar_t* __restrict__ dists,
    int64_t* __restrict__ idxs,
    const int64_t N,
    const int64_t P1,
    const int64_t P2,
    const size_t norm) {
  // Same general implementation as V2, but also hoist K into a template arg.
  scalar_t cur_point[D];
  scalar_t min_dists[K];
  int min_idxs[K];
  const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x);
  const int64_t chunks_to_do = N * chunks_per_cloud;
  for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
    const int64_t n = chunk / chunks_per_cloud;
    const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
    int64_t p1 = start_point + threadIdx.x;
    if (p1 >= lengths1[n])
      continue;
    for (int d = 0; d < D; ++d) {
      cur_point[d] = points1[n * P1 * D + p1 * D + d];
    }
    int64_t length2 = lengths2[n];
    MinK<scalar_t, int> mink(min_dists, min_idxs, K);
    for (int p2 = 0; p2 < length2; ++p2) {
      scalar_t dist = 0;
      for (int d = 0; d < D; ++d) {
        int offset = n * P2 * D + p2 * D + d;
        scalar_t diff = cur_point[d] - points2[offset];
        scalar_t norm_diff = (norm == 2) ? (diff * diff) : abs(diff);
        dist += norm_diff;
      }
      mink.add(dist, p2);
    }
    // Flush thread-local best-K to global output.
    for (int k = 0; k < mink.size(); ++k) {
      idxs[n * P1 * K + p1 * K + k] = min_idxs[k];
      dists[n * P1 * K + p1 * K + k] = min_dists[k];
    }
  }
}

// This is a shim so we can dispatch using DispatchKernel2D
template <typename scalar_t, int64_t D, int64_t K>
struct KNearestNeighborKernelV2Functor {
  static void run(
      size_t blocks,
      size_t threads,
      const scalar_t* __restrict__ points1,
      const scalar_t* __restrict__ points2,
      const int64_t* __restrict__ lengths1,
      const int64_t* __restrict__ lengths2,
      scalar_t* __restrict__ dists,
      int64_t* __restrict__ idxs,
      const int64_t N,
      const int64_t P1,
      const int64_t P2,
      const size_t norm) {
    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    KNearestNeighborKernelV2<scalar_t, D, K><<<blocks, threads, 0, stream>>>(
        points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2, norm);
  }
};

template <typename scalar_t, int D, int K>
__global__ void KNearestNeighborKernelV3(
    const scalar_t* __restrict__ points1,
    const scalar_t* __restrict__ points2,
    const int64_t* __restrict__ lengths1,
    const int64_t* __restrict__ lengths2,
    scalar_t* __restrict__ dists,
    int64_t* __restrict__ idxs,
    const size_t N,
    const size_t P1,
    const size_t P2,
    const size_t norm) {
  // Same idea as V2, but use register indexing for thread-local arrays.
  // Enabling sorting for this version leads to huge slowdowns; I suspect
  // that it forces min_dists into local memory rather than registers.
  // As a result this version is always unsorted.
  scalar_t cur_point[D];
  scalar_t min_dists[K];
  int min_idxs[K];
  const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x);
  const int64_t chunks_to_do = N * chunks_per_cloud;
  for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
    const int64_t n = chunk / chunks_per_cloud;
    const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
    int64_t p1 = start_point + threadIdx.x;
    if (p1 >= lengths1[n])
      continue;
    for (int d = 0; d < D; ++d) {
      cur_point[d] = points1[n * P1 * D + p1 * D + d];
    }
    int64_t length2 = lengths2[n];
    RegisterMinK<scalar_t, int, K> mink(min_dists, min_idxs);
    for (int p2 = 0; p2 < length2; ++p2) {
      scalar_t dist = 0;
      for (int d = 0; d < D; ++d) {
        int offset = n * P2 * D + p2 * D + d;
        scalar_t diff = cur_point[d] - points2[offset];
        scalar_t norm_diff = (norm == 2) ? (diff * diff) : abs(diff);
        dist += norm_diff;
      }
      mink.add(dist, p2);
    }
    for (int k = 0; k < mink.size(); ++k) {
      idxs[n * P1 * K + p1 * K + k] = min_idxs[k];
      dists[n * P1 * K + p1 * K + k] = min_dists[k];
    }
  }
}

// This is a shim so we can dispatch using DispatchKernel2D
template <typename scalar_t, int64_t D, int64_t K>
struct KNearestNeighborKernelV3Functor {
  static void run(
      size_t blocks,
      size_t threads,
      const scalar_t* __restrict__ points1,
      const scalar_t* __restrict__ points2,
      const int64_t* __restrict__ lengths1,
      const int64_t* __restrict__ lengths2,
      scalar_t* __restrict__ dists,
      int64_t* __restrict__ idxs,
      const size_t N,
      const size_t P1,
      const size_t P2,
      const size_t norm) {
    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    KNearestNeighborKernelV3<scalar_t, D, K><<<blocks, threads, 0, stream>>>(
        points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2, norm);
  }
};

// Template-instantiation bounds for each kernel version; a (D, K) pair outside
// these ranges falls back to a lower version (ultimately V0, which is fully
// dynamic).
constexpr int V1_MIN_D = 1;
constexpr int V1_MAX_D = 32;
constexpr int V2_MIN_D = 1;
constexpr int V2_MAX_D = 8;
constexpr int V2_MIN_K = 1;
constexpr int V2_MAX_K = 32;
constexpr int V3_MIN_D = 1;
constexpr int V3_MAX_D = 8;
constexpr int V3_MIN_K = 1;
constexpr int V3_MAX_K = 4;

// True when min <= x <= max (inclusive on both ends).
bool InBounds(const int64_t min, const int64_t x, const int64_t max) {
  return min <= x && x <= max;
}

// True when the given kernel version can handle this (D, K) combination.
bool KnnCheckVersion(int version, const int64_t D, const int64_t K) {
  if (version == 0) {
    return true;
  } else if (version == 1) {
    return InBounds(V1_MIN_D, D, V1_MAX_D);
  } else if (version == 2) {
    return InBounds(V2_MIN_D, D, V2_MAX_D) && InBounds(V2_MIN_K, K, V2_MAX_K);
  } else if (version == 3) {
    return InBounds(V3_MIN_D, D, V3_MAX_D) && InBounds(V3_MIN_K, K, V3_MAX_K);
  }
  return false;
}

// Pick the highest (most specialized) version compatible with (D, K).
int ChooseVersion(const int64_t D, const int64_t K) {
  for (int version = 3; version >= 1; version--) {
    if (KnnCheckVersion(version, D, K)) {
      return version;
    }
  }
  return 0;
}

// Host entry point: validates inputs, allocates (N, P1, K) outputs, selects a
// kernel version, and dispatches over floating dtypes.  Returns (idxs, dists).
std::tuple<at::Tensor, at::Tensor> KNearestNeighborIdxCuda(
    const at::Tensor& p1,
    const at::Tensor& p2,
    const at::Tensor& lengths1,
    const at::Tensor& lengths2,
    const int norm,
    const int K,
    int version) {
  // Check inputs are on the same device
  at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2},
      lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4};
  at::CheckedFrom c = "KNearestNeighborIdxCuda";
  at::checkAllSameGPU(c, {p1_t, p2_t, lengths1_t, lengths2_t});
  at::checkAllSameType(c, {p1_t, p2_t});

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(p1.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  const auto N = p1.size(0);
  const auto P1 = p1.size(1);
  const auto P2 = p2.size(1);
  const auto D = p2.size(2);
  const int64_t K_64 = K;

  TORCH_CHECK((norm == 1) || (norm == 2), "Norm must be 1 or 2.");

  TORCH_CHECK(p2.size(2) == D, "Point sets must have the same last dimension");
  auto long_dtype = lengths1.options().dtype(at::kLong);
  auto idxs = at::zeros({N, P1, K}, long_dtype);
  auto dists = at::zeros({N, P1, K}, p1.options());

  // Empty output: nothing to launch.
  if (idxs.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(idxs, dists);
  }

  if (version < 0) {
    version = ChooseVersion(D, K);
  } else if (!KnnCheckVersion(version, D, K)) {
    int new_version = ChooseVersion(D, K);
    std::cout << "WARNING: Requested KNN version " << version
              << " is not compatible with D = " << D << "; K = " << K
              << ". Falling back to version = " << new_version << std::endl;
    version = new_version;
  }

  // At this point we should have a valid version no matter what data the user
  // gave us. But we can check once more to be sure; however this time
  // assert fail since failing at this point means we have a bug in our version
  // selection or checking code.
  AT_ASSERTM(KnnCheckVersion(version, D, K), "Invalid version");

  const size_t threads = 256;
  const size_t blocks = 256;
  if (version == 0) {
    AT_DISPATCH_FLOATING_TYPES(
        p1.scalar_type(), "knn_kernel_cuda", ([&] {
          KNearestNeighborKernelV0<scalar_t><<<blocks, threads, 0, stream>>>(
              p1.contiguous().data_ptr<scalar_t>(),
              p2.contiguous().data_ptr<scalar_t>(),
              lengths1.contiguous().data_ptr<int64_t>(),
              lengths2.contiguous().data_ptr<int64_t>(),
              dists.data_ptr<scalar_t>(),
              idxs.data_ptr<int64_t>(),
              N,
              P1,
              P2,
              D,
              K,
              norm);
        }));
  } else if (version == 1) {
    AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] {
                                 DispatchKernel1D<
                                     KNearestNeighborV1Functor,
                                     scalar_t,
                                     V1_MIN_D,
                                     V1_MAX_D>(
                                     D,
                                     blocks,
                                     threads,
                                     p1.contiguous().data_ptr<scalar_t>(),
                                     p2.contiguous().data_ptr<scalar_t>(),
                                     lengths1.contiguous().data_ptr<int64_t>(),
                                     lengths2.contiguous().data_ptr<int64_t>(),
                                     dists.data_ptr<scalar_t>(),
                                     idxs.data_ptr<int64_t>(),
                                     N,
                                     P1,
                                     P2,
                                     K,
                                     norm);
                               }));
  } else if (version == 2) {
    AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] {
                                 DispatchKernel2D<
                                     KNearestNeighborKernelV2Functor,
                                     scalar_t,
                                     V2_MIN_D,
                                     V2_MAX_D,
                                     V2_MIN_K,
                                     V2_MAX_K>(
                                     D,
                                     K_64,
                                     blocks,
                                     threads,
                                     p1.contiguous().data_ptr<scalar_t>(),
                                     p2.contiguous().data_ptr<scalar_t>(),
                                     lengths1.contiguous().data_ptr<int64_t>(),
                                     lengths2.contiguous().data_ptr<int64_t>(),
                                     dists.data_ptr<scalar_t>(),
                                     idxs.data_ptr<int64_t>(),
                                     N,
                                     P1,
                                     P2,
                                     norm);
                               }));
  } else if (version == 3) {
    AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] {
                                 DispatchKernel2D<
                                     KNearestNeighborKernelV3Functor,
                                     scalar_t,
                                     V3_MIN_D,
                                     V3_MAX_D,
                                     V3_MIN_K,
                                     V3_MAX_K>(
                                     D,
                                     K_64,
                                     blocks,
                                     threads,
                                     p1.contiguous().data_ptr<scalar_t>(),
                                     p2.contiguous().data_ptr<scalar_t>(),
                                     lengths1.contiguous().data_ptr<int64_t>(),
                                     lengths2.contiguous().data_ptr<int64_t>(),
                                     dists.data_ptr<scalar_t>(),
                                     idxs.data_ptr<int64_t>(),
                                     N,
                                     P1,
                                     P2,
                                     norm);
                               }));
  }
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(idxs, dists);
}

// ------------------------------------------------------------- //
//                   Backward Operators                          //
// ------------------------------------------------------------- //

// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.

// Backward pass: one (point, neighbor, dim) element per grid-stride iteration.
// Gradients are scattered into grad_p1 / grad_p2 with atomicAdd because
// multiple neighbors can touch the same p2 point.
__global__ void KNearestNeighborBackwardKernel(
    const float* __restrict__ p1, // (N, P1, D)
    const float* __restrict__ p2, // (N, P2, D)
    const int64_t* __restrict__ lengths1, // (N,)
    const int64_t* __restrict__ lengths2, // (N,)
    const int64_t* __restrict__ idxs, // (N, P1, K)
    const float* __restrict__ grad_dists, // (N, P1, K)
    float* __restrict__ grad_p1, // (N, P1, D)
    float* __restrict__ grad_p2, // (N, P2, D)
    const size_t N,
    const size_t P1,
    const size_t P2,
    const size_t K,
    const size_t D,
    const size_t norm) {
  const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  const size_t stride = gridDim.x * blockDim.x;

  for (size_t i = tid; i < N * P1 * K * D; i += stride) {
    const size_t n = i / (P1 * K * D); // batch index
    size_t rem = i % (P1 * K * D);
    const size_t p1_idx = rem / (K * D); // index of point in p1
    rem = rem % (K * D);
    const size_t k = rem / D; // k-th nearest neighbor
    const size_t d = rem % D; // d-th dimension in the feature vector

    const size_t num1 = lengths1[n]; // number of valid points in p1 in batch
    const size_t num2 = lengths2[n]; // number of valid points in p2 in batch
    if ((p1_idx < num1) && (k < num2)) {
      const float grad_dist = grad_dists[n * P1 * K + p1_idx * K + k];
      // index of point in p2 corresponding to the k-th nearest neighbor
      const size_t p2_idx = idxs[n * P1 * K + p1_idx * K + k];
      // If the index is the pad value of -1 then ignore it
      if (p2_idx == -1) {
        continue;
      }
      float diff = 0.0;
      if (norm == 1) {
        // L1: gradient is +/- grad_dist depending on the sign of (p1 - p2).
        float sign = (p1[n * P1 * D + p1_idx * D + d] >
                      p2[n * P2 * D + p2_idx * D + d])
            ? 1.0
            : -1.0;
        diff = grad_dist * sign;
      } else { // norm is 2
        diff = 2.0 * grad_dist *
            (p1[n * P1 * D + p1_idx * D + d] - p2[n * P2 * D + p2_idx * D + d]);
      }
      atomicAdd(grad_p1 + n * P1 * D + p1_idx * D + d, diff);
      atomicAdd(grad_p2 + n * P2 * D + p2_idx * D + d, -1.0f * diff);
    }
  }
}

// Host entry point for the backward pass.  Validates shapes, zero-initializes
// (N, P1, D) and (N, P2, D) gradient tensors, and launches the backward
// kernel.  Returns (grad_p1, grad_p2).  Float-only (see TODO above).
std::tuple<at::Tensor, at::Tensor> KNearestNeighborBackwardCuda(
    const at::Tensor& p1,
    const at::Tensor& p2,
    const at::Tensor& lengths1,
    const at::Tensor& lengths2,
    const at::Tensor& idxs,
    int norm,
    const at::Tensor& grad_dists) {
  // Check inputs are on the same device
  at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2},
      lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4},
      idxs_t{idxs, "idxs", 5}, grad_dists_t{grad_dists, "grad_dists", 6};
  at::CheckedFrom c = "KNearestNeighborBackwardCuda";
  at::checkAllSameGPU(
      c, {p1_t, p2_t, lengths1_t, lengths2_t, idxs_t, grad_dists_t});
  at::checkAllSameType(c, {p1_t, p2_t, grad_dists_t});

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(p1.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  const auto N = p1.size(0);
  const auto P1 = p1.size(1);
  const auto P2 = p2.size(1);
  const auto D = p2.size(2);
  const auto K = idxs.size(2);

  TORCH_CHECK(p1.size(2) == D, "Point sets must have the same last dimension");
  TORCH_CHECK(idxs.size(0) == N, "KNN idxs must have the same batch dimension");
  TORCH_CHECK(
      idxs.size(1) == P1, "KNN idxs must have the same point dimension as p1");
  TORCH_CHECK(grad_dists.size(0) == N);
  TORCH_CHECK(grad_dists.size(1) == P1);
  TORCH_CHECK(grad_dists.size(2) == K);

  auto grad_p1 = at::zeros({N, P1, D}, p1.options());
  auto grad_p2 = at::zeros({N, P2, D}, p2.options());

  // Empty gradients: nothing to launch.
  if (grad_p1.numel() == 0 || grad_p2.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(grad_p1, grad_p2);
  }

  const int blocks = 64;
  const int threads = 512;

  KNearestNeighborBackwardKernel<<<blocks, threads, 0, stream>>>(
      p1.contiguous().data_ptr<float>(),
      p2.contiguous().data_ptr<float>(),
      lengths1.contiguous().data_ptr<int64_t>(),
      lengths2.contiguous().data_ptr<int64_t>(),
      idxs.contiguous().data_ptr<int64_t>(),
      grad_dists.contiguous().data_ptr<float>(),
      grad_p1.data_ptr<float>(),
      grad_p2.data_ptr<float>(),
      N,
      P1,
      P2,
      K,
      D,
      norm);

  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(grad_p1, grad_p2);
}
da8fbf25eaf58269fc0dd153ad287c90fb674e43.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
#include <cmath>

// Wall-clock markers: (begin1, end1) bracket the GPU kernel,
// (begin2, end2) bracket the CPU reference loop.
clock_t begin1, begin2, end1, end2;

// Matrices are stored row-major:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
	int width;
	int height;
	float *elements;
} Matrix;

#define BLOCK_SIZE 32

// Forward declaration of the multiplication kernel.
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// For simplicity we assume every matrix dimension is an exact
// multiple of BLOCK_SIZE (no tail blocks are launched).

// Host wrapper: computes C = A * B on the GPU and times the kernel.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
	// Stage A in device global memory.
	Matrix dA;
	dA.width = A.width;
	dA.height = A.height;
	size_t bytes = A.width * A.height * sizeof(float);
	hipMalloc((void **)&dA.elements, bytes);
	hipMemcpy(dA.elements, A.elements, bytes, hipMemcpyHostToDevice);

	// Stage B in device global memory.
	Matrix dB;
	dB.width = B.width;
	dB.height = B.height;
	bytes = B.width * B.height * sizeof(float);
	hipMalloc((void **)&dB.elements, bytes);
	hipMemcpy(dB.elements, B.elements, bytes, hipMemcpyHostToDevice);

	// Allocate the result matrix C on the device.
	Matrix dC;
	dC.width = C.width;
	dC.height = C.height;
	bytes = C.width * C.height * sizeof(float);
	hipMalloc((void **)&dC.elements, bytes);

	// Launch configuration; time launch + completion.
	dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
	//dim3 grid(B.width / threads.x, A.height / threads.y);
	dim3 grid(B.width / threads.x, A.height / threads.y);
	begin1 = clock();
	MatMulKernel<<<grid, threads>>>(dA, dB, dC);
	hipDeviceSynchronize();
	end1 = clock();

	// Retrieve the computed C from device global memory.
	hipMemcpy(C.elements, dC.elements, bytes, hipMemcpyDeviceToHost);

	// Release device buffers.
	hipFree(dA.elements);
	hipFree(dB.elements);
	hipFree(dC.elements);
}

// Kernel: each thread computes one element of C, accumulating
// the row-by-column dot product in a register.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
	float acc = 0;
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	for (int e = 0; e < A.width; ++e)
		acc += A.elements[row * A.width + e] * B.elements[e * B.width + col];
	//acc = powf(acc, 10);
	C.elements[row * C.width + col] = acc;
}

// Allocate an uninitialized host matrix of the given dimensions.
Matrix newMatrix(int row, int col)
{
	Matrix m;
	m.width = row;
	m.height = col;
	m.elements = (float*)malloc(row * col * sizeof(float));
	return m;
}

int main(int argc, char** argv)
{
	int N = 960;
	Matrix A = newMatrix(N, N);
	Matrix B = newMatrix(N, N);
	Matrix C = newMatrix(N, N);
	Matrix D = newMatrix(N, N);

	// Constant fill so the GPU and CPU products are directly comparable.
	for (int r = 0; r < N; r++) {
		for (int c = 0; c < N; c++) {
			A.elements[r*N + c] = 2.45317621124123;
			B.elements[r*N + c] = 2.54493874134242;
		}
	}

	//begin1 = clock();
	MatMul(A, B, C);
	//end1 = clock();

	// Reference multiplication on the CPU.
	begin2 = clock();
	float suma;
	for (int row = 0; row < N; row++) {
		for (int col = 0; col < N; col++) {
			suma = 0.f;
			for (int n = 0; n < N; n++) {
				suma += A.elements[row*N + n] * B.elements[n*N + col];
			}
			//suma = powf(suma, 10);
			D.elements[row*N + col] = suma;
		}
	}
	end2 = clock();

	double time_spent1 = (double)(end1 - begin1) / CLOCKS_PER_SEC;
	double time_spent2 = (double)(end2 - begin2) / CLOCKS_PER_SEC;
	printf("Wynik na GPU: %.24f \n", C.elements[0]);
	printf("Wynik na CPU: %.24f \n", D.elements[0]);
	//Full range 8
	printf("Roznica GPU-CPU: %.24f \n", C.elements[0] - D.elements[0]);
	printf("Czas GPU: %.16f \n", time_spent1);
	printf("Czas CPU: %.16f \n", time_spent2);

	free(A.elements);
	free(B.elements);
	free(C.elements);
	free(D.elements);
	return 0;
}
da8fbf25eaf58269fc0dd153ad287c90fb674e43.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
#include <cmath>

// Wall-clock markers: (begin1, end1) bracket the GPU kernel,
// (begin2, end2) bracket the CPU reference loop.
clock_t begin1, begin2, end1, end2;

// Matrices are stored row-major:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
	int width;
	int height;
	float *elements;
} Matrix;

#define BLOCK_SIZE 32

// Forward declaration of the multiplication kernel.
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// For simplicity we assume every matrix dimension is an exact
// multiple of BLOCK_SIZE (no tail blocks are launched).

// Host wrapper: computes C = A * B on the GPU and times the kernel.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
	// Copy A into device global memory.
	Matrix d_A;
	d_A.width = A.width;
	d_A.height = A.height;
	size_t size = A.width * A.height * sizeof(float);
	cudaMalloc((void **)&d_A.elements, size);
	cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);

	// Copy B into device global memory.
	Matrix d_B;
	d_B.width = B.width;
	d_B.height = B.height;
	size = B.width * B.height * sizeof(float);
	cudaMalloc((void **)&d_B.elements, size);
	cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

	// Allocate the result matrix C in device global memory.
	Matrix d_C;
	d_C.width = C.width;
	d_C.height = C.height;
	size = C.width * C.height * sizeof(float);
	cudaMalloc((void **)&d_C.elements, size);

	// Launch configuration; time launch + completion.
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	//dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
	dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
	begin1 = clock();
	MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
	// Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
	// the supported equivalent, and the barrier is required here so that end1
	// measures actual kernel completion rather than just the async launch.
	cudaDeviceSynchronize();
	end1 = clock();

	// Retrieve the computed C from device global memory.
	cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);

	// Release device buffers.
	cudaFree(d_A.elements);
	cudaFree(d_B.elements);
	cudaFree(d_C.elements);
}

// Kernel: each thread computes one element of C, accumulating
// the row-by-column dot product in a register.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
	float Cvalue = 0;
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	for (int e = 0; e < A.width; ++e)
		Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
	//Cvalue = powf(Cvalue, 10);
	C.elements[row * C.width + col] = Cvalue;
}

// Allocate an uninitialized host matrix of the given dimensions.
Matrix newMatrix(int row, int col)
{
	Matrix newM;
	newM.width = row;
	newM.height = col;
	newM.elements = (float*)malloc(row * col * sizeof(float));
	return newM;
}

int main(int argc, char** argv)
{
	int N = 960;
	Matrix A = newMatrix(N, N);
	Matrix B = newMatrix(N, N);
	Matrix C = newMatrix(N, N);
	Matrix D = newMatrix(N, N);

	// Constant fill so the GPU and CPU products are directly comparable.
	for (int i = 0; i < N; i++) {
		for (int j = 0; j < N; j++) {
			A.elements[i*N + j] = 2.45317621124123;
			B.elements[i*N + j] = 2.54493874134242;
		}
	}

	//begin1 = clock();
	MatMul(A, B, C);
	//end1 = clock();

	// Reference multiplication on the CPU.
	begin2 = clock();
	float suma;
	for (int row = 0; row < N; row++) {
		for (int col = 0; col < N; col++) {
			suma = 0.f;
			for (int n = 0; n < N; n++) {
				suma += A.elements[row*N + n] * B.elements[n*N + col];
			}
			//suma = powf(suma, 10);
			D.elements[row*N + col] = suma;
		}
	}
	end2 = clock();

	double time_spent1 = (double)(end1 - begin1) / CLOCKS_PER_SEC;
	double time_spent2 = (double)(end2 - begin2) / CLOCKS_PER_SEC;
	printf("Wynik na GPU: %.24f \n", C.elements[0]);
	printf("Wynik na CPU: %.24f \n", D.elements[0]);
	//Full range 8
	printf("Roznica GPU-CPU: %.24f \n", C.elements[0] - D.elements[0]);
	printf("Czas GPU: %.16f \n", time_spent1);
	printf("Czas CPU: %.16f \n", time_spent2);

	free(A.elements);
	free(B.elements);
	free(C.elements);
	free(D.elements);
	return 0;
}
96c5611de2f7bf143586d9f50d56a0bf23e85964.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>

#define MAX 50
#define Block_Size 2

// Square matrix multiply: each thread computes one element of pd = md * nd,
// where all matrices are n_wid x n_wid and stored row-major.
// Fix: a bounds guard is added so grids that over-cover n_wid (whenever n_wid
// is not a multiple of Block_Size) cannot read or write out of range. For the
// N=4 / Block_Size=2 launch in main() the guard never fires, so results are
// unchanged.
__global__ void RevWrd(int* md, int* nd, int* pd, int n_wid)
{
	int col = blockIdx.x * Block_Size + threadIdx.x;
	int row = blockIdx.y * Block_Size + threadIdx.y;
	if (row >= n_wid || col >= n_wid)
		return;  // out-of-range thread: nothing to compute
	int Pvalue = 0;
	for (int a = 0; a < n_wid; a++) {
		Pvalue += (md[row * n_wid + a] * nd[a * n_wid + col]);
	}
	pd[row * n_wid + col] = Pvalue;
}

// Driver: builds A (all 1s) and B (all 2s), multiplies them on the GPU,
// and prints A, B, and the product C.
int main(void)
{
	int N = 4, i, j, sz;
	int A[4][4];
	int B[4][4];
	int C[4][4];
	int* d_a, * d_b, * d_c;

	printf("given matrix:\n");
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			A[i][j] = 1;
			B[i][j] = 2;
			C[i][j] = 0;
		}
	}

	printf("Matrix A:\n");
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			printf("%d ", A[i][j]);
		}
		printf("\n");
	}

	printf("Matrix B:\n");
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			printf("%d ", B[i][j]);
		}
		printf("\n");
	}

	sz = sizeof(int) * N * N;
	int Grid_Sz;
	Grid_Sz = N / Block_Size;  // exact here because N is a multiple of Block_Size

	// Device buffers and host-to-device transfer of the operands.
	hipMalloc((void**)&d_a, sz);
	hipMalloc((void**)&d_b, sz);
	hipMalloc((void**)&d_c, sz);
	hipMemcpy(d_a, A, sz, hipMemcpyHostToDevice);
	hipMemcpy(d_b, B, sz, hipMemcpyHostToDevice);

	dim3 blockDim(Block_Size, Block_Size, 1);
	dim3 gridDim(Grid_Sz, Grid_Sz, 1);
	RevWrd<<<gridDim, blockDim>>>(d_a, d_b, d_c, N);

	// Blocking copy back also synchronizes with the kernel; print the product.
	hipMemcpy(C, d_c, sz, hipMemcpyDeviceToHost);
	printf("Matrix C:\n");
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			printf("%d ", C[i][j]);
		}
		printf("\n");
	}

	hipFree(d_a);
	hipFree(d_b);
	hipFree(d_c);
}
96c5611de2f7bf143586d9f50d56a0bf23e85964.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>

#define MAX 50
#define Block_Size 2

// Square matrix multiply: each thread computes one element of pd = md * nd,
// where all matrices are n_wid x n_wid and stored row-major.
// Fix: a bounds guard is added so grids that over-cover n_wid (whenever n_wid
// is not a multiple of Block_Size) cannot read or write out of range. For the
// N=4 / Block_Size=2 launch in main() the guard never fires, so results are
// unchanged.
__global__ void RevWrd(int* md, int* nd, int* pd, int n_wid)
{
	int col = blockIdx.x * Block_Size + threadIdx.x;
	int row = blockIdx.y * Block_Size + threadIdx.y;
	if (row >= n_wid || col >= n_wid)
		return;  // out-of-range thread: nothing to compute
	int Pvalue = 0;
	for (int a = 0; a < n_wid; a++) {
		Pvalue += (md[row * n_wid + a] * nd[a * n_wid + col]);
	}
	pd[row * n_wid + col] = Pvalue;
}

// Driver: builds A (all 1s) and B (all 2s), multiplies them on the GPU,
// and prints A, B, and the product C.
int main(void)
{
	int N = 4, i, j, sz;
	int A[4][4];
	int B[4][4];
	int C[4][4];
	int* d_a, * d_b, * d_c;

	printf("given matrix:\n");
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			A[i][j] = 1;
			B[i][j] = 2;
			C[i][j] = 0;
		}
	}

	printf("Matrix A:\n");
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			printf("%d ", A[i][j]);
		}
		printf("\n");
	}

	printf("Matrix B:\n");
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			printf("%d ", B[i][j]);
		}
		printf("\n");
	}

	sz = sizeof(int) * N * N;
	int Grid_Sz;
	Grid_Sz = N / Block_Size;  // exact here because N is a multiple of Block_Size

	// Device buffers and host-to-device transfer of the operands.
	cudaMalloc((void**)&d_a, sz);
	cudaMalloc((void**)&d_b, sz);
	cudaMalloc((void**)&d_c, sz);
	cudaMemcpy(d_a, A, sz, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, B, sz, cudaMemcpyHostToDevice);

	dim3 blockDim(Block_Size, Block_Size, 1);
	dim3 gridDim(Grid_Sz, Grid_Sz, 1);
	RevWrd<<<gridDim, blockDim>>>(d_a, d_b, d_c, N);

	// Blocking copy back also synchronizes with the kernel; print the product.
	cudaMemcpy(C, d_c, sz, cudaMemcpyDeviceToHost);
	printf("Matrix C:\n");
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			printf("%d ", C[i][j]);
		}
		printf("\n");
	}

	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
}
6cc724e9bfd6446b60b7ec0cfd8c1e00f37bcd11.hip
// NOTE(review): HIP port (hipify output) of an md5crypt GPU implementation.
// ctx_init/ctx_update/md5_digest implement a trimmed single-block MD5; the
// round macros (F, FF/GG/HH/II, FF2/GG2/HH2/II2, Sxx, ACx, MASK1, ROTATE_LEFT)
// and types (md5_ctx, crypt_md5_*) are presumably defined in
// ../cuda_cryptmd5.h — TODO confirm against that header.
// md5crypt runs the $1$/$apr1$-style 1000-iteration mixing loop per thread,
// staging the password and the running digest in shared memory (one row per
// thread, THREADS rows). md5_crypt_gpu copies the salt to constant memory,
// uploads the candidate passwords, launches kernel_crypt_r, and downloads the
// 4-word hashes. Apart from the hip* API renames and hipLaunchKernelGGL, the
// code matches the CUDA original later in this corpus byte for byte.
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This software is Copyright (c) 2011,2012 Lukas Odzioba <ukasz at openwall dot net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <string.h> #include "../cuda_cryptmd5.h" #include "cuda_common.cuh" extern "C" void md5_crypt_gpu(crypt_md5_password *, uint32_t *, crypt_md5_salt *, int count); __device__ __constant__ char md5_salt_prefix_cu[] = "$1$"; __device__ __constant__ char apr1_salt_prefix_cu[] = "$apr1$"; __device__ __constant__ crypt_md5_salt cuda_salt[1]; __device__ void md5_process_block_cu(const void *, size_t, md5_ctx *); __device__ void md5_process_bytes_cu(const void *, size_t, md5_ctx *); __device__ void ctx_init(md5_ctx * ctx, uint8_t * ctx_buflen) { uint32_t *buf = (uint32_t *) ctx->buffer; int i = 14; while (i--) *buf++ = 0; *ctx_buflen = 0; } __device__ void ctx_update(md5_ctx * ctx, const char *string, uint8_t len, uint8_t * ctx_buflen) { uint8_t *dest = &ctx->buffer[*ctx_buflen]; uint8_t *src = (uint8_t *) string; *ctx_buflen += len; memcpy(dest, src, len); } __device__ void md5_digest(md5_ctx * ctx, uint32_t * result, uint8_t * ctx_buflen) { uint32_t len = *ctx_buflen; uint32_t *x = (uint32_t *) ctx->buffer; x[len / 4] |= (((uint32_t) 0x80) << ((len & 0x3) << 3)); len <<= 3; uint32_t b = 0xefcdab89; uint32_t c = 0x98badcfe; uint32_t d; // = 0x10325476; uint32_t a = ROTATE_LEFT(AC1 + x[0], S11); a += b; /* 1 */ d = ROTATE_LEFT((c ^ (a & MASK1)) + x[1] + AC2pCd, S12); d += a; /* 2 */ c = ROTATE_LEFT(F(d, a, b) + x[2] + AC3pCc, S13); c += d; /* 3 */ b = ROTATE_LEFT(F(c, d, a) + x[3] + AC4pCb, S14); b += c; /* 4 */ FF(a, b, c, d, x[4], S11, 0xf57c0faf); /* 5 */ FF(d, a, b, c, x[5], S12, 0x4787c62a); /* 6 */ FF(c, d, a, b, x[6], S13, 0xa8304613); /* 7 */ FF(b, c, 
d, a, x[7], S14, 0xfd469501); /* 8 */ FF(a, b, c, d, x[8], S11, 0x698098d8); /* 9 */ FF(d, a, b, c, x[9], S12, 0x8b44f7af); /* 10 */ FF(c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */ FF(b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */ FF(a, b, c, d, x[12], S11, 0x6b901122); /* 13 */ FF(d, a, b, c, x[13], S12, 0xfd987193); /* 14 */ FF(c, d, a, b, len, S13, 0xa679438e); /* 15 */ FF2(b, c, d, a, S14, 0x49b40821); /* 16 */ GG(a, b, c, d, x[1], S21, 0xf61e2562); /* 17 */ GG(d, a, b, c, x[6], S22, 0xc040b340); /* 18 */ GG(c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */ GG(b, c, d, a, x[0], S24, 0xe9b6c7aa); /* 20 */ GG(a, b, c, d, x[5], S21, 0xd62f105d); /* 21 */ GG(d, a, b, c, x[10], S22, 0x2441453); /* 22 */ GG2(c, d, a, b, S23, 0xd8a1e681); /* 23 */ GG(b, c, d, a, x[4], S24, 0xe7d3fbc8); /* 24 */ GG(a, b, c, d, x[9], S21, 0x21e1cde6); /* 25 */ GG(d, a, b, c, len, S22, 0xc33707d6); /* 26 */ GG(c, d, a, b, x[3], S23, 0xf4d50d87); /* 27 */ GG(b, c, d, a, x[8], S24, 0x455a14ed); /* 28 */ GG(a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */ GG(d, a, b, c, x[2], S22, 0xfcefa3f8); /* 30 */ GG(c, d, a, b, x[7], S23, 0x676f02d9); /* 31 */ GG(b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */ HH(a, b, c, d, x[5], S31, 0xfffa3942); /* 33 */ HH(d, a, b, c, x[8], S32, 0x8771f681); /* 34 */ HH(c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */ HH(b, c, d, a, len, S34, 0xfde5380c); /* 36 */ HH(a, b, c, d, x[1], S31, 0xa4beea44); /* 37 */ HH(d, a, b, c, x[4], S32, 0x4bdecfa9); /* 38 */ HH(c, d, a, b, x[7], S33, 0xf6bb4b60); /* 39 */ HH(b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */ HH(a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */ HH(d, a, b, c, x[0], S32, 0xeaa127fa); /* 42 */ HH(c, d, a, b, x[3], S33, 0xd4ef3085); /* 43 */ HH(b, c, d, a, x[6], S34, 0x4881d05); /* 44 */ HH(a, b, c, d, x[9], S31, 0xd9d4d039); /* 45 */ HH(d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */ HH2(c, d, a, b, S33, 0x1fa27cf8); /* 47 */ HH(b, c, d, a, x[2], S34, 0xc4ac5665); /* 48 */ II(a, b, c, d, x[0], S41, 0xf4292244); /* 49 */ 
II(d, a, b, c, x[7], S42, 0x432aff97); /* 50 */ II(c, d, a, b, len, S43, 0xab9423a7); /* 51 */ II(b, c, d, a, x[5], S44, 0xfc93a039); /* 52 */ II(a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */ II(d, a, b, c, x[3], S42, 0x8f0ccc92); /* 54 */ II(c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */ II(b, c, d, a, x[1], S44, 0x85845dd1); /* 56 */ II(a, b, c, d, x[8], S41, 0x6fa87e4f); /* 57 */ II2(d, a, b, c, S42, 0xfe2ce6e0); /* 58 */ II(c, d, a, b, x[6], S43, 0xa3014314); /* 59 */ II(b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */ II(a, b, c, d, x[4], S41, 0xf7537e82); /* 61 */ II(d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */ II(c, d, a, b, x[2], S43, 0x2ad7d2bb); /* 63 */ II(b, c, d, a, x[9], S44, 0xeb86d391); /* 64 */ result[0] = a + 0x67452301; result[1] = b + 0xefcdab89; result[2] = c + 0x98badcfe; result[3] = d + 0x10325476; } __device__ void md5crypt(const char *gpass, size_t keysize, unsigned int *result) { uint32_t i; __shared__ uint32_t alt_result[THREADS][4 + 1]; __shared__ char spass[THREADS][16 + 4]; uint8_t ctx_buflen; char *pass = spass[threadIdx.x]; memcpy(pass, gpass, 15); uint8_t pass_len = keysize; uint8_t salt_len = cuda_salt[0].length; char *salt = cuda_salt[0].salt; md5_ctx ctx; ctx_init(&ctx, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); ctx_update(&ctx, salt, salt_len, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); md5_digest(&ctx, alt_result[threadIdx.x], &ctx_buflen); ctx_init(&ctx, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); if (cuda_salt[0].prefix == '1') { ctx_update(&ctx, md5_salt_prefix_cu, 3, &ctx_buflen); } else ctx_update(&ctx, apr1_salt_prefix_cu, 6, &ctx_buflen); ctx_update(&ctx, salt, salt_len, &ctx_buflen); ctx_update(&ctx, (const char *) alt_result[threadIdx.x], pass_len, &ctx_buflen); *alt_result[threadIdx.x] = 0; for (i = pass_len; i > 0; i >>= 1) if ((i & 1) != 0) ctx.buffer[ctx_buflen++] = ((const char *) alt_result[threadIdx.x])[0]; else ctx.buffer[ctx_buflen++] = pass[0]; md5_digest(&ctx, 
alt_result[threadIdx.x], &ctx_buflen); for (i = 0; i < 1000; i++) { ctx_init(&ctx, &ctx_buflen); if ((i & 1) != 0) ctx_update(&ctx, pass, pass_len, &ctx_buflen); else ctx_update(&ctx, (const char *) alt_result[threadIdx.x], 16, &ctx_buflen); if (i % 3 != 0) ctx_update(&ctx, salt, salt_len, &ctx_buflen); if (i % 7 != 0) ctx_update(&ctx, pass, pass_len, &ctx_buflen); if ((i & 1) != 0) ctx_update(&ctx, (const char *) alt_result[threadIdx.x], 16, &ctx_buflen); else ctx_update(&ctx, pass, pass_len, &ctx_buflen); md5_digest(&ctx, alt_result[threadIdx.x], &ctx_buflen); } result[0] = alt_result[threadIdx.x][0]; result[1] = alt_result[threadIdx.x][1]; result[2] = alt_result[threadIdx.x][2]; result[3] = alt_result[threadIdx.x][3]; } __global__ void kernel_crypt_r(crypt_md5_password * inbuffer, crypt_md5_crack * outbuffer) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; md5crypt((char *) inbuffer[idx].v, inbuffer[idx].length, outbuffer[idx].hash); } __host__ void md5_crypt_gpu(crypt_md5_password * inbuffer, uint32_t * outbuffer, crypt_md5_salt * host_salt, int count) { int blocks = (count + THREADS - 1) / THREADS; HANDLE_ERROR(hipMemcpyToSymbol(cuda_salt, host_salt, sizeof(crypt_md5_salt))); crypt_md5_password *cuda_inbuffer; crypt_md5_crack *cuda_outbuffer; size_t insize = sizeof(crypt_md5_password) * KEYS_PER_CRYPT; size_t outsize = sizeof(crypt_md5_crack) * KEYS_PER_CRYPT; HANDLE_ERROR(hipMalloc(&cuda_inbuffer, insize)); HANDLE_ERROR(hipMalloc(&cuda_outbuffer, outsize)); HANDLE_ERROR(hipMemcpy(cuda_inbuffer, inbuffer, insize, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel_crypt_r) , dim3(blocks), dim3(THREADS) , 0, 0, cuda_inbuffer, cuda_outbuffer); HANDLE_ERROR(hipGetLastError()); HANDLE_ERROR(hipMemcpy(outbuffer, cuda_outbuffer, outsize, hipMemcpyDeviceToHost)); HANDLE_ERROR(hipFree(cuda_inbuffer)); HANDLE_ERROR(hipFree(cuda_outbuffer)); }
6cc724e9bfd6446b60b7ec0cfd8c1e00f37bcd11.cu
// NOTE(review): CUDA original of the md5crypt GPU implementation (the HIP
// twin appears earlier in this corpus). ctx_init/ctx_update/md5_digest form a
// trimmed single-block MD5; the round macros (F, FF/GG/HH/II, *2 variants,
// Sxx, ACx, MASK1, ROTATE_LEFT) and the crypt_md5_* / md5_ctx types are
// presumably defined in ../cuda_cryptmd5.h — TODO confirm against that header.
// md5crypt performs the $1$/$apr1$-style 1000-iteration mixing loop per
// thread, using shared memory rows indexed by threadIdx.x for the password
// and running digest. md5_crypt_gpu stages the salt in constant memory,
// uploads candidates, launches kernel_crypt_r, and downloads the hashes,
// checking every CUDA call through HANDLE_ERROR.
/* * This software is Copyright (c) 2011,2012 Lukas Odzioba <ukasz at openwall dot net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <string.h> #include "../cuda_cryptmd5.h" #include "cuda_common.cuh" extern "C" void md5_crypt_gpu(crypt_md5_password *, uint32_t *, crypt_md5_salt *, int count); __device__ __constant__ char md5_salt_prefix_cu[] = "$1$"; __device__ __constant__ char apr1_salt_prefix_cu[] = "$apr1$"; __device__ __constant__ crypt_md5_salt cuda_salt[1]; __device__ void md5_process_block_cu(const void *, size_t, md5_ctx *); __device__ void md5_process_bytes_cu(const void *, size_t, md5_ctx *); __device__ void ctx_init(md5_ctx * ctx, uint8_t * ctx_buflen) { uint32_t *buf = (uint32_t *) ctx->buffer; int i = 14; while (i--) *buf++ = 0; *ctx_buflen = 0; } __device__ void ctx_update(md5_ctx * ctx, const char *string, uint8_t len, uint8_t * ctx_buflen) { uint8_t *dest = &ctx->buffer[*ctx_buflen]; uint8_t *src = (uint8_t *) string; *ctx_buflen += len; memcpy(dest, src, len); } __device__ void md5_digest(md5_ctx * ctx, uint32_t * result, uint8_t * ctx_buflen) { uint32_t len = *ctx_buflen; uint32_t *x = (uint32_t *) ctx->buffer; x[len / 4] |= (((uint32_t) 0x80) << ((len & 0x3) << 3)); len <<= 3; uint32_t b = 0xefcdab89; uint32_t c = 0x98badcfe; uint32_t d; // = 0x10325476; uint32_t a = ROTATE_LEFT(AC1 + x[0], S11); a += b; /* 1 */ d = ROTATE_LEFT((c ^ (a & MASK1)) + x[1] + AC2pCd, S12); d += a; /* 2 */ c = ROTATE_LEFT(F(d, a, b) + x[2] + AC3pCc, S13); c += d; /* 3 */ b = ROTATE_LEFT(F(c, d, a) + x[3] + AC4pCb, S14); b += c; /* 4 */ FF(a, b, c, d, x[4], S11, 0xf57c0faf); /* 5 */ FF(d, a, b, c, x[5], S12, 0x4787c62a); /* 6 */ FF(c, d, a, b, x[6], S13, 0xa8304613); /* 7 */ FF(b, c, d, a, x[7], S14, 0xfd469501); /* 8 */ FF(a, b, c, d, x[8], S11, 0x698098d8); /* 9 */ 
FF(d, a, b, c, x[9], S12, 0x8b44f7af); /* 10 */ FF(c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */ FF(b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */ FF(a, b, c, d, x[12], S11, 0x6b901122); /* 13 */ FF(d, a, b, c, x[13], S12, 0xfd987193); /* 14 */ FF(c, d, a, b, len, S13, 0xa679438e); /* 15 */ FF2(b, c, d, a, S14, 0x49b40821); /* 16 */ GG(a, b, c, d, x[1], S21, 0xf61e2562); /* 17 */ GG(d, a, b, c, x[6], S22, 0xc040b340); /* 18 */ GG(c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */ GG(b, c, d, a, x[0], S24, 0xe9b6c7aa); /* 20 */ GG(a, b, c, d, x[5], S21, 0xd62f105d); /* 21 */ GG(d, a, b, c, x[10], S22, 0x2441453); /* 22 */ GG2(c, d, a, b, S23, 0xd8a1e681); /* 23 */ GG(b, c, d, a, x[4], S24, 0xe7d3fbc8); /* 24 */ GG(a, b, c, d, x[9], S21, 0x21e1cde6); /* 25 */ GG(d, a, b, c, len, S22, 0xc33707d6); /* 26 */ GG(c, d, a, b, x[3], S23, 0xf4d50d87); /* 27 */ GG(b, c, d, a, x[8], S24, 0x455a14ed); /* 28 */ GG(a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */ GG(d, a, b, c, x[2], S22, 0xfcefa3f8); /* 30 */ GG(c, d, a, b, x[7], S23, 0x676f02d9); /* 31 */ GG(b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */ HH(a, b, c, d, x[5], S31, 0xfffa3942); /* 33 */ HH(d, a, b, c, x[8], S32, 0x8771f681); /* 34 */ HH(c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */ HH(b, c, d, a, len, S34, 0xfde5380c); /* 36 */ HH(a, b, c, d, x[1], S31, 0xa4beea44); /* 37 */ HH(d, a, b, c, x[4], S32, 0x4bdecfa9); /* 38 */ HH(c, d, a, b, x[7], S33, 0xf6bb4b60); /* 39 */ HH(b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */ HH(a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */ HH(d, a, b, c, x[0], S32, 0xeaa127fa); /* 42 */ HH(c, d, a, b, x[3], S33, 0xd4ef3085); /* 43 */ HH(b, c, d, a, x[6], S34, 0x4881d05); /* 44 */ HH(a, b, c, d, x[9], S31, 0xd9d4d039); /* 45 */ HH(d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */ HH2(c, d, a, b, S33, 0x1fa27cf8); /* 47 */ HH(b, c, d, a, x[2], S34, 0xc4ac5665); /* 48 */ II(a, b, c, d, x[0], S41, 0xf4292244); /* 49 */ II(d, a, b, c, x[7], S42, 0x432aff97); /* 50 */ II(c, d, a, b, len, S43, 0xab9423a7); 
/* 51 */ II(b, c, d, a, x[5], S44, 0xfc93a039); /* 52 */ II(a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */ II(d, a, b, c, x[3], S42, 0x8f0ccc92); /* 54 */ II(c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */ II(b, c, d, a, x[1], S44, 0x85845dd1); /* 56 */ II(a, b, c, d, x[8], S41, 0x6fa87e4f); /* 57 */ II2(d, a, b, c, S42, 0xfe2ce6e0); /* 58 */ II(c, d, a, b, x[6], S43, 0xa3014314); /* 59 */ II(b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */ II(a, b, c, d, x[4], S41, 0xf7537e82); /* 61 */ II(d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */ II(c, d, a, b, x[2], S43, 0x2ad7d2bb); /* 63 */ II(b, c, d, a, x[9], S44, 0xeb86d391); /* 64 */ result[0] = a + 0x67452301; result[1] = b + 0xefcdab89; result[2] = c + 0x98badcfe; result[3] = d + 0x10325476; } __device__ void md5crypt(const char *gpass, size_t keysize, unsigned int *result) { uint32_t i; __shared__ uint32_t alt_result[THREADS][4 + 1]; __shared__ char spass[THREADS][16 + 4]; uint8_t ctx_buflen; char *pass = spass[threadIdx.x]; memcpy(pass, gpass, 15); uint8_t pass_len = keysize; uint8_t salt_len = cuda_salt[0].length; char *salt = cuda_salt[0].salt; md5_ctx ctx; ctx_init(&ctx, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); ctx_update(&ctx, salt, salt_len, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); md5_digest(&ctx, alt_result[threadIdx.x], &ctx_buflen); ctx_init(&ctx, &ctx_buflen); ctx_update(&ctx, pass, pass_len, &ctx_buflen); if (cuda_salt[0].prefix == '1') { ctx_update(&ctx, md5_salt_prefix_cu, 3, &ctx_buflen); } else ctx_update(&ctx, apr1_salt_prefix_cu, 6, &ctx_buflen); ctx_update(&ctx, salt, salt_len, &ctx_buflen); ctx_update(&ctx, (const char *) alt_result[threadIdx.x], pass_len, &ctx_buflen); *alt_result[threadIdx.x] = 0; for (i = pass_len; i > 0; i >>= 1) if ((i & 1) != 0) ctx.buffer[ctx_buflen++] = ((const char *) alt_result[threadIdx.x])[0]; else ctx.buffer[ctx_buflen++] = pass[0]; md5_digest(&ctx, alt_result[threadIdx.x], &ctx_buflen); for (i = 0; i < 1000; i++) { ctx_init(&ctx, 
&ctx_buflen); if ((i & 1) != 0) ctx_update(&ctx, pass, pass_len, &ctx_buflen); else ctx_update(&ctx, (const char *) alt_result[threadIdx.x], 16, &ctx_buflen); if (i % 3 != 0) ctx_update(&ctx, salt, salt_len, &ctx_buflen); if (i % 7 != 0) ctx_update(&ctx, pass, pass_len, &ctx_buflen); if ((i & 1) != 0) ctx_update(&ctx, (const char *) alt_result[threadIdx.x], 16, &ctx_buflen); else ctx_update(&ctx, pass, pass_len, &ctx_buflen); md5_digest(&ctx, alt_result[threadIdx.x], &ctx_buflen); } result[0] = alt_result[threadIdx.x][0]; result[1] = alt_result[threadIdx.x][1]; result[2] = alt_result[threadIdx.x][2]; result[3] = alt_result[threadIdx.x][3]; } __global__ void kernel_crypt_r(crypt_md5_password * inbuffer, crypt_md5_crack * outbuffer) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; md5crypt((char *) inbuffer[idx].v, inbuffer[idx].length, outbuffer[idx].hash); } __host__ void md5_crypt_gpu(crypt_md5_password * inbuffer, uint32_t * outbuffer, crypt_md5_salt * host_salt, int count) { int blocks = (count + THREADS - 1) / THREADS; HANDLE_ERROR(cudaMemcpyToSymbol(cuda_salt, host_salt, sizeof(crypt_md5_salt))); crypt_md5_password *cuda_inbuffer; crypt_md5_crack *cuda_outbuffer; size_t insize = sizeof(crypt_md5_password) * KEYS_PER_CRYPT; size_t outsize = sizeof(crypt_md5_crack) * KEYS_PER_CRYPT; HANDLE_ERROR(cudaMalloc(&cuda_inbuffer, insize)); HANDLE_ERROR(cudaMalloc(&cuda_outbuffer, outsize)); HANDLE_ERROR(cudaMemcpy(cuda_inbuffer, inbuffer, insize, cudaMemcpyHostToDevice)); kernel_crypt_r <<< blocks, THREADS >>> (cuda_inbuffer, cuda_outbuffer); HANDLE_ERROR(cudaGetLastError()); HANDLE_ERROR(cudaMemcpy(outbuffer, cuda_outbuffer, outsize, cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaFree(cuda_inbuffer)); HANDLE_ERROR(cudaFree(cuda_outbuffer)); }
f4b755563f626c9ad0cae45d888b8ab8e4ffca0a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* All modification made by Cambricon Corporation: 2018-2019 Cambricon Corporation All rights reserved. All other contributions: Copyright (c) 2014--2019, the respective contributors All rights reserved. For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// NOTE(review): hipified Caffe cuDNN deconvolution layer.
// Forward_gpu: per-group cudnnConvolutionBackwardData (a deconvolution's
// forward pass is the data-gradient of a convolution) plus an optional
// cudnnAddTensor for the bias; the empty sync_deconv_groups kernel launched
// on the default (null) stream serves as a barrier across the per-group
// streams. Backward_gpu computes bias, filter, and bottom-data gradients
// with the matching cuDNN calls, indexing handle_/workspace by
// k * group_ + g per gradient kind. NOTE(review): the file mixes hip*
// kernel launches with CUDNN_CHECK/cudnn* calls — it presumably builds
// against a cuDNN-compatible shim (e.g. hipDNN); verify before targeting AMD.
*/ #ifdef USE_CUDNN #include <vector> #include "caffe/layers/cudnn_deconv_layer.hpp" namespace caffe { __global__ void sync_deconv_groups() {} template <typename Dtype> void CuDNNDeconvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { // Filters. CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, bottom_descs_[i], bottom_data + bottom_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. 
// NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_deconv_groups), dim3(1), dim3(1), 0, 0, ); } } template <typename Dtype> void CuDNNDeconvolutionLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0 * this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1 * this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, bottom_descs_[i], bottom_data + bottom_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1 * this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK( cudnnConvolutionForward(handle_[2 * this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[2 * this->group_ + g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_deconv_groups), dim3(1), dim3(1), 0, 0, ); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNDeconvolutionLayer); } // namespace caffe #endif
f4b755563f626c9ad0cae45d888b8ab8e4ffca0a.cu
/* All modification made by Cambricon Corporation: © 2018-2019 Cambricon Corporation All rights reserved. All other contributions: Copyright (c) 2014--2019, the respective contributors All rights reserved. For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef USE_CUDNN #include <vector> #include "caffe/layers/cudnn_deconv_layer.hpp" namespace caffe { __global__ void sync_deconv_groups() {} template <typename Dtype> void CuDNNDeconvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { // Filters. CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, bottom_descs_[i], bottom_data + bottom_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. 
// NOLINT_NEXT_LINE(whitespace/operators) sync_deconv_groups<<<1, 1>>>(); } } template <typename Dtype> void CuDNNDeconvolutionLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0 * this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1 * this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, bottom_descs_[i], bottom_data + bottom_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1 * this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK( cudnnConvolutionForward(handle_[2 * this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[2 * this->group_ + g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_deconv_groups<<<1, 1>>>(); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNDeconvolutionLayer); } // namespace caffe #endif
e7daede892e247da6521f218307ac8df72d2a139.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <math.h> // Assertion to check for errors #define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #define NUM_THREADS_PER_BLOCK 256 #define NUM_BLOCKS 16 #define PRINT_TIME 1 #define SM_ARR_LEN 50000 #define TOL 1e-6 #define IMUL(a, b) __mul24(a, b) void initializeArray1D(float *arr, int len, int seed); __global__ void kernel_add (int arrLen, float* x, float* y, float* result) { const int tid = IMUL(blockDim.x, blockIdx.x) + threadIdx.x; const int threadN = IMUL(blockDim.x, gridDim.x); int i; for(i = tid; i < arrLen; i += threadN) { result[i] = (1e-6 * x[i] ) + (1e-7 * y[i]) + 0.25; } } int main(int argc, char **argv){ int arrLen = 0; // GPU Timing variables hipEvent_t start, stop; float elapsed_gpu; // Arrays on GPU global memoryc float *d_x; float *d_y; float *d_result; // Arrays on the host memory float *h_x; float *h_y; float *h_result; float *h_result_gold; int i, errCount = 0, zeroCount = 0; if (argc > 1) { arrLen = atoi(argv[1]); } else { arrLen = SM_ARR_LEN; } printf("Length of the array = %d\n", arrLen); // Select GPU CUDA_SAFE_CALL(hipSetDevice(0)); // Allocate GPU memory size_t allocSize = arrLen * sizeof(float); CUDA_SAFE_CALL(hipMalloc((void **)&d_x, allocSize)); CUDA_SAFE_CALL(hipMalloc((void **)&d_y, allocSize)); CUDA_SAFE_CALL(hipMalloc((void **)&d_result, allocSize)); // Allocate arrays on host memory h_x = (float *) malloc(allocSize); h_y = (float *) malloc(allocSize); h_result = (float *) malloc(allocSize); h_result_gold = (float *) malloc(allocSize); // Initialize the host arrays printf("\nInitializing the arrays ..."); // Arrays are initialized with a known seed for reproducability 
initializeArray1D(h_x, arrLen, 2453); initializeArray1D(h_y, arrLen, 1467); printf("\t... done\n\n"); #if PRINT_TIME // Create the cuda events hipEventCreate(&start); hipEventCreate(&stop); // Record event on the default stream hipEventRecord(start, 0); #endif // Transfer the arrays to the GPU memory CUDA_SAFE_CALL(hipMemcpy(d_x, h_x, allocSize, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_y, h_y, allocSize, hipMemcpyHostToDevice)); // Launch the kernel hipLaunchKernelGGL(( kernel_add), dim3(NUM_BLOCKS), dim3(NUM_THREADS_PER_BLOCK), 0, 0, arrLen, d_x, d_y, d_result); // Check for errors during launch CUDA_SAFE_CALL(hipPeekAtLastError()); // Transfer the results back to the host CUDA_SAFE_CALL(hipMemcpy(h_result, d_result, allocSize, hipMemcpyDeviceToHost)); #if PRINT_TIME // Stop and destroy the timer hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_gpu, start, stop); printf("\nGPU time: %f (msec)\n", elapsed_gpu); hipEventDestroy(start); hipEventDestroy(stop); #endif // Compute the results on the host for(i = 0; i < arrLen; i++) { h_result_gold[i] = (1e-6 * h_x[i]) + (1e-7 * h_y[i]) + 0.25; } // Compare the results for(i = 0; i < arrLen; i++) { if (abs(h_result_gold[i] - h_result[i]) > TOL) { errCount++; } if (h_result[i] == 0) { zeroCount++; } } /* for(i = 0; i < 50; i++) { printf("%d:\t%.8f\t%.8f\n", i, h_result_gold[i], h_result[i]); } */ if (errCount > 0) { printf("\n@ERROR: TEST FAILED: %d results did not matched\n", errCount); } else if (zeroCount > 0){ printf("\n@ERROR: TEST FAILED: %d results (from GPU) are zero\n", zeroCount); } else { printf("\nTEST PASSED: All results matched\n"); } // Free-up device and host memory CUDA_SAFE_CALL(hipFree(d_x)); CUDA_SAFE_CALL(hipFree(d_y)); CUDA_SAFE_CALL(hipFree(d_result)); free(h_x); free(h_y); free(h_result); return 0; } void initializeArray1D(float *arr, int len, int seed) { int i; float randNum; srand(seed); for (i = 0; i < len; i++) { randNum = (float) rand(); arr[i] = 
randNum; } }
e7daede892e247da6521f218307ac8df72d2a139.cu
#include <cstdio> #include <cstdlib> #include <math.h> // Assertion to check for errors #define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #define NUM_THREADS_PER_BLOCK 256 #define NUM_BLOCKS 16 #define PRINT_TIME 1 #define SM_ARR_LEN 50000 #define TOL 1e-6 #define IMUL(a, b) __mul24(a, b) void initializeArray1D(float *arr, int len, int seed); __global__ void kernel_add (int arrLen, float* x, float* y, float* result) { const int tid = IMUL(blockDim.x, blockIdx.x) + threadIdx.x; const int threadN = IMUL(blockDim.x, gridDim.x); int i; for(i = tid; i < arrLen; i += threadN) { result[i] = (1e-6 * x[i] ) + (1e-7 * y[i]) + 0.25; } } int main(int argc, char **argv){ int arrLen = 0; // GPU Timing variables cudaEvent_t start, stop; float elapsed_gpu; // Arrays on GPU global memoryc float *d_x; float *d_y; float *d_result; // Arrays on the host memory float *h_x; float *h_y; float *h_result; float *h_result_gold; int i, errCount = 0, zeroCount = 0; if (argc > 1) { arrLen = atoi(argv[1]); } else { arrLen = SM_ARR_LEN; } printf("Length of the array = %d\n", arrLen); // Select GPU CUDA_SAFE_CALL(cudaSetDevice(0)); // Allocate GPU memory size_t allocSize = arrLen * sizeof(float); CUDA_SAFE_CALL(cudaMalloc((void **)&d_x, allocSize)); CUDA_SAFE_CALL(cudaMalloc((void **)&d_y, allocSize)); CUDA_SAFE_CALL(cudaMalloc((void **)&d_result, allocSize)); // Allocate arrays on host memory h_x = (float *) malloc(allocSize); h_y = (float *) malloc(allocSize); h_result = (float *) malloc(allocSize); h_result_gold = (float *) malloc(allocSize); // Initialize the host arrays printf("\nInitializing the arrays ..."); // Arrays are initialized with a known seed for reproducability initializeArray1D(h_x, arrLen, 2453); initializeArray1D(h_y, arrLen, 1467); 
printf("\t... done\n\n"); #if PRINT_TIME // Create the cuda events cudaEventCreate(&start); cudaEventCreate(&stop); // Record event on the default stream cudaEventRecord(start, 0); #endif // Transfer the arrays to the GPU memory CUDA_SAFE_CALL(cudaMemcpy(d_x, h_x, allocSize, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_y, h_y, allocSize, cudaMemcpyHostToDevice)); // Launch the kernel kernel_add<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>(arrLen, d_x, d_y, d_result); // Check for errors during launch CUDA_SAFE_CALL(cudaPeekAtLastError()); // Transfer the results back to the host CUDA_SAFE_CALL(cudaMemcpy(h_result, d_result, allocSize, cudaMemcpyDeviceToHost)); #if PRINT_TIME // Stop and destroy the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_gpu, start, stop); printf("\nGPU time: %f (msec)\n", elapsed_gpu); cudaEventDestroy(start); cudaEventDestroy(stop); #endif // Compute the results on the host for(i = 0; i < arrLen; i++) { h_result_gold[i] = (1e-6 * h_x[i]) + (1e-7 * h_y[i]) + 0.25; } // Compare the results for(i = 0; i < arrLen; i++) { if (abs(h_result_gold[i] - h_result[i]) > TOL) { errCount++; } if (h_result[i] == 0) { zeroCount++; } } /* for(i = 0; i < 50; i++) { printf("%d:\t%.8f\t%.8f\n", i, h_result_gold[i], h_result[i]); } */ if (errCount > 0) { printf("\n@ERROR: TEST FAILED: %d results did not matched\n", errCount); } else if (zeroCount > 0){ printf("\n@ERROR: TEST FAILED: %d results (from GPU) are zero\n", zeroCount); } else { printf("\nTEST PASSED: All results matched\n"); } // Free-up device and host memory CUDA_SAFE_CALL(cudaFree(d_x)); CUDA_SAFE_CALL(cudaFree(d_y)); CUDA_SAFE_CALL(cudaFree(d_result)); free(h_x); free(h_y); free(h_result); return 0; } void initializeArray1D(float *arr, int len, int seed) { int i; float randNum; srand(seed); for (i = 0; i < len; i++) { randNum = (float) rand(); arr[i] = randNum; } }
cc109fd673552f314d41c4cb81653150b60b2056.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <raft/handle.hpp> #include "auto_arima.cuh" #include <cuml/tsa/auto_arima.h> namespace ML { int divide_by_mask_build_index(const raft::handle_t& handle, const bool* d_mask, int* d_index, int batch_size) { hipStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); return ML::TimeSeries::divide_by_mask_build_index(d_mask, d_index, batch_size, allocator, stream); } template <typename DataT> inline void divide_by_mask_execute_helper(const raft::handle_t& handle, const DataT* d_in, const bool* d_mask, const int* d_index, DataT* d_out0, DataT* d_out1, int batch_size, int n_obs) { hipStream_t stream = handle.get_stream(); ML::TimeSeries::divide_by_mask_execute(d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs, stream); } void divide_by_mask_execute(const raft::handle_t& handle, const float* d_in, const bool* d_mask, const int* d_index, float* d_out0, float* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void divide_by_mask_execute(const raft::handle_t& handle, const double* d_in, const bool* d_mask, const int* d_index, double* d_out0, double* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void 
divide_by_mask_execute(const raft::handle_t& handle, const int* d_in, const bool* d_mask, const int* d_index, int* d_out0, int* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } template <typename DataT> inline void divide_by_min_build_index_helper(const raft::handle_t& handle, const DataT* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { hipStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::divide_by_min_build_index( d_matrix, d_batch, d_index, h_size, batch_size, n_sub, allocator, stream); } void divide_by_min_build_index(const raft::handle_t& handle, const float* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } void divide_by_min_build_index(const raft::handle_t& handle, const double* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } template <typename DataT> inline void divide_by_min_execute_helper(const raft::handle_t& handle, const DataT* d_in, const int* d_batch, const int* d_index, DataT** hd_out, int batch_size, int n_sub, int n_obs) { hipStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::divide_by_min_execute(d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs, allocator, stream); } void divide_by_min_execute(const raft::handle_t& handle, const float* d_in, const int* d_batch, const int* d_index, float** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const raft::handle_t& handle, const double* d_in, const int* d_batch, const int* d_index, double** hd_out, 
int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const raft::handle_t& handle, const int* d_in, const int* d_batch, const int* d_index, int** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void build_division_map(const raft::handle_t& handle, const int* const* hd_id, const int* h_size, int* d_id_to_pos, int* d_id_to_model, int batch_size, int n_sub) { hipStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::build_division_map(hd_id, h_size, d_id_to_pos, d_id_to_model, batch_size, n_sub, allocator, stream); } template <typename DataT> inline void merge_series_helper(const raft::handle_t& handle, const DataT* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, DataT* d_out, int batch_size, int n_sub, int n_obs) { hipStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::merge_series(hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs, allocator, stream); } void merge_series(const raft::handle_t& handle, const float* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, float* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } void merge_series(const raft::handle_t& handle, const double* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, double* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } } // namespace ML
cc109fd673552f314d41c4cb81653150b60b2056.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <raft/handle.hpp> #include "auto_arima.cuh" #include <cuml/tsa/auto_arima.h> namespace ML { int divide_by_mask_build_index(const raft::handle_t& handle, const bool* d_mask, int* d_index, int batch_size) { cudaStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); return ML::TimeSeries::divide_by_mask_build_index(d_mask, d_index, batch_size, allocator, stream); } template <typename DataT> inline void divide_by_mask_execute_helper(const raft::handle_t& handle, const DataT* d_in, const bool* d_mask, const int* d_index, DataT* d_out0, DataT* d_out1, int batch_size, int n_obs) { cudaStream_t stream = handle.get_stream(); ML::TimeSeries::divide_by_mask_execute(d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs, stream); } void divide_by_mask_execute(const raft::handle_t& handle, const float* d_in, const bool* d_mask, const int* d_index, float* d_out0, float* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void divide_by_mask_execute(const raft::handle_t& handle, const double* d_in, const bool* d_mask, const int* d_index, double* d_out0, double* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void divide_by_mask_execute(const raft::handle_t& handle, const int* d_in, 
const bool* d_mask, const int* d_index, int* d_out0, int* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } template <typename DataT> inline void divide_by_min_build_index_helper(const raft::handle_t& handle, const DataT* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { cudaStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::divide_by_min_build_index( d_matrix, d_batch, d_index, h_size, batch_size, n_sub, allocator, stream); } void divide_by_min_build_index(const raft::handle_t& handle, const float* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } void divide_by_min_build_index(const raft::handle_t& handle, const double* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } template <typename DataT> inline void divide_by_min_execute_helper(const raft::handle_t& handle, const DataT* d_in, const int* d_batch, const int* d_index, DataT** hd_out, int batch_size, int n_sub, int n_obs) { cudaStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::divide_by_min_execute(d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs, allocator, stream); } void divide_by_min_execute(const raft::handle_t& handle, const float* d_in, const int* d_batch, const int* d_index, float** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const raft::handle_t& handle, const double* d_in, const int* d_batch, const int* d_index, double** hd_out, int batch_size, int n_sub, int n_obs) { 
divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const raft::handle_t& handle, const int* d_in, const int* d_batch, const int* d_index, int** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void build_division_map(const raft::handle_t& handle, const int* const* hd_id, const int* h_size, int* d_id_to_pos, int* d_id_to_model, int batch_size, int n_sub) { cudaStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::build_division_map(hd_id, h_size, d_id_to_pos, d_id_to_model, batch_size, n_sub, allocator, stream); } template <typename DataT> inline void merge_series_helper(const raft::handle_t& handle, const DataT* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, DataT* d_out, int batch_size, int n_sub, int n_obs) { cudaStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::merge_series(hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs, allocator, stream); } void merge_series(const raft::handle_t& handle, const float* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, float* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } void merge_series(const raft::handle_t& handle, const double* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, double* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } } // namespace ML
650f68ce54447362d2b683063189b0ca231c8885.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "naive.h" #include <algorithm> #include <iostream> namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernScan(int n, int bar, int *in, int *out) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) { return; } if (k >= bar) { out[k] = in[k - bar] + in[k]; } else { out[k] = in[k]; } return; } __global__ void kernShift(int n, int *in, int *out) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) { return; } if (k == 0) { out[k] = 0; } else { out[k] = in[k - 1]; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { int* in; hipMalloc((void**)&in, n * sizeof(int)); int* out; hipMalloc((void**)&out, n * sizeof(int)); hipMemcpy(in, idata, sizeof(int) * n, hipMemcpyHostToDevice); timer().startGpuTimer(); int roundup_n = pow(2, ilog2ceil(n)); int blockSize = 128; dim3 blockPerGrid((roundup_n + blockSize - 1) / blockSize); for (int d = 1; d <= ilog2ceil(n); d++) { hipLaunchKernelGGL(( kernScan) , dim3(blockPerGrid), dim3(blockSize), 0, 0, n, pow(2, d-1), in, out); std::swap(in, out); } kernShift << <blockPerGrid, blockSize>> > (n, in, out); timer().endGpuTimer(); hipMemcpy(odata, out, sizeof(int) * n, hipMemcpyDeviceToHost); hipFree(in); hipFree(out); } } }
650f68ce54447362d2b683063189b0ca231c8885.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "naive.h" #include <algorithm> #include <iostream> namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernScan(int n, int bar, int *in, int *out) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) { return; } if (k >= bar) { out[k] = in[k - bar] + in[k]; } else { out[k] = in[k]; } return; } __global__ void kernShift(int n, int *in, int *out) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) { return; } if (k == 0) { out[k] = 0; } else { out[k] = in[k - 1]; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { int* in; cudaMalloc((void**)&in, n * sizeof(int)); int* out; cudaMalloc((void**)&out, n * sizeof(int)); cudaMemcpy(in, idata, sizeof(int) * n, cudaMemcpyHostToDevice); timer().startGpuTimer(); int roundup_n = pow(2, ilog2ceil(n)); int blockSize = 128; dim3 blockPerGrid((roundup_n + blockSize - 1) / blockSize); for (int d = 1; d <= ilog2ceil(n); d++) { kernScan <<<blockPerGrid, blockSize>>>(n, pow(2, d-1), in, out); std::swap(in, out); } kernShift << <blockPerGrid, blockSize>> > (n, in, out); timer().endGpuTimer(); cudaMemcpy(odata, out, sizeof(int) * n, cudaMemcpyDeviceToHost); cudaFree(in); cudaFree(out); } } }
5dd425a5e9eb93f1ac9c469b02913cd687205ca1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "counting.h" #include <cstdio> #include <cassert> #include <thrust/scan.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } //-------------------------------Below is Part I --------------------------------------------------------- template <typename T> struct set_zero : public thrust::unary_function<T, int> { __host__ __device__ int operator()(T x) const {return 0;} }; struct is_new_line { __host__ __device__ bool operator()(char x) {return x=='\n';} }; void CountPosition1(const char *text, int *pos, int text_size) { //std::cout << "text_size: " << text_size << std::endl; thrust::device_ptr<const char> device_text(text) ; thrust::device_ptr<int> device_pos(pos) ; thrust::fill(device_pos, device_pos+text_size, 1); is_new_line pred; set_zero<int> op; thrust::transform_if(thrust::device, device_text, device_text + text_size, device_pos, op, pred) ; thrust::inclusive_scan_by_key(device_pos, device_pos + text_size, device_pos, device_pos); #if 0 using namespace std; thrust::copy(device_text, device_text+text_size, std::ostream_iterator<char>(std::cout, " ")); cout << endl << "-------------------------------------------------------------------" << endl; thrust::copy(device_pos, device_pos+text_size, std::ostream_iterator<int>(std::cout, " ")); #endif } //-------------------------------Below is Part II -------------------------------------------------------- #define N (2*2048) /*In TX2, # of threads per MP = 2048, ther are 2 MP */ #define TPB (1024) /*In TX2, # of threads per bock = 1024 */ __global__ void my_transform_if (const char *text, int *pos, int text_size) { int idx = threadIdx.x + blockIdx.x * blockDim.x ; if (idx >= text_size) return; if 
(text[idx] == '\n') { pos[idx] = 0; } else { pos[idx] = 1; } return ; } __global__ void my_inclusive_scan_by_key(int *pos, int text_size) { int idx = threadIdx.x + blockIdx.x * blockDim.x ; if (idx >= text_size) return; if (pos[idx] == 0) goto exit; if (idx-1>=0 && pos[idx-1] == 1) goto exit ; while (idx < text_size && pos[idx] !=0 ) { pos[idx] += pos[idx-1]; idx++; } exit: __syncthreads (); return ; } void CountPosition2(const char *text, int *pos, int text_size) { int Q = text_size / N; int R = text_size % N; int i = 0; // printf ("text_size: %d Q: %d R: %d \n", text_size, Q, R); // do transform_if - set '\n' corresponding pos to be zero for (i=0; i<Q; i++) hipLaunchKernelGGL(( my_transform_if) , dim3(N/TPB), dim3(TPB), 0, 0, text+i*N, pos+i*N, N); if (R>0) hipLaunchKernelGGL(( my_transform_if) , dim3(N/TPB), dim3(TPB), 0, 0, text+N*Q, pos+N*Q, R); Q = (text_size-1) / N; R = (text_size-1) % N; // printf ("text_size: %d Q: %d R: %d \n", (text_size-1), Q, R); // do prefix_sum - Count pos within a substring for (i=0; i<Q; i++) hipLaunchKernelGGL(( my_inclusive_scan_by_key) , dim3(N/TPB), dim3(TPB), 0, 0, (pos+1)+i*N, N); if (R>0) hipLaunchKernelGGL(( my_inclusive_scan_by_key) , dim3(N/TPB), dim3(TPB), 0, 0, (pos+1)+N*Q, R); #if 0 char *pchar = (char *)calloc (sizeof(char) , text_size); hipMemcpy (pchar, text, sizeof(char) * text_size, hipMemcpyDeviceToHost); int *pint = (int *)calloc (sizeof(int), text_size); hipMemcpy (pint, pos, sizeof(int)*text_size, hipMemcpyDeviceToHost); for (i=0; i< text_size; i++) printf ("%c:%d ", pchar[i], pint[i]); #endif }
5dd425a5e9eb93f1ac9c469b02913cd687205ca1.cu
#include "counting.h" #include <cstdio> #include <cassert> #include <thrust/scan.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } //-------------------------------Below is Part I --------------------------------------------------------- template <typename T> struct set_zero : public thrust::unary_function<T, int> { __host__ __device__ int operator()(T x) const {return 0;} }; struct is_new_line { __host__ __device__ bool operator()(char x) {return x=='\n';} }; void CountPosition1(const char *text, int *pos, int text_size) { //std::cout << "text_size: " << text_size << std::endl; thrust::device_ptr<const char> device_text(text) ; thrust::device_ptr<int> device_pos(pos) ; thrust::fill(device_pos, device_pos+text_size, 1); is_new_line pred; set_zero<int> op; thrust::transform_if(thrust::device, device_text, device_text + text_size, device_pos, op, pred) ; thrust::inclusive_scan_by_key(device_pos, device_pos + text_size, device_pos, device_pos); #if 0 using namespace std; thrust::copy(device_text, device_text+text_size, std::ostream_iterator<char>(std::cout, " ")); cout << endl << "-------------------------------------------------------------------" << endl; thrust::copy(device_pos, device_pos+text_size, std::ostream_iterator<int>(std::cout, " ")); #endif } //-------------------------------Below is Part II -------------------------------------------------------- #define N (2*2048) /*In TX2, # of threads per MP = 2048, ther are 2 MP */ #define TPB (1024) /*In TX2, # of threads per bock = 1024 */ __global__ void my_transform_if (const char *text, int *pos, int text_size) { int idx = threadIdx.x + blockIdx.x * blockDim.x ; if (idx >= text_size) return; if (text[idx] == '\n') { pos[idx] = 0; } else { pos[idx] = 1; } return ; } __global__ void 
my_inclusive_scan_by_key(int *pos, int text_size) { int idx = threadIdx.x + blockIdx.x * blockDim.x ; if (idx >= text_size) return; if (pos[idx] == 0) goto exit; if (idx-1>=0 && pos[idx-1] == 1) goto exit ; while (idx < text_size && pos[idx] !=0 ) { pos[idx] += pos[idx-1]; idx++; } exit: __syncthreads (); return ; } void CountPosition2(const char *text, int *pos, int text_size) { int Q = text_size / N; int R = text_size % N; int i = 0; // printf ("text_size: %d Q: %d R: %d \n", text_size, Q, R); // do transform_if - set '\n' corresponding pos to be zero for (i=0; i<Q; i++) my_transform_if <<<N/TPB, TPB>>> (text+i*N, pos+i*N, N); if (R>0) my_transform_if <<<N/TPB, TPB>>> (text+N*Q, pos+N*Q, R); Q = (text_size-1) / N; R = (text_size-1) % N; // printf ("text_size: %d Q: %d R: %d \n", (text_size-1), Q, R); // do prefix_sum - Count pos within a substring for (i=0; i<Q; i++) my_inclusive_scan_by_key <<<N/TPB, TPB>>> ((pos+1)+i*N, N); if (R>0) my_inclusive_scan_by_key <<<N/TPB, TPB>>> ((pos+1)+N*Q, R); #if 0 char *pchar = (char *)calloc (sizeof(char) , text_size); cudaMemcpy (pchar, text, sizeof(char) * text_size, cudaMemcpyDeviceToHost); int *pint = (int *)calloc (sizeof(int), text_size); cudaMemcpy (pint, pos, sizeof(int)*text_size, cudaMemcpyDeviceToHost); for (i=0; i< text_size; i++) printf ("%c:%d ", pchar[i], pint[i]); #endif }
479ad9cb58b895cac9efb19d5c3eaa12c8d8e3b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> //Utilizar `__global__` es lo mas parecido a declarar un `int main` pero en el device (GPU) __global__ void device_greetings(void){ //blockIdx.x representa el id del bloque en el que se encuentra. //blockDim.x representa la cantidad total de bloques de la ejecucin. //threadx.x representa el id del thread en el que se encuentra. printf("[DEVICE; BLOCK:%d; THREAD:%d] Hello world!\n",blockIdx.x, threadIdx.x); } int main (void){ //Mostramos un mensaje desde el host (CPU) printf("[HOST] Hello world!\n"); //Aqui es donde se envian llamados paralelos de la funcion `int main` del device: // <<<B,N>>>: donde B es la cantidad de bloques de hilos y N es la cantidad de thread por bloque. //En el siguiente ejemplo se ejecutaran 2 bloques con 10 hilos cada uno. // Es decir se mostraran 20 mensajes de `Hello world!` desde el device. hipLaunchKernelGGL(( device_greetings), dim3(2),dim3(10), 0, 0, ); //Para mostrar los mensajes del device (GPU), debemos realizar una especie de `barrier` // esperando a que todos los bloques y threads terminen. Retornara un mensaje de error en caso de error. hipDeviceSynchronize(); return 0; }
479ad9cb58b895cac9efb19d5c3eaa12c8d8e3b3.cu
#include <stdio.h> //Utilizar `__global__` es lo mas parecido a declarar un `int main` pero en el device (GPU) __global__ void device_greetings(void){ //blockIdx.x representa el id del bloque en el que se encuentra. //blockDim.x representa la cantidad total de bloques de la ejecución. //threadx.x representa el id del thread en el que se encuentra. printf("[DEVICE; BLOCK:%d; THREAD:%d] Hello world!\n",blockIdx.x, threadIdx.x); } int main (void){ //Mostramos un mensaje desde el host (CPU) printf("[HOST] Hello world!\n"); //Aqui es donde se envian llamados paralelos de la funcion `int main` del device: // <<<B,N>>>: donde B es la cantidad de bloques de hilos y N es la cantidad de thread por bloque. //En el siguiente ejemplo se ejecutaran 2 bloques con 10 hilos cada uno. // Es decir se mostraran 20 mensajes de `Hello world!` desde el device. device_greetings<<<2,10>>>(); //Para mostrar los mensajes del device (GPU), debemos realizar una especie de `barrier` // esperando a que todos los bloques y threads terminen. Retornara un mensaje de error en caso de error. cudaDeviceSynchronize(); return 0; }
9361dcf9b56cae9936b4f27e284f25ed6fb2c13f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaRGB.h" //------------------------------------------------------------------------------------------------------------------------- __global__ void RGBToRGBAf(uchar3* srcImage, float4* dstImage, uint32_t width, uint32_t height) { int x, y, pixel; x = (blockIdx.x * blockDim.x) + threadIdx.x; y = (blockIdx.y * blockDim.y) + threadIdx.y; pixel = y * width + x; if (x >= width) return; if (y >= height) return; // printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel); const float s = 1.0f; const uchar3 px = srcImage[pixel]; dstImage[pixel] = make_float4(px.x * s, px.y * s, px.z * s, 255.0f * s); } hipError_t cudaRGBToRGBAf( uchar3* srcDev, float4* destDev, size_t width, size_t height ) { if( !srcDev || !destDev ) return hipErrorInvalidDevicePointer; const dim3 blockDim(8,8,1); const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1); hipLaunchKernelGGL(( RGBToRGBAf), dim3(gridDim), dim3(blockDim), 0, 0, srcDev, destDev, width, height ); return CUDA(hipGetLastError()); }
9361dcf9b56cae9936b4f27e284f25ed6fb2c13f.cu
#include "cudaRGB.h" //------------------------------------------------------------------------------------------------------------------------- __global__ void RGBToRGBAf(uchar3* srcImage, float4* dstImage, uint32_t width, uint32_t height) { int x, y, pixel; x = (blockIdx.x * blockDim.x) + threadIdx.x; y = (blockIdx.y * blockDim.y) + threadIdx.y; pixel = y * width + x; if (x >= width) return; if (y >= height) return; // printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel); const float s = 1.0f; const uchar3 px = srcImage[pixel]; dstImage[pixel] = make_float4(px.x * s, px.y * s, px.z * s, 255.0f * s); } cudaError_t cudaRGBToRGBAf( uchar3* srcDev, float4* destDev, size_t width, size_t height ) { if( !srcDev || !destDev ) return cudaErrorInvalidDevicePointer; const dim3 blockDim(8,8,1); const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1); RGBToRGBAf<<<gridDim, blockDim>>>( srcDev, destDev, width, height ); return CUDA(cudaGetLastError()); }
705fa9b8655748b0eb41773a020ad71d0a433d19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstdlib> #include <cassert> #include <zlib.h> #include <png.h> #define MASK_N 2 #define MASK_X 5 #define MASK_Y 5 #define SCALE 8 unsigned char *device_s = NULL; unsigned char *device_t = NULL; __device__ int dev_mask[MASK_N][MASK_X][MASK_Y] = { {{ -1, -4, -6, -4, -1}, { -2, -8,-12, -8, -2}, { 0, 0, 0, 0, 0}, { 2, 8, 12, 8, 2}, { 1, 4, 6, 4, 1}}, {{ -1, -2, 0, 2, 1}, { -4, -8, 0, 8, 4}, { -6,-12, 0, 12, 6}, { -4, -8, 0, 8, 4}, { -1, -2, 0, 2, 1}} }; int read_png(const char* filename, unsigned char** image, unsigned* height, unsigned* width, unsigned* channels) { unsigned char sig[8]; FILE* infile; infile = fopen(filename, "rb"); fread(sig, 1, 8, infile); if (!png_check_sig(sig, 8)) return 1; /* bad signature */ png_structp png_ptr; png_infop info_ptr; png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); if (!png_ptr) return 4; /* out of memory */ info_ptr = png_create_info_struct(png_ptr); if (!info_ptr) { png_destroy_read_struct(&png_ptr, NULL, NULL); return 4; /* out of memory */ } png_init_io(png_ptr, infile); png_set_sig_bytes(png_ptr, 8); png_read_info(png_ptr, info_ptr); int bit_depth, color_type; png_get_IHDR(png_ptr, info_ptr, width, height, &bit_depth, &color_type, NULL, NULL, NULL); png_uint_32 i, rowbytes; png_bytep row_pointers[*height]; png_read_update_info(png_ptr, info_ptr); rowbytes = png_get_rowbytes(png_ptr, info_ptr); *channels = (int) png_get_channels(png_ptr, info_ptr); /* if ((*image = (unsigned char *) malloc(rowbytes * *height)) == NULL) { png_destroy_read_struct(&png_ptr, &info_ptr, NULL); return 3; }*/ hipHostMalloc(image, rowbytes * *height); for (i = 0; i < *height; ++i) row_pointers[i] = *image + i * rowbytes; png_read_image(png_ptr, row_pointers); png_read_end(png_ptr, NULL); return 0; } void write_png(const char* filename, png_bytep image, const unsigned height, const unsigned width, const 
unsigned channels) { FILE* fp = fopen(filename, "wb"); png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); png_infop info_ptr = png_create_info_struct(png_ptr); png_init_io(png_ptr, fp); png_set_IHDR(png_ptr, info_ptr, width, height, 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT); png_set_filter(png_ptr, 0, PNG_NO_FILTERS); png_write_info(png_ptr, info_ptr); png_set_compression_level(png_ptr, 1); png_bytep row_ptr[height]; for (int i = 0; i < height; ++ i) { row_ptr[i] = image + i * width * channels * sizeof(unsigned char); } png_write_image(png_ptr, row_ptr); png_write_end(png_ptr, NULL); png_destroy_write_struct(&png_ptr, &info_ptr); fclose(fp); } void __global__ sobel (unsigned char* s, unsigned char* t, unsigned height, unsigned width, unsigned channels,size_t pitch) { int x, y, i, v, u; int R, G, B; double val[MASK_N*3] = {0.0}; int adjustX, adjustY, xBound, yBound; __shared__ int mask[MASK_N][MASK_X][MASK_Y]; x = (blockIdx.x * blockDim.x + threadIdx.x)%MASK_N; y = (blockIdx.x * blockDim.x + threadIdx.x)/MASK_N%MASK_X; i = (blockIdx.x * blockDim.x + threadIdx.x)/MASK_N/MASK_X%MASK_Y; mask[x][y][i] = dev_mask[x][y][i]; __syncthreads();// wait for each thread to copy its elemene //for (y = 0; y < height; ++y) { y = blockIdx.x * blockDim.x + threadIdx.x + 2; if (y>=height + 2) return; for (x = 2; x < width+2; ++x) { for (i = 0; i < MASK_N; ++i) { adjustX = (MASK_X % 2) ? 1 : 0;//5 adjustY = (MASK_Y % 2) ? 
1 : 0;//5 xBound = MASK_X /2;//2 yBound = MASK_Y /2;//2 val[i*3+2] = 0.0; val[i*3+1] = 0.0; val[i*3] = 0.0; for (v = -yBound; v < yBound + adjustY; ++v) { for (u = -xBound; u < xBound + adjustX; ++u) { //if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) { R = s[pitch * (y+v) + channels*(x+u) + 2]; G = s[pitch * (y+v) + channels*(x+u) + 1]; B = s[pitch * (y+v) + channels*(x+u) + 0]; val[i*3+2] += R * mask[i][u + xBound][v + yBound]; val[i*3+1] += G * mask[i][u + xBound][v + yBound]; val[i*3+0] += B * mask[i][u + xBound][v + yBound]; //} } } } double totalR = 0.0; double totalG = 0.0; double totalB = 0.0; for (i = 0; i < MASK_N; ++i) { totalR += val[i * 3 + 2] * val[i * 3 + 2]; totalG += val[i * 3 + 1] * val[i * 3 + 1]; totalB += val[i * 3 + 0] * val[i * 3 + 0]; } totalR = sqrt(totalR) / SCALE; totalG = sqrt(totalG) / SCALE; totalB = sqrt(totalB) / SCALE; const unsigned char cR = (totalR > 255.0) ? 255 : totalR; const unsigned char cG = (totalG > 255.0) ? 255 : totalG; const unsigned char cB = (totalB > 255.0) ? 
255 : totalB; t[pitch * y + channels*x + 2] = cR; t[pitch * y + channels*x + 1] = cG; t[pitch * y + channels*x + 0] = cB; } //} } int main(int argc, char** argv) { assert(argc == 3); unsigned height, width, channels; unsigned char* host_s = NULL; read_png(argv[1], &host_s, &height, &width, &channels); //hipMalloc(&device_s, (height+4) * (width+4) * channels * sizeof(unsigned char)); size_t pitch; hipMallocPitch(&device_s, &pitch, (width+4) * sizeof(unsigned char)* channels, (height+4)); hipMallocPitch(&device_t, &pitch,(width+4) * sizeof(unsigned char)* channels, (height+4)); unsigned char* host_t = (unsigned char*) malloc(height * width * channels * sizeof(unsigned char)); //hipMemcpy(device_s, host_s, (height+4) * (width+4) * channels * sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy2D(device_s+2*pitch+2*channels, pitch, host_s, width * sizeof(unsigned char)* channels ,width * channels * sizeof(unsigned char), height, hipMemcpyHostToDevice); hipLaunchKernelGGL(( sobel), dim3((height/256) + 1), dim3(256), 0, 0, device_s, device_t, height, width, channels,pitch); //hipMemcpy(host_t, device_t, height * width * channels * sizeof(unsigned char), hipMemcpyDeviceToHost); hipMemcpy2D( host_t , width * sizeof(unsigned char)* channels, device_t+2*pitch+2*channels, pitch , width * channels * sizeof(unsigned char), height, hipMemcpyDeviceToHost); write_png(argv[2], host_t, height, width, channels); hipFree(device_s); hipFree(device_t); hipHostFree(host_s); free(host_t); return 0; }
705fa9b8655748b0eb41773a020ad71d0a433d19.cu
#include <iostream> #include <cstdlib> #include <cassert> #include <zlib.h> #include <png.h> #define MASK_N 2 #define MASK_X 5 #define MASK_Y 5 #define SCALE 8 unsigned char *device_s = NULL; unsigned char *device_t = NULL; __device__ int dev_mask[MASK_N][MASK_X][MASK_Y] = { {{ -1, -4, -6, -4, -1}, { -2, -8,-12, -8, -2}, { 0, 0, 0, 0, 0}, { 2, 8, 12, 8, 2}, { 1, 4, 6, 4, 1}}, {{ -1, -2, 0, 2, 1}, { -4, -8, 0, 8, 4}, { -6,-12, 0, 12, 6}, { -4, -8, 0, 8, 4}, { -1, -2, 0, 2, 1}} }; int read_png(const char* filename, unsigned char** image, unsigned* height, unsigned* width, unsigned* channels) { unsigned char sig[8]; FILE* infile; infile = fopen(filename, "rb"); fread(sig, 1, 8, infile); if (!png_check_sig(sig, 8)) return 1; /* bad signature */ png_structp png_ptr; png_infop info_ptr; png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); if (!png_ptr) return 4; /* out of memory */ info_ptr = png_create_info_struct(png_ptr); if (!info_ptr) { png_destroy_read_struct(&png_ptr, NULL, NULL); return 4; /* out of memory */ } png_init_io(png_ptr, infile); png_set_sig_bytes(png_ptr, 8); png_read_info(png_ptr, info_ptr); int bit_depth, color_type; png_get_IHDR(png_ptr, info_ptr, width, height, &bit_depth, &color_type, NULL, NULL, NULL); png_uint_32 i, rowbytes; png_bytep row_pointers[*height]; png_read_update_info(png_ptr, info_ptr); rowbytes = png_get_rowbytes(png_ptr, info_ptr); *channels = (int) png_get_channels(png_ptr, info_ptr); /* if ((*image = (unsigned char *) malloc(rowbytes * *height)) == NULL) { png_destroy_read_struct(&png_ptr, &info_ptr, NULL); return 3; }*/ cudaMallocHost(image, rowbytes * *height); for (i = 0; i < *height; ++i) row_pointers[i] = *image + i * rowbytes; png_read_image(png_ptr, row_pointers); png_read_end(png_ptr, NULL); return 0; } void write_png(const char* filename, png_bytep image, const unsigned height, const unsigned width, const unsigned channels) { FILE* fp = fopen(filename, "wb"); png_structp png_ptr = 
png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); png_infop info_ptr = png_create_info_struct(png_ptr); png_init_io(png_ptr, fp); png_set_IHDR(png_ptr, info_ptr, width, height, 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT); png_set_filter(png_ptr, 0, PNG_NO_FILTERS); png_write_info(png_ptr, info_ptr); png_set_compression_level(png_ptr, 1); png_bytep row_ptr[height]; for (int i = 0; i < height; ++ i) { row_ptr[i] = image + i * width * channels * sizeof(unsigned char); } png_write_image(png_ptr, row_ptr); png_write_end(png_ptr, NULL); png_destroy_write_struct(&png_ptr, &info_ptr); fclose(fp); } void __global__ sobel (unsigned char* s, unsigned char* t, unsigned height, unsigned width, unsigned channels,size_t pitch) { int x, y, i, v, u; int R, G, B; double val[MASK_N*3] = {0.0}; int adjustX, adjustY, xBound, yBound; __shared__ int mask[MASK_N][MASK_X][MASK_Y]; x = (blockIdx.x * blockDim.x + threadIdx.x)%MASK_N; y = (blockIdx.x * blockDim.x + threadIdx.x)/MASK_N%MASK_X; i = (blockIdx.x * blockDim.x + threadIdx.x)/MASK_N/MASK_X%MASK_Y; mask[x][y][i] = dev_mask[x][y][i]; __syncthreads();// wait for each thread to copy its elemene //for (y = 0; y < height; ++y) { y = blockIdx.x * blockDim.x + threadIdx.x + 2; if (y>=height + 2) return; for (x = 2; x < width+2; ++x) { for (i = 0; i < MASK_N; ++i) { adjustX = (MASK_X % 2) ? 1 : 0;//5 adjustY = (MASK_Y % 2) ? 
1 : 0;//5 xBound = MASK_X /2;//2 yBound = MASK_Y /2;//2 val[i*3+2] = 0.0; val[i*3+1] = 0.0; val[i*3] = 0.0; for (v = -yBound; v < yBound + adjustY; ++v) { for (u = -xBound; u < xBound + adjustX; ++u) { //if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) { R = s[pitch * (y+v) + channels*(x+u) + 2]; G = s[pitch * (y+v) + channels*(x+u) + 1]; B = s[pitch * (y+v) + channels*(x+u) + 0]; val[i*3+2] += R * mask[i][u + xBound][v + yBound]; val[i*3+1] += G * mask[i][u + xBound][v + yBound]; val[i*3+0] += B * mask[i][u + xBound][v + yBound]; //} } } } double totalR = 0.0; double totalG = 0.0; double totalB = 0.0; for (i = 0; i < MASK_N; ++i) { totalR += val[i * 3 + 2] * val[i * 3 + 2]; totalG += val[i * 3 + 1] * val[i * 3 + 1]; totalB += val[i * 3 + 0] * val[i * 3 + 0]; } totalR = sqrt(totalR) / SCALE; totalG = sqrt(totalG) / SCALE; totalB = sqrt(totalB) / SCALE; const unsigned char cR = (totalR > 255.0) ? 255 : totalR; const unsigned char cG = (totalG > 255.0) ? 255 : totalG; const unsigned char cB = (totalB > 255.0) ? 
255 : totalB; t[pitch * y + channels*x + 2] = cR; t[pitch * y + channels*x + 1] = cG; t[pitch * y + channels*x + 0] = cB; } //} } int main(int argc, char** argv) { assert(argc == 3); unsigned height, width, channels; unsigned char* host_s = NULL; read_png(argv[1], &host_s, &height, &width, &channels); //cudaMalloc(&device_s, (height+4) * (width+4) * channels * sizeof(unsigned char)); size_t pitch; cudaMallocPitch(&device_s, &pitch, (width+4) * sizeof(unsigned char)* channels, (height+4)); cudaMallocPitch(&device_t, &pitch,(width+4) * sizeof(unsigned char)* channels, (height+4)); unsigned char* host_t = (unsigned char*) malloc(height * width * channels * sizeof(unsigned char)); //cudaMemcpy(device_s, host_s, (height+4) * (width+4) * channels * sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy2D(device_s+2*pitch+2*channels, pitch, host_s, width * sizeof(unsigned char)* channels ,width * channels * sizeof(unsigned char), height, cudaMemcpyHostToDevice); sobel<<<(height/256) + 1, 256>>>(device_s, device_t, height, width, channels,pitch); //cudaMemcpy(host_t, device_t, height * width * channels * sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaMemcpy2D( host_t , width * sizeof(unsigned char)* channels, device_t+2*pitch+2*channels, pitch , width * channels * sizeof(unsigned char), height, cudaMemcpyDeviceToHost); write_png(argv[2], host_t, height, width, channels); cudaFree(device_s); cudaFree(device_t); cudaFreeHost(host_s); free(host_t); return 0; }
ea7fc75572e59d6172913129b78c148dccdfee35.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * */ // gdf_graph tests // Author: Alex Fender afender@nvidia.com #include "gtest/gtest.h" #include <cugraph.h> #include "test_utils.h" #include <string.h> #include <rmm_utils.h> TEST(gdf_edge_list, success) { hipStream_t stream{nullptr}; gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column col_src, col_dest, col_weights; col_src.dtype = GDF_INT32; col_src.valid = nullptr; col_src.null_count = 0; col_dest.dtype = GDF_INT32; col_dest.valid = nullptr; col_dest.null_count = 0; col_weights.dtype = GDF_FLOAT32; col_weights.valid = nullptr; col_weights.null_count = 0; size_t vertices = 0, edges = 0; char argv [1024] = "grmat --rmat_scale=20 --rmat_edgefactor=16 --device=0 --normalized --rmat_self_loops --quiet"; ASSERT_EQ(gdf_grmat_gen(argv, vertices, edges, &col_src, &col_dest, &col_weights), GDF_SUCCESS); std::vector<int> src_h(edges), dest_h(edges); std::vector<float> w_h(edges); hipMemcpy(&src_h[0], col_src.data, sizeof(int) * edges, hipMemcpyDeviceToHost); hipMemcpy(&dest_h[0], col_dest.data, sizeof(int) * edges, hipMemcpyDeviceToHost); hipMemcpy(&w_h[0], col_weights.data, sizeof(float) * edges, hipMemcpyDeviceToHost); ASSERT_EQ(gdf_edge_list_view(G.get(), &col_src, &col_dest, &col_weights),GDF_SUCCESS); std::vector<int> src2_h(edges), dest2_h(edges); std::vector<float> w2_h(edges); hipMemcpy(&src2_h[0], G.get()->edgeList->src_indices->data, sizeof(int) * edges, hipMemcpyDeviceToHost); hipMemcpy(&dest2_h[0], G.get()->edgeList->dest_indices->data, sizeof(int) * edges, 
hipMemcpyDeviceToHost); hipMemcpy(&w2_h[0], G.get()->edgeList->edge_data->data, sizeof(float) * edges, hipMemcpyDeviceToHost); ASSERT_EQ( eq(src_h,src2_h), 0); ASSERT_EQ( eq(dest_h,dest2_h), 0); ASSERT_EQ( eq(w_h,w2_h), 0); ALLOC_FREE_TRY(col_src.data, stream); ALLOC_FREE_TRY(col_dest.data, stream); ALLOC_FREE_TRY(col_weights.data, stream); } TEST(gdf_edge_list, success_no_weights) { hipStream_t stream{nullptr}; gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column col_src, col_dest; col_src.dtype = GDF_INT32; col_src.valid = nullptr; col_dest.dtype = GDF_INT32; col_dest.valid = nullptr; col_src.null_count = 0; col_dest.null_count = 0; size_t vertices = 0, edges = 0; char argv [1024] = "grmat --rmat_scale=20 --rmat_edgefactor=16 --device=0 --normalized --rmat_self_loops --quiet"; ASSERT_EQ(gdf_grmat_gen(argv, vertices, edges, &col_src, &col_dest, nullptr), GDF_SUCCESS); ASSERT_EQ(gdf_edge_list_view(G.get(), &col_src, &col_dest, nullptr),GDF_SUCCESS); ALLOC_FREE_TRY(col_src.data, stream); ALLOC_FREE_TRY(col_dest.data, stream); } TEST(gdf_edge_list, size_mismatch) { gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column_ptr col_src, col_dest, col_weights; std::vector<int> src_h={0, 0, 2, 2, 2, 3, 3, 4, 4, 5}, dest_h={1, 2, 0, 1, 4}; std::vector<float> w_h={0.50, 0.50, 0.33, 0.33, 0.33, 0.50, 0.50, 0.50, 0.50}; col_src = create_gdf_column(src_h); col_dest = create_gdf_column(dest_h); col_weights = create_gdf_column(w_h); ASSERT_EQ(gdf_edge_list_view(G.get(), col_src.get(), col_dest.get(), col_weights.get()),GDF_COLUMN_SIZE_MISMATCH); } TEST(gdf_edge_list, size_mismatch2) { gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column_ptr col_src, col_dest, col_weights; std::vector<int> src_h={0, 0, 2, 2, 2, 3, 3, 4, 4, 5}, dest_h={1, 2, 0, 1, 4, 4, 5, 3, 5, 3}; std::vector<float> w_h={0.50, 0.50, 0.33, 0.33, 0.33, 0.50, 0.50, 0.50}; col_src = create_gdf_column(src_h); col_dest = create_gdf_column(dest_h); col_weights = create_gdf_column(w_h); 
ASSERT_EQ(gdf_edge_list_view(G.get(), col_src.get(), col_dest.get(), col_weights.get()),GDF_COLUMN_SIZE_MISMATCH); } TEST(gdf_edge_list, wrong_type) { gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column_ptr col_src, col_dest; std::vector<float> src_h={0.0, 0.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0}, dest_h={1.0, 2.0, 0.0, 1.0, 4.0, 4.0, 5.0, 3.0, 5.0, 3.0}; col_src = create_gdf_column(src_h); col_dest = create_gdf_column(dest_h); ASSERT_EQ(gdf_edge_list_view(G.get(), col_src.get(), col_dest.get(), nullptr),GDF_UNSUPPORTED_DTYPE); } TEST(gdf_adj_list, success) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<float> w_h = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column_ptr col_off, col_ind, col_w; col_off = create_gdf_column(off_h); col_ind = create_gdf_column(ind_h); col_w = create_gdf_column(w_h); ASSERT_EQ(gdf_adj_list_view(G.get(), col_off.get(), col_ind.get(), col_w.get()),GDF_SUCCESS); std::vector<int> off2_h(off_h.size()), ind2_h(ind_h.size()); std::vector<float> w2_h(w_h.size()); hipMemcpy(&off2_h[0], G.get()->adjList->offsets->data, sizeof(int) * off_h.size(), hipMemcpyDeviceToHost); hipMemcpy(&ind2_h[0], G.get()->adjList->indices->data, sizeof(int) * ind_h.size(), hipMemcpyDeviceToHost); hipMemcpy(&w2_h[0], G.get()->adjList->edge_data->data, sizeof(float) * w_h.size(), hipMemcpyDeviceToHost); ASSERT_EQ( eq(off_h,off2_h), 0); ASSERT_EQ( eq(ind_h,ind2_h), 0); ASSERT_EQ( eq(w_h,w2_h), 0); } TEST(gdf_adj_list, success_no_weights) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column_ptr col_off, col_ind; col_off = create_gdf_column(off_h); col_ind = 
create_gdf_column(ind_h); ASSERT_EQ(gdf_adj_list_view(G.get(), col_off.get(), col_ind.get(), nullptr),GDF_SUCCESS); std::vector<int> off2_h(off_h.size()), ind2_h(ind_h.size()); hipMemcpy(&off2_h[0], G.get()->adjList->offsets->data, sizeof(int) * off_h.size(), hipMemcpyDeviceToHost); hipMemcpy(&ind2_h[0], G.get()->adjList->indices->data, sizeof(int) * ind_h.size(), hipMemcpyDeviceToHost); ASSERT_EQ( eq(off_h,off2_h), 0); ASSERT_EQ( eq(ind_h,ind2_h), 0); } TEST(gdf_graph_properties, success) { gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_graph_properties *prop = new gdf_graph_properties; ASSERT_FALSE(prop->directed); ASSERT_FALSE(prop->weighted); ASSERT_FALSE(prop->multigraph); ASSERT_FALSE(prop->bipartite); ASSERT_FALSE(prop->tree); prop->directed = true; prop->weighted = true; prop->tree = false; ASSERT_TRUE(prop->directed); ASSERT_TRUE(prop->weighted); ASSERT_FALSE(prop->multigraph); ASSERT_FALSE(prop->bipartite); ASSERT_FALSE(prop->tree); } TEST(gdf_delete_adjacency_list, success1) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<float> w_h = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; gdf_graph G; gdf_column col_off, col_ind, col_w; //size_t free, free2, total; //hipMemGetInfo(&free, &total); create_gdf_column(off_h, &col_off); create_gdf_column(ind_h, &col_ind); create_gdf_column(w_h, &col_w); ASSERT_EQ(gdf_adj_list_view(&G, &col_off, &col_ind, &col_w),GDF_SUCCESS); //hipMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_delete_adj_list(&G),GDF_SUCCESS); //hipMemGetInfo(&free2, &total); //EXPECT_EQ(free,free2); } TEST(gdf_delete_adjacency_list, success2) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<float> w_h = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; gdf_graph *G = new gdf_graph; gdf_column *col_off = new gdf_column, *col_ind = new gdf_column, *col_w = new gdf_column; //size_t free, free2, total; //hipMemGetInfo(&free, &total); create_gdf_column(off_h, col_off); create_gdf_column(ind_h, col_ind); create_gdf_column(w_h, col_w); ASSERT_EQ(gdf_adj_list_view(G, col_off, col_ind, col_w),GDF_SUCCESS); //hipMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_delete_adj_list(G),GDF_SUCCESS); //hipMemGetInfo(&free2, &total); //EXPECT_EQ(free,free2); delete G; delete col_off; delete col_ind; delete col_w; } TEST(gdf_delete_edge_list, success1) { std::vector<int> src_h={0, 0, 2, 2, 2, 3, 3, 4, 4, 5}, dest_h={1, 2, 0, 1, 4, 4, 5, 3, 5, 3}; std::vector<float> w_h={0.50, 0.50, 0.33, 0.33, 0.33, 0.50, 0.50, 0.50, 0.50, 1.00}; gdf_graph G ; gdf_column col_src, col_dest, col_w; //size_t free, free2, total; //hipMemGetInfo(&free, &total); create_gdf_column(src_h, &col_src); create_gdf_column(dest_h, &col_dest); create_gdf_column(w_h, &col_w); ASSERT_EQ(gdf_edge_list_view(&G, &col_src, &col_dest, &col_w),GDF_SUCCESS); //hipMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_delete_edge_list(&G),GDF_SUCCESS); //hipMemGetInfo(&free2, &total); 
//EXPECT_EQ(free,free2); } TEST(gdf_delete_edge_list, success2) { std::vector<int> src_h={0, 0, 2, 2, 2, 3, 3, 4, 4, 5}, dest_h={1, 2, 0, 1, 4, 4, 5, 3, 5, 3}; std::vector<float> w_h={0.50, 0.50, 0.33, 0.33, 0.33, 0.50, 0.50, 0.50, 0.50, 1.00}; gdf_graph *G = new gdf_graph; gdf_column *col_src = new gdf_column, *col_dest = new gdf_column, *col_w = new gdf_column; //size_t free, free2, total; //hipMemGetInfo(&free, &total); create_gdf_column(src_h, col_src); create_gdf_column(dest_h, col_dest); create_gdf_column(w_h, col_w); ASSERT_EQ(gdf_edge_list_view(G, col_src, col_dest, col_w),GDF_SUCCESS); //hipMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_delete_edge_list(G),GDF_SUCCESS); //hipMemGetInfo(&free2, &total); //EXPECT_EQ(free,free2); delete G; delete col_src; delete col_dest; delete col_w; } TEST(gdf_graph, gdf_add_transposed_adj_list) { std::vector<int> src_h={0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14, 14, 15, 15, 18, 18, 19, 20, 20, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 25, 26, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 2, 3, 7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 13, 27, 28, 32, 7, 12, 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33, 33, 32, 33, 32, 33, 32, 33, 33, 32, 33, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 31, 29, 33, 33, 31, 33, 32, 33, 32, 33, 32, 33, 33}; std::vector<int> dest_h={1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 2, 3, 7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 13, 27, 28, 32, 7, 12, 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33, 33, 32, 33, 32, 33, 32, 33, 33, 32, 33, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 31, 29, 33, 33, 31, 33, 32, 33, 32, 33, 32, 33, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14, 14, 15, 15, 18, 18, 19, 20, 20, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 25, 26, 
26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32}; gdf_graph *G = new gdf_graph; gdf_column *col_src = new gdf_column, *col_dest = new gdf_column; //size_t free, free2, free3, free4, total; //hipMemGetInfo(&free, &total); create_gdf_column(src_h, col_src); create_gdf_column(dest_h, col_dest); //hipMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_edge_list_view(G, col_src, col_dest, nullptr),GDF_SUCCESS); //hipMemGetInfo(&free3, &total); //EXPECT_EQ(free2,free3); //EXPECT_NE(free,free3); ASSERT_EQ(gdf_add_transposed_adj_list(G),GDF_SUCCESS); //this check doen't work on small case (false positive) //hipMemGetInfo(&free3, &total); //EXPECT_NE(free3,free2); std::vector<int> off_h(G->transposedAdjList->offsets->size ), ind_h(G->transposedAdjList->indices->size); hipMemcpy(&off_h[0], G->transposedAdjList->offsets->data, sizeof(int) * G->transposedAdjList->offsets->size, hipMemcpyDeviceToHost); hipMemcpy(&ind_h[0], G->transposedAdjList->indices->data, sizeof(int) * G->transposedAdjList->indices->size, hipMemcpyDeviceToHost); size_t zero = 0; EXPECT_GT(off_h.size(), zero); EXPECT_GT(ind_h.size(), zero); EXPECT_EQ(off_h.size()-2, (size_t)(*(std::max_element(ind_h.begin(), ind_h.end())))); EXPECT_EQ(ind_h.size(), (size_t)off_h.back()); std::sort (ind_h.begin(), ind_h.end()); std::sort (src_h.begin(), src_h.end()); EXPECT_EQ( eq(ind_h,src_h), 0); delete G; //hipMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free2); //EXPECT_NE(free4,free); gdf_col_delete(col_src); gdf_col_delete(col_dest); //hipMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free); } TEST(gdf_graph, gdf_add_adjList) { std::vector<int> src_h={0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14, 14, 15, 15, 18, 18, 19, 20, 20, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 25, 26, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 2, 3, 7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 
13, 27, 28, 32, 7, 12, 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33, 33, 32, 33, 32, 33, 32, 33, 33, 32, 33, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 31, 29, 33, 33, 31, 33, 32, 33, 32, 33, 32, 33, 33}; std::vector<int> dest_h={1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 2, 3, 7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 13, 27, 28, 32, 7, 12, 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33, 33, 32, 33, 32, 33, 32, 33, 33, 32, 33, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 31, 29, 33, 33, 31, 33, 32, 33, 32, 33, 32, 33, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14, 14, 15, 15, 18, 18, 19, 20, 20, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 25, 26, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32}; std::vector<int> off_ref_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; gdf_graph *G = new gdf_graph; gdf_column *col_src = new gdf_column, *col_dest = new gdf_column; //size_t free, free2, free3, free4, total; //hipMemGetInfo(&free, &total); create_gdf_column(src_h, col_src); create_gdf_column(dest_h, col_dest); //hipMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_edge_list_view(G, col_src, col_dest, nullptr),GDF_SUCCESS); //hipMemGetInfo(&free3, &total); //EXPECT_EQ(free2,free3); //EXPECT_NE(free,free3); ASSERT_EQ(gdf_add_adj_list(G),GDF_SUCCESS); //this check doen't work on small case (false positive) //hipMemGetInfo(&free3, &total); //EXPECT_NE(free3,free2); std::vector<int> off_h(G->adjList->offsets->size ), ind_h(G->adjList->indices->size); hipMemcpy(&off_h[0], G->adjList->offsets->data, sizeof(int) * G->adjList->offsets->size, hipMemcpyDeviceToHost); hipMemcpy(&ind_h[0], G->adjList->indices->data, sizeof(int) * G->adjList->indices->size, hipMemcpyDeviceToHost); size_t zero = 0; EXPECT_GT(off_h.size(), zero); EXPECT_GT(ind_h.size(), zero); EXPECT_EQ(off_h.size()-2, 
(size_t)(*(std::max_element(ind_h.begin(), ind_h.end())))); EXPECT_EQ(ind_h.size(), (size_t)off_h.back()); std::sort (ind_h.begin(), ind_h.end()); std::sort (dest_h.begin(), dest_h.end()); EXPECT_EQ( eq(ind_h,dest_h), 0); EXPECT_EQ( eq(off_h,off_ref_h), 0); delete G; //hipMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free2); //EXPECT_NE(free4,free); gdf_col_delete(col_src); gdf_col_delete(col_dest); //hipMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free); } void offsets2indices(std::vector<int> &offsets, std::vector<int> &indices) { for (int i = 0; i < (int)offsets.size()-1; ++i) for (int j = offsets[i]; j < offsets[i+1]; ++j) indices[j] = i; } TEST(gdf_graph, gdf_add_edge_list) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<float> w_h = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; gdf_graph *G = new gdf_graph; gdf_column *col_off = new gdf_column, *col_ind = new gdf_column, *col_w = new gdf_column; create_gdf_column(off_h, col_off); create_gdf_column(ind_h, col_ind); create_gdf_column(w_h, col_w); ASSERT_EQ(gdf_adj_list_view(G, col_off, col_ind, col_w),GDF_SUCCESS); ASSERT_EQ(gdf_add_edge_list(G),GDF_SUCCESS); std::vector<int> src_h(ind_h.size()), src2_h(ind_h.size()), dest2_h(ind_h.size()); std::vector<float> w2_h(w_h.size()); hipMemcpy(&src2_h[0], G->edgeList->src_indices->data, sizeof(int) * ind_h.size(), hipMemcpyDeviceToHost); hipMemcpy(&dest2_h[0], G->edgeList->dest_indices->data, sizeof(int) * ind_h.size(), hipMemcpyDeviceToHost); hipMemcpy(&w2_h[0], G->edgeList->edge_data->data, sizeof(float) * w_h.size(), hipMemcpyDeviceToHost); offsets2indices(off_h, src_h); ASSERT_LE(*(std::max_element(src2_h.begin(), src2_h.end())),(int)off_h.size()-1); ASSERT_GE(*(std::min_element(src2_h.begin(), src2_h.end())),off_h.front()); ASSERT_EQ( eq(src_h,src2_h), 0); ASSERT_EQ( eq(ind_h,dest2_h), 0); ASSERT_EQ( eq(w_h,w2_h), 0); delete G; gdf_col_delete(col_off); gdf_col_delete(col_ind); gdf_col_delete(col_w); } TEST(gdf_graph, get_vertex_identifiers) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 
33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<int> idx_h(off_h.size()-1), idx2_h(off_h.size()-1); gdf_graph *G = new gdf_graph; gdf_column *col_off = new gdf_column, *col_ind = new gdf_column, *col_idx = new gdf_column; create_gdf_column(off_h, col_off); create_gdf_column(ind_h, col_ind); create_gdf_column(idx2_h, col_idx); ASSERT_EQ(gdf_adj_list_view(G, col_off, col_ind, nullptr),GDF_SUCCESS); ASSERT_EQ(G->adjList->get_vertex_identifiers(col_idx),GDF_SUCCESS); hipMemcpy(&idx2_h[0], col_idx->data, sizeof(int) * col_idx->size, hipMemcpyDeviceToHost); std::generate(idx_h.begin(), idx_h.end(), [n = 0]() mutable {return n++;}); ASSERT_EQ( eq(idx_h,idx2_h), 0); delete G; gdf_col_delete(col_off); gdf_col_delete(col_ind); gdf_col_delete(col_idx); } TEST(gdf_graph, get_source_indices) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<int> src_h(ind_h.size()), src2_h(ind_h.size()); gdf_graph *G = new gdf_graph; gdf_column *col_off = new 
gdf_column, *col_ind = new gdf_column, *col_src = new gdf_column; create_gdf_column(off_h, col_off); create_gdf_column(ind_h, col_ind); create_gdf_column(src2_h, col_src); ASSERT_EQ(gdf_adj_list_view(G, col_off, col_ind, nullptr),GDF_SUCCESS); ASSERT_EQ(G->adjList->get_source_indices(col_src),GDF_SUCCESS); hipMemcpy(&src2_h[0], col_src->data, sizeof(int) * col_src->size, hipMemcpyDeviceToHost); offsets2indices(off_h, src_h); ASSERT_EQ( eq(src_h,src2_h), 0); delete G; gdf_col_delete(col_off); gdf_col_delete(col_ind); gdf_col_delete(col_src); } TEST(gdf_graph, memory) { gdf_graph *G = new gdf_graph; gdf_column col_src, col_dest; col_src.dtype = GDF_INT32; col_src.valid = nullptr; col_dest.dtype = GDF_INT32; col_dest.valid = nullptr; col_src.null_count = 0; col_dest.null_count = 0; //size_t free, free2, free3, free4_, free4, total; //hipMemGetInfo(&free, &total); size_t vertices = 0, edges = 0; char argv[1024] = "grmat --rmat_scale=23 --rmat_edgefactor=16 --device=0 --normalized --rmat_self_loops --quiet"; ASSERT_EQ(gdf_grmat_gen(argv, vertices, edges, &col_src, &col_dest, nullptr), GDF_SUCCESS); //hipMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_edge_list_view(G, &col_src, &col_dest, nullptr),GDF_SUCCESS); //hipMemGetInfo(&free3, &total); //EXPECT_EQ(free2,free3); //EXPECT_NE(free,free3); ASSERT_EQ(gdf_add_transposed_adj_list(G),GDF_SUCCESS); //this check doen't work on small case (false positive) //hipMemGetInfo(&free4_, &total); //EXPECT_NE(free4_,free2); ASSERT_EQ(gdf_add_adj_list(G),GDF_SUCCESS); ASSERT_EQ(gdf_delete_adj_list(G),GDF_SUCCESS); //hipMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free4_); //EXPECT_NE(free4,free2); delete G; //hipMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free3); //EXPECT_NE(free4,free); hipStream_t stream{nullptr}; ALLOC_FREE_TRY(col_src.data, stream); ALLOC_FREE_TRY(col_dest.data, stream); //hipMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free); } TEST(gdf_graph, gdf_column_overhead) { size_t sz = 100000000; 
std::vector<int> src_h(sz,1); std::vector<int> dest_h(sz,1); //size_t free, free2, free3, total; //hipMemGetInfo(&free, &total); gdf_graph *G = new gdf_graph; gdf_column *col_src = new gdf_column, *col_dest = new gdf_column; create_gdf_column(src_h, col_src); create_gdf_column(dest_h, col_dest); //hipMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); // check that gdf_column_overhead < 5 per cent //EXPECT_LT(free-free2, 2*sz*sizeof(int)*1.05); ASSERT_EQ(gdf_edge_list_view(G, col_src, col_dest, nullptr),GDF_SUCCESS); //hipMemGetInfo(&free3, &total); //EXPECT_EQ(free2,free3); //EXPECT_NE(free,free3); delete G; gdf_col_delete(col_src); gdf_col_delete(col_dest); } int main(int argc, char **argv) { srand(42); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
ea7fc75572e59d6172913129b78c148dccdfee35.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * */ // gdf_graph tests // Author: Alex Fender afender@nvidia.com #include "gtest/gtest.h" #include <cugraph.h> #include "test_utils.h" #include <string.h> #include <rmm_utils.h> TEST(gdf_edge_list, success) { cudaStream_t stream{nullptr}; gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column col_src, col_dest, col_weights; col_src.dtype = GDF_INT32; col_src.valid = nullptr; col_src.null_count = 0; col_dest.dtype = GDF_INT32; col_dest.valid = nullptr; col_dest.null_count = 0; col_weights.dtype = GDF_FLOAT32; col_weights.valid = nullptr; col_weights.null_count = 0; size_t vertices = 0, edges = 0; char argv [1024] = "grmat --rmat_scale=20 --rmat_edgefactor=16 --device=0 --normalized --rmat_self_loops --quiet"; ASSERT_EQ(gdf_grmat_gen(argv, vertices, edges, &col_src, &col_dest, &col_weights), GDF_SUCCESS); std::vector<int> src_h(edges), dest_h(edges); std::vector<float> w_h(edges); cudaMemcpy(&src_h[0], col_src.data, sizeof(int) * edges, cudaMemcpyDeviceToHost); cudaMemcpy(&dest_h[0], col_dest.data, sizeof(int) * edges, cudaMemcpyDeviceToHost); cudaMemcpy(&w_h[0], col_weights.data, sizeof(float) * edges, cudaMemcpyDeviceToHost); ASSERT_EQ(gdf_edge_list_view(G.get(), &col_src, &col_dest, &col_weights),GDF_SUCCESS); std::vector<int> src2_h(edges), dest2_h(edges); std::vector<float> w2_h(edges); cudaMemcpy(&src2_h[0], G.get()->edgeList->src_indices->data, sizeof(int) * edges, cudaMemcpyDeviceToHost); cudaMemcpy(&dest2_h[0], G.get()->edgeList->dest_indices->data, sizeof(int) * edges, cudaMemcpyDeviceToHost); cudaMemcpy(&w2_h[0], 
G.get()->edgeList->edge_data->data, sizeof(float) * edges, cudaMemcpyDeviceToHost); ASSERT_EQ( eq(src_h,src2_h), 0); ASSERT_EQ( eq(dest_h,dest2_h), 0); ASSERT_EQ( eq(w_h,w2_h), 0); ALLOC_FREE_TRY(col_src.data, stream); ALLOC_FREE_TRY(col_dest.data, stream); ALLOC_FREE_TRY(col_weights.data, stream); } TEST(gdf_edge_list, success_no_weights) { cudaStream_t stream{nullptr}; gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column col_src, col_dest; col_src.dtype = GDF_INT32; col_src.valid = nullptr; col_dest.dtype = GDF_INT32; col_dest.valid = nullptr; col_src.null_count = 0; col_dest.null_count = 0; size_t vertices = 0, edges = 0; char argv [1024] = "grmat --rmat_scale=20 --rmat_edgefactor=16 --device=0 --normalized --rmat_self_loops --quiet"; ASSERT_EQ(gdf_grmat_gen(argv, vertices, edges, &col_src, &col_dest, nullptr), GDF_SUCCESS); ASSERT_EQ(gdf_edge_list_view(G.get(), &col_src, &col_dest, nullptr),GDF_SUCCESS); ALLOC_FREE_TRY(col_src.data, stream); ALLOC_FREE_TRY(col_dest.data, stream); } TEST(gdf_edge_list, size_mismatch) { gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column_ptr col_src, col_dest, col_weights; std::vector<int> src_h={0, 0, 2, 2, 2, 3, 3, 4, 4, 5}, dest_h={1, 2, 0, 1, 4}; std::vector<float> w_h={0.50, 0.50, 0.33, 0.33, 0.33, 0.50, 0.50, 0.50, 0.50}; col_src = create_gdf_column(src_h); col_dest = create_gdf_column(dest_h); col_weights = create_gdf_column(w_h); ASSERT_EQ(gdf_edge_list_view(G.get(), col_src.get(), col_dest.get(), col_weights.get()),GDF_COLUMN_SIZE_MISMATCH); } TEST(gdf_edge_list, size_mismatch2) { gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column_ptr col_src, col_dest, col_weights; std::vector<int> src_h={0, 0, 2, 2, 2, 3, 3, 4, 4, 5}, dest_h={1, 2, 0, 1, 4, 4, 5, 3, 5, 3}; std::vector<float> w_h={0.50, 0.50, 0.33, 0.33, 0.33, 0.50, 0.50, 0.50}; col_src = create_gdf_column(src_h); col_dest = create_gdf_column(dest_h); col_weights = create_gdf_column(w_h); ASSERT_EQ(gdf_edge_list_view(G.get(), col_src.get(), 
col_dest.get(), col_weights.get()),GDF_COLUMN_SIZE_MISMATCH); } TEST(gdf_edge_list, wrong_type) { gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column_ptr col_src, col_dest; std::vector<float> src_h={0.0, 0.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0}, dest_h={1.0, 2.0, 0.0, 1.0, 4.0, 4.0, 5.0, 3.0, 5.0, 3.0}; col_src = create_gdf_column(src_h); col_dest = create_gdf_column(dest_h); ASSERT_EQ(gdf_edge_list_view(G.get(), col_src.get(), col_dest.get(), nullptr),GDF_UNSUPPORTED_DTYPE); } TEST(gdf_adj_list, success) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<float> w_h = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column_ptr col_off, col_ind, col_w; col_off = create_gdf_column(off_h); col_ind = create_gdf_column(ind_h); col_w = create_gdf_column(w_h); ASSERT_EQ(gdf_adj_list_view(G.get(), col_off.get(), col_ind.get(), col_w.get()),GDF_SUCCESS); std::vector<int> off2_h(off_h.size()), ind2_h(ind_h.size()); std::vector<float> w2_h(w_h.size()); cudaMemcpy(&off2_h[0], G.get()->adjList->offsets->data, sizeof(int) * off_h.size(), cudaMemcpyDeviceToHost); cudaMemcpy(&ind2_h[0], G.get()->adjList->indices->data, sizeof(int) * ind_h.size(), cudaMemcpyDeviceToHost); cudaMemcpy(&w2_h[0], G.get()->adjList->edge_data->data, sizeof(float) * w_h.size(), cudaMemcpyDeviceToHost); ASSERT_EQ( eq(off_h,off2_h), 0); ASSERT_EQ( eq(ind_h,ind2_h), 0); ASSERT_EQ( eq(w_h,w2_h), 0); } TEST(gdf_adj_list, success_no_weights) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_column_ptr col_off, col_ind; col_off = create_gdf_column(off_h); col_ind = create_gdf_column(ind_h); 
ASSERT_EQ(gdf_adj_list_view(G.get(), col_off.get(), col_ind.get(), nullptr),GDF_SUCCESS); std::vector<int> off2_h(off_h.size()), ind2_h(ind_h.size()); cudaMemcpy(&off2_h[0], G.get()->adjList->offsets->data, sizeof(int) * off_h.size(), cudaMemcpyDeviceToHost); cudaMemcpy(&ind2_h[0], G.get()->adjList->indices->data, sizeof(int) * ind_h.size(), cudaMemcpyDeviceToHost); ASSERT_EQ( eq(off_h,off2_h), 0); ASSERT_EQ( eq(ind_h,ind2_h), 0); } TEST(gdf_graph_properties, success) { gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; gdf_graph_properties *prop = new gdf_graph_properties; ASSERT_FALSE(prop->directed); ASSERT_FALSE(prop->weighted); ASSERT_FALSE(prop->multigraph); ASSERT_FALSE(prop->bipartite); ASSERT_FALSE(prop->tree); prop->directed = true; prop->weighted = true; prop->tree = false; ASSERT_TRUE(prop->directed); ASSERT_TRUE(prop->weighted); ASSERT_FALSE(prop->multigraph); ASSERT_FALSE(prop->bipartite); ASSERT_FALSE(prop->tree); } TEST(gdf_delete_adjacency_list, success1) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<float> w_h = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; gdf_graph G; gdf_column col_off, col_ind, col_w; //size_t free, free2, total; //cudaMemGetInfo(&free, &total); create_gdf_column(off_h, &col_off); create_gdf_column(ind_h, &col_ind); create_gdf_column(w_h, &col_w); ASSERT_EQ(gdf_adj_list_view(&G, &col_off, &col_ind, &col_w),GDF_SUCCESS); //cudaMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_delete_adj_list(&G),GDF_SUCCESS); //cudaMemGetInfo(&free2, &total); //EXPECT_EQ(free,free2); } TEST(gdf_delete_adjacency_list, success2) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<float> w_h = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; gdf_graph *G = new gdf_graph; gdf_column *col_off = new gdf_column, *col_ind = new gdf_column, *col_w = new gdf_column; //size_t free, free2, total; //cudaMemGetInfo(&free, &total); create_gdf_column(off_h, col_off); create_gdf_column(ind_h, col_ind); create_gdf_column(w_h, col_w); ASSERT_EQ(gdf_adj_list_view(G, col_off, col_ind, col_w),GDF_SUCCESS); //cudaMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_delete_adj_list(G),GDF_SUCCESS); //cudaMemGetInfo(&free2, &total); //EXPECT_EQ(free,free2); delete G; delete col_off; delete col_ind; delete col_w; } TEST(gdf_delete_edge_list, success1) { std::vector<int> src_h={0, 0, 2, 2, 2, 3, 3, 4, 4, 5}, dest_h={1, 2, 0, 1, 4, 4, 5, 3, 5, 3}; std::vector<float> w_h={0.50, 0.50, 0.33, 0.33, 0.33, 0.50, 0.50, 0.50, 0.50, 1.00}; gdf_graph G ; gdf_column col_src, col_dest, col_w; //size_t free, free2, total; //cudaMemGetInfo(&free, &total); create_gdf_column(src_h, &col_src); create_gdf_column(dest_h, &col_dest); create_gdf_column(w_h, &col_w); ASSERT_EQ(gdf_edge_list_view(&G, &col_src, &col_dest, &col_w),GDF_SUCCESS); //cudaMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_delete_edge_list(&G),GDF_SUCCESS); //cudaMemGetInfo(&free2, &total); //EXPECT_EQ(free,free2); } 
TEST(gdf_delete_edge_list, success2) { std::vector<int> src_h={0, 0, 2, 2, 2, 3, 3, 4, 4, 5}, dest_h={1, 2, 0, 1, 4, 4, 5, 3, 5, 3}; std::vector<float> w_h={0.50, 0.50, 0.33, 0.33, 0.33, 0.50, 0.50, 0.50, 0.50, 1.00}; gdf_graph *G = new gdf_graph; gdf_column *col_src = new gdf_column, *col_dest = new gdf_column, *col_w = new gdf_column; //size_t free, free2, total; //cudaMemGetInfo(&free, &total); create_gdf_column(src_h, col_src); create_gdf_column(dest_h, col_dest); create_gdf_column(w_h, col_w); ASSERT_EQ(gdf_edge_list_view(G, col_src, col_dest, col_w),GDF_SUCCESS); //cudaMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_delete_edge_list(G),GDF_SUCCESS); //cudaMemGetInfo(&free2, &total); //EXPECT_EQ(free,free2); delete G; delete col_src; delete col_dest; delete col_w; } TEST(gdf_graph, gdf_add_transposed_adj_list) { std::vector<int> src_h={0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14, 14, 15, 15, 18, 18, 19, 20, 20, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 25, 26, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 2, 3, 7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 13, 27, 28, 32, 7, 12, 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33, 33, 32, 33, 32, 33, 32, 33, 33, 32, 33, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 31, 29, 33, 33, 31, 33, 32, 33, 32, 33, 32, 33, 33}; std::vector<int> dest_h={1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 2, 3, 7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 13, 27, 28, 32, 7, 12, 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33, 33, 32, 33, 32, 33, 32, 33, 33, 32, 33, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 31, 29, 33, 33, 31, 33, 32, 33, 32, 33, 32, 33, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14, 14, 15, 15, 18, 18, 19, 20, 20, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 25, 26, 26, 27, 28, 28, 29, 29, 
30, 30, 31, 31, 32}; gdf_graph *G = new gdf_graph; gdf_column *col_src = new gdf_column, *col_dest = new gdf_column; //size_t free, free2, free3, free4, total; //cudaMemGetInfo(&free, &total); create_gdf_column(src_h, col_src); create_gdf_column(dest_h, col_dest); //cudaMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_edge_list_view(G, col_src, col_dest, nullptr),GDF_SUCCESS); //cudaMemGetInfo(&free3, &total); //EXPECT_EQ(free2,free3); //EXPECT_NE(free,free3); ASSERT_EQ(gdf_add_transposed_adj_list(G),GDF_SUCCESS); //this check doen't work on small case (false positive) //cudaMemGetInfo(&free3, &total); //EXPECT_NE(free3,free2); std::vector<int> off_h(G->transposedAdjList->offsets->size ), ind_h(G->transposedAdjList->indices->size); cudaMemcpy(&off_h[0], G->transposedAdjList->offsets->data, sizeof(int) * G->transposedAdjList->offsets->size, cudaMemcpyDeviceToHost); cudaMemcpy(&ind_h[0], G->transposedAdjList->indices->data, sizeof(int) * G->transposedAdjList->indices->size, cudaMemcpyDeviceToHost); size_t zero = 0; EXPECT_GT(off_h.size(), zero); EXPECT_GT(ind_h.size(), zero); EXPECT_EQ(off_h.size()-2, (size_t)(*(std::max_element(ind_h.begin(), ind_h.end())))); EXPECT_EQ(ind_h.size(), (size_t)off_h.back()); std::sort (ind_h.begin(), ind_h.end()); std::sort (src_h.begin(), src_h.end()); EXPECT_EQ( eq(ind_h,src_h), 0); delete G; //cudaMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free2); //EXPECT_NE(free4,free); gdf_col_delete(col_src); gdf_col_delete(col_dest); //cudaMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free); } TEST(gdf_graph, gdf_add_adjList) { std::vector<int> src_h={0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14, 14, 15, 15, 18, 18, 19, 20, 20, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 25, 26, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 2, 3, 7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 13, 27, 28, 32, 
7, 12, 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33, 33, 32, 33, 32, 33, 32, 33, 33, 32, 33, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 31, 29, 33, 33, 31, 33, 32, 33, 32, 33, 32, 33, 33}; std::vector<int> dest_h={1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 2, 3, 7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 13, 27, 28, 32, 7, 12, 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33, 33, 32, 33, 32, 33, 32, 33, 33, 32, 33, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 31, 29, 33, 33, 31, 33, 32, 33, 32, 33, 32, 33, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14, 14, 15, 15, 18, 18, 19, 20, 20, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 25, 26, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32}; std::vector<int> off_ref_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; gdf_graph *G = new gdf_graph; gdf_column *col_src = new gdf_column, *col_dest = new gdf_column; //size_t free, free2, free3, free4, total; //cudaMemGetInfo(&free, &total); create_gdf_column(src_h, col_src); create_gdf_column(dest_h, col_dest); //cudaMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_edge_list_view(G, col_src, col_dest, nullptr),GDF_SUCCESS); //cudaMemGetInfo(&free3, &total); //EXPECT_EQ(free2,free3); //EXPECT_NE(free,free3); ASSERT_EQ(gdf_add_adj_list(G),GDF_SUCCESS); //this check doen't work on small case (false positive) //cudaMemGetInfo(&free3, &total); //EXPECT_NE(free3,free2); std::vector<int> off_h(G->adjList->offsets->size ), ind_h(G->adjList->indices->size); cudaMemcpy(&off_h[0], G->adjList->offsets->data, sizeof(int) * G->adjList->offsets->size, cudaMemcpyDeviceToHost); cudaMemcpy(&ind_h[0], G->adjList->indices->data, sizeof(int) * G->adjList->indices->size, cudaMemcpyDeviceToHost); size_t zero = 0; EXPECT_GT(off_h.size(), zero); EXPECT_GT(ind_h.size(), zero); EXPECT_EQ(off_h.size()-2, 
(size_t)(*(std::max_element(ind_h.begin(), ind_h.end())))); EXPECT_EQ(ind_h.size(), (size_t)off_h.back()); std::sort (ind_h.begin(), ind_h.end()); std::sort (dest_h.begin(), dest_h.end()); EXPECT_EQ( eq(ind_h,dest_h), 0); EXPECT_EQ( eq(off_h,off_ref_h), 0); delete G; //cudaMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free2); //EXPECT_NE(free4,free); gdf_col_delete(col_src); gdf_col_delete(col_dest); //cudaMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free); } void offsets2indices(std::vector<int> &offsets, std::vector<int> &indices) { for (int i = 0; i < (int)offsets.size()-1; ++i) for (int j = offsets[i]; j < offsets[i+1]; ++j) indices[j] = i; } TEST(gdf_graph, gdf_add_edge_list) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<float> w_h = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; gdf_graph *G = new gdf_graph; gdf_column *col_off = new gdf_column, *col_ind = new gdf_column, *col_w = new gdf_column; create_gdf_column(off_h, col_off); create_gdf_column(ind_h, col_ind); create_gdf_column(w_h, col_w); ASSERT_EQ(gdf_adj_list_view(G, col_off, col_ind, col_w),GDF_SUCCESS); ASSERT_EQ(gdf_add_edge_list(G),GDF_SUCCESS); std::vector<int> src_h(ind_h.size()), src2_h(ind_h.size()), dest2_h(ind_h.size()); std::vector<float> w2_h(w_h.size()); cudaMemcpy(&src2_h[0], G->edgeList->src_indices->data, sizeof(int) * ind_h.size(), cudaMemcpyDeviceToHost); cudaMemcpy(&dest2_h[0], G->edgeList->dest_indices->data, sizeof(int) * ind_h.size(), cudaMemcpyDeviceToHost); cudaMemcpy(&w2_h[0], G->edgeList->edge_data->data, sizeof(float) * w_h.size(), cudaMemcpyDeviceToHost); offsets2indices(off_h, src_h); ASSERT_LE(*(std::max_element(src2_h.begin(), src2_h.end())),(int)off_h.size()-1); ASSERT_GE(*(std::min_element(src2_h.begin(), src2_h.end())),off_h.front()); ASSERT_EQ( eq(src_h,src2_h), 0); ASSERT_EQ( eq(ind_h,dest2_h), 0); ASSERT_EQ( eq(w_h,w2_h), 0); delete G; gdf_col_delete(col_off); gdf_col_delete(col_ind); gdf_col_delete(col_w); } TEST(gdf_graph, get_vertex_identifiers) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 
3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<int> idx_h(off_h.size()-1), idx2_h(off_h.size()-1); gdf_graph *G = new gdf_graph; gdf_column *col_off = new gdf_column, *col_ind = new gdf_column, *col_idx = new gdf_column; create_gdf_column(off_h, col_off); create_gdf_column(ind_h, col_ind); create_gdf_column(idx2_h, col_idx); ASSERT_EQ(gdf_adj_list_view(G, col_off, col_ind, nullptr),GDF_SUCCESS); ASSERT_EQ(G->adjList->get_vertex_identifiers(col_idx),GDF_SUCCESS); cudaMemcpy(&idx2_h[0], col_idx->data, sizeof(int) * col_idx->size, cudaMemcpyDeviceToHost); std::generate(idx_h.begin(), idx_h.end(), [n = 0]() mutable {return n++;}); ASSERT_EQ( eq(idx_h,idx2_h), 0); delete G; gdf_col_delete(col_off); gdf_col_delete(col_ind); gdf_col_delete(col_idx); } TEST(gdf_graph, get_source_indices) { // Hard-coded Zachary Karate Club network input std::vector<int> off_h = {0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66, 67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93, 98, 101, 104, 106, 110, 113, 117, 121, 127, 139, 156}; std::vector<int> ind_h = {1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8, 9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0, 6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32, 33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33, 32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1, 32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31, 29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1, 8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18, 20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30, 31, 32}; std::vector<int> src_h(ind_h.size()), src2_h(ind_h.size()); gdf_graph *G = new gdf_graph; gdf_column *col_off 
= new gdf_column, *col_ind = new gdf_column, *col_src = new gdf_column; create_gdf_column(off_h, col_off); create_gdf_column(ind_h, col_ind); create_gdf_column(src2_h, col_src); ASSERT_EQ(gdf_adj_list_view(G, col_off, col_ind, nullptr),GDF_SUCCESS); ASSERT_EQ(G->adjList->get_source_indices(col_src),GDF_SUCCESS); cudaMemcpy(&src2_h[0], col_src->data, sizeof(int) * col_src->size, cudaMemcpyDeviceToHost); offsets2indices(off_h, src_h); ASSERT_EQ( eq(src_h,src2_h), 0); delete G; gdf_col_delete(col_off); gdf_col_delete(col_ind); gdf_col_delete(col_src); } TEST(gdf_graph, memory) { gdf_graph *G = new gdf_graph; gdf_column col_src, col_dest; col_src.dtype = GDF_INT32; col_src.valid = nullptr; col_dest.dtype = GDF_INT32; col_dest.valid = nullptr; col_src.null_count = 0; col_dest.null_count = 0; //size_t free, free2, free3, free4_, free4, total; //cudaMemGetInfo(&free, &total); size_t vertices = 0, edges = 0; char argv[1024] = "grmat --rmat_scale=23 --rmat_edgefactor=16 --device=0 --normalized --rmat_self_loops --quiet"; ASSERT_EQ(gdf_grmat_gen(argv, vertices, edges, &col_src, &col_dest, nullptr), GDF_SUCCESS); //cudaMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); ASSERT_EQ(gdf_edge_list_view(G, &col_src, &col_dest, nullptr),GDF_SUCCESS); //cudaMemGetInfo(&free3, &total); //EXPECT_EQ(free2,free3); //EXPECT_NE(free,free3); ASSERT_EQ(gdf_add_transposed_adj_list(G),GDF_SUCCESS); //this check doen't work on small case (false positive) //cudaMemGetInfo(&free4_, &total); //EXPECT_NE(free4_,free2); ASSERT_EQ(gdf_add_adj_list(G),GDF_SUCCESS); ASSERT_EQ(gdf_delete_adj_list(G),GDF_SUCCESS); //cudaMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free4_); //EXPECT_NE(free4,free2); delete G; //cudaMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free3); //EXPECT_NE(free4,free); cudaStream_t stream{nullptr}; ALLOC_FREE_TRY(col_src.data, stream); ALLOC_FREE_TRY(col_dest.data, stream); //cudaMemGetInfo(&free4, &total); //EXPECT_EQ(free4,free); } TEST(gdf_graph, gdf_column_overhead) { size_t 
sz = 100000000; std::vector<int> src_h(sz,1); std::vector<int> dest_h(sz,1); //size_t free, free2, free3, total; //cudaMemGetInfo(&free, &total); gdf_graph *G = new gdf_graph; gdf_column *col_src = new gdf_column, *col_dest = new gdf_column; create_gdf_column(src_h, col_src); create_gdf_column(dest_h, col_dest); //cudaMemGetInfo(&free2, &total); //EXPECT_NE(free,free2); // check that gdf_column_overhead < 5 per cent //EXPECT_LT(free-free2, 2*sz*sizeof(int)*1.05); ASSERT_EQ(gdf_edge_list_view(G, col_src, col_dest, nullptr),GDF_SUCCESS); //cudaMemGetInfo(&free3, &total); //EXPECT_EQ(free2,free3); //EXPECT_NE(free,free3); delete G; gdf_col_delete(col_src); gdf_col_delete(col_dest); } int main(int argc, char **argv) { srand(42); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
58cfdda373085f3098ec022a68e76aa9d2531fe2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> #include <stdio.h> __global__ void add(double *a,double*b,double* c,int n) { int id = blockIdx.x * blockDim.x + threadIdx.x; if(id>0) c[id] = a[id] + b[id]; } int main() { int n = 100; // Device input vectors double *d_a; double *d_b; //Device output vector double *d_c; int i=0; hipMallocManaged(&d_a,n*sizeof(double)); hipMallocManaged(&d_b,n*sizeof(double)); hipMallocManaged(&d_c,n*sizeof(double)); for ( i = 0; i < n; i++) { d_a[i] = i; d_b[i] = i; } int blockSize = 512; // Number of thread blocks in grid int gridSize = (int)ceil((float)n/blockSize); hipLaunchKernelGGL(( add) , dim3(gridSize),dim3(blockSize) , 0, 0, d_a,d_b,d_c,n); hipDeviceSynchronize(); printf("%d %d\n",gridSize,blockSize ); for(i=0;i<n;i++) { printf("%f + %f = %f\n",d_a[i],d_b[i],d_c[i]); } hipFree(d_a); hipFree(d_b); hipFree(d_c); /*float maxError = 0.0f; for (int i = 0; i < n; i++) maxError = fmax(maxError, fabs(d_c[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl;*/ }
58cfdda373085f3098ec022a68e76aa9d2531fe2.cu
#include <iostream> #include <math.h> #include <stdio.h> __global__ void add(double *a,double*b,double* c,int n) { int id = blockIdx.x * blockDim.x + threadIdx.x; if(id>0) c[id] = a[id] + b[id]; } int main() { int n = 100; // Device input vectors double *d_a; double *d_b; //Device output vector double *d_c; int i=0; cudaMallocManaged(&d_a,n*sizeof(double)); cudaMallocManaged(&d_b,n*sizeof(double)); cudaMallocManaged(&d_c,n*sizeof(double)); for ( i = 0; i < n; i++) { d_a[i] = i; d_b[i] = i; } int blockSize = 512; // Number of thread blocks in grid int gridSize = (int)ceil((float)n/blockSize); add <<< gridSize,blockSize >>>(d_a,d_b,d_c,n); cudaDeviceSynchronize(); printf("%d %d\n",gridSize,blockSize ); for(i=0;i<n;i++) { printf("%f + %f = %f\n",d_a[i],d_b[i],d_c[i]); } cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); /*float maxError = 0.0f; for (int i = 0; i < n; i++) maxError = fmax(maxError, fabs(d_c[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl;*/ }
2f7bcdac8cd196b8ed56b97fab6c058ef762445e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/utilities/device_atomics.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/wrappers/timestamps.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/timestamp_utilities.cuh> #include <cudf_test/type_lists.hpp> #include <rmm/cuda_stream_view.hpp> #include <algorithm> template <typename T> __global__ void gpu_atomic_test(T* result, T* data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomicAdd(&result[0], data[id]); atomicMin(&result[1], data[id]); atomicMax(&result[2], data[id]); cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{}); cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{}); cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{}); } } template <typename T, typename BinaryOp> constexpr inline bool is_timestamp_sum() { return cudf::is_timestamp<T>() && std::is_same_v<BinaryOp, cudf::DeviceSum>; } // Disable SUM of TIMESTAMP types template <typename T, typename BinaryOp, typename std::enable_if_t<is_timestamp_sum<T, BinaryOp>()>* = nullptr> __device__ T atomic_op(T* addr, T const& value, BinaryOp op) { return {}; } template <typename T, typename BinaryOp, typename 
std::enable_if_t<!is_timestamp_sum<T, BinaryOp>()>* = nullptr> __device__ T atomic_op(T* addr, T const& value, BinaryOp op) { T old_value = *addr; T assumed; do { assumed = old_value; T new_value = op(old_value, value); old_value = atomicCAS(addr, assumed, new_value); } while (assumed != old_value); return old_value; } template <typename T> __global__ void gpu_atomicCAS_test(T* result, T* data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomic_op(&result[0], data[id], cudf::DeviceSum{}); atomic_op(&result[1], data[id], cudf::DeviceMin{}); atomic_op(&result[2], data[id], cudf::DeviceMax{}); atomic_op(&result[3], data[id], cudf::DeviceSum{}); atomic_op(&result[4], data[id], cudf::DeviceMin{}); atomic_op(&result[5], data[id], cudf::DeviceMax{}); } } template <typename T> typename std::enable_if_t<!cudf::is_timestamp<T>(), T> accumulate(cudf::host_span<T const> xs) { return std::accumulate(xs.begin(), xs.end(), T{0}); } template <typename T> typename std::enable_if_t<cudf::is_timestamp<T>(), T> accumulate(cudf::host_span<T const> xs) { auto ys = std::vector<typename T::rep>(xs.size()); std::transform( xs.begin(), xs.end(), ys.begin(), [](T const& ts) { return ts.time_since_epoch().count(); }); return T{typename T::duration{std::accumulate(ys.begin(), ys.end(), 0)}}; } template <typename T> struct AtomicsTest : public cudf::test::BaseFixture { void atomic_test(std::vector<int> const& v_input, bool is_cas_test, int block_size = 0, int grid_size = 1) { size_t vec_size = v_input.size(); // use transform from thrust::host_vector<int> instead. 
thrust::host_vector<T> v(vec_size); std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) { T t = cudf::test::make_type_param_scalar<T>(x); return t; }); T exact[3]; exact[0] = accumulate<T>(v); exact[1] = *(std::min_element(v.begin(), v.end())); exact[2] = *(std::max_element(v.begin(), v.end())); thrust::host_vector<T> result_init(9); // +3 padding for int8 tests result_init[0] = cudf::test::make_type_param_scalar<T>(0); result_init[1] = std::numeric_limits<T>::max(); result_init[2] = std::numeric_limits<T>::min(); result_init[3] = result_init[0]; result_init[4] = result_init[1]; result_init[5] = result_init[2]; auto dev_data = cudf::detail::make_device_uvector_sync(v); auto dev_result = cudf::detail::make_device_uvector_sync(result_init); if (block_size == 0) { block_size = vec_size; } if (is_cas_test) { hipLaunchKernelGGL(( gpu_atomicCAS_test), dim3(grid_size), dim3(block_size), 0, 0, dev_result.data(), dev_data.data(), vec_size); } else { hipLaunchKernelGGL(( gpu_atomic_test), dim3(grid_size), dim3(block_size), 0, 0, dev_result.data(), dev_data.data(), vec_size); } auto host_result = cudf::detail::make_host_vector_sync(dev_result); CHECK_CUDA(rmm::cuda_stream_default.value()); if (!is_timestamp_sum<T, cudf::DeviceSum>()) { EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed"; } EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed"; EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed"; if (!is_timestamp_sum<T, cudf::DeviceSum>()) { EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed"; } EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed"; EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed"; } }; TYPED_TEST_SUITE(AtomicsTest, cudf::test::FixedWidthTypesWithoutFixedPoint); // tests for atomicAdd/Min/Max TYPED_TEST(AtomicsTest, atomicOps) { bool is_cas_test = false; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test); 
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test); } // tests for atomicCAS TYPED_TEST(AtomicsTest, atomicCAS) { bool is_cas_test = true; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test); } // tests for atomicAdd/Min/Max TYPED_TEST(AtomicsTest, atomicOpsGrid) { bool is_cas_test = false; int block_size = 3; int grid_size = 4; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test, block_size, grid_size); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test, block_size, grid_size); } // tests for atomicCAS TYPED_TEST(AtomicsTest, atomicCASGrid) { bool is_cas_test = true; int block_size = 3; int grid_size = 4; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test, block_size, grid_size); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test, block_size, grid_size); } // tests for large array TYPED_TEST(AtomicsTest, atomicOpsRandom) { bool is_cas_test = false; int block_size = 256; int grid_size = 64; std::vector<int> input_array(grid_size * block_size); std::default_random_engine engine; std::uniform_int_distribution<> dist(-10, 10); std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); }); this->atomic_test(input_array, is_cas_test, block_size, grid_size); } TYPED_TEST(AtomicsTest, atomicCASRandom) { bool is_cas_test = true; int block_size = 256; int grid_size = 64; std::vector<int> input_array(grid_size * block_size); std::default_random_engine engine; std::uniform_int_distribution<> dist(-10, 10); std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); }); 
this->atomic_test(input_array, is_cas_test, block_size, grid_size); } template <typename T> __global__ void gpu_atomic_bitwiseOp_test(T* result, T* data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomicAnd(&result[0], data[id]); atomicOr(&result[1], data[id]); atomicXor(&result[2], data[id]); cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{}); cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceOr{}); cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceXor{}); } } template <typename T> struct AtomicsBitwiseOpTest : public cudf::test::BaseFixture { void atomic_test(std::vector<uint64_t> const& v_input, int block_size = 0, int grid_size = 1) { size_t vec_size = v_input.size(); std::vector<T> v(vec_size); std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) { T t(x); return t; }); thrust::host_vector<T> identity(9, T{0}); // +3 elements padding for int8 tests identity[0] = T(~0ull); identity[3] = T(~0ull); T exact[3]; exact[0] = std::accumulate( v.begin(), v.end(), identity[0], [](T acc, uint64_t i) { return acc & T(i); }); exact[1] = std::accumulate( v.begin(), v.end(), identity[1], [](T acc, uint64_t i) { return acc | T(i); }); exact[2] = std::accumulate( v.begin(), v.end(), identity[2], [](T acc, uint64_t i) { return acc ^ T(i); }); auto dev_result = cudf::detail::make_device_uvector_sync(identity); auto dev_data = cudf::detail::make_device_uvector_sync(v); if (block_size == 0) { block_size = vec_size; } hipLaunchKernelGGL(( gpu_atomic_bitwiseOp_test<T>), dim3(grid_size), dim3(block_size), 0, 0, reinterpret_cast<T*>(dev_result.data()), reinterpret_cast<T*>(dev_data.data()), vec_size); auto host_result = cudf::detail::make_host_vector_sync(dev_result); CHECK_CUDA(rmm::cuda_stream_default.value()); // print_exact(exact, "exact"); // print_exact(host_result.data(), "result"); EXPECT_EQ(host_result[0], exact[0]) << 
"atomicAnd test failed"; EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed"; EXPECT_EQ(host_result[2], exact[2]) << "atomicXor test failed"; EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed"; EXPECT_EQ(host_result[4], exact[1]) << "atomicOr test(2) failed"; EXPECT_EQ(host_result[5], exact[2]) << "atomicXor test(2) failed"; } [[maybe_unused]] void print_exact(const T* v, const char* msg) { std::cout << std::hex << std::showbase; std::cout << "The " << msg << " = {" << +v[0] << ", " << +v[1] << ", " << +v[2] << "}" << std::endl; } }; using BitwiseOpTestingTypes = cudf::test::Types<int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t>; TYPED_TEST_SUITE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes); TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps) { { // test for AND, XOR std::vector<uint64_t> input_array( {0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc, 0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc}); this->atomic_test(input_array); } { // test for OR, XOR std::vector<uint64_t> input_array( {0x01, 0xfc02, 0x1dff03, 0x1100a0b0801d0003, 0x8000000000000000, 0x1dff03}); this->atomic_test(input_array); } } CUDF_TEST_PROGRAM_MAIN()
2f7bcdac8cd196b8ed56b97fab6c058ef762445e.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/utilities/device_atomics.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/wrappers/timestamps.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/timestamp_utilities.cuh> #include <cudf_test/type_lists.hpp> #include <rmm/cuda_stream_view.hpp> #include <algorithm> template <typename T> __global__ void gpu_atomic_test(T* result, T* data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomicAdd(&result[0], data[id]); atomicMin(&result[1], data[id]); atomicMax(&result[2], data[id]); cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{}); cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{}); cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{}); } } template <typename T, typename BinaryOp> constexpr inline bool is_timestamp_sum() { return cudf::is_timestamp<T>() && std::is_same_v<BinaryOp, cudf::DeviceSum>; } // Disable SUM of TIMESTAMP types template <typename T, typename BinaryOp, typename std::enable_if_t<is_timestamp_sum<T, BinaryOp>()>* = nullptr> __device__ T atomic_op(T* addr, T const& value, BinaryOp op) { return {}; } template <typename T, typename BinaryOp, typename std::enable_if_t<!is_timestamp_sum<T, BinaryOp>()>* = nullptr> __device__ T atomic_op(T* 
addr, T const& value, BinaryOp op) { T old_value = *addr; T assumed; do { assumed = old_value; T new_value = op(old_value, value); old_value = atomicCAS(addr, assumed, new_value); } while (assumed != old_value); return old_value; } template <typename T> __global__ void gpu_atomicCAS_test(T* result, T* data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomic_op(&result[0], data[id], cudf::DeviceSum{}); atomic_op(&result[1], data[id], cudf::DeviceMin{}); atomic_op(&result[2], data[id], cudf::DeviceMax{}); atomic_op(&result[3], data[id], cudf::DeviceSum{}); atomic_op(&result[4], data[id], cudf::DeviceMin{}); atomic_op(&result[5], data[id], cudf::DeviceMax{}); } } template <typename T> typename std::enable_if_t<!cudf::is_timestamp<T>(), T> accumulate(cudf::host_span<T const> xs) { return std::accumulate(xs.begin(), xs.end(), T{0}); } template <typename T> typename std::enable_if_t<cudf::is_timestamp<T>(), T> accumulate(cudf::host_span<T const> xs) { auto ys = std::vector<typename T::rep>(xs.size()); std::transform( xs.begin(), xs.end(), ys.begin(), [](T const& ts) { return ts.time_since_epoch().count(); }); return T{typename T::duration{std::accumulate(ys.begin(), ys.end(), 0)}}; } template <typename T> struct AtomicsTest : public cudf::test::BaseFixture { void atomic_test(std::vector<int> const& v_input, bool is_cas_test, int block_size = 0, int grid_size = 1) { size_t vec_size = v_input.size(); // use transform from thrust::host_vector<int> instead. 
thrust::host_vector<T> v(vec_size); std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) { T t = cudf::test::make_type_param_scalar<T>(x); return t; }); T exact[3]; exact[0] = accumulate<T>(v); exact[1] = *(std::min_element(v.begin(), v.end())); exact[2] = *(std::max_element(v.begin(), v.end())); thrust::host_vector<T> result_init(9); // +3 padding for int8 tests result_init[0] = cudf::test::make_type_param_scalar<T>(0); result_init[1] = std::numeric_limits<T>::max(); result_init[2] = std::numeric_limits<T>::min(); result_init[3] = result_init[0]; result_init[4] = result_init[1]; result_init[5] = result_init[2]; auto dev_data = cudf::detail::make_device_uvector_sync(v); auto dev_result = cudf::detail::make_device_uvector_sync(result_init); if (block_size == 0) { block_size = vec_size; } if (is_cas_test) { gpu_atomicCAS_test<<<grid_size, block_size>>>(dev_result.data(), dev_data.data(), vec_size); } else { gpu_atomic_test<<<grid_size, block_size>>>(dev_result.data(), dev_data.data(), vec_size); } auto host_result = cudf::detail::make_host_vector_sync(dev_result); CHECK_CUDA(rmm::cuda_stream_default.value()); if (!is_timestamp_sum<T, cudf::DeviceSum>()) { EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed"; } EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed"; EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed"; if (!is_timestamp_sum<T, cudf::DeviceSum>()) { EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed"; } EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed"; EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed"; } }; TYPED_TEST_SUITE(AtomicsTest, cudf::test::FixedWidthTypesWithoutFixedPoint); // tests for atomicAdd/Min/Max TYPED_TEST(AtomicsTest, atomicOps) { bool is_cas_test = false; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); 
this->atomic_test(input_array2, is_cas_test); } // tests for atomicCAS TYPED_TEST(AtomicsTest, atomicCAS) { bool is_cas_test = true; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test); } // tests for atomicAdd/Min/Max TYPED_TEST(AtomicsTest, atomicOpsGrid) { bool is_cas_test = false; int block_size = 3; int grid_size = 4; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test, block_size, grid_size); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test, block_size, grid_size); } // tests for atomicCAS TYPED_TEST(AtomicsTest, atomicCASGrid) { bool is_cas_test = true; int block_size = 3; int grid_size = 4; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test, block_size, grid_size); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test, block_size, grid_size); } // tests for large array TYPED_TEST(AtomicsTest, atomicOpsRandom) { bool is_cas_test = false; int block_size = 256; int grid_size = 64; std::vector<int> input_array(grid_size * block_size); std::default_random_engine engine; std::uniform_int_distribution<> dist(-10, 10); std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); }); this->atomic_test(input_array, is_cas_test, block_size, grid_size); } TYPED_TEST(AtomicsTest, atomicCASRandom) { bool is_cas_test = true; int block_size = 256; int grid_size = 64; std::vector<int> input_array(grid_size * block_size); std::default_random_engine engine; std::uniform_int_distribution<> dist(-10, 10); std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); }); this->atomic_test(input_array, is_cas_test, block_size, grid_size); } template 
<typename T> __global__ void gpu_atomic_bitwiseOp_test(T* result, T* data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomicAnd(&result[0], data[id]); atomicOr(&result[1], data[id]); atomicXor(&result[2], data[id]); cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{}); cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceOr{}); cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceXor{}); } } template <typename T> struct AtomicsBitwiseOpTest : public cudf::test::BaseFixture { void atomic_test(std::vector<uint64_t> const& v_input, int block_size = 0, int grid_size = 1) { size_t vec_size = v_input.size(); std::vector<T> v(vec_size); std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) { T t(x); return t; }); thrust::host_vector<T> identity(9, T{0}); // +3 elements padding for int8 tests identity[0] = T(~0ull); identity[3] = T(~0ull); T exact[3]; exact[0] = std::accumulate( v.begin(), v.end(), identity[0], [](T acc, uint64_t i) { return acc & T(i); }); exact[1] = std::accumulate( v.begin(), v.end(), identity[1], [](T acc, uint64_t i) { return acc | T(i); }); exact[2] = std::accumulate( v.begin(), v.end(), identity[2], [](T acc, uint64_t i) { return acc ^ T(i); }); auto dev_result = cudf::detail::make_device_uvector_sync(identity); auto dev_data = cudf::detail::make_device_uvector_sync(v); if (block_size == 0) { block_size = vec_size; } gpu_atomic_bitwiseOp_test<T><<<grid_size, block_size>>>( reinterpret_cast<T*>(dev_result.data()), reinterpret_cast<T*>(dev_data.data()), vec_size); auto host_result = cudf::detail::make_host_vector_sync(dev_result); CHECK_CUDA(rmm::cuda_stream_default.value()); // print_exact(exact, "exact"); // print_exact(host_result.data(), "result"); EXPECT_EQ(host_result[0], exact[0]) << "atomicAnd test failed"; EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed"; EXPECT_EQ(host_result[2], 
exact[2]) << "atomicXor test failed"; EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed"; EXPECT_EQ(host_result[4], exact[1]) << "atomicOr test(2) failed"; EXPECT_EQ(host_result[5], exact[2]) << "atomicXor test(2) failed"; } [[maybe_unused]] void print_exact(const T* v, const char* msg) { std::cout << std::hex << std::showbase; std::cout << "The " << msg << " = {" << +v[0] << ", " << +v[1] << ", " << +v[2] << "}" << std::endl; } }; using BitwiseOpTestingTypes = cudf::test::Types<int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t>; TYPED_TEST_SUITE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes); TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps) { { // test for AND, XOR std::vector<uint64_t> input_array( {0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc, 0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc}); this->atomic_test(input_array); } { // test for OR, XOR std::vector<uint64_t> input_array( {0x01, 0xfc02, 0x1dff03, 0x1100a0b0801d0003, 0x8000000000000000, 0x1dff03}); this->atomic_test(input_array); } } CUDF_TEST_PROGRAM_MAIN()
2cc527f2ebfc389e86e2f67891c6687c601926f7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <hiprand/hiprand.h> #include <ctime> #include <assert.h> // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(hipError_t stat, const char *file, int line) { if (stat != hipSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) { if (stat != HIPRAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } #include <mma.h> using namespace nvcuda; //enum MatrixLayout{ #define ROW_MAJOR 0 #define COL_MAJOR 1 //}; //ONLY THE PARAMETER HERE NEEDS TO BE CHANGED // Must be multiples of 16 for wmma code to work #define MATRIX_M (32) #define MATRIX_N (8) #define MATRIX_K (16) const int WMMA_M =32; const int WMMA_N =8; const int WMMA_K =16; typedef half atype; typedef half btype; typedef float ctype; typedef float dtype; typedef float host_type; #define A_LAYOUT ROW_MAJOR #define B_LAYOUT ROW_MAJOR #define C_LAYOUT ROW_MAJOR #define D_LAYOUT ROW_MAJOR #define NUM_CTA 1 #define WARP_IN_CTA 1 //Don't change anything after here #define THREAD_IN_WARP 32 #if A_LAYOUT==ROW_MAJOR #define LAYOUT_A wmma::row_major #define A_STRIDE MATRIX_K #else #define LAYOUT_A wmma::col_major #define A_STRIDE MATRIX_M #endif #if B_LAYOUT==ROW_MAJOR #define LAYOUT_B wmma::row_major #define B_STRIDE MATRIX_N #else #define LAYOUT_B wmma::col_major #define B_STRIDE MATRIX_K #endif #if C_LAYOUT==ROW_MAJOR #define LAYOUT_C wmma::mem_row_major #define C_STRIDE MATRIX_N #else #define LAYOUT_C wmma::mem_col_major #define C_STRIDE MATRIX_M #endif #if D_LAYOUT==ROW_MAJOR #define LAYOUT_D wmma::mem_row_major #define D_STRIDE MATRIX_N #else #define LAYOUT_D wmma::mem_col_major #define D_STRIDE MATRIX_M 
#endif enum MatrixInitializationType{ ZERO, ONE, RANDOM, IDENTITY, LINEAR }; int get_value(MatrixInitializationType init_type,int randomRange=3,bool RESET=false){ static int val=0; switch(init_type){ case ZERO: break; case ONE: val=1; break; case RANDOM: val=rand()%randomRange; break; case LINEAR: val++; break; default : printf("illegal MatrixInitializationType\n"); abort(); break; } if(RESET) val=0; return val; } template <typename T> void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ T val; if(layout==ROW_MAJOR) val=matrix[row*col_size+col]; else val=matrix[col*row_size+row]; printf("%.2f ",static_cast<float>(val)); } printf(";\n"); } } template <typename T> void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ if(init_type==IDENTITY){ assert(row_size==col_size);//only for square matrix can be used matrix[row*row_size+col]=static_cast<T>(1); } else{ if(layout==ROW_MAJOR){ matrix[row*col_size+col]=static_cast<T>(get_value(init_type)); } else{ matrix[col*row_size+row]=static_cast<T>(get_value(init_type)); } } } } get_value(init_type,10,true);//reseting the val counter print_matrix<T>(matrix,row_size,col_size,layout); } int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){ int index=0; if(layout==ROW_MAJOR){ index=row*col_size+col; } else{ index=col*row_size+row; } return index; } template <typename T> void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){ for(int row=0;row<M;row++){ for(int col=0;col<N;col++){ int rindex=get_index(row,col,M,N,resultlayout); int cindex=get_index(row,col,M,N,clayout); for(int k=0;k<K;k++){ int 
aindex=get_index(row,k,M,K,alayout); int bindex=get_index(k,col,K,N,blayout); result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex]; } result_matrix[rindex]+=matrix_c[cindex]; } } print_matrix<T>(result_matrix,M,N,resultlayout); } template <typename T> void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ int index_a,index_b; index_a=get_index(row,col,row_size,col_size,alayout); index_b=get_index(row,col,row_size,col_size,alayout); if(matrix_a[index_a]!=matrix_b[index_b]) printf("ERROR at index row=%d col=%d\n",row,col); } } } __global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d) { float t; // Declare the fragments wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag; wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag; wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag; // Bounds checking wmma::load_matrix_sync(a_frag, a, A_STRIDE); wmma::load_matrix_sync(b_frag, b, B_STRIDE); wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C); for(int i=0; i < a_frag.num_elements; i++) { t=static_cast<float>(a_frag.x[i]); printf("A_THREAD%d: %.2f \n",threadIdx.x,t); } for(int i=0; i < b_frag.num_elements; i++) { t=static_cast<float>(b_frag.x[i]); printf("B_THREAD%d: %.2f \n",threadIdx.x,t); } for(int i=0; i < c_frag.num_elements; i++) { t=static_cast<float>(c_frag.x[i]); printf("C_THREAD%d: %.2f \n",threadIdx.x,t); } wmma::mma_sync(c_frag, a_frag, b_frag, c_frag); wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D); } template <typename T1,typename T2> __global__ void convert(T1 *out, T2 *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = in[idx]; } } int main(int argc, char* argv[]) { //data on device in host type format host_type *a_htype; host_type *b_htype; host_type *c_htype; host_type *d_htype; 
//data on device in gemm format atype *a_atype; btype *b_btype; ctype *c_ctype; dtype *d_dtype; srand(time(NULL)); host_type *a_host_wmma; host_type *b_host_wmma; host_type *c_host_wmma; host_type *d_host_wmma; host_type *d_cal_host_wmma; hipEvent_t startWMMA; hipEvent_t stopWMMA; cudaErrCheck(hipEventCreate(&startWMMA)); cudaErrCheck(hipEventCreate(&stopWMMA)); // Use tensor cores cudaErrCheck(hipMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype))); cudaErrCheck(hipMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype))); cudaErrCheck(hipMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype))); cudaErrCheck(hipMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype))); a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type)); b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type)); c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); printf("a_host\n"); initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,LINEAR); printf("b_host\n"); initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,LINEAR); printf("c_host\n"); initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,LINEAR); printf("d_cal_host\n"); initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO); printf("d_cal_host\n"); matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT); 
cudaErrCheck(hipMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( convert<atype,host_type>) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_atype, a_htype, MATRIX_M * MATRIX_K); hipLaunchKernelGGL(( convert<btype,host_type>) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_btype, b_htype, MATRIX_K * MATRIX_N); hipLaunchKernelGGL(( convert<ctype,host_type>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, c_ctype, c_htype, MATRIX_M * MATRIX_N); printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K); printf("Running with wmma...\n"); cudaErrCheck(hipEventRecord(startWMMA)); hipLaunchKernelGGL(( wmma_example) , dim3(NUM_CTA),dim3(WARP_IN_CTA*THREAD_IN_WARP), 0, 0, a_atype, b_btype, c_ctype, d_dtype); cudaErrCheck(hipEventRecord(stopWMMA)); hipLaunchKernelGGL(( convert<host_type,dtype>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, d_htype, d_dtype, MATRIX_M * MATRIX_N); cudaErrCheck(hipEventSynchronize(stopWMMA)); // Error checking printf("\nChecking results...\n"); cudaErrCheck(hipMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyDeviceToHost)); printf("Results verified: cublas and WMMA agree.\n\n"); float wmmaTime; cudaErrCheck(hipEventElapsedTime(&wmmaTime, startWMMA, stopWMMA)); printf("wmma took %.2fms\n", wmmaTime); cudaErrCheck(hipEventDestroy(startWMMA)); cudaErrCheck(hipEventDestroy(stopWMMA)); //printf("D_CALCULATED\n"); //print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); //printf("D_WMMA\n"); //print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); //printf("CHECKING\n"); 
//compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT); cudaErrCheck(hipFree(a_htype)); cudaErrCheck(hipFree(b_htype)); cudaErrCheck(hipFree(c_htype)); cudaErrCheck(hipFree(d_htype)); cudaErrCheck(hipFree(a_atype)); cudaErrCheck(hipFree(b_btype)); cudaErrCheck(hipFree(c_ctype)); cudaErrCheck(hipFree(d_dtype)); free(a_host_wmma); free(b_host_wmma); free(c_host_wmma); free(d_host_wmma); free(d_cal_host_wmma); cudaErrCheck(hipDeviceReset()); return 0; }
2cc527f2ebfc389e86e2f67891c6687c601926f7.cu
#include <stdio.h> #include <curand.h> #include <ctime> #include <assert.h> // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(curandStatus_t stat, const char *file, int line) { if (stat != CURAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } #include <mma.h> using namespace nvcuda; //enum MatrixLayout{ #define ROW_MAJOR 0 #define COL_MAJOR 1 //}; //ONLY THE PARAMETER HERE NEEDS TO BE CHANGED // Must be multiples of 16 for wmma code to work #define MATRIX_M (32) #define MATRIX_N (8) #define MATRIX_K (16) const int WMMA_M =32; const int WMMA_N =8; const int WMMA_K =16; typedef half atype; typedef half btype; typedef float ctype; typedef float dtype; typedef float host_type; #define A_LAYOUT ROW_MAJOR #define B_LAYOUT ROW_MAJOR #define C_LAYOUT ROW_MAJOR #define D_LAYOUT ROW_MAJOR #define NUM_CTA 1 #define WARP_IN_CTA 1 //Don't change anything after here #define THREAD_IN_WARP 32 #if A_LAYOUT==ROW_MAJOR #define LAYOUT_A wmma::row_major #define A_STRIDE MATRIX_K #else #define LAYOUT_A wmma::col_major #define A_STRIDE MATRIX_M #endif #if B_LAYOUT==ROW_MAJOR #define LAYOUT_B wmma::row_major #define B_STRIDE MATRIX_N #else #define LAYOUT_B wmma::col_major #define B_STRIDE MATRIX_K #endif #if C_LAYOUT==ROW_MAJOR #define LAYOUT_C wmma::mem_row_major #define C_STRIDE MATRIX_N #else #define LAYOUT_C wmma::mem_col_major #define C_STRIDE MATRIX_M #endif #if D_LAYOUT==ROW_MAJOR #define LAYOUT_D wmma::mem_row_major #define D_STRIDE MATRIX_N #else #define LAYOUT_D wmma::mem_col_major #define D_STRIDE MATRIX_M #endif enum MatrixInitializationType{ ZERO, ONE, RANDOM, IDENTITY, LINEAR }; int 
get_value(MatrixInitializationType init_type,int randomRange=3,bool RESET=false){ static int val=0; switch(init_type){ case ZERO: break; case ONE: val=1; break; case RANDOM: val=rand()%randomRange; break; case LINEAR: val++; break; default : printf("illegal MatrixInitializationType\n"); abort(); break; } if(RESET) val=0; return val; } template <typename T> void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ T val; if(layout==ROW_MAJOR) val=matrix[row*col_size+col]; else val=matrix[col*row_size+row]; printf("%.2f ",static_cast<float>(val)); } printf(";\n"); } } template <typename T> void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ if(init_type==IDENTITY){ assert(row_size==col_size);//only for square matrix can be used matrix[row*row_size+col]=static_cast<T>(1); } else{ if(layout==ROW_MAJOR){ matrix[row*col_size+col]=static_cast<T>(get_value(init_type)); } else{ matrix[col*row_size+row]=static_cast<T>(get_value(init_type)); } } } } get_value(init_type,10,true);//reseting the val counter print_matrix<T>(matrix,row_size,col_size,layout); } int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){ int index=0; if(layout==ROW_MAJOR){ index=row*col_size+col; } else{ index=col*row_size+row; } return index; } template <typename T> void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){ for(int row=0;row<M;row++){ for(int col=0;col<N;col++){ int rindex=get_index(row,col,M,N,resultlayout); int cindex=get_index(row,col,M,N,clayout); for(int k=0;k<K;k++){ int aindex=get_index(row,k,M,K,alayout); int bindex=get_index(k,col,K,N,blayout); 
result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex]; } result_matrix[rindex]+=matrix_c[cindex]; } } print_matrix<T>(result_matrix,M,N,resultlayout); } template <typename T> void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ int index_a,index_b; index_a=get_index(row,col,row_size,col_size,alayout); index_b=get_index(row,col,row_size,col_size,alayout); if(matrix_a[index_a]!=matrix_b[index_b]) printf("ERROR at index row=%d col=%d\n",row,col); } } } __global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d) { float t; // Declare the fragments wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag; wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag; wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag; // Bounds checking wmma::load_matrix_sync(a_frag, a, A_STRIDE); wmma::load_matrix_sync(b_frag, b, B_STRIDE); wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C); for(int i=0; i < a_frag.num_elements; i++) { t=static_cast<float>(a_frag.x[i]); printf("A_THREAD%d: %.2f \n",threadIdx.x,t); } for(int i=0; i < b_frag.num_elements; i++) { t=static_cast<float>(b_frag.x[i]); printf("B_THREAD%d: %.2f \n",threadIdx.x,t); } for(int i=0; i < c_frag.num_elements; i++) { t=static_cast<float>(c_frag.x[i]); printf("C_THREAD%d: %.2f \n",threadIdx.x,t); } wmma::mma_sync(c_frag, a_frag, b_frag, c_frag); wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D); } template <typename T1,typename T2> __global__ void convert(T1 *out, T2 *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = in[idx]; } } int main(int argc, char* argv[]) { //data on device in host type format host_type *a_htype; host_type *b_htype; host_type *c_htype; host_type *d_htype; //data on device in gemm format atype *a_atype; btype *b_btype; ctype *c_ctype; 
dtype *d_dtype; srand(time(NULL)); host_type *a_host_wmma; host_type *b_host_wmma; host_type *c_host_wmma; host_type *d_host_wmma; host_type *d_cal_host_wmma; cudaEvent_t startWMMA; cudaEvent_t stopWMMA; cudaErrCheck(cudaEventCreate(&startWMMA)); cudaErrCheck(cudaEventCreate(&stopWMMA)); // Use tensor cores cudaErrCheck(cudaMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype))); cudaErrCheck(cudaMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype))); cudaErrCheck(cudaMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype))); cudaErrCheck(cudaMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype))); a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type)); b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type)); c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); printf("a_host\n"); initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,LINEAR); printf("b_host\n"); initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,LINEAR); printf("c_host\n"); initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,LINEAR); printf("d_cal_host\n"); initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO); printf("d_cal_host\n"); matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT); cudaErrCheck(cudaMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), 
cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice)); convert<atype,host_type> <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_atype, a_htype, MATRIX_M * MATRIX_K); convert<btype,host_type> <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_btype, b_htype, MATRIX_K * MATRIX_N); convert<ctype,host_type> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (c_ctype, c_htype, MATRIX_M * MATRIX_N); printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K); printf("Running with wmma...\n"); cudaErrCheck(cudaEventRecord(startWMMA)); wmma_example <<< NUM_CTA,WARP_IN_CTA*THREAD_IN_WARP>>> (a_atype, b_btype, c_ctype, d_dtype); cudaErrCheck(cudaEventRecord(stopWMMA)); convert<host_type,dtype> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (d_htype, d_dtype, MATRIX_M * MATRIX_N); cudaErrCheck(cudaEventSynchronize(stopWMMA)); // Error checking printf("\nChecking results...\n"); cudaErrCheck(cudaMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyDeviceToHost)); printf("Results verified: cublas and WMMA agree.\n\n"); float wmmaTime; cudaErrCheck(cudaEventElapsedTime(&wmmaTime, startWMMA, stopWMMA)); printf("wmma took %.2fms\n", wmmaTime); cudaErrCheck(cudaEventDestroy(startWMMA)); cudaErrCheck(cudaEventDestroy(stopWMMA)); //printf("D_CALCULATED\n"); //print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); //printf("D_WMMA\n"); //print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); //printf("CHECKING\n"); //compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT); cudaErrCheck(cudaFree(a_htype)); cudaErrCheck(cudaFree(b_htype)); cudaErrCheck(cudaFree(c_htype)); cudaErrCheck(cudaFree(d_htype)); cudaErrCheck(cudaFree(a_atype)); cudaErrCheck(cudaFree(b_btype)); cudaErrCheck(cudaFree(c_ctype)); 
cudaErrCheck(cudaFree(d_dtype)); free(a_host_wmma); free(b_host_wmma); free(c_host_wmma); free(d_host_wmma); free(d_cal_host_wmma); cudaErrCheck(cudaDeviceReset()); return 0; }
5a6498453c644d72339ffef8eb0a1ada3efe8dd3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialMaxUnpooling.cu" #else void THNN_(SpatialMaxUnpooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCIndexTensor *indices, int owidth, int oheight) { THCUNN_assertSameGPU(state, 3, input, output, indices); THCUNN_argCheck(state, input->nDimension == 3 || input->nDimension == 4, 2, input, "3D or 4D (batch mode) tensor expected for input, but got: %s"); THCUNN_check_shape_indices(state, indices, input); long nInputCols, nInputRows, nInputPlane, batchSize; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } input = THCTensor_(newContiguous)(state, input); indices = THCIndexTensor_(newContiguous)(state, indices); THCTensor_(resize4d)(state, output, batchSize, nInputPlane, oheight, owidth); THCTensor_(zero)(state, output); int count = THCTensor_(nElement)(state, input); hipLaunchKernelGGL(( MaxUnpoolForward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, THCTensor_(data)(state, input), THCIndexTensor_(data)(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCTensor_(data)(state, output)); THCudaCheck(hipGetLastError()); if(input->nDimension == 3) THCTensor_(resize3d)(state, output, nInputPlane, oheight, owidth); THCTensor_(free)(state, input); } void THNN_(SpatialMaxUnpooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCIndexTensor *indices, int owidth, int oheight) { THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput); THCUNN_check_shape_indices(state, indices, input); long nInputCols, nInputRows, nInputPlane, batchSize; int dimw = 2; 
int dimh = 1; if (input->nDimension == 3) { nInputPlane = input->size[0]; batchSize = 1; } else { ++dimw; ++dimh; nInputPlane = input->size[1]; batchSize = input->size[0]; } nInputCols = input->size[dimw]; nInputRows = input->size[dimh]; if(owidth!=gradOutput->size[dimw] || oheight!=gradOutput->size[dimh]){ THError("Inconsistent gradOutput size. oheight= %d, owidth= %d, gradOutput: %dx%d", oheight, owidth,gradOutput->size[dimh],gradOutput->size[dimw]); } input = THCTensor_(newContiguous)(state, input); indices = THCIndexTensor_(newContiguous)(state, indices); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCTensor_(resizeAs)(state, gradInput, input); int count = THCTensor_(nElement)(state, input); hipLaunchKernelGGL(( MaxUnpoolBackward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, THCTensor_(data)(state, gradOutput), THCIndexTensor_(data)(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCTensor_(data)(state, gradInput)); THCudaCheck(hipGetLastError()); // clean THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); } #endif
5a6498453c644d72339ffef8eb0a1ada3efe8dd3.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialMaxUnpooling.cu" #else void THNN_(SpatialMaxUnpooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCIndexTensor *indices, int owidth, int oheight) { THCUNN_assertSameGPU(state, 3, input, output, indices); THCUNN_argCheck(state, input->nDimension == 3 || input->nDimension == 4, 2, input, "3D or 4D (batch mode) tensor expected for input, but got: %s"); THCUNN_check_shape_indices(state, indices, input); long nInputCols, nInputRows, nInputPlane, batchSize; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } input = THCTensor_(newContiguous)(state, input); indices = THCIndexTensor_(newContiguous)(state, indices); THCTensor_(resize4d)(state, output, batchSize, nInputPlane, oheight, owidth); THCTensor_(zero)(state, output); int count = THCTensor_(nElement)(state, input); MaxUnpoolForward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCTensor_(data)(state, input), THCIndexTensor_(data)(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCTensor_(data)(state, output)); THCudaCheck(cudaGetLastError()); if(input->nDimension == 3) THCTensor_(resize3d)(state, output, nInputPlane, oheight, owidth); THCTensor_(free)(state, input); } void THNN_(SpatialMaxUnpooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCIndexTensor *indices, int owidth, int oheight) { THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput); THCUNN_check_shape_indices(state, indices, input); long nInputCols, nInputRows, nInputPlane, batchSize; int dimw = 2; int dimh = 1; if (input->nDimension == 3) { nInputPlane = input->size[0]; batchSize = 1; } else { ++dimw; ++dimh; 
nInputPlane = input->size[1]; batchSize = input->size[0]; } nInputCols = input->size[dimw]; nInputRows = input->size[dimh]; if(owidth!=gradOutput->size[dimw] || oheight!=gradOutput->size[dimh]){ THError("Inconsistent gradOutput size. oheight= %d, owidth= %d, gradOutput: %dx%d", oheight, owidth,gradOutput->size[dimh],gradOutput->size[dimw]); } input = THCTensor_(newContiguous)(state, input); indices = THCIndexTensor_(newContiguous)(state, indices); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCTensor_(resizeAs)(state, gradInput, input); int count = THCTensor_(nElement)(state, input); MaxUnpoolBackward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCTensor_(data)(state, gradOutput), THCIndexTensor_(data)(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCTensor_(data)(state, gradInput)); THCudaCheck(cudaGetLastError()); // clean THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); } #endif
bcebe57a1a260670a838360a8c5f5bdb37941bb6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // cpugcc //__sync_add_and_fetch //__sync_and_and_fetch //__sync_bool_compare_and_swap //__sync_fetch_and_add //__sync_fetch_and_and //__sync_fetch_and_nand //__sync_fetch_and_or //__sync_fetch_and_sub //__sync_fetch_and_xor //__sync_lock_release //__sync_lock_test_and_set //__sync_nand_and_fetch //__sync_or_and_fetch //__sync_sub_and_fetch //__sync_synchronize //__sync_val_compare_and_swap //__sync_xor_and_fetch //__thread //************************************************8 #include<math.h> #include<cstdio> #include<ctime> #include<stdint.h> #include<iostream> #include<cuda_runtime.h> #include<helper_cuda.h> #define LOOP_NUM 50 using std::cout; using std::endl; using std::cerr; __global__ void atomicKernel(int *atom_arr){ unsigned int tid = blockDim.x*blockIdx.x+threadIdx.x; for(int i=0; i<LOOP_NUM; i++){ atomicAdd_system(&atom_arr[0],10); atomicExch_system(&atom_arr[1],tid); atomicMax_system(&atom_arr[2],tid); atomicMin_system(&atom_arr[3],tid); // eads the 32-bit word old located at the address address in global or shared memory, // computes ((old >= val) ? 0 : (old+1)), and stores the result back to memory at // the same address. These three operations are performed in one atomic transaction. // The function returns old. atomicInc_system((unsigned int *)&atom_arr[4],17); // (((old == 0) || (old > val)) ? val : (old-1) ) atomicDec_system((unsigned int *)&atom_arr[5],137); // (old == compare ? 
val : old) atomicCAS_system(&atom_arr[6],tid-1,tid); atomicAnd_system(&atom_arr[7],2*tid+7); atomicOr_system(&atom_arr[8],1<<tid); atomicXor_system(&atom_arr[9],tid); } } void atomicKernel_CPU(int *atom_arr, int nThreads){ for(int i=nThreads; i<2*nThreads; i++){ for(int j=0; j<LOOP_NUM; j++){ __sync_fetch_and_add(&atom_arr[0],10); //add __sync_lock_test_and_set(&atom_arr[1],i); // exchange //max int old, expected; do{ expected = atom_arr[2]; old = __sync_val_compare_and_swap(&atom_arr[2],expected, max(expected, i)); }while(old != expected); //min do{ expected = atom_arr[3]; old = __sync_val_compare_and_swap(&atom_arr[3],expected, min(expected, i)); }while(old!= expected); //increment (modulo 17+1) int limit = 17; do{ expected = atom_arr[4]; old = __sync_val_compare_and_swap(&atom_arr[4],expected, (expected>=limit)?0:expected+1); }while(old != expected); //decrement limit = 137; do{ expected = atom_arr[5]; old = __sync_val_compare_and_swap(&atom_arr[5],expected, ((expected==0)||(expected>limit))?limit:expected-1); }while(old != expected); // compare and swap __sync_val_compare_and_swap(&atom_arr[6], i-1, i); // and __sync_fetch_and_and(&atom_arr[7], 2*i+7); // or __sync_fetch_and_or(&atom_arr[8], 1<<i); // xor // 11th 0xff __sync_fetch_and_xor(&atom_arr[9],i); } } } int main(int argc, char*argv[]){ hipDeviceProp_t prop; int idev = 0; checkCudaErrors(hipGetDeviceProperties(&prop, idev)); if(!prop.managedMemory){ cerr<<"Unified memory not supported on this device"<<endl; exit(EXIT_FAILURE); } if(prop.computeMode == hipComputeModeProhibited){ cerr<<"this sample requires a device in either default or process exclusive mode"<<endl; exit(EXIT_FAILURE); } if(prop.major < 6){ cerr<<"this sample requires a minimum CUDA compute 6.0 capablity"<<endl; exit(EXIT_FAILURE); } unsigned int numThreads = 256; unsigned int numBlocks = 64; unsigned int numData = 10; int *atom_arr; if(prop.pageableMemoryAccess){ cout<<"CAN access pageable memory"<<endl; atom_arr = (int 
*)malloc(sizeof(int)*numData); }else{ cout<<"CANNOT access pageable memory"<<endl; checkCudaErrors(hipMallocManaged(&atom_arr, sizeof(int)*numData)); } //-------------------------- for(unsigned int i=0; i<numData; i++) atom_arr[i] = 0; //AND XOR0 atom_arr[7] = atom_arr[9] = 0xff; hipLaunchKernelGGL(( atomicKernel), dim3(numBlocks), dim3(numThreads), 0, 0, atom_arr); checkCudaErrors(hipDeviceSynchronize()); for(unsigned int i=0; i<numData; i++) cout<<i<<": "<< atom_arr[i]<<endl; //--------------------------- cout<<"=============================="<<endl; for(unsigned int i=0; i<numData; i++) atom_arr[i] = 0; //AND XOR0 atom_arr[7] = atom_arr[9] = 0xff; atomicKernel_CPU(atom_arr, numBlocks*numThreads); for(unsigned int i=0; i<numData; i++) cout<<i<<": "<< atom_arr[i]<<endl; //--------------------------- if(prop.pageableMemoryAccess){ free(atom_arr); }else{ hipFree(atom_arr); } cout<<"systemWideAtomic completed"<<endl; }
bcebe57a1a260670a838360a8c5f5bdb37941bb6.cu
// 在cpu测的原子算子,诸如下面列出的,都是来自gcc文档 //__sync_add_and_fetch //__sync_and_and_fetch //__sync_bool_compare_and_swap //__sync_fetch_and_add //__sync_fetch_and_and //__sync_fetch_and_nand //__sync_fetch_and_or //__sync_fetch_and_sub //__sync_fetch_and_xor //__sync_lock_release //__sync_lock_test_and_set //__sync_nand_and_fetch //__sync_or_and_fetch //__sync_sub_and_fetch //__sync_synchronize //__sync_val_compare_and_swap //__sync_xor_and_fetch //__thread //************************************************8 #include<math.h> #include<cstdio> #include<ctime> #include<stdint.h> #include<iostream> #include<cuda_runtime.h> #include<helper_cuda.h> #define LOOP_NUM 50 using std::cout; using std::endl; using std::cerr; __global__ void atomicKernel(int *atom_arr){ unsigned int tid = blockDim.x*blockIdx.x+threadIdx.x; for(int i=0; i<LOOP_NUM; i++){ atomicAdd_system(&atom_arr[0],10); atomicExch_system(&atom_arr[1],tid); atomicMax_system(&atom_arr[2],tid); atomicMin_system(&atom_arr[3],tid); // eads the 32-bit word old located at the address address in global or shared memory, // computes ((old >= val) ? 0 : (old+1)), and stores the result back to memory at // the same address. These three operations are performed in one atomic transaction. // The function returns old. atomicInc_system((unsigned int *)&atom_arr[4],17); // (((old == 0) || (old > val)) ? val : (old-1) ) atomicDec_system((unsigned int *)&atom_arr[5],137); // (old == compare ? 
val : old) atomicCAS_system(&atom_arr[6],tid-1,tid); atomicAnd_system(&atom_arr[7],2*tid+7); atomicOr_system(&atom_arr[8],1<<tid); atomicXor_system(&atom_arr[9],tid); } } void atomicKernel_CPU(int *atom_arr, int nThreads){ for(int i=nThreads; i<2*nThreads; i++){ for(int j=0; j<LOOP_NUM; j++){ __sync_fetch_and_add(&atom_arr[0],10); //add __sync_lock_test_and_set(&atom_arr[1],i); // exchange //max int old, expected; do{ expected = atom_arr[2]; old = __sync_val_compare_and_swap(&atom_arr[2],expected, max(expected, i)); }while(old != expected); //min do{ expected = atom_arr[3]; old = __sync_val_compare_and_swap(&atom_arr[3],expected, min(expected, i)); }while(old!= expected); //increment (modulo 17+1) int limit = 17; do{ expected = atom_arr[4]; old = __sync_val_compare_and_swap(&atom_arr[4],expected, (expected>=limit)?0:expected+1); }while(old != expected); //decrement limit = 137; do{ expected = atom_arr[5]; old = __sync_val_compare_and_swap(&atom_arr[5],expected, ((expected==0)||(expected>limit))?limit:expected-1); }while(old != expected); // compare and swap __sync_val_compare_and_swap(&atom_arr[6], i-1, i); // and __sync_fetch_and_and(&atom_arr[7], 2*i+7); // or __sync_fetch_and_or(&atom_arr[8], 1<<i); // xor // 11th 元素应该是0xff __sync_fetch_and_xor(&atom_arr[9],i); } } } int main(int argc, char*argv[]){ cudaDeviceProp prop; int idev = 0; checkCudaErrors(cudaGetDeviceProperties(&prop, idev)); if(!prop.managedMemory){ cerr<<"Unified memory not supported on this device"<<endl; exit(EXIT_FAILURE); } if(prop.computeMode == cudaComputeModeProhibited){ cerr<<"this sample requires a device in either default or process exclusive mode"<<endl; exit(EXIT_FAILURE); } if(prop.major < 6){ cerr<<"this sample requires a minimum CUDA compute 6.0 capablity"<<endl; exit(EXIT_FAILURE); } unsigned int numThreads = 256; unsigned int numBlocks = 64; unsigned int numData = 10; int *atom_arr; if(prop.pageableMemoryAccess){ cout<<"CAN access pageable memory"<<endl; atom_arr = (int 
*)malloc(sizeof(int)*numData); }else{ cout<<"CANNOT access pageable memory"<<endl; checkCudaErrors(cudaMallocManaged(&atom_arr, sizeof(int)*numData)); } //-------------------------- for(unsigned int i=0; i<numData; i++) atom_arr[i] = 0; //为了让AND 和XOR测试能够生成不是0的结果 atom_arr[7] = atom_arr[9] = 0xff; atomicKernel<<<numBlocks, numThreads>>>(atom_arr); checkCudaErrors(cudaDeviceSynchronize()); for(unsigned int i=0; i<numData; i++) cout<<i<<": "<< atom_arr[i]<<endl; //--------------------------- cout<<"=============================="<<endl; for(unsigned int i=0; i<numData; i++) atom_arr[i] = 0; //为了让AND 和XOR测试能够生成不是0的结果 atom_arr[7] = atom_arr[9] = 0xff; atomicKernel_CPU(atom_arr, numBlocks*numThreads); for(unsigned int i=0; i<numData; i++) cout<<i<<": "<< atom_arr[i]<<endl; //--------------------------- if(prop.pageableMemoryAccess){ free(atom_arr); }else{ cudaFree(atom_arr); } cout<<"systemWideAtomic completed"<<endl; }
0cff14b9bb16ce2765c50c5794b827dc8646d40c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017 XGBoost contributors */ // GPU implementation of objective function. // Necessary to avoid extra copying of data to CPU. #include <dmlc/omp.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <cmath> #include <memory> #include <vector> #include "../common/span.h" #include "../common/device_helpers.cuh" #include "../common/host_device_vector.h" #include "./regression_loss.h" namespace xgboost { namespace obj { using dh::DVec; DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); struct GPURegLossParam : public dmlc::Parameter<GPURegLossParam> { float scale_pos_weight; int n_gpus; int gpu_id; // declare parameters DMLC_DECLARE_PARAMETER(GPURegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); DMLC_DECLARE_FIELD(n_gpus).set_default(1).set_lower_bound(-1) .describe("Number of GPUs to use for multi-gpu algorithms (NOT IMPLEMENTED)"); DMLC_DECLARE_FIELD(gpu_id) .set_lower_bound(0) .set_default(0) .describe("gpu to use for objective function evaluation"); } }; // GPU kernel for gradient computation template<typename Loss> __global__ void get_gradient_k (common::Span<GradientPair> out_gpair, common::Span<unsigned int> label_correct, common::Span<const float> preds, common::Span<const float> labels, const float * __restrict__ weights, int n, float scale_pos_weight) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= n) return; float p = Loss::PredTransform(preds[i]); float w = weights == nullptr ? 
1.0f : weights[i]; float label = labels[i]; if (label == 1.0f) w *= scale_pos_weight; if (!Loss::CheckLabel(label)) atomicAnd(label_correct.data(), 0); out_gpair[i] = GradientPair (Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } // GPU kernel for predicate transformation template<typename Loss> __global__ void pred_transform_k(common::Span<float> preds, int n) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= n) return; preds[i] = Loss::PredTransform(preds[i]); } // regression loss function for evaluation on GPU (eventually) template<typename Loss> class GPURegLossObj : public ObjFunction { protected: bool copied_; HostDeviceVector<bst_float> labels_, weights_; HostDeviceVector<unsigned int> label_correct_; // allocate device data for n elements, do nothing if memory is allocated already void LazyResize(size_t n, size_t n_weights) { if (labels_.Size() == n && weights_.Size() == n_weights) return; copied_ = false; labels_.Reshard(devices_); weights_.Reshard(devices_); label_correct_.Reshard(devices_); if (labels_.Size() != n) { labels_.Resize(n); label_correct_.Resize(devices_.Size()); } if (weights_.Size() != n_weights) weights_.Resize(n_weights); } public: GPURegLossObj() : copied_(false) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.InitAllowUnknown(args); CHECK(param_.n_gpus != 0) << "Must have at least one device"; devices_ = GPUSet::Range(param_.gpu_id, dh::NDevicesAll(param_.n_gpus)); } void GetGradient(HostDeviceVector<float>* preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_NE(info.labels_.size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds->Size(), info.labels_.size()) << "labels are not correctly provided" << "preds.size=" << preds->Size() << ", label.size=" << info.labels_.size(); size_t ndata = preds->Size(); preds->Reshard(devices_); out_gpair->Reshard(devices_); out_gpair->Resize(ndata); 
LazyResize(ndata, info.weights_.size()); GetGradientDevice(preds, info, iter, out_gpair); } private: void GetGradientDevice(HostDeviceVector<float>* preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair>* out_gpair) { label_correct_.Fill(1); // only copy the labels and weights once, similar to how the data is copied if (!copied_) { labels_.Copy(info.labels_); if (info.weights_.size() > 0) weights_.Copy(info.weights_); copied_ = true; } // run the kernel #pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1) for (int i = 0; i < devices_.Size(); ++i) { int d = devices_[i]; dh::safe_cuda(hipSetDevice(d)); const int block = 256; size_t n = preds->DeviceSize(d); if (n > 0) { hipLaunchKernelGGL(( get_gradient_k<Loss>), dim3(dh::DivRoundUp(n, block)), dim3(block), 0, 0, out_gpair->DeviceSpan(d), label_correct_.DeviceSpan(d), preds->DeviceSpan(d), labels_.DeviceSpan(d), info.weights_.size() > 0 ? weights_.DevicePointer(d) : nullptr, n, param_.scale_pos_weight); dh::safe_cuda(hipGetLastError()); } dh::safe_cuda(hipDeviceSynchronize()); } // copy "label correct" flags back to host std::vector<unsigned int>& label_correct_h = label_correct_.HostVector(); for (int i = 0; i < devices_.Size(); ++i) { if (label_correct_h[i] == 0) LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) override { io_preds->Reshard(devices_); size_t ndata = io_preds->Size(); PredTransformDevice(io_preds); } void PredTransformDevice(HostDeviceVector<float>* preds) { #pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1) for (int i = 0; i < devices_.Size(); ++i) { int d = devices_[i]; dh::safe_cuda(hipSetDevice(d)); const int block = 256; size_t n = preds->DeviceSize(d); if (n > 0) { hipLaunchKernelGGL(( pred_transform_k<Loss>), dim3(dh::DivRoundUp(n, block)), dim3(block), 0, 0, preds->DeviceSpan(d), n); 
dh::safe_cuda(hipGetLastError()); } dh::safe_cuda(hipDeviceSynchronize()); } } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } protected: GPURegLossParam param_; GPUSet devices_; }; // register the objective functions DMLC_REGISTER_PARAMETER(GPURegLossParam); XGBOOST_REGISTER_OBJECTIVE(GPULinearRegression, "gpu:reg:linear") .describe("Linear regression (computed on GPU).") .set_body([]() { return new GPURegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticRegression, "gpu:reg:logistic") .describe("Logistic regression for probability regression task (computed on GPU).") .set_body([]() { return new GPURegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticClassification, "gpu:binary:logistic") .describe("Logistic regression for binary classification task (computed on GPU).") .set_body([]() { return new GPURegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticRaw, "gpu:binary:logitraw") .describe("Logistic regression for classification, output score " "before logistic transformation (computed on GPU)") .set_body([]() { return new GPURegLossObj<LogisticRaw>(); }); } // namespace obj } // namespace xgboost
0cff14b9bb16ce2765c50c5794b827dc8646d40c.cu
/*! * Copyright 2017 XGBoost contributors */ // GPU implementation of objective function. // Necessary to avoid extra copying of data to CPU. #include <dmlc/omp.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <cmath> #include <memory> #include <vector> #include "../common/span.h" #include "../common/device_helpers.cuh" #include "../common/host_device_vector.h" #include "./regression_loss.h" namespace xgboost { namespace obj { using dh::DVec; DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); struct GPURegLossParam : public dmlc::Parameter<GPURegLossParam> { float scale_pos_weight; int n_gpus; int gpu_id; // declare parameters DMLC_DECLARE_PARAMETER(GPURegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); DMLC_DECLARE_FIELD(n_gpus).set_default(1).set_lower_bound(-1) .describe("Number of GPUs to use for multi-gpu algorithms (NOT IMPLEMENTED)"); DMLC_DECLARE_FIELD(gpu_id) .set_lower_bound(0) .set_default(0) .describe("gpu to use for objective function evaluation"); } }; // GPU kernel for gradient computation template<typename Loss> __global__ void get_gradient_k (common::Span<GradientPair> out_gpair, common::Span<unsigned int> label_correct, common::Span<const float> preds, common::Span<const float> labels, const float * __restrict__ weights, int n, float scale_pos_weight) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= n) return; float p = Loss::PredTransform(preds[i]); float w = weights == nullptr ? 
1.0f : weights[i]; float label = labels[i]; if (label == 1.0f) w *= scale_pos_weight; if (!Loss::CheckLabel(label)) atomicAnd(label_correct.data(), 0); out_gpair[i] = GradientPair (Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } // GPU kernel for predicate transformation template<typename Loss> __global__ void pred_transform_k(common::Span<float> preds, int n) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= n) return; preds[i] = Loss::PredTransform(preds[i]); } // regression loss function for evaluation on GPU (eventually) template<typename Loss> class GPURegLossObj : public ObjFunction { protected: bool copied_; HostDeviceVector<bst_float> labels_, weights_; HostDeviceVector<unsigned int> label_correct_; // allocate device data for n elements, do nothing if memory is allocated already void LazyResize(size_t n, size_t n_weights) { if (labels_.Size() == n && weights_.Size() == n_weights) return; copied_ = false; labels_.Reshard(devices_); weights_.Reshard(devices_); label_correct_.Reshard(devices_); if (labels_.Size() != n) { labels_.Resize(n); label_correct_.Resize(devices_.Size()); } if (weights_.Size() != n_weights) weights_.Resize(n_weights); } public: GPURegLossObj() : copied_(false) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.InitAllowUnknown(args); CHECK(param_.n_gpus != 0) << "Must have at least one device"; devices_ = GPUSet::Range(param_.gpu_id, dh::NDevicesAll(param_.n_gpus)); } void GetGradient(HostDeviceVector<float>* preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_NE(info.labels_.size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds->Size(), info.labels_.size()) << "labels are not correctly provided" << "preds.size=" << preds->Size() << ", label.size=" << info.labels_.size(); size_t ndata = preds->Size(); preds->Reshard(devices_); out_gpair->Reshard(devices_); out_gpair->Resize(ndata); 
LazyResize(ndata, info.weights_.size()); GetGradientDevice(preds, info, iter, out_gpair); } private: void GetGradientDevice(HostDeviceVector<float>* preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair>* out_gpair) { label_correct_.Fill(1); // only copy the labels and weights once, similar to how the data is copied if (!copied_) { labels_.Copy(info.labels_); if (info.weights_.size() > 0) weights_.Copy(info.weights_); copied_ = true; } // run the kernel #pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1) for (int i = 0; i < devices_.Size(); ++i) { int d = devices_[i]; dh::safe_cuda(cudaSetDevice(d)); const int block = 256; size_t n = preds->DeviceSize(d); if (n > 0) { get_gradient_k<Loss><<<dh::DivRoundUp(n, block), block>>> (out_gpair->DeviceSpan(d), label_correct_.DeviceSpan(d), preds->DeviceSpan(d), labels_.DeviceSpan(d), info.weights_.size() > 0 ? weights_.DevicePointer(d) : nullptr, n, param_.scale_pos_weight); dh::safe_cuda(cudaGetLastError()); } dh::safe_cuda(cudaDeviceSynchronize()); } // copy "label correct" flags back to host std::vector<unsigned int>& label_correct_h = label_correct_.HostVector(); for (int i = 0; i < devices_.Size(); ++i) { if (label_correct_h[i] == 0) LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) override { io_preds->Reshard(devices_); size_t ndata = io_preds->Size(); PredTransformDevice(io_preds); } void PredTransformDevice(HostDeviceVector<float>* preds) { #pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1) for (int i = 0; i < devices_.Size(); ++i) { int d = devices_[i]; dh::safe_cuda(cudaSetDevice(d)); const int block = 256; size_t n = preds->DeviceSize(d); if (n > 0) { pred_transform_k<Loss><<<dh::DivRoundUp(n, block), block>>>( preds->DeviceSpan(d), n); dh::safe_cuda(cudaGetLastError()); } dh::safe_cuda(cudaDeviceSynchronize()); } } float 
ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } protected: GPURegLossParam param_; GPUSet devices_; }; // register the objective functions DMLC_REGISTER_PARAMETER(GPURegLossParam); XGBOOST_REGISTER_OBJECTIVE(GPULinearRegression, "gpu:reg:linear") .describe("Linear regression (computed on GPU).") .set_body([]() { return new GPURegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticRegression, "gpu:reg:logistic") .describe("Logistic regression for probability regression task (computed on GPU).") .set_body([]() { return new GPURegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticClassification, "gpu:binary:logistic") .describe("Logistic regression for binary classification task (computed on GPU).") .set_body([]() { return new GPURegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticRaw, "gpu:binary:logitraw") .describe("Logistic regression for classification, output score " "before logistic transformation (computed on GPU)") .set_body([]() { return new GPURegLossObj<LogisticRaw>(); }); } // namespace obj } // namespace xgboost
f580f7284910e8f468c2e40cfc7ffa19fc44edc7.hip
// !!! This is a file automatically generated by hipify!!! #include "benchmark.cuh" namespace chrono = std::chrono; using clock_type = chrono::high_resolution_clock; int Benchmark::add_node(void **paramarray, cudaKernelNodeParams &param, void *func, dim3 gridsize, dim3 threads, hipGraph_t &g, hipGraphNode_t *n, std::vector<hipGraphNode_t> &dependencies, int shared_memory) { param.func = func; param.blockDim = threads; param.gridDim = gridsize; param.kernelParams = paramarray; param.sharedMemBytes = shared_memory; param.extra = NULL; return cudaGraphAddKernelNode(n, g, dependencies.data(), dependencies.size(), &param); } void Benchmark::run() { auto start_tot = clock_type::now(); auto start_tmp = clock_type::now(); auto end_tmp = clock_type::now(); // Allocation; start_tmp = clock_type::now(); alloc(); end_tmp = clock_type::now(); if (debug && err) std::cout << "error=" << err << std::endl; if (debug) std::cout << "allocation time=" << chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count() / 1000 << " ms" << std::endl; // Initialization; start_tmp = clock_type::now(); init(); end_tmp = clock_type::now(); if (debug && err) std::cout << "error=" << err << std::endl; if (debug) std::cout << "initialization time=" << chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count() / 1000 << " ms" << std::endl; // Print header; if (!debug) std::cout << "num_iter,gpu_result,total_time_sec,overhead_sec,computation_sec" << std::endl; long tot_time = 0; for (int i = 0; i < num_executions; i++) { if (debug) std::cout << "\n-- iter=" << i << std::endl; // Reset; start_tmp = clock_type::now(); reset(); end_tmp = clock_type::now(); auto reset_time = chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count(); if (debug) std::cout << " reset=" << (float)reset_time / 1000 << " ms" << std::endl; // Execution; start_tmp = clock_type::now(); switch (policy) { case Policy::Sync: execute_sync(i); break; case Policy::CudaGraph: 
execute_cudagraph(i); break; case Policy::CudaGraphAsync: execute_cudagraph_manual(i); break; case Policy::CudaGraphSingle: execute_cudagraph_single(i); break; default: execute_async(i); } if (debug && err) std::cout << " error=" << err << std::endl; end_tmp = clock_type::now(); auto exec_time = chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count(); if (i >= skip_iterations) tot_time += exec_time; if (debug) { std::cout << " result=" << print_result() << std::endl; std::cout << " execution(" << i << ")=" << (float)exec_time / 1000 << " ms" << std::endl; } else { std::cout << i << "," << print_result(true) << "," << (float)(reset_time + exec_time) / 1e6 << "," << (float)reset_time / 1e6 << "," << (float)exec_time / 1e6 << std::endl; } } auto end_time = chrono::duration_cast<chrono::microseconds>(clock_type::now() - start_tot).count(); if (debug) std::cout << "\ntotal execution time=" << end_time / 1e6 << " sec" << std::endl; if (debug) std::cout << "mean exec time=" << (float)tot_time / (1000 * (num_executions - skip_iterations)) << " ms" << std::endl; }
f580f7284910e8f468c2e40cfc7ffa19fc44edc7.cu
#include "benchmark.cuh" namespace chrono = std::chrono; using clock_type = chrono::high_resolution_clock; int Benchmark::add_node(void **paramarray, cudaKernelNodeParams &param, void *func, dim3 gridsize, dim3 threads, cudaGraph_t &g, cudaGraphNode_t *n, std::vector<cudaGraphNode_t> &dependencies, int shared_memory) { param.func = func; param.blockDim = threads; param.gridDim = gridsize; param.kernelParams = paramarray; param.sharedMemBytes = shared_memory; param.extra = NULL; return cudaGraphAddKernelNode(n, g, dependencies.data(), dependencies.size(), &param); } void Benchmark::run() { auto start_tot = clock_type::now(); auto start_tmp = clock_type::now(); auto end_tmp = clock_type::now(); // Allocation; start_tmp = clock_type::now(); alloc(); end_tmp = clock_type::now(); if (debug && err) std::cout << "error=" << err << std::endl; if (debug) std::cout << "allocation time=" << chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count() / 1000 << " ms" << std::endl; // Initialization; start_tmp = clock_type::now(); init(); end_tmp = clock_type::now(); if (debug && err) std::cout << "error=" << err << std::endl; if (debug) std::cout << "initialization time=" << chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count() / 1000 << " ms" << std::endl; // Print header; if (!debug) std::cout << "num_iter,gpu_result,total_time_sec,overhead_sec,computation_sec" << std::endl; long tot_time = 0; for (int i = 0; i < num_executions; i++) { if (debug) std::cout << "\n-- iter=" << i << std::endl; // Reset; start_tmp = clock_type::now(); reset(); end_tmp = clock_type::now(); auto reset_time = chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count(); if (debug) std::cout << " reset=" << (float)reset_time / 1000 << " ms" << std::endl; // Execution; start_tmp = clock_type::now(); switch (policy) { case Policy::Sync: execute_sync(i); break; case Policy::CudaGraph: execute_cudagraph(i); break; case Policy::CudaGraphAsync: 
execute_cudagraph_manual(i); break; case Policy::CudaGraphSingle: execute_cudagraph_single(i); break; default: execute_async(i); } if (debug && err) std::cout << " error=" << err << std::endl; end_tmp = clock_type::now(); auto exec_time = chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count(); if (i >= skip_iterations) tot_time += exec_time; if (debug) { std::cout << " result=" << print_result() << std::endl; std::cout << " execution(" << i << ")=" << (float)exec_time / 1000 << " ms" << std::endl; } else { std::cout << i << "," << print_result(true) << "," << (float)(reset_time + exec_time) / 1e6 << "," << (float)reset_time / 1e6 << "," << (float)exec_time / 1e6 << std::endl; } } auto end_time = chrono::duration_cast<chrono::microseconds>(clock_type::now() - start_tot).count(); if (debug) std::cout << "\ntotal execution time=" << end_time / 1e6 << " sec" << std::endl; if (debug) std::cout << "mean exec time=" << (float)tot_time / (1000 * (num_executions - skip_iterations)) << " ms" << std::endl; }
5e0a011e93125e479227c41a1b44684eb7443b42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <chrono> #include <fstream> #include <iostream> #include <random> #include <sstream> #include <stdexcept> #include <vector> #include "helper_math.h" struct Data { Data(int size) : size(size), bytes(size * sizeof(float2)) { hipMalloc(&coordinates, bytes); hipMemset(coordinates, 0, bytes); } Data(std::vector<float2>& h_coordinates) : size(h_coordinates.size()), bytes(h_coordinates.size() * sizeof(float2)) { hipMalloc(&coordinates, bytes); hipMemcpy(coordinates, h_coordinates.data(), bytes, hipMemcpyHostToDevice); } void clear() { hipMemset(coordinates, 0, bytes); } ~Data() { hipFree(coordinates); } float2* coordinates{nullptr}; int size{0}; int bytes{0}; }; __device__ inline float squared_l2_distance(float2 x1, float2 x2) { float2 diff = x1-x2; return dot(diff, diff); } __device__ inline float2 atomicAdd(float2* addr, float2 val) { float2 result; result.x = atomicAdd(&addr->x, val.x); result.y = atomicAdd(&addr->y, val.y); return result; } __global__ void assign_clusters(const float2* points, int data_size, const float2* means, float2* new_sums, int k, int* counts) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; extern __shared__ char shared_memory[]; // First part of the shared memory is for the new centroids coordinates, // the second part is for the number of particles assigned float2* block_new_sums = (float2*) shared_memory; int* block_counts = (int*)(block_new_sums + k); // Set everything in shared memory to 0 for (int i=threadIdx.x; i<k; i+=blockDim.x) { block_new_sums[i] = {0.0f, 0.0f}; block_counts[i] = 0; } __syncthreads(); // Make global loads once. 
const float2 point = points[index]; // Compute the closest current centroid float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = squared_l2_distance(point, means[cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } // Add the point coordinate to the NEW centroid coordinate // and increment the corresponding points count atomicAdd(&block_new_sums[best_cluster], point); atomicAdd(&block_counts[best_cluster], 1); // Wait until all the threads in a block are done updating __syncthreads(); // Perform global atomics, K operations per block if (threadIdx.x < k) { atomicAdd(&new_sums[threadIdx.x], block_new_sums[threadIdx.x]); atomicAdd(&counts[threadIdx.x], block_counts[threadIdx.x]); } } __global__ void compute_new_means(float2* means, const float2* new_sum, const int* counts) { const int cluster = threadIdx.x; const int count = max(1, counts[cluster]); means[cluster] = new_sum[cluster] / count; } int main(int argc, const char* argv[]) { if (argc < 3) { std::cerr << "usage: k-means <data-file> <k> [iterations]" << std::endl; std::exit(EXIT_FAILURE); } const auto k = std::atoi(argv[2]); const auto number_of_iterations = (argc == 4) ? 
std::atoi(argv[3]) : 300; std::vector<float2> h_points; std::ifstream stream(argv[1]); std::string line; while (std::getline(stream, line)) { std::istringstream line_stream(line); float x, y; uint16_t label; line_stream >> x >> y >> label; h_points.push_back({x,y}); } const size_t number_of_elements = h_points.size(); Data d_points(h_points); std::mt19937 rng(42); std::shuffle(h_points.begin(), h_points.end(), rng); std::vector<float2> initial_means{h_points.begin(), h_points.begin() + k}; Data d_means( initial_means ); const int threads = 64; const int blocks = (number_of_elements + threads - 1) / threads; // Every block keeps its own centroid data: // current x and y sum and number of points (from this block) assigned const int shared_memory = k * (sizeof(float2) + sizeof(int)); Data d_sums(k * blocks); int* d_counts; hipMalloc(&d_counts, k * blocks * sizeof(int)); hipMemset(d_counts, 0, k * blocks * sizeof(int)); const auto start = std::chrono::high_resolution_clock::now(); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { hipMemset(d_counts, 0, k * sizeof(int)); d_sums.clear(); hipLaunchKernelGGL(( assign_clusters), dim3(blocks), dim3(threads), shared_memory, 0, d_points.coordinates, d_points.size, d_means.coordinates, d_sums.coordinates, k, d_counts); hipLaunchKernelGGL(( compute_new_means), dim3(1), dim3(k), 0, 0, d_means.coordinates, d_sums.coordinates, d_counts); } hipDeviceSynchronize(); const auto end = std::chrono::high_resolution_clock::now(); const auto duration = std::chrono::duration_cast<std::chrono::duration<float>>(end - start); std::cout << "Took: " << duration.count() << "s" << std::endl; hipFree(d_counts); std::vector<float2> means(k); hipMemcpy(means.data(), d_means.coordinates, d_means.bytes, hipMemcpyDeviceToHost); for (size_t cluster = 0; cluster < k; ++cluster) { std::cout << means[cluster].x << " " << means[cluster].y << std::endl; } }
5e0a011e93125e479227c41a1b44684eb7443b42.cu
#include <algorithm> #include <cfloat> #include <chrono> #include <fstream> #include <iostream> #include <random> #include <sstream> #include <stdexcept> #include <vector> #include "helper_math.h" struct Data { Data(int size) : size(size), bytes(size * sizeof(float2)) { cudaMalloc(&coordinates, bytes); cudaMemset(coordinates, 0, bytes); } Data(std::vector<float2>& h_coordinates) : size(h_coordinates.size()), bytes(h_coordinates.size() * sizeof(float2)) { cudaMalloc(&coordinates, bytes); cudaMemcpy(coordinates, h_coordinates.data(), bytes, cudaMemcpyHostToDevice); } void clear() { cudaMemset(coordinates, 0, bytes); } ~Data() { cudaFree(coordinates); } float2* coordinates{nullptr}; int size{0}; int bytes{0}; }; __device__ inline float squared_l2_distance(float2 x1, float2 x2) { float2 diff = x1-x2; return dot(diff, diff); } __device__ inline float2 atomicAdd(float2* addr, float2 val) { float2 result; result.x = atomicAdd(&addr->x, val.x); result.y = atomicAdd(&addr->y, val.y); return result; } __global__ void assign_clusters(const float2* points, int data_size, const float2* means, float2* new_sums, int k, int* counts) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; extern __shared__ char shared_memory[]; // First part of the shared memory is for the new centroids coordinates, // the second part is for the number of particles assigned float2* block_new_sums = (float2*) shared_memory; int* block_counts = (int*)(block_new_sums + k); // Set everything in shared memory to 0 for (int i=threadIdx.x; i<k; i+=blockDim.x) { block_new_sums[i] = {0.0f, 0.0f}; block_counts[i] = 0; } __syncthreads(); // Make global loads once. 
const float2 point = points[index]; // Compute the closest current centroid float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = squared_l2_distance(point, means[cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } // Add the point coordinate to the NEW centroid coordinate // and increment the corresponding points count atomicAdd(&block_new_sums[best_cluster], point); atomicAdd(&block_counts[best_cluster], 1); // Wait until all the threads in a block are done updating __syncthreads(); // Perform global atomics, K operations per block if (threadIdx.x < k) { atomicAdd(&new_sums[threadIdx.x], block_new_sums[threadIdx.x]); atomicAdd(&counts[threadIdx.x], block_counts[threadIdx.x]); } } __global__ void compute_new_means(float2* means, const float2* new_sum, const int* counts) { const int cluster = threadIdx.x; const int count = max(1, counts[cluster]); means[cluster] = new_sum[cluster] / count; } int main(int argc, const char* argv[]) { if (argc < 3) { std::cerr << "usage: k-means <data-file> <k> [iterations]" << std::endl; std::exit(EXIT_FAILURE); } const auto k = std::atoi(argv[2]); const auto number_of_iterations = (argc == 4) ? 
std::atoi(argv[3]) : 300; std::vector<float2> h_points; std::ifstream stream(argv[1]); std::string line; while (std::getline(stream, line)) { std::istringstream line_stream(line); float x, y; uint16_t label; line_stream >> x >> y >> label; h_points.push_back({x,y}); } const size_t number_of_elements = h_points.size(); Data d_points(h_points); std::mt19937 rng(42); std::shuffle(h_points.begin(), h_points.end(), rng); std::vector<float2> initial_means{h_points.begin(), h_points.begin() + k}; Data d_means( initial_means ); const int threads = 64; const int blocks = (number_of_elements + threads - 1) / threads; // Every block keeps its own centroid data: // current x and y sum and number of points (from this block) assigned const int shared_memory = k * (sizeof(float2) + sizeof(int)); Data d_sums(k * blocks); int* d_counts; cudaMalloc(&d_counts, k * blocks * sizeof(int)); cudaMemset(d_counts, 0, k * blocks * sizeof(int)); const auto start = std::chrono::high_resolution_clock::now(); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { cudaMemset(d_counts, 0, k * sizeof(int)); d_sums.clear(); assign_clusters<<<blocks, threads, shared_memory>>>(d_points.coordinates, d_points.size, d_means.coordinates, d_sums.coordinates, k, d_counts); compute_new_means<<<1, k>>>(d_means.coordinates, d_sums.coordinates, d_counts); } cudaDeviceSynchronize(); const auto end = std::chrono::high_resolution_clock::now(); const auto duration = std::chrono::duration_cast<std::chrono::duration<float>>(end - start); std::cout << "Took: " << duration.count() << "s" << std::endl; cudaFree(d_counts); std::vector<float2> means(k); cudaMemcpy(means.data(), d_means.coordinates, d_means.bytes, cudaMemcpyDeviceToHost); for (size_t cluster = 0; cluster < k; ++cluster) { std::cout << means[cluster].x << " " << means[cluster].y << std::endl; } }
aeff60917b53777a093e41d1b7d1ccc8371c0dde.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> //#include "timer.h" //#include "utils.h" #include <string> #include <stdio.h> size_t numRows(); //return # of rows in the image size_t numCols(); //return # of cols in the image void preProcess(uchar4 **h_rgbaImage, uchar4 **h_greyImage, uchar4 **d_rgbaImage, uchar4 **d_greyImage, const std::string& filename); void postProcess(const std::string& output_file); void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, uchar4* d_greyImage, size_t numRows, size_t numCols); //include the definitions of the above functions for this homework //#include "rgb2grey.hip" int main(int argc, char **argv) { uchar4 *h_rgbaImage, *d_rgbaImage; uchar4 *h_greyImage, *d_greyImage; std::string input_file; std::string output_file; if (argc == 3) { input_file = std::string(argv[1]); output_file = std::string(argv[2]); } else { std::cerr << "Usage: ./hw input_file output_file" << std::endl; exit(1); } //load the image and give us our input and output pointers preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file); //GpuTimer timer; //timer.Start(); //call the students' code std::cout<<"Input Size: "<<" "<<numCols()<<" x "<<numRows()<<std::endl; your_rgba_to_greyscale(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols()); //timer.Stop(); hipDeviceSynchronize(); //checkCudaErrors(hipGetLastError()); /* int err = printf("%f msecs.\n", timer.Elapsed()); if (err < 0) { //Couldn't print! Probably the student closed stdout - bad news std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl; exit(1); } */ //check results and output the grey image postProcess(output_file); return 0; }
aeff60917b53777a093e41d1b7d1ccc8371c0dde.cu
#include <iostream> //#include "timer.h" //#include "utils.h" #include <string> #include <stdio.h> size_t numRows(); //return # of rows in the image size_t numCols(); //return # of cols in the image void preProcess(uchar4 **h_rgbaImage, uchar4 **h_greyImage, uchar4 **d_rgbaImage, uchar4 **d_greyImage, const std::string& filename); void postProcess(const std::string& output_file); void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, uchar4* d_greyImage, size_t numRows, size_t numCols); //include the definitions of the above functions for this homework //#include "rgb2grey.cu" int main(int argc, char **argv) { uchar4 *h_rgbaImage, *d_rgbaImage; uchar4 *h_greyImage, *d_greyImage; std::string input_file; std::string output_file; if (argc == 3) { input_file = std::string(argv[1]); output_file = std::string(argv[2]); } else { std::cerr << "Usage: ./hw input_file output_file" << std::endl; exit(1); } //load the image and give us our input and output pointers preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file); //GpuTimer timer; //timer.Start(); //call the students' code std::cout<<"Input Size: "<<" "<<numCols()<<" x "<<numRows()<<std::endl; your_rgba_to_greyscale(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols()); //timer.Stop(); cudaDeviceSynchronize(); //checkCudaErrors(cudaGetLastError()); /* int err = printf("%f msecs.\n", timer.Elapsed()); if (err < 0) { //Couldn't print! Probably the student closed stdout - bad news std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl; exit(1); } */ //check results and output the grey image postProcess(output_file); return 0; }
1339b18e00ec64bf28b661b915f6269a5ac64f3e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************/ /* Name: cmf3DHybridPottsCut_kernels.cu Authors: Martin Rajchl mrajchl@imaging.robarts.ca Jing Yuan cn.yuanjing@googlemail.com */ /***************************************************************************/ #include <stdio.h> #define SQR(x) (x)*(x) #define MAX(a,b) ( a > b ? a : b ) #define MIN(a,b) ( a <= b ? a : b ) #define SIGN(x) ( x >= 0.0 ? 1.0 : -1.0 ) #define ABS(x) ( (x) > 0.0 ? x : -(x) ) #define X(iy,ix) (ix)*iNy + iy #define Xe(iy,ix) (ix)*iNye+ (iy) #define SQRTgpu sqrt __global__ void updateP1(float *ps, float *pt, float *div, float *gk, float *u, float cc, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; float fpt = 0.0f; // if( idxVolume <= iNx*iNy*iNz){ // return; if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); if (id < 2){ fpt = ps[idxVolume]; } else{ fpt = pt[idxVolume]; } gk[idx] = div[idx] - (fpt - pt[idx] + u[idx]/cc) ; //gk[idx] = (((div[idx] - fpt) + pt[idx]) - u[idx]); } } __syncthreads(); } //} __global__ void updateP(float *bx, float *by, float *bz, float steps, float *gk, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; // if( idxVolume <= iNx*iNy*iNz){ if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); float currVal = gk[idx]; bx[idx+1] = steps * ( gk[idx+1] - currVal ) + bx[idx+1]; by[idx+iNx] = steps * ( gk[idx+iNx] - currVal ) + by[idx+iNx]; bz[idx+(iNx*iNy)] = steps * (gk[idx+(iNx*iNy)] - currVal) + bz[idx+(iNx*iNy)]; } } __syncthreads(); } //} __global__ void projStep1alpha(float *bx, float *by, float *bz, 
float *gk, float *penalty1, float *penalty2, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; // if( idxVolume <= iNx*iNy*iNz){ if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); float fps = 0.0f; float fpt = 0.0f; fpt = SQRTgpu((SQR(bx[idx]) + SQR(bx[idx+1]) + SQR(by[idx]) + SQR(by[idx+iNx]) + SQR(bz[idx]) + SQR(bz[idx+(iNx*iNy)]) ) * 0.5f ); if(id < 2){ fps = penalty1[idxVolume]; } else{ fps = penalty2[idxVolume]; } if(fpt > fps) gk[idx] = fps/fpt; else gk[idx] = 1.0f; } } __syncthreads(); } //} __global__ void projStep2Total(float *bx, float *by, float *bz, float *gk, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; // if( idxVolume <= iNx*iNy*iNz){ if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); float gkVal = gk[idx]; bx[idx+1] = ( gk[idx+1] + gkVal ) * 0.5f * bx[idx+1]; by[idx+iNx] = ( gk[idx+iNx] + gkVal ) * 0.5f * by[idx+iNx]; bz[idx+(iNx*iNy)] = ( gk[idx+(iNx*iNy)] + gkVal ) * 0.5f * bz[idx+(iNx*iNy)]; } } __syncthreads(); } //} __global__ void calcDivergence(float *bx, float *by, float *bz, float *div, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ // calculate divergence for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); div[idx] = bx[idx+1] - bx[idx] + by[idx+iNx] - by[idx] + bz[idx+(iNx*iNy)] - bz[idx]; } // } __syncthreads(); } __global__ void updatePstMult(float *gk, float *u, float *bx, float *by, float *bz, float *div, float *ps, float *pt, float *Ct, float *FPS, float cc, int iNx, int iNy, int iNz, int 
iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; float fps = 0.0f; float fpt = 0.0f; FPS[idxVolume] = 0.0f; // if( idxVolume <= iNx*iNy*iNz){ if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ // calculate divergence for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); div[idx] = bx[idx+1] - bx[idx] + by[idx+iNx] - by[idx] + bz[idx+(iNx*iNy)] - bz[idx]; } // // update the sink flow field pt (x,1) fpt = ps[idxVolume] - div[idxVolume] + u[idxVolume]/cc; //fpt = ps[idxVolume] - div[idxVolume] + u[idxVolume]; for(int id = 2; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); fpt += div[idx] + pt[idx] - u[idx]/cc; //fpt += (div[idx] + pt[idx] - u[idx]); } pt[idxVolume] = fpt / 4.0f; // update the source flow ps fpt = 0.0f; for (int id = 0; id < 2; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); fpt += (div[idx] + pt[idx] - u[idx]/cc); //fpt += (div[idx] + pt[idx] - u[idx]); } ps[idxVolume] = (fpt/2.0f) + (1.0f/(cc*2.0f)); // FCP // float FCP = (1.0f/(cc*2.0f)); // FCP // ps[idxVolume] = __fadd_rz(__fdiv_rz(fpt,2), FCP); // // update the sink flow field pt(x,i) for (int id = 1; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); if (id == 1){ fps = ps[idxVolume] + u[idx]/cc - div[idx]; //fps = ps[idxVolume] + u[idx] - div[idx]; } else{ fps = pt[idxVolume] + u[idx]/cc - div[idx]; //fps = pt[idxVolume] + u[idx] - div[idx]; } pt[idx] = MIN(fps , Ct[idx]); } // /* update the multipliers */ for (int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); fpt = 0.0f; if(id < 2){ fpt = cc*(div[idx] + pt[idx] - ps[idxVolume]); //fpt = div[idx] + pt[idx] - ps[idxVolume]; } else{ fpt = cc*(div[idx] + pt[idx] - pt[idxVolume]); //fpt = div[idx] + pt[idx] - pt[idxVolume]; } u[idx] -= fpt; FPS[idxVolume] += ABS(fpt); } } __syncthreads(); } //} __global__ void errorAccumulation(float* errorBuffer, unsigned int blockSize, unsigned int arraySize){ int idx = (blockSize 
+ blockSize) * (blockIdx.x * blockDim.x + threadIdx.x); int idxUp = idx + blockSize; float error1 = (idx < arraySize) ? errorBuffer[idx] : 0.0f; float error2 = (idxUp < arraySize) ? errorBuffer[idxUp] : 0.0f; __syncthreads(); if(idx < arraySize) errorBuffer[idx] = error1 + error2; } // NOT FUNCTIONAL YET! __global__ void resolveBoundaryCondtions(float *u, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; if(!( (idxVolume%iNx) != (iNx-1) )){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); u[idx] = u[idx-1]; } } if (! ((idxVolume/iNx)%iNy) != (iNy-1)){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); u[idx] = u[idx-iNx]; } } if(! (idxVolume/(iNx*iNy)) < (iNz-1) ){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); u[idx] = u[idx-(iNx*iNy)]; } } }
1339b18e00ec64bf28b661b915f6269a5ac64f3e.cu
/***************************************************************************/ /* Name: cmf3DHybridPottsCut_kernels.cu Authors: Martin Rajchl mrajchl@imaging.robarts.ca Jing Yuan cn.yuanjing@googlemail.com */ /***************************************************************************/ #include <stdio.h> #define SQR(x) (x)*(x) #define MAX(a,b) ( a > b ? a : b ) #define MIN(a,b) ( a <= b ? a : b ) #define SIGN(x) ( x >= 0.0 ? 1.0 : -1.0 ) #define ABS(x) ( (x) > 0.0 ? x : -(x) ) #define X(iy,ix) (ix)*iNy + iy #define Xe(iy,ix) (ix)*iNye+ (iy) #define SQRTgpu sqrt __global__ void updateP1(float *ps, float *pt, float *div, float *gk, float *u, float cc, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; float fpt = 0.0f; // if( idxVolume <= iNx*iNy*iNz){ // return; if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); if (id < 2){ fpt = ps[idxVolume]; } else{ fpt = pt[idxVolume]; } gk[idx] = div[idx] - (fpt - pt[idx] + u[idx]/cc) ; //gk[idx] = (((div[idx] - fpt) + pt[idx]) - u[idx]); } } __syncthreads(); } //} __global__ void updateP(float *bx, float *by, float *bz, float steps, float *gk, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; // if( idxVolume <= iNx*iNy*iNz){ if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); float currVal = gk[idx]; bx[idx+1] = steps * ( gk[idx+1] - currVal ) + bx[idx+1]; by[idx+iNx] = steps * ( gk[idx+iNx] - currVal ) + by[idx+iNx]; bz[idx+(iNx*iNy)] = steps * (gk[idx+(iNx*iNy)] - currVal) + bz[idx+(iNx*iNy)]; } } __syncthreads(); } //} __global__ void projStep1alpha(float *bx, float *by, float *bz, float *gk, float *penalty1, float *penalty2, int iNx, int iNy, int iNz, int iLab){ int 
idxVolume = blockIdx.x * blockDim.x + threadIdx.x; // if( idxVolume <= iNx*iNy*iNz){ if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); float fps = 0.0f; float fpt = 0.0f; fpt = SQRTgpu((SQR(bx[idx]) + SQR(bx[idx+1]) + SQR(by[idx]) + SQR(by[idx+iNx]) + SQR(bz[idx]) + SQR(bz[idx+(iNx*iNy)]) ) * 0.5f ); if(id < 2){ fps = penalty1[idxVolume]; } else{ fps = penalty2[idxVolume]; } if(fpt > fps) gk[idx] = fps/fpt; else gk[idx] = 1.0f; } } __syncthreads(); } //} __global__ void projStep2Total(float *bx, float *by, float *bz, float *gk, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; // if( idxVolume <= iNx*iNy*iNz){ if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); float gkVal = gk[idx]; bx[idx+1] = ( gk[idx+1] + gkVal ) * 0.5f * bx[idx+1]; by[idx+iNx] = ( gk[idx+iNx] + gkVal ) * 0.5f * by[idx+iNx]; bz[idx+(iNx*iNy)] = ( gk[idx+(iNx*iNy)] + gkVal ) * 0.5f * bz[idx+(iNx*iNy)]; } } __syncthreads(); } //} __global__ void calcDivergence(float *bx, float *by, float *bz, float *div, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ // calculate divergence for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); div[idx] = bx[idx+1] - bx[idx] + by[idx+iNx] - by[idx] + bz[idx+(iNx*iNy)] - bz[idx]; } // } __syncthreads(); } __global__ void updatePstMult(float *gk, float *u, float *bx, float *by, float *bz, float *div, float *ps, float *pt, float *Ct, float *FPS, float cc, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; float fps = 0.0f; float fpt 
= 0.0f; FPS[idxVolume] = 0.0f; // if( idxVolume <= iNx*iNy*iNz){ if( ( (idxVolume%iNx) != (iNx-1) ) && ( (idxVolume/(iNx*iNy)) < (iNz-1) ) && ( ((idxVolume/iNx)%iNy) != (iNy-1)) ){ // calculate divergence for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); div[idx] = bx[idx+1] - bx[idx] + by[idx+iNx] - by[idx] + bz[idx+(iNx*iNy)] - bz[idx]; } // // update the sink flow field pt (x,1) fpt = ps[idxVolume] - div[idxVolume] + u[idxVolume]/cc; //fpt = ps[idxVolume] - div[idxVolume] + u[idxVolume]; for(int id = 2; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); fpt += div[idx] + pt[idx] - u[idx]/cc; //fpt += (div[idx] + pt[idx] - u[idx]); } pt[idxVolume] = fpt / 4.0f; // update the source flow ps fpt = 0.0f; for (int id = 0; id < 2; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); fpt += (div[idx] + pt[idx] - u[idx]/cc); //fpt += (div[idx] + pt[idx] - u[idx]); } ps[idxVolume] = (fpt/2.0f) + (1.0f/(cc*2.0f)); // FCP // float FCP = (1.0f/(cc*2.0f)); // FCP // ps[idxVolume] = __fadd_rz(__fdiv_rz(fpt,2), FCP); // // update the sink flow field pt(x,i) for (int id = 1; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); if (id == 1){ fps = ps[idxVolume] + u[idx]/cc - div[idx]; //fps = ps[idxVolume] + u[idx] - div[idx]; } else{ fps = pt[idxVolume] + u[idx]/cc - div[idx]; //fps = pt[idxVolume] + u[idx] - div[idx]; } pt[idx] = MIN(fps , Ct[idx]); } // /* update the multipliers */ for (int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); fpt = 0.0f; if(id < 2){ fpt = cc*(div[idx] + pt[idx] - ps[idxVolume]); //fpt = div[idx] + pt[idx] - ps[idxVolume]; } else{ fpt = cc*(div[idx] + pt[idx] - pt[idxVolume]); //fpt = div[idx] + pt[idx] - pt[idxVolume]; } u[idx] -= fpt; FPS[idxVolume] += ABS(fpt); } } __syncthreads(); } //} __global__ void errorAccumulation(float* errorBuffer, unsigned int blockSize, unsigned int arraySize){ int idx = (blockSize + blockSize) * (blockIdx.x * blockDim.x + threadIdx.x); int idxUp = idx + blockSize; float 
error1 = (idx < arraySize) ? errorBuffer[idx] : 0.0f; float error2 = (idxUp < arraySize) ? errorBuffer[idxUp] : 0.0f; __syncthreads(); if(idx < arraySize) errorBuffer[idx] = error1 + error2; } // NOT FUNCTIONAL YET! __global__ void resolveBoundaryCondtions(float *u, int iNx, int iNy, int iNz, int iLab){ int idxVolume = blockIdx.x * blockDim.x + threadIdx.x; if(!( (idxVolume%iNx) != (iNx-1) )){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); u[idx] = u[idx-1]; } } if (! ((idxVolume/iNx)%iNy) != (iNy-1)){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); u[idx] = u[idx-iNx]; } } if(! (idxVolume/(iNx*iNy)) < (iNz-1) ){ for(int id = 0; id < iLab; id++){ int idx = idxVolume + id*(iNx*iNy*iNz); u[idx] = u[idx-(iNx*iNy)]; } } }
e6eaeead122032970ab647021096a121a7e33cc2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void SumV0(int* x, int* y, int* result) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int stride = gridDim.x * blockDim.x; result[tid] = x[tid] + y[tid]; } __global__ void SumV1(int *x, int* y, int* result) { int double_tid = threadIdx.x + 2 * blockDim.x * blockIdx.x; result[double_tid] = x[double_tid] + y[double_tid]; result[double_tid + blockDim.x] = x[double_tid + blockDim.x] + y[double_tid + blockDim.x]; } int main() { int array_size = 1 << 26; int *h_x = new int[array_size]; int *h_y = new int[array_size]; for (int i = 0; i < array_size; ++i) { h_x[i] = i; h_y[i] = 2 * i; } int* d_x; int* d_y; int* d_result; int num_bytes = sizeof(*h_x) * array_size; hipMalloc(&d_x, num_bytes); hipMalloc(&d_y, num_bytes); hipMalloc(&d_result, num_bytes); hipMemcpy(d_x, h_x, num_bytes, hipMemcpyHostToDevice); hipMemcpy(d_y, h_y, num_bytes, hipMemcpyHostToDevice); int block_size = 512; int num_blocks = (array_size + block_size - 1) / block_size; hipLaunchKernelGGL(( SumV1), dim3(num_blocks / 2), dim3(block_size), 0, 0, d_x, d_y, d_result); hipLaunchKernelGGL(( SumV0), dim3(num_blocks), dim3(block_size), 0, 0, d_x, d_y, d_result); int *h_result = new int[array_size]; hipMemcpy(h_result, d_result, num_bytes, hipMemcpyDeviceToHost); hipFree(d_x); hipFree(d_y); hipFree(d_result); delete[] h_x; delete[] h_y; delete[] h_result; return 0; }
e6eaeead122032970ab647021096a121a7e33cc2.cu
__global__ void SumV0(int* x, int* y, int* result) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int stride = gridDim.x * blockDim.x; result[tid] = x[tid] + y[tid]; } __global__ void SumV1(int *x, int* y, int* result) { int double_tid = threadIdx.x + 2 * blockDim.x * blockIdx.x; result[double_tid] = x[double_tid] + y[double_tid]; result[double_tid + blockDim.x] = x[double_tid + blockDim.x] + y[double_tid + blockDim.x]; } int main() { int array_size = 1 << 26; int *h_x = new int[array_size]; int *h_y = new int[array_size]; for (int i = 0; i < array_size; ++i) { h_x[i] = i; h_y[i] = 2 * i; } int* d_x; int* d_y; int* d_result; int num_bytes = sizeof(*h_x) * array_size; cudaMalloc(&d_x, num_bytes); cudaMalloc(&d_y, num_bytes); cudaMalloc(&d_result, num_bytes); cudaMemcpy(d_x, h_x, num_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_y, h_y, num_bytes, cudaMemcpyHostToDevice); int block_size = 512; int num_blocks = (array_size + block_size - 1) / block_size; SumV1<<<num_blocks / 2, block_size>>>(d_x, d_y, d_result); SumV0<<<num_blocks, block_size>>>(d_x, d_y, d_result); int *h_result = new int[array_size]; cudaMemcpy(h_result, d_result, num_bytes, cudaMemcpyDeviceToHost); cudaFree(d_x); cudaFree(d_y); cudaFree(d_result); delete[] h_x; delete[] h_y; delete[] h_result; return 0; }
9478037596d871062bff3dde274cdddf209b1408.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cassert> #include <iostream> #include <random> #include <limits> #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/HistoContainer.h" #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h" #include "HeterogeneousCore/CUDAUtilities/interface/launch.h" using namespace cms::cuda; template <typename T, int NBINS, int S, int DELTA> __global__ void mykernel(T const* __restrict__ v, uint32_t N) { assert(v); assert(N == 12000); if (threadIdx.x == 0) printf("start kernel for %d data\n", N); using Hist = HistoContainer<T, NBINS, 12000, S, uint16_t>; __shared__ Hist hist; __shared__ typename Hist::Counter ws[32]; for (auto j = threadIdx.x; j < Hist::totbins(); j += blockDim.x) { hist.off[j] = 0; } __syncthreads(); for (auto j = threadIdx.x; j < N; j += blockDim.x) hist.count(v[j]); __syncthreads(); assert(0 == hist.size()); __syncthreads(); hist.finalize(ws); __syncthreads(); assert(N == hist.size()); for (auto j = threadIdx.x; j < Hist::nbins(); j += blockDim.x) assert(hist.off[j] <= hist.off[j + 1]); __syncthreads(); if (threadIdx.x < 32) ws[threadIdx.x] = 0; // used by prefix scan... 
__syncthreads(); for (auto j = threadIdx.x; j < N; j += blockDim.x) hist.fill(v[j], j); __syncthreads(); assert(0 == hist.off[0]); assert(N == hist.size()); for (auto j = threadIdx.x; j < hist.size() - 1; j += blockDim.x) { auto p = hist.begin() + j; assert((*p) < N); auto k1 = Hist::bin(v[*p]); auto k2 = Hist::bin(v[*(p + 1)]); assert(k2 >= k1); } for (auto i = threadIdx.x; i < hist.size(); i += blockDim.x) { auto p = hist.begin() + i; auto j = *p; auto b0 = Hist::bin(v[j]); int tot = 0; auto ftest = [&](int k) { assert(k >= 0 && k < N); ++tot; }; forEachInWindow(hist, v[j], v[j], ftest); int rtot = hist.size(b0); assert(tot == rtot); tot = 0; auto vm = int(v[j]) - DELTA; auto vp = int(v[j]) + DELTA; constexpr int vmax = NBINS != 128 ? NBINS * 2 - 1 : std::numeric_limits<T>::max(); vm = ::max(vm, 0); vm = ::min(vm, vmax); vp = ::min(vp, vmax); vp = ::max(vp, 0); assert(vp >= vm); forEachInWindow(hist, vm, vp, ftest); int bp = Hist::bin(vp); int bm = Hist::bin(vm); rtot = hist.end(bp) - hist.begin(bm); assert(tot == rtot); } } template <typename T, int NBINS = 128, int S = 8 * sizeof(T), int DELTA = 1000> void go() { std::mt19937 eng; int rmin = std::numeric_limits<T>::min(); int rmax = std::numeric_limits<T>::max(); if (NBINS != 128) { rmin = 0; rmax = NBINS * 2 - 1; } std::uniform_int_distribution<T> rgen(rmin, rmax); constexpr int N = 12000; T v[N]; auto v_d = make_device_unique<T[]>(N, nullptr); assert(v_d.get()); using Hist = HistoContainer<T, NBINS, N, S>; std::cout << "HistoContainer " << Hist::nbits() << ' ' << Hist::nbins() << ' ' << Hist::capacity() << ' ' << (rmax - rmin) / Hist::nbins() << std::endl; std::cout << "bins " << int(Hist::bin(0)) << ' ' << int(Hist::bin(rmin)) << ' ' << int(Hist::bin(rmax)) << std::endl; for (int it = 0; it < 5; ++it) { for (long long j = 0; j < N; j++) v[j] = rgen(eng); if (it == 2) for (long long j = N / 2; j < N / 2 + N / 4; j++) v[j] = 4; assert(v_d.get()); assert(v); cudaCheck(hipMemcpy(v_d.get(), v, N * sizeof(T), 
hipMemcpyHostToDevice)); assert(v_d.get()); launch(mykernel<T, NBINS, S, DELTA>, {1, 256}, v_d.get(), N); } } int main() { cms::cudatest::requireDevices(); go<int16_t>(); go<uint8_t, 128, 8, 4>(); go<uint16_t, 313 / 2, 9, 4>(); return 0; }
9478037596d871062bff3dde274cdddf209b1408.cu
#include <algorithm> #include <cassert> #include <iostream> #include <random> #include <limits> #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/HistoContainer.h" #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h" #include "HeterogeneousCore/CUDAUtilities/interface/launch.h" using namespace cms::cuda; template <typename T, int NBINS, int S, int DELTA> __global__ void mykernel(T const* __restrict__ v, uint32_t N) { assert(v); assert(N == 12000); if (threadIdx.x == 0) printf("start kernel for %d data\n", N); using Hist = HistoContainer<T, NBINS, 12000, S, uint16_t>; __shared__ Hist hist; __shared__ typename Hist::Counter ws[32]; for (auto j = threadIdx.x; j < Hist::totbins(); j += blockDim.x) { hist.off[j] = 0; } __syncthreads(); for (auto j = threadIdx.x; j < N; j += blockDim.x) hist.count(v[j]); __syncthreads(); assert(0 == hist.size()); __syncthreads(); hist.finalize(ws); __syncthreads(); assert(N == hist.size()); for (auto j = threadIdx.x; j < Hist::nbins(); j += blockDim.x) assert(hist.off[j] <= hist.off[j + 1]); __syncthreads(); if (threadIdx.x < 32) ws[threadIdx.x] = 0; // used by prefix scan... 
__syncthreads(); for (auto j = threadIdx.x; j < N; j += blockDim.x) hist.fill(v[j], j); __syncthreads(); assert(0 == hist.off[0]); assert(N == hist.size()); for (auto j = threadIdx.x; j < hist.size() - 1; j += blockDim.x) { auto p = hist.begin() + j; assert((*p) < N); auto k1 = Hist::bin(v[*p]); auto k2 = Hist::bin(v[*(p + 1)]); assert(k2 >= k1); } for (auto i = threadIdx.x; i < hist.size(); i += blockDim.x) { auto p = hist.begin() + i; auto j = *p; auto b0 = Hist::bin(v[j]); int tot = 0; auto ftest = [&](int k) { assert(k >= 0 && k < N); ++tot; }; forEachInWindow(hist, v[j], v[j], ftest); int rtot = hist.size(b0); assert(tot == rtot); tot = 0; auto vm = int(v[j]) - DELTA; auto vp = int(v[j]) + DELTA; constexpr int vmax = NBINS != 128 ? NBINS * 2 - 1 : std::numeric_limits<T>::max(); vm = std::max(vm, 0); vm = std::min(vm, vmax); vp = std::min(vp, vmax); vp = std::max(vp, 0); assert(vp >= vm); forEachInWindow(hist, vm, vp, ftest); int bp = Hist::bin(vp); int bm = Hist::bin(vm); rtot = hist.end(bp) - hist.begin(bm); assert(tot == rtot); } } template <typename T, int NBINS = 128, int S = 8 * sizeof(T), int DELTA = 1000> void go() { std::mt19937 eng; int rmin = std::numeric_limits<T>::min(); int rmax = std::numeric_limits<T>::max(); if (NBINS != 128) { rmin = 0; rmax = NBINS * 2 - 1; } std::uniform_int_distribution<T> rgen(rmin, rmax); constexpr int N = 12000; T v[N]; auto v_d = make_device_unique<T[]>(N, nullptr); assert(v_d.get()); using Hist = HistoContainer<T, NBINS, N, S>; std::cout << "HistoContainer " << Hist::nbits() << ' ' << Hist::nbins() << ' ' << Hist::capacity() << ' ' << (rmax - rmin) / Hist::nbins() << std::endl; std::cout << "bins " << int(Hist::bin(0)) << ' ' << int(Hist::bin(rmin)) << ' ' << int(Hist::bin(rmax)) << std::endl; for (int it = 0; it < 5; ++it) { for (long long j = 0; j < N; j++) v[j] = rgen(eng); if (it == 2) for (long long j = N / 2; j < N / 2 + N / 4; j++) v[j] = 4; assert(v_d.get()); assert(v); cudaCheck(cudaMemcpy(v_d.get(), v, N * 
sizeof(T), cudaMemcpyHostToDevice)); assert(v_d.get()); launch(mykernel<T, NBINS, S, DELTA>, {1, 256}, v_d.get(), N); } } int main() { cms::cudatest::requireDevices(); go<int16_t>(); go<uint8_t, 128, 8, 4>(); go<uint16_t, 313 / 2, 9, 4>(); return 0; }
4122f7eb84fdb66f6f488660b11505fcb0288c38.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <array> #include <iostream> #include "CudaUtils.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <thrust/reduce.h> #include <cstdint> int main() { thrust::device_vector<char> devStr1 (std::vector<char> {'a', 'b', 'c', 'd', 'e', 'f'}); thrust::device_vector<char> devStr2 (std::vector<char> {'a', 'b', 'c', 'e', 'f', 'g'}); thrust::device_vector<bool> devRes(devStr1.size()); auto begin = thrust::make_zip_iterator(thrust::make_tuple(devStr1.begin(), devStr2.begin())); auto end = thrust::make_zip_iterator(thrust::make_tuple(devStr1.end(), devStr2.end())); thrust::transform(begin, end, devRes.begin(), [] __device__ (auto pair) { return thrust::get<0>(pair) == thrust::get<1>(pair); }); auto count = thrust::reduce(devRes.begin(), devRes.end(), 0, [] __device__ (auto v1, auto v2) { return v1 + v2; }); thrust::host_vector<int> res = devRes; // Wait for the kernel to complete and check for errors checkCuda(hipPeekAtLastError()); checkCuda(hipDeviceSynchronize()); // Print the results for (int col = 0; col < res.size(); ++col) { std::cout << res[col] << std::endl; } std::cout << "Same prefix length is: " << std::to_string(count) << std::endl; }
4122f7eb84fdb66f6f488660b11505fcb0288c38.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <array>
#include <iostream>
#include "CudaUtils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <cstdint>

// Compares two equal-length device character sequences element-wise and
// reports how many positions hold identical characters.
int main() {
    thrust::device_vector<char> lhs(std::vector<char>{'a', 'b', 'c', 'd', 'e', 'f'});
    thrust::device_vector<char> rhs(std::vector<char>{'a', 'b', 'c', 'e', 'f', 'g'});
    thrust::device_vector<bool> matchMask(lhs.size());

    // Walk both sequences in lockstep via a zip iterator.
    auto zipFirst = thrust::make_zip_iterator(thrust::make_tuple(lhs.begin(), rhs.begin()));
    auto zipLast  = thrust::make_zip_iterator(thrust::make_tuple(lhs.end(), rhs.end()));

    // matchMask[i] is true exactly when the i-th characters agree.
    thrust::transform(zipFirst, zipLast, matchMask.begin(),
                      [] __device__ (auto pair) {
                          return thrust::get<0>(pair) == thrust::get<1>(pair);
                      });

    // Sum the boolean mask (with an int initial value) to count matches.
    auto count = thrust::reduce(matchMask.begin(), matchMask.end(), 0,
                                [] __device__ (auto v1, auto v2) {
                                    return v1 + v2;
                                });

    thrust::host_vector<int> res = matchMask;

    // Wait for the kernel to complete and check for errors
    checkCuda(cudaPeekAtLastError());
    checkCuda(cudaDeviceSynchronize());

    // Print the results
    for (int col = 0; col < res.size(); ++col) {
        std::cout << res[col] << std::endl;
    }
    std::cout << "Same prefix length is: " << std::to_string(count) << std::endl;
}
de026a228ac330056878fad36ef8a61f4707d438.hip
// !!! This is a file automatically generated by hipify!!! // %%writefile block_queuing.cu #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "gputimer.h" #define AND 0 #define OR 1 #define NAND 2 #define NOR 3 #define XOR 4 #define XNOR 5 // Block Variables __shared__ int block_num; __shared__ int actual_num; __device__ int global_num; __device__ int gate_solver(int gate, int input_node, int input_neighbor) { bool result; switch (gate) { case AND: result = (input_node && input_neighbor); break; case OR: result = (input_node || input_neighbor); break; case NAND: result = !(input_node && input_neighbor); break; case NOR: result = !(input_node || input_neighbor); break; case XOR: result = (input_node ^ input_neighbor); break; case XNOR: result = !(input_node ^ input_neighbor); break; default: printf("ERROR: Input gate invalid.\n"); return -1; } return (int)result; } __global__ void block_queuing(int iteration, int num_iterations, int numBlocks, int blockSize, int blockQueueCapacity, int numCurrLevelNodes, int *currLevelNodes_h, int *nodePtrs_h, int *nodeNeighbors_h,int *nodeVisited_h, int *nodeOutput_h, int *nodeGate_h, int *nodeInput_h, int *nextLevelNodes_h, int *numNextLevelNodes_h) { int queueSize = blockQueueCapacity; extern __shared__ int shared_memory_queue[]; int thread_id = blockIdx.x * blockDim.x + threadIdx.x; int offset = 0; for (int i = thread_id; i < numCurrLevelNodes; i += blockSize * numBlocks) { offset = iteration * blockSize * numBlocks + i; int node = currLevelNodes_h[offset]; for (int j = nodePtrs_h[node]; j < nodePtrs_h[node + 1]; j++) { int neighbor = nodeNeighbors_h[j]; if (nodeVisited_h[neighbor] == 0) { nodeVisited_h[neighbor] = 1; nodeOutput_h[neighbor] = gate_solver(nodeGate_h[neighbor], nodeOutput_h[node], nodeInput_h[neighbor]); int new_index = atomicAdd(&block_num, 1); if (new_index < queueSize) { int actual_index = 
atomicAdd(&actual_num, 1); shared_memory_queue[new_index] = neighbor; } else { int global_index = atomicAdd(&global_num, 1); nextLevelNodes_h[global_index] = neighbor; } } } } __syncthreads(); if ((threadIdx.x == 0) && (iteration == num_iterations - 1)) { for (int j=0; j<actual_num; j++) { int glob = atomicAdd(&global_num, 1); nextLevelNodes_h[glob] = shared_memory_queue[j]; } } numNextLevelNodes_h[0] = global_num; } void global_queuing(int numBlocks, int blockSize, int sharedQueueSize, int numCurrLevelNodes, int *currLevelNodes_h, int *nodePtrs_h, int *nodeNeighbors_h, int *nodeVisited_h, int *nodeOutput_h, int *nodeGate_h, int *nodeInput_h, int *nextLevelNodes_h, int *numNextLevelNodes_h) { int iteration=0; int num_iterations = numCurrLevelNodes / (numBlocks * blockSize) +1; while (iteration < num_iterations) { hipLaunchKernelGGL(( block_queuing), dim3(numBlocks), dim3(blockSize), 0, 0, iteration, num_iterations, numBlocks, blockSize, sharedQueueSize, numCurrLevelNodes, currLevelNodes_h, nodePtrs_h, nodeNeighbors_h, nodeVisited_h, nodeOutput_h, nodeGate_h, nodeInput_h, nextLevelNodes_h, numNextLevelNodes_h); hipDeviceSynchronize(); iteration++; } hipDeviceSynchronize(); } int read_input_one_two_four(int **input1, char *filepath) { FILE *fp = fopen(filepath, "r"); if (fp == NULL) { fprintf(stderr, "Couldn't open file for reading\n"); exit(1); } int counter = 0; int len; int length = fscanf(fp, "%d", &len); *input1 = (int *)malloc(len * sizeof(int)); int temp1; while (fscanf(fp, "%d", &temp1) == 1) { (*input1)[counter] = temp1; counter++; } fclose(fp); return len; } int read_input_three(int **input1, int **input2, int **input3, int **input4, char *filepath) { FILE *fp = fopen(filepath, "r"); if (fp == NULL) { fprintf(stderr, "Couldn't open file for reading\n"); exit(1); } int counter = 0; int len; int length = fscanf(fp, "%d", &len); *input1 = (int *)malloc(len * sizeof(int)); *input2 = (int *)malloc(len * sizeof(int)); *input3 = (int *)malloc(len * sizeof(int)); 
*input4 = (int *)malloc(len * sizeof(int)); int temp1; int temp2; int temp3; int temp4; while (fscanf(fp, "%d,%d,%d,%d", &temp1, &temp2, &temp3, &temp4) == 4) { (*input1)[counter] = temp1; (*input2)[counter] = temp2; (*input3)[counter] = temp3; (*input4)[counter] = temp4; counter++; } fclose(fp); return len; } int main(int argc, char *argv[]) { if (argc != 10) { printf("Error: program expects 9 agruments.\n"); return 1; } // Block params int numBlocks = atoi(argv[1]); int blockSize = atoi(argv[2]); int sharedQueueSize = atoi(argv[3]); // Input filepaths char *nodePtrs_filepath = argv[4]; char *nodeNeighbors_filepath = argv[5]; char *nodeLinks_filepath = argv[6]; char *currLevelNodes_filepath = argv[7]; // Output filepaths char *nodeOutput = argv[8]; char *nextLevelNodesOutput = argv[9]; // Variables int numNodePtrs, numNodes; int *nodePtrs_h, *nodeNeighbors_h; int *nodeGate_h, *nodeInput_h, *nodeOutput_h, *nodeVisited_h; int numTotalNeighbors_h; int *currLevelNodes_h; int numCurrLevelNodes; // Output int *nextLevelNodes_h; nextLevelNodes_h = (int *)malloc(40101 * 2 * sizeof(int)); int numNextLevelNodes_h = 0; numNodePtrs = read_input_one_two_four(&nodePtrs_h, nodePtrs_filepath); numTotalNeighbors_h = read_input_one_two_four(&nodeNeighbors_h, nodeNeighbors_filepath); numNodes = read_input_three(&nodeVisited_h, &nodeGate_h, &nodeInput_h, &nodeOutput_h, nodeLinks_filepath); numCurrLevelNodes = read_input_one_two_four(&currLevelNodes_h, currLevelNodes_filepath); int *d_numNextLevelNodes_h, *d_nodePtrs_h, *d_nodeNeighbors_h, *d_nodeVisited_h, *d_nodeGate_h, *d_nodeInput_h, *d_nodeOutput_h, *d_currLevelNodes_h, *d_nextLevelNodes_h; // CUDA memory allocation hipMalloc((void **)&d_numNextLevelNodes_h, sizeof(int)); hipMalloc((void **)&d_nodePtrs_h, numNodePtrs * sizeof(int)); hipMalloc((void **)&d_nodeNeighbors_h, numTotalNeighbors_h * sizeof(int)); hipMalloc((void **)&d_nodeVisited_h, numNodes * sizeof(int)); hipMalloc((void **)&d_nodeGate_h, numNodes * sizeof(int)); 
hipMalloc((void **)&d_nodeInput_h, numNodes * sizeof(int)); hipMalloc((void **)&d_nodeOutput_h, numNodes * sizeof(int)); hipMalloc((void **)&d_currLevelNodes_h, numCurrLevelNodes * sizeof(int)); hipMalloc((void **)&d_nextLevelNodes_h, numTotalNeighbors_h * sizeof(int)); // copy information hipMemcpy(d_nodePtrs_h, nodePtrs_h, numNodePtrs * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_nodeNeighbors_h, nodeNeighbors_h, numTotalNeighbors_h * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_nodeVisited_h, nodeVisited_h, numNodes * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_nodeGate_h, nodeGate_h, numNodes * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_nodeInput_h, nodeInput_h, numNodes * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_nodeOutput_h, nodeOutput_h, numNodes * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_currLevelNodes_h, currLevelNodes_h, numCurrLevelNodes * sizeof(int), hipMemcpyHostToDevice); global_queuing(numBlocks, blockSize, sharedQueueSize, numCurrLevelNodes, d_currLevelNodes_h, d_nodePtrs_h, d_nodeNeighbors_h, d_nodeVisited_h, d_nodeOutput_h, d_nodeGate_h, d_nodeInput_h, d_nextLevelNodes_h, d_numNextLevelNodes_h); // get the results hipMemcpy(&numNextLevelNodes_h, d_numNextLevelNodes_h, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(nextLevelNodes_h, d_nextLevelNodes_h, numTotalNeighbors_h * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(nodeOutput_h, d_nodeOutput_h, numNodes * sizeof(int), hipMemcpyDeviceToHost); // output information FILE *fptr = fopen(nodeOutput, "w"); if (fptr == NULL) { printf("Error: unable to open node output file."); exit(1); } for (int i = 0; i < numNodes + 1; i++) { if (i == 0) { fprintf(fptr, "%d\n", numNodes); } fprintf(fptr, "%d\n", nodeOutput_h[i]); } fclose(fptr); FILE *fptr2 = fopen(nextLevelNodesOutput, "w"); if (fptr2 == NULL) { printf("Error: unable to open next level output file."); exit(1); } for (int i = 0; i < numNextLevelNodes_h + 1; i++) { if (i == 0) { fprintf(fptr, "%d\n", 
numNextLevelNodes_h); } fprintf(fptr2, "%d\n", nextLevelNodes_h[i]); } fclose(fptr); fclose(fptr2); free(nodePtrs_h); free(nodeNeighbors_h); free(nodeGate_h); free(nodeInput_h); free(nodeOutput_h); free(nodeVisited_h); free(currLevelNodes_h); free(nextLevelNodes_h); hipFree(d_nextLevelNodes_h); hipFree(d_nodePtrs_h); hipFree(d_nodeNeighbors_h); hipFree(d_nodeVisited_h); hipFree(d_currLevelNodes_h); hipFree(d_nodeGate_h); hipFree(d_nodeInput_h); hipFree(d_nodeOutput_h); return 0; }
de026a228ac330056878fad36ef8a61f4707d438.cu
// %%writefile block_queuing.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "gputimer.h"

#define AND 0
#define OR 1
#define NAND 2
#define NOR 3
#define XOR 4
#define XNOR 5

// Block Variables
// Per-block queue bookkeeping.  Shared memory is per-block and re-created on
// every launch, so the kernel initialises these before use.
__shared__ int block_num;   // next free slot in the block-local queue (may overshoot capacity)
__shared__ int actual_num;  // number of entries actually stored in the block-local queue
__device__ int global_num;  // global output cursor (device globals start zero-initialised)

// Evaluates a single two-input logic gate; returns 0/1, or -1 for an unknown
// gate id (after printing a diagnostic).
__device__ int gate_solver(int gate, int input_node, int input_neighbor) {
    bool result;
    switch (gate) {
        case AND:  result = (input_node && input_neighbor); break;
        case OR:   result = (input_node || input_neighbor); break;
        case NAND: result = !(input_node && input_neighbor); break;
        case NOR:  result = !(input_node || input_neighbor); break;
        case XOR:  result = (input_node ^ input_neighbor); break;
        case XNOR: result = !(input_node ^ input_neighbor); break;
        default:
            printf("ERROR: Input gate invalid.\n");
            return -1;
    }
    return (int)result;
}

// One sweep over a grid-sized chunk of the current BFS frontier.  Newly
// visited neighbours go into a block-local shared-memory queue of
// blockQueueCapacity ints, spilling to the global queue when full; on the
// final iteration thread 0 of each block flushes the local queue globally.
// Requires a dynamic shared-memory allocation of blockQueueCapacity ints.
__global__ void block_queuing(int iteration, int num_iterations, int numBlocks,
                              int blockSize, int blockQueueCapacity,
                              int numCurrLevelNodes, int *currLevelNodes_h,
                              int *nodePtrs_h, int *nodeNeighbors_h,
                              int *nodeVisited_h, int *nodeOutput_h,
                              int *nodeGate_h, int *nodeInput_h,
                              int *nextLevelNodes_h, int *numNextLevelNodes_h) {
    int queueSize = blockQueueCapacity;
    extern __shared__ int shared_memory_queue[];

    // BUGFIX: the shared counters were never initialised; reset once per block.
    if (threadIdx.x == 0) {
        block_num = 0;
        actual_num = 0;
    }
    __syncthreads();

    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    // Each launch handles exactly one grid-sized chunk of the frontier.
    // BUGFIX: the original loop re-added the iteration offset on every stride
    // step, which read currLevelNodes_h out of bounds on later iterations.
    int offset = iteration * blockSize * numBlocks + thread_id;
    if (offset < numCurrLevelNodes) {
        int node = currLevelNodes_h[offset];
        for (int j = nodePtrs_h[node]; j < nodePtrs_h[node + 1]; j++) {
            int neighbor = nodeNeighbors_h[j];
            if (nodeVisited_h[neighbor] == 0) {
                nodeVisited_h[neighbor] = 1;
                nodeOutput_h[neighbor] = gate_solver(nodeGate_h[neighbor],
                                                    nodeOutput_h[node],
                                                    nodeInput_h[neighbor]);
                int new_index = atomicAdd(&block_num, 1);
                if (new_index < queueSize) {
                    atomicAdd(&actual_num, 1);
                    shared_memory_queue[new_index] = neighbor;
                } else {
                    // Local queue full: spill directly to the global queue.
                    int global_index = atomicAdd(&global_num, 1);
                    nextLevelNodes_h[global_index] = neighbor;
                }
            }
        }
    }
    __syncthreads();
    // Final iteration: drain this block's local queue into the global queue.
    if ((threadIdx.x == 0) && (iteration == num_iterations - 1)) {
        for (int j = 0; j < actual_num; j++) {
            int glob = atomicAdd(&global_num, 1);
            nextLevelNodes_h[glob] = shared_memory_queue[j];
        }
    }
    // NOTE: the total count is published by the host wrapper after all
    // launches complete.  The original wrote numNextLevelNodes_h[0] here from
    // every thread, racing with the per-block flush above.
}

// Host driver: launches block_queuing once per frontier chunk, then publishes
// the final next-level count into numNextLevelNodes_h (a device pointer).
void global_queuing(int numBlocks, int blockSize, int sharedQueueSize,
                    int numCurrLevelNodes, int *currLevelNodes_h,
                    int *nodePtrs_h, int *nodeNeighbors_h, int *nodeVisited_h,
                    int *nodeOutput_h, int *nodeGate_h, int *nodeInput_h,
                    int *nextLevelNodes_h, int *numNextLevelNodes_h) {
    int iteration = 0;
    int num_iterations = numCurrLevelNodes / (numBlocks * blockSize) + 1;
    // BUGFIX: the kernel uses dynamic shared memory for its block queue, but
    // was launched with 0 shared bytes; allocate sharedQueueSize ints.
    size_t sharedBytes = (size_t)sharedQueueSize * sizeof(int);
    while (iteration < num_iterations) {
        block_queuing<<<numBlocks, blockSize, sharedBytes>>>(
            iteration, num_iterations, numBlocks, blockSize, sharedQueueSize,
            numCurrLevelNodes, currLevelNodes_h, nodePtrs_h, nodeNeighbors_h,
            nodeVisited_h, nodeOutput_h, nodeGate_h, nodeInput_h,
            nextLevelNodes_h, numNextLevelNodes_h);
        cudaDeviceSynchronize();
        iteration++;
    }
    cudaDeviceSynchronize();
    // Publish the final count after all launches have finished (race-free).
    int totalNext = 0;
    cudaMemcpyFromSymbol(&totalNext, global_num, sizeof(int));
    cudaMemcpy(numNextLevelNodes_h, &totalNext, sizeof(int), cudaMemcpyHostToDevice);
}

// Reads "<len>" then len whitespace-separated ints from filepath into a
// freshly malloc'd *input1.  Returns the declared length; exits on failure.
int read_input_one_two_four(int **input1, char *filepath) {
    FILE *fp = fopen(filepath, "r");
    if (fp == NULL) {
        fprintf(stderr, "Couldn't open file for reading\n");
        exit(1);
    }
    int counter = 0;
    int len;
    // BUGFIX: the header-read result was previously ignored.
    if (fscanf(fp, "%d", &len) != 1 || len < 0) {
        fprintf(stderr, "Couldn't read length header\n");
        fclose(fp);
        exit(1);
    }
    *input1 = (int *)malloc(len * sizeof(int));
    int temp1;
    // Guard the counter so a malformed file cannot overrun the buffer.
    while (counter < len && fscanf(fp, "%d", &temp1) == 1) {
        (*input1)[counter] = temp1;
        counter++;
    }
    fclose(fp);
    return len;
}

// Reads "<len>" then len comma-separated quadruples into four freshly
// malloc'd arrays.  Returns the declared length; exits on failure.
int read_input_three(int **input1, int **input2, int **input3, int **input4,
                     char *filepath) {
    FILE *fp = fopen(filepath, "r");
    if (fp == NULL) {
        fprintf(stderr, "Couldn't open file for reading\n");
        exit(1);
    }
    int counter = 0;
    int len;
    if (fscanf(fp, "%d", &len) != 1 || len < 0) {
        fprintf(stderr, "Couldn't read length header\n");
        fclose(fp);
        exit(1);
    }
    *input1 = (int *)malloc(len * sizeof(int));
    *input2 = (int *)malloc(len * sizeof(int));
    *input3 = (int *)malloc(len * sizeof(int));
    *input4 = (int *)malloc(len * sizeof(int));
    int temp1, temp2, temp3, temp4;
    while (counter < len &&
           fscanf(fp, "%d,%d,%d,%d", &temp1, &temp2, &temp3, &temp4) == 4) {
        (*input1)[counter] = temp1;
        (*input2)[counter] = temp2;
        (*input3)[counter] = temp3;
        (*input4)[counter] = temp4;
        counter++;
    }
    fclose(fp);
    return len;
}

// Entry point.  argv: numBlocks blockSize sharedQueueSize
//   nodePtrs nodeNeighbors nodeLinks currLevelNodes   (input files)
//   nodeOutput nextLevelNodesOutput                   (output files)
int main(int argc, char *argv[]) {
    if (argc != 10) {
        printf("Error: program expects 9 agruments.\n");
        return 1;
    }
    // Block params
    int numBlocks = atoi(argv[1]);
    int blockSize = atoi(argv[2]);
    int sharedQueueSize = atoi(argv[3]);
    // Input filepaths
    char *nodePtrs_filepath = argv[4];
    char *nodeNeighbors_filepath = argv[5];
    char *nodeLinks_filepath = argv[6];
    char *currLevelNodes_filepath = argv[7];
    // Output filepaths
    char *nodeOutput = argv[8];
    char *nextLevelNodesOutput = argv[9];

    // Variables
    int numNodePtrs, numNodes;
    int *nodePtrs_h, *nodeNeighbors_h;
    int *nodeGate_h, *nodeInput_h, *nodeOutput_h, *nodeVisited_h;
    int numTotalNeighbors_h;
    int *currLevelNodes_h;
    int numCurrLevelNodes;
    int numNextLevelNodes_h = 0;

    numNodePtrs = read_input_one_two_four(&nodePtrs_h, nodePtrs_filepath);
    numTotalNeighbors_h = read_input_one_two_four(&nodeNeighbors_h, nodeNeighbors_filepath);
    numNodes = read_input_three(&nodeVisited_h, &nodeGate_h, &nodeInput_h,
                                &nodeOutput_h, nodeLinks_filepath);
    numCurrLevelNodes = read_input_one_two_four(&currLevelNodes_h, currLevelNodes_filepath);

    // Output buffer.  BUGFIX: previously a fixed 40101*2 allocation that could
    // be overrun by the device-to-host copy below; size it to the worst case
    // (every neighbour edge discovers a new node).
    int *nextLevelNodes_h = (int *)malloc(numTotalNeighbors_h * sizeof(int));

    int *d_numNextLevelNodes_h, *d_nodePtrs_h, *d_nodeNeighbors_h,
        *d_nodeVisited_h, *d_nodeGate_h, *d_nodeInput_h, *d_nodeOutput_h,
        *d_currLevelNodes_h, *d_nextLevelNodes_h;

    // CUDA memory allocation
    cudaMalloc((void **)&d_numNextLevelNodes_h, sizeof(int));
    cudaMalloc((void **)&d_nodePtrs_h, numNodePtrs * sizeof(int));
    cudaMalloc((void **)&d_nodeNeighbors_h, numTotalNeighbors_h * sizeof(int));
    cudaMalloc((void **)&d_nodeVisited_h, numNodes * sizeof(int));
    cudaMalloc((void **)&d_nodeGate_h, numNodes * sizeof(int));
    cudaMalloc((void **)&d_nodeInput_h, numNodes * sizeof(int));
    cudaMalloc((void **)&d_nodeOutput_h, numNodes * sizeof(int));
    cudaMalloc((void **)&d_currLevelNodes_h, numCurrLevelNodes * sizeof(int));
    cudaMalloc((void **)&d_nextLevelNodes_h, numTotalNeighbors_h * sizeof(int));

    // copy information
    cudaMemcpy(d_nodePtrs_h, nodePtrs_h, numNodePtrs * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nodeNeighbors_h, nodeNeighbors_h, numTotalNeighbors_h * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nodeVisited_h, nodeVisited_h, numNodes * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nodeGate_h, nodeGate_h, numNodes * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nodeInput_h, nodeInput_h, numNodes * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nodeOutput_h, nodeOutput_h, numNodes * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_currLevelNodes_h, currLevelNodes_h, numCurrLevelNodes * sizeof(int), cudaMemcpyHostToDevice);

    global_queuing(numBlocks, blockSize, sharedQueueSize, numCurrLevelNodes,
                   d_currLevelNodes_h, d_nodePtrs_h, d_nodeNeighbors_h,
                   d_nodeVisited_h, d_nodeOutput_h, d_nodeGate_h, d_nodeInput_h,
                   d_nextLevelNodes_h, d_numNextLevelNodes_h);

    // get the results
    cudaMemcpy(&numNextLevelNodes_h, d_numNextLevelNodes_h, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(nextLevelNodes_h, d_nextLevelNodes_h, numTotalNeighbors_h * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(nodeOutput_h, d_nodeOutput_h, numNodes * sizeof(int), cudaMemcpyDeviceToHost);

    // output information
    FILE *fptr = fopen(nodeOutput, "w");
    if (fptr == NULL) {
        printf("Error: unable to open node output file.");
        exit(1);
    }
    // BUGFIX: the original loop ran to numNodes inclusive, reading one past
    // the end of nodeOutput_h; write the count once, then exactly numNodes values.
    fprintf(fptr, "%d\n", numNodes);
    for (int i = 0; i < numNodes; i++) {
        fprintf(fptr, "%d\n", nodeOutput_h[i]);
    }
    fclose(fptr);

    FILE *fptr2 = fopen(nextLevelNodesOutput, "w");
    if (fptr2 == NULL) {
        printf("Error: unable to open next level output file.");
        exit(1);
    }
    // BUGFIX: the count line went to fptr (already closed); it belongs in
    // fptr2, and the loop previously over-ran nextLevelNodes_h by one.
    fprintf(fptr2, "%d\n", numNextLevelNodes_h);
    for (int i = 0; i < numNextLevelNodes_h; i++) {
        fprintf(fptr2, "%d\n", nextLevelNodes_h[i]);
    }
    fclose(fptr2);  // BUGFIX: fptr was previously fclose'd a second time here

    free(nodePtrs_h);
    free(nodeNeighbors_h);
    free(nodeGate_h);
    free(nodeInput_h);
    free(nodeOutput_h);
    free(nodeVisited_h);
    free(currLevelNodes_h);
    free(nextLevelNodes_h);
    cudaFree(d_numNextLevelNodes_h);
    cudaFree(d_nextLevelNodes_h);
    cudaFree(d_nodePtrs_h);
    cudaFree(d_nodeNeighbors_h);
    cudaFree(d_nodeVisited_h);
    cudaFree(d_currLevelNodes_h);
    cudaFree(d_nodeGate_h);
    cudaFree(d_nodeInput_h);
    cudaFree(d_nodeOutput_h);
    return 0;
}
6eee9b4e99e83a43d6fadfb7f20d757f07947867.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <assert.h> // Simple utility function to check for CUDA runtime errors void checkCUDAError(const char* msg); // Part3: implement the kernel __global__ void reverseArrayBlock(int *d_out, int *d_in) { int inOffset = blockDim.x * blockIdx.x; int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x); int in = inOffset + threadIdx.x; int out = outOffset + (blockDim.x - 1 - threadIdx.x); d_out[out] = d_in[in]; } ///////////////////////////////////////////////////////////////////// // Program main ///////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { // pointer for host memory and size int *h_a; int dimA = 256 * 1024; // 256K elements (1MB total) // pointer for device memory int *d_b, *d_a; // define grid and block size int numThreadsPerBlock = 256; // Part 1: compute number of blocks needed based on // array size and desired block size int numBlocks = dimA / numThreadsPerBlock; // allocate host and device memory size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int); h_a = (int *) malloc(memSize); hipMalloc( (void **) &d_a, memSize ); hipMalloc( (void **) &d_b, memSize ); // Initialize input array on host for (int i = 0; i < dimA; ++i) { h_a[i] = i; } // Copy host array to device array hipMemcpy( d_a, h_a, memSize, hipMemcpyHostToDevice ); // launch kernel dim3 dimGrid(numBlocks); dim3 dimBlock(numThreadsPerBlock); hipLaunchKernelGGL(( reverseArrayBlock), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_b, d_a ); // block until the device has completed hipDeviceSynchronize(); // check if kernel execution generated an error // Check for any CUDA errors checkCUDAError("kernel invocation"); // device to host copy hipMemcpy( h_a, d_b, memSize, hipMemcpyDeviceToHost ); // Check for any CUDA errors checkCUDAError("memcpy"); // verify the data returned to the host is correct for (int i = 0; i < dimA; i++) { 
assert(h_a[i] == dimA - 1 - i ); } // free device memory hipFree(d_a); hipFree(d_b); // free host memory free(h_a); // If the program makes it this far, then the results are // correct and there are no run-time errors. Good work! printf("Correct!\n"); return 0; } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } }
6eee9b4e99e83a43d6fadfb7f20d757f07947867.cu
#include <stdio.h>
#include <assert.h>

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);

// Part3: implement the kernel
// Reverses d_in into d_out: block b's elements land, mirrored within the
// block, in output block (gridDim.x - 1 - b).
__global__ void reverseArrayBlock(int *d_out, int *d_in)
{
    int inOffset  = blockDim.x * blockIdx.x;
    int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
    int in  = inOffset + threadIdx.x;
    int out = outOffset + (blockDim.x - 1 - threadIdx.x);
    d_out[out] = d_in[in];
}

/////////////////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
    // pointer for host memory and size
    int *h_a;
    int dimA = 256 * 1024; // 256K elements (1MB total)

    // pointer for device memory
    int *d_b, *d_a;

    // define grid and block size
    int numThreadsPerBlock = 256;

    // Part 1: compute number of blocks needed based on
    // array size and desired block size
    int numBlocks = dimA / numThreadsPerBlock;

    // allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    cudaMalloc( (void **) &d_a, memSize );
    cudaMalloc( (void **) &d_b, memSize );
    // surface allocation failures immediately instead of at first use
    checkCUDAError("cudaMalloc");

    // Initialize input array on host
    for (int i = 0; i < dimA; ++i)
    {
        h_a[i] = i;
    }

    // Copy host array to device array
    cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice );

    // launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    reverseArrayBlock<<< dimGrid, dimBlock >>>( d_b, d_a );

    // block until the device has completed
    // BUGFIX: cudaThreadSynchronize() is deprecated (and removed in recent
    // CUDA releases); cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();

    // check if kernel execution generated an error
    // Check for any CUDA errors
    checkCUDAError("kernel invocation");

    // device to host copy
    cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost );

    // Check for any CUDA errors
    checkCUDAError("memcpy");

    // verify the data returned to the host is correct
    for (int i = 0; i < dimA; i++)
    {
        assert(h_a[i] == dimA - 1 - i );
    }

    // free device memory
    cudaFree(d_a);
    cudaFree(d_b);

    // free host memory
    free(h_a);

    // If the program makes it this far, then the results are
    // correct and there are no run-time errors.  Good work!
    printf("Correct!\n");
    return 0;
}

// Prints "Cuda error: <msg>: <description>." to stderr and exits if the most
// recent CUDA runtime call recorded an error; otherwise does nothing.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
dfa8f4f8f601d70c5f09341eb171a69f1575d048.hip
// !!! This is a file automatically generated by hipify!!! #include "evaluator.cuh" #include <library/cpp/cuda/wrappers/kernel.cuh> #include <library/cpp/cuda/wrappers/kernel_helpers.cuh> #include <library/cpp/cuda/wrappers/arch.cuh> #include <library/cpp/cuda/wrappers/kernel_helpers.cuh> #include <util/string/cast.h> #include <hip/hip_runtime.h> #include <assert.h> template<typename TFeatureType, TGPUDataInput::EFeatureLayout Layout> struct TFeatureAccessor { TFeatureAccessor() = default; using TFeature = TFeatureType; using TFeaturePtr = const TFeature*; i32 Stride = 0; i32 FeatureCount = 0; i32 ObjectCount = 0; TFeaturePtr FeaturesPtr = nullptr; __forceinline__ __device__ TFeature operator()(i32 featureId, i32 objectId) const { if (Layout == TGPUDataInput::EFeatureLayout::ColumnFirst) { return objectId < ObjectCount && featureId < FeatureCount ? __ldg(FeaturesPtr + featureId * Stride + objectId) : NegativeInfty(); } else { return objectId < ObjectCount && featureId < FeatureCount ? __ldg(FeaturesPtr + featureId + objectId * Stride) : NegativeInfty(); } } __forceinline__ __device__ int FeaturesCount() const { return FeatureCount; } __forceinline__ __device__ int SamplesCount() const { return ObjectCount; } }; constexpr ui32 ObjectsPerThread = 4; constexpr ui32 TreeSubBlockWidth = 8; constexpr ui32 ExtTreeBlockWidth = 128; constexpr ui32 QuantizationDocBlockSize = 256; constexpr ui32 BlockWidth = 256; constexpr ui32 EvalDocBlockSize = BlockWidth / TreeSubBlockWidth; static_assert(EvalDocBlockSize >= WarpSize, "EvalBlockSize should be greater than WarpSize"); using TTreeIndex = uint4; void TCudaQuantizedData::SetDimensions(ui32 effectiveBucketCount, ui32 objectsCount) { ObjectsCount = objectsCount; EffectiveBucketCount = effectiveBucketCount; const auto one32blockSize = WarpSize * effectiveBucketCount; const auto desiredQuantBuff = one32blockSize * NKernel::CeilDivide<ui32>(objectsCount, 128) * 4; if (BinarizedFeaturesBuffer.Size() < desiredQuantBuff) { 
BinarizedFeaturesBuffer = TCudaVec<TCudaQuantizationBucket>(desiredQuantBuff, EMemoryType::Device); } } void TEvaluationDataCache::PrepareCopyBufs(size_t bufSize, size_t objectsCount) { if (CopyDataBufDevice.Size() < bufSize) { CopyDataBufDevice = TCudaVec<float>(AlignBy<2048>(bufSize), EMemoryType::Device); } if (CopyDataBufHost.Size() < bufSize) { CopyDataBufHost = TCudaVec<float>(AlignBy<2048>(bufSize), EMemoryType::Host); } if (ResultsFloatBuf.Size() < objectsCount) { ResultsFloatBuf = TCudaVec<float>(AlignBy<2048>(objectsCount), EMemoryType::Device); } if (ResultsDoubleBuf.Size() < objectsCount) { ResultsDoubleBuf = TCudaVec<double>(AlignBy<2048>(objectsCount), EMemoryType::Device); } } template<typename TFloatFeatureAccessor> __launch_bounds__(QuantizationDocBlockSize, 1) __global__ void Binarize( TFloatFeatureAccessor floatAccessor, const float* __restrict__ borders, const ui32* __restrict__ featureBorderOffsets, const ui32* __restrict__ featureBordersCount, const ui32* __restrict__ floatFeatureForBucketIdx, const ui32 bucketsCount, TCudaQuantizationBucket* __restrict__ target ) { const int blockby32 = blockIdx.x * QuantizationDocBlockSize / WarpSize + threadIdx.x / WarpSize; const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + threadIdx.x % WarpSize; const int targetBucketIdx = blockIdx.y; const float* featureBorders = borders + featureBorderOffsets[targetBucketIdx]; const int featureBorderCount = __ldg(featureBordersCount + targetBucketIdx); const int featureIdx = floatFeatureForBucketIdx[targetBucketIdx]; __shared__ float bordersLocal[QuantizationDocBlockSize]; if (threadIdx.x < featureBorderCount) { bordersLocal[threadIdx.x] = __ldg(featureBorders + threadIdx.x); } __syncthreads(); float4 features; features.x = floatAccessor(featureIdx, firstDocForThread + 0 * WarpSize); features.y = floatAccessor(featureIdx, firstDocForThread + 1 * WarpSize); features.z = floatAccessor(featureIdx, firstDocForThread + 2 * WarpSize); features.w = 
floatAccessor(featureIdx, firstDocForThread + 3 * WarpSize); TCudaQuantizationBucket bins = { 0 }; #pragma unroll 8 for (int borderId = 0; borderId < featureBorderCount; ++borderId) { const float border = bordersLocal[borderId]; bins.x += features.x > border; bins.y += features.y > border; bins.z += features.z > border; bins.w += features.w > border; } if (firstDocForThread < floatAccessor.SamplesCount()) { target[bucketsCount * WarpSize * blockby32 + targetBucketIdx * WarpSize + threadIdx.x % WarpSize] = bins; } } template<int TreeDepth> TTreeIndex __device__ __forceinline__ CalcIndexesUnwrapped(const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { TTreeIndex result = { 0 }; #pragma unroll TreeDepth for (int depth = 0; depth < TreeDepth; ++depth) { const TGPURepackedBin bin = Ldg(curRepackedBinPtr + depth); TCudaQuantizationBucket buckets = __ldg(quantizedFeatures + bin.FeatureIdx); // |= operator fails (MLTOOLS-6839 on a100) result.x += ((buckets.x) >= bin.FeatureVal) << depth; result.y += ((buckets.y) >= bin.FeatureVal) << depth; result.z += ((buckets.z) >= bin.FeatureVal) << depth; result.w += ((buckets.w) >= bin.FeatureVal) << depth; } return result; } TTreeIndex __device__ CalcIndexesBase(int TreeDepth, const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { TTreeIndex bins = { 0 }; for (int depth = 0; depth < TreeDepth; ++depth) { const TGPURepackedBin bin = Ldg(curRepackedBinPtr + depth); TCudaQuantizationBucket vals = __ldg(quantizedFeatures + bin.FeatureIdx); // |= operator fails (MLTOOLS-6839 on a100) bins.x += ((vals.x) >= bin.FeatureVal) << depth; bins.y += ((vals.y) >= bin.FeatureVal) << depth; bins.z += ((vals.z) >= bin.FeatureVal) << depth; bins.w += ((vals.w) >= bin.FeatureVal) << depth; } return bins; } TTreeIndex __device__ __forceinline__ CalcTreeVals(int curTreeDepth, const TGPURepackedBin* 
const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { switch (curTreeDepth) { case 6: return CalcIndexesUnwrapped<6>(curRepackedBinPtr, quantizedFeatures); case 7: return CalcIndexesUnwrapped<7>(curRepackedBinPtr, quantizedFeatures); case 8: return CalcIndexesUnwrapped<8>(curRepackedBinPtr, quantizedFeatures); default: return CalcIndexesBase(curTreeDepth, curRepackedBinPtr, quantizedFeatures); } } __launch_bounds__(BlockWidth, 1) __global__ void EvalObliviousTrees( const TCudaQuantizationBucket* __restrict__ quantizedFeatures, const ui32* __restrict__ treeSizes, const ui32 treeCount, const ui32* __restrict__ treeStartOffsets, const TGPURepackedBin* __restrict__ repackedBins, const ui32* __restrict__ firstLeafOfset, const ui32 bucketsCount, const TCudaEvaluatorLeafType* __restrict__ leafValues, const ui32 documentCount, TCudaEvaluatorLeafType* __restrict__ results) { const int innerBlockBy32 = threadIdx.x / WarpSize; const int blockby32 = blockIdx.y * EvalDocBlockSize / WarpSize + innerBlockBy32; const int inBlockId = threadIdx.x % WarpSize; const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + inBlockId; quantizedFeatures += bucketsCount * WarpSize * blockby32 + threadIdx.x % WarpSize; const int firstTreeIdx = TreeSubBlockWidth * ExtTreeBlockWidth * (threadIdx.y + TreeSubBlockWidth * blockIdx.x); const int lastTreeIdx = min(firstTreeIdx + TreeSubBlockWidth * ExtTreeBlockWidth, treeCount); double4 localResult = { 0 }; if (firstTreeIdx < lastTreeIdx && firstDocForThread < documentCount) { const TGPURepackedBin* __restrict__ curRepackedBinPtr = repackedBins + __ldg(treeStartOffsets + firstTreeIdx); leafValues += firstLeafOfset[firstTreeIdx]; int treeIdx = firstTreeIdx; const int lastTreeBy2 = lastTreeIdx - ((lastTreeIdx - firstTreeIdx) & 0x3); for (; treeIdx < lastTreeBy2; treeIdx += 2) { const int curTreeDepth1 = __ldg(treeSizes + treeIdx); const int curTreeDepth2 = __ldg(treeSizes + treeIdx + 
1); const TTreeIndex bins1 = CalcTreeVals(curTreeDepth1, curRepackedBinPtr, quantizedFeatures); const TTreeIndex bins2 = CalcTreeVals(curTreeDepth2, curRepackedBinPtr + curTreeDepth1, quantizedFeatures); const auto leafValues2 = leafValues + (1 << curTreeDepth1); localResult.x += __ldg(leafValues + bins1.x) + __ldg(leafValues2 + bins2.x); localResult.y += __ldg(leafValues + bins1.y) + __ldg(leafValues2 + bins2.y); localResult.z += __ldg(leafValues + bins1.z) + __ldg(leafValues2 + bins2.z); localResult.w += __ldg(leafValues + bins1.w) + __ldg(leafValues2 + bins2.w); curRepackedBinPtr += curTreeDepth1 + curTreeDepth2; leafValues = leafValues2 + (1 << curTreeDepth2); } for (; treeIdx < lastTreeIdx; ++treeIdx) { const int curTreeDepth = __ldg(treeSizes + treeIdx); const TTreeIndex bins = CalcTreeVals(curTreeDepth, curRepackedBinPtr, quantizedFeatures); localResult.x += __ldg(leafValues + bins.x); localResult.y += __ldg(leafValues + bins.y); localResult.z += __ldg(leafValues + bins.z); localResult.w += __ldg(leafValues + bins.w); curRepackedBinPtr += curTreeDepth; leafValues += (1 << curTreeDepth); } } // TODO(kirillovs): reduce code is valid if those conditions met static_assert(EvalDocBlockSize * ObjectsPerThread == 128, ""); static_assert(EvalDocBlockSize == 32, ""); __shared__ TCudaEvaluatorLeafType reduceVals[EvalDocBlockSize * ObjectsPerThread * TreeSubBlockWidth]; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 0 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.x; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 1 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.y; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 2 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.z; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 3 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = 
localResult.w; __syncthreads(); TCudaEvaluatorLeafType lr = reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize]; for (int i = 256; i < 256 * 4; i += 256) { lr += reduceVals[i + threadIdx.x + threadIdx.y * EvalDocBlockSize]; } reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] = lr; __syncthreads(); if (threadIdx.y < ObjectsPerThread) { TAtomicAdd<TCudaEvaluatorLeafType>::Add( results + blockby32 * WarpSize * ObjectsPerThread + threadIdx.x + threadIdx.y * EvalDocBlockSize, reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] + reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize + 128] ); } } template<NCB::NModelEvaluation::EPredictionType PredictionType, bool OneDimension> __global__ void ProcessResultsImpl( const float* __restrict__ rawResults, ui32 resultsSize, const double* __restrict__ bias, double scale, double* hostMemResults, ui32 approxDimension ) { for (ui32 resultId = threadIdx.x; resultId < resultsSize; resultId += blockDim.x) { if (OneDimension) { double res = scale * __ldg(rawResults + resultId) + __ldg(bias); if (PredictionType == NCB::NModelEvaluation::EPredictionType::RawFormulaVal) { hostMemResults[resultId] = res; } else if (PredictionType == NCB::NModelEvaluation::EPredictionType::Probability) { hostMemResults[resultId] = 1 / (1 + exp(-res)); } else if (PredictionType == NCB::NModelEvaluation::EPredictionType::Class) { hostMemResults[resultId] = res > 0; } else { assert(0); } } else { const float* rawResultsSub = rawResults + resultId * approxDimension; if (PredictionType == NCB::NModelEvaluation::EPredictionType::Class) { double maxVal = scale * __ldg(rawResultsSub) + __ldg(bias); ui32 maxPos = 0; for (ui32 dim = 1; dim < approxDimension; ++dim) { double val = scale * __ldg(rawResultsSub + dim) + __ldg(bias + dim); if (val > maxVal) { maxVal = val; maxPos = dim; } } hostMemResults[resultId] = maxPos; } else { double* hostMemResultsBase = hostMemResults + resultId * approxDimension; for (ui32 dim = 0; dim < approxDimension; 
++dim) { hostMemResultsBase[dim] = scale * __ldg(rawResultsSub + dim) + __ldg(bias + dim); } if (PredictionType != NCB::NModelEvaluation::EPredictionType::RawFormulaVal) { // TODO(kirillovs): write softmax assert(0); } } } } } template<bool OneDimension> void ProcessResults( const TGPUCatboostEvaluationContext& ctx, NCB::NModelEvaluation::EPredictionType predictionType, size_t objectsCount) { switch (predictionType) { case NCB::NModelEvaluation::EPredictionType::RawFormulaVal: hipLaunchKernelGGL(( ProcessResultsImpl<NCB::NModelEvaluation::EPredictionType::RawFormulaVal, OneDimension>), dim3(1), dim3(256), 0, ctx.Stream, ctx.EvalDataCache.ResultsFloatBuf.Get(), objectsCount, ctx.GPUModelData.Bias.Get(), ctx.GPUModelData.Scale, ctx.EvalDataCache.ResultsDoubleBuf.Get(), ctx.GPUModelData.ApproxDimension ); break; case NCB::NModelEvaluation::EPredictionType::Exponent: case NCB::NModelEvaluation::EPredictionType::RMSEWithUncertainty: case NCB::NModelEvaluation::EPredictionType::MultiProbability: ythrow yexception() << "Unimplemented on GPU: prediction type " << ToString(predictionType); break; case NCB::NModelEvaluation::EPredictionType::Probability: hipLaunchKernelGGL(( ProcessResultsImpl<NCB::NModelEvaluation::EPredictionType::Probability, OneDimension>), dim3(1), dim3(256), 0, ctx.Stream, ctx.EvalDataCache.ResultsFloatBuf.Get(), objectsCount, ctx.GPUModelData.Bias.Get(), ctx.GPUModelData.Scale, ctx.EvalDataCache.ResultsDoubleBuf.Get(), ctx.GPUModelData.ApproxDimension ); break; case NCB::NModelEvaluation::EPredictionType::Class: hipLaunchKernelGGL(( ProcessResultsImpl<NCB::NModelEvaluation::EPredictionType::Class, OneDimension>), dim3(1), dim3(256), 0, ctx.Stream, ctx.EvalDataCache.ResultsFloatBuf.Get(), objectsCount, ctx.GPUModelData.Bias.Get(), ctx.GPUModelData.Scale, ctx.EvalDataCache.ResultsDoubleBuf.Get(), ctx.GPUModelData.ApproxDimension ); break; } } void TGPUCatboostEvaluationContext::EvalQuantizedData( const TCudaQuantizedData* data, size_t treeStart, size_t 
treeEnd, TArrayRef<double> result, NCB::NModelEvaluation::EPredictionType predictionType ) const { const dim3 treeCalcDimBlock(EvalDocBlockSize, TreeSubBlockWidth); const dim3 treeCalcDimGrid( NKernel::CeilDivide<unsigned int>(GPUModelData.TreeSizes.Size(), TreeSubBlockWidth * ExtTreeBlockWidth), NKernel::CeilDivide<unsigned int>(data->GetObjectsCount(), EvalDocBlockSize * ObjectsPerThread) ); ClearMemoryAsync(EvalDataCache.ResultsFloatBuf.AsArrayRef(), Stream); hipLaunchKernelGGL(( EvalObliviousTrees), dim3(treeCalcDimGrid), dim3(treeCalcDimBlock), 0, Stream, data->BinarizedFeaturesBuffer.Get(), GPUModelData.TreeSizes.Get(), GPUModelData.TreeSizes.Size(), GPUModelData.TreeStartOffsets.Get(), GPUModelData.TreeSplits.Get(), GPUModelData.TreeFirstLeafOffsets.Get(), GPUModelData.FloatFeatureForBucketIdx.Size(), GPUModelData.ModelLeafs.Get(), data->GetObjectsCount(), EvalDataCache.ResultsFloatBuf.Get() ); if (GPUModelData.ApproxDimension == 1) { ProcessResults<true>(*this, predictionType, data->GetObjectsCount()); } else { ProcessResults<false>(*this, predictionType, data->GetObjectsCount()); } MemoryCopyAsync<double>(EvalDataCache.ResultsDoubleBuf.Slice(0, data->GetObjectsCount()), result, Stream); } void TGPUCatboostEvaluationContext::QuantizeData(const TGPUDataInput& dataInput, TCudaQuantizedData* quantizedData) const{ const dim3 quantizationDimBlock(QuantizationDocBlockSize, 1); const dim3 quantizationDimGrid( NKernel::CeilDivide<unsigned int>(dataInput.ObjectCount, QuantizationDocBlockSize * ObjectsPerThread), GPUModelData.BordersCount.Size() // float features from models ); if (dataInput.FloatFeatureLayout == TGPUDataInput::EFeatureLayout::ColumnFirst) { TFeatureAccessor<float, TGPUDataInput::EFeatureLayout::ColumnFirst> floatFeatureAccessor; floatFeatureAccessor.FeatureCount = dataInput.FloatFeatureCount; floatFeatureAccessor.Stride = dataInput.Stride; floatFeatureAccessor.ObjectCount = dataInput.ObjectCount; floatFeatureAccessor.FeaturesPtr = 
dataInput.FlatFloatsVector.data(); hipLaunchKernelGGL(( Binarize), dim3(quantizationDimGrid), dim3(quantizationDimBlock), 0, Stream, floatFeatureAccessor, GPUModelData.FlatBordersVector.Get(), GPUModelData.BordersOffsets.Get(), GPUModelData.BordersCount.Get(), GPUModelData.FloatFeatureForBucketIdx.Get(), GPUModelData.FloatFeatureForBucketIdx.Size(), quantizedData->BinarizedFeaturesBuffer.Get() ); } else { TFeatureAccessor<float, TGPUDataInput::EFeatureLayout::RowFirst> floatFeatureAccessor; floatFeatureAccessor.FeatureCount = dataInput.FloatFeatureCount; floatFeatureAccessor.ObjectCount = dataInput.ObjectCount; floatFeatureAccessor.Stride = dataInput.Stride; floatFeatureAccessor.FeaturesPtr = dataInput.FlatFloatsVector.data(); hipLaunchKernelGGL(( Binarize), dim3(quantizationDimGrid), dim3(quantizationDimBlock), 0, Stream, floatFeatureAccessor, GPUModelData.FlatBordersVector.Get(), GPUModelData.BordersOffsets.Get(), GPUModelData.BordersCount.Get(), GPUModelData.FloatFeatureForBucketIdx.Get(), GPUModelData.FloatFeatureForBucketIdx.Size(), quantizedData->BinarizedFeaturesBuffer.Get() ); } } void TGPUCatboostEvaluationContext::EvalData( const TGPUDataInput& dataInput, size_t treeStart, size_t treeEnd, TArrayRef<double> result, NCB::NModelEvaluation::EPredictionType predictionType) const { TCudaQuantizedData quantizedData; quantizedData.SetDimensions(GPUModelData.FloatFeatureForBucketIdx.Size(), dataInput.ObjectCount); QuantizeData(dataInput, &quantizedData); EvalQuantizedData(&quantizedData, treeStart, treeEnd, result, predictionType); }
dfa8f4f8f601d70c5f09341eb171a69f1575d048.cu
#include "evaluator.cuh" #include <library/cpp/cuda/wrappers/kernel.cuh> #include <library/cpp/cuda/wrappers/kernel_helpers.cuh> #include <library/cpp/cuda/wrappers/arch.cuh> #include <library/cpp/cuda/wrappers/kernel_helpers.cuh> #include <util/string/cast.h> #include <cuda_runtime.h> #include <assert.h> template<typename TFeatureType, TGPUDataInput::EFeatureLayout Layout> struct TFeatureAccessor { TFeatureAccessor() = default; using TFeature = TFeatureType; using TFeaturePtr = const TFeature*; i32 Stride = 0; i32 FeatureCount = 0; i32 ObjectCount = 0; TFeaturePtr FeaturesPtr = nullptr; __forceinline__ __device__ TFeature operator()(i32 featureId, i32 objectId) const { if (Layout == TGPUDataInput::EFeatureLayout::ColumnFirst) { return objectId < ObjectCount && featureId < FeatureCount ? __ldg(FeaturesPtr + featureId * Stride + objectId) : NegativeInfty(); } else { return objectId < ObjectCount && featureId < FeatureCount ? __ldg(FeaturesPtr + featureId + objectId * Stride) : NegativeInfty(); } } __forceinline__ __device__ int FeaturesCount() const { return FeatureCount; } __forceinline__ __device__ int SamplesCount() const { return ObjectCount; } }; constexpr ui32 ObjectsPerThread = 4; constexpr ui32 TreeSubBlockWidth = 8; constexpr ui32 ExtTreeBlockWidth = 128; constexpr ui32 QuantizationDocBlockSize = 256; constexpr ui32 BlockWidth = 256; constexpr ui32 EvalDocBlockSize = BlockWidth / TreeSubBlockWidth; static_assert(EvalDocBlockSize >= WarpSize, "EvalBlockSize should be greater than WarpSize"); using TTreeIndex = uint4; void TCudaQuantizedData::SetDimensions(ui32 effectiveBucketCount, ui32 objectsCount) { ObjectsCount = objectsCount; EffectiveBucketCount = effectiveBucketCount; const auto one32blockSize = WarpSize * effectiveBucketCount; const auto desiredQuantBuff = one32blockSize * NKernel::CeilDivide<ui32>(objectsCount, 128) * 4; if (BinarizedFeaturesBuffer.Size() < desiredQuantBuff) { BinarizedFeaturesBuffer = 
TCudaVec<TCudaQuantizationBucket>(desiredQuantBuff, EMemoryType::Device); } } void TEvaluationDataCache::PrepareCopyBufs(size_t bufSize, size_t objectsCount) { if (CopyDataBufDevice.Size() < bufSize) { CopyDataBufDevice = TCudaVec<float>(AlignBy<2048>(bufSize), EMemoryType::Device); } if (CopyDataBufHost.Size() < bufSize) { CopyDataBufHost = TCudaVec<float>(AlignBy<2048>(bufSize), EMemoryType::Host); } if (ResultsFloatBuf.Size() < objectsCount) { ResultsFloatBuf = TCudaVec<float>(AlignBy<2048>(objectsCount), EMemoryType::Device); } if (ResultsDoubleBuf.Size() < objectsCount) { ResultsDoubleBuf = TCudaVec<double>(AlignBy<2048>(objectsCount), EMemoryType::Device); } } template<typename TFloatFeatureAccessor> __launch_bounds__(QuantizationDocBlockSize, 1) __global__ void Binarize( TFloatFeatureAccessor floatAccessor, const float* __restrict__ borders, const ui32* __restrict__ featureBorderOffsets, const ui32* __restrict__ featureBordersCount, const ui32* __restrict__ floatFeatureForBucketIdx, const ui32 bucketsCount, TCudaQuantizationBucket* __restrict__ target ) { const int blockby32 = blockIdx.x * QuantizationDocBlockSize / WarpSize + threadIdx.x / WarpSize; const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + threadIdx.x % WarpSize; const int targetBucketIdx = blockIdx.y; const float* featureBorders = borders + featureBorderOffsets[targetBucketIdx]; const int featureBorderCount = __ldg(featureBordersCount + targetBucketIdx); const int featureIdx = floatFeatureForBucketIdx[targetBucketIdx]; __shared__ float bordersLocal[QuantizationDocBlockSize]; if (threadIdx.x < featureBorderCount) { bordersLocal[threadIdx.x] = __ldg(featureBorders + threadIdx.x); } __syncthreads(); float4 features; features.x = floatAccessor(featureIdx, firstDocForThread + 0 * WarpSize); features.y = floatAccessor(featureIdx, firstDocForThread + 1 * WarpSize); features.z = floatAccessor(featureIdx, firstDocForThread + 2 * WarpSize); features.w = floatAccessor(featureIdx, 
firstDocForThread + 3 * WarpSize); TCudaQuantizationBucket bins = { 0 }; #pragma unroll 8 for (int borderId = 0; borderId < featureBorderCount; ++borderId) { const float border = bordersLocal[borderId]; bins.x += features.x > border; bins.y += features.y > border; bins.z += features.z > border; bins.w += features.w > border; } if (firstDocForThread < floatAccessor.SamplesCount()) { target[bucketsCount * WarpSize * blockby32 + targetBucketIdx * WarpSize + threadIdx.x % WarpSize] = bins; } } template<int TreeDepth> TTreeIndex __device__ __forceinline__ CalcIndexesUnwrapped(const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { TTreeIndex result = { 0 }; #pragma unroll TreeDepth for (int depth = 0; depth < TreeDepth; ++depth) { const TGPURepackedBin bin = Ldg(curRepackedBinPtr + depth); TCudaQuantizationBucket buckets = __ldg(quantizedFeatures + bin.FeatureIdx); // |= operator fails (MLTOOLS-6839 on a100) result.x += ((buckets.x) >= bin.FeatureVal) << depth; result.y += ((buckets.y) >= bin.FeatureVal) << depth; result.z += ((buckets.z) >= bin.FeatureVal) << depth; result.w += ((buckets.w) >= bin.FeatureVal) << depth; } return result; } TTreeIndex __device__ CalcIndexesBase(int TreeDepth, const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { TTreeIndex bins = { 0 }; for (int depth = 0; depth < TreeDepth; ++depth) { const TGPURepackedBin bin = Ldg(curRepackedBinPtr + depth); TCudaQuantizationBucket vals = __ldg(quantizedFeatures + bin.FeatureIdx); // |= operator fails (MLTOOLS-6839 on a100) bins.x += ((vals.x) >= bin.FeatureVal) << depth; bins.y += ((vals.y) >= bin.FeatureVal) << depth; bins.z += ((vals.z) >= bin.FeatureVal) << depth; bins.w += ((vals.w) >= bin.FeatureVal) << depth; } return bins; } TTreeIndex __device__ __forceinline__ CalcTreeVals(int curTreeDepth, const TGPURepackedBin* const __restrict__ 
curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { switch (curTreeDepth) { case 6: return CalcIndexesUnwrapped<6>(curRepackedBinPtr, quantizedFeatures); case 7: return CalcIndexesUnwrapped<7>(curRepackedBinPtr, quantizedFeatures); case 8: return CalcIndexesUnwrapped<8>(curRepackedBinPtr, quantizedFeatures); default: return CalcIndexesBase(curTreeDepth, curRepackedBinPtr, quantizedFeatures); } } __launch_bounds__(BlockWidth, 1) __global__ void EvalObliviousTrees( const TCudaQuantizationBucket* __restrict__ quantizedFeatures, const ui32* __restrict__ treeSizes, const ui32 treeCount, const ui32* __restrict__ treeStartOffsets, const TGPURepackedBin* __restrict__ repackedBins, const ui32* __restrict__ firstLeafOfset, const ui32 bucketsCount, const TCudaEvaluatorLeafType* __restrict__ leafValues, const ui32 documentCount, TCudaEvaluatorLeafType* __restrict__ results) { const int innerBlockBy32 = threadIdx.x / WarpSize; const int blockby32 = blockIdx.y * EvalDocBlockSize / WarpSize + innerBlockBy32; const int inBlockId = threadIdx.x % WarpSize; const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + inBlockId; quantizedFeatures += bucketsCount * WarpSize * blockby32 + threadIdx.x % WarpSize; const int firstTreeIdx = TreeSubBlockWidth * ExtTreeBlockWidth * (threadIdx.y + TreeSubBlockWidth * blockIdx.x); const int lastTreeIdx = min(firstTreeIdx + TreeSubBlockWidth * ExtTreeBlockWidth, treeCount); double4 localResult = { 0 }; if (firstTreeIdx < lastTreeIdx && firstDocForThread < documentCount) { const TGPURepackedBin* __restrict__ curRepackedBinPtr = repackedBins + __ldg(treeStartOffsets + firstTreeIdx); leafValues += firstLeafOfset[firstTreeIdx]; int treeIdx = firstTreeIdx; const int lastTreeBy2 = lastTreeIdx - ((lastTreeIdx - firstTreeIdx) & 0x3); for (; treeIdx < lastTreeBy2; treeIdx += 2) { const int curTreeDepth1 = __ldg(treeSizes + treeIdx); const int curTreeDepth2 = __ldg(treeSizes + treeIdx + 1); const TTreeIndex 
bins1 = CalcTreeVals(curTreeDepth1, curRepackedBinPtr, quantizedFeatures); const TTreeIndex bins2 = CalcTreeVals(curTreeDepth2, curRepackedBinPtr + curTreeDepth1, quantizedFeatures); const auto leafValues2 = leafValues + (1 << curTreeDepth1); localResult.x += __ldg(leafValues + bins1.x) + __ldg(leafValues2 + bins2.x); localResult.y += __ldg(leafValues + bins1.y) + __ldg(leafValues2 + bins2.y); localResult.z += __ldg(leafValues + bins1.z) + __ldg(leafValues2 + bins2.z); localResult.w += __ldg(leafValues + bins1.w) + __ldg(leafValues2 + bins2.w); curRepackedBinPtr += curTreeDepth1 + curTreeDepth2; leafValues = leafValues2 + (1 << curTreeDepth2); } for (; treeIdx < lastTreeIdx; ++treeIdx) { const int curTreeDepth = __ldg(treeSizes + treeIdx); const TTreeIndex bins = CalcTreeVals(curTreeDepth, curRepackedBinPtr, quantizedFeatures); localResult.x += __ldg(leafValues + bins.x); localResult.y += __ldg(leafValues + bins.y); localResult.z += __ldg(leafValues + bins.z); localResult.w += __ldg(leafValues + bins.w); curRepackedBinPtr += curTreeDepth; leafValues += (1 << curTreeDepth); } } // TODO(kirillovs): reduce code is valid if those conditions met static_assert(EvalDocBlockSize * ObjectsPerThread == 128, ""); static_assert(EvalDocBlockSize == 32, ""); __shared__ TCudaEvaluatorLeafType reduceVals[EvalDocBlockSize * ObjectsPerThread * TreeSubBlockWidth]; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 0 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.x; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 1 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.y; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 2 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.z; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 3 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.w; __syncthreads(); 
TCudaEvaluatorLeafType lr = reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize]; for (int i = 256; i < 256 * 4; i += 256) { lr += reduceVals[i + threadIdx.x + threadIdx.y * EvalDocBlockSize]; } reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] = lr; __syncthreads(); if (threadIdx.y < ObjectsPerThread) { TAtomicAdd<TCudaEvaluatorLeafType>::Add( results + blockby32 * WarpSize * ObjectsPerThread + threadIdx.x + threadIdx.y * EvalDocBlockSize, reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] + reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize + 128] ); } } template<NCB::NModelEvaluation::EPredictionType PredictionType, bool OneDimension> __global__ void ProcessResultsImpl( const float* __restrict__ rawResults, ui32 resultsSize, const double* __restrict__ bias, double scale, double* hostMemResults, ui32 approxDimension ) { for (ui32 resultId = threadIdx.x; resultId < resultsSize; resultId += blockDim.x) { if (OneDimension) { double res = scale * __ldg(rawResults + resultId) + __ldg(bias); if (PredictionType == NCB::NModelEvaluation::EPredictionType::RawFormulaVal) { hostMemResults[resultId] = res; } else if (PredictionType == NCB::NModelEvaluation::EPredictionType::Probability) { hostMemResults[resultId] = 1 / (1 + exp(-res)); } else if (PredictionType == NCB::NModelEvaluation::EPredictionType::Class) { hostMemResults[resultId] = res > 0; } else { assert(0); } } else { const float* rawResultsSub = rawResults + resultId * approxDimension; if (PredictionType == NCB::NModelEvaluation::EPredictionType::Class) { double maxVal = scale * __ldg(rawResultsSub) + __ldg(bias); ui32 maxPos = 0; for (ui32 dim = 1; dim < approxDimension; ++dim) { double val = scale * __ldg(rawResultsSub + dim) + __ldg(bias + dim); if (val > maxVal) { maxVal = val; maxPos = dim; } } hostMemResults[resultId] = maxPos; } else { double* hostMemResultsBase = hostMemResults + resultId * approxDimension; for (ui32 dim = 0; dim < approxDimension; ++dim) { hostMemResultsBase[dim] = 
scale * __ldg(rawResultsSub + dim) + __ldg(bias + dim); } if (PredictionType != NCB::NModelEvaluation::EPredictionType::RawFormulaVal) { // TODO(kirillovs): write softmax assert(0); } } } } } template<bool OneDimension> void ProcessResults( const TGPUCatboostEvaluationContext& ctx, NCB::NModelEvaluation::EPredictionType predictionType, size_t objectsCount) { switch (predictionType) { case NCB::NModelEvaluation::EPredictionType::RawFormulaVal: ProcessResultsImpl<NCB::NModelEvaluation::EPredictionType::RawFormulaVal, OneDimension><<<1, 256, 0, ctx.Stream>>> ( ctx.EvalDataCache.ResultsFloatBuf.Get(), objectsCount, ctx.GPUModelData.Bias.Get(), ctx.GPUModelData.Scale, ctx.EvalDataCache.ResultsDoubleBuf.Get(), ctx.GPUModelData.ApproxDimension ); break; case NCB::NModelEvaluation::EPredictionType::Exponent: case NCB::NModelEvaluation::EPredictionType::RMSEWithUncertainty: case NCB::NModelEvaluation::EPredictionType::MultiProbability: ythrow yexception() << "Unimplemented on GPU: prediction type " << ToString(predictionType); break; case NCB::NModelEvaluation::EPredictionType::Probability: ProcessResultsImpl<NCB::NModelEvaluation::EPredictionType::Probability, OneDimension><<<1, 256, 0, ctx.Stream>>> ( ctx.EvalDataCache.ResultsFloatBuf.Get(), objectsCount, ctx.GPUModelData.Bias.Get(), ctx.GPUModelData.Scale, ctx.EvalDataCache.ResultsDoubleBuf.Get(), ctx.GPUModelData.ApproxDimension ); break; case NCB::NModelEvaluation::EPredictionType::Class: ProcessResultsImpl<NCB::NModelEvaluation::EPredictionType::Class, OneDimension><<<1, 256, 0, ctx.Stream>>> ( ctx.EvalDataCache.ResultsFloatBuf.Get(), objectsCount, ctx.GPUModelData.Bias.Get(), ctx.GPUModelData.Scale, ctx.EvalDataCache.ResultsDoubleBuf.Get(), ctx.GPUModelData.ApproxDimension ); break; } } void TGPUCatboostEvaluationContext::EvalQuantizedData( const TCudaQuantizedData* data, size_t treeStart, size_t treeEnd, TArrayRef<double> result, NCB::NModelEvaluation::EPredictionType predictionType ) const { const dim3 
treeCalcDimBlock(EvalDocBlockSize, TreeSubBlockWidth); const dim3 treeCalcDimGrid( NKernel::CeilDivide<unsigned int>(GPUModelData.TreeSizes.Size(), TreeSubBlockWidth * ExtTreeBlockWidth), NKernel::CeilDivide<unsigned int>(data->GetObjectsCount(), EvalDocBlockSize * ObjectsPerThread) ); ClearMemoryAsync(EvalDataCache.ResultsFloatBuf.AsArrayRef(), Stream); EvalObliviousTrees<<<treeCalcDimGrid, treeCalcDimBlock, 0, Stream>>> ( data->BinarizedFeaturesBuffer.Get(), GPUModelData.TreeSizes.Get(), GPUModelData.TreeSizes.Size(), GPUModelData.TreeStartOffsets.Get(), GPUModelData.TreeSplits.Get(), GPUModelData.TreeFirstLeafOffsets.Get(), GPUModelData.FloatFeatureForBucketIdx.Size(), GPUModelData.ModelLeafs.Get(), data->GetObjectsCount(), EvalDataCache.ResultsFloatBuf.Get() ); if (GPUModelData.ApproxDimension == 1) { ProcessResults<true>(*this, predictionType, data->GetObjectsCount()); } else { ProcessResults<false>(*this, predictionType, data->GetObjectsCount()); } MemoryCopyAsync<double>(EvalDataCache.ResultsDoubleBuf.Slice(0, data->GetObjectsCount()), result, Stream); } void TGPUCatboostEvaluationContext::QuantizeData(const TGPUDataInput& dataInput, TCudaQuantizedData* quantizedData) const{ const dim3 quantizationDimBlock(QuantizationDocBlockSize, 1); const dim3 quantizationDimGrid( NKernel::CeilDivide<unsigned int>(dataInput.ObjectCount, QuantizationDocBlockSize * ObjectsPerThread), GPUModelData.BordersCount.Size() // float features from models ); if (dataInput.FloatFeatureLayout == TGPUDataInput::EFeatureLayout::ColumnFirst) { TFeatureAccessor<float, TGPUDataInput::EFeatureLayout::ColumnFirst> floatFeatureAccessor; floatFeatureAccessor.FeatureCount = dataInput.FloatFeatureCount; floatFeatureAccessor.Stride = dataInput.Stride; floatFeatureAccessor.ObjectCount = dataInput.ObjectCount; floatFeatureAccessor.FeaturesPtr = dataInput.FlatFloatsVector.data(); Binarize<<<quantizationDimGrid, quantizationDimBlock, 0, Stream>>> ( floatFeatureAccessor, 
GPUModelData.FlatBordersVector.Get(), GPUModelData.BordersOffsets.Get(), GPUModelData.BordersCount.Get(), GPUModelData.FloatFeatureForBucketIdx.Get(), GPUModelData.FloatFeatureForBucketIdx.Size(), quantizedData->BinarizedFeaturesBuffer.Get() ); } else { TFeatureAccessor<float, TGPUDataInput::EFeatureLayout::RowFirst> floatFeatureAccessor; floatFeatureAccessor.FeatureCount = dataInput.FloatFeatureCount; floatFeatureAccessor.ObjectCount = dataInput.ObjectCount; floatFeatureAccessor.Stride = dataInput.Stride; floatFeatureAccessor.FeaturesPtr = dataInput.FlatFloatsVector.data(); Binarize<<<quantizationDimGrid, quantizationDimBlock, 0, Stream>>> ( floatFeatureAccessor, GPUModelData.FlatBordersVector.Get(), GPUModelData.BordersOffsets.Get(), GPUModelData.BordersCount.Get(), GPUModelData.FloatFeatureForBucketIdx.Get(), GPUModelData.FloatFeatureForBucketIdx.Size(), quantizedData->BinarizedFeaturesBuffer.Get() ); } } void TGPUCatboostEvaluationContext::EvalData( const TGPUDataInput& dataInput, size_t treeStart, size_t treeEnd, TArrayRef<double> result, NCB::NModelEvaluation::EPredictionType predictionType) const { TCudaQuantizedData quantizedData; quantizedData.SetDimensions(GPUModelData.FloatFeatureForBucketIdx.Size(), dataInput.ObjectCount); QuantizeData(dataInput, &quantizedData); EvalQuantizedData(&quantizedData, treeStart, treeEnd, result, predictionType); }
929c760963b51e008eb88a7d0d407e0c52b96d79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // statistical kernel #define NUMBER_THREADS 512 __global__ void prepare222( long d_Ne, float *d_I, // pointer to output image (DEVICE GLOBAL MEMORY) float *d_sums, // pointer to input image (DEVICE GLOBAL MEMORY) float *d_sums2){ // indexes int bx = blockIdx.x; // get current horizontal block index int tx = threadIdx.x; // get current horizontal thread index int ei = (bx*NUMBER_THREADS)+tx; // unique thread id, more threads than actual elements !!! // copy input to output & log uncompress if(ei<d_Ne){ // do only for the number of elements, omit extra threads d_sums[ei] = d_I[ei]; d_sums2[ei] = d_I[ei]*d_I[ei]; } }
929c760963b51e008eb88a7d0d407e0c52b96d79.cu
// statistical kernel #define NUMBER_THREADS 512 __global__ void prepare222( long d_Ne, float *d_I, // pointer to output image (DEVICE GLOBAL MEMORY) float *d_sums, // pointer to input image (DEVICE GLOBAL MEMORY) float *d_sums2){ // indexes int bx = blockIdx.x; // get current horizontal block index int tx = threadIdx.x; // get current horizontal thread index int ei = (bx*NUMBER_THREADS)+tx; // unique thread id, more threads than actual elements !!! // copy input to output & log uncompress if(ei<d_Ne){ // do only for the number of elements, omit extra threads d_sums[ei] = d_I[ei]; d_sums2[ei] = d_I[ei]*d_I[ei]; } }
297b5d9cf6a00eb8f95e430f2f6086934880fdd6.hip
// !!! This is a file automatically generated by hipify!!! // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha // ============================================================================= // // Base class for processing sph force in fsi system.// // ============================================================================= #include <thrust/extrema.h> #include <thrust/sort.h> #include "chrono_fsi/physics/ChFsiForce.cuh" #include "chrono_fsi/utils/ChUtilsDevice.cuh" //========================================================================================================================================== namespace chrono { namespace fsi { ChFsiForce::ChFsiForce(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<NumberOfObjects> otherNumObjects) : bceWorker(otherBceWorker), sortedSphMarkersD(otherSortedSphMarkersD), markersProximityD(otherMarkersProximityD), fsiGeneralData(otherFsiGeneralData), numObjectsH(otherNumObjects), paramsH(otherParamsH) { fsiCollisionSystem = chrono_types::make_shared<ChCollisionSystemFsi>(sortedSphMarkersD, markersProximityD, paramsH, numObjectsH); sphMarkersD = NULL; } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::Finalize() { hipMemcpyToSymbolAsync(paramsD, paramsH.get(), 
sizeof(SimParams)); hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects)); printf("ChFsiForce::Finalize() numAllMarkers=%zd\n", numObjectsH->numAllMarkers); vel_XSPH_Sorted_D.resize(numObjectsH->numAllMarkers); vel_vis_Sorted_D.resize(numObjectsH->numAllMarkers); derivVelRhoD_Sorted_D.resize(numObjectsH->numAllMarkers); fsiCollisionSystem->Finalize(); } //-------------------------------------------------------------------------------------------------------------------------------- ChFsiForce::~ChFsiForce() {} void ChFsiForce::SetLinearSolver(ChFsiLinearSolver::SolverType other_solverType) { switch (other_solverType) { case ChFsiLinearSolver::SolverType::BICGSTAB: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); break; case ChFsiLinearSolver::SolverType::GMRES: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverGMRES>(); break; /// Extend this function with your own linear solvers default: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); std::cout << "The ChFsiLinearSolver you chose has not been implemented, reverting back to " "ChFsiLinearSolverBiCGStab\n"; } } //-------------------------------------------------------------------------------------------------------------------------------- // use invasive to avoid one extra copy. However, keep in mind that sorted is // changed. 
void ChFsiForce::CopySortedToOriginal_Invasive_R3(thrust::device_vector<Real3>& original, thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R3(thrust::device_vector<Real3>& original, const thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real3> dummySorted = sorted; CopySortedToOriginal_Invasive_R3(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- // use invasive to avoid one extra copy. However, keep in mind that sorted is // changed. 
void ChFsiForce::CopySortedToOriginal_Invasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real4> dummySorted = sorted; CopySortedToOriginal_Invasive_R4(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- } // namespace fsi } // namespace chrono
297b5d9cf6a00eb8f95e430f2f6086934880fdd6.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha // ============================================================================= // // Base class for processing sph force in fsi system.// // ============================================================================= #include <thrust/extrema.h> #include <thrust/sort.h> #include "chrono_fsi/physics/ChFsiForce.cuh" #include "chrono_fsi/utils/ChUtilsDevice.cuh" //========================================================================================================================================== namespace chrono { namespace fsi { ChFsiForce::ChFsiForce(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<NumberOfObjects> otherNumObjects) : bceWorker(otherBceWorker), sortedSphMarkersD(otherSortedSphMarkersD), markersProximityD(otherMarkersProximityD), fsiGeneralData(otherFsiGeneralData), numObjectsH(otherNumObjects), paramsH(otherParamsH) { fsiCollisionSystem = chrono_types::make_shared<ChCollisionSystemFsi>(sortedSphMarkersD, markersProximityD, paramsH, numObjectsH); sphMarkersD = NULL; } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::Finalize() { cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); cudaMemcpyToSymbolAsync(numObjectsD, 
numObjectsH.get(), sizeof(NumberOfObjects)); printf("ChFsiForce::Finalize() numAllMarkers=%zd\n", numObjectsH->numAllMarkers); vel_XSPH_Sorted_D.resize(numObjectsH->numAllMarkers); vel_vis_Sorted_D.resize(numObjectsH->numAllMarkers); derivVelRhoD_Sorted_D.resize(numObjectsH->numAllMarkers); fsiCollisionSystem->Finalize(); } //-------------------------------------------------------------------------------------------------------------------------------- ChFsiForce::~ChFsiForce() {} void ChFsiForce::SetLinearSolver(ChFsiLinearSolver::SolverType other_solverType) { switch (other_solverType) { case ChFsiLinearSolver::SolverType::BICGSTAB: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); break; case ChFsiLinearSolver::SolverType::GMRES: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverGMRES>(); break; /// Extend this function with your own linear solvers default: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); std::cout << "The ChFsiLinearSolver you chose has not been implemented, reverting back to " "ChFsiLinearSolverBiCGStab\n"; } } //-------------------------------------------------------------------------------------------------------------------------------- // use invasive to avoid one extra copy. However, keep in mind that sorted is // changed. 
void ChFsiForce::CopySortedToOriginal_Invasive_R3(thrust::device_vector<Real3>& original, thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R3(thrust::device_vector<Real3>& original, const thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real3> dummySorted = sorted; CopySortedToOriginal_Invasive_R3(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- // use invasive to avoid one extra copy. However, keep in mind that sorted is // changed. 
void ChFsiForce::CopySortedToOriginal_Invasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real4> dummySorted = sorted; CopySortedToOriginal_Invasive_R4(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- } // namespace fsi } // namespace chrono
fe19f33107f0bea279a252ebcf20fa1dded0eefd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "../common/book.h" #define N 10000 __global__ void add(int* a, int* b, int* c) { /* * blockIdx is the built-in variable which contains the value of * the block index for whichever block is currently running the * device code. * Another thing to be explained is the '.x' domain. In CUDA C, * blocks are actually defined in two-dimensions */ int tid = blockIdx.x; if(tid < N) c[tid] = a[tid] + b[tid]; } int main(void) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; HANDLE_ERROR(hipMalloc((void**)&dev_a, N * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&dev_b, N * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&dev_c, N * sizeof(int))); for(int i = 0; i < N; ++i) { a[i] = -i; b[i] = i * i; } HANDLE_ERROR(hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice)); /* * Note the <<<N, 1>>> here: * The first N represents the number of parallel blocks in which we * we would like the device to execute out kernel. The runtime will * create N copies of the kernel and running them in parallel. */ hipLaunchKernelGGL(( add), dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c); HANDLE_ERROR(hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost)); for(int i = 0; i < N; ++i) { printf("%d + %d = %d\n", a[i], b[i], c[i]); } hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); return 0; }
fe19f33107f0bea279a252ebcf20fa1dded0eefd.cu
#include <stdio.h> #include "../common/book.h" #define N 10000 __global__ void add(int* a, int* b, int* c) { /* * blockIdx is the built-in variable which contains the value of * the block index for whichever block is currently running the * device code. * Another thing to be explained is the '.x' domain. In CUDA C, * blocks are actually defined in two-dimensions */ int tid = blockIdx.x; if(tid < N) c[tid] = a[tid] + b[tid]; } int main(void) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; HANDLE_ERROR(cudaMalloc((void**)&dev_a, N * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&dev_b, N * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&dev_c, N * sizeof(int))); for(int i = 0; i < N; ++i) { a[i] = -i; b[i] = i * i; } HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice)); /* * Note the <<<N, 1>>> here: * The first N represents the number of parallel blocks in which we * we would like the device to execute out kernel. The runtime will * create N copies of the kernel and running them in parallel. */ add<<<N, 1>>>(dev_a, dev_b, dev_c); HANDLE_ERROR(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost)); for(int i = 0; i < N; ++i) { printf("%d + %d = %d\n", a[i], b[i], c[i]); } cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); return 0; }
dc3ac1ea98779167ce6d37491e67e59dc875261d.hip
// !!! This is a file automatically generated by hipify!!! /* * Discrete Cosine Transform in row wise (DCT one) * DCT_I_Row * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DCT_I_Row(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "DCT_I_Row.cuh" #include "mex.h" #include "gpu/mxGPUArray.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #define TILE_DIM 16 #define DEFAULT_DIM 32 // Tile dimension #define DELTA(i, j) ((i==j)?1:0) //const double PI_d = 3.141592653589793238462643383279502884; //pi __global__ void DCTI_Row_Kernel(double *A, double *C, int numARows, int numAColumns, int numCRows, int numCColumns) { double CValue = 0.0; const double PI_d = 3.141592653589793238462643383279502884; //pi int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ double As[TILE_DIM][TILE_DIM]; __shared__ double Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; } else { As[threadIdx.y][threadIdx.x] = 0.0; } if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = cos(((threadIdx.y + k*TILE_DIM)*PI_d*Col / (numAColumns - 1)))*sqrt(1.0 / (1 + DELTA(Col + 1, 1) + DELTA(Col + 1, 
numAColumns)))*sqrt(1.0 / (1 + DELTA(1, (threadIdx.y + k*TILE_DIM) + 1) + DELTA(numAColumns, (threadIdx.y + k*TILE_DIM) + 1)))*sqrt(2.0 / numAColumns); } //Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col]; else { Bs[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE extern "C" void CalculateTransformDCTRowOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) { double * hostA = A; // The A matrix //double * hostB = B; // The B matrix double * hostC = C; // The output C matrix //float * hostComputedC; double * deviceA; //double * deviceB; double * deviceC; //hostA = (float *)malloc(sizeof(float)*numARows*numAColumns); //hostB = (float *)malloc(sizeof(float)*numBRows*numBColumns); // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numAColumns; //hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); // Allocating GPU memory gpuErrchk(hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns)); //hipMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns); gpuErrchk(hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns)); //thrust::device_ptr< double >dev_ptr_A(deviceA); //thrust::device_ptr< double >dev_ptr_C(deviceC); // Copy memory to the GPU gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice)); //hipMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, hipMemcpyHostToDevice); ///////////////////////////////////////////////////////// dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 
dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DCTI_Row_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(hipFree(deviceA)); //hipFree(deviceB); gpuErrchk(hipFree(deviceC)); return; }
dc3ac1ea98779167ce6d37491e67e59dc875261d.cu
/* * Discrete Cosine Transform in row wise (DCT one) * DCT_I_Row * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DCT_I_Row(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "DCT_I_Row.cuh" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cuda.h> #include <cuda_runtime.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #define TILE_DIM 16 #define DEFAULT_DIM 32 // Tile dimension #define DELTA(i, j) ((i==j)?1:0) //const double PI_d = 3.141592653589793238462643383279502884; //pi __global__ void DCTI_Row_Kernel(double *A, double *C, int numARows, int numAColumns, int numCRows, int numCColumns) { double CValue = 0.0; const double PI_d = 3.141592653589793238462643383279502884; //pi int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ double As[TILE_DIM][TILE_DIM]; __shared__ double Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; } else { As[threadIdx.y][threadIdx.x] = 0.0; } if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = cos(((threadIdx.y + k*TILE_DIM)*PI_d*Col / (numAColumns - 1)))*sqrt(1.0 / (1 + DELTA(Col + 1, 1) + DELTA(Col + 1, numAColumns)))*sqrt(1.0 / (1 + DELTA(1, (threadIdx.y + k*TILE_DIM) + 1) + 
DELTA(numAColumns, (threadIdx.y + k*TILE_DIM) + 1)))*sqrt(2.0 / numAColumns); } //Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col]; else { Bs[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE extern "C" void CalculateTransformDCTRowOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) { double * hostA = A; // The A matrix //double * hostB = B; // The B matrix double * hostC = C; // The output C matrix //float * hostComputedC; double * deviceA; //double * deviceB; double * deviceC; //hostA = (float *)malloc(sizeof(float)*numARows*numAColumns); //hostB = (float *)malloc(sizeof(float)*numBRows*numBColumns); // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numAColumns; //hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); // Allocating GPU memory gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns)); //cudaMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns); gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns)); //thrust::device_ptr< double >dev_ptr_A(deviceA); //thrust::device_ptr< double >dev_ptr_C(deviceC); // Copy memory to the GPU gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, cudaMemcpyHostToDevice)); //cudaMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, cudaMemcpyHostToDevice); ///////////////////////////////////////////////////////// dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; 
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DCTI_Row_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(cudaFree(deviceA)); //cudaFree(deviceB); gpuErrchk(cudaFree(deviceC)); return; }
ce19bb0d9bfcdd4cabc88cdf9a2a7705358a3cf1.hip
// !!! This is a file automatically generated by hipify!!! // ------------------------------------------------------------------------ // File: spmm_test.cu // S-BLAS: A Scalable Sparse-BLAS Kernel Library for Multi-GPUs. // This file tests the SPMM implementation. // ------------------------------------------------------------------------ // Ang Li, Scientist, Pacific Northwest National Laboratory(PNNL), U.S. // Homepage: http://www.angliphd.com // Other PNNL Developers: Chenhao Xie, Jieyang Chen, Jiajia Li, Jesun Firoz // and Linghao Song // GitHub repo: http://www.github.com/uuudown/S-BLAS // PNNL-IPID: 31803-E, IR: PNNL-31803 // MIT Lincese. // ------------------------------------------------------------------------ #include "matrix.h" #include "sblas.h" #include "spmm.h" bool spmmCsrTest(const char* A_path, int b_width, double alpha, double beta, unsigned n_gpu) { cpu_timer load_timer, run_timer, run_cpu_timer; load_timer.start_timer(); CsrSparseMatrix<int,double> A(A_path); DenseMatrix<int,double> B(A.width, b_width, col_major); DenseMatrix<int,double> C(A.height, b_width, 1, col_major); DenseMatrix<int,double> C_cpu(A.height, b_width, 1, col_major); //Partition and Distribute A.sync2gpu(n_gpu, replicate); B.sync2gpu(n_gpu, segment); C.sync2gpu(n_gpu, segment); CUDA_SAFE_CALL( hipDeviceSynchronize() ); load_timer.stop_timer(); run_timer.start_timer(); sblas_spmm_csr_v1<int, double>(&A, &B, &C, alpha, beta, n_gpu); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); run_timer.stop_timer(); run_cpu_timer.start_timer(); sblas_spmm_csr_cpu<int, double>(&A, &B, &C_cpu, alpha, beta); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); run_cpu_timer.stop_timer(); //print_1d_array(C_cpu.val,C_cpu.get_mtx_num()); //print_1d_array(C.val,C.get_mtx_num()); bool correct = check_equal(C_cpu.val, C.val, C.get_mtx_num()); cout << "Validation = " << (correct?"True":"False") << endl; cout << "Load Time: " << load_timer.measure() << "ms." 
<< endl; cout << n_gpu << "-GPUs Run Time: " << run_timer.measure() << " ms." << endl; cout << "CPU Run Time: " << run_cpu_timer.measure() << " ms." << endl; return correct; } bool spmmCsrTest2(const char* A_path, int b_width, double alpha, double beta, unsigned n_gpu) { cpu_timer load_timer, run_timer, run_cpu_timer; load_timer.start_timer(); //CsrSparseMatrix<int, double> A("./ash85.mtx"); CsrSparseMatrix<int,double> A(A_path); DenseMatrix<int,double> B(A.width, b_width, col_major); DenseMatrix<int,double> C(A.height, b_width, 1, col_major); DenseMatrix<int,double> C_cpu(A.height, b_width, 1, col_major); //Partition and Distribute A.sync2gpu(n_gpu, segment); B.sync2gpu(n_gpu, replicate); C.sync2gpu(n_gpu, replicate); CUDA_SAFE_CALL( hipDeviceSynchronize() ); load_timer.stop_timer(); run_timer.start_timer(); sblas_spmm_csr_v2<int, double>(&A, &B, &C, alpha, beta, n_gpu); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); run_timer.stop_timer(); run_cpu_timer.start_timer(); sblas_spmm_csr_cpu<int, double>(&A, &B, &C_cpu, alpha, beta); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); run_cpu_timer.stop_timer(); //get data back to CPU C.sync2cpu(0); //print_1d_array(C.val,C.get_mtx_num()); //print_1d_array(C_cpu.val,C_cpu.get_mtx_num()); bool correct = check_equal(C_cpu.val, C.val, C.get_mtx_num()); cout << "Validation = " << (correct?"True":"False") << endl; cout << "Load Time: " << load_timer.measure() << "ms." << endl; cout << n_gpu << "-GPUs Run Time: " << run_timer.measure() << " ms." << endl; cout << "CPU Run Time: " << run_cpu_timer.measure() << " ms." 
<< endl; return correct; } int main(int argc, char* argv[]) { if (argc != 7) { cerr << "./spmm_test method(1:partition-B, 2:partition-A) \ A_path B_width alpha beta gpus" << endl; exit(1); } const int method = atoi(argv[1]); //method-1: partition-B, method-2: partition-A const char* A_path = argv[2]; const int B_width = atof(argv[3]); const double alpha = atof(argv[4]); const double beta = atof(argv[5]); const unsigned gpus = atoi(argv[6]); if (method == 1) { spmmCsrTest(A_path, B_width, alpha, beta, gpus); } else if (method == 2) { spmmCsrTest2(A_path, B_width, alpha, beta, gpus); //spmmCsrTest2(256,3.0,4.0,4); } else { cerr << "Method can be only 1 or 2." << endl; exit(1); } return 0; }
ce19bb0d9bfcdd4cabc88cdf9a2a7705358a3cf1.cu
// ------------------------------------------------------------------------ // File: spmm_test.cu // S-BLAS: A Scalable Sparse-BLAS Kernel Library for Multi-GPUs. // This file tests the SPMM implementation. // ------------------------------------------------------------------------ // Ang Li, Scientist, Pacific Northwest National Laboratory(PNNL), U.S. // Homepage: http://www.angliphd.com // Other PNNL Developers: Chenhao Xie, Jieyang Chen, Jiajia Li, Jesun Firoz // and Linghao Song // GitHub repo: http://www.github.com/uuudown/S-BLAS // PNNL-IPID: 31803-E, IR: PNNL-31803 // MIT Lincese. // ------------------------------------------------------------------------ #include "matrix.h" #include "sblas.h" #include "spmm.h" bool spmmCsrTest(const char* A_path, int b_width, double alpha, double beta, unsigned n_gpu) { cpu_timer load_timer, run_timer, run_cpu_timer; load_timer.start_timer(); CsrSparseMatrix<int,double> A(A_path); DenseMatrix<int,double> B(A.width, b_width, col_major); DenseMatrix<int,double> C(A.height, b_width, 1, col_major); DenseMatrix<int,double> C_cpu(A.height, b_width, 1, col_major); //Partition and Distribute A.sync2gpu(n_gpu, replicate); B.sync2gpu(n_gpu, segment); C.sync2gpu(n_gpu, segment); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); load_timer.stop_timer(); run_timer.start_timer(); sblas_spmm_csr_v1<int, double>(&A, &B, &C, alpha, beta, n_gpu); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); run_timer.stop_timer(); run_cpu_timer.start_timer(); sblas_spmm_csr_cpu<int, double>(&A, &B, &C_cpu, alpha, beta); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); run_cpu_timer.stop_timer(); //print_1d_array(C_cpu.val,C_cpu.get_mtx_num()); //print_1d_array(C.val,C.get_mtx_num()); bool correct = check_equal(C_cpu.val, C.val, C.get_mtx_num()); cout << "Validation = " << (correct?"True":"False") << endl; cout << "Load Time: " << load_timer.measure() << "ms." 
<< endl; cout << n_gpu << "-GPUs Run Time: " << run_timer.measure() << " ms." << endl; cout << "CPU Run Time: " << run_cpu_timer.measure() << " ms." << endl; return correct; } bool spmmCsrTest2(const char* A_path, int b_width, double alpha, double beta, unsigned n_gpu) { cpu_timer load_timer, run_timer, run_cpu_timer; load_timer.start_timer(); //CsrSparseMatrix<int, double> A("./ash85.mtx"); CsrSparseMatrix<int,double> A(A_path); DenseMatrix<int,double> B(A.width, b_width, col_major); DenseMatrix<int,double> C(A.height, b_width, 1, col_major); DenseMatrix<int,double> C_cpu(A.height, b_width, 1, col_major); //Partition and Distribute A.sync2gpu(n_gpu, segment); B.sync2gpu(n_gpu, replicate); C.sync2gpu(n_gpu, replicate); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); load_timer.stop_timer(); run_timer.start_timer(); sblas_spmm_csr_v2<int, double>(&A, &B, &C, alpha, beta, n_gpu); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); run_timer.stop_timer(); run_cpu_timer.start_timer(); sblas_spmm_csr_cpu<int, double>(&A, &B, &C_cpu, alpha, beta); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); run_cpu_timer.stop_timer(); //get data back to CPU C.sync2cpu(0); //print_1d_array(C.val,C.get_mtx_num()); //print_1d_array(C_cpu.val,C_cpu.get_mtx_num()); bool correct = check_equal(C_cpu.val, C.val, C.get_mtx_num()); cout << "Validation = " << (correct?"True":"False") << endl; cout << "Load Time: " << load_timer.measure() << "ms." << endl; cout << n_gpu << "-GPUs Run Time: " << run_timer.measure() << " ms." << endl; cout << "CPU Run Time: " << run_cpu_timer.measure() << " ms." 
<< endl; return correct; } int main(int argc, char* argv[]) { if (argc != 7) { cerr << "./spmm_test method(1:partition-B, 2:partition-A) \ A_path B_width alpha beta gpus" << endl; exit(1); } const int method = atoi(argv[1]); //method-1: partition-B, method-2: partition-A const char* A_path = argv[2]; const int B_width = atof(argv[3]); const double alpha = atof(argv[4]); const double beta = atof(argv[5]); const unsigned gpus = atoi(argv[6]); if (method == 1) { spmmCsrTest(A_path, B_width, alpha, beta, gpus); } else if (method == 2) { spmmCsrTest2(A_path, B_width, alpha, beta, gpus); //spmmCsrTest2(256,3.0,4.0,4); } else { cerr << "Method can be only 1 or 2." << endl; exit(1); } return 0; }
c7531fb9dbbcd634904cf7d11f83598210677407.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CUDAEF.h" #include "CUDAData.h" extern CUDAData* cudaData; texture<float, 1, hipReadModeElementType> g_texHeaviside; //a single object and a single view per video card at any one time __device__ __constant__ int viewTransform[4]; __device__ __constant__ float invP[16]; __device__ __constant__ float Mplane[4][4]; __device__ __constant__ float projectionParams[5]; __device__ __constant__ float invPM[16]; __device__ __constant__ float q[4]; __device__ __constant__ int histOffsets[4]; __device__ __constant__ int histFactors[4]; __device__ __constant__ int histNoBins[4]; __host__ void initialiseEF(int width, int height, float* heavisideFunction, int heavisideFunctionSize) { //TODO FIXME VARHISTBINS int noVarBinHistograms, noVarBinHistogramBins[4]; int h_histOffsets[8], h_histFactors[8], h_noBins[8]; noVarBinHistograms = 4; noVarBinHistogramBins[0] = 8; noVarBinHistogramBins[1] = 16; noVarBinHistogramBins[2] = 32; noVarBinHistogramBins[3] = 64; h_histFactors[0] = 5; h_histFactors[1] = 4; h_histFactors[2] = 3; h_histFactors[3] = 2; cudaData->histogramSize = 0; for (int i=0; i<noVarBinHistograms; i++) { h_noBins[i] = noVarBinHistogramBins[i]; h_histOffsets[i] = cudaData->histogramSize; cudaData->histogramSize += h_noBins[i] * h_noBins[i] * h_noBins[i]; } perseusSafeCall(hipMemcpyToSymbol(histOffsets, h_histOffsets, noVarBinHistograms * sizeof(int), 0, hipMemcpyHostToDevice)); perseusSafeCall(hipMemcpyToSymbol(histFactors, h_histFactors, noVarBinHistograms * sizeof(int), 0, hipMemcpyHostToDevice)); perseusSafeCall(hipMemcpyToSymbol(histNoBins, h_noBins, noVarBinHistograms * sizeof(int), 0, hipMemcpyHostToDevice)); perseusSafeCall(hipMalloc((void**)&cudaData->dfxTranslation, 10000 * sizeof(float3))); //1080p image 16x16 blocks perseusSafeCall(hipMalloc((void**)&cudaData->dfxRotation, 10000 * sizeof(float4))); //1080p image 16x16 blocks 
perseusSafeCall(hipHostMalloc((void**)&cudaData->dfxResultTranslation, 10000 * sizeof(float3))); //1080p image 16x16 blocks perseusSafeCall(hipHostMalloc((void**)&cudaData->dfxResultRotation, 10000 * sizeof(float4))); //1080p image 16x16 blocks // copy the heviside function from the host memory into the device memory hipChannelFormatDesc descTexHeaviside = hipCreateChannelDesc<float>(); perseusSafeCall(hipMallocArray(&cudaData->arrayHeaviside, &descTexHeaviside, heavisideFunctionSize, 1)); perseusSafeCall(hipMemcpyToArray(cudaData->arrayHeaviside, 0, 0, heavisideFunction, heavisideFunctionSize * sizeof(float), hipMemcpyHostToDevice)); } __host__ void shutdownEF() { perseusSafeCall(hipFreeArray(cudaData->arrayHeaviside)); perseusSafeCall(hipFree(cudaData->dfxTranslation)); perseusSafeCall(hipFree(cudaData->dfxRotation)); perseusSafeCall(hipHostFree(cudaData->dfxResultTranslation)); perseusSafeCall(hipHostFree(cudaData->dfxResultRotation)); } __host__ void registerViewGeometricData(float *invP_EF, float *projectionParams_EF, int *viewTransform_EF) { perseusSafeCall(hipMemcpyToSymbol(invP, invP_EF, 16 * sizeof(float), 0, hipMemcpyHostToDevice)); perseusSafeCall(hipMemcpyToSymbol(projectionParams, projectionParams_EF, 5 * sizeof(float), 0, hipMemcpyHostToDevice)); perseusSafeCall(hipMemcpyToSymbol(viewTransform, viewTransform_EF, 4 * sizeof(int), 0, hipMemcpyHostToDevice)); } __host__ void registerObjectGeometricData(float* rotationQuaternion_EF, float* invPM_EF) { perseusSafeCall(hipMemcpyToSymbol(invPM, invPM_EF, 16 * sizeof(float), 0, hipMemcpyHostToDevice)); rotationQuaternion_EF[0] *= 2; rotationQuaternion_EF[1] *= 2; rotationQuaternion_EF[2] *= 2; rotationQuaternion_EF[3] *= 2; perseusSafeCall(hipMemcpyToSymbol(q, rotationQuaternion_EF, 4 * sizeof(float), 0, hipMemcpyHostToDevice)); } __host__ void processEFD1(float* dpose, int *roiNormalised, int *roiGenerated, float2* histogram, uchar4 *imageRegistered, unsigned char *imageObjects, bool isMultiobject, unsigned 
int *imageZBuffer, unsigned int *imageZBufferInverse, float *dt, int *dtPosX, int *dtPosY, float *dtDX, float *dtDY, int objectId) { size_t i; dim3 threadSize(16,16); dim3 blockSize((int)ceil((float)roiNormalised[4] / (float)16), (int)ceil((float)roiNormalised[5] / (float)16)); // copy heaviside function table into the GPU texture perseusSafeCall(hipBindTextureToArray(g_texHeaviside, cudaData->arrayHeaviside)); hipLaunchKernelGGL(( processEFD1_global), dim3(blockSize), dim3(threadSize), 0, 0, cudaData->dfxTranslation, cudaData->dfxRotation, histogram, imageRegistered, imageObjects, isMultiobject, imageZBuffer, imageZBufferInverse, dt, dtPosX, dtPosY, dtDX, dtDY, roiGenerated[0], roiGenerated[1], roiGenerated[4], roiGenerated[5], objectId); perseusSafeCall(hipUnbindTexture(g_texHeaviside)); perseusSafeCall(hipDeviceSynchronize()); perseusSafeCall(hipDeviceSynchronize()); hipMemcpy(cudaData->dfxResultTranslation, cudaData->dfxTranslation, blockSize.x * blockSize.y * sizeof(float3), hipMemcpyDeviceToHost); hipMemcpy(cudaData->dfxResultRotation, cudaData->dfxRotation, blockSize.x * blockSize.y * sizeof(float4), hipMemcpyDeviceToHost); for (i=0; i<7; i++) dpose[i] = 0; for (size_t i=0; i < blockSize.x * blockSize.y; i++) { dpose[0] += cudaData->dfxResultTranslation[i].x; dpose[1] += cudaData->dfxResultTranslation[i].y; dpose[2] += cudaData->dfxResultTranslation[i].z; dpose[3] += cudaData->dfxResultRotation[i].x; dpose[4] += cudaData->dfxResultRotation[i].y; dpose[5] += cudaData->dfxResultRotation[i].z; dpose[6] += cudaData->dfxResultRotation[i].w; } } __global__ void processEFD1_global( float3 *dfxTranslation, float4 *dfxRotation, float2 *histogram, uchar4 *imageRegistered, unsigned char *imageObjects, bool isMultiobject, unsigned int *imageZBuffer, unsigned int *imageZBufferInverse, float *dt, int *dtPosX, int *dtPosY, float *dtDX, float *dtDY, int minX, int minY, int widthROI, int heightROI, int objectId) { __shared__ float3 sdataTranslation[256]; __shared__ float4 
sdataRotation[256]; int offsetX = threadIdx.x + blockIdx.x * blockDim.x; int offsetY = threadIdx.y + blockIdx.y * blockDim.y; int offset = offsetX + offsetY * widthROI; int offsetInBlock = threadIdx.x + blockDim.x * threadIdx.y; float3 dfPPTranslation; dfPPTranslation.x = 0; dfPPTranslation.y = 0; dfPPTranslation.z = 0; float4 dfPPRotation; dfPPRotation.x = 0; dfPPRotation.y = 0; dfPPRotation.z = 0; dfPPRotation.w = 0; sdataTranslation[offsetInBlock] = dfPPTranslation; sdataRotation[offsetInBlock] = dfPPRotation; if (offsetX < widthROI && offsetY < heightROI) { uchar4 imagePixel; int n_icX, n_icY, n_icZ; int n_greyPixel, n_currentHistogram; int n_hidx, n_pidx; float2 histogramPixel; float f_pYB, f_pYF; float f_xProjected[4], f_xUnprojected[4], f_xUnrotated[4]; float f_fPPGeneric, f_dirac, f_heaviside; float f_otherInfo[2]; float f_precalcX, d_precalcY, d_precalcXY; float f_dtIdx, f_norm; if (dtPosY[offset] >= 0)// && imageRegistered[offset].w > 128) { f_dtIdx = dt[offset]; n_icX = offsetX; n_icY = offsetY; if (f_dtIdx < 0) { n_icX = dtPosX[offset]; n_icY = dtPosY[offset]; } n_icZ = n_icX + n_icY * widthROI; if (!isMultiobject || (isMultiobject && (imageObjects[n_icZ] - 1) == objectId && ((imageObjects[offsetX + offsetY * widthROI] - 1) == objectId || (imageObjects[offsetX + offsetY * widthROI] - 1) == -1 ))) { n_hidx = 4096 + 512 * f_dtIdx; if (n_hidx >= 0 && n_hidx < 8192) { f_heaviside = tex1D(g_texHeaviside, n_hidx); imagePixel = imageRegistered[offset]; n_greyPixel = int(float(imagePixel.x) * 0.3f + float(imagePixel.y) * 0.59f + float(imagePixel.z) * 0.11f); n_currentHistogram = 0; if (n_greyPixel < 128) n_currentHistogram = 3; else if (n_greyPixel < 192) n_currentHistogram = 2; else if (n_greyPixel < 224) n_currentHistogram = 1; //currentHistogram = 2; imagePixel.x = (imagePixel.x >> histFactors[n_currentHistogram]) & (histNoBins[n_currentHistogram] - 1); imagePixel.y = (imagePixel.y >> histFactors[n_currentHistogram]) & (histNoBins[n_currentHistogram] - 1); 
imagePixel.z = (imagePixel.z >> histFactors[n_currentHistogram]) & (histNoBins[n_currentHistogram] - 1); n_pidx = (imagePixel.x + imagePixel.y * histNoBins[n_currentHistogram]) * histNoBins[n_currentHistogram] + imagePixel.z; histogramPixel = histogram[histOffsets[n_currentHistogram] + n_pidx]; f_pYF = histogramPixel.x + 0.0000001f; f_pYB = histogramPixel.y + 0.0000001f; f_dirac = (1.0f / float(PI)) * (1.0f / (f_dtIdx * f_dtIdx + 1.0f) + float(1e-3)); f_fPPGeneric = f_dirac * (f_pYF - f_pYB) / (f_heaviside * (f_pYF - f_pYB) + f_pYB); f_xProjected[0] = 2.0f * (n_icX + minX - (float) viewTransform[0]) / (float) viewTransform[2] - 1.0f; f_xProjected[1] = 2.0f * (n_icY + minY - (float) viewTransform[1]) / (float) viewTransform[3] - 1.0f; f_xProjected[2] = 2.0f * ((float)imageZBuffer[n_icZ] / (float)MAX_INT) - 1.0f; f_xProjected[3] = 1.0f; f_xUnprojected[0] = invP[0] * f_xProjected[0] + invP[4] * f_xProjected[1] + invP[8] * f_xProjected[2] + invP[12] * f_xProjected[3]; f_xUnprojected[1] = invP[1] * f_xProjected[0] + invP[5] * f_xProjected[1] + invP[9] * f_xProjected[2] + invP[13] * f_xProjected[3]; f_xUnprojected[2] = invP[2] * f_xProjected[0] + invP[6] * f_xProjected[1] + invP[10] * f_xProjected[2] + invP[14] * f_xProjected[3]; f_xUnprojected[3] = invP[3] * f_xProjected[0] + invP[7] * f_xProjected[1] + invP[11] * f_xProjected[2] + invP[15] * f_xProjected[3]; f_norm = 1.0f/f_xUnprojected[3]; f_xUnprojected[0] *= f_norm; f_xUnprojected[1] *= f_norm; f_xUnprojected[2] *= f_norm; f_xUnprojected[3] *= f_norm; f_xUnrotated[0] = invPM[0] * f_xProjected[0] + invPM[4] * f_xProjected[1] + invPM[8] * f_xProjected[2] + invPM[12] * f_xProjected[3]; f_xUnrotated[1] = invPM[1] * f_xProjected[0] + invPM[5] * f_xProjected[1] + invPM[9] * f_xProjected[2] + invPM[13] * f_xProjected[3]; f_xUnrotated[2] = invPM[2] * f_xProjected[0] + invPM[6] * f_xProjected[1] + invPM[10] * f_xProjected[2] + invPM[14] * f_xProjected[3]; f_xUnrotated[3] = invPM[3] * f_xProjected[0] + invPM[7] * 
f_xProjected[1] + invPM[11] * f_xProjected[2] + invPM[15] * f_xProjected[3]; f_norm = 1.0f/f_xUnrotated[3]; f_xUnrotated[0] *= f_norm; f_xUnrotated[1] *= f_norm; f_xUnrotated[2] *= f_norm; f_xUnrotated[3] *= f_norm; f_otherInfo[0] = projectionParams[0] * dtDX[offset]; f_otherInfo[1] = projectionParams[1] * dtDY[offset]; d_precalcXY = f_xUnprojected[2] * f_xUnprojected[2]; dfPPTranslation.x = -f_otherInfo[0] / f_xUnprojected[2]; dfPPTranslation.y = -f_otherInfo[1] / f_xUnprojected[2]; dfPPTranslation.z = (f_otherInfo[0] * f_xUnprojected[0] + f_otherInfo[1] * f_xUnprojected[1]) / d_precalcXY; f_precalcX = -f_otherInfo[0] / d_precalcXY; d_precalcY = -f_otherInfo[1] / d_precalcXY; dfPPRotation.x = f_precalcX * (f_xUnprojected[2] * (q[1]*f_xUnrotated[1] + q[2]*f_xUnrotated[2]) - f_xUnprojected[0] * (q[2]*f_xUnrotated[0] + q[3]*f_xUnrotated[1] - 2*q[0]*f_xUnrotated[2])) + d_precalcY * (f_xUnprojected[2] * (q[1]*f_xUnrotated[0] - 2*q[0]*f_xUnrotated[1] - q[3]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[2]*f_xUnrotated[0] + q[3]*f_xUnrotated[1] - 2*q[0]*f_xUnrotated[2])); dfPPRotation.y = f_precalcX * (f_xUnprojected[2] * (q[0]*f_xUnrotated[1] - 2*q[1]*f_xUnrotated[0] + q[3]*f_xUnrotated[2]) - f_xUnprojected[0] * (q[2]*f_xUnrotated[1] - q[3]*f_xUnrotated[0] - 2*q[1]*f_xUnrotated[2])) + d_precalcY * (f_xUnprojected[2] * (q[0]*f_xUnrotated[0] + q[2]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[2]*f_xUnrotated[1] - q[3]*f_xUnrotated[0] - 2*q[1]*f_xUnrotated[2])); dfPPRotation.z = f_precalcX * (f_xUnprojected[2] * (q[0]*f_xUnrotated[2] - q[3]*f_xUnrotated[1] - 2*q[2]*f_xUnrotated[0]) - f_xUnprojected[0] * (q[0]*f_xUnrotated[0] + q[1]*f_xUnrotated[1])) + d_precalcY * (f_xUnprojected[2] * (q[3]*f_xUnrotated[0] - 2*q[2]*f_xUnrotated[1] + q[1]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[0]*f_xUnrotated[0] + q[1]*f_xUnrotated[1])); dfPPRotation.w = f_precalcX * (f_xUnprojected[2] * (q[1]*f_xUnrotated[2] - q[2]*f_xUnrotated[1]) - f_xUnprojected[0] * (q[0]*f_xUnrotated[1] - 
q[1]*f_xUnrotated[0])) + d_precalcY * (f_xUnprojected[2] * (q[2]*f_xUnrotated[0] - q[0]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[0]*f_xUnrotated[1] - q[1]*f_xUnrotated[0])); f_xProjected[0] = 2.0f * (n_icX + minX - (float) viewTransform[0]) / (float) viewTransform[2] - 1.0f; f_xProjected[1] = 2.0f * (n_icY + minY - (float) viewTransform[1]) / (float) viewTransform[3] - 1.0f; f_xProjected[2] = 2.0f * ((float)imageZBufferInverse[n_icZ] / (float)MAX_INT) - 1.0f; f_xProjected[3] = 1.0f; f_xUnprojected[0] = invP[0] * f_xProjected[0] + invP[4] * f_xProjected[1] + invP[8] * f_xProjected[2] + invP[12] * f_xProjected[3]; f_xUnprojected[1] = invP[1] * f_xProjected[0] + invP[5] * f_xProjected[1] + invP[9] * f_xProjected[2] + invP[13] * f_xProjected[3]; f_xUnprojected[2] = invP[2] * f_xProjected[0] + invP[6] * f_xProjected[1] + invP[10] * f_xProjected[2] + invP[14] * f_xProjected[3]; f_xUnprojected[3] = invP[3] * f_xProjected[0] + invP[7] * f_xProjected[1] + invP[11] * f_xProjected[2] + invP[15] * f_xProjected[3]; f_norm = 1.0f/f_xUnprojected[3]; f_xUnprojected[0] *= f_norm; f_xUnprojected[1] *= f_norm; f_xUnprojected[2] *= f_norm; f_xUnprojected[3] *= f_norm; f_xUnrotated[0] = invPM[0] * f_xProjected[0] + invPM[4] * f_xProjected[1] + invPM[8] * f_xProjected[2] + invPM[12] * f_xProjected[3]; f_xUnrotated[1] = invPM[1] * f_xProjected[0] + invPM[5] * f_xProjected[1] + invPM[9] * f_xProjected[2] + invPM[13] * f_xProjected[3]; f_xUnrotated[2] = invPM[2] * f_xProjected[0] + invPM[6] * f_xProjected[1] + invPM[10] * f_xProjected[2] + invPM[14] * f_xProjected[3]; f_xUnrotated[3] = invPM[3] * f_xProjected[0] + invPM[7] * f_xProjected[1] + invPM[11] * f_xProjected[2] + invPM[15] * f_xProjected[3]; f_norm = 1.0f/f_xUnrotated[3]; f_xUnrotated[0] *= f_norm; f_xUnrotated[1] *= f_norm; f_xUnrotated[2] *= f_norm; f_xUnrotated[3] *= f_norm; d_precalcXY = f_xUnprojected[2] * f_xUnprojected[2]; dfPPTranslation.x += -f_otherInfo[0] / f_xUnprojected[2]; dfPPTranslation.y += -f_otherInfo[1] / 
f_xUnprojected[2]; dfPPTranslation.z += (f_otherInfo[0] * f_xUnprojected[0] + f_otherInfo[1] * f_xUnprojected[1]) / d_precalcXY; f_precalcX = -f_otherInfo[0] / d_precalcXY; d_precalcY = -f_otherInfo[1] / d_precalcXY; dfPPRotation.x += f_precalcX * (f_xUnprojected[2] * (q[1]*f_xUnrotated[1] + q[2]*f_xUnrotated[2]) - f_xUnprojected[0] * (q[2]*f_xUnrotated[0] + q[3]*f_xUnrotated[1] - 2*q[0]*f_xUnrotated[2])) + d_precalcY * (f_xUnprojected[2] * (q[1]*f_xUnrotated[0] - 2*q[0]*f_xUnrotated[1] - q[3]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[2]*f_xUnrotated[0] + q[3]*f_xUnrotated[1] - 2*q[0]*f_xUnrotated[2])); dfPPRotation.y += f_precalcX * (f_xUnprojected[2] * (q[0]*f_xUnrotated[1] - 2*q[1]*f_xUnrotated[0] + q[3]*f_xUnrotated[2]) - f_xUnprojected[0] * (q[2]*f_xUnrotated[1] - q[3]*f_xUnrotated[0] - 2*q[1]*f_xUnrotated[2])) + d_precalcY * (f_xUnprojected[2] * (q[0]*f_xUnrotated[0] + q[2]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[2]*f_xUnrotated[1] - q[3]*f_xUnrotated[0] - 2*q[1]*f_xUnrotated[2])); dfPPRotation.z += f_precalcX * (f_xUnprojected[2] * (q[0]*f_xUnrotated[2] - q[3]*f_xUnrotated[1] - 2*q[2]*f_xUnrotated[0]) - f_xUnprojected[0] * (q[0]*f_xUnrotated[0] + q[1]*f_xUnrotated[1])) + d_precalcY * (f_xUnprojected[2] * (q[3]*f_xUnrotated[0] - 2*q[2]*f_xUnrotated[1] + q[1]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[0]*f_xUnrotated[0] + q[1]*f_xUnrotated[1])); dfPPRotation.w += f_precalcX * (f_xUnprojected[2] * (q[1]*f_xUnrotated[2] - q[2]*f_xUnrotated[1]) - f_xUnprojected[0] * (q[0]*f_xUnrotated[1] - q[1]*f_xUnrotated[0])) + d_precalcY * (f_xUnprojected[2] * (q[2]*f_xUnrotated[0] - q[0]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[0]*f_xUnrotated[1] - q[1]*f_xUnrotated[0])); dfPPTranslation.x *= f_fPPGeneric; dfPPTranslation.y *= f_fPPGeneric; dfPPTranslation.z *= f_fPPGeneric; dfPPRotation.x *= f_fPPGeneric; dfPPRotation.y *= f_fPPGeneric; dfPPRotation.z *= f_fPPGeneric; dfPPRotation.w *= f_fPPGeneric; sdataTranslation[offsetInBlock].x = dfPPTranslation.x; 
sdataTranslation[offsetInBlock].y = dfPPTranslation.y; sdataTranslation[offsetInBlock].z = dfPPTranslation.z; sdataRotation[offsetInBlock].x = dfPPRotation.x; sdataRotation[offsetInBlock].y = dfPPRotation.y; sdataRotation[offsetInBlock].z = dfPPRotation.z; sdataRotation[offsetInBlock].w = dfPPRotation.w; } } } } __syncthreads(); int sdataTargetOffset; for(unsigned int s = blockDim.x >> 1; s>0; s>>=1) { if (threadIdx.x < s) { sdataTargetOffset = (threadIdx.x + s) + blockDim.x * threadIdx.y; sdataTranslation[offsetInBlock].x += sdataTranslation[sdataTargetOffset].x; sdataTranslation[offsetInBlock].y += sdataTranslation[sdataTargetOffset].y; sdataTranslation[offsetInBlock].z += sdataTranslation[sdataTargetOffset].z; } __syncthreads(); } for(unsigned int s = blockDim.y >> 1; s>0; s>>=1) { if (threadIdx.y < s) { sdataTargetOffset = threadIdx.x + blockDim.x * (threadIdx.y + s); sdataTranslation[offsetInBlock].x += sdataTranslation[sdataTargetOffset].x; sdataTranslation[offsetInBlock].y += sdataTranslation[sdataTargetOffset].y; sdataTranslation[offsetInBlock].z += sdataTranslation[sdataTargetOffset].z; } __syncthreads(); } for(unsigned int s = blockDim.x >> 1; s>0; s>>=1) { if (threadIdx.x < s) { sdataTargetOffset = (threadIdx.x + s) + blockDim.x * threadIdx.y; sdataRotation[offsetInBlock].x += sdataRotation[sdataTargetOffset].x; sdataRotation[offsetInBlock].y += sdataRotation[sdataTargetOffset].y; sdataRotation[offsetInBlock].z += sdataRotation[sdataTargetOffset].z; sdataRotation[offsetInBlock].w += sdataRotation[sdataTargetOffset].w; } __syncthreads(); } for(unsigned int s = blockDim.y >> 1; s>0; s>>=1) { if (threadIdx.y < s) { sdataTargetOffset = threadIdx.x + blockDim.x * (threadIdx.y + s); sdataRotation[offsetInBlock].x += sdataRotation[sdataTargetOffset].x; sdataRotation[offsetInBlock].y += sdataRotation[sdataTargetOffset].y; sdataRotation[offsetInBlock].z += sdataRotation[sdataTargetOffset].z; sdataRotation[offsetInBlock].w += sdataRotation[sdataTargetOffset].w; } 
__syncthreads(); } if (threadIdx.x == 0 && threadIdx.y == 0) { int offsetDfx = blockIdx.x + blockIdx.y * gridDim.x; dfxTranslation[offsetDfx] = sdataTranslation[offsetInBlock]; dfxRotation[offsetDfx] = sdataRotation[offsetInBlock]; } }
c7531fb9dbbcd634904cf7d11f83598210677407.cu
#include "CUDAEF.h" #include "CUDAData.h" extern CUDAData* cudaData; texture<float, 1, cudaReadModeElementType> g_texHeaviside; //a single object and a single view per video card at any one time __device__ __constant__ int viewTransform[4]; __device__ __constant__ float invP[16]; __device__ __constant__ float Mplane[4][4]; __device__ __constant__ float projectionParams[5]; __device__ __constant__ float invPM[16]; __device__ __constant__ float q[4]; __device__ __constant__ int histOffsets[4]; __device__ __constant__ int histFactors[4]; __device__ __constant__ int histNoBins[4]; __host__ void initialiseEF(int width, int height, float* heavisideFunction, int heavisideFunctionSize) { //TODO FIXME VARHISTBINS int noVarBinHistograms, noVarBinHistogramBins[4]; int h_histOffsets[8], h_histFactors[8], h_noBins[8]; noVarBinHistograms = 4; noVarBinHistogramBins[0] = 8; noVarBinHistogramBins[1] = 16; noVarBinHistogramBins[2] = 32; noVarBinHistogramBins[3] = 64; h_histFactors[0] = 5; h_histFactors[1] = 4; h_histFactors[2] = 3; h_histFactors[3] = 2; cudaData->histogramSize = 0; for (int i=0; i<noVarBinHistograms; i++) { h_noBins[i] = noVarBinHistogramBins[i]; h_histOffsets[i] = cudaData->histogramSize; cudaData->histogramSize += h_noBins[i] * h_noBins[i] * h_noBins[i]; } perseusSafeCall(cudaMemcpyToSymbol(histOffsets, h_histOffsets, noVarBinHistograms * sizeof(int), 0, cudaMemcpyHostToDevice)); perseusSafeCall(cudaMemcpyToSymbol(histFactors, h_histFactors, noVarBinHistograms * sizeof(int), 0, cudaMemcpyHostToDevice)); perseusSafeCall(cudaMemcpyToSymbol(histNoBins, h_noBins, noVarBinHistograms * sizeof(int), 0, cudaMemcpyHostToDevice)); perseusSafeCall(cudaMalloc((void**)&cudaData->dfxTranslation, 10000 * sizeof(float3))); //1080p image 16x16 blocks perseusSafeCall(cudaMalloc((void**)&cudaData->dfxRotation, 10000 * sizeof(float4))); //1080p image 16x16 blocks perseusSafeCall(cudaMallocHost((void**)&cudaData->dfxResultTranslation, 10000 * sizeof(float3))); //1080p image 16x16 
blocks perseusSafeCall(cudaMallocHost((void**)&cudaData->dfxResultRotation, 10000 * sizeof(float4))); //1080p image 16x16 blocks // copy the heviside function from the host memory into the device memory cudaChannelFormatDesc descTexHeaviside = cudaCreateChannelDesc<float>(); perseusSafeCall(cudaMallocArray(&cudaData->arrayHeaviside, &descTexHeaviside, heavisideFunctionSize, 1)); perseusSafeCall(cudaMemcpyToArray(cudaData->arrayHeaviside, 0, 0, heavisideFunction, heavisideFunctionSize * sizeof(float), cudaMemcpyHostToDevice)); } __host__ void shutdownEF() { perseusSafeCall(cudaFreeArray(cudaData->arrayHeaviside)); perseusSafeCall(cudaFree(cudaData->dfxTranslation)); perseusSafeCall(cudaFree(cudaData->dfxRotation)); perseusSafeCall(cudaFreeHost(cudaData->dfxResultTranslation)); perseusSafeCall(cudaFreeHost(cudaData->dfxResultRotation)); } __host__ void registerViewGeometricData(float *invP_EF, float *projectionParams_EF, int *viewTransform_EF) { perseusSafeCall(cudaMemcpyToSymbol(invP, invP_EF, 16 * sizeof(float), 0, cudaMemcpyHostToDevice)); perseusSafeCall(cudaMemcpyToSymbol(projectionParams, projectionParams_EF, 5 * sizeof(float), 0, cudaMemcpyHostToDevice)); perseusSafeCall(cudaMemcpyToSymbol(viewTransform, viewTransform_EF, 4 * sizeof(int), 0, cudaMemcpyHostToDevice)); } __host__ void registerObjectGeometricData(float* rotationQuaternion_EF, float* invPM_EF) { perseusSafeCall(cudaMemcpyToSymbol(invPM, invPM_EF, 16 * sizeof(float), 0, cudaMemcpyHostToDevice)); rotationQuaternion_EF[0] *= 2; rotationQuaternion_EF[1] *= 2; rotationQuaternion_EF[2] *= 2; rotationQuaternion_EF[3] *= 2; perseusSafeCall(cudaMemcpyToSymbol(q, rotationQuaternion_EF, 4 * sizeof(float), 0, cudaMemcpyHostToDevice)); } __host__ void processEFD1(float* dpose, int *roiNormalised, int *roiGenerated, float2* histogram, uchar4 *imageRegistered, unsigned char *imageObjects, bool isMultiobject, unsigned int *imageZBuffer, unsigned int *imageZBufferInverse, float *dt, int *dtPosX, int *dtPosY, float 
*dtDX, float *dtDY, int objectId) { size_t i; dim3 threadSize(16,16); dim3 blockSize((int)ceil((float)roiNormalised[4] / (float)16), (int)ceil((float)roiNormalised[5] / (float)16)); // copy heaviside function table into the GPU texture perseusSafeCall(cudaBindTextureToArray(g_texHeaviside, cudaData->arrayHeaviside)); processEFD1_global<<<blockSize, threadSize>>>(cudaData->dfxTranslation, cudaData->dfxRotation, histogram, imageRegistered, imageObjects, isMultiobject, imageZBuffer, imageZBufferInverse, dt, dtPosX, dtPosY, dtDX, dtDY, roiGenerated[0], roiGenerated[1], roiGenerated[4], roiGenerated[5], objectId); perseusSafeCall(cudaUnbindTexture(g_texHeaviside)); perseusSafeCall(cudaDeviceSynchronize()); perseusSafeCall(cudaThreadSynchronize()); cudaMemcpy(cudaData->dfxResultTranslation, cudaData->dfxTranslation, blockSize.x * blockSize.y * sizeof(float3), cudaMemcpyDeviceToHost); cudaMemcpy(cudaData->dfxResultRotation, cudaData->dfxRotation, blockSize.x * blockSize.y * sizeof(float4), cudaMemcpyDeviceToHost); for (i=0; i<7; i++) dpose[i] = 0; for (size_t i=0; i < blockSize.x * blockSize.y; i++) { dpose[0] += cudaData->dfxResultTranslation[i].x; dpose[1] += cudaData->dfxResultTranslation[i].y; dpose[2] += cudaData->dfxResultTranslation[i].z; dpose[3] += cudaData->dfxResultRotation[i].x; dpose[4] += cudaData->dfxResultRotation[i].y; dpose[5] += cudaData->dfxResultRotation[i].z; dpose[6] += cudaData->dfxResultRotation[i].w; } } __global__ void processEFD1_global( float3 *dfxTranslation, float4 *dfxRotation, float2 *histogram, uchar4 *imageRegistered, unsigned char *imageObjects, bool isMultiobject, unsigned int *imageZBuffer, unsigned int *imageZBufferInverse, float *dt, int *dtPosX, int *dtPosY, float *dtDX, float *dtDY, int minX, int minY, int widthROI, int heightROI, int objectId) { __shared__ float3 sdataTranslation[256]; __shared__ float4 sdataRotation[256]; int offsetX = threadIdx.x + blockIdx.x * blockDim.x; int offsetY = threadIdx.y + blockIdx.y * blockDim.y; 
int offset = offsetX + offsetY * widthROI; int offsetInBlock = threadIdx.x + blockDim.x * threadIdx.y; float3 dfPPTranslation; dfPPTranslation.x = 0; dfPPTranslation.y = 0; dfPPTranslation.z = 0; float4 dfPPRotation; dfPPRotation.x = 0; dfPPRotation.y = 0; dfPPRotation.z = 0; dfPPRotation.w = 0; sdataTranslation[offsetInBlock] = dfPPTranslation; sdataRotation[offsetInBlock] = dfPPRotation; if (offsetX < widthROI && offsetY < heightROI) { uchar4 imagePixel; int n_icX, n_icY, n_icZ; int n_greyPixel, n_currentHistogram; int n_hidx, n_pidx; float2 histogramPixel; float f_pYB, f_pYF; float f_xProjected[4], f_xUnprojected[4], f_xUnrotated[4]; float f_fPPGeneric, f_dirac, f_heaviside; float f_otherInfo[2]; float f_precalcX, d_precalcY, d_precalcXY; float f_dtIdx, f_norm; if (dtPosY[offset] >= 0)// && imageRegistered[offset].w > 128) { f_dtIdx = dt[offset]; n_icX = offsetX; n_icY = offsetY; if (f_dtIdx < 0) { n_icX = dtPosX[offset]; n_icY = dtPosY[offset]; } n_icZ = n_icX + n_icY * widthROI; if (!isMultiobject || (isMultiobject && (imageObjects[n_icZ] - 1) == objectId && ((imageObjects[offsetX + offsetY * widthROI] - 1) == objectId || (imageObjects[offsetX + offsetY * widthROI] - 1) == -1 ))) { n_hidx = 4096 + 512 * f_dtIdx; if (n_hidx >= 0 && n_hidx < 8192) { f_heaviside = tex1D(g_texHeaviside, n_hidx); imagePixel = imageRegistered[offset]; n_greyPixel = int(float(imagePixel.x) * 0.3f + float(imagePixel.y) * 0.59f + float(imagePixel.z) * 0.11f); n_currentHistogram = 0; if (n_greyPixel < 128) n_currentHistogram = 3; else if (n_greyPixel < 192) n_currentHistogram = 2; else if (n_greyPixel < 224) n_currentHistogram = 1; //currentHistogram = 2; imagePixel.x = (imagePixel.x >> histFactors[n_currentHistogram]) & (histNoBins[n_currentHistogram] - 1); imagePixel.y = (imagePixel.y >> histFactors[n_currentHistogram]) & (histNoBins[n_currentHistogram] - 1); imagePixel.z = (imagePixel.z >> histFactors[n_currentHistogram]) & (histNoBins[n_currentHistogram] - 1); n_pidx = (imagePixel.x 
+ imagePixel.y * histNoBins[n_currentHistogram]) * histNoBins[n_currentHistogram] + imagePixel.z; histogramPixel = histogram[histOffsets[n_currentHistogram] + n_pidx]; f_pYF = histogramPixel.x + 0.0000001f; f_pYB = histogramPixel.y + 0.0000001f; f_dirac = (1.0f / float(PI)) * (1.0f / (f_dtIdx * f_dtIdx + 1.0f) + float(1e-3)); f_fPPGeneric = f_dirac * (f_pYF - f_pYB) / (f_heaviside * (f_pYF - f_pYB) + f_pYB); f_xProjected[0] = 2.0f * (n_icX + minX - (float) viewTransform[0]) / (float) viewTransform[2] - 1.0f; f_xProjected[1] = 2.0f * (n_icY + minY - (float) viewTransform[1]) / (float) viewTransform[3] - 1.0f; f_xProjected[2] = 2.0f * ((float)imageZBuffer[n_icZ] / (float)MAX_INT) - 1.0f; f_xProjected[3] = 1.0f; f_xUnprojected[0] = invP[0] * f_xProjected[0] + invP[4] * f_xProjected[1] + invP[8] * f_xProjected[2] + invP[12] * f_xProjected[3]; f_xUnprojected[1] = invP[1] * f_xProjected[0] + invP[5] * f_xProjected[1] + invP[9] * f_xProjected[2] + invP[13] * f_xProjected[3]; f_xUnprojected[2] = invP[2] * f_xProjected[0] + invP[6] * f_xProjected[1] + invP[10] * f_xProjected[2] + invP[14] * f_xProjected[3]; f_xUnprojected[3] = invP[3] * f_xProjected[0] + invP[7] * f_xProjected[1] + invP[11] * f_xProjected[2] + invP[15] * f_xProjected[3]; f_norm = 1.0f/f_xUnprojected[3]; f_xUnprojected[0] *= f_norm; f_xUnprojected[1] *= f_norm; f_xUnprojected[2] *= f_norm; f_xUnprojected[3] *= f_norm; f_xUnrotated[0] = invPM[0] * f_xProjected[0] + invPM[4] * f_xProjected[1] + invPM[8] * f_xProjected[2] + invPM[12] * f_xProjected[3]; f_xUnrotated[1] = invPM[1] * f_xProjected[0] + invPM[5] * f_xProjected[1] + invPM[9] * f_xProjected[2] + invPM[13] * f_xProjected[3]; f_xUnrotated[2] = invPM[2] * f_xProjected[0] + invPM[6] * f_xProjected[1] + invPM[10] * f_xProjected[2] + invPM[14] * f_xProjected[3]; f_xUnrotated[3] = invPM[3] * f_xProjected[0] + invPM[7] * f_xProjected[1] + invPM[11] * f_xProjected[2] + invPM[15] * f_xProjected[3]; f_norm = 1.0f/f_xUnrotated[3]; f_xUnrotated[0] *= f_norm; 
f_xUnrotated[1] *= f_norm; f_xUnrotated[2] *= f_norm; f_xUnrotated[3] *= f_norm; f_otherInfo[0] = projectionParams[0] * dtDX[offset]; f_otherInfo[1] = projectionParams[1] * dtDY[offset]; d_precalcXY = f_xUnprojected[2] * f_xUnprojected[2]; dfPPTranslation.x = -f_otherInfo[0] / f_xUnprojected[2]; dfPPTranslation.y = -f_otherInfo[1] / f_xUnprojected[2]; dfPPTranslation.z = (f_otherInfo[0] * f_xUnprojected[0] + f_otherInfo[1] * f_xUnprojected[1]) / d_precalcXY; f_precalcX = -f_otherInfo[0] / d_precalcXY; d_precalcY = -f_otherInfo[1] / d_precalcXY; dfPPRotation.x = f_precalcX * (f_xUnprojected[2] * (q[1]*f_xUnrotated[1] + q[2]*f_xUnrotated[2]) - f_xUnprojected[0] * (q[2]*f_xUnrotated[0] + q[3]*f_xUnrotated[1] - 2*q[0]*f_xUnrotated[2])) + d_precalcY * (f_xUnprojected[2] * (q[1]*f_xUnrotated[0] - 2*q[0]*f_xUnrotated[1] - q[3]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[2]*f_xUnrotated[0] + q[3]*f_xUnrotated[1] - 2*q[0]*f_xUnrotated[2])); dfPPRotation.y = f_precalcX * (f_xUnprojected[2] * (q[0]*f_xUnrotated[1] - 2*q[1]*f_xUnrotated[0] + q[3]*f_xUnrotated[2]) - f_xUnprojected[0] * (q[2]*f_xUnrotated[1] - q[3]*f_xUnrotated[0] - 2*q[1]*f_xUnrotated[2])) + d_precalcY * (f_xUnprojected[2] * (q[0]*f_xUnrotated[0] + q[2]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[2]*f_xUnrotated[1] - q[3]*f_xUnrotated[0] - 2*q[1]*f_xUnrotated[2])); dfPPRotation.z = f_precalcX * (f_xUnprojected[2] * (q[0]*f_xUnrotated[2] - q[3]*f_xUnrotated[1] - 2*q[2]*f_xUnrotated[0]) - f_xUnprojected[0] * (q[0]*f_xUnrotated[0] + q[1]*f_xUnrotated[1])) + d_precalcY * (f_xUnprojected[2] * (q[3]*f_xUnrotated[0] - 2*q[2]*f_xUnrotated[1] + q[1]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[0]*f_xUnrotated[0] + q[1]*f_xUnrotated[1])); dfPPRotation.w = f_precalcX * (f_xUnprojected[2] * (q[1]*f_xUnrotated[2] - q[2]*f_xUnrotated[1]) - f_xUnprojected[0] * (q[0]*f_xUnrotated[1] - q[1]*f_xUnrotated[0])) + d_precalcY * (f_xUnprojected[2] * (q[2]*f_xUnrotated[0] - q[0]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[0]*f_xUnrotated[1] - 
q[1]*f_xUnrotated[0])); f_xProjected[0] = 2.0f * (n_icX + minX - (float) viewTransform[0]) / (float) viewTransform[2] - 1.0f; f_xProjected[1] = 2.0f * (n_icY + minY - (float) viewTransform[1]) / (float) viewTransform[3] - 1.0f; f_xProjected[2] = 2.0f * ((float)imageZBufferInverse[n_icZ] / (float)MAX_INT) - 1.0f; f_xProjected[3] = 1.0f; f_xUnprojected[0] = invP[0] * f_xProjected[0] + invP[4] * f_xProjected[1] + invP[8] * f_xProjected[2] + invP[12] * f_xProjected[3]; f_xUnprojected[1] = invP[1] * f_xProjected[0] + invP[5] * f_xProjected[1] + invP[9] * f_xProjected[2] + invP[13] * f_xProjected[3]; f_xUnprojected[2] = invP[2] * f_xProjected[0] + invP[6] * f_xProjected[1] + invP[10] * f_xProjected[2] + invP[14] * f_xProjected[3]; f_xUnprojected[3] = invP[3] * f_xProjected[0] + invP[7] * f_xProjected[1] + invP[11] * f_xProjected[2] + invP[15] * f_xProjected[3]; f_norm = 1.0f/f_xUnprojected[3]; f_xUnprojected[0] *= f_norm; f_xUnprojected[1] *= f_norm; f_xUnprojected[2] *= f_norm; f_xUnprojected[3] *= f_norm; f_xUnrotated[0] = invPM[0] * f_xProjected[0] + invPM[4] * f_xProjected[1] + invPM[8] * f_xProjected[2] + invPM[12] * f_xProjected[3]; f_xUnrotated[1] = invPM[1] * f_xProjected[0] + invPM[5] * f_xProjected[1] + invPM[9] * f_xProjected[2] + invPM[13] * f_xProjected[3]; f_xUnrotated[2] = invPM[2] * f_xProjected[0] + invPM[6] * f_xProjected[1] + invPM[10] * f_xProjected[2] + invPM[14] * f_xProjected[3]; f_xUnrotated[3] = invPM[3] * f_xProjected[0] + invPM[7] * f_xProjected[1] + invPM[11] * f_xProjected[2] + invPM[15] * f_xProjected[3]; f_norm = 1.0f/f_xUnrotated[3]; f_xUnrotated[0] *= f_norm; f_xUnrotated[1] *= f_norm; f_xUnrotated[2] *= f_norm; f_xUnrotated[3] *= f_norm; d_precalcXY = f_xUnprojected[2] * f_xUnprojected[2]; dfPPTranslation.x += -f_otherInfo[0] / f_xUnprojected[2]; dfPPTranslation.y += -f_otherInfo[1] / f_xUnprojected[2]; dfPPTranslation.z += (f_otherInfo[0] * f_xUnprojected[0] + f_otherInfo[1] * f_xUnprojected[1]) / d_precalcXY; f_precalcX = 
-f_otherInfo[0] / d_precalcXY; d_precalcY = -f_otherInfo[1] / d_precalcXY; dfPPRotation.x += f_precalcX * (f_xUnprojected[2] * (q[1]*f_xUnrotated[1] + q[2]*f_xUnrotated[2]) - f_xUnprojected[0] * (q[2]*f_xUnrotated[0] + q[3]*f_xUnrotated[1] - 2*q[0]*f_xUnrotated[2])) + d_precalcY * (f_xUnprojected[2] * (q[1]*f_xUnrotated[0] - 2*q[0]*f_xUnrotated[1] - q[3]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[2]*f_xUnrotated[0] + q[3]*f_xUnrotated[1] - 2*q[0]*f_xUnrotated[2])); dfPPRotation.y += f_precalcX * (f_xUnprojected[2] * (q[0]*f_xUnrotated[1] - 2*q[1]*f_xUnrotated[0] + q[3]*f_xUnrotated[2]) - f_xUnprojected[0] * (q[2]*f_xUnrotated[1] - q[3]*f_xUnrotated[0] - 2*q[1]*f_xUnrotated[2])) + d_precalcY * (f_xUnprojected[2] * (q[0]*f_xUnrotated[0] + q[2]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[2]*f_xUnrotated[1] - q[3]*f_xUnrotated[0] - 2*q[1]*f_xUnrotated[2])); dfPPRotation.z += f_precalcX * (f_xUnprojected[2] * (q[0]*f_xUnrotated[2] - q[3]*f_xUnrotated[1] - 2*q[2]*f_xUnrotated[0]) - f_xUnprojected[0] * (q[0]*f_xUnrotated[0] + q[1]*f_xUnrotated[1])) + d_precalcY * (f_xUnprojected[2] * (q[3]*f_xUnrotated[0] - 2*q[2]*f_xUnrotated[1] + q[1]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[0]*f_xUnrotated[0] + q[1]*f_xUnrotated[1])); dfPPRotation.w += f_precalcX * (f_xUnprojected[2] * (q[1]*f_xUnrotated[2] - q[2]*f_xUnrotated[1]) - f_xUnprojected[0] * (q[0]*f_xUnrotated[1] - q[1]*f_xUnrotated[0])) + d_precalcY * (f_xUnprojected[2] * (q[2]*f_xUnrotated[0] - q[0]*f_xUnrotated[2]) - f_xUnprojected[1] * (q[0]*f_xUnrotated[1] - q[1]*f_xUnrotated[0])); dfPPTranslation.x *= f_fPPGeneric; dfPPTranslation.y *= f_fPPGeneric; dfPPTranslation.z *= f_fPPGeneric; dfPPRotation.x *= f_fPPGeneric; dfPPRotation.y *= f_fPPGeneric; dfPPRotation.z *= f_fPPGeneric; dfPPRotation.w *= f_fPPGeneric; sdataTranslation[offsetInBlock].x = dfPPTranslation.x; sdataTranslation[offsetInBlock].y = dfPPTranslation.y; sdataTranslation[offsetInBlock].z = dfPPTranslation.z; sdataRotation[offsetInBlock].x = dfPPRotation.x; 
sdataRotation[offsetInBlock].y = dfPPRotation.y; sdataRotation[offsetInBlock].z = dfPPRotation.z; sdataRotation[offsetInBlock].w = dfPPRotation.w; } } } } __syncthreads(); int sdataTargetOffset; for(unsigned int s = blockDim.x >> 1; s>0; s>>=1) { if (threadIdx.x < s) { sdataTargetOffset = (threadIdx.x + s) + blockDim.x * threadIdx.y; sdataTranslation[offsetInBlock].x += sdataTranslation[sdataTargetOffset].x; sdataTranslation[offsetInBlock].y += sdataTranslation[sdataTargetOffset].y; sdataTranslation[offsetInBlock].z += sdataTranslation[sdataTargetOffset].z; } __syncthreads(); } for(unsigned int s = blockDim.y >> 1; s>0; s>>=1) { if (threadIdx.y < s) { sdataTargetOffset = threadIdx.x + blockDim.x * (threadIdx.y + s); sdataTranslation[offsetInBlock].x += sdataTranslation[sdataTargetOffset].x; sdataTranslation[offsetInBlock].y += sdataTranslation[sdataTargetOffset].y; sdataTranslation[offsetInBlock].z += sdataTranslation[sdataTargetOffset].z; } __syncthreads(); } for(unsigned int s = blockDim.x >> 1; s>0; s>>=1) { if (threadIdx.x < s) { sdataTargetOffset = (threadIdx.x + s) + blockDim.x * threadIdx.y; sdataRotation[offsetInBlock].x += sdataRotation[sdataTargetOffset].x; sdataRotation[offsetInBlock].y += sdataRotation[sdataTargetOffset].y; sdataRotation[offsetInBlock].z += sdataRotation[sdataTargetOffset].z; sdataRotation[offsetInBlock].w += sdataRotation[sdataTargetOffset].w; } __syncthreads(); } for(unsigned int s = blockDim.y >> 1; s>0; s>>=1) { if (threadIdx.y < s) { sdataTargetOffset = threadIdx.x + blockDim.x * (threadIdx.y + s); sdataRotation[offsetInBlock].x += sdataRotation[sdataTargetOffset].x; sdataRotation[offsetInBlock].y += sdataRotation[sdataTargetOffset].y; sdataRotation[offsetInBlock].z += sdataRotation[sdataTargetOffset].z; sdataRotation[offsetInBlock].w += sdataRotation[sdataTargetOffset].w; } __syncthreads(); } if (threadIdx.x == 0 && threadIdx.y == 0) { int offsetDfx = blockIdx.x + blockIdx.y * gridDim.x; dfxTranslation[offsetDfx] = 
sdataTranslation[offsetInBlock]; dfxRotation[offsetDfx] = sdataRotation[offsetInBlock]; } }
d3c1211f58f4e784c89873e4caa2945167078ed6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sha1.h" #include "aes_core.h" #include "aes_kernel.h" #include <stdint.h> #include <assert.h> /* AES counter mode + HMAC SHA-1, the encryption of each block in AES counter mode is not parallelized in this implementation */ __global__ void aes_ctr_sha1_kernel( const uint8_t *input_buf, uint8_t *output_buf, const uint8_t *aes_keys, uint8_t *ivs, const char *hmac_keys, const uint32_t *pkt_offset, const uint16_t *length, const unsigned int num_flows, uint8_t *checkbits=0) { /************************************************************************** AES Encryption is started first ***************************************************************************/ __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; __shared__ uint32_t shared_Rcon[10]; /* Private counter 128 bits */ uint64_t keystream[2]; /* initialize T boxes */ for (unsigned i = 0 ; i *blockDim.x < 256 ; i++) { unsigned index = threadIdx.x + i * blockDim.x; if (index >= 256) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; } for(unsigned i = 0; i * blockDim.x < 10; i++){ int index = threadIdx.x + blockDim.x * i; if(index < 10){ shared_Rcon[index] = rcon[index]; } } /* ----debug-----*/ if (idx >= num_flows) return; /* make sure T boxes have been initialized. 
*/ __syncthreads(); /* Encrypt using counter mode, this is the actual length of the packet */ /* pkt_offset[idx + 1] - pkt_offset[idx] is used for "length[idx] + padding for HMAC + HMAC sha-1 tag" */ unsigned long len = length[idx]; /* Skip RTP header to Locate the data to be encrypted */ uint8_t *in = pkt_offset[idx] + input_buf; uint8_t cc = (in[0] & 0x80) >> 4; /* Get the number of CSRC identifiers */ uint32_t header_len = (uint32_t *)((uint8_t *)in + 96 + 32 * cc + 4); /* Get the optional header length */ header_len = 128 + 32 * cc + header_len; /* Get the total header length */ /* FIXME: optimization : copy the RTP header to output */ for (i = 0; i < header_len; i ++) { ((char *)out)[i] = ((char *)in)[i]; } /* Jump to the parts need encryption */ in = in + header_len /* Get to the payload */ uint8_t *out = pkt_offset[idx] + output_buf; out = out + header_len; /* Get to the payload */ /* data length that needs encryption */ len -= header_len; /* ----debug----- */ if (len <= 0) return; const uint8_t *key = idx * 16 + aes_keys; uint64_t *iv = (uint64_t *) (idx * AES_BLOCK_SIZE + ivs); while (len >= AES_BLOCK_SIZE) { /* for the ith block, its input is ((iv + i) mod 2^128)*/ iv[0] ++; if (iv[0] == 0) iv[1] ++; /* Get the keystream here */ AES_128_encrypt(iv, keystream, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); *((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)keystream); *(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)keystream) + 1); len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } if (len) { /* for the ith block, its input is ((iv + i) mod 2^128)*/ iv[0] ++; if (iv[0] == 0) iv[1] ++; AES_128_encrypt(iv, keystream, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); for(unsigned n = 0; n < len; ++n) out[n] = in[n] ^ ((uint8_t *)keystream)[n]; } __syncthreads(); /************************************************************************** AES Encryption completed, Now we start SHA-1 
Calculation ***************************************************************************/ uint32_t w_register[16]; int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_flows) { uint32_t *w = w_register; hash_digest_t h; uint32_t offset = pkt_offset[idx]; unsigned long length = length[idx]; uint16_t sha1_pad_len = (length + 63 + 9) & (~0x3F); uint32_t *sha1_out = intput_buf + offset + sha1_pad_len; for (unsigned i = 0; i < 16; i++) w[i] = 0x36363636; /* In SRTP, HMAC_KEY_SIZE is 160 bits = 20 bytes */ xorpads(w, (uint32_t *)(hmac_keys + 20 * idx)); h.h1 = 0x67452301; h.h2 = 0xEFCDAB89; h.h3 = 0x98BADCFE; h.h4 = 0x10325476; h.h5 = 0xC3D2E1F0; //SHA1 compute on ipad computeSHA1Block((char*)w, w, 0, 64, h); //SHA1 compute on message unsigned num_iter = (length + 63 + 9) / 64; for (unsigned i = 0; i < num_iter; i ++) computeSHA1Block(input_buf + offset, w, i * 64, length, h); /* In SRTP, sha1_out has only 80 bits output 32+32+16 = 80 */ *(sha1_out) = swap(h.h1); *(sha1_out+1) = swap(h.h2); uint32_t temp = swap(h.h3); *(uint16_t *)(sha1_out+2) = ((uint16_t *)&temp)[0]; //*(sha1_out+2) = swap(h.h3); //*(sha1_out+3) = swap(h.h4); //*(sha1_out+4) = swap(h.h5); h.h1 = 0x67452301; h.h2 = 0xEFCDAB89; h.h3 = 0x98BADCFE; h.h4 = 0x10325476; h.h5 = 0xC3D2E1F0; for (unsigned i = 0; i < 16; i++) w[i] = 0x5c5c5c5c; xorpads(w, (uint32_t*)(hmac_keys + 20 * idx)); //SHA 1 compute on opads computeSHA1Block((char*)w, w, 0, 64, h); //SHA 1 compute on (hash of ipad|m) //HMAC_TAG_SIZE = 10 computeSHA1Block((char*)sha1_out, w, 0, 10, h); *(sha1_out) = swap(h.h1); *(sha1_out+1) = swap(h.h2); temp = swap(h.h3); *(uint16_t *)(sha1_out+2) = ((uint16_t *)&temp)[0]; //*(sha1_out+2) = swap(h.h3); //*(sha1_out+3) = swap(h.h4); //*(sha1_out+4) = swap(h.h5); } __syncthreads(); // Now we set the checkbits if (threadIdx.x == 0) *(checkbits + blockIdx.x) = 1; } void co_aes_sha1_gpu( const uint8_t *in, uint8_t *out, const uint8_t *aes_keys, uint8_t *ivs, const char *hmac_keys, const uint32_t 
*pkt_offset, const uint16_t *actual_length, const unsigned int num_flows, uint8_t *checkbits, unsigned threads_per_blk, hipStream_t stream) { int num_blks = (N + threads_per_blk - 1) / threads_per_blk; if (stream == 0) { hipLaunchKernelGGL(( aes_ctr_sha1_kernel), dim3(num_blks), dim3(threads_per_blk), 0, 0, in, out, aes_keys, ivs, hmac_keys, pkt_offset, actual_length, num_flows, checkbits); } else { hipLaunchKernelGGL(( aes_ctr_sha1_kernel), dim3(num_blks), dim3(threads_per_blk), 0, stream, in, out, aes_keys, ivs, hmac_keys, pkt_offset, actual_length, num_flows, checkbits); } }
d3c1211f58f4e784c89873e4caa2945167078ed6.cu
#include "sha1.h" #include "aes_core.h" #include "aes_kernel.h" #include <stdint.h> #include <assert.h> /* AES counter mode + HMAC SHA-1, the encryption of each block in AES counter mode is not parallelized in this implementation */ __global__ void aes_ctr_sha1_kernel( const uint8_t *input_buf, uint8_t *output_buf, const uint8_t *aes_keys, uint8_t *ivs, const char *hmac_keys, const uint32_t *pkt_offset, const uint16_t *length, const unsigned int num_flows, uint8_t *checkbits=0) { /************************************************************************** AES Encryption is started first ***************************************************************************/ __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; __shared__ uint32_t shared_Rcon[10]; /* Private counter 128 bits */ uint64_t keystream[2]; /* initialize T boxes */ for (unsigned i = 0 ; i *blockDim.x < 256 ; i++) { unsigned index = threadIdx.x + i * blockDim.x; if (index >= 256) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; } for(unsigned i = 0; i * blockDim.x < 10; i++){ int index = threadIdx.x + blockDim.x * i; if(index < 10){ shared_Rcon[index] = rcon[index]; } } /* ----debug-----*/ if (idx >= num_flows) return; /* make sure T boxes have been initialized. 
*/ __syncthreads(); /* Encrypt using counter mode, this is the actual length of the packet */ /* pkt_offset[idx + 1] - pkt_offset[idx] is used for "length[idx] + padding for HMAC + HMAC sha-1 tag" */ unsigned long len = length[idx]; /* Skip RTP header to Locate the data to be encrypted */ uint8_t *in = pkt_offset[idx] + input_buf; uint8_t cc = (in[0] & 0x80) >> 4; /* Get the number of CSRC identifiers */ uint32_t header_len = (uint32_t *)((uint8_t *)in + 96 + 32 * cc + 4); /* Get the optional header length */ header_len = 128 + 32 * cc + header_len; /* Get the total header length */ /* FIXME: optimization : copy the RTP header to output */ for (i = 0; i < header_len; i ++) { ((char *)out)[i] = ((char *)in)[i]; } /* Jump to the parts need encryption */ in = in + header_len /* Get to the payload */ uint8_t *out = pkt_offset[idx] + output_buf; out = out + header_len; /* Get to the payload */ /* data length that needs encryption */ len -= header_len; /* ----debug----- */ if (len <= 0) return; const uint8_t *key = idx * 16 + aes_keys; uint64_t *iv = (uint64_t *) (idx * AES_BLOCK_SIZE + ivs); while (len >= AES_BLOCK_SIZE) { /* for the ith block, its input is ((iv + i) mod 2^128)*/ iv[0] ++; if (iv[0] == 0) iv[1] ++; /* Get the keystream here */ AES_128_encrypt(iv, keystream, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); *((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)keystream); *(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)keystream) + 1); len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } if (len) { /* for the ith block, its input is ((iv + i) mod 2^128)*/ iv[0] ++; if (iv[0] == 0) iv[1] ++; AES_128_encrypt(iv, keystream, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); for(unsigned n = 0; n < len; ++n) out[n] = in[n] ^ ((uint8_t *)keystream)[n]; } __syncthreads(); /************************************************************************** AES Encryption completed, Now we start SHA-1 
Calculation ***************************************************************************/ uint32_t w_register[16]; int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_flows) { uint32_t *w = w_register; hash_digest_t h; uint32_t offset = pkt_offset[idx]; unsigned long length = length[idx]; uint16_t sha1_pad_len = (length + 63 + 9) & (~0x3F); uint32_t *sha1_out = intput_buf + offset + sha1_pad_len; for (unsigned i = 0; i < 16; i++) w[i] = 0x36363636; /* In SRTP, HMAC_KEY_SIZE is 160 bits = 20 bytes */ xorpads(w, (uint32_t *)(hmac_keys + 20 * idx)); h.h1 = 0x67452301; h.h2 = 0xEFCDAB89; h.h3 = 0x98BADCFE; h.h4 = 0x10325476; h.h5 = 0xC3D2E1F0; //SHA1 compute on ipad computeSHA1Block((char*)w, w, 0, 64, h); //SHA1 compute on message unsigned num_iter = (length + 63 + 9) / 64; for (unsigned i = 0; i < num_iter; i ++) computeSHA1Block(input_buf + offset, w, i * 64, length, h); /* In SRTP, sha1_out has only 80 bits output 32+32+16 = 80 */ *(sha1_out) = swap(h.h1); *(sha1_out+1) = swap(h.h2); uint32_t temp = swap(h.h3); *(uint16_t *)(sha1_out+2) = ((uint16_t *)&temp)[0]; //*(sha1_out+2) = swap(h.h3); //*(sha1_out+3) = swap(h.h4); //*(sha1_out+4) = swap(h.h5); h.h1 = 0x67452301; h.h2 = 0xEFCDAB89; h.h3 = 0x98BADCFE; h.h4 = 0x10325476; h.h5 = 0xC3D2E1F0; for (unsigned i = 0; i < 16; i++) w[i] = 0x5c5c5c5c; xorpads(w, (uint32_t*)(hmac_keys + 20 * idx)); //SHA 1 compute on opads computeSHA1Block((char*)w, w, 0, 64, h); //SHA 1 compute on (hash of ipad|m) //HMAC_TAG_SIZE = 10 computeSHA1Block((char*)sha1_out, w, 0, 10, h); *(sha1_out) = swap(h.h1); *(sha1_out+1) = swap(h.h2); temp = swap(h.h3); *(uint16_t *)(sha1_out+2) = ((uint16_t *)&temp)[0]; //*(sha1_out+2) = swap(h.h3); //*(sha1_out+3) = swap(h.h4); //*(sha1_out+4) = swap(h.h5); } __syncthreads(); // Now we set the checkbits if (threadIdx.x == 0) *(checkbits + blockIdx.x) = 1; } void co_aes_sha1_gpu( const uint8_t *in, uint8_t *out, const uint8_t *aes_keys, uint8_t *ivs, const char *hmac_keys, const uint32_t 
*pkt_offset, const uint16_t *actual_length, const unsigned int num_flows, uint8_t *checkbits, unsigned threads_per_blk, cudaStream_t stream) { int num_blks = (N + threads_per_blk - 1) / threads_per_blk; if (stream == 0) { aes_ctr_sha1_kernel<<<num_blks, threads_per_blk>>>( in, out, aes_keys, ivs, hmac_keys, pkt_offset, actual_length, num_flows, checkbits); } else { aes_ctr_sha1_kernel<<<num_blks, threads_per_blk, 0, stream>>>( in, out, aes_keys, ivs, hmac_keys, pkt_offset, actual_length, num_flows, checkbits); } }
91739130d6a86285f8c24fb4ccc25c78ce320324.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "hyperbolic_tangent_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "../hyperbolic_tangent_layer.h" #include "../neural_network_exception.h" #include "../nn_types.h" #include "util_cuda.h" static __forceinline__ __device__ float hyperbolic_tangent( float x, float hyperbolic_tangent_steepness2, float hyperbolic_tangent_major_multiplier) { float y = __expf(x * hyperbolic_tangent_steepness2); return __fdividef(y - 1.0F, y + 1.0F) * hyperbolic_tangent_major_multiplier; } __global__ void hyperbolic_tangent_upd_kernel( const float4 * __restrict input, float4 * __restrict output, float hyperbolic_tangent_steepness2, float hyperbolic_tangent_major_multiplier, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = hyperbolic_tangent(val.x, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.y = hyperbolic_tangent(val.y, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.z = hyperbolic_tangent(val.z, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.w = hyperbolic_tangent(val.w, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); output[elem_id] = val; } } static __forceinline__ __device__ float hyperbolic_tangent_deriviative( float x, 
float hyperbolic_tangent_major_multiplier_reverted, float hyperbolic_tangent_steepness3) { float normalized_value = x * hyperbolic_tangent_major_multiplier_reverted; return hyperbolic_tangent_steepness3 * (1.0F - (normalized_value * normalized_value)); } __global__ void hyperbolic_tangent_deriviative_upd_kernel( float4 * __restrict errors, const float4 * __restrict output_neurons, float hyperbolic_tangent_major_multiplier_reverted, float hyperbolic_tangent_steepness3, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = output_neurons[elem_id]; val.x = hyperbolic_tangent_deriviative(val.x, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.y = hyperbolic_tangent_deriviative(val.y, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.z = hyperbolic_tangent_deriviative(val.z, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.w = hyperbolic_tangent_deriviative(val.w, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); float4 current_error = errors[elem_id]; current_error.x *= val.x; current_error.y *= val.y; current_error.z *= val.z; current_error.w *= val.w; errors[elem_id] = current_error; } } namespace nnforge { namespace cuda { hyperbolic_tangent_layer_updater_cuda::hyperbolic_tangent_layer_updater_cuda() { } hyperbolic_tangent_layer_updater_cuda::~hyperbolic_tangent_layer_updater_cuda() { } void hyperbolic_tangent_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const 
std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { if (offset_input_entry_id > 0) throw neural_network_exception("hyperbolic_tangent_layer_updater_cuda is not able to run using offset"); int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( hyperbolic_tangent_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_neurons_buffer, *output_neurons_buffer, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier, elem_count); } void hyperbolic_tangent_layer_updater_cuda::enqueue_backprop( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( hyperbolic_tangent_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_errors_buffer, *output_neurons_buffer, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3, elem_count); } bool 
hyperbolic_tangent_layer_updater_cuda::is_in_place_backprop() const { return true; } void hyperbolic_tangent_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const hyperbolic_tangent_layer> layer_derived = nnforge_dynamic_pointer_cast<const hyperbolic_tangent_layer>(layer_schema); hyperbolic_tangent_steepness2 = layer_derived->steepness * 2.0F; hyperbolic_tangent_major_multiplier = layer_derived->major_multiplier; hyperbolic_tangent_steepness3 = layer_derived->steepness * layer_derived->major_multiplier; hyperbolic_tangent_major_multiplier_reverted = 1.0F / layer_derived->major_multiplier; } } }
91739130d6a86285f8c24fb4ccc25c78ce320324.cu
/* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "hyperbolic_tangent_layer_updater_cuda.h" #include <cuda_runtime.h> #include "../hyperbolic_tangent_layer.h" #include "../neural_network_exception.h" #include "../nn_types.h" #include "util_cuda.h" static __forceinline__ __device__ float hyperbolic_tangent( float x, float hyperbolic_tangent_steepness2, float hyperbolic_tangent_major_multiplier) { float y = __expf(x * hyperbolic_tangent_steepness2); return __fdividef(y - 1.0F, y + 1.0F) * hyperbolic_tangent_major_multiplier; } __global__ void hyperbolic_tangent_upd_kernel( const float4 * __restrict input, float4 * __restrict output, float hyperbolic_tangent_steepness2, float hyperbolic_tangent_major_multiplier, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = hyperbolic_tangent(val.x, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.y = hyperbolic_tangent(val.y, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.z = hyperbolic_tangent(val.z, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.w = hyperbolic_tangent(val.w, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); output[elem_id] = val; } } static __forceinline__ __device__ float hyperbolic_tangent_deriviative( float x, float hyperbolic_tangent_major_multiplier_reverted, float 
hyperbolic_tangent_steepness3) { float normalized_value = x * hyperbolic_tangent_major_multiplier_reverted; return hyperbolic_tangent_steepness3 * (1.0F - (normalized_value * normalized_value)); } __global__ void hyperbolic_tangent_deriviative_upd_kernel( float4 * __restrict errors, const float4 * __restrict output_neurons, float hyperbolic_tangent_major_multiplier_reverted, float hyperbolic_tangent_steepness3, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = output_neurons[elem_id]; val.x = hyperbolic_tangent_deriviative(val.x, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.y = hyperbolic_tangent_deriviative(val.y, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.z = hyperbolic_tangent_deriviative(val.z, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.w = hyperbolic_tangent_deriviative(val.w, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); float4 current_error = errors[elem_id]; current_error.x *= val.x; current_error.y *= val.y; current_error.z *= val.z; current_error.w *= val.w; errors[elem_id] = current_error; } } namespace nnforge { namespace cuda { hyperbolic_tangent_layer_updater_cuda::hyperbolic_tangent_layer_updater_cuda() { } hyperbolic_tangent_layer_updater_cuda::~hyperbolic_tangent_layer_updater_cuda() { } void hyperbolic_tangent_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, 
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { if (offset_input_entry_id > 0) throw neural_network_exception("hyperbolic_tangent_layer_updater_cuda is not able to run using offset"); int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hyperbolic_tangent_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_neurons_buffer, *output_neurons_buffer, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier, elem_count); } void hyperbolic_tangent_layer_updater_cuda::enqueue_backprop( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hyperbolic_tangent_deriviative_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_errors_buffer, *output_neurons_buffer, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3, elem_count); } bool hyperbolic_tangent_layer_updater_cuda::is_in_place_backprop() const { return true; } void hyperbolic_tangent_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const 
hyperbolic_tangent_layer> layer_derived = nnforge_dynamic_pointer_cast<const hyperbolic_tangent_layer>(layer_schema); hyperbolic_tangent_steepness2 = layer_derived->steepness * 2.0F; hyperbolic_tangent_major_multiplier = layer_derived->major_multiplier; hyperbolic_tangent_steepness3 = layer_derived->steepness * layer_derived->major_multiplier; hyperbolic_tangent_major_multiplier_reverted = 1.0F / layer_derived->major_multiplier; } } }
e7827787560869aa940c688cbca54a2b36aea90d.hip
// !!! This is a file automatically generated by hipify!!! // for this simple illustration, it is assumed that the code runs in // just one block, and that the number of threads evenly divides n // improvements that could be made: // 1. change to multiple blocks, to try to use all SMs // 2. possibly use shared memory // 3. have each thread work on staggered elements of dx, rather than // on contiguous ones, to get more efficient bank access #include <hip/hip_runtime.h> #include <stdio.h> __global__ void cumulker(int *dx, int n) { int me = threadIdx.x; int csize = n / blockDim.x; int start = me * csize; int i,j,base; for (i = 1; i < csize; i++) { j = start + i; dx[j] = dx[j-1] + dx[j]; } __syncthreads(); if (me > 0) { base = 0; for (j = 0; j < me; j++) base += dx[(j+1)*csize-1]; } if (me > 0) { for (i = start; i < start + csize; i++) dx[i] += base; } } int main(int argc, char **argv) { int n = atoi(argv[1]), // length of array nth = atoi(argv[2]); // number of threads int *ha, // host array *da, // device array nint = n * sizeof(int); ha = (int *) malloc(nint); // test example for (int i = 0; i < n; i++) ha[i] = i*i % 5; if (n < 100) for(int i=0; i<n; i++) printf("%d ",ha[i]); printf("\n"); hipMalloc((void **)&da,nint); hipMemcpy(da,ha,nint,hipMemcpyHostToDevice); dim3 dimGrid(1,1); dim3 dimBlock(n/nth,1,1); hipLaunchKernelGGL(( cumulker), dim3(dimGrid),dim3(dimBlock), 0, 0, da,n); hipDeviceSynchronize(); hipMemcpy(ha,da,nint,hipMemcpyDeviceToHost); if (n < 100) for(int i=0; i<n; i++) printf("%d ",ha[i]); printf("\n"); free(ha); hipFree(da); }
e7827787560869aa940c688cbca54a2b36aea90d.cu
// for this simple illustration, it is assumed that the code runs in // just one block, and that the number of threads evenly divides n // improvements that could be made: // 1. change to multiple blocks, to try to use all SMs // 2. possibly use shared memory // 3. have each thread work on staggered elements of dx, rather than // on contiguous ones, to get more efficient bank access #include <cuda.h> #include <stdio.h> __global__ void cumulker(int *dx, int n) { int me = threadIdx.x; int csize = n / blockDim.x; int start = me * csize; int i,j,base; for (i = 1; i < csize; i++) { j = start + i; dx[j] = dx[j-1] + dx[j]; } __syncthreads(); if (me > 0) { base = 0; for (j = 0; j < me; j++) base += dx[(j+1)*csize-1]; } if (me > 0) { for (i = start; i < start + csize; i++) dx[i] += base; } } int main(int argc, char **argv) { int n = atoi(argv[1]), // length of array nth = atoi(argv[2]); // number of threads int *ha, // host array *da, // device array nint = n * sizeof(int); ha = (int *) malloc(nint); // test example for (int i = 0; i < n; i++) ha[i] = i*i % 5; if (n < 100) for(int i=0; i<n; i++) printf("%d ",ha[i]); printf("\n"); cudaMalloc((void **)&da,nint); cudaMemcpy(da,ha,nint,cudaMemcpyHostToDevice); dim3 dimGrid(1,1); dim3 dimBlock(n/nth,1,1); cumulker<<<dimGrid,dimBlock>>>(da,n); cudaDeviceSynchronize(); cudaMemcpy(ha,da,nint,cudaMemcpyDeviceToHost); if (n < 100) for(int i=0; i<n; i++) printf("%d ",ha[i]); printf("\n"); free(ha); cudaFree(da); }
ad5a60ab89251c3c350cc0eede4d6537c0fdacc7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/model_update_kernel_util.h" #include "oneflow/user/kernels/multi_tensor_model_update_kernel_util.h" #include "oneflow/core/ep/cuda/cuda_stream.h" namespace oneflow { constexpr int kBlockSize = 256; constexpr int kUnrollSize = 4; unsigned int ComputeGridSize(ep::Stream* stream, const int32_t block_size, const int64_t elem_cnt) { auto* cuda_stream = stream->As<ep::CudaStream>(); const int32_t max_threads_multi_process = cuda_stream->device_properties().maxThreadsPerMultiProcessor; const int32_t multi_processor_count = cuda_stream->device_properties().multiProcessorCount; unsigned int blocks_per_sm = max_threads_multi_process / block_size; unsigned int grid_size = ((elem_cnt + block_size - 1) / block_size); grid_size = ::min((unsigned int)multi_processor_count * blocks_per_sm, grid_size); return grid_size; } template<typename T, typename G, int N> __global__ void MultiTensorSGDUpdateGpu(int64_t num_tensor, T scale, const float l1, const float l2, const float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<N> tensor_tuple_params) { if (skip_if != nullptr && *skip_if != 0) { return; } if (learning_rate != nullptr) { learning_rate_val = 
*learning_rate; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } learning_rate_val *= lr_scale; int64_t v_block_id = blockIdx.x; for (int64_t tensor_idx = 0; tensor_idx < num_tensor; tensor_idx++) { const int64_t tensor_elem_cnt = tensor_tuple_params.sizes[tensor_idx]; T* model_ptr = (T*)tensor_tuple_params.ptr[0][tensor_idx]; G* model_diff_ptr = (G*)tensor_tuple_params.ptr[1][tensor_idx]; half* model_copy_ptr = nullptr; if (N == 3) { model_copy_ptr = (half*)tensor_tuple_params.ptr[2][tensor_idx]; } for (int64_t i = v_block_id * blockDim.x * kUnrollSize + threadIdx.x; i < tensor_elem_cnt; i += blockDim.x * gridDim.x * kUnrollSize) { T model_val[kUnrollSize] = {0}; G model_diff[kUnrollSize] = {0}; #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { model_val[ilp] = *(model_ptr + actual_idx); model_diff[ilp] = *(model_diff_ptr + actual_idx); } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { T model_diff_t = CastScaleRegularizeGradientFunctor<T, G>()( model_diff[ilp], model_val[ilp], scale, l1, l2); model_val[ilp] = model_val[ilp] - learning_rate_val * (model_diff_t + weight_decay * model_val[ilp]); } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { *(model_ptr + actual_idx) = model_val[ilp]; if (N == 3) { *(model_copy_ptr + actual_idx) = static_cast<half>(model_val[ilp]); } } } } v_block_id -= tensor_tuple_params.block_offset[tensor_idx]; if (v_block_id < 0) { v_block_id += gridDim.x; } } } template<typename T, typename G> struct MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* 
learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<2> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<2> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } hipLaunchKernelGGL(( MultiTensorSGDUpdateGpu<T, G, 2>) , dim3(grid_size), dim3(kBlockSize), 0, stream->As<ep::CudaStream>()->cuda_stream(), n_tensor, static_cast<T>(scale), l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, tensor_tuple_params); } template<typename T> struct MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<2> tensor_tuple_params); }; template<typename T> void MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<2> tensor_tuple_params) { MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, 
scale_by_ptr, skip_if, tensor_tuple_params); } template struct MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, double, double>; template struct MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, float, float>; template struct MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, typename G, int N> __global__ void MultiTensorMomentumUpdateGpu( int64_t num_tensor, T scale, const float l1, const float l2, const float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<N> tensor_tuple_params) { if (skip_if != nullptr && *skip_if != 0) { return; } if (learning_rate != nullptr) { learning_rate_val = *learning_rate; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } learning_rate_val *= lr_scale; int64_t v_block_id = blockIdx.x; for (int64_t tensor_idx = 0; tensor_idx < num_tensor; tensor_idx++) { const int64_t tensor_elem_cnt = tensor_tuple_params.sizes[tensor_idx]; T* model_ptr = (T*)tensor_tuple_params.ptr[0][tensor_idx]; G* model_diff_ptr = (G*)tensor_tuple_params.ptr[1][tensor_idx]; T* momentum_buf_ptr = (T*)tensor_tuple_params.ptr[2][tensor_idx]; half* model_copy_ptr = nullptr; if (N == 4) { model_copy_ptr = (half*)tensor_tuple_params.ptr[3][tensor_idx]; } for (int64_t i = v_block_id * blockDim.x * kUnrollSize + threadIdx.x; i < tensor_elem_cnt; i += blockDim.x * gridDim.x * kUnrollSize) { T model_val[kUnrollSize] = {0}; G model_diff[kUnrollSize] = {0}; T momentum_buf[kUnrollSize] = {0}; #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { model_val[ilp] = *(model_ptr + actual_idx); model_diff[ilp] = *(model_diff_ptr + actual_idx); momentum_buf[ilp] = *(momentum_buf_ptr + actual_idx); } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { 
int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { T model_diff_t = CastScaleRegularizeGradientFunctor<T, G>()( model_diff[ilp], model_val[ilp], scale, l1, l2); if (weight_decay != 0.f) { model_diff_t += weight_decay * model_val[ilp]; } momentum_buf[ilp] = momentum * momentum_buf[ilp] + (1.f - dampening) * model_diff_t; if (nesterov) model_diff_t += momentum * momentum_buf[ilp]; else model_diff_t = momentum_buf[ilp]; T alpha = -learning_rate_val; if (maximize) alpha = learning_rate_val; model_val[ilp] += alpha * model_diff_t; } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { *(model_ptr + actual_idx) = model_val[ilp]; *(momentum_buf_ptr + actual_idx) = momentum_buf[ilp]; if (N == 4) { *(model_copy_ptr + actual_idx) = static_cast<half>(model_val[ilp]); } } } } v_block_id -= tensor_tuple_params.block_offset[tensor_idx]; if (v_block_id < 0) { v_block_id += gridDim.x; } } } template<typename T, typename G> struct MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<3> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<3> tensor_tuple_params) { const unsigned int grid_size = 
ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } hipLaunchKernelGGL(( MultiTensorMomentumUpdateGpu<T, G, 3>) , dim3(grid_size), dim3(kBlockSize), 0, stream->As<ep::CudaStream>()->cuda_stream(), n_tensor, static_cast<T>(scale), l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, momentum, dampening, nesterov, maximize, tensor_tuple_params); } template<typename T> struct MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<3> tensor_tuple_params); }; template<typename T> void MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<3> tensor_tuple_params) { MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, momentum, dampening, nesterov, maximize, tensor_tuple_params); } template struct MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, double, double>; template struct MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, float, float>; template struct 
MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, typename G, int N> __global__ void MultiTensorAdamUpdateGpu(int64_t num_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1_ptr, const float* bias_correction2_ptr, TensorTupleParams<N> tensor_tuple_params) { if (skip_if != nullptr && *skip_if != 0) { return; } if (learning_rate != nullptr) { learning_rate_val = *learning_rate; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } if (bias_correction1_ptr != nullptr) { bias_correction1_val = *bias_correction1_ptr; } if (bias_correction2_ptr != nullptr) { bias_correction2_val = *bias_correction2_ptr; } learning_rate_val *= lr_scale; int64_t v_block_id = blockIdx.x; for (int64_t tensor_idx = 0; tensor_idx < num_tensor; tensor_idx++) { const int64_t tensor_elem_cnt = tensor_tuple_params.sizes[tensor_idx]; T* model_ptr = (T*)tensor_tuple_params.ptr[0][tensor_idx]; G* model_diff_ptr = (G*)tensor_tuple_params.ptr[1][tensor_idx]; T* m_ptr = (T*)tensor_tuple_params.ptr[2][tensor_idx]; T* v_ptr = (T*)tensor_tuple_params.ptr[3][tensor_idx]; half* model_copy_ptr = nullptr; if (N == 5) { model_copy_ptr = (half*)tensor_tuple_params.ptr[4][tensor_idx]; } for (int64_t i = v_block_id * blockDim.x * kUnrollSize + threadIdx.x; i < tensor_elem_cnt; i += blockDim.x * gridDim.x * kUnrollSize) { T model_val[kUnrollSize] = {0}; T m_val[kUnrollSize] = {0}; T v_val[kUnrollSize] = {0}; G model_diff[kUnrollSize] = {0}; #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { model_val[ilp] = *(model_ptr + actual_idx); m_val[ilp] = *(m_ptr + actual_idx); v_val[ilp] = *(v_ptr + 
actual_idx); model_diff[ilp] = *(model_diff_ptr + actual_idx); } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { T model_diff_t = CastScaleRegularizeGradientFunctor<T, G>()( model_diff[ilp], model_val[ilp], scale, l1, l2); m_val[ilp] = beta1 * m_val[ilp] + (1 - beta1) * model_diff_t; v_val[ilp] = beta2 * v_val[ilp] + (1 - beta2) * model_diff_t * model_diff_t; T denom = (sqrt(v_val[ilp]) / sqrt(bias_correction2_val)) + epsilon; const T step_size = learning_rate_val / bias_correction1_val; model_val[ilp] = model_val[ilp] - step_size * (m_val[ilp] / denom) - learning_rate_val * weight_decay * model_val[ilp]; } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { *(model_ptr + actual_idx) = model_val[ilp]; *(m_ptr + actual_idx) = m_val[ilp]; *(v_ptr + actual_idx) = v_val[ilp]; if (N == 5) { *(model_copy_ptr + actual_idx) = static_cast<half>(model_val[ilp]); } } } } v_block_id -= tensor_tuple_params.block_offset[tensor_idx]; if (v_block_id < 0) { v_block_id += gridDim.x; } } } template<typename T, typename G> struct MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<4> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, 
float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<4> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } hipLaunchKernelGGL(( MultiTensorAdamUpdateGpu<T, G>) , dim3(grid_size), dim3(kBlockSize), 0, stream->As<ep::CudaStream>()->cuda_stream(), n_tensor, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, learning_rate_val, bias_correction1_val, bias_correction2_val, lr_scale, learning_rate, scale_by_ptr, skip_if, bias_correction1, bias_correction2, tensor_tuple_params); } template<typename T> struct MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<4> tensor_tuple_params); }; template<typename T> void MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, 
float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<4> tensor_tuple_params) { MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, learning_rate_val, bias_correction1_val, bias_correction2_val, lr_scale, learning_rate, scale_by_ptr, skip_if, bias_correction1, bias_correction2, tensor_tuple_params); } template struct MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, double, double>; template struct MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, float, float>; template struct MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, typename G> struct MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<3> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<3> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } hipLaunchKernelGGL(( MultiTensorSGDUpdateGpu<T, G, 3>) , dim3(grid_size), dim3(kBlockSize), 0, 
stream->As<ep::CudaStream>()->cuda_stream(), n_tensor, static_cast<T>(scale), l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, tensor_tuple_params); } template<typename T> struct MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<3> tensor_tuple_params); }; template<typename T> void MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<3> tensor_tuple_params) { MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, T, half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, tensor_tuple_params); } template struct MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float>; template struct MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, typename G> struct MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<4> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* 
stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<4> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } hipLaunchKernelGGL(( MultiTensorMomentumUpdateGpu<T, G, 4>) , dim3(grid_size), dim3(kBlockSize), 0, stream->As<ep::CudaStream>()->cuda_stream(), n_tensor, static_cast<T>(scale), l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, momentum, dampening, nesterov, maximize, tensor_tuple_params); } template<typename T> struct MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<4> tensor_tuple_params); }; template<typename T> void MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<4> tensor_tuple_params) { MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, T, 
half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, momentum, dampening, nesterov, maximize, tensor_tuple_params); } template struct MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float>; template struct MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, typename G> struct MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<5> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<5> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } hipLaunchKernelGGL(( MultiTensorAdamUpdateGpu<T, G, 5>) , dim3(grid_size), dim3(kBlockSize), 0, 
stream->As<ep::CudaStream>()->cuda_stream(), n_tensor, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, learning_rate_val, bias_correction1_val, bias_correction2_val, lr_scale, learning_rate, scale_by_ptr, skip_if, bias_correction1, bias_correction2, tensor_tuple_params); } template<typename T> struct MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<5> tensor_tuple_params); }; template<typename T> void MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<5> tensor_tuple_params) { MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, T, half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, learning_rate_val, bias_correction1_val, bias_correction2_val, lr_scale, learning_rate, scale_by_ptr, skip_if, bias_correction1, bias_correction2, tensor_tuple_params); } template struct MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float>; template struct 
MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, int N> __global__ void MultiTensorYoloModelEmaUpdateGpu(int64_t num_tensor, const float d, TensorTupleParams<N> tensor_tuple_params) { int64_t v_block_id = blockIdx.x; for (int64_t tensor_idx = 0; tensor_idx < num_tensor; tensor_idx++) { const int64_t tensor_elem_cnt = tensor_tuple_params.sizes[tensor_idx]; T* model_ptr = (T*)tensor_tuple_params.ptr[0][tensor_idx]; T* model_update_ptr = (T*)tensor_tuple_params.ptr[1][tensor_idx]; for (int64_t i = v_block_id * blockDim.x * kUnrollSize + threadIdx.x; i < tensor_elem_cnt; i += blockDim.x * gridDim.x * kUnrollSize) { T model_val[kUnrollSize] = {0}; T model_update_val[kUnrollSize] = {0}; #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { model_val[ilp] = *(model_ptr + actual_idx); model_update_val[ilp] = *(model_update_ptr + actual_idx); } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { model_val[ilp] *= d; model_val[ilp] += (1 - d) * model_update_val[ilp]; } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { *(model_ptr + actual_idx) = model_val[ilp]; *(model_update_ptr + actual_idx) = model_update_val[ilp]; } } } v_block_id -= tensor_tuple_params.block_offset[tensor_idx]; if (v_block_id < 0) { v_block_id += gridDim.x; } } } template<typename T> struct MultiTensorYoloV5WeightUpdateKernelUtil<DeviceType::kCUDA, T> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, float d, TensorTupleParams<2> tensor_tuple_params); }; template<> struct MultiTensorYoloV5WeightUpdateKernelUtil<DeviceType::kCUDA, half> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, float d, 
TensorTupleParams<2> tensor_tuple_params); }; template<typename T> void MultiTensorYoloV5WeightUpdateKernelUtil<DeviceType::kCUDA, T>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, float d, TensorTupleParams<2> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } hipLaunchKernelGGL(( MultiTensorYoloModelEmaUpdateGpu<T>) , dim3(grid_size), dim3(kBlockSize), 0, stream->As<ep::CudaStream>()->cuda_stream(), n_tensor, d, tensor_tuple_params); } template struct MultiTensorYoloV5WeightUpdateKernelUtil<DeviceType::kCUDA, float>; } // namespace oneflow
ad5a60ab89251c3c350cc0eede4d6537c0fdacc7.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/model_update_kernel_util.h" #include "oneflow/user/kernels/multi_tensor_model_update_kernel_util.h" #include "oneflow/core/ep/cuda/cuda_stream.h" namespace oneflow { constexpr int kBlockSize = 256; constexpr int kUnrollSize = 4; unsigned int ComputeGridSize(ep::Stream* stream, const int32_t block_size, const int64_t elem_cnt) { auto* cuda_stream = stream->As<ep::CudaStream>(); const int32_t max_threads_multi_process = cuda_stream->device_properties().maxThreadsPerMultiProcessor; const int32_t multi_processor_count = cuda_stream->device_properties().multiProcessorCount; unsigned int blocks_per_sm = max_threads_multi_process / block_size; unsigned int grid_size = ((elem_cnt + block_size - 1) / block_size); grid_size = std::min((unsigned int)multi_processor_count * blocks_per_sm, grid_size); return grid_size; } template<typename T, typename G, int N> __global__ void MultiTensorSGDUpdateGpu(int64_t num_tensor, T scale, const float l1, const float l2, const float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<N> tensor_tuple_params) { if (skip_if != nullptr && *skip_if != 0) { return; } if (learning_rate != nullptr) { learning_rate_val = *learning_rate; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } 
learning_rate_val *= lr_scale; int64_t v_block_id = blockIdx.x; for (int64_t tensor_idx = 0; tensor_idx < num_tensor; tensor_idx++) { const int64_t tensor_elem_cnt = tensor_tuple_params.sizes[tensor_idx]; T* model_ptr = (T*)tensor_tuple_params.ptr[0][tensor_idx]; G* model_diff_ptr = (G*)tensor_tuple_params.ptr[1][tensor_idx]; half* model_copy_ptr = nullptr; if (N == 3) { model_copy_ptr = (half*)tensor_tuple_params.ptr[2][tensor_idx]; } for (int64_t i = v_block_id * blockDim.x * kUnrollSize + threadIdx.x; i < tensor_elem_cnt; i += blockDim.x * gridDim.x * kUnrollSize) { T model_val[kUnrollSize] = {0}; G model_diff[kUnrollSize] = {0}; #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { model_val[ilp] = *(model_ptr + actual_idx); model_diff[ilp] = *(model_diff_ptr + actual_idx); } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { T model_diff_t = CastScaleRegularizeGradientFunctor<T, G>()( model_diff[ilp], model_val[ilp], scale, l1, l2); model_val[ilp] = model_val[ilp] - learning_rate_val * (model_diff_t + weight_decay * model_val[ilp]); } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { *(model_ptr + actual_idx) = model_val[ilp]; if (N == 3) { *(model_copy_ptr + actual_idx) = static_cast<half>(model_val[ilp]); } } } } v_block_id -= tensor_tuple_params.block_offset[tensor_idx]; if (v_block_id < 0) { v_block_id += gridDim.x; } } } template<typename T, typename G> struct MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, 
TensorTupleParams<2> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<2> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } MultiTensorSGDUpdateGpu<T, G, 2> <<<grid_size, kBlockSize, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( n_tensor, static_cast<T>(scale), l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, tensor_tuple_params); } template<typename T> struct MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<2> tensor_tuple_params); }; template<typename T> void MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<2> tensor_tuple_params) { MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, tensor_tuple_params); } template struct 
MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, double, double>; template struct MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, float, float>; template struct MultiTensorSGDUpdateKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, typename G, int N> __global__ void MultiTensorMomentumUpdateGpu( int64_t num_tensor, T scale, const float l1, const float l2, const float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<N> tensor_tuple_params) { if (skip_if != nullptr && *skip_if != 0) { return; } if (learning_rate != nullptr) { learning_rate_val = *learning_rate; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } learning_rate_val *= lr_scale; int64_t v_block_id = blockIdx.x; for (int64_t tensor_idx = 0; tensor_idx < num_tensor; tensor_idx++) { const int64_t tensor_elem_cnt = tensor_tuple_params.sizes[tensor_idx]; T* model_ptr = (T*)tensor_tuple_params.ptr[0][tensor_idx]; G* model_diff_ptr = (G*)tensor_tuple_params.ptr[1][tensor_idx]; T* momentum_buf_ptr = (T*)tensor_tuple_params.ptr[2][tensor_idx]; half* model_copy_ptr = nullptr; if (N == 4) { model_copy_ptr = (half*)tensor_tuple_params.ptr[3][tensor_idx]; } for (int64_t i = v_block_id * blockDim.x * kUnrollSize + threadIdx.x; i < tensor_elem_cnt; i += blockDim.x * gridDim.x * kUnrollSize) { T model_val[kUnrollSize] = {0}; G model_diff[kUnrollSize] = {0}; T momentum_buf[kUnrollSize] = {0}; #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { model_val[ilp] = *(model_ptr + actual_idx); model_diff[ilp] = *(model_diff_ptr + actual_idx); momentum_buf[ilp] = *(momentum_buf_ptr + actual_idx); } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < 
tensor_elem_cnt) { T model_diff_t = CastScaleRegularizeGradientFunctor<T, G>()( model_diff[ilp], model_val[ilp], scale, l1, l2); if (weight_decay != 0.f) { model_diff_t += weight_decay * model_val[ilp]; } momentum_buf[ilp] = momentum * momentum_buf[ilp] + (1.f - dampening) * model_diff_t; if (nesterov) model_diff_t += momentum * momentum_buf[ilp]; else model_diff_t = momentum_buf[ilp]; T alpha = -learning_rate_val; if (maximize) alpha = learning_rate_val; model_val[ilp] += alpha * model_diff_t; } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { *(model_ptr + actual_idx) = model_val[ilp]; *(momentum_buf_ptr + actual_idx) = momentum_buf[ilp]; if (N == 4) { *(model_copy_ptr + actual_idx) = static_cast<half>(model_val[ilp]); } } } } v_block_id -= tensor_tuple_params.block_offset[tensor_idx]; if (v_block_id < 0) { v_block_id += gridDim.x; } } } template<typename T, typename G> struct MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<3> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<3> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, 
elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } MultiTensorMomentumUpdateGpu<T, G, 3> <<<grid_size, kBlockSize, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( n_tensor, static_cast<T>(scale), l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, momentum, dampening, nesterov, maximize, tensor_tuple_params); } template<typename T> struct MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<3> tensor_tuple_params); }; template<typename T> void MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<3> tensor_tuple_params) { MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, momentum, dampening, nesterov, maximize, tensor_tuple_params); } template struct MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, double, double>; template struct MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, float, float>; template struct MultiTensorMomentumUpdateKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, typename 
G, int N> __global__ void MultiTensorAdamUpdateGpu(int64_t num_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1_ptr, const float* bias_correction2_ptr, TensorTupleParams<N> tensor_tuple_params) { if (skip_if != nullptr && *skip_if != 0) { return; } if (learning_rate != nullptr) { learning_rate_val = *learning_rate; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } if (bias_correction1_ptr != nullptr) { bias_correction1_val = *bias_correction1_ptr; } if (bias_correction2_ptr != nullptr) { bias_correction2_val = *bias_correction2_ptr; } learning_rate_val *= lr_scale; int64_t v_block_id = blockIdx.x; for (int64_t tensor_idx = 0; tensor_idx < num_tensor; tensor_idx++) { const int64_t tensor_elem_cnt = tensor_tuple_params.sizes[tensor_idx]; T* model_ptr = (T*)tensor_tuple_params.ptr[0][tensor_idx]; G* model_diff_ptr = (G*)tensor_tuple_params.ptr[1][tensor_idx]; T* m_ptr = (T*)tensor_tuple_params.ptr[2][tensor_idx]; T* v_ptr = (T*)tensor_tuple_params.ptr[3][tensor_idx]; half* model_copy_ptr = nullptr; if (N == 5) { model_copy_ptr = (half*)tensor_tuple_params.ptr[4][tensor_idx]; } for (int64_t i = v_block_id * blockDim.x * kUnrollSize + threadIdx.x; i < tensor_elem_cnt; i += blockDim.x * gridDim.x * kUnrollSize) { T model_val[kUnrollSize] = {0}; T m_val[kUnrollSize] = {0}; T v_val[kUnrollSize] = {0}; G model_diff[kUnrollSize] = {0}; #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { model_val[ilp] = *(model_ptr + actual_idx); m_val[ilp] = *(m_ptr + actual_idx); v_val[ilp] = *(v_ptr + actual_idx); model_diff[ilp] = *(model_diff_ptr + actual_idx); } } #pragma unroll for (int32_t ilp = 0; 
ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { T model_diff_t = CastScaleRegularizeGradientFunctor<T, G>()( model_diff[ilp], model_val[ilp], scale, l1, l2); m_val[ilp] = beta1 * m_val[ilp] + (1 - beta1) * model_diff_t; v_val[ilp] = beta2 * v_val[ilp] + (1 - beta2) * model_diff_t * model_diff_t; T denom = (sqrt(v_val[ilp]) / sqrt(bias_correction2_val)) + epsilon; const T step_size = learning_rate_val / bias_correction1_val; model_val[ilp] = model_val[ilp] - step_size * (m_val[ilp] / denom) - learning_rate_val * weight_decay * model_val[ilp]; } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { *(model_ptr + actual_idx) = model_val[ilp]; *(m_ptr + actual_idx) = m_val[ilp]; *(v_ptr + actual_idx) = v_val[ilp]; if (N == 5) { *(model_copy_ptr + actual_idx) = static_cast<half>(model_val[ilp]); } } } } v_block_id -= tensor_tuple_params.block_offset[tensor_idx]; if (v_block_id < 0) { v_block_id += gridDim.x; } } } template<typename T, typename G> struct MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<4> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, 
float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<4> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } MultiTensorAdamUpdateGpu<T, G> <<<grid_size, kBlockSize, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( n_tensor, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, learning_rate_val, bias_correction1_val, bias_correction2_val, lr_scale, learning_rate, scale_by_ptr, skip_if, bias_correction1, bias_correction2, tensor_tuple_params); } template<typename T> struct MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<4> tensor_tuple_params); }; template<typename T> void MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const 
float* bias_correction2, TensorTupleParams<4> tensor_tuple_params) { MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, learning_rate_val, bias_correction1_val, bias_correction2_val, lr_scale, learning_rate, scale_by_ptr, skip_if, bias_correction1, bias_correction2, tensor_tuple_params); } template struct MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, double, double>; template struct MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, float, float>; template struct MultiTensorAdamUpdateKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, typename G> struct MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<3> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<3> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } MultiTensorSGDUpdateGpu<T, G, 3> <<<grid_size, kBlockSize, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( n_tensor, static_cast<T>(scale), l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, tensor_tuple_params); } 
template<typename T> struct MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<3> tensor_tuple_params); }; template<typename T> void MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, TensorTupleParams<3> tensor_tuple_params) { MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, T, half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, tensor_tuple_params); } template struct MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float>; template struct MultiTensorSGDUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, typename G> struct MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<4> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* 
scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<4> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } MultiTensorMomentumUpdateGpu<T, G, 4> <<<grid_size, kBlockSize, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( n_tensor, static_cast<T>(scale), l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, momentum, dampening, nesterov, maximize, tensor_tuple_params); } template<typename T> struct MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<4> tensor_tuple_params); }; template<typename T> void MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float weight_decay, float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float momentum, const float dampening, const bool nesterov, const bool maximize, TensorTupleParams<4> tensor_tuple_params) { MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, T, half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, momentum, dampening, nesterov, maximize, tensor_tuple_params); } template 
struct MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float>; template struct MultiTensorMomentumUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, typename G> struct MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<5> tensor_tuple_params); }; template<typename T, typename G> void MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, T, G>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<5> tensor_tuple_params) { const unsigned int grid_size = ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } MultiTensorAdamUpdateGpu<T, G, 5> <<<grid_size, kBlockSize, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( n_tensor, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, learning_rate_val, bias_correction1_val, bias_correction2_val, lr_scale, learning_rate, scale_by_ptr, skip_if, bias_correction1, 
bias_correction2, tensor_tuple_params); } template<typename T> struct MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<5> tensor_tuple_params); }; template<typename T> void MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, T, float16>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1, const float* bias_correction2, TensorTupleParams<5> tensor_tuple_params) { MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, T, half>::Update( stream, elem_cnt, n_tensor, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, learning_rate_val, bias_correction1_val, bias_correction2_val, lr_scale, learning_rate, scale_by_ptr, skip_if, bias_correction1, bias_correction2, tensor_tuple_params); } template struct MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float>; template struct MultiTensorAdamUpdateWithCastKernelUtil<DeviceType::kCUDA, float, float16>; template<typename T, int N> __global__ void MultiTensorYoloModelEmaUpdateGpu(int64_t num_tensor, const float d, TensorTupleParams<N> tensor_tuple_params) { int64_t v_block_id = blockIdx.x; for (int64_t tensor_idx = 0; tensor_idx < 
num_tensor; tensor_idx++) { const int64_t tensor_elem_cnt = tensor_tuple_params.sizes[tensor_idx]; T* model_ptr = (T*)tensor_tuple_params.ptr[0][tensor_idx]; T* model_update_ptr = (T*)tensor_tuple_params.ptr[1][tensor_idx]; for (int64_t i = v_block_id * blockDim.x * kUnrollSize + threadIdx.x; i < tensor_elem_cnt; i += blockDim.x * gridDim.x * kUnrollSize) { T model_val[kUnrollSize] = {0}; T model_update_val[kUnrollSize] = {0}; #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { model_val[ilp] = *(model_ptr + actual_idx); model_update_val[ilp] = *(model_update_ptr + actual_idx); } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { model_val[ilp] *= d; model_val[ilp] += (1 - d) * model_update_val[ilp]; } } #pragma unroll for (int32_t ilp = 0; ilp < kUnrollSize; ilp++) { int64_t actual_idx = i + ilp * blockDim.x; if (actual_idx < tensor_elem_cnt) { *(model_ptr + actual_idx) = model_val[ilp]; *(model_update_ptr + actual_idx) = model_update_val[ilp]; } } } v_block_id -= tensor_tuple_params.block_offset[tensor_idx]; if (v_block_id < 0) { v_block_id += gridDim.x; } } } template<typename T> struct MultiTensorYoloV5WeightUpdateKernelUtil<DeviceType::kCUDA, T> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, float d, TensorTupleParams<2> tensor_tuple_params); }; template<> struct MultiTensorYoloV5WeightUpdateKernelUtil<DeviceType::kCUDA, half> { static void Update(ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, float d, TensorTupleParams<2> tensor_tuple_params); }; template<typename T> void MultiTensorYoloV5WeightUpdateKernelUtil<DeviceType::kCUDA, T>::Update( ep::Stream* stream, const int64_t elem_cnt, const int64_t n_tensor, float d, TensorTupleParams<2> tensor_tuple_params) { const unsigned int grid_size = 
ComputeGridSize(stream->As<ep::CudaStream>(), kBlockSize, elem_cnt); for (int i = 0; i < n_tensor; i++) { tensor_tuple_params.block_offset[i] = ((tensor_tuple_params.sizes[i] + kBlockSize * kUnrollSize - 1) / (kBlockSize * kUnrollSize)) % grid_size; } MultiTensorYoloModelEmaUpdateGpu<T> <<<grid_size, kBlockSize, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( n_tensor, d, tensor_tuple_params); } template struct MultiTensorYoloV5WeightUpdateKernelUtil<DeviceType::kCUDA, float>; } // namespace oneflow
ProjCoreOrig.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ProjHelperFun.cu.h" #include "Constants.h" #include "InitKernels.cu.h" #include "CoreKernels.cu.h" //////////////////////////////////////////////////////////////////////////////// ////////////////////////////DEBUGGING////////////////////// __global__ void getList(PrivGlobsCuda* globsList, REAL* res_out, const unsigned size // REAL* mat ){ const unsigned int gid = threadIdx.x + blockIdx.x*blockDim.x; PrivGlobsCuda globs = globsList[8]; if(gid < size){ //res_out[gid] = globs.myVarX[gid]; //res_out[gid] = globs.myTimeline[gid]; // res_out[gid] = globs.myResult[gid]; //res_out[gid] = mat[gid]; res_out[0] = (REAL) globs.myXindex; res_out[1] = (REAL) globs.myYindex; res_out[2] = (REAL) globs.myResult[idx2d(globs.myXindex, globs.myYindex, globs.myResultCols)]; //res_out[gid] = globs.myResult[gid]; } /* for( unsigned j = 0; j < outer; ++ j ) { //par res[j] = globs[j].myResult[globs[j].myXindex][globs[j].myYindex]; } */ } //////////////////////////////////////////////////////////////////////////////// //wrapper for the kernelUpdate void updateWrapper( PrivGlobsCuda* globsList, const unsigned g, const unsigned numX, const unsigned numY, const unsigned outer, const REAL alpha, const REAL beta, const REAL nu ){ //8*8*8 = 512 =< 1024 const int x = numX; const int y = numY; const int z = outer; const int dimx = ceil( ((float)x) / TVAL ); const int dimy = ceil( ((float)y) / TVAL ); const int dimz = ceil( ((float)z)); dim3 block(TVAL,TVAL,1), grid(dimx,dimy,dimz); hipLaunchKernelGGL(( kernelUpdate) , dim3(grid), dim3(block), 0, 0, globsList, g, x, y, z, alpha, beta, nu); hipDeviceSynchronize(); } void rollbackWrapper(PrivGlobsCuda* globsList, const unsigned g, const unsigned outer, const unsigned numX, const unsigned numY, const unsigned numZ ){ // create all arrays as multidim arrays for rollback() REAL *u, *uT, *v, *y, *yy; //[3.dim][1.dim][2.dim] //u = [numY][numX][outer]; numY 
rows, numX cols hipMalloc((void**)&u, outer*( numY*numX*sizeof(REAL) )); hipMalloc((void**)&uT, outer*( numX*numY*sizeof(REAL) )); hipMalloc((void**)&v, outer*( numX*numY*sizeof(REAL) )); hipMalloc((void**)&y, outer*( numX*numY*sizeof(REAL) )); hipMalloc((void**)&yy, outer*( numX*numY*sizeof(REAL) )); // hipMalloc((void**)&yy, outer*( numX*sizeof(REAL) )); REAL *a, *b, *c, *aT, *bT, *cT; hipMalloc((void**)&a, outer*( numY*numX*sizeof(REAL) )); hipMalloc((void**)&b, outer*( numY*numX*sizeof(REAL) )); hipMalloc((void**)&c, outer*( numY*numX*sizeof(REAL) )); hipMalloc((void**)&aT, outer*( numX*numY*sizeof(REAL) )); hipMalloc((void**)&bT, outer*( numX*numY*sizeof(REAL) )); hipMalloc((void**)&cT, outer*( numX*numY*sizeof(REAL) )); const int x = numZ; //max(myXsize, numY), myXsize = numX //const int y = numZ = x; //max(y, myYsize), myYsize = numY int dimx = ceil( ((float)x) / TVAL ); int dimy = ceil( ((float)x) / TVAL ); int dimz = outer; dim3 block(TVAL,TVAL,1), grid(dimx,dimy,dimz); const unsigned n = numY*numX; unsigned int block_size = 512; unsigned int num_blocks = (n + (block_size - 1)) / block_size; unsigned int sh_mem_size = block_size * 32; hipLaunchKernelGGL(( kernelRollback1) , dim3(grid), dim3(block) , 0, 0, globsList, g, outer, u, uT, v, y, a, b, c, aT, bT, cT); hipDeviceSynchronize(); hipLaunchKernelGGL(( transpose3dTiled<TVAL>), dim3(grid), dim3(block) , 0, 0, uT, u, numY, numX); hipDeviceSynchronize(); hipLaunchKernelGGL(( transpose3dTiled<TVAL>), dim3(grid), dim3(block) , 0, 0, aT, a, numX, numY); hipDeviceSynchronize(); hipLaunchKernelGGL(( transpose3dTiled<TVAL>), dim3(grid), dim3(block) , 0, 0, bT, b, numX, numY); hipDeviceSynchronize(); hipLaunchKernelGGL(( transpose3dTiled<TVAL>), dim3(grid), dim3(block) , 0, 0, cT, c, numX, numY); hipDeviceSynchronize(); //Tridag 1 //tridag1(outer, u, yy, a, b, c, numX, numY, numZ); hipLaunchKernelGGL(( kernelTridag1) , dim3(num_blocks), dim3(block_size), sh_mem_size , 0, outer, u, yy, a, b, c, numX, numY); 
hipDeviceSynchronize(); hipLaunchKernelGGL(( kernelRollback2) , dim3(grid), dim3(block), 0, 0, globsList, g, outer, u, uT, v, y, yy, a, b, c, aT, bT, cT); hipDeviceSynchronize(); hipLaunchKernelGGL(( transpose3dTiled<TVAL>), dim3(grid), dim3(block) , 0, 0, aT, a, numY, numX); hipDeviceSynchronize(); hipLaunchKernelGGL(( transpose3dTiled<TVAL>), dim3(grid), dim3(block) , 0, 0, bT, b, numY, numX); hipDeviceSynchronize(); hipLaunchKernelGGL(( transpose3dTiled<TVAL>), dim3(grid), dim3(block) , 0, 0, cT, c, numY, numX); hipDeviceSynchronize(); hipLaunchKernelGGL(( transpose3dTiled<TVAL>), dim3(grid), dim3(block) , 0, 0, u, uT, numX, numY); hipDeviceSynchronize(); hipLaunchKernelGGL(( kernelRollback3) , dim3(grid), dim3(block), 0, 0, globsList, g, outer, uT, v, y); hipDeviceSynchronize(); //tridag2(globsList, outer, y, yy, aT, bT, cT, numX, numY, numZ); hipLaunchKernelGGL(( kernelTridag2) , dim3(num_blocks), dim3(block_size), sh_mem_size , 0, globsList, outer, y, yy, aT, bT, cT, numX, numY); hipDeviceSynchronize(); { //unsigned s = numX*numY; unsigned size = 3; unsigned mem_size = size*sizeof(REAL); unsigned num_threads = size; unsigned block_size = 512; unsigned int num_blocks = ceil(((float) num_threads) / block_size); REAL *res, *d_res; hipMalloc((void**)&d_res, mem_size); res = (REAL*) malloc(mem_size); hipLaunchKernelGGL(( getList), dim3(num_blocks), dim3(block_size), 0, 0, globsList, d_res, size); hipDeviceSynchronize(); hipMemcpy(res, d_res, mem_size, hipMemcpyDeviceToHost); printf("\nres = [\n"); for(unsigned i=0; i < size; i++) printf("[%d] = %.5f\n", i, res[i]); printf("\n]\n"); //exit(0); } hipFree(u); hipFree(uT); hipFree(v); hipFree(y); hipFree(yy); hipFree(a); hipFree(b); hipFree(c); hipFree(aT); hipFree(bT); hipFree(cT); } void getResultsWrapper(PrivGlobsCuda* globsList, const unsigned outer, REAL* res){ const unsigned int num_threads = outer; const unsigned int block_size = 512; unsigned int num_blocks = ceil(((float) num_threads) / block_size); unsigned 
int mem_size = outer * sizeof(REAL); //(*res) = (REAL*) malloc(mem_size); { float* d_out; hipMalloc((void**)&d_out, mem_size); hipLaunchKernelGGL(( kernelGetResults), dim3(num_blocks), dim3(block_size), 0, 0, globsList, d_out, outer); hipDeviceSynchronize(); //cuda results to mem hipMemcpy(res, d_out, mem_size, hipMemcpyDeviceToHost); hipFree(d_out); } } void run_GPU( const unsigned int& outer, const unsigned int& numX, const unsigned int& numY, const unsigned int& numT, const REAL& s0, const REAL& t, const REAL& alpha, const REAL& nu, const REAL& beta, REAL* res // [outer] RESULT ) { // sequential loop distributed. PrivGlobsCuda* globsList; const unsigned numZ = max(numX, numY); //hipMalloc((void**)&globsList, outer*sizeof(struct PrivGlobsCuda)); printf("init begin\n"); init(&globsList, outer, s0, alpha, nu, t, numX, numY, numT); printf("init done\n"); /////////////////////////////////////////////////// // { // //unsigned s = numX*numY; // unsigned size = 4; // unsigned mem_size = size*sizeof(REAL); // unsigned num_threads = size; // unsigned block_size = 512; // unsigned int num_blocks = ceil(((float) num_threads) / block_size); // REAL *res, *d_res; // hipMalloc((void**)&d_res, mem_size); // res = (REAL*) malloc(mem_size); // hipLaunchKernelGGL(( getList), dim3(num_blocks), dim3(block_size), 0, 0, globsList, d_res, size); // hipDeviceSynchronize(); // hipMemcpy(res, d_res, mem_size, hipMemcpyDeviceToHost); // printf("\nres = [\n"); // for(unsigned i=0; i < size; i++) // printf("[%d] = %.5f\n", i, res[i]); // printf("\n]\n"); // //exit(0); // } ////////////////////////////////////////////////////// for(int g = numT-2;g>=0;--g){ //seq //updateParams() printf("update begin\n"); updateWrapper(globsList, g, numX, numY, outer, alpha, beta, nu); printf("update done\n"); //rollback() printf("rollback begin\n"); rollbackWrapper(globsList, g, outer, numX, numY, numZ); printf("rollback done\n"); } getResultsWrapper(globsList, outer, res); } //#endif // PROJ_CORE_ORIG
ProjCoreOrig.cu
#include "ProjHelperFun.cu.h" #include "Constants.h" #include "InitKernels.cu.h" #include "CoreKernels.cu.h" //////////////////////////////////////////////////////////////////////////////// ////////////////////////////DEBUGGING////////////////////// __global__ void getList(PrivGlobsCuda* globsList, REAL* res_out, const unsigned size // REAL* mat ){ const unsigned int gid = threadIdx.x + blockIdx.x*blockDim.x; PrivGlobsCuda globs = globsList[8]; if(gid < size){ //res_out[gid] = globs.myVarX[gid]; //res_out[gid] = globs.myTimeline[gid]; // res_out[gid] = globs.myResult[gid]; //res_out[gid] = mat[gid]; res_out[0] = (REAL) globs.myXindex; res_out[1] = (REAL) globs.myYindex; res_out[2] = (REAL) globs.myResult[idx2d(globs.myXindex, globs.myYindex, globs.myResultCols)]; //res_out[gid] = globs.myResult[gid]; } /* for( unsigned j = 0; j < outer; ++ j ) { //par res[j] = globs[j].myResult[globs[j].myXindex][globs[j].myYindex]; } */ } //////////////////////////////////////////////////////////////////////////////// //wrapper for the kernelUpdate void updateWrapper( PrivGlobsCuda* globsList, const unsigned g, const unsigned numX, const unsigned numY, const unsigned outer, const REAL alpha, const REAL beta, const REAL nu ){ //8*8*8 = 512 =< 1024 const int x = numX; const int y = numY; const int z = outer; const int dimx = ceil( ((float)x) / TVAL ); const int dimy = ceil( ((float)y) / TVAL ); const int dimz = ceil( ((float)z)); dim3 block(TVAL,TVAL,1), grid(dimx,dimy,dimz); kernelUpdate <<< grid, block>>>(globsList, g, x, y, z, alpha, beta, nu); cudaThreadSynchronize(); } void rollbackWrapper(PrivGlobsCuda* globsList, const unsigned g, const unsigned outer, const unsigned numX, const unsigned numY, const unsigned numZ ){ // create all arrays as multidim arrays for rollback() REAL *u, *uT, *v, *y, *yy; //[3.dim][1.dim][2.dim] //u = [numY][numX][outer]; numY rows, numX cols cudaMalloc((void**)&u, outer*( numY*numX*sizeof(REAL) )); cudaMalloc((void**)&uT, outer*( 
numX*numY*sizeof(REAL) )); cudaMalloc((void**)&v, outer*( numX*numY*sizeof(REAL) )); cudaMalloc((void**)&y, outer*( numX*numY*sizeof(REAL) )); cudaMalloc((void**)&yy, outer*( numX*numY*sizeof(REAL) )); // cudaMalloc((void**)&yy, outer*( numX*sizeof(REAL) )); REAL *a, *b, *c, *aT, *bT, *cT; cudaMalloc((void**)&a, outer*( numY*numX*sizeof(REAL) )); cudaMalloc((void**)&b, outer*( numY*numX*sizeof(REAL) )); cudaMalloc((void**)&c, outer*( numY*numX*sizeof(REAL) )); cudaMalloc((void**)&aT, outer*( numX*numY*sizeof(REAL) )); cudaMalloc((void**)&bT, outer*( numX*numY*sizeof(REAL) )); cudaMalloc((void**)&cT, outer*( numX*numY*sizeof(REAL) )); const int x = numZ; //max(myXsize, numY), myXsize = numX //const int y = numZ = x; //max(y, myYsize), myYsize = numY int dimx = ceil( ((float)x) / TVAL ); int dimy = ceil( ((float)x) / TVAL ); int dimz = outer; dim3 block(TVAL,TVAL,1), grid(dimx,dimy,dimz); const unsigned n = numY*numX; unsigned int block_size = 512; unsigned int num_blocks = (n + (block_size - 1)) / block_size; unsigned int sh_mem_size = block_size * 32; kernelRollback1 <<< grid, block >>> ( globsList, g, outer, u, uT, v, y, a, b, c, aT, bT, cT); cudaThreadSynchronize(); transpose3dTiled<TVAL><<< grid, block >>>(uT, u, numY, numX); cudaThreadSynchronize(); transpose3dTiled<TVAL><<< grid, block >>>(aT, a, numX, numY); cudaThreadSynchronize(); transpose3dTiled<TVAL><<< grid, block >>>(bT, b, numX, numY); cudaThreadSynchronize(); transpose3dTiled<TVAL><<< grid, block >>>(cT, c, numX, numY); cudaThreadSynchronize(); //Tridag 1 //tridag1(outer, u, yy, a, b, c, numX, numY, numZ); kernelTridag1 <<< num_blocks, block_size, sh_mem_size >>> (outer, u, yy, a, b, c, numX, numY); cudaThreadSynchronize(); kernelRollback2 <<< grid, block>>> ( globsList, g, outer, u, uT, v, y, yy, a, b, c, aT, bT, cT); cudaThreadSynchronize(); transpose3dTiled<TVAL><<< grid, block >>>(aT, a, numY, numX); cudaThreadSynchronize(); transpose3dTiled<TVAL><<< grid, block >>>(bT, b, numY, numX); 
cudaThreadSynchronize(); transpose3dTiled<TVAL><<< grid, block >>>(cT, c, numY, numX); cudaThreadSynchronize(); transpose3dTiled<TVAL><<< grid, block >>>(u, uT, numX, numY); cudaThreadSynchronize(); kernelRollback3 <<< grid, block>>> (globsList, g, outer, uT, v, y); cudaThreadSynchronize(); //tridag2(globsList, outer, y, yy, aT, bT, cT, numX, numY, numZ); kernelTridag2 <<< num_blocks, block_size, sh_mem_size >>> (globsList, outer, y, yy, aT, bT, cT, numX, numY); cudaThreadSynchronize(); { //unsigned s = numX*numY; unsigned size = 3; unsigned mem_size = size*sizeof(REAL); unsigned num_threads = size; unsigned block_size = 512; unsigned int num_blocks = ceil(((float) num_threads) / block_size); REAL *res, *d_res; cudaMalloc((void**)&d_res, mem_size); res = (REAL*) malloc(mem_size); getList<<< num_blocks, block_size>>>(globsList, d_res, size); cudaThreadSynchronize(); cudaMemcpy(res, d_res, mem_size, cudaMemcpyDeviceToHost); printf("\nres = [\n"); for(unsigned i=0; i < size; i++) printf("[%d] = %.5f\n", i, res[i]); printf("\n]\n"); //exit(0); } cudaFree(u); cudaFree(uT); cudaFree(v); cudaFree(y); cudaFree(yy); cudaFree(a); cudaFree(b); cudaFree(c); cudaFree(aT); cudaFree(bT); cudaFree(cT); } void getResultsWrapper(PrivGlobsCuda* globsList, const unsigned outer, REAL* res){ const unsigned int num_threads = outer; const unsigned int block_size = 512; unsigned int num_blocks = ceil(((float) num_threads) / block_size); unsigned int mem_size = outer * sizeof(REAL); //(*res) = (REAL*) malloc(mem_size); { float* d_out; cudaMalloc((void**)&d_out, mem_size); kernelGetResults<<< num_blocks, block_size>>> (globsList, d_out, outer); cudaThreadSynchronize(); //cuda results to mem cudaMemcpy(res, d_out, mem_size, cudaMemcpyDeviceToHost); cudaFree(d_out); } } void run_GPU( const unsigned int& outer, const unsigned int& numX, const unsigned int& numY, const unsigned int& numT, const REAL& s0, const REAL& t, const REAL& alpha, const REAL& nu, const REAL& beta, REAL* res // [outer] 
RESULT ) { // sequential loop distributed. PrivGlobsCuda* globsList; const unsigned numZ = max(numX, numY); //cudaMalloc((void**)&globsList, outer*sizeof(struct PrivGlobsCuda)); printf("init begin\n"); init(&globsList, outer, s0, alpha, nu, t, numX, numY, numT); printf("init done\n"); /////////////////////////////////////////////////// // { // //unsigned s = numX*numY; // unsigned size = 4; // unsigned mem_size = size*sizeof(REAL); // unsigned num_threads = size; // unsigned block_size = 512; // unsigned int num_blocks = ceil(((float) num_threads) / block_size); // REAL *res, *d_res; // cudaMalloc((void**)&d_res, mem_size); // res = (REAL*) malloc(mem_size); // getList<<< num_blocks, block_size>>>(globsList, d_res, size); // cudaThreadSynchronize(); // cudaMemcpy(res, d_res, mem_size, cudaMemcpyDeviceToHost); // printf("\nres = [\n"); // for(unsigned i=0; i < size; i++) // printf("[%d] = %.5f\n", i, res[i]); // printf("\n]\n"); // //exit(0); // } ////////////////////////////////////////////////////// for(int g = numT-2;g>=0;--g){ //seq //updateParams() printf("update begin\n"); updateWrapper(globsList, g, numX, numY, outer, alpha, beta, nu); printf("update done\n"); //rollback() printf("rollback begin\n"); rollbackWrapper(globsList, g, outer, numX, numY, numZ); printf("rollback done\n"); } getResultsWrapper(globsList, outer, res); } //#endif // PROJ_CORE_ORIG
e8fd74d7352547cd82fdd7d8f3ee05eb2591c42d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> static const int M = 16;// static const int N = 32;// #define CHECK_STATUS(status) \ if (status != hipSuccess) \ fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\ hipGetErrorString(status)) // __global__ void MatAdd(float *A, float *B, float *C) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < N && j < N) { int index = i * N + j; C[index] = A[index] + B[index]; } } int main(int argc, char **argv) { CHECK_STATUS(hipSetDevice(0)); const int SIZE = M * N; float a[SIZE]; float b[SIZE]; for(int i = 0;i<SIZE;i++){ a[i] = i; b[i] = i; } float c[SIZE]; float *d_a,*d_b,*d_c; // CHECK_STATUS(hipMalloc(&d_a, SIZE*sizeof(float))); CHECK_STATUS(hipMalloc(&d_b, SIZE*sizeof(float))); CHECK_STATUS(hipMalloc(&d_c, SIZE*sizeof(float))); // CHECK_STATUS(hipMemcpy(d_a,a,SIZE* sizeof(float),hipMemcpyHostToDevice)); CHECK_STATUS(hipMemcpy(d_b,b,SIZE* sizeof(float),hipMemcpyHostToDevice)); // kernel dim3 threadsPerBlock(16, 16); dim3 numBlocks(M / threadsPerBlock.x, N / threadsPerBlock.y); hipLaunchKernelGGL(( MatAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_a, d_b, d_c); // CHECK_STATUS(hipGetLastError()); // CHECK_STATUS(hipMemcpy(c,d_c,SIZE* sizeof(float),hipMemcpyDeviceToHost)); // for(int i=0;i<M;i++) { for(int j=0;j<N;j++) printf("%f\t",c[i*N + j]); printf("\n"); } // CHECK_STATUS(hipFree(d_a)); CHECK_STATUS(hipFree(d_b)); CHECK_STATUS(hipFree(d_c)); return 0; }
e8fd74d7352547cd82fdd7d8f3ee05eb2591c42d.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> static const int M = 16;//行 static const int N = 32;//列 #define CHECK_STATUS(status) \ if (status != cudaSuccess) \ fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\ cudaGetErrorString(status)) //二维数组相加 __global__ void MatAdd(float *A, float *B, float *C) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < N && j < N) { int index = i * N + j; C[index] = A[index] + B[index]; } } int main(int argc, char **argv) { CHECK_STATUS(cudaSetDevice(0)); const int SIZE = M * N; float a[SIZE]; float b[SIZE]; for(int i = 0;i<SIZE;i++){ a[i] = i; b[i] = i; } float c[SIZE]; float *d_a,*d_b,*d_c; //分配显存 CHECK_STATUS(cudaMalloc(&d_a, SIZE*sizeof(float))); CHECK_STATUS(cudaMalloc(&d_b, SIZE*sizeof(float))); CHECK_STATUS(cudaMalloc(&d_c, SIZE*sizeof(float))); // 把数据从内存复制到显存 CHECK_STATUS(cudaMemcpy(d_a,a,SIZE* sizeof(float),cudaMemcpyHostToDevice)); CHECK_STATUS(cudaMemcpy(d_b,b,SIZE* sizeof(float),cudaMemcpyHostToDevice)); // 调用kernel dim3 threadsPerBlock(16, 16); dim3 numBlocks(M / threadsPerBlock.x, N / threadsPerBlock.y); MatAdd<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c); // 检查错误 CHECK_STATUS(cudaGetLastError()); // 从显存把数据复制到内存 CHECK_STATUS(cudaMemcpy(c,d_c,SIZE* sizeof(float),cudaMemcpyDeviceToHost)); // 打印 for(int i=0;i<M;i++) { for(int j=0;j<N;j++) printf("%f\t",c[i*N + j]); printf("\n"); } //释放显存 CHECK_STATUS(cudaFree(d_a)); CHECK_STATUS(cudaFree(d_b)); CHECK_STATUS(cudaFree(d_c)); return 0; }
c52fb29cb7796ddffc1d9a06efd0c0bc980e14ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file nms.cu * \brief NMS Operator * \author Yanghao Li */ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include "../tensor/sort_op.h" #include <map> #include <vector> #include <string> #include <utility> #include <ctime> #include <iterator> #include "../operator_common.h" #include "../mshadow_op.h" #include "./nms-inl.h" #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) #define FRCNN_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { namespace { // copy score and init order // dets (n, 5); score (n, ); order (n, ) // count should be n (total anchors or proposals) template<typename Dtype> __global__ void CopyScoreKernel(const int count, const Dtype* dets, Dtype* score, int* order) { for (int index = blockIdx.x * blockDim.x + 
threadIdx.x; index < count; index += blockDim.x * gridDim.x) { score[index] = dets[index * 5 + 4]; order[index] = index; } } // reorder proposals according to order and keep the top_n proposals // prev_dets (n, 5); order (n, ); dets (n, 5) // count should be output anchor numbers (top_n) template<typename Dtype> __global__ void ReorderProposalsKernel(const int count, const Dtype* prev_dets, const int* order, Dtype* dets) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { const int order_i = order[index]; for (int j = 0; j < 5; j ++) { dets[index * 5 + j] = prev_dets[order_i * 5 + j]; } } } __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, uint64_t *dev_mask) { const int threadsPerBlock = sizeof(uint64_t) * 8; const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + 
threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; uint64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _nms(const mshadow::Tensor<gpu, 2>& boxes, const float nms_overlap_thresh, int *keep, int *num_out, uint64_t *mask_dev, uint64_t *mask_host) { /* @input boxes: (pre_nms_top_n, 5) @return keep @return num_out @tmp mask_dev @tmp mask_host */ const int threadsPerBlock = sizeof(uint64_t) * 8; const int boxes_num = boxes.size(0); const int boxes_dim = boxes.size(1); float* boxes_dev = boxes.dptr_; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); FRCNN_CUDA_CHECK(hipPeekAtLastError()); // TODO: need to be rewritten FRCNN_CUDA_CHECK(hipMemcpy(mask_host, mask_dev, sizeof(uint64_t) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<uint64_t> remv(col_blocks); memset(&remv[0], 0, sizeof(uint64_t) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep[num_to_keep++] = i; uint64_t *p = mask_host + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; } // copy proposals to output // dets (top_n, 5); keep (top_n, ); out 
(top_n, ) // count should be top_n (total anchors or proposals) template<typename Dtype> __global__ void PrepareOutput(const int count, const Dtype* dets, const int* keep, const int out_size, const int batchIdx, Dtype* out, Dtype* score) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { // out[index * 5] = batchIdx; if (index < out_size) { int keep_i = keep[index]; for (int j = 0; j < 4; ++j) { out[index * 4 + j] = dets[keep_i * 5 + j]; } score[index] = dets[keep_i * 5 + 4]; } else { //int keep_i = keep[index % out_size]; for (int j = 0; j < 4; ++j) { out[index * 4 + j] = 0.0f; } score[index] = 0; } } } } // namespace } // namespace cuda } // namespace mshadow namespace mxnet { namespace op { template<typename xpu> class NMSGPUOp : public Operator{ public: explicit NMSGPUOp(NMSParam param) { this->param_ = param; } virtual void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data, const std::vector<TBlob> &aux_states) { using namespace mshadow; using namespace mshadow::expr; using namespace mshadow::cuda; CHECK_EQ(in_data.size(), 1); CHECK_EQ(out_data.size(), 2); CHECK_GT(req.size(), 1); // CHECK_EQ(req[proposal::kOut], kWriteTo); Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 3> proposals = in_data[nms::kBBox].get<xpu, 3, float>(s); // batch_idx, rois_idx, 5(x1, y1, x2, y2, score) Tensor<xpu, 3> out = out_data[nms::kOut].get<xpu, 3, float>(s); // batch_idx, rois_idx, 4(x1, y1, x2, y2) Tensor<xpu, 3> out_score = out_data[nms::kScore].get<xpu, 3, float>(s); // batch_idx, rois_idx, 1(score) uint64_t WORKSPACE_LIMIT = 1024 * 1024 * param_.workspace; // 256 MB should be sufficient Tensor<xpu, 1, uint8_t> workspace = ctx.requested[nms::kTempSpace].get_space_typed<xpu, 1, uint8_t>(Shape1(WORKSPACE_LIMIT), s); uint64_t allocated_bytes = 0ULL; uint64_t allocated_bytes_outside_loop = 0ULL; int nbatch = proposals.size(0); int count 
= proposals.size(1); // set to -1 for max int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count; rpn_pre_nms_top_n = ::min(rpn_pre_nms_top_n, count); int rpn_post_nms_top_n = ::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n); /* copy anchors for all images in batch */ for (int i = 0; i < nbatch; i++) { float* batch_proposals = proposals.dptr_ + i * 5 * count; /* copy score to a continuous memory */ dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock); dim3 dimBlock(kMaxThreadsPerBlock); Tensor<xpu, 1> score(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape1(count)); allocated_bytes += count * sizeof(float); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; Tensor<xpu, 1, int> order(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes), Shape1(count)); allocated_bytes += count * sizeof(int); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; CheckLaunchParam(dimGrid, dimBlock, "CopyScore"); hipLaunchKernelGGL(( CopyScoreKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, count, batch_proposals, score.dptr_, order.dptr_); FRCNN_CUDA_CHECK(hipPeekAtLastError()); if (!param_.already_sorted) { /* argsort score, save order */ thrust::stable_sort_by_key(thrust::device, score.dptr_, score.dptr_ + score.size(0), order.dptr_, thrust::greater<float>()); FRCNN_CUDA_CHECK(hipPeekAtLastError()); } /* Reorder proposals according to order */ Tensor<xpu, 2> ordered_proposals(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape2(rpn_pre_nms_top_n, 5)); allocated_bytes += rpn_pre_nms_top_n * 5 * sizeof(float); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals"); hipLaunchKernelGGL(( ReorderProposalsKernel), dim3(dimGrid), dim3(dimBlock), 0, 
0, rpn_pre_nms_top_n, batch_proposals, order.dptr_, ordered_proposals.dptr_); FRCNN_CUDA_CHECK(hipPeekAtLastError()); /* perform nms */ std::vector<int> _keep(rpn_pre_nms_top_n); int out_size = 0; const int boxes_num = rpn_pre_nms_top_n; const int col_blocks = DIVUP(boxes_num, sizeof(uint64_t) * 8); // take special care when allocate memory of 8-byte alignment. allocated_bytes += allocated_bytes % sizeof(uint64_t); Tensor<xpu, 1, uint64_t> mask_tensor(reinterpret_cast<uint64_t *>(workspace.dptr_ + allocated_bytes), Shape1(boxes_num * col_blocks)); allocated_bytes += boxes_num * col_blocks * sizeof(uint64_t); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; // the following line does not need change since it the only place where requires host workspace Tensor<cpu, 1, uint64_t> mask_host_tensor = ctx.requested[nms::kTempSpace].get_host_space_typed<1, uint64_t>(Shape1(boxes_num * col_blocks)); uint64_t *mask_dev = mask_tensor.dptr_; uint64_t *mask_host = mask_host_tensor.dptr_; _nms(ordered_proposals, param_.threshold, &_keep[0], &out_size, mask_dev, mask_host); /* copy nms result to gpu */ Tensor<xpu, 1, int> keep(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes), Shape1(_keep.size())); allocated_bytes += _keep.size() * sizeof(int); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; FRCNN_CUDA_CHECK(hipMemcpy(keep.dptr_, &_keep[0], sizeof(int) * _keep.size(), hipMemcpyHostToDevice)); // less than 64K /* copy results after nms */ dimGrid.x = (rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput"); hipLaunchKernelGGL(( PrepareOutput), dim3(dimGrid), dim3(dimBlock), 0, 0, rpn_post_nms_top_n, ordered_proposals.dptr_, keep.dptr_, out_size, i, out.dptr_ + i * 4 * rpn_post_nms_top_n, out_score.dptr_ + i * rpn_post_nms_top_n); FRCNN_CUDA_CHECK(hipPeekAtLastError()); // recycle all bytes allocated within loop 
allocated_bytes = allocated_bytes_outside_loop; } } virtual void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &aux_states) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(in_grad.size(), 1); Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 3> gbbox = in_grad[nms::kBBox].get<xpu, 3, real_t>(s); Assign(gbbox, req[nms::kBBox], 0); } private: NMSParam param_; }; // class NMSGPUOp template<> Operator* CreateOp<gpu>(NMSParam param) { return new NMSGPUOp<gpu>(param); } } // namespace op } // namespace mxnet
c52fb29cb7796ddffc1d9a06efd0c0bc980e14ee.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file nms.cu * \brief NMS Operator * \author Yanghao Li */ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include "../tensor/sort_op.h" #include <map> #include <vector> #include <string> #include <utility> #include <ctime> #include <iterator> #include "../operator_common.h" #include "../mshadow_op.h" #include "./nms-inl.h" #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) #define FRCNN_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { namespace { // copy score and init order // dets (n, 5); score (n, ); order (n, ) // count should be n (total anchors or proposals) template<typename Dtype> __global__ void CopyScoreKernel(const int count, const Dtype* dets, Dtype* score, int* order) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { score[index] = dets[index * 
5 + 4]; order[index] = index; } } // reorder proposals according to order and keep the top_n proposals // prev_dets (n, 5); order (n, ); dets (n, 5) // count should be output anchor numbers (top_n) template<typename Dtype> __global__ void ReorderProposalsKernel(const int count, const Dtype* prev_dets, const int* order, Dtype* dets) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { const int order_i = order[index]; for (int j = 0; j < 5; j ++) { dets[index * 5 + j] = prev_dets[order_i * 5 + j]; } } } __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, uint64_t *dev_mask) { const int threadsPerBlock = sizeof(uint64_t) * 8; const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + 
threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; uint64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _nms(const mshadow::Tensor<gpu, 2>& boxes, const float nms_overlap_thresh, int *keep, int *num_out, uint64_t *mask_dev, uint64_t *mask_host) { /* @input boxes: (pre_nms_top_n, 5) @return keep @return num_out @tmp mask_dev @tmp mask_host */ const int threadsPerBlock = sizeof(uint64_t) * 8; const int boxes_num = boxes.size(0); const int boxes_dim = boxes.size(1); float* boxes_dev = boxes.dptr_; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); // TODO: need to be rewritten FRCNN_CUDA_CHECK(cudaMemcpy(mask_host, mask_dev, sizeof(uint64_t) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<uint64_t> remv(col_blocks); memset(&remv[0], 0, sizeof(uint64_t) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep[num_to_keep++] = i; uint64_t *p = mask_host + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; } // copy proposals to output // dets (top_n, 5); keep (top_n, ); out (top_n, ) // count should be top_n (total anchors or proposals) template<typename Dtype> __global__ void PrepareOutput(const int count, 
const Dtype* dets, const int* keep, const int out_size, const int batchIdx, Dtype* out, Dtype* score) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { // out[index * 5] = batchIdx; if (index < out_size) { int keep_i = keep[index]; for (int j = 0; j < 4; ++j) { out[index * 4 + j] = dets[keep_i * 5 + j]; } score[index] = dets[keep_i * 5 + 4]; } else { //int keep_i = keep[index % out_size]; for (int j = 0; j < 4; ++j) { out[index * 4 + j] = 0.0f; } score[index] = 0; } } } } // namespace } // namespace cuda } // namespace mshadow namespace mxnet { namespace op { template<typename xpu> class NMSGPUOp : public Operator{ public: explicit NMSGPUOp(NMSParam param) { this->param_ = param; } virtual void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data, const std::vector<TBlob> &aux_states) { using namespace mshadow; using namespace mshadow::expr; using namespace mshadow::cuda; CHECK_EQ(in_data.size(), 1); CHECK_EQ(out_data.size(), 2); CHECK_GT(req.size(), 1); // CHECK_EQ(req[proposal::kOut], kWriteTo); Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 3> proposals = in_data[nms::kBBox].get<xpu, 3, float>(s); // batch_idx, rois_idx, 5(x1, y1, x2, y2, score) Tensor<xpu, 3> out = out_data[nms::kOut].get<xpu, 3, float>(s); // batch_idx, rois_idx, 4(x1, y1, x2, y2) Tensor<xpu, 3> out_score = out_data[nms::kScore].get<xpu, 3, float>(s); // batch_idx, rois_idx, 1(score) uint64_t WORKSPACE_LIMIT = 1024 * 1024 * param_.workspace; // 256 MB should be sufficient Tensor<xpu, 1, uint8_t> workspace = ctx.requested[nms::kTempSpace].get_space_typed<xpu, 1, uint8_t>(Shape1(WORKSPACE_LIMIT), s); uint64_t allocated_bytes = 0ULL; uint64_t allocated_bytes_outside_loop = 0ULL; int nbatch = proposals.size(0); int count = proposals.size(1); // set to -1 for max int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? 
param_.rpn_pre_nms_top_n : count; rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count); int rpn_post_nms_top_n = std::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n); /* copy anchors for all images in batch */ for (int i = 0; i < nbatch; i++) { float* batch_proposals = proposals.dptr_ + i * 5 * count; /* copy score to a continuous memory */ dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock); dim3 dimBlock(kMaxThreadsPerBlock); Tensor<xpu, 1> score(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape1(count)); allocated_bytes += count * sizeof(float); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; Tensor<xpu, 1, int> order(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes), Shape1(count)); allocated_bytes += count * sizeof(int); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; CheckLaunchParam(dimGrid, dimBlock, "CopyScore"); CopyScoreKernel<<<dimGrid, dimBlock>>>( count, batch_proposals, score.dptr_, order.dptr_); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); if (!param_.already_sorted) { /* argsort score, save order */ thrust::stable_sort_by_key(thrust::device, score.dptr_, score.dptr_ + score.size(0), order.dptr_, thrust::greater<float>()); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); } /* Reorder proposals according to order */ Tensor<xpu, 2> ordered_proposals(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape2(rpn_pre_nms_top_n, 5)); allocated_bytes += rpn_pre_nms_top_n * 5 * sizeof(float); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals"); ReorderProposalsKernel<<<dimGrid, dimBlock>>>( rpn_pre_nms_top_n, batch_proposals, order.dptr_, ordered_proposals.dptr_); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); /* perform nms */ std::vector<int> 
_keep(rpn_pre_nms_top_n); int out_size = 0; const int boxes_num = rpn_pre_nms_top_n; const int col_blocks = DIVUP(boxes_num, sizeof(uint64_t) * 8); // take special care when allocate memory of 8-byte alignment. allocated_bytes += allocated_bytes % sizeof(uint64_t); Tensor<xpu, 1, uint64_t> mask_tensor(reinterpret_cast<uint64_t *>(workspace.dptr_ + allocated_bytes), Shape1(boxes_num * col_blocks)); allocated_bytes += boxes_num * col_blocks * sizeof(uint64_t); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; // the following line does not need change since it the only place where requires host workspace Tensor<cpu, 1, uint64_t> mask_host_tensor = ctx.requested[nms::kTempSpace].get_host_space_typed<1, uint64_t>(Shape1(boxes_num * col_blocks)); uint64_t *mask_dev = mask_tensor.dptr_; uint64_t *mask_host = mask_host_tensor.dptr_; _nms(ordered_proposals, param_.threshold, &_keep[0], &out_size, mask_dev, mask_host); /* copy nms result to gpu */ Tensor<xpu, 1, int> keep(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes), Shape1(_keep.size())); allocated_bytes += _keep.size() * sizeof(int); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; FRCNN_CUDA_CHECK(cudaMemcpy(keep.dptr_, &_keep[0], sizeof(int) * _keep.size(), cudaMemcpyHostToDevice)); // less than 64K /* copy results after nms */ dimGrid.x = (rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput"); PrepareOutput<<<dimGrid, dimBlock>>>( rpn_post_nms_top_n, ordered_proposals.dptr_, keep.dptr_, out_size, i, out.dptr_ + i * 4 * rpn_post_nms_top_n, out_score.dptr_ + i * rpn_post_nms_top_n); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); // recycle all bytes allocated within loop allocated_bytes = allocated_bytes_outside_loop; } } virtual void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &in_data, const std::vector<TBlob> 
&out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &aux_states) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(in_grad.size(), 1); Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 3> gbbox = in_grad[nms::kBBox].get<xpu, 3, real_t>(s); Assign(gbbox, req[nms::kBBox], 0); } private: NMSParam param_; }; // class NMSGPUOp template<> Operator* CreateOp<gpu>(NMSParam param) { return new NMSGPUOp<gpu>(param); } } // namespace op } // namespace mxnet
32f8e3e86209c142ef72de5ebf674e64bf915d07.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<cuda.h> #define arraySize 5 #define threadPerBlock 5 __global__ void addKernel(int* d_a, int* d_b) { int count = 0; int tid = threadIdx.x; int ttid = blockIdx.x * threadPerBlock + tid; int val = d_a[ttid]; __shared__ int cache[threadPerBlock]; for (int i = tid; i < arraySize; i += threadPerBlock) { cache[tid] = d_a[i]; __syncthreads(); for (int j = 0; j < threadPerBlock; j++) { if (val > cache[j]) { count++; } } __syncthreads(); } d_b[count] = val; } int main() { int h_a[arraySize] = {3434 , 942, 3234234, 23424, 3438 }; int h_b[arraySize]; int* d_a, * d_b; hipMalloc((void**)&d_b, arraySize * sizeof(int)); hipMalloc((void**)&d_a, arraySize * sizeof(int)); hipMemcpy(d_a, h_a, arraySize * sizeof(int), hipMemcpyHostToDevice); addKernel << <arraySize / threadPerBlock, threadPerBlock >> > (d_a, d_b); hipDeviceSynchronize(); hipMemcpy(h_b, d_b, arraySize * sizeof(int), hipMemcpyDeviceToHost); printf("The Enumeration sorted Array is: \n"); for (int i = 0; i < arraySize; i++) { printf("%d\n", h_b[i]); } hipFree(d_a); hipFree(d_b); return 0; }
32f8e3e86209c142ef72de5ebf674e64bf915d07.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<cuda.h> #define arraySize 5 #define threadPerBlock 5 __global__ void addKernel(int* d_a, int* d_b) { int count = 0; int tid = threadIdx.x; int ttid = blockIdx.x * threadPerBlock + tid; int val = d_a[ttid]; __shared__ int cache[threadPerBlock]; for (int i = tid; i < arraySize; i += threadPerBlock) { cache[tid] = d_a[i]; __syncthreads(); for (int j = 0; j < threadPerBlock; j++) { if (val > cache[j]) { count++; } } __syncthreads(); } d_b[count] = val; } int main() { int h_a[arraySize] = {3434 , 942, 3234234, 23424, 3438 }; int h_b[arraySize]; int* d_a, * d_b; cudaMalloc((void**)&d_b, arraySize * sizeof(int)); cudaMalloc((void**)&d_a, arraySize * sizeof(int)); cudaMemcpy(d_a, h_a, arraySize * sizeof(int), cudaMemcpyHostToDevice); addKernel << <arraySize / threadPerBlock, threadPerBlock >> > (d_a, d_b); cudaDeviceSynchronize(); cudaMemcpy(h_b, d_b, arraySize * sizeof(int), cudaMemcpyDeviceToHost); printf("The Enumeration sorted Array is: \n"); for (int i = 0; i < arraySize; i++) { printf("%d\n", h_b[i]); } cudaFree(d_a); cudaFree(d_b); return 0; }
48aee64b24ca24701b6a98b3bcce08be7038afba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "FieldStructureCompactClass.h" BEG_METIL_NAMESPACE void FieldStructureCompactClass::update_ptr_cpu_load( ST off ) { (char *&)patterns.types.data_ += off; for( ST i = 0; i < patterns.types.size_; ++i ) { (char *&)patterns.types.data_[ i ].permutation.data_ += off; (char *&)patterns.types.data_[ i ].sides.data_ += off; for( ST j = 0; j < patterns.types.data_[ i ].sides.size_; ++j ) { (char *&)patterns.types.data_[ i ].sides.data_[ j ].data_ += off; } } (char *&)group_elements.data_ += off; for( ST i = 0; i < group_elements.size_; ++i ) { (char *&)group_elements.data_[ i ].pt.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].pt.size_; ++j ) { (char *&)group_elements.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].mat_prop.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].mat_prop.size_; ++j ) { (char *&)group_elements.data_[ i ].mat_prop.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].volumic_force.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].volumic_force.size_; ++j ) { (char *&)group_elements.data_[ i ].volumic_force.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].mat_elem.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].mat_elem.size_; ++j ) { (char *&)group_elements.data_[ i ].mat_elem.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].size.data_ += off; } (char *&)group_interfaces.data_ += off; for( ST i = 0; i < group_interfaces.size_; ++i ) { (char *&)group_interfaces.data_[ i ].pt.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].pt.size_; ++j ) { (char *&)group_interfaces.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].link_prop.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].link_prop.size_; ++j ) { (char *&)group_interfaces.data_[ i ].link_prop.data_[ j ].data_ += off; } (char 
*&)group_interfaces.data_[ i ].BC_step_prop.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].BC_step_prop.size_; ++j ) { (char *&)group_interfaces.data_[ i ].BC_step_prop.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].bc.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].bc.size_; ++j ) { (char *&)group_interfaces.data_[ i ].bc.data_[ j ].data_ += off; } } } __global__ void FieldStructureCompactClass__update_ptr_gpu_load( FieldStructureCompactClass *obj, ST off ) { (char *&)obj->patterns.types.data_ += off; for( ST i = 0; i < obj->patterns.types.size_; ++i ) { (char *&)obj->patterns.types.data_[ i ].permutation.data_ += off; (char *&)obj->patterns.types.data_[ i ].sides.data_ += off; for( ST j = 0; j < obj->patterns.types.data_[ i ].sides.size_; ++j ) { (char *&)obj->patterns.types.data_[ i ].sides.data_[ j ].data_ += off; } } (char *&)obj->group_elements.data_ += off; for( ST i = 0; i < obj->group_elements.size_; ++i ) { (char *&)obj->group_elements.data_[ i ].pt.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].pt.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].mat_prop.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].mat_prop.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].mat_prop.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].volumic_force.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].volumic_force.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].volumic_force.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].mat_elem.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].mat_elem.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].mat_elem.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].size.data_ += off; } (char *&)obj->group_interfaces.data_ += off; for( ST i = 0; i < obj->group_interfaces.size_; ++i ) { (char 
*&)obj->group_interfaces.data_[ i ].pt.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].pt.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].link_prop.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].link_prop.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].link_prop.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].BC_step_prop.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].BC_step_prop.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].BC_step_prop.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].bc.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].bc.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].bc.data_[ j ].data_ += off; } } } void FieldStructureCompactClass::update_ptr_gpu_load( ST off ) { hipLaunchKernelGGL(( FieldStructureCompactClass__update_ptr_gpu_load), dim3(1),dim3(1), 0, 0, this, off ); } void FieldStructureCompactClass::update_ptr_cpu_save( ST off ) { for( ST i = 0; i < patterns.types.size_; ++i ) { (char *&)patterns.types.data_[ i ].permutation.data_ += off; for( ST j = 0; j < patterns.types.data_[ i ].sides.size_; ++j ) { (char *&)patterns.types.data_[ i ].sides.data_[ j ].data_ += off; } (char *&)patterns.types.data_[ i ].sides.data_ += off; } (char *&)patterns.types.data_ += off; for( ST i = 0; i < group_elements.size_; ++i ) { for( ST j = 0; j < group_elements.data_[ i ].pt.size_; ++j ) { (char *&)group_elements.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].pt.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].mat_prop.size_; ++j ) { (char *&)group_elements.data_[ i ].mat_prop.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].mat_prop.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].volumic_force.size_; ++j ) { (char *&)group_elements.data_[ i ].volumic_force.data_[ j ].data_ 
+= off; } (char *&)group_elements.data_[ i ].volumic_force.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].mat_elem.size_; ++j ) { (char *&)group_elements.data_[ i ].mat_elem.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].mat_elem.data_ += off; (char *&)group_elements.data_[ i ].size.data_ += off; } (char *&)group_elements.data_ += off; for( ST i = 0; i < group_interfaces.size_; ++i ) { for( ST j = 0; j < group_interfaces.data_[ i ].pt.size_; ++j ) { (char *&)group_interfaces.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].pt.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].link_prop.size_; ++j ) { (char *&)group_interfaces.data_[ i ].link_prop.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].link_prop.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].BC_step_prop.size_; ++j ) { (char *&)group_interfaces.data_[ i ].BC_step_prop.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].BC_step_prop.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].bc.size_; ++j ) { (char *&)group_interfaces.data_[ i ].bc.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].bc.data_ += off; } (char *&)group_interfaces.data_ += off; } __global__ void FieldStructureCompactClass__update_ptr_gpu_save( FieldStructureCompactClass *obj, ST off ) { for( ST i = 0; i < obj->patterns.types.size_; ++i ) { (char *&)obj->patterns.types.data_[ i ].permutation.data_ += off; for( ST j = 0; j < obj->patterns.types.data_[ i ].sides.size_; ++j ) { (char *&)obj->patterns.types.data_[ i ].sides.data_[ j ].data_ += off; } (char *&)obj->patterns.types.data_[ i ].sides.data_ += off; } (char *&)obj->patterns.types.data_ += off; for( ST i = 0; i < obj->group_elements.size_; ++i ) { for( ST j = 0; j < obj->group_elements.data_[ i ].pt.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].pt.data_ += off; for( ST j = 0; j 
< obj->group_elements.data_[ i ].mat_prop.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].mat_prop.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].mat_prop.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].volumic_force.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].volumic_force.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].volumic_force.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].mat_elem.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].mat_elem.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].mat_elem.data_ += off; (char *&)obj->group_elements.data_[ i ].size.data_ += off; } (char *&)obj->group_elements.data_ += off; for( ST i = 0; i < obj->group_interfaces.size_; ++i ) { for( ST j = 0; j < obj->group_interfaces.data_[ i ].pt.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].pt.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].link_prop.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].link_prop.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].link_prop.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].BC_step_prop.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].BC_step_prop.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].BC_step_prop.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].bc.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].bc.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].bc.data_ += off; } (char *&)obj->group_interfaces.data_ += off; } void FieldStructureCompactClass::update_ptr_gpu_save( ST off ) { hipLaunchKernelGGL(( FieldStructureCompactClass__update_ptr_gpu_save), dim3(1),dim3(1), 0, 0, this, off ); } void FieldStructureCompactClass::Patterns::update_ptr_cpu_load( ST off ) { (char *&)types.data_ += off; for( ST i = 0; i < 
types.size_; ++i ) { (char *&)types.data_[ i ].permutation.data_ += off; (char *&)types.data_[ i ].sides.data_ += off; for( ST j = 0; j < types.data_[ i ].sides.size_; ++j ) { (char *&)types.data_[ i ].sides.data_[ j ].data_ += off; } } } __global__ void FieldStructureCompactClass__Patterns__update_ptr_gpu_load( FieldStructureCompactClass::Patterns *obj, ST off ) { (char *&)obj->types.data_ += off; for( ST i = 0; i < obj->types.size_; ++i ) { (char *&)obj->types.data_[ i ].permutation.data_ += off; (char *&)obj->types.data_[ i ].sides.data_ += off; for( ST j = 0; j < obj->types.data_[ i ].sides.size_; ++j ) { (char *&)obj->types.data_[ i ].sides.data_[ j ].data_ += off; } } } void FieldStructureCompactClass::Patterns::update_ptr_gpu_load( ST off ) { hipLaunchKernelGGL(( FieldStructureCompactClass__Patterns__update_ptr_gpu_load), dim3(1),dim3(1), 0, 0, this, off ); } void FieldStructureCompactClass::Patterns::update_ptr_cpu_save( ST off ) { for( ST i = 0; i < types.size_; ++i ) { (char *&)types.data_[ i ].permutation.data_ += off; for( ST j = 0; j < types.data_[ i ].sides.size_; ++j ) { (char *&)types.data_[ i ].sides.data_[ j ].data_ += off; } (char *&)types.data_[ i ].sides.data_ += off; } (char *&)types.data_ += off; } __global__ void FieldStructureCompactClass__Patterns__update_ptr_gpu_save( FieldStructureCompactClass::Patterns *obj, ST off ) { for( ST i = 0; i < obj->types.size_; ++i ) { (char *&)obj->types.data_[ i ].permutation.data_ += off; for( ST j = 0; j < obj->types.data_[ i ].sides.size_; ++j ) { (char *&)obj->types.data_[ i ].sides.data_[ j ].data_ += off; } (char *&)obj->types.data_[ i ].sides.data_ += off; } (char *&)obj->types.data_ += off; } void FieldStructureCompactClass::Patterns::update_ptr_gpu_save( ST off ) { hipLaunchKernelGGL(( FieldStructureCompactClass__Patterns__update_ptr_gpu_save), dim3(1),dim3(1), 0, 0, this, off ); } void FieldStructureCompactClass::Patterns::Types::update_ptr_cpu_load( ST off ) { (char *&)permutation.data_ += off; 
(char *&)sides.data_ += off; for( ST i = 0; i < sides.size_; ++i ) { (char *&)sides.data_[ i ].data_ += off; } } __global__ void FieldStructureCompactClass__Patterns__Types__update_ptr_gpu_load( FieldStructureCompactClass::Patterns::Types *obj, ST off ) { (char *&)obj->permutation.data_ += off; (char *&)obj->sides.data_ += off; for( ST i = 0; i < obj->sides.size_; ++i ) { (char *&)obj->sides.data_[ i ].data_ += off; } } void FieldStructureCompactClass::Patterns::Types::update_ptr_gpu_load( ST off ) { hipLaunchKernelGGL(( FieldStructureCompactClass__Patterns__Types__update_ptr_gpu_load), dim3(1),dim3(1), 0, 0, this, off ); } void FieldStructureCompactClass::Patterns::Types::update_ptr_cpu_save( ST off ) { (char *&)permutation.data_ += off; for( ST i = 0; i < sides.size_; ++i ) { (char *&)sides.data_[ i ].data_ += off; } (char *&)sides.data_ += off; } __global__ void FieldStructureCompactClass__Patterns__Types__update_ptr_gpu_save( FieldStructureCompactClass::Patterns::Types *obj, ST off ) { (char *&)obj->permutation.data_ += off; for( ST i = 0; i < obj->sides.size_; ++i ) { (char *&)obj->sides.data_[ i ].data_ += off; } (char *&)obj->sides.data_ += off; } void FieldStructureCompactClass::Patterns::Types::update_ptr_gpu_save( ST off ) { hipLaunchKernelGGL(( FieldStructureCompactClass__Patterns__Types__update_ptr_gpu_save), dim3(1),dim3(1), 0, 0, this, off ); } void FieldStructureCompactClass::GroupFieldStructureElements::update_ptr_cpu_load( ST off ) { (char *&)pt.data_ += off; for( ST i = 0; i < pt.size_; ++i ) { (char *&)pt.data_[ i ].data_ += off; } (char *&)mat_prop.data_ += off; for( ST i = 0; i < mat_prop.size_; ++i ) { (char *&)mat_prop.data_[ i ].data_ += off; } (char *&)volumic_force.data_ += off; for( ST i = 0; i < volumic_force.size_; ++i ) { (char *&)volumic_force.data_[ i ].data_ += off; } (char *&)mat_elem.data_ += off; for( ST i = 0; i < mat_elem.size_; ++i ) { (char *&)mat_elem.data_[ i ].data_ += off; } (char *&)size.data_ += off; } __global__ void 
FieldStructureCompactClass__GroupFieldStructureElements__update_ptr_gpu_load( FieldStructureCompactClass::GroupFieldStructureElements *obj, ST off ) { (char *&)obj->pt.data_ += off; for( ST i = 0; i < obj->pt.size_; ++i ) { (char *&)obj->pt.data_[ i ].data_ += off; } (char *&)obj->mat_prop.data_ += off; for( ST i = 0; i < obj->mat_prop.size_; ++i ) { (char *&)obj->mat_prop.data_[ i ].data_ += off; } (char *&)obj->volumic_force.data_ += off; for( ST i = 0; i < obj->volumic_force.size_; ++i ) { (char *&)obj->volumic_force.data_[ i ].data_ += off; } (char *&)obj->mat_elem.data_ += off; for( ST i = 0; i < obj->mat_elem.size_; ++i ) { (char *&)obj->mat_elem.data_[ i ].data_ += off; } (char *&)obj->size.data_ += off; } void FieldStructureCompactClass::GroupFieldStructureElements::update_ptr_gpu_load( ST off ) { hipLaunchKernelGGL(( FieldStructureCompactClass__GroupFieldStructureElements__update_ptr_gpu_load), dim3(1),dim3(1), 0, 0, this, off ); } void FieldStructureCompactClass::GroupFieldStructureElements::update_ptr_cpu_save( ST off ) { for( ST i = 0; i < pt.size_; ++i ) { (char *&)pt.data_[ i ].data_ += off; } (char *&)pt.data_ += off; for( ST i = 0; i < mat_prop.size_; ++i ) { (char *&)mat_prop.data_[ i ].data_ += off; } (char *&)mat_prop.data_ += off; for( ST i = 0; i < volumic_force.size_; ++i ) { (char *&)volumic_force.data_[ i ].data_ += off; } (char *&)volumic_force.data_ += off; for( ST i = 0; i < mat_elem.size_; ++i ) { (char *&)mat_elem.data_[ i ].data_ += off; } (char *&)mat_elem.data_ += off; (char *&)size.data_ += off; } __global__ void FieldStructureCompactClass__GroupFieldStructureElements__update_ptr_gpu_save( FieldStructureCompactClass::GroupFieldStructureElements *obj, ST off ) { for( ST i = 0; i < obj->pt.size_; ++i ) { (char *&)obj->pt.data_[ i ].data_ += off; } (char *&)obj->pt.data_ += off; for( ST i = 0; i < obj->mat_prop.size_; ++i ) { (char *&)obj->mat_prop.data_[ i ].data_ += off; } (char *&)obj->mat_prop.data_ += off; for( ST i = 0; i < 
obj->volumic_force.size_; ++i ) { (char *&)obj->volumic_force.data_[ i ].data_ += off; } (char *&)obj->volumic_force.data_ += off; for( ST i = 0; i < obj->mat_elem.size_; ++i ) { (char *&)obj->mat_elem.data_[ i ].data_ += off; } (char *&)obj->mat_elem.data_ += off; (char *&)obj->size.data_ += off; } void FieldStructureCompactClass::GroupFieldStructureElements::update_ptr_gpu_save( ST off ) { hipLaunchKernelGGL(( FieldStructureCompactClass__GroupFieldStructureElements__update_ptr_gpu_save), dim3(1),dim3(1), 0, 0, this, off ); } void FieldStructureCompactClass::GroupFieldStructureInterfaces::update_ptr_cpu_load( ST off ) { (char *&)pt.data_ += off; for( ST i = 0; i < pt.size_; ++i ) { (char *&)pt.data_[ i ].data_ += off; } (char *&)link_prop.data_ += off; for( ST i = 0; i < link_prop.size_; ++i ) { (char *&)link_prop.data_[ i ].data_ += off; } (char *&)BC_step_prop.data_ += off; for( ST i = 0; i < BC_step_prop.size_; ++i ) { (char *&)BC_step_prop.data_[ i ].data_ += off; } (char *&)bc.data_ += off; for( ST i = 0; i < bc.size_; ++i ) { (char *&)bc.data_[ i ].data_ += off; } } __global__ void FieldStructureCompactClass__GroupFieldStructureInterfaces__update_ptr_gpu_load( FieldStructureCompactClass::GroupFieldStructureInterfaces *obj, ST off ) { (char *&)obj->pt.data_ += off; for( ST i = 0; i < obj->pt.size_; ++i ) { (char *&)obj->pt.data_[ i ].data_ += off; } (char *&)obj->link_prop.data_ += off; for( ST i = 0; i < obj->link_prop.size_; ++i ) { (char *&)obj->link_prop.data_[ i ].data_ += off; } (char *&)obj->BC_step_prop.data_ += off; for( ST i = 0; i < obj->BC_step_prop.size_; ++i ) { (char *&)obj->BC_step_prop.data_[ i ].data_ += off; } (char *&)obj->bc.data_ += off; for( ST i = 0; i < obj->bc.size_; ++i ) { (char *&)obj->bc.data_[ i ].data_ += off; } } void FieldStructureCompactClass::GroupFieldStructureInterfaces::update_ptr_gpu_load( ST off ) { hipLaunchKernelGGL(( FieldStructureCompactClass__GroupFieldStructureInterfaces__update_ptr_gpu_load), dim3(1),dim3(1), 0, 
0, this, off ); } void FieldStructureCompactClass::GroupFieldStructureInterfaces::update_ptr_cpu_save( ST off ) { for( ST i = 0; i < pt.size_; ++i ) { (char *&)pt.data_[ i ].data_ += off; } (char *&)pt.data_ += off; for( ST i = 0; i < link_prop.size_; ++i ) { (char *&)link_prop.data_[ i ].data_ += off; } (char *&)link_prop.data_ += off; for( ST i = 0; i < BC_step_prop.size_; ++i ) { (char *&)BC_step_prop.data_[ i ].data_ += off; } (char *&)BC_step_prop.data_ += off; for( ST i = 0; i < bc.size_; ++i ) { (char *&)bc.data_[ i ].data_ += off; } (char *&)bc.data_ += off; } __global__ void FieldStructureCompactClass__GroupFieldStructureInterfaces__update_ptr_gpu_save( FieldStructureCompactClass::GroupFieldStructureInterfaces *obj, ST off ) { for( ST i = 0; i < obj->pt.size_; ++i ) { (char *&)obj->pt.data_[ i ].data_ += off; } (char *&)obj->pt.data_ += off; for( ST i = 0; i < obj->link_prop.size_; ++i ) { (char *&)obj->link_prop.data_[ i ].data_ += off; } (char *&)obj->link_prop.data_ += off; for( ST i = 0; i < obj->BC_step_prop.size_; ++i ) { (char *&)obj->BC_step_prop.data_[ i ].data_ += off; } (char *&)obj->BC_step_prop.data_ += off; for( ST i = 0; i < obj->bc.size_; ++i ) { (char *&)obj->bc.data_[ i ].data_ += off; } (char *&)obj->bc.data_ += off; } void FieldStructureCompactClass::GroupFieldStructureInterfaces::update_ptr_gpu_save( ST off ) { hipLaunchKernelGGL(( FieldStructureCompactClass__GroupFieldStructureInterfaces__update_ptr_gpu_save), dim3(1),dim3(1), 0, 0, this, off ); } END_METIL_NAMESPACE
48aee64b24ca24701b6a98b3bcce08be7038afba.cu
#include "FieldStructureCompactClass.h" BEG_METIL_NAMESPACE void FieldStructureCompactClass::update_ptr_cpu_load( ST off ) { (char *&)patterns.types.data_ += off; for( ST i = 0; i < patterns.types.size_; ++i ) { (char *&)patterns.types.data_[ i ].permutation.data_ += off; (char *&)patterns.types.data_[ i ].sides.data_ += off; for( ST j = 0; j < patterns.types.data_[ i ].sides.size_; ++j ) { (char *&)patterns.types.data_[ i ].sides.data_[ j ].data_ += off; } } (char *&)group_elements.data_ += off; for( ST i = 0; i < group_elements.size_; ++i ) { (char *&)group_elements.data_[ i ].pt.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].pt.size_; ++j ) { (char *&)group_elements.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].mat_prop.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].mat_prop.size_; ++j ) { (char *&)group_elements.data_[ i ].mat_prop.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].volumic_force.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].volumic_force.size_; ++j ) { (char *&)group_elements.data_[ i ].volumic_force.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].mat_elem.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].mat_elem.size_; ++j ) { (char *&)group_elements.data_[ i ].mat_elem.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].size.data_ += off; } (char *&)group_interfaces.data_ += off; for( ST i = 0; i < group_interfaces.size_; ++i ) { (char *&)group_interfaces.data_[ i ].pt.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].pt.size_; ++j ) { (char *&)group_interfaces.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].link_prop.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].link_prop.size_; ++j ) { (char *&)group_interfaces.data_[ i ].link_prop.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].BC_step_prop.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i 
].BC_step_prop.size_; ++j ) { (char *&)group_interfaces.data_[ i ].BC_step_prop.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].bc.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].bc.size_; ++j ) { (char *&)group_interfaces.data_[ i ].bc.data_[ j ].data_ += off; } } } __global__ void FieldStructureCompactClass__update_ptr_gpu_load( FieldStructureCompactClass *obj, ST off ) { (char *&)obj->patterns.types.data_ += off; for( ST i = 0; i < obj->patterns.types.size_; ++i ) { (char *&)obj->patterns.types.data_[ i ].permutation.data_ += off; (char *&)obj->patterns.types.data_[ i ].sides.data_ += off; for( ST j = 0; j < obj->patterns.types.data_[ i ].sides.size_; ++j ) { (char *&)obj->patterns.types.data_[ i ].sides.data_[ j ].data_ += off; } } (char *&)obj->group_elements.data_ += off; for( ST i = 0; i < obj->group_elements.size_; ++i ) { (char *&)obj->group_elements.data_[ i ].pt.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].pt.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].mat_prop.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].mat_prop.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].mat_prop.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].volumic_force.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].volumic_force.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].volumic_force.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].mat_elem.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].mat_elem.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].mat_elem.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].size.data_ += off; } (char *&)obj->group_interfaces.data_ += off; for( ST i = 0; i < obj->group_interfaces.size_; ++i ) { (char *&)obj->group_interfaces.data_[ i ].pt.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i 
].pt.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].link_prop.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].link_prop.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].link_prop.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].BC_step_prop.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].BC_step_prop.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].BC_step_prop.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].bc.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].bc.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].bc.data_[ j ].data_ += off; } } } void FieldStructureCompactClass::update_ptr_gpu_load( ST off ) { FieldStructureCompactClass__update_ptr_gpu_load<<<1,1>>>( this, off ); } void FieldStructureCompactClass::update_ptr_cpu_save( ST off ) { for( ST i = 0; i < patterns.types.size_; ++i ) { (char *&)patterns.types.data_[ i ].permutation.data_ += off; for( ST j = 0; j < patterns.types.data_[ i ].sides.size_; ++j ) { (char *&)patterns.types.data_[ i ].sides.data_[ j ].data_ += off; } (char *&)patterns.types.data_[ i ].sides.data_ += off; } (char *&)patterns.types.data_ += off; for( ST i = 0; i < group_elements.size_; ++i ) { for( ST j = 0; j < group_elements.data_[ i ].pt.size_; ++j ) { (char *&)group_elements.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].pt.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].mat_prop.size_; ++j ) { (char *&)group_elements.data_[ i ].mat_prop.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].mat_prop.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].volumic_force.size_; ++j ) { (char *&)group_elements.data_[ i ].volumic_force.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].volumic_force.data_ += off; for( ST j = 0; j < group_elements.data_[ i ].mat_elem.size_; ++j ) 
{ (char *&)group_elements.data_[ i ].mat_elem.data_[ j ].data_ += off; } (char *&)group_elements.data_[ i ].mat_elem.data_ += off; (char *&)group_elements.data_[ i ].size.data_ += off; } (char *&)group_elements.data_ += off; for( ST i = 0; i < group_interfaces.size_; ++i ) { for( ST j = 0; j < group_interfaces.data_[ i ].pt.size_; ++j ) { (char *&)group_interfaces.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].pt.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].link_prop.size_; ++j ) { (char *&)group_interfaces.data_[ i ].link_prop.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].link_prop.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].BC_step_prop.size_; ++j ) { (char *&)group_interfaces.data_[ i ].BC_step_prop.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].BC_step_prop.data_ += off; for( ST j = 0; j < group_interfaces.data_[ i ].bc.size_; ++j ) { (char *&)group_interfaces.data_[ i ].bc.data_[ j ].data_ += off; } (char *&)group_interfaces.data_[ i ].bc.data_ += off; } (char *&)group_interfaces.data_ += off; } __global__ void FieldStructureCompactClass__update_ptr_gpu_save( FieldStructureCompactClass *obj, ST off ) { for( ST i = 0; i < obj->patterns.types.size_; ++i ) { (char *&)obj->patterns.types.data_[ i ].permutation.data_ += off; for( ST j = 0; j < obj->patterns.types.data_[ i ].sides.size_; ++j ) { (char *&)obj->patterns.types.data_[ i ].sides.data_[ j ].data_ += off; } (char *&)obj->patterns.types.data_[ i ].sides.data_ += off; } (char *&)obj->patterns.types.data_ += off; for( ST i = 0; i < obj->group_elements.size_; ++i ) { for( ST j = 0; j < obj->group_elements.data_[ i ].pt.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].pt.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].mat_prop.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].mat_prop.data_[ j ].data_ += off; } (char 
*&)obj->group_elements.data_[ i ].mat_prop.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].volumic_force.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].volumic_force.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].volumic_force.data_ += off; for( ST j = 0; j < obj->group_elements.data_[ i ].mat_elem.size_; ++j ) { (char *&)obj->group_elements.data_[ i ].mat_elem.data_[ j ].data_ += off; } (char *&)obj->group_elements.data_[ i ].mat_elem.data_ += off; (char *&)obj->group_elements.data_[ i ].size.data_ += off; } (char *&)obj->group_elements.data_ += off; for( ST i = 0; i < obj->group_interfaces.size_; ++i ) { for( ST j = 0; j < obj->group_interfaces.data_[ i ].pt.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].pt.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].pt.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].link_prop.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].link_prop.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].link_prop.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].BC_step_prop.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].BC_step_prop.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].BC_step_prop.data_ += off; for( ST j = 0; j < obj->group_interfaces.data_[ i ].bc.size_; ++j ) { (char *&)obj->group_interfaces.data_[ i ].bc.data_[ j ].data_ += off; } (char *&)obj->group_interfaces.data_[ i ].bc.data_ += off; } (char *&)obj->group_interfaces.data_ += off; } void FieldStructureCompactClass::update_ptr_gpu_save( ST off ) { FieldStructureCompactClass__update_ptr_gpu_save<<<1,1>>>( this, off ); } void FieldStructureCompactClass::Patterns::update_ptr_cpu_load( ST off ) { (char *&)types.data_ += off; for( ST i = 0; i < types.size_; ++i ) { (char *&)types.data_[ i ].permutation.data_ += off; (char *&)types.data_[ i ].sides.data_ += off; for( ST j = 0; j < types.data_[ i ].sides.size_; ++j ) { 
(char *&)types.data_[ i ].sides.data_[ j ].data_ += off; } } } __global__ void FieldStructureCompactClass__Patterns__update_ptr_gpu_load( FieldStructureCompactClass::Patterns *obj, ST off ) { (char *&)obj->types.data_ += off; for( ST i = 0; i < obj->types.size_; ++i ) { (char *&)obj->types.data_[ i ].permutation.data_ += off; (char *&)obj->types.data_[ i ].sides.data_ += off; for( ST j = 0; j < obj->types.data_[ i ].sides.size_; ++j ) { (char *&)obj->types.data_[ i ].sides.data_[ j ].data_ += off; } } } void FieldStructureCompactClass::Patterns::update_ptr_gpu_load( ST off ) { FieldStructureCompactClass__Patterns__update_ptr_gpu_load<<<1,1>>>( this, off ); } void FieldStructureCompactClass::Patterns::update_ptr_cpu_save( ST off ) { for( ST i = 0; i < types.size_; ++i ) { (char *&)types.data_[ i ].permutation.data_ += off; for( ST j = 0; j < types.data_[ i ].sides.size_; ++j ) { (char *&)types.data_[ i ].sides.data_[ j ].data_ += off; } (char *&)types.data_[ i ].sides.data_ += off; } (char *&)types.data_ += off; } __global__ void FieldStructureCompactClass__Patterns__update_ptr_gpu_save( FieldStructureCompactClass::Patterns *obj, ST off ) { for( ST i = 0; i < obj->types.size_; ++i ) { (char *&)obj->types.data_[ i ].permutation.data_ += off; for( ST j = 0; j < obj->types.data_[ i ].sides.size_; ++j ) { (char *&)obj->types.data_[ i ].sides.data_[ j ].data_ += off; } (char *&)obj->types.data_[ i ].sides.data_ += off; } (char *&)obj->types.data_ += off; } void FieldStructureCompactClass::Patterns::update_ptr_gpu_save( ST off ) { FieldStructureCompactClass__Patterns__update_ptr_gpu_save<<<1,1>>>( this, off ); } void FieldStructureCompactClass::Patterns::Types::update_ptr_cpu_load( ST off ) { (char *&)permutation.data_ += off; (char *&)sides.data_ += off; for( ST i = 0; i < sides.size_; ++i ) { (char *&)sides.data_[ i ].data_ += off; } } __global__ void FieldStructureCompactClass__Patterns__Types__update_ptr_gpu_load( FieldStructureCompactClass::Patterns::Types *obj, ST 
off ) { (char *&)obj->permutation.data_ += off; (char *&)obj->sides.data_ += off; for( ST i = 0; i < obj->sides.size_; ++i ) { (char *&)obj->sides.data_[ i ].data_ += off; } } void FieldStructureCompactClass::Patterns::Types::update_ptr_gpu_load( ST off ) { FieldStructureCompactClass__Patterns__Types__update_ptr_gpu_load<<<1,1>>>( this, off ); } void FieldStructureCompactClass::Patterns::Types::update_ptr_cpu_save( ST off ) { (char *&)permutation.data_ += off; for( ST i = 0; i < sides.size_; ++i ) { (char *&)sides.data_[ i ].data_ += off; } (char *&)sides.data_ += off; } __global__ void FieldStructureCompactClass__Patterns__Types__update_ptr_gpu_save( FieldStructureCompactClass::Patterns::Types *obj, ST off ) { (char *&)obj->permutation.data_ += off; for( ST i = 0; i < obj->sides.size_; ++i ) { (char *&)obj->sides.data_[ i ].data_ += off; } (char *&)obj->sides.data_ += off; } void FieldStructureCompactClass::Patterns::Types::update_ptr_gpu_save( ST off ) { FieldStructureCompactClass__Patterns__Types__update_ptr_gpu_save<<<1,1>>>( this, off ); } void FieldStructureCompactClass::GroupFieldStructureElements::update_ptr_cpu_load( ST off ) { (char *&)pt.data_ += off; for( ST i = 0; i < pt.size_; ++i ) { (char *&)pt.data_[ i ].data_ += off; } (char *&)mat_prop.data_ += off; for( ST i = 0; i < mat_prop.size_; ++i ) { (char *&)mat_prop.data_[ i ].data_ += off; } (char *&)volumic_force.data_ += off; for( ST i = 0; i < volumic_force.size_; ++i ) { (char *&)volumic_force.data_[ i ].data_ += off; } (char *&)mat_elem.data_ += off; for( ST i = 0; i < mat_elem.size_; ++i ) { (char *&)mat_elem.data_[ i ].data_ += off; } (char *&)size.data_ += off; } __global__ void FieldStructureCompactClass__GroupFieldStructureElements__update_ptr_gpu_load( FieldStructureCompactClass::GroupFieldStructureElements *obj, ST off ) { (char *&)obj->pt.data_ += off; for( ST i = 0; i < obj->pt.size_; ++i ) { (char *&)obj->pt.data_[ i ].data_ += off; } (char *&)obj->mat_prop.data_ += off; for( ST i = 0; i 
< obj->mat_prop.size_; ++i ) { (char *&)obj->mat_prop.data_[ i ].data_ += off; } (char *&)obj->volumic_force.data_ += off; for( ST i = 0; i < obj->volumic_force.size_; ++i ) { (char *&)obj->volumic_force.data_[ i ].data_ += off; } (char *&)obj->mat_elem.data_ += off; for( ST i = 0; i < obj->mat_elem.size_; ++i ) { (char *&)obj->mat_elem.data_[ i ].data_ += off; } (char *&)obj->size.data_ += off; } void FieldStructureCompactClass::GroupFieldStructureElements::update_ptr_gpu_load( ST off ) { FieldStructureCompactClass__GroupFieldStructureElements__update_ptr_gpu_load<<<1,1>>>( this, off ); } void FieldStructureCompactClass::GroupFieldStructureElements::update_ptr_cpu_save( ST off ) { for( ST i = 0; i < pt.size_; ++i ) { (char *&)pt.data_[ i ].data_ += off; } (char *&)pt.data_ += off; for( ST i = 0; i < mat_prop.size_; ++i ) { (char *&)mat_prop.data_[ i ].data_ += off; } (char *&)mat_prop.data_ += off; for( ST i = 0; i < volumic_force.size_; ++i ) { (char *&)volumic_force.data_[ i ].data_ += off; } (char *&)volumic_force.data_ += off; for( ST i = 0; i < mat_elem.size_; ++i ) { (char *&)mat_elem.data_[ i ].data_ += off; } (char *&)mat_elem.data_ += off; (char *&)size.data_ += off; } __global__ void FieldStructureCompactClass__GroupFieldStructureElements__update_ptr_gpu_save( FieldStructureCompactClass::GroupFieldStructureElements *obj, ST off ) { for( ST i = 0; i < obj->pt.size_; ++i ) { (char *&)obj->pt.data_[ i ].data_ += off; } (char *&)obj->pt.data_ += off; for( ST i = 0; i < obj->mat_prop.size_; ++i ) { (char *&)obj->mat_prop.data_[ i ].data_ += off; } (char *&)obj->mat_prop.data_ += off; for( ST i = 0; i < obj->volumic_force.size_; ++i ) { (char *&)obj->volumic_force.data_[ i ].data_ += off; } (char *&)obj->volumic_force.data_ += off; for( ST i = 0; i < obj->mat_elem.size_; ++i ) { (char *&)obj->mat_elem.data_[ i ].data_ += off; } (char *&)obj->mat_elem.data_ += off; (char *&)obj->size.data_ += off; } void 
FieldStructureCompactClass::GroupFieldStructureElements::update_ptr_gpu_save( ST off ) { FieldStructureCompactClass__GroupFieldStructureElements__update_ptr_gpu_save<<<1,1>>>( this, off ); } void FieldStructureCompactClass::GroupFieldStructureInterfaces::update_ptr_cpu_load( ST off ) { (char *&)pt.data_ += off; for( ST i = 0; i < pt.size_; ++i ) { (char *&)pt.data_[ i ].data_ += off; } (char *&)link_prop.data_ += off; for( ST i = 0; i < link_prop.size_; ++i ) { (char *&)link_prop.data_[ i ].data_ += off; } (char *&)BC_step_prop.data_ += off; for( ST i = 0; i < BC_step_prop.size_; ++i ) { (char *&)BC_step_prop.data_[ i ].data_ += off; } (char *&)bc.data_ += off; for( ST i = 0; i < bc.size_; ++i ) { (char *&)bc.data_[ i ].data_ += off; } } __global__ void FieldStructureCompactClass__GroupFieldStructureInterfaces__update_ptr_gpu_load( FieldStructureCompactClass::GroupFieldStructureInterfaces *obj, ST off ) { (char *&)obj->pt.data_ += off; for( ST i = 0; i < obj->pt.size_; ++i ) { (char *&)obj->pt.data_[ i ].data_ += off; } (char *&)obj->link_prop.data_ += off; for( ST i = 0; i < obj->link_prop.size_; ++i ) { (char *&)obj->link_prop.data_[ i ].data_ += off; } (char *&)obj->BC_step_prop.data_ += off; for( ST i = 0; i < obj->BC_step_prop.size_; ++i ) { (char *&)obj->BC_step_prop.data_[ i ].data_ += off; } (char *&)obj->bc.data_ += off; for( ST i = 0; i < obj->bc.size_; ++i ) { (char *&)obj->bc.data_[ i ].data_ += off; } } void FieldStructureCompactClass::GroupFieldStructureInterfaces::update_ptr_gpu_load( ST off ) { FieldStructureCompactClass__GroupFieldStructureInterfaces__update_ptr_gpu_load<<<1,1>>>( this, off ); } void FieldStructureCompactClass::GroupFieldStructureInterfaces::update_ptr_cpu_save( ST off ) { for( ST i = 0; i < pt.size_; ++i ) { (char *&)pt.data_[ i ].data_ += off; } (char *&)pt.data_ += off; for( ST i = 0; i < link_prop.size_; ++i ) { (char *&)link_prop.data_[ i ].data_ += off; } (char *&)link_prop.data_ += off; for( ST i = 0; i < BC_step_prop.size_; 
++i ) { (char *&)BC_step_prop.data_[ i ].data_ += off; } (char *&)BC_step_prop.data_ += off; for( ST i = 0; i < bc.size_; ++i ) { (char *&)bc.data_[ i ].data_ += off; } (char *&)bc.data_ += off; } __global__ void FieldStructureCompactClass__GroupFieldStructureInterfaces__update_ptr_gpu_save( FieldStructureCompactClass::GroupFieldStructureInterfaces *obj, ST off ) { for( ST i = 0; i < obj->pt.size_; ++i ) { (char *&)obj->pt.data_[ i ].data_ += off; } (char *&)obj->pt.data_ += off; for( ST i = 0; i < obj->link_prop.size_; ++i ) { (char *&)obj->link_prop.data_[ i ].data_ += off; } (char *&)obj->link_prop.data_ += off; for( ST i = 0; i < obj->BC_step_prop.size_; ++i ) { (char *&)obj->BC_step_prop.data_[ i ].data_ += off; } (char *&)obj->BC_step_prop.data_ += off; for( ST i = 0; i < obj->bc.size_; ++i ) { (char *&)obj->bc.data_[ i ].data_ += off; } (char *&)obj->bc.data_ += off; } void FieldStructureCompactClass::GroupFieldStructureInterfaces::update_ptr_gpu_save( ST off ) { FieldStructureCompactClass__GroupFieldStructureInterfaces__update_ptr_gpu_save<<<1,1>>>( this, off ); } END_METIL_NAMESPACE
a5a82d4e3d4221a14aece8a48894234f5e8a2aef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cunnx_WindowGate_updateGradInput_kernel( float *gradInput, float *error, float* targetCentroids, const float *centroids,const float *input, const float *outputIndice, const float* output, const float* gradOutput, int inputSize, int outputSize, int outputWindowSize, float c, float d, float e, float lr) { __shared__ float buffer[WINDOWGATE_THREADS+1]; unsigned int tx = threadIdx.x; unsigned int k = blockIdx.x; const float *gradOutput_k = gradOutput + outputWindowSize*k; const float *output_k = output + outputWindowSize*k; const float *input_k = input + inputSize*k; float *gradInput_k = gradInput + inputSize*k; float centroid = centroids[k]; // get gradient of centroid buffer[tx] = 0; for (unsigned int i=tx; i<outputWindowSize; i+=blockDim.x) { buffer[tx] += gradOutput_k[i]*output_k[i]*((float)(i+1) - centroid); } // add (reduce) for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (tx < stride) buffer[tx] += buffer[tx+stride]; } if (tx == 0) { int outputIdx = outputIndice[k]; float gradCentroid = buffer[0]*c; centroid -= (lr*gradCentroid); centroid += outputIdx-1; centroid /= (float)(outputSize); targetCentroids[k] = centroid; buffer[WINDOWGATE_THREADS] = centroid*(float)(inputSize); } __syncthreads(); float targetCentroid = buffer[WINDOWGATE_THREADS]; buffer[tx] = 0; // target is a gaussian blur for (int i=tx; i<inputSize; i+=blockDim.x) { float target = (float)(i+1)-targetCentroid; target = d*expf(target*target*e); float input = input_k[i]; // dot product of logProbInput and probTarget (NLL) buffer[tx] -= logf(input + 0.0000001)*target; // grad input w.r.t. 
NLL gradInput_k[i] = -target/(input + 0.0000001); } // add (reduce) for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (tx < stride) buffer[tx] += buffer[tx+stride]; } if (tx == 0) error[k] = buffer[tx]; }
a5a82d4e3d4221a14aece8a48894234f5e8a2aef.cu
#include "includes.h" __global__ void cunnx_WindowGate_updateGradInput_kernel( float *gradInput, float *error, float* targetCentroids, const float *centroids,const float *input, const float *outputIndice, const float* output, const float* gradOutput, int inputSize, int outputSize, int outputWindowSize, float c, float d, float e, float lr) { __shared__ float buffer[WINDOWGATE_THREADS+1]; unsigned int tx = threadIdx.x; unsigned int k = blockIdx.x; const float *gradOutput_k = gradOutput + outputWindowSize*k; const float *output_k = output + outputWindowSize*k; const float *input_k = input + inputSize*k; float *gradInput_k = gradInput + inputSize*k; float centroid = centroids[k]; // get gradient of centroid buffer[tx] = 0; for (unsigned int i=tx; i<outputWindowSize; i+=blockDim.x) { buffer[tx] += gradOutput_k[i]*output_k[i]*((float)(i+1) - centroid); } // add (reduce) for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (tx < stride) buffer[tx] += buffer[tx+stride]; } if (tx == 0) { int outputIdx = outputIndice[k]; float gradCentroid = buffer[0]*c; centroid -= (lr*gradCentroid); centroid += outputIdx-1; centroid /= (float)(outputSize); targetCentroids[k] = centroid; buffer[WINDOWGATE_THREADS] = centroid*(float)(inputSize); } __syncthreads(); float targetCentroid = buffer[WINDOWGATE_THREADS]; buffer[tx] = 0; // target is a gaussian blur for (int i=tx; i<inputSize; i+=blockDim.x) { float target = (float)(i+1)-targetCentroid; target = d*expf(target*target*e); float input = input_k[i]; // dot product of logProbInput and probTarget (NLL) buffer[tx] -= logf(input + 0.0000001)*target; // grad input w.r.t. NLL gradInput_k[i] = -target/(input + 0.0000001); } // add (reduce) for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (tx < stride) buffer[tx] += buffer[tx+stride]; } if (tx == 0) error[k] = buffer[tx]; }
66784ddaede4ddcd281e38f9935c808af3e6aa0a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstddef> #include <boost/gil/image.hpp> #include <boost/gil/image_view.hpp> #include <boost/gil/typedefs.hpp> namespace gil = boost::gil; // namespace cuda // { // // view_type_from_pixel<Pixel, IsPlanar = false>::type // // iterator_type_from_pixe<Pixel, IsPlanar = false, IsStep = false, bool IsMutable = false> = Pixel*, const version is const Pixel* // // type_from_x_iterator<Pixel*> = {step_iterator_t = memory_based_step_iterator<Pixel*> xy_locator_t = memory_based_2d_locator<step_iterator_t>} // template <typename Pixel> // class memory_based_step_iterator // { // private: // }; // template <typename Pixel> // class buffer_view // { // using value_type = Pixel; // using reference = value_type&; // }; // template <typename Pixel> // class image_buffer // { // using point_t = gil::point_t; // template <typename View> // image_buffer(View view) // { // const auto dimensions = view.dimensions(); // } // private: // unsigned char* memory; // std::size_t allocated_bytes; // }; // } template <typename View> __global__ void check_pixels(View view) { std::ptrdiff_t x = blockIdx.x * blockDim.x + threadIdx.x; std::ptrdiff_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= view.width() || y >= view.height()) { return; } view(x, y) *= 2; } int main() { std::cout << "hello\n"; using pixel = gil::rgb8_pixel_t; pixel* contents = nullptr; std::size_t size = 1920 * 1080; hipMalloc(&contents, sizeof(pixel) * size); hipMemset(contents, 100, sizeof(pixel) * size); auto buffer_view = gil::interleaved_view(1920, 1080, contents, 1920); auto deref = [](pixel& p) -> decltype(auto) { return p.at(std::integral_constant<int, 0>{}); }; // auto view = gil::rgb8_view_t::add_deref<decltype(deref)>::make(buffer_view, deref); // std::cout << view(0, 0); }
66784ddaede4ddcd281e38f9935c808af3e6aa0a.cu
#include <iostream> #include <cstddef> #include <boost/gil/image.hpp> #include <boost/gil/image_view.hpp> #include <boost/gil/typedefs.hpp> namespace gil = boost::gil; // namespace cuda // { // // view_type_from_pixel<Pixel, IsPlanar = false>::type // // iterator_type_from_pixe<Pixel, IsPlanar = false, IsStep = false, bool IsMutable = false> = Pixel*, const version is const Pixel* // // type_from_x_iterator<Pixel*> = {step_iterator_t = memory_based_step_iterator<Pixel*> xy_locator_t = memory_based_2d_locator<step_iterator_t>} // template <typename Pixel> // class memory_based_step_iterator // { // private: // }; // template <typename Pixel> // class buffer_view // { // using value_type = Pixel; // using reference = value_type&; // }; // template <typename Pixel> // class image_buffer // { // using point_t = gil::point_t; // template <typename View> // image_buffer(View view) // { // const auto dimensions = view.dimensions(); // } // private: // unsigned char* memory; // std::size_t allocated_bytes; // }; // } template <typename View> __global__ void check_pixels(View view) { std::ptrdiff_t x = blockIdx.x * blockDim.x + threadIdx.x; std::ptrdiff_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= view.width() || y >= view.height()) { return; } view(x, y) *= 2; } int main() { std::cout << "hello\n"; using pixel = gil::rgb8_pixel_t; pixel* contents = nullptr; std::size_t size = 1920 * 1080; cudaMalloc(&contents, sizeof(pixel) * size); cudaMemset(contents, 100, sizeof(pixel) * size); auto buffer_view = gil::interleaved_view(1920, 1080, contents, 1920); auto deref = [](pixel& p) -> decltype(auto) { return p.at(std::integral_constant<int, 0>{}); }; // auto view = gil::rgb8_view_t::add_deref<decltype(deref)>::make(buffer_view, deref); // std::cout << view(0, 0); }
f1d5b760772909cdddbeedd381cc05120ab5fb1a.hip
// !!! This is a file automatically generated by hipify!!! #include <cupy/complex.cuh> #include <hipcub/hipcub.hpp> #include "cupy_cub.h" #include <stdexcept> using namespace cub; // Minimum boilerplate to support complex numbers in sum(), min(), and max(): // - This works only because all data fields in the *Traits struct are not // used in <hipcub/hipcub.hpp>. // - DO NOT USE THIS STUB for supporting CUB sorting!!!!!! // - The Max() and Lowest() below are chosen to comply with NumPy's lexical // ordering; note that std::numeric_limits<T> does not support complex // numbers as in general the comparison is ill defined. template <> struct FpLimits<complex<float>> { static __host__ __device__ __forceinline__ complex<float> Max() { return (complex<float>(FLT_MAX, FLT_MAX)); } static __host__ __device__ __forceinline__ complex<float> Lowest() { return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1))); } }; template <> struct FpLimits<complex<double>> { static __host__ __device__ __forceinline__ complex<double> Max() { return (complex<double>(DBL_MAX, DBL_MAX)); } static __host__ __device__ __forceinline__ complex<double> Lowest() { return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1))); } }; template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {}; template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {}; // end of boilerplate // // **** dtype_dispatcher **** // // This is implemented with reference to the following implementation. // https://github.com/rapidsai/cudf/blob/branch-0.6/cpp/src/utilities/type_dispatcher.hpp // template <class functor_t, typename... Ts> void dtype_dispatcher(int dtype_id, functor_t f, Ts&&... 
args) { switch (dtype_id) { case CUPY_CUB_INT8: return f.template operator()<char>(std::forward<Ts>(args)...); case CUPY_CUB_INT16: return f.template operator()<short>(std::forward<Ts>(args)...); case CUPY_CUB_INT32: return f.template operator()<int>(std::forward<Ts>(args)...); case CUPY_CUB_INT64: return f.template operator()<long>(std::forward<Ts>(args)...); case CUPY_CUB_UINT8: return f.template operator()<unsigned char>(std::forward<Ts>(args)...); case CUPY_CUB_UINT16: return f.template operator()<unsigned short>(std::forward<Ts>(args)...); case CUPY_CUB_UINT32: return f.template operator()<unsigned int>(std::forward<Ts>(args)...); case CUPY_CUB_UINT64: return f.template operator()<unsigned long>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT32: return f.template operator()<float>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT64: return f.template operator()<double>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX64: return f.template operator()<complex<float>>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX128: return f.template operator()<complex<double>>(std::forward<Ts>(args)...); default: throw std::runtime_error("Unsupported dtype ID"); } } // // **** cub_reduce_sum **** // struct _cub_reduce_sum { template <typename T> void operator()(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, hipStream_t s) { DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; void cub_reduce_sum(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, hipStream_t stream, int dtype_id) { dtype_dispatcher(dtype_id, _cub_reduce_sum(), x, y, num_items, workspace, workspace_size, stream); } size_t cub_reduce_sum_get_workspace_size(void *x, void *y, int num_items, hipStream_t stream, int dtype_id) { size_t workspace_size = 0; cub_reduce_sum(x, y, num_items, NULL, workspace_size, stream, dtype_id); return workspace_size; } // // **** cub_reduce_min **** // struct _cub_reduce_min { 
template <typename T> void operator()(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, hipStream_t s) { DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; void cub_reduce_min(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, hipStream_t stream, int dtype_id) { dtype_dispatcher(dtype_id, _cub_reduce_min(), x, y, num_items, workspace, workspace_size, stream); } size_t cub_reduce_min_get_workspace_size(void *x, void *y, int num_items, hipStream_t stream, int dtype_id) { size_t workspace_size = 0; cub_reduce_min(x, y, num_items, NULL, workspace_size, stream, dtype_id); return workspace_size; } // // **** cub_reduce_max **** // struct _cub_reduce_max { template <typename T> void operator()(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, hipStream_t s) { DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; void cub_reduce_max(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, hipStream_t stream, int dtype_id) { dtype_dispatcher(dtype_id, _cub_reduce_max(), x, y, num_items, workspace, workspace_size, stream); } size_t cub_reduce_max_get_workspace_size(void *x, void *y, int num_items, hipStream_t stream, int dtype_id) { size_t workspace_size = 0; cub_reduce_max(x, y, num_items, NULL, workspace_size, stream, dtype_id); return workspace_size; }
f1d5b760772909cdddbeedd381cc05120ab5fb1a.cu
#include <cupy/complex.cuh> #include <cub/device/device_reduce.cuh> #include "cupy_cub.h" #include <stdexcept> using namespace cub; // Minimum boilerplate to support complex numbers in sum(), min(), and max(): // - This works only because all data fields in the *Traits struct are not // used in <cub/device/device_reduce.cuh>. // - DO NOT USE THIS STUB for supporting CUB sorting!!!!!! // - The Max() and Lowest() below are chosen to comply with NumPy's lexical // ordering; note that std::numeric_limits<T> does not support complex // numbers as in general the comparison is ill defined. template <> struct FpLimits<complex<float>> { static __host__ __device__ __forceinline__ complex<float> Max() { return (complex<float>(FLT_MAX, FLT_MAX)); } static __host__ __device__ __forceinline__ complex<float> Lowest() { return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1))); } }; template <> struct FpLimits<complex<double>> { static __host__ __device__ __forceinline__ complex<double> Max() { return (complex<double>(DBL_MAX, DBL_MAX)); } static __host__ __device__ __forceinline__ complex<double> Lowest() { return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1))); } }; template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {}; template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {}; // end of boilerplate // // **** dtype_dispatcher **** // // This is implemented with reference to the following implementation. // https://github.com/rapidsai/cudf/blob/branch-0.6/cpp/src/utilities/type_dispatcher.hpp // template <class functor_t, typename... Ts> void dtype_dispatcher(int dtype_id, functor_t f, Ts&&... 
args) { switch (dtype_id) { case CUPY_CUB_INT8: return f.template operator()<char>(std::forward<Ts>(args)...); case CUPY_CUB_INT16: return f.template operator()<short>(std::forward<Ts>(args)...); case CUPY_CUB_INT32: return f.template operator()<int>(std::forward<Ts>(args)...); case CUPY_CUB_INT64: return f.template operator()<long>(std::forward<Ts>(args)...); case CUPY_CUB_UINT8: return f.template operator()<unsigned char>(std::forward<Ts>(args)...); case CUPY_CUB_UINT16: return f.template operator()<unsigned short>(std::forward<Ts>(args)...); case CUPY_CUB_UINT32: return f.template operator()<unsigned int>(std::forward<Ts>(args)...); case CUPY_CUB_UINT64: return f.template operator()<unsigned long>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT32: return f.template operator()<float>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT64: return f.template operator()<double>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX64: return f.template operator()<complex<float>>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX128: return f.template operator()<complex<double>>(std::forward<Ts>(args)...); default: throw std::runtime_error("Unsupported dtype ID"); } } // // **** cub_reduce_sum **** // struct _cub_reduce_sum { template <typename T> void operator()(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, cudaStream_t s) { DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; void cub_reduce_sum(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, cudaStream_t stream, int dtype_id) { dtype_dispatcher(dtype_id, _cub_reduce_sum(), x, y, num_items, workspace, workspace_size, stream); } size_t cub_reduce_sum_get_workspace_size(void *x, void *y, int num_items, cudaStream_t stream, int dtype_id) { size_t workspace_size = 0; cub_reduce_sum(x, y, num_items, NULL, workspace_size, stream, dtype_id); return workspace_size; } // // **** cub_reduce_min **** // struct _cub_reduce_min { 
template <typename T> void operator()(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, cudaStream_t s) { DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; void cub_reduce_min(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, cudaStream_t stream, int dtype_id) { dtype_dispatcher(dtype_id, _cub_reduce_min(), x, y, num_items, workspace, workspace_size, stream); } size_t cub_reduce_min_get_workspace_size(void *x, void *y, int num_items, cudaStream_t stream, int dtype_id) { size_t workspace_size = 0; cub_reduce_min(x, y, num_items, NULL, workspace_size, stream, dtype_id); return workspace_size; } // // **** cub_reduce_max **** // struct _cub_reduce_max { template <typename T> void operator()(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, cudaStream_t s) { DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; void cub_reduce_max(void *x, void *y, int num_items, void *workspace, size_t &workspace_size, cudaStream_t stream, int dtype_id) { dtype_dispatcher(dtype_id, _cub_reduce_max(), x, y, num_items, workspace, workspace_size, stream); } size_t cub_reduce_max_get_workspace_size(void *x, void *y, int num_items, cudaStream_t stream, int dtype_id) { size_t workspace_size = 0; cub_reduce_max(x, y, num_items, NULL, workspace_size, stream, dtype_id); return workspace_size; }
48f72e247991cf0ba13b023193052fd5a8367270.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #define N 1024 __global__ void scan(float *g_odata, float *g_idata, int n); __global__ void prescan(float *g_odata, float *g_idata, int n); void scanCPU(float *f_out, float *f_in, int i_n); double myDiffTime(struct timeval &start, struct timeval &end) { double d_start, d_end; d_start = (double)(start.tv_sec + start.tv_usec/1000000.0); d_end = (double)(end.tv_sec + end.tv_usec/1000000.0); return (d_end - d_start); } int main() { float a[N], c[N], g[N]; timeval start, end; float *dev_a, *dev_g; int size = N * sizeof(float); double d_gpuTime, d_cpuTime; // initialize matrices a for (int i = 0; i < N; i++) { a[i] = i; //printf("a[%i] = %f\n", i, a[i]); } // initialize a and b matrices here hipMalloc((void **) &dev_a, size); hipMalloc((void **) &dev_g, size); gettimeofday(&start, NULL); hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice); int thread_num = 8; hipLaunchKernelGGL(( prescan), dim3(4), dim3(8) ,2*thread_num*sizeof(float), 0, dev_g, dev_a, N); hipDeviceSynchronize(); hipMemcpy(g, dev_g, size, hipMemcpyDeviceToHost); gettimeofday(&end, NULL); d_gpuTime = myDiffTime(start, end); gettimeofday(&start, NULL); scanCPU(c, a, N); gettimeofday(&end, NULL); d_cpuTime = myDiffTime(start, end); hipFree(dev_a); hipFree(dev_g); for (int i = 0; i < N; i++) { printf("c[%i] = %0.3f, g[%i] = %0.3f\n", i, c[i], i, g[i]); if (c[i] != g[i]) { printf("Results do not match! 
c[%i]=%f, g[%i]=%f\n", i, c[i], i, g[i]); break; } } printf("GPU Time for scan size %i: %f\n", N, d_gpuTime); printf("CPU Time for scan size %i: %f\n", N, d_cpuTime); } __global__ void prescan(float *g_odata, float *g_idata, int n) { extern __shared__ float temp[]; // allocated on invocation int thid = threadIdx.x; // bid + (2 * tid) + offset will be the index of the array int bid = blockIdx.x * blockDim.x; int offset = 1; if(bid + 2*thid < n) { temp[bid+2*thid] = g_idata[bid+2*thid]; temp[bid+2*thid+1] = g_idata[bid+2*thid+1]; } for (int d = blockDim.x>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = bid + offset*(2*thid+1)-1; int bi = bid + offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[blockDim.x - 1] = 0; } // clear the last element for (int d = 1; d < blockDim.x; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = bid + offset*(2*thid+1)-1; int bi = bid + offset*(2*thid+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[bid+2*thid] = temp[bid+2*thid]; // write results to device memory g_odata[bid+2*thid+1] = temp[bid+2*thid+1]; } void scanCPU(float *f_out, float *f_in, int i_n) { f_out[0] = 0; for (int i = 1; i < i_n; i++) f_out[i] = f_out[i-1] + f_in[i-1]; } __global__ void scan(float *g_odata, float *g_idata, int n) { extern __shared__ float temp[]; // allocated on invocation int thid = threadIdx.x; int pout = 0, pin = 1; // Load input into shared memory. // This is exclusive scan, so shift right by one // and set first element to 0 temp[pout*n + thid] = (thid > 0) ? 
g_idata[thid-1] : 0; __syncthreads(); for (int offset = 1; offset < n; offset *= 2) { pout = 1 - pout; // swap double buffer indices pin = 1 - pout; if (thid >= offset) temp[pout*n+thid] += temp[pin*n+thid - offset]; else temp[pout*n+thid] = temp[pin*n+thid]; __syncthreads(); } g_odata[thid] = temp[pout*n+thid]; // write output }
48f72e247991cf0ba13b023193052fd5a8367270.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #define N 1024 __global__ void scan(float *g_odata, float *g_idata, int n); __global__ void prescan(float *g_odata, float *g_idata, int n); void scanCPU(float *f_out, float *f_in, int i_n); double myDiffTime(struct timeval &start, struct timeval &end) { double d_start, d_end; d_start = (double)(start.tv_sec + start.tv_usec/1000000.0); d_end = (double)(end.tv_sec + end.tv_usec/1000000.0); return (d_end - d_start); } int main() { float a[N], c[N], g[N]; timeval start, end; float *dev_a, *dev_g; int size = N * sizeof(float); double d_gpuTime, d_cpuTime; // initialize matrices a for (int i = 0; i < N; i++) { a[i] = i; //printf("a[%i] = %f\n", i, a[i]); } // initialize a and b matrices here cudaMalloc((void **) &dev_a, size); cudaMalloc((void **) &dev_g, size); gettimeofday(&start, NULL); cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice); int thread_num = 8; prescan<<<4, 8 ,2*thread_num*sizeof(float)>>>(dev_g, dev_a, N); cudaDeviceSynchronize(); cudaMemcpy(g, dev_g, size, cudaMemcpyDeviceToHost); gettimeofday(&end, NULL); d_gpuTime = myDiffTime(start, end); gettimeofday(&start, NULL); scanCPU(c, a, N); gettimeofday(&end, NULL); d_cpuTime = myDiffTime(start, end); cudaFree(dev_a); cudaFree(dev_g); for (int i = 0; i < N; i++) { printf("c[%i] = %0.3f, g[%i] = %0.3f\n", i, c[i], i, g[i]); if (c[i] != g[i]) { printf("Results do not match! 
c[%i]=%f, g[%i]=%f\n", i, c[i], i, g[i]); break; } } printf("GPU Time for scan size %i: %f\n", N, d_gpuTime); printf("CPU Time for scan size %i: %f\n", N, d_cpuTime); } __global__ void prescan(float *g_odata, float *g_idata, int n) { extern __shared__ float temp[]; // allocated on invocation int thid = threadIdx.x; // bid + (2 * tid) + offset will be the index of the array int bid = blockIdx.x * blockDim.x; int offset = 1; if(bid + 2*thid < n) { temp[bid+2*thid] = g_idata[bid+2*thid]; temp[bid+2*thid+1] = g_idata[bid+2*thid+1]; } for (int d = blockDim.x>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = bid + offset*(2*thid+1)-1; int bi = bid + offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[blockDim.x - 1] = 0; } // clear the last element for (int d = 1; d < blockDim.x; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = bid + offset*(2*thid+1)-1; int bi = bid + offset*(2*thid+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[bid+2*thid] = temp[bid+2*thid]; // write results to device memory g_odata[bid+2*thid+1] = temp[bid+2*thid+1]; } void scanCPU(float *f_out, float *f_in, int i_n) { f_out[0] = 0; for (int i = 1; i < i_n; i++) f_out[i] = f_out[i-1] + f_in[i-1]; } __global__ void scan(float *g_odata, float *g_idata, int n) { extern __shared__ float temp[]; // allocated on invocation int thid = threadIdx.x; int pout = 0, pin = 1; // Load input into shared memory. // This is exclusive scan, so shift right by one // and set first element to 0 temp[pout*n + thid] = (thid > 0) ? 
g_idata[thid-1] : 0; __syncthreads(); for (int offset = 1; offset < n; offset *= 2) { pout = 1 - pout; // swap double buffer indices pin = 1 - pout; if (thid >= offset) temp[pout*n+thid] += temp[pin*n+thid - offset]; else temp[pout*n+thid] = temp[pin*n+thid]; __syncthreads(); } g_odata[thid] = temp[pout*n+thid]; // write output }
779ab41fe6c73be251c65b1c623ccf9fbe8e5e45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <matrix_coloring/matrix_coloring.h> #include <blas.h> #include <basic_types.h> #include <error.h> #include <types.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <thrust/count.h> #include <thrust/binary_search.h> #include <assert.h> #include <sm_utils.inl> #include <algorithm> #include <amgx_types/util.h> #include <amgx_types/math.h> using namespace std; namespace amgx { /*************************************** * Source Definitions ***************************************/ template<class TConfig> MatrixColoring<TConfig>::MatrixColoring(AMG_Config &cfg, const std::string &cfg_scope) : m_num_colors(0), m_row_colors(0), m_sorted_rows_by_color(0), m_offsets_rows_per_color(0), m_ref_count(1), m_boundary_coloring(SYNC_COLORS), m_halo_coloring(FIRST) { m_coloring_level = cfg.getParameter<int>("coloring_level", cfg_scope); m_boundary_coloring = cfg.getParameter<ColoringType>("boundary_coloring", cfg_scope); m_halo_coloring = cfg.getParameter<ColoringType>("halo_coloring", cfg_scope); } template<class TConfig> MatrixColoring<TConfig>::~MatrixColoring() { } __global__ void findSeparation(INDEX_TYPE *rows_by_color, INDEX_TYPE *offsets_by_color, INDEX_TYPE *separation, INDEX_TYPE boundary, INDEX_TYPE num_colors, INDEX_TYPE num_rows) { int block_offset = blockIdx.x * (blockDim.x / 32) * 31; //each warp does 31 rows, 1 on the left side is redundant int lane = threadIdx.x % 32; int element = block_offset + (threadIdx.x / 32) * 31 + lane - 1; while (element < num_rows) { int color = 0; int row = -1; if (element != -1) { row = rows_by_color[element]; while ((color < num_colors) && ((element < offsets_by_color[color]) || (element >= offsets_by_color[color + 1]))) { color ++; } if ((element == offsets_by_color[color]) && (row >= boundary)) { separation[color] = element; } //special case when first row of color is immediately a boundary node if ((element == offsets_by_color[color + 1] - 
1) && (row < boundary)) { separation[color] = element + 1; } //special case when I am the last, but I am still not a boundary } unsigned int result = utils::ballot(row >= boundary, utils::activemask()); //if (result>0) printf("%x\n", result); if (lane > 0 && row >= boundary && ((result >> (lane - 1)) & 1) == 0) { separation[color] = element; } element += gridDim.x * (blockDim.x / 32) * 31; } } //prints how many edges fail to obey coloring property //if the optional aggregates parameter is specified, it also measures the downwind coloring property: //for each incoming edge (j,i), where i and j share the same aggregate, holds: color(j) < color(i) template <class TConfig> void MatrixColoring<TConfig>::assertColoring( Matrix<TConfig> &A, IVector &aggregates ) { IndexType numRows = A.get_num_rows(); IndexType nnz = A.get_num_nz(); IndexType blocksize = A.get_block_dimx() * A.get_block_dimy(); IVector &coloring = this->m_row_colors; bool check_downwind = aggregates.size() == A.get_num_rows(); //allocate host memory IndexType *ia = new IndexType[numRows + 1]; IndexType *ja = new IndexType[nnz]; ValueType *aa = new ValueType[nnz * blocksize]; IndexType *color = new IndexType[numRows]; //copy to host hipMemcpy( ia, A.row_offsets.raw(), sizeof(IndexType) * (numRows + 1), hipMemcpyDeviceToHost ); hipMemcpy( ja, A.col_indices.raw(), sizeof(IndexType)*nnz, hipMemcpyDeviceToHost ); hipMemcpy( aa, A.values.raw(), sizeof(ValueType)*blocksize * nnz, hipMemcpyDeviceToHost ); hipMemcpy( color, coloring.raw(), sizeof(IndexType)*numRows, hipMemcpyDeviceToHost ); IndexType *agg = new IndexType[numRows]; if ( check_downwind ) { hipMemcpy( agg, aggregates.raw(), sizeof(IndexType)*numRows, hipMemcpyDeviceToHost ); } //count how many nodes have a color IndexType *color_used = new IndexType[numRows]; for (IndexType i = 0; i < numRows; i++) { color_used[i] = 0; } for (IndexType i = 0; i < numRows; i++) { if ( color[i] >= 0 && color[i] < numRows ) { color_used[color[i]]++; } else { std::cout 
<< "color out of range: color[" << i << "] = " << color[i] << std::endl; } } // count violations of these two properties: // 1. locally downwind: for incoming edges (j,i) in same aggregate: color(j) < color(i) // 2. valid coloring: for neighbors j: color(j) != color(i) int violation_1 = 0; int property_1 = 0; int violation_2 = 0; int property_2 = 0; int inner_edges = 0; //note: property 1 cannot be enforeced all the time. Each cycle for example will violate it regardless of the coloring. for (IndexType i = 0; i < numRows; i++) { for (IndexType ii = ia[i]; ii < ia[i + 1]; ii++) { IndexType j = ja[ii]; if ( j == i ) { continue; } //check coloring property if ( color[j] == color[i] ) { violation_2++; } property_2++; if ( check_downwind && agg[j] == agg[i] ) { //look for transpose edge to decide outgoing or not bool outgoing = true; for (IndexType jj = ia[j]; jj < ia[j + 1]; jj++) { //found if ( ja[jj] == i ) { ValueType weight = types::util<ValueType>::get_zero(); for (IndexType iii = ii * blocksize; iii < (ii + 1)*blocksize; iii++) { weight = weight + aa[iii] * aa[iii]; } ValueType counter_weight = types::util<ValueType>::get_zero(); for (IndexType jjj = jj * blocksize; jjj < (jj + 1)*blocksize; jjj++) { counter_weight = counter_weight + aa[jjj] * aa[jjj]; } outgoing = types::util<ValueType>::abs(weight) > types::util<ValueType>::abs(counter_weight); break; } } //outgoing -> check downwind property if ( outgoing ) { if ( color[j] <= color[i] ) { violation_1++; } property_1++; } inner_edges++; } } } //tell results if ( check_downwind ) { std::cout << 200 * property_1 / double(inner_edges) << "% of all edges inside an aggregate are directed" << std::endl; if ( property_1 > 0 ) { std::cout << 100 * violation_1 / double(property_1) << "% of all outgoing edges inside an aggregate are not colored downwind" << std::endl; } } std::cout << 100 * violation_2 / double(property_2) << "% of all edges violated coloring property" << std::endl; std::cout << "number of nodes that use 
this color:" << std::endl; for (IndexType i = 0; i < numRows; i++) if ( color_used[i] > 0 ) { std::cout << i << ": " << color_used[i] << std::endl; } //free! delete [] ia; delete [] ja; delete [] aa; delete [] agg; delete [] color; delete [] color_used; } template<class TConfig> void MatrixColoring<TConfig>::createColorArrays(Matrix<TConfig> &A) { ViewType old = A.currentView(); A.setViewExterior(); int num_rows = A.get_num_rows(); //Disabled since currently we are not doing halo exchanges during colored execution /*typedef TemplateConfig<AMGX_host,AMGX_vecInt,matPrec,indPrec> hvector_type; typedef Vector<hvector_type> HVector; if (!A.is_matrix_singleGPU()) { HVector num_colors(1); std::vector<HVector> partition_num_colors(0); num_colors[0] = m_num_colors; A.manager->getComms()->global_reduce(partition_num_colors, num_colors, A, 6332); int max_partition_colors = 0; for (int i = 0; i < partition_num_colors.size(); i++) max_partition_colors = ::max(partition_num_colors[i][0],max_partition_colors); m_num_colors = max_partition_colors; }*/ if (m_halo_coloring == LAST) { thrust::fill(m_row_colors.begin() + num_rows, m_row_colors.end(), m_num_colors); cudaCheckError(); } IVector offsets_rows_per_color; if (m_offsets_rows_per_color.size() == 0) { // Sort the vertices based o their color m_sorted_rows_by_color.resize(num_rows); // Copy row colors IVector row_colors(m_row_colors); thrust::sequence(m_sorted_rows_by_color.begin(), m_sorted_rows_by_color.end()); thrust::sort_by_key(row_colors.begin(), row_colors.begin() + num_rows, m_sorted_rows_by_color.begin()); cudaCheckError(); // Compute the offset for each color offsets_rows_per_color.resize(m_num_colors + 1); m_offsets_rows_per_color.resize(m_num_colors + 1); // Compute interior-exterior separation for every color m_offsets_rows_per_color_separation.resize(m_num_colors); //m_offsets_rows_per_color_separation_halo.resize(m_num_colors); thrust::lower_bound(row_colors.begin(), row_colors.begin() + num_rows, 
thrust::counting_iterator<IndexType>(0), thrust::counting_iterator<IndexType>(offsets_rows_per_color.size()), offsets_rows_per_color.begin()); // Copy from device to host m_offsets_rows_per_color = offsets_rows_per_color; cudaCheckError(); } else { m_offsets_rows_per_color_separation.resize(m_num_colors); } cudaCheckError(); if (!A.is_matrix_singleGPU() && (A.getViewExterior() != A.getViewInterior())) { A.setViewInterior(); int separation = A.get_num_rows(); if (TConfig::memSpace == AMGX_host) { for (int i = 0; i < m_num_colors; i++) { m_offsets_rows_per_color_separation[i] = m_offsets_rows_per_color[i] + (thrust::lower_bound(m_sorted_rows_by_color.begin() + m_offsets_rows_per_color[i], m_sorted_rows_by_color.begin() + m_offsets_rows_per_color[i + 1], separation) - (m_sorted_rows_by_color.begin() + m_offsets_rows_per_color[i])); } cudaCheckError(); } // this is not a proper search, rather we look at every single element. But it is still a lot faster than the above (~10*) else { IVector separation_offsets_rows_per_color(m_num_colors); int size = num_rows; int num_blocks = min(4096, (size + 123) / 124); hipLaunchKernelGGL(( findSeparation) , dim3(num_blocks), dim3(128), 0, 0, m_sorted_rows_by_color.raw(), offsets_rows_per_color.raw(), separation_offsets_rows_per_color.raw(), separation, m_num_colors, num_rows); thrust::copy(separation_offsets_rows_per_color.begin(), separation_offsets_rows_per_color.end(), m_offsets_rows_per_color_separation.begin()); cudaCheckError(); for (int i = 0; i < m_num_colors; i++) { if (this->m_offsets_rows_per_color[i] == this->m_offsets_rows_per_color[i + 1]) { this->m_offsets_rows_per_color_separation[i] = this->m_offsets_rows_per_color[i + 1]; } } } } else { thrust::copy(m_offsets_rows_per_color.begin() + 1, m_offsets_rows_per_color.end(), m_offsets_rows_per_color_separation.begin()); cudaCheckError(); } A.setView(old); } template<class TConfig> std::map<std::string, MatrixColoringFactory<TConfig>*> & 
MatrixColoringFactory<TConfig>::getFactories( ) { static std::map<std::string, MatrixColoringFactory<TConfig> *> s_factories; return s_factories; } template<class TConfig> void MatrixColoringFactory<TConfig>::registerFactory(string name, MatrixColoringFactory<TConfig> *f) { std::map<std::string, MatrixColoringFactory<TConfig> *> &factories = getFactories( ); typename map<string, MatrixColoringFactory<TConfig> *>::iterator it = factories.find(name); if (it != factories.end()) { string error = "MatrixColoringFactory '" + name + "' has already been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } factories[name] = f; } template<class TConfig> void MatrixColoringFactory<TConfig>::unregisterFactory(std::string name) { std::map<std::string, MatrixColoringFactory<TConfig>*> &factories = getFactories( ); typename std::map<std::string, MatrixColoringFactory<TConfig> *>::iterator it = factories.find(name); if (it == factories.end()) { std::string error = "MatrixColoringFactory '" + name + "' has not been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } MatrixColoringFactory<TConfig> *factory = it->second; assert( factory != NULL ); delete factory; factories.erase(it); } template<class TConfig> void MatrixColoringFactory<TConfig>::unregisterFactories( ) { std::map<std::string, MatrixColoringFactory<TConfig>*> &factories = getFactories( ); typename map<string, MatrixColoringFactory<TConfig> *>::iterator it = factories.begin( ); for ( ; it != factories.end( ) ; ) { MatrixColoringFactory<TConfig> *factory = it->second; assert( factory != NULL ); it++; delete factory; } factories.clear( ); } template<class TConfig> MatrixColoring<TConfig> *MatrixColoringFactory<TConfig>::allocate(AMG_Config &cfg, const std::string &cfg_scope) { std::map<std::string, MatrixColoringFactory<TConfig> *> &factories = getFactories( ); string matrix_coloring_scheme = cfg.getParameter<string>("matrix_coloring_scheme", cfg_scope); typename map<string, MatrixColoringFactory<TConfig> 
*>::const_iterator it = factories.find(matrix_coloring_scheme); if (it == factories.end()) { string error = "MatrixColoringFactory '" + matrix_coloring_scheme + "' has not been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } return it->second->create(cfg, cfg_scope); }; /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class MatrixColoring<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class MatrixColoringFactory<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE }
779ab41fe6c73be251c65b1c623ccf9fbe8e5e45.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <matrix_coloring/matrix_coloring.h> #include <blas.h> #include <basic_types.h> #include <error.h> #include <types.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <thrust/count.h> #include <thrust/binary_search.h> #include <assert.h> #include <sm_utils.inl> #include <algorithm> #include <amgx_types/util.h> #include <amgx_types/math.h> using namespace std; namespace amgx { /*************************************** * Source Definitions ***************************************/ template<class TConfig> MatrixColoring<TConfig>::MatrixColoring(AMG_Config &cfg, const std::string &cfg_scope) : m_num_colors(0), m_row_colors(0), m_sorted_rows_by_color(0), m_offsets_rows_per_color(0), m_ref_count(1), m_boundary_coloring(SYNC_COLORS), m_halo_coloring(FIRST) { m_coloring_level = cfg.getParameter<int>("coloring_level", cfg_scope); m_boundary_coloring = cfg.getParameter<ColoringType>("boundary_coloring", cfg_scope); m_halo_coloring = cfg.getParameter<ColoringType>("halo_coloring", cfg_scope); } template<class TConfig> MatrixColoring<TConfig>::~MatrixColoring() { } __global__ void findSeparation(INDEX_TYPE *rows_by_color, INDEX_TYPE *offsets_by_color, INDEX_TYPE *separation, INDEX_TYPE boundary, INDEX_TYPE num_colors, INDEX_TYPE num_rows) { int block_offset = blockIdx.x * (blockDim.x / 32) * 31; //each warp does 31 rows, 1 on the left side is redundant int lane = threadIdx.x % 32; int element = block_offset + (threadIdx.x / 32) * 31 + lane - 1; while (element < num_rows) { int color = 0; int row = -1; if (element != -1) { row = rows_by_color[element]; while ((color < num_colors) && ((element < offsets_by_color[color]) || (element >= offsets_by_color[color + 1]))) { color ++; } if ((element == offsets_by_color[color]) && (row >= boundary)) { separation[color] = element; } //special case when first row of color is immediately a boundary node if ((element == offsets_by_color[color + 1] - 
1) && (row < boundary)) { separation[color] = element + 1; } //special case when I am the last, but I am still not a boundary } unsigned int result = utils::ballot(row >= boundary, utils::activemask()); //if (result>0) printf("%x\n", result); if (lane > 0 && row >= boundary && ((result >> (lane - 1)) & 1) == 0) { separation[color] = element; } element += gridDim.x * (blockDim.x / 32) * 31; } } //prints how many edges fail to obey coloring property //if the optional aggregates parameter is specified, it also measures the downwind coloring property: //for each incoming edge (j,i), where i and j share the same aggregate, holds: color(j) < color(i) template <class TConfig> void MatrixColoring<TConfig>::assertColoring( Matrix<TConfig> &A, IVector &aggregates ) { IndexType numRows = A.get_num_rows(); IndexType nnz = A.get_num_nz(); IndexType blocksize = A.get_block_dimx() * A.get_block_dimy(); IVector &coloring = this->m_row_colors; bool check_downwind = aggregates.size() == A.get_num_rows(); //allocate host memory IndexType *ia = new IndexType[numRows + 1]; IndexType *ja = new IndexType[nnz]; ValueType *aa = new ValueType[nnz * blocksize]; IndexType *color = new IndexType[numRows]; //copy to host cudaMemcpy( ia, A.row_offsets.raw(), sizeof(IndexType) * (numRows + 1), cudaMemcpyDeviceToHost ); cudaMemcpy( ja, A.col_indices.raw(), sizeof(IndexType)*nnz, cudaMemcpyDeviceToHost ); cudaMemcpy( aa, A.values.raw(), sizeof(ValueType)*blocksize * nnz, cudaMemcpyDeviceToHost ); cudaMemcpy( color, coloring.raw(), sizeof(IndexType)*numRows, cudaMemcpyDeviceToHost ); IndexType *agg = new IndexType[numRows]; if ( check_downwind ) { cudaMemcpy( agg, aggregates.raw(), sizeof(IndexType)*numRows, cudaMemcpyDeviceToHost ); } //count how many nodes have a color IndexType *color_used = new IndexType[numRows]; for (IndexType i = 0; i < numRows; i++) { color_used[i] = 0; } for (IndexType i = 0; i < numRows; i++) { if ( color[i] >= 0 && color[i] < numRows ) { color_used[color[i]]++; } else { 
std::cout << "color out of range: color[" << i << "] = " << color[i] << std::endl; } } // count violations of these two properties: // 1. locally downwind: for incoming edges (j,i) in same aggregate: color(j) < color(i) // 2. valid coloring: for neighbors j: color(j) != color(i) int violation_1 = 0; int property_1 = 0; int violation_2 = 0; int property_2 = 0; int inner_edges = 0; //note: property 1 cannot be enforeced all the time. Each cycle for example will violate it regardless of the coloring. for (IndexType i = 0; i < numRows; i++) { for (IndexType ii = ia[i]; ii < ia[i + 1]; ii++) { IndexType j = ja[ii]; if ( j == i ) { continue; } //check coloring property if ( color[j] == color[i] ) { violation_2++; } property_2++; if ( check_downwind && agg[j] == agg[i] ) { //look for transpose edge to decide outgoing or not bool outgoing = true; for (IndexType jj = ia[j]; jj < ia[j + 1]; jj++) { //found if ( ja[jj] == i ) { ValueType weight = types::util<ValueType>::get_zero(); for (IndexType iii = ii * blocksize; iii < (ii + 1)*blocksize; iii++) { weight = weight + aa[iii] * aa[iii]; } ValueType counter_weight = types::util<ValueType>::get_zero(); for (IndexType jjj = jj * blocksize; jjj < (jj + 1)*blocksize; jjj++) { counter_weight = counter_weight + aa[jjj] * aa[jjj]; } outgoing = types::util<ValueType>::abs(weight) > types::util<ValueType>::abs(counter_weight); break; } } //outgoing -> check downwind property if ( outgoing ) { if ( color[j] <= color[i] ) { violation_1++; } property_1++; } inner_edges++; } } } //tell results if ( check_downwind ) { std::cout << 200 * property_1 / double(inner_edges) << "% of all edges inside an aggregate are directed" << std::endl; if ( property_1 > 0 ) { std::cout << 100 * violation_1 / double(property_1) << "% of all outgoing edges inside an aggregate are not colored downwind" << std::endl; } } std::cout << 100 * violation_2 / double(property_2) << "% of all edges violated coloring property" << std::endl; std::cout << "number of 
nodes that use this color:" << std::endl; for (IndexType i = 0; i < numRows; i++) if ( color_used[i] > 0 ) { std::cout << i << ": " << color_used[i] << std::endl; } //free! delete [] ia; delete [] ja; delete [] aa; delete [] agg; delete [] color; delete [] color_used; } template<class TConfig> void MatrixColoring<TConfig>::createColorArrays(Matrix<TConfig> &A) { ViewType old = A.currentView(); A.setViewExterior(); int num_rows = A.get_num_rows(); //Disabled since currently we are not doing halo exchanges during colored execution /*typedef TemplateConfig<AMGX_host,AMGX_vecInt,matPrec,indPrec> hvector_type; typedef Vector<hvector_type> HVector; if (!A.is_matrix_singleGPU()) { HVector num_colors(1); std::vector<HVector> partition_num_colors(0); num_colors[0] = m_num_colors; A.manager->getComms()->global_reduce(partition_num_colors, num_colors, A, 6332); int max_partition_colors = 0; for (int i = 0; i < partition_num_colors.size(); i++) max_partition_colors = std::max(partition_num_colors[i][0],max_partition_colors); m_num_colors = max_partition_colors; }*/ if (m_halo_coloring == LAST) { thrust::fill(m_row_colors.begin() + num_rows, m_row_colors.end(), m_num_colors); cudaCheckError(); } IVector offsets_rows_per_color; if (m_offsets_rows_per_color.size() == 0) { // Sort the vertices based o their color m_sorted_rows_by_color.resize(num_rows); // Copy row colors IVector row_colors(m_row_colors); thrust::sequence(m_sorted_rows_by_color.begin(), m_sorted_rows_by_color.end()); thrust::sort_by_key(row_colors.begin(), row_colors.begin() + num_rows, m_sorted_rows_by_color.begin()); cudaCheckError(); // Compute the offset for each color offsets_rows_per_color.resize(m_num_colors + 1); m_offsets_rows_per_color.resize(m_num_colors + 1); // Compute interior-exterior separation for every color m_offsets_rows_per_color_separation.resize(m_num_colors); //m_offsets_rows_per_color_separation_halo.resize(m_num_colors); thrust::lower_bound(row_colors.begin(), row_colors.begin() + 
num_rows, thrust::counting_iterator<IndexType>(0), thrust::counting_iterator<IndexType>(offsets_rows_per_color.size()), offsets_rows_per_color.begin()); // Copy from device to host m_offsets_rows_per_color = offsets_rows_per_color; cudaCheckError(); } else { m_offsets_rows_per_color_separation.resize(m_num_colors); } cudaCheckError(); if (!A.is_matrix_singleGPU() && (A.getViewExterior() != A.getViewInterior())) { A.setViewInterior(); int separation = A.get_num_rows(); if (TConfig::memSpace == AMGX_host) { for (int i = 0; i < m_num_colors; i++) { m_offsets_rows_per_color_separation[i] = m_offsets_rows_per_color[i] + (thrust::lower_bound(m_sorted_rows_by_color.begin() + m_offsets_rows_per_color[i], m_sorted_rows_by_color.begin() + m_offsets_rows_per_color[i + 1], separation) - (m_sorted_rows_by_color.begin() + m_offsets_rows_per_color[i])); } cudaCheckError(); } // this is not a proper search, rather we look at every single element. But it is still a lot faster than the above (~10*) else { IVector separation_offsets_rows_per_color(m_num_colors); int size = num_rows; int num_blocks = min(4096, (size + 123) / 124); findSeparation <<< num_blocks, 128>>>(m_sorted_rows_by_color.raw(), offsets_rows_per_color.raw(), separation_offsets_rows_per_color.raw(), separation, m_num_colors, num_rows); thrust::copy(separation_offsets_rows_per_color.begin(), separation_offsets_rows_per_color.end(), m_offsets_rows_per_color_separation.begin()); cudaCheckError(); for (int i = 0; i < m_num_colors; i++) { if (this->m_offsets_rows_per_color[i] == this->m_offsets_rows_per_color[i + 1]) { this->m_offsets_rows_per_color_separation[i] = this->m_offsets_rows_per_color[i + 1]; } } } } else { thrust::copy(m_offsets_rows_per_color.begin() + 1, m_offsets_rows_per_color.end(), m_offsets_rows_per_color_separation.begin()); cudaCheckError(); } A.setView(old); } template<class TConfig> std::map<std::string, MatrixColoringFactory<TConfig>*> & MatrixColoringFactory<TConfig>::getFactories( ) { static 
std::map<std::string, MatrixColoringFactory<TConfig> *> s_factories; return s_factories; } template<class TConfig> void MatrixColoringFactory<TConfig>::registerFactory(string name, MatrixColoringFactory<TConfig> *f) { std::map<std::string, MatrixColoringFactory<TConfig> *> &factories = getFactories( ); typename map<string, MatrixColoringFactory<TConfig> *>::iterator it = factories.find(name); if (it != factories.end()) { string error = "MatrixColoringFactory '" + name + "' has already been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } factories[name] = f; } template<class TConfig> void MatrixColoringFactory<TConfig>::unregisterFactory(std::string name) { std::map<std::string, MatrixColoringFactory<TConfig>*> &factories = getFactories( ); typename std::map<std::string, MatrixColoringFactory<TConfig> *>::iterator it = factories.find(name); if (it == factories.end()) { std::string error = "MatrixColoringFactory '" + name + "' has not been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } MatrixColoringFactory<TConfig> *factory = it->second; assert( factory != NULL ); delete factory; factories.erase(it); } template<class TConfig> void MatrixColoringFactory<TConfig>::unregisterFactories( ) { std::map<std::string, MatrixColoringFactory<TConfig>*> &factories = getFactories( ); typename map<string, MatrixColoringFactory<TConfig> *>::iterator it = factories.begin( ); for ( ; it != factories.end( ) ; ) { MatrixColoringFactory<TConfig> *factory = it->second; assert( factory != NULL ); it++; delete factory; } factories.clear( ); } template<class TConfig> MatrixColoring<TConfig> *MatrixColoringFactory<TConfig>::allocate(AMG_Config &cfg, const std::string &cfg_scope) { std::map<std::string, MatrixColoringFactory<TConfig> *> &factories = getFactories( ); string matrix_coloring_scheme = cfg.getParameter<string>("matrix_coloring_scheme", cfg_scope); typename map<string, MatrixColoringFactory<TConfig> *>::const_iterator it = 
factories.find(matrix_coloring_scheme); if (it == factories.end()) { string error = "MatrixColoringFactory '" + matrix_coloring_scheme + "' has not been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } return it->second->create(cfg, cfg_scope); }; /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class MatrixColoring<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class MatrixColoringFactory<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE }
50721babb7fd608512cb22f7180ab67895c1c2e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void shared4R1W1Gs(float *A, float *B, float *C, const int N) { __shared__ float Smem[512]; int i = blockIdx.x * blockDim.x + threadIdx.x; Smem[threadIdx.x] = i; __syncthreads(); if (i < N) { C[i] = Smem[(threadIdx.x+1)%512]+Smem[(threadIdx.x+2)%512]+Smem[(threadIdx.x+3)%512]+Smem[(threadIdx.x+4)%512]; } }
50721babb7fd608512cb22f7180ab67895c1c2e9.cu
#include "includes.h" __global__ void shared4R1W1Gs(float *A, float *B, float *C, const int N) { __shared__ float Smem[512]; int i = blockIdx.x * blockDim.x + threadIdx.x; Smem[threadIdx.x] = i; __syncthreads(); if (i < N) { C[i] = Smem[(threadIdx.x+1)%512]+Smem[(threadIdx.x+2)%512]+Smem[(threadIdx.x+3)%512]+Smem[(threadIdx.x+4)%512]; } }
e3eabbc8f442ad060d0327e00704f95b972f6672.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <chrono> #include <numeric> #include "common_hip.cuh" #include "simulation_hip.cuh" int main() { const double nu = 0.05; const double sigma = 0.25; const double width = 2; const double height = 2; const double dx = width / (IMAX-1); const double dy = height / (JMAX-1); const double dz = height / (KMAX-1); const double dt = sigma * dx * dy * dz / nu; const double cx = (nu * dt / (dx * dx)); const double cy = (nu * dt / (dy * dy)); const double cz = (nu * dt / (dz * dz)); // Host Data Initialization std::vector<double> thost(IMAX * JMAX * KMAX); for (int k = 0; k < KMAX; ++k) { for (int j = 0; j < JMAX; ++j) { for (int i = 0; i < IMAX; ++i) { if (i < HOTCORNER_IMAX && j < HOTCORNER_JMAX && k < HOTCORNER_KMAX) { thost[INDEX3D(i, j, k)] = 2.0; } else { thost[INDEX3D(i, j, k)] = 1.0; } } } } std::chrono::steady_clock::time_point t_start = std::chrono::steady_clock::now(); // Device Data Initialization double *tnow; double *tnext; hipMalloc((void **) &tnow, IMAX * JMAX * KMAX * sizeof(double)); hipMalloc((void **) &tnext, IMAX * JMAX * KMAX * sizeof(double)); hipMemcpy(tnow, thost.data(), IMAX * JMAX * KMAX * sizeof(double), hipMemcpyHostToDevice); // Calculate initial (inner) temperature const unsigned long all_cells = (IMAX-2) * (JMAX-2) * (KMAX-2); const unsigned long hot_cells = (HOTCORNER_IMAX-1) * (HOTCORNER_JMAX-1) * (HOTCORNER_KMAX-1); double expected = hot_cells * 2.0 + (all_cells-hot_cells) * 1.0; double temperature = 0.0; for (int k = 1; k < KMAX-1; ++k) { for (int j = 1; j < JMAX-1; ++j) { temperature = std::accumulate(&thost[INDEX3D(1, j, k)], &thost[INDEX3D(IMAX-1, j, k)], temperature); } } std::cout << "Initial Temperature: " << temperature << " Expected: " << expected << std::endl; const dim3 dim_block(8, 8, 8); const dim3 dim_grid((IMAX - 2 + dim_block.x - 1) / dim_block.x, (JMAX - 2 + dim_block.y - 1) / dim_block.y, 
(KMAX - 2 + dim_block.z - 1) / dim_block.z); const unsigned int smem_bytes = (dim_block.x + 2) * (dim_block.y + 2) * (dim_block.z +2) * sizeof(double); std::chrono::steady_clock::time_point t_sim_start = std::chrono::steady_clock::now(); for (int ts = 0; ts < TIMESTEPS; ++ts) { hipLaunchKernelGGL(( DiffuseKnl), dim3(dim_grid), dim3(dim_block), smem_bytes, 0, tnow, tnext, cx, cy, cz); std::swap(tnow, tnext); } hipDeviceSynchronize(); std::chrono::steady_clock::time_point t_sim_end = std::chrono::steady_clock::now(); hipMemcpy(thost.data(), tnow, IMAX * JMAX * KMAX * sizeof(double), hipMemcpyDeviceToHost); temperature = 0.0; for (int k = 1; k < KMAX-1; ++k) { for (int j = 1; j < JMAX-1; ++j) { temperature = std::accumulate(&thost[INDEX3D(1, j, k)], &thost[INDEX3D(IMAX-1, j, k)], temperature); } } hipFree(tnow); hipFree(tnext); hipDeviceReset(); std::chrono::steady_clock::time_point t_end = std::chrono::steady_clock::now(); std::chrono::duration<double> runtime = std::chrono::duration_cast<std::chrono::duration<double>>(t_end-t_start); std::chrono::duration<double> sim_runtime = std::chrono::duration_cast<std::chrono::duration<double>>(t_sim_end-t_sim_start); std::cout << "Final Temperature: " << temperature << " Expected: " << expected << std::endl; std::cout << "Time Elapsed (simulation): " << sim_runtime.count() << "s" << std::endl; std::cout << "Time Elapsed (total): " << runtime.count() << "s" << std::endl; return EXIT_SUCCESS; }
e3eabbc8f442ad060d0327e00704f95b972f6672.cu
#include <iostream> #include <vector> #include <chrono> #include <numeric> #include "common.cuh" #include "simulation.cuh" int main() { const double nu = 0.05; const double sigma = 0.25; const double width = 2; const double height = 2; const double dx = width / (IMAX-1); const double dy = height / (JMAX-1); const double dz = height / (KMAX-1); const double dt = sigma * dx * dy * dz / nu; const double cx = (nu * dt / (dx * dx)); const double cy = (nu * dt / (dy * dy)); const double cz = (nu * dt / (dz * dz)); // Host Data Initialization std::vector<double> thost(IMAX * JMAX * KMAX); for (int k = 0; k < KMAX; ++k) { for (int j = 0; j < JMAX; ++j) { for (int i = 0; i < IMAX; ++i) { if (i < HOTCORNER_IMAX && j < HOTCORNER_JMAX && k < HOTCORNER_KMAX) { thost[INDEX3D(i, j, k)] = 2.0; } else { thost[INDEX3D(i, j, k)] = 1.0; } } } } std::chrono::steady_clock::time_point t_start = std::chrono::steady_clock::now(); // Device Data Initialization double *tnow; double *tnext; cudaMalloc((void **) &tnow, IMAX * JMAX * KMAX * sizeof(double)); cudaMalloc((void **) &tnext, IMAX * JMAX * KMAX * sizeof(double)); cudaMemcpy(tnow, thost.data(), IMAX * JMAX * KMAX * sizeof(double), cudaMemcpyHostToDevice); // Calculate initial (inner) temperature const unsigned long all_cells = (IMAX-2) * (JMAX-2) * (KMAX-2); const unsigned long hot_cells = (HOTCORNER_IMAX-1) * (HOTCORNER_JMAX-1) * (HOTCORNER_KMAX-1); double expected = hot_cells * 2.0 + (all_cells-hot_cells) * 1.0; double temperature = 0.0; for (int k = 1; k < KMAX-1; ++k) { for (int j = 1; j < JMAX-1; ++j) { temperature = std::accumulate(&thost[INDEX3D(1, j, k)], &thost[INDEX3D(IMAX-1, j, k)], temperature); } } std::cout << "Initial Temperature: " << temperature << " Expected: " << expected << std::endl; const dim3 dim_block(8, 8, 8); const dim3 dim_grid((IMAX - 2 + dim_block.x - 1) / dim_block.x, (JMAX - 2 + dim_block.y - 1) / dim_block.y, (KMAX - 2 + dim_block.z - 1) / dim_block.z); const unsigned int smem_bytes = (dim_block.x + 2) * 
(dim_block.y + 2) * (dim_block.z +2) * sizeof(double); std::chrono::steady_clock::time_point t_sim_start = std::chrono::steady_clock::now(); for (int ts = 0; ts < TIMESTEPS; ++ts) { DiffuseKnl<<<dim_grid, dim_block, smem_bytes>>>(tnow, tnext, cx, cy, cz); std::swap(tnow, tnext); } cudaDeviceSynchronize(); std::chrono::steady_clock::time_point t_sim_end = std::chrono::steady_clock::now(); cudaMemcpy(thost.data(), tnow, IMAX * JMAX * KMAX * sizeof(double), cudaMemcpyDeviceToHost); temperature = 0.0; for (int k = 1; k < KMAX-1; ++k) { for (int j = 1; j < JMAX-1; ++j) { temperature = std::accumulate(&thost[INDEX3D(1, j, k)], &thost[INDEX3D(IMAX-1, j, k)], temperature); } } cudaFree(tnow); cudaFree(tnext); cudaDeviceReset(); std::chrono::steady_clock::time_point t_end = std::chrono::steady_clock::now(); std::chrono::duration<double> runtime = std::chrono::duration_cast<std::chrono::duration<double>>(t_end-t_start); std::chrono::duration<double> sim_runtime = std::chrono::duration_cast<std::chrono::duration<double>>(t_sim_end-t_sim_start); std::cout << "Final Temperature: " << temperature << " Expected: " << expected << std::endl; std::cout << "Time Elapsed (simulation): " << sim_runtime.count() << "s" << std::endl; std::cout << "Time Elapsed (total): " << runtime.count() << "s" << std::endl; return EXIT_SUCCESS; }
c43d1ed301dff6a5edd59073f25bb7e56c85b328.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "MDSystem_interface.h" #include "common.h" #include "BoxGeometry.h" #include "MDSystem.h" #include "RandomGenerator.h" #include "Auxiliary.h" #include "NeighborList_interface.h" #include"Statistic.h" #include "Integrator_interface.h" #include "InteractionEngine_interface.h" #include "tmp.h" #include "Reshuffle_interface.h" #include "Displacement_interface.h" #include "Topology.h" #include "SystemBondedInteraction.h" #include "BondInteraction.h" #include "NonBondedInteraction.h" #define NThreadsPerBlockCell 128 #define NThreadsPerBlockAtom 96 int main(int argc, char * argv[]) { ScalorType rcut1 = 5.f; char * filename; if (argc != 4){ printf ("Usage:\n%s conf.gro rcut1 device\n", argv[0]); return 1; } if (argc != 1){ rcut1 = atof(argv[2]); filename = argv[1]; } printf ("# setting device to %d\n", atoi(argv[3])); hipSetDevice (atoi(argv[3])); checkCUDAError ("set device"); MDSystem sys; sys.initConfig(filename); Topology::System sysTop; Topology::Molecule mol; mol.pushAtom (Topology::Atom (1.0, 0.0, 0)); LennardJones6_12Parameter ljparam; // ScalorType rcut2 = sys.box.size.z / 2 - 1.f; ScalorType rcut2 = rcut1 * 3; if (rcut2 > sys.box.size.z / 2.f - 1.f) rcut2 = sys.box.size.z / 2 - 1.f; printf ("# rcut1 is %f\n", rcut1); printf ("# rcut2 is %f\n", rcut2); ljparam.reinit (1.f, 1.f, 0.f, rcut1, rcut2); sysTop.addNonBondedInteraction (Topology::NonBondedInteraction(0, 0, ljparam)); sysTop.addMolecules (mol, sys.hdata.numAtom); sys.initTopology (sysTop); sys.initDeviceData (); SystemNonBondedInteraction sysNbInter; sysNbInter.reinit (sysTop); // ScalorType maxrcut = sysNbInter.maxRcut(); // ScalorType rlist = rcut2; // CellList clist (sys, rlist, NThreadsPerBlockCell, NThreadsPerBlockAtom); // NeighborList nlist (sysNbInter, sys, rlist, NThreadsPerBlockAtom, 2.f); // sys.normalizeDeviceData (); // clist.rebuild (sys, NULL); // nlist.rebuild (sys, clist, NULL); InteractionEngine inter 
(sys, NThreadsPerBlockAtom); inter.registNonBondedInteraction (sysNbInter); try{ inter.clearInteraction (sys); // inter.applyNonBondedInteraction (sys, nlist, NULL, NULL); inter.applyNonBondedInteraction (sys, rcut2); sys.updateHostFromDevice (NULL); FILE *fp = fopen ("force.out", "w"); fprintf (fp, "%d\n%f %f %f\n", sys.ddata.numAtom, sys.box.size.x, sys.box.size.y, sys.box.size.z); for (unsigned i = 0; i < sys.ddata.numAtom; ++i){ fprintf (fp, "%e %e %e %e %e %e\n", sys.hdata.coord[i].x, sys.hdata.coord[i].y, sys.hdata.coord[i].z, sys.hdata.forcx[i], sys.hdata.forcy[i], sys.hdata.forcz[i]); } fclose (fp); } catch (MDException &e){ fprintf (stderr, "%s\n", e.what()); return 1; } return 0; }
c43d1ed301dff6a5edd59073f25bb7e56c85b328.cu
#include <stdio.h> #include "MDSystem_interface.h" #include "common.h" #include "BoxGeometry.h" #include "MDSystem.h" #include "RandomGenerator.h" #include "Auxiliary.h" #include "NeighborList_interface.h" #include"Statistic.h" #include "Integrator_interface.h" #include "InteractionEngine_interface.h" #include "tmp.h" #include "Reshuffle_interface.h" #include "Displacement_interface.h" #include "Topology.h" #include "SystemBondedInteraction.h" #include "BondInteraction.h" #include "NonBondedInteraction.h" #define NThreadsPerBlockCell 128 #define NThreadsPerBlockAtom 96 int main(int argc, char * argv[]) { ScalorType rcut1 = 5.f; char * filename; if (argc != 4){ printf ("Usage:\n%s conf.gro rcut1 device\n", argv[0]); return 1; } if (argc != 1){ rcut1 = atof(argv[2]); filename = argv[1]; } printf ("# setting device to %d\n", atoi(argv[3])); cudaSetDevice (atoi(argv[3])); checkCUDAError ("set device"); MDSystem sys; sys.initConfig(filename); Topology::System sysTop; Topology::Molecule mol; mol.pushAtom (Topology::Atom (1.0, 0.0, 0)); LennardJones6_12Parameter ljparam; // ScalorType rcut2 = sys.box.size.z / 2 - 1.f; ScalorType rcut2 = rcut1 * 3; if (rcut2 > sys.box.size.z / 2.f - 1.f) rcut2 = sys.box.size.z / 2 - 1.f; printf ("# rcut1 is %f\n", rcut1); printf ("# rcut2 is %f\n", rcut2); ljparam.reinit (1.f, 1.f, 0.f, rcut1, rcut2); sysTop.addNonBondedInteraction (Topology::NonBondedInteraction(0, 0, ljparam)); sysTop.addMolecules (mol, sys.hdata.numAtom); sys.initTopology (sysTop); sys.initDeviceData (); SystemNonBondedInteraction sysNbInter; sysNbInter.reinit (sysTop); // ScalorType maxrcut = sysNbInter.maxRcut(); // ScalorType rlist = rcut2; // CellList clist (sys, rlist, NThreadsPerBlockCell, NThreadsPerBlockAtom); // NeighborList nlist (sysNbInter, sys, rlist, NThreadsPerBlockAtom, 2.f); // sys.normalizeDeviceData (); // clist.rebuild (sys, NULL); // nlist.rebuild (sys, clist, NULL); InteractionEngine inter (sys, NThreadsPerBlockAtom); 
inter.registNonBondedInteraction (sysNbInter); try{ inter.clearInteraction (sys); // inter.applyNonBondedInteraction (sys, nlist, NULL, NULL); inter.applyNonBondedInteraction (sys, rcut2); sys.updateHostFromDevice (NULL); FILE *fp = fopen ("force.out", "w"); fprintf (fp, "%d\n%f %f %f\n", sys.ddata.numAtom, sys.box.size.x, sys.box.size.y, sys.box.size.z); for (unsigned i = 0; i < sys.ddata.numAtom; ++i){ fprintf (fp, "%e %e %e %e %e %e\n", sys.hdata.coord[i].x, sys.hdata.coord[i].y, sys.hdata.coord[i].z, sys.hdata.forcx[i], sys.hdata.forcy[i], sys.hdata.forcz[i]); } fclose (fp); } catch (MDException &e){ fprintf (stderr, "%s\n", e.what()); return 1; } return 0; }
9d32cb0a3028658975af943c52981edb02de3e10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "boost/program_options.hpp" #include "psrdada_cpp/cli_utils.hpp" #include "psrdada_cpp/common.hpp" #include "psrdada_cpp/dada_client_base.hpp" #include "psrdada_cpp/dada_input_stream.hpp" #include "psrdada_cpp/dada_null_sink.hpp" #include "psrdada_cpp/dada_output_stream.hpp" #include "psrdada_cpp/multilog.hpp" #include <thrust/extrema.h> #include "psrdada_cpp/effelsberg/edd/DadaBufferLayout.hpp" #include "psrdada_cpp/effelsberg/edd/Packer.cuh" #include <unistd.h> #include <iomanip> #include <cstring> #include <ctime> #include <iostream> #include <time.h> using namespace psrdada_cpp; namespace { const size_t ERROR_IN_COMMAND_LINE = 1; const size_t SUCCESS = 0; const size_t ERROR_UNHANDLED_EXCEPTION = 2; } // namespace __device__ __forceinline__ uint64_t swap64(uint64_t x) { uint64_t result; uint2 t; asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(t.x), "=r"(t.y) : "l"(x)); t.x = __byte_perm(t.x, 0, 0x0123); t.y = __byte_perm(t.y, 0, 0x0123); asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(t.y), "r"(t.x)); return result; } __global__ void toNetworkEndianess(uint64_t *s, size_t N) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; (i < N); i += blockDim.x * gridDim.x) { s[i] = swap64(s[i]); } } int main(int argc, char **argv) { try { key_t output_key; unsigned int input_bit_depth; unsigned int delay; size_t nSideChannels; size_t nblocks; size_t speadHeapSize; std::string mode; /** Define and parse the program options */ namespace po = boost::program_options; po::options_description desc("Options"); desc.add_options()("help,h", "Print help messages"); desc.add_options()( "output_key,o", po::value<std::string>()->default_value("dada")->notifier( [&output_key](std::string in) { output_key = string_to_key(in); }), "The shared memory key for the dada buffer to write to (hex " "string)"); desc.add_options()("input_bit_depth,b", po::value<unsigned 
int>(&input_bit_depth)->required(), "The number of bits per sample in the " "packetiser output (8 or 12)"); // desc.add_options()("mode,m", po::value<std::string >(&mode)->required(), // " Type of data to generate:\n " // " gated: "); desc.add_options()("delay,d", po::value<unsigned int>(&delay)->required(), "The delay between writing two consecutive blocks [ms]."); desc.add_options()("nblocks,n", po::value<size_t>()->default_value(0)->notifier( [&nblocks](size_t in) { nblocks = in; }), "Number of blocks to write in total. Default 0 means no-limit."); desc.add_options()("speadheap_size", po::value<size_t>()->default_value(4096)->notifier( [&speadHeapSize](size_t in) { speadHeapSize = in; }), "size of the spead data heaps. The number of the " "heaps in the dada block depends on the number of " "side channel items."); desc.add_options()("nsidechannelitems,s", po::value<size_t>()->default_value(1)->notifier( [&nSideChannels](size_t in) { nSideChannels = in; }), "Number of side channel items ( s >= 1)"); desc.add_options()( "log_level", po::value<std::string>()->default_value("info")->notifier( [](std::string level) { set_log_level(level); }), "The logging level to use " "(debug, info, warning, " "error)"); po::variables_map vm; try { po::store(po::parse_command_line(argc, argv, desc), vm); if (vm.count("help")) { std::cout << "Fill dada buffer with dummy data" << std::endl << desc << std::endl; return SUCCESS; } po::notify(vm); } catch (po::error &e) { std::cerr << "ERROR: " << e.what() << std::endl << std::endl; std::cerr << desc << std::endl; return ERROR_IN_COMMAND_LINE; } if (input_bit_depth != 8) { std::cerr << " Currently only 8 bit supported!\n"; return ERROR_IN_COMMAND_LINE; } MultiLog log("edd::DummyDataGenerator"); DadaOutputStream sink(output_key, log); char header[4096]; std::strcpy(header, "HEADER DADA\nHDR_VERSION 1.0\nHDR_SIZE 4096\nDADA_VERSION 1.0\nFILE_SIZE 2013265920\nNBIT 32\nNDIM 2\nNPOL 1\nNCHAN 4096\nRESOLUTION 1\nDSB 1\nSYNC_TIME 
1234567890\nSAMPLE_CLOCK_START 175671842316288\n"); RawBytes headerBlock(header, 4096, 4096); sink.init(headerBlock); effelsberg::edd::DadaBufferLayout dadaBufferLayout(output_key, speadHeapSize, nSideChannels); size_t n_samples = dadaBufferLayout.sizeOfData() * 8 / input_bit_depth; size_t nFreqs = n_samples/ 2 + 1; thrust::device_vector<hipfftComplex> input_dummy_data_freq(nFreqs); thrust::device_vector<float> tmp(dadaBufferLayout.sizeOfData() * 8 / input_bit_depth); thrust::device_vector<uint32_t> packed_data(tmp.size() * 8 / 32); input_dummy_data_freq[nFreqs / 3] = make_cuComplex(50.f, 0.0f); input_dummy_data_freq[nFreqs / 2] = make_cuComplex(20.f, 0.0f); hipfftHandle plan; hipfftPlan1d(&plan, tmp.size(), HIPFFT_C2R, 1); hipfftExecC2R(plan, (hipfftComplex*)thrust::raw_pointer_cast(input_dummy_data_freq.data()),(hipfftReal*)thrust::raw_pointer_cast(tmp.data())); float min = thrust::min_element(tmp.begin(), tmp.end())[0]; float max = thrust::max_element(tmp.begin(), tmp.end())[0]; hipLaunchKernelGGL(( effelsberg::edd::kernels::packNbit<8>), dim3(128), dim3(1024), 0, 0, thrust::raw_pointer_cast(tmp.data()), (uint32_t*)thrust::raw_pointer_cast(packed_data.data()), tmp.size(), min, max); //toNetworkEndianess<<<64, 1024>>>((uint64_t*)thrust::raw_pointer_cast(packed_data.data()), packed_data.size() /2); thrust::host_vector<uint32_t> output(packed_data); // convert from 8 bit unsigned to 8 bit signed uint8_t *A_unsigned = reinterpret_cast<uint8_t*>(thrust::raw_pointer_cast(output.data())); int8_t *A_signed = reinterpret_cast<int8_t*>(thrust::raw_pointer_cast(output.data())); for(size_t i = 0; i < output.size() * 4; i++) { int f = A_unsigned[i]; A_signed[i] = f - 128; } size_t counter = 0; while(true) { counter += 1; RawBytes dataBlock((char*) thrust::raw_pointer_cast(output.data()), output.size() * 32 / 8, output.size() * 32 / 8 ); sink(dataBlock); std::cout << "Wrote " << counter << std::endl; if (counter == nblocks) break; usleep(delay * 1000); } } catch 
(std::exception &e) { std::cerr << "Unhandled Exception reached the top of main: " << e.what() << ", application will now exit" << std::endl; return ERROR_UNHANDLED_EXCEPTION; } return SUCCESS; }
9d32cb0a3028658975af943c52981edb02de3e10.cu
#include "boost/program_options.hpp" #include "psrdada_cpp/cli_utils.hpp" #include "psrdada_cpp/common.hpp" #include "psrdada_cpp/dada_client_base.hpp" #include "psrdada_cpp/dada_input_stream.hpp" #include "psrdada_cpp/dada_null_sink.hpp" #include "psrdada_cpp/dada_output_stream.hpp" #include "psrdada_cpp/multilog.hpp" #include <thrust/extrema.h> #include "psrdada_cpp/effelsberg/edd/DadaBufferLayout.hpp" #include "psrdada_cpp/effelsberg/edd/Packer.cuh" #include <unistd.h> #include <iomanip> #include <cstring> #include <ctime> #include <iostream> #include <time.h> using namespace psrdada_cpp; namespace { const size_t ERROR_IN_COMMAND_LINE = 1; const size_t SUCCESS = 0; const size_t ERROR_UNHANDLED_EXCEPTION = 2; } // namespace __device__ __forceinline__ uint64_t swap64(uint64_t x) { uint64_t result; uint2 t; asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(t.x), "=r"(t.y) : "l"(x)); t.x = __byte_perm(t.x, 0, 0x0123); t.y = __byte_perm(t.y, 0, 0x0123); asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(t.y), "r"(t.x)); return result; } __global__ void toNetworkEndianess(uint64_t *s, size_t N) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; (i < N); i += blockDim.x * gridDim.x) { s[i] = swap64(s[i]); } } int main(int argc, char **argv) { try { key_t output_key; unsigned int input_bit_depth; unsigned int delay; size_t nSideChannels; size_t nblocks; size_t speadHeapSize; std::string mode; /** Define and parse the program options */ namespace po = boost::program_options; po::options_description desc("Options"); desc.add_options()("help,h", "Print help messages"); desc.add_options()( "output_key,o", po::value<std::string>()->default_value("dada")->notifier( [&output_key](std::string in) { output_key = string_to_key(in); }), "The shared memory key for the dada buffer to write to (hex " "string)"); desc.add_options()("input_bit_depth,b", po::value<unsigned int>(&input_bit_depth)->required(), "The number of bits per sample in the " "packetiser output (8 or 12)"); // 
desc.add_options()("mode,m", po::value<std::string >(&mode)->required(), // " Type of data to generate:\n " // " gated: "); desc.add_options()("delay,d", po::value<unsigned int>(&delay)->required(), "The delay between writing two consecutive blocks [ms]."); desc.add_options()("nblocks,n", po::value<size_t>()->default_value(0)->notifier( [&nblocks](size_t in) { nblocks = in; }), "Number of blocks to write in total. Default 0 means no-limit."); desc.add_options()("speadheap_size", po::value<size_t>()->default_value(4096)->notifier( [&speadHeapSize](size_t in) { speadHeapSize = in; }), "size of the spead data heaps. The number of the " "heaps in the dada block depends on the number of " "side channel items."); desc.add_options()("nsidechannelitems,s", po::value<size_t>()->default_value(1)->notifier( [&nSideChannels](size_t in) { nSideChannels = in; }), "Number of side channel items ( s >= 1)"); desc.add_options()( "log_level", po::value<std::string>()->default_value("info")->notifier( [](std::string level) { set_log_level(level); }), "The logging level to use " "(debug, info, warning, " "error)"); po::variables_map vm; try { po::store(po::parse_command_line(argc, argv, desc), vm); if (vm.count("help")) { std::cout << "Fill dada buffer with dummy data" << std::endl << desc << std::endl; return SUCCESS; } po::notify(vm); } catch (po::error &e) { std::cerr << "ERROR: " << e.what() << std::endl << std::endl; std::cerr << desc << std::endl; return ERROR_IN_COMMAND_LINE; } if (input_bit_depth != 8) { std::cerr << " Currently only 8 bit supported!\n"; return ERROR_IN_COMMAND_LINE; } MultiLog log("edd::DummyDataGenerator"); DadaOutputStream sink(output_key, log); char header[4096]; std::strcpy(header, "HEADER DADA\nHDR_VERSION 1.0\nHDR_SIZE 4096\nDADA_VERSION 1.0\nFILE_SIZE 2013265920\nNBIT 32\nNDIM 2\nNPOL 1\nNCHAN 4096\nRESOLUTION 1\nDSB 1\nSYNC_TIME 1234567890\nSAMPLE_CLOCK_START 175671842316288\n"); RawBytes headerBlock(header, 4096, 4096); sink.init(headerBlock); 
effelsberg::edd::DadaBufferLayout dadaBufferLayout(output_key, speadHeapSize, nSideChannels); size_t n_samples = dadaBufferLayout.sizeOfData() * 8 / input_bit_depth; size_t nFreqs = n_samples/ 2 + 1; thrust::device_vector<cufftComplex> input_dummy_data_freq(nFreqs); thrust::device_vector<float> tmp(dadaBufferLayout.sizeOfData() * 8 / input_bit_depth); thrust::device_vector<uint32_t> packed_data(tmp.size() * 8 / 32); input_dummy_data_freq[nFreqs / 3] = make_cuComplex(50.f, 0.0f); input_dummy_data_freq[nFreqs / 2] = make_cuComplex(20.f, 0.0f); cufftHandle plan; cufftPlan1d(&plan, tmp.size(), CUFFT_C2R, 1); cufftExecC2R(plan, (cufftComplex*)thrust::raw_pointer_cast(input_dummy_data_freq.data()),(cufftReal*)thrust::raw_pointer_cast(tmp.data())); float min = thrust::min_element(tmp.begin(), tmp.end())[0]; float max = thrust::max_element(tmp.begin(), tmp.end())[0]; effelsberg::edd::kernels::packNbit<8><<<128, 1024>>> (thrust::raw_pointer_cast(tmp.data()), (uint32_t*)thrust::raw_pointer_cast(packed_data.data()), tmp.size(), min, max); //toNetworkEndianess<<<64, 1024>>>((uint64_t*)thrust::raw_pointer_cast(packed_data.data()), packed_data.size() /2); thrust::host_vector<uint32_t> output(packed_data); // convert from 8 bit unsigned to 8 bit signed uint8_t *A_unsigned = reinterpret_cast<uint8_t*>(thrust::raw_pointer_cast(output.data())); int8_t *A_signed = reinterpret_cast<int8_t*>(thrust::raw_pointer_cast(output.data())); for(size_t i = 0; i < output.size() * 4; i++) { int f = A_unsigned[i]; A_signed[i] = f - 128; } size_t counter = 0; while(true) { counter += 1; RawBytes dataBlock((char*) thrust::raw_pointer_cast(output.data()), output.size() * 32 / 8, output.size() * 32 / 8 ); sink(dataBlock); std::cout << "Wrote " << counter << std::endl; if (counter == nblocks) break; usleep(delay * 1000); } } catch (std::exception &e) { std::cerr << "Unhandled Exception reached the top of main: " << e.what() << ", application will now exit" << std::endl; return 
ERROR_UNHANDLED_EXCEPTION; } return SUCCESS; }
f873c163fcfd9f0464a3610bd73570896e934054.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <helper_cuda.h> #include "datatools.h" #include "matadd.h" int main(int argc, char *argv[]) { double *h_A,*h_b,*h_c,*d_A,*d_b,*d_c; int i, j, m=32, n=5; h_A = (double *)malloc(m*n * sizeof(double)); h_b = (double *)malloc(n * sizeof(double)); h_c = (double *)malloc(m * sizeof(double)); hipMalloc((void **)&d_A,m*n * sizeof(double)); hipMalloc((void **)&d_b,n * sizeof(double)); hipMalloc((void **)&d_c,m * sizeof(double)); init_1d(n,m,h_b,h_c); init(m,n,h_A); hipMemcpy(d_A, h_A, m*n * sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, n * sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_c, h_c, m * sizeof(double),hipMemcpyHostToDevice); hipLaunchKernelGGL(( matmultvec), dim3(m/16),dim3(16), 0, 0, m, d_A, d_b, d_c); hipDeviceSynchronize(); hipMemcpy(h_c, d_c, m * sizeof(double),hipMemcpyDeviceToHost); for (j=0;j<n;j++){ printf("%lf ", h_b[j]); } for (i=0;i<m;i++){ printf("%lf ", h_c[i]); } free(h_A); free(h_b); free(h_c); hipFree(d_A); hipFree(d_b); hipFree(d_c); }
f873c163fcfd9f0464a3610bd73570896e934054.cu
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include <helper_cuda.h> #include "datatools.h" #include "matadd.h" int main(int argc, char *argv[]) { double *h_A,*h_b,*h_c,*d_A,*d_b,*d_c; int i, j, m=32, n=5; h_A = (double *)malloc(m*n * sizeof(double)); h_b = (double *)malloc(n * sizeof(double)); h_c = (double *)malloc(m * sizeof(double)); cudaMalloc((void **)&d_A,m*n * sizeof(double)); cudaMalloc((void **)&d_b,n * sizeof(double)); cudaMalloc((void **)&d_c,m * sizeof(double)); init_1d(n,m,h_b,h_c); init(m,n,h_A); cudaMemcpy(d_A, h_A, m*n * sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, n * sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_c, h_c, m * sizeof(double),cudaMemcpyHostToDevice); matmultvec<<<m/16,16>>>(m, d_A, d_b, d_c); cudaDeviceSynchronize(); cudaMemcpy(h_c, d_c, m * sizeof(double),cudaMemcpyDeviceToHost); for (j=0;j<n;j++){ printf("%lf ", h_b[j]); } for (i=0;i<m;i++){ printf("%lf ", h_c[i]); } free(h_A); free(h_b); free(h_c); cudaFree(d_A); cudaFree(d_b); cudaFree(d_c); }
4b10c7290e18b39986a9f9b75ae025c7193ce815.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * spGPU - Sparse matrices on GPU library. * * Copyright (C) 2010 - 2014 * Davide Barbieri - University of Rome Tor Vergata * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 3 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "cudadebug.h" #include "cudalang.h" #include "hip/hip_complex.h" extern "C" { #include "core.h" #include "vector.h" } #include "debug.h" #define BLOCK_SIZE 512 __global__ void spgpuCaxpby_krn(cuFloatComplex *z, int n, cuFloatComplex beta, cuFloatComplex *y, cuFloatComplex alpha, cuFloatComplex* x) { int id = threadIdx.x + BLOCK_SIZE*blockIdx.x; if (id < n) { // Since z, x and y are accessed with the same offset by the same thread, // and the write to z follows the x and y read, x, y and z can share the same base address (in-place computing). 
if (cuFloatComplex_isZero(beta)) z[id] = cuCmulf(alpha,x[id]); else z[id] = cuCfmaf(beta, y[id], cuCmulf(alpha, x[id])); } } void spgpuCaxpby_(spgpuHandle_t handle, __device cuFloatComplex *z, int n, cuFloatComplex beta, __device cuFloatComplex *y, cuFloatComplex alpha, __device cuFloatComplex* x) { int msize = (n+BLOCK_SIZE-1)/BLOCK_SIZE; dim3 block(BLOCK_SIZE); dim3 grid(msize); hipLaunchKernelGGL(( spgpuCaxpby_krn), dim3(grid), dim3(block), 0, handle->currentStream, z, n, beta, y, alpha, x); } void spgpuCaxpby(spgpuHandle_t handle, __device cuFloatComplex *z, int n, cuFloatComplex beta, __device cuFloatComplex *y, cuFloatComplex alpha, __device cuFloatComplex* x) { int maxNForACall = max(handle->maxGridSizeX, BLOCK_SIZE*handle->maxGridSizeX); while (n > maxNForACall) //managing large vectors { spgpuCaxpby_(handle, z, maxNForACall, beta, y, alpha, x); x = x + maxNForACall; y = y + maxNForACall; z = z + maxNForACall; n -= maxNForACall; } spgpuCaxpby_(handle, z, n, beta, y, alpha, x); cudaCheckError("CUDA error on saxpby"); } void spgpuCmaxpby(spgpuHandle_t handle, __device cuFloatComplex *z, int n, cuFloatComplex beta, __device cuFloatComplex *y, cuFloatComplex alpha, __device cuFloatComplex* x, int count, int pitch) { for (int i=0; i<count; i++) spgpuCaxpby(handle, z+pitch*i, n, beta, y+pitch*i, alpha, x+pitch*i); }
4b10c7290e18b39986a9f9b75ae025c7193ce815.cu
/* * spGPU - Sparse matrices on GPU library. * * Copyright (C) 2010 - 2014 * Davide Barbieri - University of Rome Tor Vergata * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 3 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "cudadebug.h" #include "cudalang.h" #include "cuComplex.h" extern "C" { #include "core.h" #include "vector.h" } #include "debug.h" #define BLOCK_SIZE 512 __global__ void spgpuCaxpby_krn(cuFloatComplex *z, int n, cuFloatComplex beta, cuFloatComplex *y, cuFloatComplex alpha, cuFloatComplex* x) { int id = threadIdx.x + BLOCK_SIZE*blockIdx.x; if (id < n) { // Since z, x and y are accessed with the same offset by the same thread, // and the write to z follows the x and y read, x, y and z can share the same base address (in-place computing). 
if (cuFloatComplex_isZero(beta)) z[id] = cuCmulf(alpha,x[id]); else z[id] = cuCfmaf(beta, y[id], cuCmulf(alpha, x[id])); } } void spgpuCaxpby_(spgpuHandle_t handle, __device cuFloatComplex *z, int n, cuFloatComplex beta, __device cuFloatComplex *y, cuFloatComplex alpha, __device cuFloatComplex* x) { int msize = (n+BLOCK_SIZE-1)/BLOCK_SIZE; dim3 block(BLOCK_SIZE); dim3 grid(msize); spgpuCaxpby_krn<<<grid, block, 0, handle->currentStream>>>(z, n, beta, y, alpha, x); } void spgpuCaxpby(spgpuHandle_t handle, __device cuFloatComplex *z, int n, cuFloatComplex beta, __device cuFloatComplex *y, cuFloatComplex alpha, __device cuFloatComplex* x) { int maxNForACall = max(handle->maxGridSizeX, BLOCK_SIZE*handle->maxGridSizeX); while (n > maxNForACall) //managing large vectors { spgpuCaxpby_(handle, z, maxNForACall, beta, y, alpha, x); x = x + maxNForACall; y = y + maxNForACall; z = z + maxNForACall; n -= maxNForACall; } spgpuCaxpby_(handle, z, n, beta, y, alpha, x); cudaCheckError("CUDA error on saxpby"); } void spgpuCmaxpby(spgpuHandle_t handle, __device cuFloatComplex *z, int n, cuFloatComplex beta, __device cuFloatComplex *y, cuFloatComplex alpha, __device cuFloatComplex* x, int count, int pitch) { for (int i=0; i<count; i++) spgpuCaxpby(handle, z+pitch*i, n, beta, y+pitch*i, alpha, x+pitch*i); }
612c59ffd1d869e78d5e4234c1f0b20aebfe2b08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <color_spinor_field.h> #include <color_spinor_field_order.h> #include <index_helper.cuh> #include <tune_quda.h> #define FINE_GRAINED_ACCESS namespace quda { template <typename Field> struct PackGhostArg { Field field; void **ghost; const void *v; int X[QUDA_MAX_DIM]; const int volumeCB; const int nDim; const int nFace; const int parity; const int nParity; const int dagger; const QudaDWFPCType pc_type; int commDim[4]; // whether a given dimension is partitioned or not PackGhostArg(Field field, void **ghost, const ColorSpinorField &a, int parity, int dagger) : field(field), ghost(ghost), v(a.V()), volumeCB(a.VolumeCB()), nDim(a.Ndim()), nFace(a.Nspin() == 1 ? 3 : 1), parity(parity), nParity(a.SiteSubset()), dagger(dagger), pc_type(a.DWFPCtype()) { for (int d=0; d<nDim; d++) X[d] = a.X(d); X[0] *= (nParity == 1) ? 2 : 1; // set to full lattice dimensions X[4] = (nDim == 5) ? a.X(4) : 1; // set fifth dimension correctly for (int i=0; i<4; i++) { commDim[i] = comm_dim_partitioned(i); } } }; template <typename Float, int Ns, int Nc, typename Arg> __device__ __host__ inline void packGhost(Arg &arg, int cb_idx, int parity, int spinor_parity) { typedef typename mapper<Float>::type RegType; const int *X = arg.X; int x[5] = { }; if (arg.nDim == 5) getCoords5(x, cb_idx, X, parity, arg.pc_type); else getCoords(x, cb_idx, X, parity); #pragma unroll for (int dim=0; dim<4; dim++) { if (arg.commDim[dim] && x[dim] < arg.nFace){ #ifdef FINE_GRAINED_ACCESS for (int s=0; s<Ns; s++) { for (int c=0; c<Nc; c++) { arg.field.Ghost(dim, 0, spinor_parity, ghostFaceIndex<0>(x,arg.X,dim,arg.nFace), s, c) = arg.field(spinor_parity, cb_idx, s, c); } } #else RegType tmp[2*Ns*Nc]; arg.field.load(tmp, cb_idx, spinor_parity); arg.field.saveGhost(tmp, ghostFaceIndex<0>(x,arg.X,dim,arg.nFace), dim, 0, spinor_parity); #endif } if (arg.commDim[dim] && x[dim] >= X[dim] - arg.nFace){ #ifdef FINE_GRAINED_ACCESS 
for (int s=0; s<Ns; s++) { for (int c=0; c<Nc; c++) { arg.field.Ghost(dim, 1, spinor_parity, ghostFaceIndex<1>(x,arg.X,dim,arg.nFace), s, c) = arg.field(spinor_parity, cb_idx, s, c); } } #else RegType tmp[2*Ns*Nc]; arg.field.load(tmp, cb_idx, spinor_parity); arg.field.saveGhost(tmp, ghostFaceIndex<1>(x,arg.X,dim,arg.nFace), dim, 1, spinor_parity); #endif } } } template <typename Float, int Ns, int Nc, typename Arg> void GenericPackGhost(Arg &arg) { for (int parity=0; parity<arg.nParity; parity++) { parity = (arg.nParity == 2) ? parity : arg.parity; const int spinor_parity = (arg.nParity == 2) ? parity : 0; for (int i=0; i<arg.volumeCB; i++) packGhost<Float,Ns,Nc>(arg, i, parity, spinor_parity); } } template <typename Float, int Ns, int Nc, typename Arg> __global__ void GenericPackGhostKernel(Arg arg) { int x_cb = blockIdx.x*blockDim.x + threadIdx.x; if (x_cb >= arg.volumeCB) return; const int parity = (blockDim.y == 2) ? threadIdx.y : arg.parity; const int spinor_parity = (blockDim.y == 2) ? 
parity : 0; packGhost<Float,Ns,Nc>(arg, x_cb, parity, spinor_parity); } template <typename Float, int Ns, int Nc, typename Arg> class GenericPackGhostLauncher : public Tunable { protected: Arg &arg; const ColorSpinorField &meta; long long flops() const { return 0; } long long bytes() const { // FIXME take into account paritioning size_t totalBytes = 0; for (int d=0; d<4; d++) { if (!comm_dim_partitioned(d)) continue; totalBytes += 2*arg.nFace*2*Ns*Nc*meta.SurfaceCB(d)*meta.Precision(); } return totalBytes; } unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneGridDim() const { return false; } unsigned int minThreads() const { return arg.volumeCB; } bool advanceTuneParam(TuneParam &param) const { bool rtn = Tunable::advanceTuneParam(param); param.block.y = arg.nParity; return rtn; } virtual void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.block.y = arg.nParity; } /** sets default values for when tuning is disabled */ virtual void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.block.y = arg.nParity; } public: GenericPackGhostLauncher(Arg &arg, const ColorSpinorField &meta) : arg(arg), meta(meta) { strcpy(aux, meta.AuxString()); #ifdef MULTI_GPU char comm[5]; comm[0] = (arg.commDim[0] ? '1' : '0'); comm[1] = (arg.commDim[1] ? '1' : '0'); comm[2] = (arg.commDim[2] ? '1' : '0'); comm[3] = (arg.commDim[3] ? 
'1' : '0'); comm[4] = '\0'; strcat(aux,",comm="); strcat(aux,comm); #endif } virtual ~GenericPackGhostLauncher() { } void apply(const hipStream_t &stream) { if (meta.Location() == QUDA_CPU_FIELD_LOCATION) { GenericPackGhost<Float,Ns,Nc,Arg>(arg); } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); hipLaunchKernelGGL(( GenericPackGhostKernel<Float,Ns,Nc,Arg>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), meta.AuxString()); } }; template <typename Float, QudaFieldOrder order, int Ns, int Nc> void genericPackGhost(void **ghost, const ColorSpinorField &a, const QudaParity parity, const int dagger) { #ifdef FINE_GRAINED_ACCESS typedef typename colorspinor::FieldOrderCB<Float,Ns,Nc,1,order> Q; Q field(a, 0, ghost); #else typedef typename colorspinor_order_mapper<Float,order,Ns,Nc>::type Q; Q field(a, (Float*)0, (float*)0, (Float**)ghost); #endif PackGhostArg<Q> arg(field, ghost, a, parity, dagger); GenericPackGhostLauncher<Float,Ns,Nc,PackGhostArg<Q> > launch(arg, a); launch.apply(0); } template <typename Float, QudaFieldOrder order, int Ns> void genericPackGhost(void **ghost, const ColorSpinorField &a, const QudaParity parity, const int dagger) { if (a.Ncolor() == 2) { genericPackGhost<Float,order,Ns,2>(ghost, a, parity, dagger); } else if (a.Ncolor() == 3) { genericPackGhost<Float,order,Ns,3>(ghost, a, parity, dagger); } else if (a.Ncolor() == 4) { genericPackGhost<Float,order,Ns,4>(ghost, a, parity, dagger); } else if (a.Ncolor() == 6) { genericPackGhost<Float,order,Ns,6>(ghost, a, parity, dagger); } else if (a.Ncolor() == 12) { genericPackGhost<Float,order,Ns,12>(ghost, a, parity, dagger); } else if (a.Ncolor() == 16) { genericPackGhost<Float,order,Ns,16>(ghost, a, parity, dagger); } else if (a.Ncolor() == 20) { genericPackGhost<Float,order,Ns,20>(ghost, a, parity, dagger); } else if (a.Ncolor() == 24) { genericPackGhost<Float,order,Ns,24>(ghost, a, 
parity, dagger); } else if (a.Ncolor() == 48) { genericPackGhost<Float,order,Ns,48>(ghost, a, parity, dagger); } else if (a.Ncolor() == 72) { genericPackGhost<Float,order,Ns,72>(ghost, a, parity, dagger); } else if (a.Ncolor() == 256) { genericPackGhost<Float,order,Ns,256>(ghost, a, parity, dagger); } else if (a.Ncolor() == 576) { genericPackGhost<Float,order,Ns,576>(ghost, a, parity, dagger); } else { errorQuda("Unsupported nColor = %d", a.Ncolor()); } } template <typename Float, QudaFieldOrder order> void genericPackGhost(void **ghost, const ColorSpinorField &a, const QudaParity parity, const int dagger) { if (a.Nspin() == 4) { genericPackGhost<Float,order,4>(ghost, a, parity, dagger); } else if (a.Nspin() == 2) { genericPackGhost<Float,order,2>(ghost, a, parity, dagger); #ifdef GPU_STAGGERED_DIRAC } else if (a.Nspin() == 1) { genericPackGhost<Float,order,1>(ghost, a, parity, dagger); #endif } else { errorQuda("Unsupported nSpin = %d", a.Nspin()); } } template <typename Float> void genericPackGhost(void **ghost, const ColorSpinorField &a, const QudaParity parity, const int dagger) { if (a.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) { genericPackGhost<Float,QUDA_FLOAT2_FIELD_ORDER>(ghost, a, parity, dagger); } else if (a.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) { genericPackGhost<Float,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER>(ghost, a, parity, dagger); } else { errorQuda("Unsupported field order = %d", a.FieldOrder()); } } void genericPackGhost(void **ghost, const ColorSpinorField &a, const QudaParity parity, const int dagger) { if (a.FieldOrder() == QUDA_QOP_DOMAIN_WALL_FIELD_ORDER) { errorQuda("Field order %d not supported", a.FieldOrder()); } // only do packing if one of the dimensions is partitioned bool partitioned; for (int d=0; d<4; d++) if (comm_dim_partitioned(d)) partitioned = true; if (!partitioned) return; if (a.Precision() == QUDA_DOUBLE_PRECISION) { genericPackGhost<double>(ghost, a, parity, dagger); } else if (a.Precision() == 
QUDA_SINGLE_PRECISION) { genericPackGhost<float>(ghost, a, parity, dagger); } else { errorQuda("Unsupported precision %d", a.Precision()); } } } // namespace quda
612c59ffd1d869e78d5e4234c1f0b20aebfe2b08.cu
#include <color_spinor_field.h> #include <color_spinor_field_order.h> #include <index_helper.cuh> #include <tune_quda.h> #define FINE_GRAINED_ACCESS namespace quda { template <typename Field> struct PackGhostArg { Field field; void **ghost; const void *v; int X[QUDA_MAX_DIM]; const int volumeCB; const int nDim; const int nFace; const int parity; const int nParity; const int dagger; const QudaDWFPCType pc_type; int commDim[4]; // whether a given dimension is partitioned or not PackGhostArg(Field field, void **ghost, const ColorSpinorField &a, int parity, int dagger) : field(field), ghost(ghost), v(a.V()), volumeCB(a.VolumeCB()), nDim(a.Ndim()), nFace(a.Nspin() == 1 ? 3 : 1), parity(parity), nParity(a.SiteSubset()), dagger(dagger), pc_type(a.DWFPCtype()) { for (int d=0; d<nDim; d++) X[d] = a.X(d); X[0] *= (nParity == 1) ? 2 : 1; // set to full lattice dimensions X[4] = (nDim == 5) ? a.X(4) : 1; // set fifth dimension correctly for (int i=0; i<4; i++) { commDim[i] = comm_dim_partitioned(i); } } }; template <typename Float, int Ns, int Nc, typename Arg> __device__ __host__ inline void packGhost(Arg &arg, int cb_idx, int parity, int spinor_parity) { typedef typename mapper<Float>::type RegType; const int *X = arg.X; int x[5] = { }; if (arg.nDim == 5) getCoords5(x, cb_idx, X, parity, arg.pc_type); else getCoords(x, cb_idx, X, parity); #pragma unroll for (int dim=0; dim<4; dim++) { if (arg.commDim[dim] && x[dim] < arg.nFace){ #ifdef FINE_GRAINED_ACCESS for (int s=0; s<Ns; s++) { for (int c=0; c<Nc; c++) { arg.field.Ghost(dim, 0, spinor_parity, ghostFaceIndex<0>(x,arg.X,dim,arg.nFace), s, c) = arg.field(spinor_parity, cb_idx, s, c); } } #else RegType tmp[2*Ns*Nc]; arg.field.load(tmp, cb_idx, spinor_parity); arg.field.saveGhost(tmp, ghostFaceIndex<0>(x,arg.X,dim,arg.nFace), dim, 0, spinor_parity); #endif } if (arg.commDim[dim] && x[dim] >= X[dim] - arg.nFace){ #ifdef FINE_GRAINED_ACCESS for (int s=0; s<Ns; s++) { for (int c=0; c<Nc; c++) { arg.field.Ghost(dim, 1, 
spinor_parity, ghostFaceIndex<1>(x,arg.X,dim,arg.nFace), s, c) = arg.field(spinor_parity, cb_idx, s, c); } } #else RegType tmp[2*Ns*Nc]; arg.field.load(tmp, cb_idx, spinor_parity); arg.field.saveGhost(tmp, ghostFaceIndex<1>(x,arg.X,dim,arg.nFace), dim, 1, spinor_parity); #endif } } } template <typename Float, int Ns, int Nc, typename Arg> void GenericPackGhost(Arg &arg) { for (int parity=0; parity<arg.nParity; parity++) { parity = (arg.nParity == 2) ? parity : arg.parity; const int spinor_parity = (arg.nParity == 2) ? parity : 0; for (int i=0; i<arg.volumeCB; i++) packGhost<Float,Ns,Nc>(arg, i, parity, spinor_parity); } } template <typename Float, int Ns, int Nc, typename Arg> __global__ void GenericPackGhostKernel(Arg arg) { int x_cb = blockIdx.x*blockDim.x + threadIdx.x; if (x_cb >= arg.volumeCB) return; const int parity = (blockDim.y == 2) ? threadIdx.y : arg.parity; const int spinor_parity = (blockDim.y == 2) ? parity : 0; packGhost<Float,Ns,Nc>(arg, x_cb, parity, spinor_parity); } template <typename Float, int Ns, int Nc, typename Arg> class GenericPackGhostLauncher : public Tunable { protected: Arg &arg; const ColorSpinorField &meta; long long flops() const { return 0; } long long bytes() const { // FIXME take into account paritioning size_t totalBytes = 0; for (int d=0; d<4; d++) { if (!comm_dim_partitioned(d)) continue; totalBytes += 2*arg.nFace*2*Ns*Nc*meta.SurfaceCB(d)*meta.Precision(); } return totalBytes; } unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneGridDim() const { return false; } unsigned int minThreads() const { return arg.volumeCB; } bool advanceTuneParam(TuneParam &param) const { bool rtn = Tunable::advanceTuneParam(param); param.block.y = arg.nParity; return rtn; } virtual void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.block.y = arg.nParity; } /** sets default values for when tuning is disabled */ virtual void 
defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.block.y = arg.nParity; } public: GenericPackGhostLauncher(Arg &arg, const ColorSpinorField &meta) : arg(arg), meta(meta) { strcpy(aux, meta.AuxString()); #ifdef MULTI_GPU char comm[5]; comm[0] = (arg.commDim[0] ? '1' : '0'); comm[1] = (arg.commDim[1] ? '1' : '0'); comm[2] = (arg.commDim[2] ? '1' : '0'); comm[3] = (arg.commDim[3] ? '1' : '0'); comm[4] = '\0'; strcat(aux,",comm="); strcat(aux,comm); #endif } virtual ~GenericPackGhostLauncher() { } void apply(const cudaStream_t &stream) { if (meta.Location() == QUDA_CPU_FIELD_LOCATION) { GenericPackGhost<Float,Ns,Nc,Arg>(arg); } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); GenericPackGhostKernel<Float,Ns,Nc,Arg> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), meta.AuxString()); } }; template <typename Float, QudaFieldOrder order, int Ns, int Nc> void genericPackGhost(void **ghost, const ColorSpinorField &a, const QudaParity parity, const int dagger) { #ifdef FINE_GRAINED_ACCESS typedef typename colorspinor::FieldOrderCB<Float,Ns,Nc,1,order> Q; Q field(a, 0, ghost); #else typedef typename colorspinor_order_mapper<Float,order,Ns,Nc>::type Q; Q field(a, (Float*)0, (float*)0, (Float**)ghost); #endif PackGhostArg<Q> arg(field, ghost, a, parity, dagger); GenericPackGhostLauncher<Float,Ns,Nc,PackGhostArg<Q> > launch(arg, a); launch.apply(0); } template <typename Float, QudaFieldOrder order, int Ns> void genericPackGhost(void **ghost, const ColorSpinorField &a, const QudaParity parity, const int dagger) { if (a.Ncolor() == 2) { genericPackGhost<Float,order,Ns,2>(ghost, a, parity, dagger); } else if (a.Ncolor() == 3) { genericPackGhost<Float,order,Ns,3>(ghost, a, parity, dagger); } else if (a.Ncolor() == 4) { genericPackGhost<Float,order,Ns,4>(ghost, a, parity, dagger); } else if (a.Ncolor() == 6) { 
genericPackGhost<Float,order,Ns,6>(ghost, a, parity, dagger); } else if (a.Ncolor() == 12) { genericPackGhost<Float,order,Ns,12>(ghost, a, parity, dagger); } else if (a.Ncolor() == 16) { genericPackGhost<Float,order,Ns,16>(ghost, a, parity, dagger); } else if (a.Ncolor() == 20) { genericPackGhost<Float,order,Ns,20>(ghost, a, parity, dagger); } else if (a.Ncolor() == 24) { genericPackGhost<Float,order,Ns,24>(ghost, a, parity, dagger); } else if (a.Ncolor() == 48) { genericPackGhost<Float,order,Ns,48>(ghost, a, parity, dagger); } else if (a.Ncolor() == 72) { genericPackGhost<Float,order,Ns,72>(ghost, a, parity, dagger); } else if (a.Ncolor() == 256) { genericPackGhost<Float,order,Ns,256>(ghost, a, parity, dagger); } else if (a.Ncolor() == 576) { genericPackGhost<Float,order,Ns,576>(ghost, a, parity, dagger); } else { errorQuda("Unsupported nColor = %d", a.Ncolor()); } } template <typename Float, QudaFieldOrder order> void genericPackGhost(void **ghost, const ColorSpinorField &a, const QudaParity parity, const int dagger) { if (a.Nspin() == 4) { genericPackGhost<Float,order,4>(ghost, a, parity, dagger); } else if (a.Nspin() == 2) { genericPackGhost<Float,order,2>(ghost, a, parity, dagger); #ifdef GPU_STAGGERED_DIRAC } else if (a.Nspin() == 1) { genericPackGhost<Float,order,1>(ghost, a, parity, dagger); #endif } else { errorQuda("Unsupported nSpin = %d", a.Nspin()); } } template <typename Float> void genericPackGhost(void **ghost, const ColorSpinorField &a, const QudaParity parity, const int dagger) { if (a.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) { genericPackGhost<Float,QUDA_FLOAT2_FIELD_ORDER>(ghost, a, parity, dagger); } else if (a.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) { genericPackGhost<Float,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER>(ghost, a, parity, dagger); } else { errorQuda("Unsupported field order = %d", a.FieldOrder()); } } void genericPackGhost(void **ghost, const ColorSpinorField &a, const QudaParity parity, const int dagger) { if (a.FieldOrder() == 
QUDA_QOP_DOMAIN_WALL_FIELD_ORDER) { errorQuda("Field order %d not supported", a.FieldOrder()); } // only do packing if one of the dimensions is partitioned bool partitioned; for (int d=0; d<4; d++) if (comm_dim_partitioned(d)) partitioned = true; if (!partitioned) return; if (a.Precision() == QUDA_DOUBLE_PRECISION) { genericPackGhost<double>(ghost, a, parity, dagger); } else if (a.Precision() == QUDA_SINGLE_PRECISION) { genericPackGhost<float>(ghost, a, parity, dagger); } else { errorQuda("Unsupported precision %d", a.Precision()); } } } // namespace quda
1c87255e69327b6cbce270b6cdc2b2a99c426bf3.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Copyright 2019 BlazingDB, Inc. * Copyright 2019 Eyal Rozenberg <eyalroz@blazingdb.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cudf_test_utils.cuh" #include <nvstrings/NVCategory.h> #include <nvstrings/NVStrings.h> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <tests/utilities/nvcategory_utils.cuh> #include <cudf/functions.h> namespace { static constexpr char null_signifier = '@'; namespace detail { // When streaming char-like types, the standard library streams tend to treat // them as characters rather than numbers, e.g. you would get an 'a' instead of 97. 
// The following function(s) ensure we "promote" such values to integers before // they're streamed template <typename T> const T& promote_for_streaming(const T& x) { return x; } //int promote_for_streaming(const char& x) { return x; } //int promote_for_streaming(const unsigned char& x) { return x; } int promote_for_streaming(const signed char& x) { return x; } } // namespace detail struct column_printer { template<typename Element> void operator()(gdf_column const* the_column, unsigned min_printing_width, std::ostream& stream) { gdf_size_type num_rows { the_column->size }; Element const* column_data { static_cast<Element const*>(the_column->data) }; std::vector<Element> host_side_data(num_rows); hipMemcpy(host_side_data.data(), column_data, num_rows * sizeof(Element), hipMemcpyDeviceToHost); gdf_size_type const num_masks { gdf_valid_allocation_size(num_rows) }; std::vector<gdf_valid_type> h_mask(num_masks, ~gdf_valid_type { 0 }); if (nullptr != the_column->valid) { hipMemcpy(h_mask.data(), the_column->valid, num_masks * sizeof(gdf_valid_type), hipMemcpyDeviceToHost); } for (gdf_size_type i = 0; i < num_rows; ++i) { stream << std::setw(min_printing_width); if (gdf_is_valid(h_mask.data(), i)) { stream << detail::promote_for_streaming(host_side_data[i]); } else { stream << null_representative; } stream << ' '; } stream << std::endl; if(the_column->dtype == GDF_STRING_CATEGORY){ stream<<"Category Data (index | key):\n"; if(the_column->dtype_info.category != nullptr){ NVCategory *category = static_cast<NVCategory *>(the_column->dtype_info.category); size_t keys_size = category->keys_size(); NVStrings *keys = category->get_keys(); if (keys_size>0) { char ** data = new char *[keys_size]; int * byte_sizes = new int[keys_size]; keys->byte_count(byte_sizes, false); for(size_t i=0; i<keys_size; i++){ data[i]=new char[::max(2, byte_sizes[i])]; } keys->to_host(data, 0, keys_size); for(size_t i=0; i<keys_size; i++){ // null terminate strings // TODO: nvstrings overwrites 
data[i] ifit is a null string // Update this based on resolution of https://github.com/rapidsai/custrings/issues/330 if (byte_sizes[i]!=-1) data[i][byte_sizes[i]]=0; } for(size_t i=0; i<keys_size; i++){ // print category strings stream << "(" << i << "|"; if (data[i] == nullptr) stream << null_signifier; // account for null else stream << data[i]; stream << ")\t"; } stream<<std::endl; for(size_t i=0; i<keys_size; i++){ delete data[i]; } delete [] data; delete [] byte_sizes; } } } } }; /**---------------------------------------------------------------------------* * @brief Functor for comparing whether two elements from two gdf_columns are * equal. * *---------------------------------------------------------------------------**/ template <typename T> struct elements_equal { gdf_column lhs_col; gdf_column rhs_col; bool nulls_are_equivalent; using bit_mask_t = bit_mask::bit_mask_t; /**---------------------------------------------------------------------------* * @brief Constructs functor for comparing elements between two gdf_column's * * @param lhs The left column for comparison * @param rhs The right column for comparison * @param nulls_are_equal Desired behavior for whether or not nulls are * treated as equal to other nulls. Defaults to true. 
*---------------------------------------------------------------------------**/ __host__ __device__ elements_equal(gdf_column lhs, gdf_column rhs, bool nulls_are_equal = true) : lhs_col{lhs}, rhs_col{rhs}, nulls_are_equivalent{nulls_are_equal} {} __device__ bool operator()(gdf_index_type row) { bool const lhs_is_valid{gdf_is_valid(lhs_col.valid, row)}; bool const rhs_is_valid{gdf_is_valid(rhs_col.valid, row)}; if (lhs_is_valid and rhs_is_valid) { return static_cast<T const*>(lhs_col.data)[row] == static_cast<T const*>(rhs_col.data)[row]; } // If one value is valid but the other is not if (lhs_is_valid != rhs_is_valid) { return false; } return nulls_are_equivalent; } }; } // namespace anonymous /** * ---------------------------------------------------------------------------* * @brief Compare two gdf_columns on all fields, including pairwise comparison * of data and valid arrays * * @tparam T The type of columns to compare * @param left The left column * @param right The right column * @return bool Whether or not the columns are equal * ---------------------------------------------------------------------------**/ template <typename T> bool gdf_equal_columns(gdf_column const& left, gdf_column const& right) { if (left.size != right.size) return false; if (left.dtype != right.dtype) return false; if (left.null_count != right.null_count) return false; if (left.dtype_info.time_unit != right.dtype_info.time_unit) return false; if ((left.col_name == nullptr) != (right.col_name == nullptr)) return false; // if one is null but not both if (left.col_name != nullptr && std::strcmp(left.col_name, right.col_name) != 0) return false; if ((left.data == nullptr) != (right.data == nullptr)) return false; // if one is null but not both if ((left.valid == nullptr) != (right.valid == nullptr)) return false; // if one is null but not both if (left.data == nullptr) return true; // logically, both are null if (left.dtype == GDF_STRING_CATEGORY) { // Transfer input column to host 
std::vector<std::string> left_data, right_data; std::vector<gdf_valid_type> left_bitmask, right_bitmask; std::tie(left_data, left_bitmask) = cudf::test::nvcategory_column_to_host(const_cast<gdf_column*>(&left)); std::tie(right_data, right_bitmask) = cudf::test::nvcategory_column_to_host(const_cast<gdf_column*>(&right)); CHECK_STREAM(0); if (left_data.size() != right_data.size()) return false; for (size_t i = 0; i < left_data.size(); i++) { bool const left_is_valid{gdf_is_valid(left_bitmask.data(), i)}; bool const right_is_valid{gdf_is_valid(right_bitmask.data(), i)}; if (left_is_valid != right_is_valid) return false; else if (left_is_valid && (left_data[i] != right_data[i])) return false; } return true; } else { if ((left.dtype_info.category != nullptr) || (right.dtype_info.category != nullptr)) return false; // category must be nullptr bool equal_data = thrust::all_of(rmm::exec_policy()->on(0), thrust::make_counting_iterator(0), thrust::make_counting_iterator(left.size), elements_equal<T>{left, right}); CHECK_STREAM(0); return equal_data; } } namespace { struct columns_equal { template <typename T> bool operator()(gdf_column const& left, gdf_column const& right) { return gdf_equal_columns<T>(left, right); } }; }; // namespace anonymous // Type-erased version of gdf_equal_columns bool gdf_equal_columns(gdf_column const& left, gdf_column const& right) { return cudf::type_dispatcher(left.dtype, columns_equal{}, left, right); } void print_gdf_column(gdf_column const * the_column, unsigned min_printing_width, std::ostream& stream) { cudf::type_dispatcher(the_column->dtype, column_printer{}, the_column, min_printing_width, stream); } void print_valid_data(const gdf_valid_type *validity_mask, const size_t num_rows) { hipError_t error; hipPointerAttribute_t attrib; hipPointerGetAttributes(&attrib, validity_mask); error = hipGetLastError(); std::vector<gdf_valid_type> h_mask(gdf_valid_allocation_size(num_rows)); if (error != hipErrorInvalidValue && isDeviceType(attrib)) 
hipMemcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows), hipMemcpyDeviceToHost); else memcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows)); std::transform( h_mask.begin(), h_mask.begin() + gdf_num_bitmask_elements(num_rows), std::ostream_iterator<std::string>(std::cout, " "), [](gdf_valid_type x) { auto bits = std::bitset<GDF_VALID_BITSIZE>(x).to_string(null_signifier); return std::string(bits.rbegin(), bits.rend()); }); std::cout << std::endl; } gdf_size_type count_valid_bits_host( std::vector<gdf_valid_type> const& masks, gdf_size_type const num_rows) { if ((0 == num_rows) || (0 == masks.size())) { return 0; } gdf_size_type count{0}; // Count the valid bits for all masks except the last one for (gdf_size_type i = 0; i < (gdf_num_bitmask_elements(num_rows) - 1); ++i) { gdf_valid_type current_mask = masks[i]; while (current_mask > 0) { current_mask &= (current_mask - 1); count++; } } // Only count the bits in the last mask that correspond to rows int num_rows_last_mask = num_rows % GDF_VALID_BITSIZE; if (num_rows_last_mask == 0) { num_rows_last_mask = GDF_VALID_BITSIZE; } // Mask off only the bits that correspond to rows gdf_valid_type const rows_mask = ( gdf_valid_type{1} << num_rows_last_mask ) - 1; gdf_valid_type last_mask = masks[gdf_num_bitmask_elements(num_rows) - 1] & rows_mask; while (last_mask > 0) { last_mask &= (last_mask - 1); count++; } return count; }
1c87255e69327b6cbce270b6cdc2b2a99c426bf3.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Copyright 2019 BlazingDB, Inc. * Copyright 2019 Eyal Rozenberg <eyalroz@blazingdb.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cudf_test_utils.cuh" #include <nvstrings/NVCategory.h> #include <nvstrings/NVStrings.h> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <tests/utilities/nvcategory_utils.cuh> #include <cudf/functions.h> namespace { static constexpr char null_signifier = '@'; namespace detail { // When streaming char-like types, the standard library streams tend to treat // them as characters rather than numbers, e.g. you would get an 'a' instead of 97. 
// The following function(s) ensure we "promote" such values to integers before // they're streamed template <typename T> const T& promote_for_streaming(const T& x) { return x; } //int promote_for_streaming(const char& x) { return x; } //int promote_for_streaming(const unsigned char& x) { return x; } int promote_for_streaming(const signed char& x) { return x; } } // namespace detail struct column_printer { template<typename Element> void operator()(gdf_column const* the_column, unsigned min_printing_width, std::ostream& stream) { gdf_size_type num_rows { the_column->size }; Element const* column_data { static_cast<Element const*>(the_column->data) }; std::vector<Element> host_side_data(num_rows); cudaMemcpy(host_side_data.data(), column_data, num_rows * sizeof(Element), cudaMemcpyDeviceToHost); gdf_size_type const num_masks { gdf_valid_allocation_size(num_rows) }; std::vector<gdf_valid_type> h_mask(num_masks, ~gdf_valid_type { 0 }); if (nullptr != the_column->valid) { cudaMemcpy(h_mask.data(), the_column->valid, num_masks * sizeof(gdf_valid_type), cudaMemcpyDeviceToHost); } for (gdf_size_type i = 0; i < num_rows; ++i) { stream << std::setw(min_printing_width); if (gdf_is_valid(h_mask.data(), i)) { stream << detail::promote_for_streaming(host_side_data[i]); } else { stream << null_representative; } stream << ' '; } stream << std::endl; if(the_column->dtype == GDF_STRING_CATEGORY){ stream<<"Category Data (index | key):\n"; if(the_column->dtype_info.category != nullptr){ NVCategory *category = static_cast<NVCategory *>(the_column->dtype_info.category); size_t keys_size = category->keys_size(); NVStrings *keys = category->get_keys(); if (keys_size>0) { char ** data = new char *[keys_size]; int * byte_sizes = new int[keys_size]; keys->byte_count(byte_sizes, false); for(size_t i=0; i<keys_size; i++){ data[i]=new char[std::max(2, byte_sizes[i])]; } keys->to_host(data, 0, keys_size); for(size_t i=0; i<keys_size; i++){ // null terminate strings // TODO: nvstrings overwrites 
data[i] ifit is a null string // Update this based on resolution of https://github.com/rapidsai/custrings/issues/330 if (byte_sizes[i]!=-1) data[i][byte_sizes[i]]=0; } for(size_t i=0; i<keys_size; i++){ // print category strings stream << "(" << i << "|"; if (data[i] == nullptr) stream << null_signifier; // account for null else stream << data[i]; stream << ")\t"; } stream<<std::endl; for(size_t i=0; i<keys_size; i++){ delete data[i]; } delete [] data; delete [] byte_sizes; } } } } }; /**---------------------------------------------------------------------------* * @brief Functor for comparing whether two elements from two gdf_columns are * equal. * *---------------------------------------------------------------------------**/ template <typename T> struct elements_equal { gdf_column lhs_col; gdf_column rhs_col; bool nulls_are_equivalent; using bit_mask_t = bit_mask::bit_mask_t; /**---------------------------------------------------------------------------* * @brief Constructs functor for comparing elements between two gdf_column's * * @param lhs The left column for comparison * @param rhs The right column for comparison * @param nulls_are_equal Desired behavior for whether or not nulls are * treated as equal to other nulls. Defaults to true. 
*---------------------------------------------------------------------------**/ __host__ __device__ elements_equal(gdf_column lhs, gdf_column rhs, bool nulls_are_equal = true) : lhs_col{lhs}, rhs_col{rhs}, nulls_are_equivalent{nulls_are_equal} {} __device__ bool operator()(gdf_index_type row) { bool const lhs_is_valid{gdf_is_valid(lhs_col.valid, row)}; bool const rhs_is_valid{gdf_is_valid(rhs_col.valid, row)}; if (lhs_is_valid and rhs_is_valid) { return static_cast<T const*>(lhs_col.data)[row] == static_cast<T const*>(rhs_col.data)[row]; } // If one value is valid but the other is not if (lhs_is_valid != rhs_is_valid) { return false; } return nulls_are_equivalent; } }; } // namespace anonymous /** * ---------------------------------------------------------------------------* * @brief Compare two gdf_columns on all fields, including pairwise comparison * of data and valid arrays * * @tparam T The type of columns to compare * @param left The left column * @param right The right column * @return bool Whether or not the columns are equal * ---------------------------------------------------------------------------**/ template <typename T> bool gdf_equal_columns(gdf_column const& left, gdf_column const& right) { if (left.size != right.size) return false; if (left.dtype != right.dtype) return false; if (left.null_count != right.null_count) return false; if (left.dtype_info.time_unit != right.dtype_info.time_unit) return false; if ((left.col_name == nullptr) != (right.col_name == nullptr)) return false; // if one is null but not both if (left.col_name != nullptr && std::strcmp(left.col_name, right.col_name) != 0) return false; if ((left.data == nullptr) != (right.data == nullptr)) return false; // if one is null but not both if ((left.valid == nullptr) != (right.valid == nullptr)) return false; // if one is null but not both if (left.data == nullptr) return true; // logically, both are null if (left.dtype == GDF_STRING_CATEGORY) { // Transfer input column to host 
std::vector<std::string> left_data, right_data; std::vector<gdf_valid_type> left_bitmask, right_bitmask; std::tie(left_data, left_bitmask) = cudf::test::nvcategory_column_to_host(const_cast<gdf_column*>(&left)); std::tie(right_data, right_bitmask) = cudf::test::nvcategory_column_to_host(const_cast<gdf_column*>(&right)); CHECK_STREAM(0); if (left_data.size() != right_data.size()) return false; for (size_t i = 0; i < left_data.size(); i++) { bool const left_is_valid{gdf_is_valid(left_bitmask.data(), i)}; bool const right_is_valid{gdf_is_valid(right_bitmask.data(), i)}; if (left_is_valid != right_is_valid) return false; else if (left_is_valid && (left_data[i] != right_data[i])) return false; } return true; } else { if ((left.dtype_info.category != nullptr) || (right.dtype_info.category != nullptr)) return false; // category must be nullptr bool equal_data = thrust::all_of(rmm::exec_policy()->on(0), thrust::make_counting_iterator(0), thrust::make_counting_iterator(left.size), elements_equal<T>{left, right}); CHECK_STREAM(0); return equal_data; } } namespace { struct columns_equal { template <typename T> bool operator()(gdf_column const& left, gdf_column const& right) { return gdf_equal_columns<T>(left, right); } }; }; // namespace anonymous // Type-erased version of gdf_equal_columns bool gdf_equal_columns(gdf_column const& left, gdf_column const& right) { return cudf::type_dispatcher(left.dtype, columns_equal{}, left, right); } void print_gdf_column(gdf_column const * the_column, unsigned min_printing_width, std::ostream& stream) { cudf::type_dispatcher(the_column->dtype, column_printer{}, the_column, min_printing_width, stream); } void print_valid_data(const gdf_valid_type *validity_mask, const size_t num_rows) { cudaError_t error; cudaPointerAttributes attrib; cudaPointerGetAttributes(&attrib, validity_mask); error = cudaGetLastError(); std::vector<gdf_valid_type> h_mask(gdf_valid_allocation_size(num_rows)); if (error != cudaErrorInvalidValue && 
isDeviceType(attrib)) cudaMemcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows), cudaMemcpyDeviceToHost); else memcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows)); std::transform( h_mask.begin(), h_mask.begin() + gdf_num_bitmask_elements(num_rows), std::ostream_iterator<std::string>(std::cout, " "), [](gdf_valid_type x) { auto bits = std::bitset<GDF_VALID_BITSIZE>(x).to_string(null_signifier); return std::string(bits.rbegin(), bits.rend()); }); std::cout << std::endl; } gdf_size_type count_valid_bits_host( std::vector<gdf_valid_type> const& masks, gdf_size_type const num_rows) { if ((0 == num_rows) || (0 == masks.size())) { return 0; } gdf_size_type count{0}; // Count the valid bits for all masks except the last one for (gdf_size_type i = 0; i < (gdf_num_bitmask_elements(num_rows) - 1); ++i) { gdf_valid_type current_mask = masks[i]; while (current_mask > 0) { current_mask &= (current_mask - 1); count++; } } // Only count the bits in the last mask that correspond to rows int num_rows_last_mask = num_rows % GDF_VALID_BITSIZE; if (num_rows_last_mask == 0) { num_rows_last_mask = GDF_VALID_BITSIZE; } // Mask off only the bits that correspond to rows gdf_valid_type const rows_mask = ( gdf_valid_type{1} << num_rows_last_mask ) - 1; gdf_valid_type last_mask = masks[gdf_num_bitmask_elements(num_rows) - 1] & rows_mask; while (last_mask > 0) { last_mask &= (last_mask - 1); count++; } return count; }
36c9d25ab7816bf67cd3528b80d22e578ccc418b.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2018-2019 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: ~GPUCoordinateUpdater() { // NOLINT if (learner_param_->gpu_id >= 0) { dh::safe_cuda(hipSetDevice(learner_param_->gpu_id)); } } // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector)); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); fromJson(config.at("linear_train_param"), &tparam_); fromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = toJson(tparam_); out["coordinate_param"] = toJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (learner_param_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); if ( IsEmpty() ) { return; } dh::safe_cuda(hipSetDevice(learner_param_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through 
columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = batch[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back( std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } ba_.Allocate(learner_param_->gpu_id, &data_, row_ptr_.back(), &gpair_, num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(hipMemcpy( data_.subspan(row_ptr_[fidx]).data(), col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), hipMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param_)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (learner_param_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); monitor_.Start("UpdateFeature"); for (auto group_idx = 0; group_idx < model->learner_model_param_->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param_->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, 
tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->learner_model_param_->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param_->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->bias()[group_idx] += dbias; // Update residual if (learner_param_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param_->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param_->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (learner_param_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param_->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(hipSetDevice(learner_param_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(temp_, perm, num_row_); } // This needs to be public because of the __device__ lambda. 
void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = gpair_; dh::LaunchN(learner_param_->gpu_id, num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(hipSetDevice(learner_param_->gpu_id)); common::Span<xgboost::Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = gpair_; auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(temp_, multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. 
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = gpair_; common::Span<Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(learner_param_->gpu_id, col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(hipMemcpyAsync( gpair_.data(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), hipMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; dh::BulkAllocator ba_; std::vector<size_t> row_ptr_; common::Span<xgboost::Entry> data_; common::Span<GradientPair> gpair_; dh::CubMemory temp_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
36c9d25ab7816bf67cd3528b80d22e578ccc418b.cu
/*! * Copyright 2018-2019 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: ~GPUCoordinateUpdater() { // NOLINT if (learner_param_->gpu_id >= 0) { dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id)); } } // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector)); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); fromJson(config.at("linear_train_param"), &tparam_); fromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = toJson(tparam_); out["coordinate_param"] = toJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (learner_param_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); if ( IsEmpty() ) { return; } dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { 
common::Span<Entry const> col = batch[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back( std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } ba_.Allocate(learner_param_->gpu_id, &data_, row_ptr_.back(), &gpair_, num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(cudaMemcpy( data_.subspan(row_ptr_[fidx]).data(), col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), cudaMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param_)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (learner_param_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); monitor_.Start("UpdateFeature"); for (auto group_idx = 0; group_idx < model->learner_model_param_->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param_->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if 
(fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->learner_model_param_->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param_->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->bias()[group_idx] += dbias; // Update residual if (learner_param_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param_->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param_->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (learner_param_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param_->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(temp_, perm, num_row_); } // This needs to be public because of the __device__ lambda. 
void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = gpair_; dh::LaunchN(learner_param_->gpu_id, num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id)); common::Span<xgboost::Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = gpair_; auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(temp_, multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. 
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = gpair_; common::Span<Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(learner_param_->gpu_id, col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(cudaMemcpyAsync( gpair_.data(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), cudaMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; dh::BulkAllocator ba_; std::vector<size_t> row_ptr_; common::Span<xgboost::Entry> data_; common::Span<GradientPair> gpair_; dh::CubMemory temp_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost