hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
e39115c6fe17c263db9199a1e8d6e18aa6629575.hip | // !!! This is a file automatically generated by hipify!!!
#include "parallel.cuh"
#include "sparse_struc.cuh"
#include "sys.cuh"
void sys_inf_norm_parallel(struct sys *sys, double *hinf_freq_peak, double *hinf_nrm, hipblasHandle_t cublas_handle, hipsolverDnHandle_t cusolverH) {
struct sparse matA_d;
struct sparse matE_d;
struct sparse matC_d;
struct sparse matB_d;
double *diff_d, *prod1_d, *prod2_d, *matB_full_d, *matC_full_d;
double alpha = 1, beta = 0;
int size_x, size_y;
int inv_nrow, inv_ncol;
int incx = 1, incy = 1;
//inversion parameters
int lda, ldb;
double *inv_full_d;
int *ipiv_d = NULL, *info_d = NULL;
int lwork = 0;
double *d_work = NULL;
double omega, omega_peak, omegastep, omegamax, nrm_max, current_nrm;
matE_d.nrow = sys->e.nrow;
matE_d.ncol = sys->e.ncol;
matE_d.nnz = sys->e.nnz;
matA_d.nrow = sys->a.nrow;
matA_d.ncol = sys->a.ncol;
matA_d.nnz = sys->a.nnz;
matC_d.nrow = sys->c.nrow;
matC_d.ncol = sys->c.ncol;
matC_d.nnz = sys->c.nnz;
matB_d.nrow = sys->b.nrow;
matB_d.ncol = sys->b.ncol;
matB_d.nnz = sys->b.nnz;
inv_nrow = matE_d.nrow;
inv_ncol = matE_d.ncol;
lda = matE_d.nrow;
ldb = matE_d.nrow;
//------------------------------allocating device pointers------------------------------------------
hipMalloc((void**)&matA_d.row, matA_d.nnz * sizeof(int));
hipMalloc((void**)&matA_d.col, matA_d.nnz * sizeof(int));
hipMalloc((void**)&matA_d.val, matA_d.nnz * sizeof(double));
hipMalloc((void**)&matE_d.row, matE_d.nnz * sizeof(int));
hipMalloc((void**)&matE_d.col, matE_d.nnz * sizeof(int));
hipMalloc((void**)&matE_d.val, matE_d.nnz * sizeof(double));
hipMalloc((void**)&matC_d.row, matC_d.nnz * sizeof(int));
hipMalloc((void**)&matC_d.col, matC_d.nnz * sizeof(int));
hipMalloc((void**)&matC_d.val, matC_d.nnz * sizeof(double));
hipMalloc((void**)&matB_d.row, matB_d.nnz * sizeof(int));
hipMalloc((void**)&matB_d.col, matB_d.nnz * sizeof(int));
hipMalloc((void**)&matB_d.val, matB_d.nnz * sizeof(double));
hipMalloc((void**)&diff_d, matE_d.nrow*matE_d.ncol * sizeof(double));
hipMalloc((void**)&inv_full_d, inv_nrow * inv_ncol * sizeof(double));
hipMalloc((void**)&ipiv_d, matE_d.nrow * sizeof(int));
hipMalloc((void**)&info_d, sizeof(int));
hipMalloc((void**)&matB_full_d, matB_d.nrow*matB_d.ncol * sizeof(double));
hipMalloc((void**)&matC_full_d, matC_d.nrow*matC_d.ncol * sizeof(double));
hipMalloc((void**)&prod1_d, matC_d.nrow*matE_d.ncol * sizeof(double));
hipMalloc((void**)&prod2_d, inv_nrow*matB_d.ncol * sizeof(double));
//----------------------------copy matrices to the device---------------------------------------------
hipMemcpy(matA_d.row, sys->a.row, matA_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matA_d.col, sys->a.col, matA_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matA_d.val, sys->a.val, matA_d.nnz * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(matB_d.row, sys->b.row, matB_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matB_d.col, sys->b.col, matB_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matB_d.val, sys->b.val, matB_d.nnz * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(matC_d.row, sys->c.row, matC_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matC_d.col, sys->c.col, matC_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matC_d.val, sys->c.val, matC_d.nnz * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(matE_d.row, sys->e.row, matE_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matE_d.col, sys->e.col, matE_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matE_d.val, sys->e.val, matE_d.nnz * sizeof(double), hipMemcpyHostToDevice);
omega = 0;
omega_peak = 0;
omegamax = 0.1;
omegastep = 0.01;
nrm_max = 0;
current_nrm = 0;
//converting B,C into dense format
hipMemset(matB_full_d, 0, matB_d.nrow*matB_d.ncol * sizeof(double));
hipMemset(matC_full_d, 0, matC_d.nrow*matC_d.ncol * sizeof(double));
if (matB_d.nnz % THREADS == 0)
size_x = matB_d.nnz / THREADS;
else
size_x = (matB_d.nnz / THREADS) + 1;
dim3 threads3(THREADS, 1, 1);
dim3 blocks3(size_x, 1, 1);
sparse2full_kernel << <blocks3, threads3 >> > (matB_full_d, matB_d.row, matB_d.col, matB_d.val, matB_d.ncol, matB_d.nnz, 1);
if (matC_d.nnz % THREADS == 0)
size_x = matC_d.nnz / THREADS;
else
size_x = (matC_d.nnz / THREADS) + 1;
dim3 threads4(THREADS, 1, 1);
dim3 blocks4(size_x, 1, 1);
sparse2full_kernel << <blocks4, threads4 >> > (matC_full_d, matC_d.row, matC_d.col, matC_d.val, matC_d.ncol, matC_d.nnz, 1);
hipDeviceSynchronize();
while (omega < omegamax) {
//-----------------------computing diff=omega*E-matA and creating the sparse version of diff-----------------------
if (matE_d.nrow % THREADS == 0)
size_x = matE_d.nrow / THREADS;
else
size_x = (matE_d.nrow / THREADS) + 1;
size_y = size_x;
dim3 threads1(THREADS, THREADS, 1);
dim3 blocks1(size_x, size_y, 1);
hipMemset(diff_d, 0, matE_d.nrow*matE_d.ncol * sizeof(double));
sparse2full_diff_kernel << <blocks1, threads1 >> > (matE_d.row, matE_d.col, matE_d.val, matE_d.nnz, matA_d.row, matA_d.col, matA_d.val, matA_d.nnz, diff_d, matE_d.nrow, matE_d.ncol, omega);
hipDeviceSynchronize();
//------------------------------INVERSION of (omega*E - A) ---------------------------
//set inv matrix as identity
if (matE_d.nrow % THREADS == 0)
size_x = matE_d.nrow / THREADS;
else
size_x = (matE_d.nrow / THREADS) + 1;
dim3 threads2(THREADS, 1, 1);
dim3 blocks2(size_x, 1, 1);
hipMemset(inv_full_d, 0, matE_d.nrow*matE_d.ncol * sizeof(double));
create_identity_kernel << <blocks2, threads2 >> > (inv_full_d, matE_d.ncol);
hipDeviceSynchronize();
//query working space of getrf
hipsolverDnDgetrf_bufferSize(cusolverH, matE_d.nrow, matE_d.nrow, diff_d, lda, &lwork);
hipMalloc((void**)&d_work, sizeof(double) * lwork);
//LU factorization of the matrix to invert
hipsolverDnDgetrf(cusolverH, matE_d.nrow, matE_d.nrow, diff_d, lda, d_work, ipiv_d, info_d);
hipFree(d_work);
//computation of the inverse
hipsolverDnDgetrs(cusolverH, HIPBLAS_OP_N, matE_d.nrow, matE_d.nrow, diff_d, lda, ipiv_d, inv_full_d, ldb, info_d);
//----------------------------------computing C*inv*B----------------------------------------------
//computing prod1=C*inv
hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, matC_d.nrow, inv_ncol, matC_d.ncol, &alpha, matC_full_d, matC_d.nrow, inv_full_d, inv_nrow, &beta, prod1_d, matC_d.nrow);
//computing current_nrm=prod1*B
hipblasDdot(cublas_handle, matB_d.nrow, prod1_d, incx, matB_full_d, incy, ¤t_nrm);
if (fabs(current_nrm) > nrm_max) {
nrm_max = fabs(current_nrm);
omega_peak = omega;
}
omega += omegastep;
}
(*hinf_freq_peak) = omega_peak;
(*hinf_nrm) = nrm_max;
//------------------------------------free----------------------------
hipFree(matA_d.row);
hipFree(matA_d.col);
hipFree(matA_d.val);
hipFree(matB_d.row);
hipFree(matB_d.col);
hipFree(matB_d.val);
hipFree(matC_d.row);
hipFree(matC_d.col);
hipFree(matC_d.val);
hipFree(matE_d.row);
hipFree(matE_d.col);
hipFree(matE_d.val);
hipFree(matB_full_d);
hipFree(matC_full_d);
hipFree(diff_d);
hipFree(inv_full_d);
hipFree(ipiv_d);
hipFree(info_d);
}
void freqresp_parallel(struct sys *sys, double omega, double *val, hipblasHandle_t cublas_handle, hipsolverDnHandle_t cusolverH) { //NOT USED!
struct sparse matA_d;
struct sparse matE_d;
struct sparse matC_d;
struct sparse matB_d;
double *diff_d, *prod1_d, *prod2_d, *matB_full_d, *matC_full_d;
double alpha = 1, beta = 0;
int size_x, size_y;
int inv_nrow, inv_ncol;
//inversion parameters
int lda, ldb;
double *inv_full_d;
int *ipiv_d = NULL, *info_d = NULL;
int lwork = 0;
double *d_work = NULL;
int incx = 1, incy = 1;
matE_d.nrow = sys->e.nrow;
matE_d.ncol = sys->e.ncol;
matE_d.nnz = sys->e.nnz;
matA_d.nrow = sys->a.nrow;
matA_d.ncol = sys->a.ncol;
matA_d.nnz = sys->a.nnz;
matC_d.nrow = sys->c.nrow;
matC_d.ncol = sys->c.ncol;
matC_d.nnz = sys->c.nnz;
matB_d.nrow = sys->b.nrow;
matB_d.ncol = sys->b.ncol;
matB_d.nnz = sys->b.nnz;
inv_nrow = matE_d.nrow;
inv_ncol = matE_d.ncol;
lda = matE_d.nrow;
ldb = matE_d.nrow;
//------------------------------allocating device pointers------------------------------------------
hipMalloc((void**)&matA_d.row, matA_d.nnz * sizeof(int));
hipMalloc((void**)&matA_d.col, matA_d.nnz * sizeof(int));
hipMalloc((void**)&matA_d.val, matA_d.nnz * sizeof(double));
hipMalloc((void**)&matE_d.row, matE_d.nnz * sizeof(int));
hipMalloc((void**)&matE_d.col, matE_d.nnz * sizeof(int));
hipMalloc((void**)&matE_d.val, matE_d.nnz * sizeof(double));
hipMalloc((void**)&matC_d.row, matC_d.nnz * sizeof(int));
hipMalloc((void**)&matC_d.col, matC_d.nnz * sizeof(int));
hipMalloc((void**)&matC_d.val, matC_d.nnz * sizeof(double));
hipMalloc((void**)&matB_d.row, matB_d.nnz * sizeof(int));
hipMalloc((void**)&matB_d.col, matB_d.nnz * sizeof(int));
hipMalloc((void**)&matB_d.val, matB_d.nnz * sizeof(double));
hipMalloc((void**)&diff_d, matE_d.nrow*matE_d.ncol * sizeof(double));
hipMalloc((void**)&inv_full_d, inv_nrow * inv_ncol * sizeof(double));
hipMalloc((void**)&ipiv_d, matE_d.nrow * sizeof(int));
hipMalloc((void**)&info_d, sizeof(int));
hipMalloc((void**)&matB_full_d, matB_d.nrow*matB_d.ncol * sizeof(double));
hipMalloc((void**)&matC_full_d, matC_d.nrow*matC_d.ncol * sizeof(double));
hipMalloc((void**)&prod1_d, matC_d.nrow*matE_d.ncol * sizeof(double));
hipMalloc((void**)&prod2_d, inv_nrow*matB_d.ncol * sizeof(double));
//----------------------------copy matrices to the device---------------------------------------------
hipMemcpy(matA_d.row, sys->a.row, matA_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matA_d.col, sys->a.col, matA_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matA_d.val, sys->a.val, matA_d.nnz * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(matB_d.row, sys->b.row, matB_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matB_d.col, sys->b.col, matB_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matB_d.val, sys->b.val, matB_d.nnz * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(matC_d.row, sys->c.row, matC_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matC_d.col, sys->c.col, matC_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matC_d.val, sys->c.val, matC_d.nnz * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(matE_d.row, sys->e.row, matE_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matE_d.col, sys->e.col, matE_d.nnz * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(matE_d.val, sys->e.val, matE_d.nnz * sizeof(double), hipMemcpyHostToDevice);
//-----------------------computing diff=omega*E-matA and creating the sparse version of diff-----------------------
if (matE_d.nrow % THREADS == 0)
size_x = matE_d.nrow / THREADS;
else
size_x = (matE_d.nrow / THREADS) + 1;
size_y = size_x;
dim3 threads1(THREADS, THREADS, 1);
dim3 blocks1(size_x, size_y, 1);
hipMemset(diff_d, 0, matE_d.nrow*matE_d.ncol * sizeof(double));
sparse2full_diff_kernel << <blocks1, threads1 >> > (matE_d.row, matE_d.col, matE_d.val, matE_d.nnz, matA_d.row, matA_d.col, matA_d.val, matA_d.nnz, diff_d, matE_d.nrow, matE_d.ncol, omega);
hipDeviceSynchronize();
//------------------------------INVERSION of (omega*E - A) ---------------------------
//allocations
//set inv matrix as identity
if (matE_d.nrow % THREADS == 0)
size_x = matE_d.nrow / THREADS;
else
size_x = (matE_d.nrow / THREADS) + 1;
dim3 threads2(THREADS, 1, 1);
dim3 blocks2(size_x, 1, 1);
hipMemset(inv_full_d, 0, matE_d.nrow*matE_d.ncol * sizeof(double));
create_identity_kernel << <blocks2, threads2 >> > (inv_full_d, matE_d.ncol);
hipDeviceSynchronize();
//query working space of getrf
hipsolverDnDgetrf_bufferSize(cusolverH, matE_d.nrow, matE_d.nrow, diff_d, lda, &lwork);
hipMalloc((void**)&d_work, sizeof(double) * lwork);
//LU factorization of the matrix to invert
hipsolverDnDgetrf(cusolverH, matE_d.nrow, matE_d.nrow, diff_d, lda, d_work, ipiv_d, info_d);
hipFree(d_work);
//computation of the inverse
hipsolverDnDgetrs(cusolverH, HIPBLAS_OP_N, matE_d.nrow, matE_d.nrow, diff_d, lda, ipiv_d, inv_full_d, ldb, info_d);
//----------------------------------computing C*inv*B----------------------------------------------
//converting B,C into dense format
hipMemset(matB_full_d, 0, matB_d.nrow*matB_d.ncol * sizeof(double));
hipMemset(matC_full_d, 0, matC_d.nrow*matC_d.ncol * sizeof(double));
if (matB_d.nnz % THREADS == 0)
size_x = matB_d.nnz / THREADS;
else
size_x = (matB_d.nnz / THREADS) + 1;
dim3 threads3(THREADS, 1, 1);
dim3 blocks3(size_x, 1, 1);
sparse2full_kernel << <blocks3, threads3 >> > (matB_full_d, matB_d.row, matB_d.col, matB_d.val, matB_d.ncol, matB_d.nnz, 1);
if (matC_d.nnz % THREADS == 0)
size_x = matC_d.nnz / THREADS;
else
size_x = (matC_d.nnz / THREADS) + 1;
dim3 threads4(THREADS, 1, 1);
dim3 blocks4(size_x, 1, 1);
sparse2full_kernel << <blocks4, threads4 >> > (matC_full_d, matC_d.row, matC_d.col, matC_d.val, matC_d.ncol, matC_d.nnz, 1);
hipDeviceSynchronize();
//computing prod1=C*inv
hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, matC_d.nrow, inv_ncol, matC_d.ncol, &alpha, matC_full_d, matC_d.nrow, inv_full_d, inv_nrow, &beta, prod1_d, matC_d.nrow);
//computing val=prod1*B
hipblasDdot(cublas_handle, matB_d.nrow, prod1_d, incx, matB_full_d, incy, val);
//------------------------------------free----------------------------
hipFree(matA_d.row);
hipFree(matA_d.col);
hipFree(matA_d.val);
hipFree(matB_d.row);
hipFree(matB_d.col);
hipFree(matB_d.val);
hipFree(matC_d.row);
hipFree(matC_d.col);
hipFree(matC_d.val);
hipFree(matE_d.row);
hipFree(matE_d.col);
hipFree(matE_d.val);
hipFree(matB_full_d);
hipFree(matC_full_d);
hipFree(diff_d);
hipFree(inv_full_d);
hipFree(ipiv_d);
hipFree(info_d);
} | e39115c6fe17c263db9199a1e8d6e18aa6629575.cu | #include "parallel.cuh"
#include "sparse_struc.cuh"
#include "sys.cuh"
void sys_inf_norm_parallel(struct sys *sys, double *hinf_freq_peak, double *hinf_nrm, cublasHandle_t cublas_handle, cusolverDnHandle_t cusolverH) {
struct sparse matA_d;
struct sparse matE_d;
struct sparse matC_d;
struct sparse matB_d;
double *diff_d, *prod1_d, *prod2_d, *matB_full_d, *matC_full_d;
double alpha = 1, beta = 0;
int size_x, size_y;
int inv_nrow, inv_ncol;
int incx = 1, incy = 1;
//inversion parameters
int lda, ldb;
double *inv_full_d;
int *ipiv_d = NULL, *info_d = NULL;
int lwork = 0;
double *d_work = NULL;
double omega, omega_peak, omegastep, omegamax, nrm_max, current_nrm;
matE_d.nrow = sys->e.nrow;
matE_d.ncol = sys->e.ncol;
matE_d.nnz = sys->e.nnz;
matA_d.nrow = sys->a.nrow;
matA_d.ncol = sys->a.ncol;
matA_d.nnz = sys->a.nnz;
matC_d.nrow = sys->c.nrow;
matC_d.ncol = sys->c.ncol;
matC_d.nnz = sys->c.nnz;
matB_d.nrow = sys->b.nrow;
matB_d.ncol = sys->b.ncol;
matB_d.nnz = sys->b.nnz;
inv_nrow = matE_d.nrow;
inv_ncol = matE_d.ncol;
lda = matE_d.nrow;
ldb = matE_d.nrow;
//------------------------------allocating device pointers------------------------------------------
cudaMalloc((void**)&matA_d.row, matA_d.nnz * sizeof(int));
cudaMalloc((void**)&matA_d.col, matA_d.nnz * sizeof(int));
cudaMalloc((void**)&matA_d.val, matA_d.nnz * sizeof(double));
cudaMalloc((void**)&matE_d.row, matE_d.nnz * sizeof(int));
cudaMalloc((void**)&matE_d.col, matE_d.nnz * sizeof(int));
cudaMalloc((void**)&matE_d.val, matE_d.nnz * sizeof(double));
cudaMalloc((void**)&matC_d.row, matC_d.nnz * sizeof(int));
cudaMalloc((void**)&matC_d.col, matC_d.nnz * sizeof(int));
cudaMalloc((void**)&matC_d.val, matC_d.nnz * sizeof(double));
cudaMalloc((void**)&matB_d.row, matB_d.nnz * sizeof(int));
cudaMalloc((void**)&matB_d.col, matB_d.nnz * sizeof(int));
cudaMalloc((void**)&matB_d.val, matB_d.nnz * sizeof(double));
cudaMalloc((void**)&diff_d, matE_d.nrow*matE_d.ncol * sizeof(double));
cudaMalloc((void**)&inv_full_d, inv_nrow * inv_ncol * sizeof(double));
cudaMalloc((void**)&ipiv_d, matE_d.nrow * sizeof(int));
cudaMalloc((void**)&info_d, sizeof(int));
cudaMalloc((void**)&matB_full_d, matB_d.nrow*matB_d.ncol * sizeof(double));
cudaMalloc((void**)&matC_full_d, matC_d.nrow*matC_d.ncol * sizeof(double));
cudaMalloc((void**)&prod1_d, matC_d.nrow*matE_d.ncol * sizeof(double));
cudaMalloc((void**)&prod2_d, inv_nrow*matB_d.ncol * sizeof(double));
//----------------------------copy matrices to the device---------------------------------------------
cudaMemcpy(matA_d.row, sys->a.row, matA_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matA_d.col, sys->a.col, matA_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matA_d.val, sys->a.val, matA_d.nnz * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(matB_d.row, sys->b.row, matB_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matB_d.col, sys->b.col, matB_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matB_d.val, sys->b.val, matB_d.nnz * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(matC_d.row, sys->c.row, matC_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matC_d.col, sys->c.col, matC_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matC_d.val, sys->c.val, matC_d.nnz * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(matE_d.row, sys->e.row, matE_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matE_d.col, sys->e.col, matE_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matE_d.val, sys->e.val, matE_d.nnz * sizeof(double), cudaMemcpyHostToDevice);
omega = 0;
omega_peak = 0;
omegamax = 0.1;
omegastep = 0.01;
nrm_max = 0;
current_nrm = 0;
//converting B,C into dense format
cudaMemset(matB_full_d, 0, matB_d.nrow*matB_d.ncol * sizeof(double));
cudaMemset(matC_full_d, 0, matC_d.nrow*matC_d.ncol * sizeof(double));
if (matB_d.nnz % THREADS == 0)
size_x = matB_d.nnz / THREADS;
else
size_x = (matB_d.nnz / THREADS) + 1;
dim3 threads3(THREADS, 1, 1);
dim3 blocks3(size_x, 1, 1);
sparse2full_kernel << <blocks3, threads3 >> > (matB_full_d, matB_d.row, matB_d.col, matB_d.val, matB_d.ncol, matB_d.nnz, 1);
if (matC_d.nnz % THREADS == 0)
size_x = matC_d.nnz / THREADS;
else
size_x = (matC_d.nnz / THREADS) + 1;
dim3 threads4(THREADS, 1, 1);
dim3 blocks4(size_x, 1, 1);
sparse2full_kernel << <blocks4, threads4 >> > (matC_full_d, matC_d.row, matC_d.col, matC_d.val, matC_d.ncol, matC_d.nnz, 1);
cudaDeviceSynchronize();
while (omega < omegamax) {
//-----------------------computing diff=omega*E-matA and creating the sparse version of diff-----------------------
if (matE_d.nrow % THREADS == 0)
size_x = matE_d.nrow / THREADS;
else
size_x = (matE_d.nrow / THREADS) + 1;
size_y = size_x;
dim3 threads1(THREADS, THREADS, 1);
dim3 blocks1(size_x, size_y, 1);
cudaMemset(diff_d, 0, matE_d.nrow*matE_d.ncol * sizeof(double));
sparse2full_diff_kernel << <blocks1, threads1 >> > (matE_d.row, matE_d.col, matE_d.val, matE_d.nnz, matA_d.row, matA_d.col, matA_d.val, matA_d.nnz, diff_d, matE_d.nrow, matE_d.ncol, omega);
cudaDeviceSynchronize();
//------------------------------INVERSION of (omega*E - A) ---------------------------
//set inv matrix as identity
if (matE_d.nrow % THREADS == 0)
size_x = matE_d.nrow / THREADS;
else
size_x = (matE_d.nrow / THREADS) + 1;
dim3 threads2(THREADS, 1, 1);
dim3 blocks2(size_x, 1, 1);
cudaMemset(inv_full_d, 0, matE_d.nrow*matE_d.ncol * sizeof(double));
create_identity_kernel << <blocks2, threads2 >> > (inv_full_d, matE_d.ncol);
cudaDeviceSynchronize();
//query working space of getrf
cusolverDnDgetrf_bufferSize(cusolverH, matE_d.nrow, matE_d.nrow, diff_d, lda, &lwork);
cudaMalloc((void**)&d_work, sizeof(double) * lwork);
//LU factorization of the matrix to invert
cusolverDnDgetrf(cusolverH, matE_d.nrow, matE_d.nrow, diff_d, lda, d_work, ipiv_d, info_d);
cudaFree(d_work);
//computation of the inverse
cusolverDnDgetrs(cusolverH, CUBLAS_OP_N, matE_d.nrow, matE_d.nrow, diff_d, lda, ipiv_d, inv_full_d, ldb, info_d);
//----------------------------------computing C*inv*B----------------------------------------------
//computing prod1=C*inv
cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, matC_d.nrow, inv_ncol, matC_d.ncol, &alpha, matC_full_d, matC_d.nrow, inv_full_d, inv_nrow, &beta, prod1_d, matC_d.nrow);
//computing current_nrm=prod1*B
cublasDdot(cublas_handle, matB_d.nrow, prod1_d, incx, matB_full_d, incy, ¤t_nrm);
if (fabs(current_nrm) > nrm_max) {
nrm_max = fabs(current_nrm);
omega_peak = omega;
}
omega += omegastep;
}
(*hinf_freq_peak) = omega_peak;
(*hinf_nrm) = nrm_max;
//------------------------------------free----------------------------
cudaFree(matA_d.row);
cudaFree(matA_d.col);
cudaFree(matA_d.val);
cudaFree(matB_d.row);
cudaFree(matB_d.col);
cudaFree(matB_d.val);
cudaFree(matC_d.row);
cudaFree(matC_d.col);
cudaFree(matC_d.val);
cudaFree(matE_d.row);
cudaFree(matE_d.col);
cudaFree(matE_d.val);
cudaFree(matB_full_d);
cudaFree(matC_full_d);
cudaFree(diff_d);
cudaFree(inv_full_d);
cudaFree(ipiv_d);
cudaFree(info_d);
}
void freqresp_parallel(struct sys *sys, double omega, double *val, cublasHandle_t cublas_handle, cusolverDnHandle_t cusolverH) { //NOT USED!
struct sparse matA_d;
struct sparse matE_d;
struct sparse matC_d;
struct sparse matB_d;
double *diff_d, *prod1_d, *prod2_d, *matB_full_d, *matC_full_d;
double alpha = 1, beta = 0;
int size_x, size_y;
int inv_nrow, inv_ncol;
//inversion parameters
int lda, ldb;
double *inv_full_d;
int *ipiv_d = NULL, *info_d = NULL;
int lwork = 0;
double *d_work = NULL;
int incx = 1, incy = 1;
matE_d.nrow = sys->e.nrow;
matE_d.ncol = sys->e.ncol;
matE_d.nnz = sys->e.nnz;
matA_d.nrow = sys->a.nrow;
matA_d.ncol = sys->a.ncol;
matA_d.nnz = sys->a.nnz;
matC_d.nrow = sys->c.nrow;
matC_d.ncol = sys->c.ncol;
matC_d.nnz = sys->c.nnz;
matB_d.nrow = sys->b.nrow;
matB_d.ncol = sys->b.ncol;
matB_d.nnz = sys->b.nnz;
inv_nrow = matE_d.nrow;
inv_ncol = matE_d.ncol;
lda = matE_d.nrow;
ldb = matE_d.nrow;
//------------------------------allocating device pointers------------------------------------------
cudaMalloc((void**)&matA_d.row, matA_d.nnz * sizeof(int));
cudaMalloc((void**)&matA_d.col, matA_d.nnz * sizeof(int));
cudaMalloc((void**)&matA_d.val, matA_d.nnz * sizeof(double));
cudaMalloc((void**)&matE_d.row, matE_d.nnz * sizeof(int));
cudaMalloc((void**)&matE_d.col, matE_d.nnz * sizeof(int));
cudaMalloc((void**)&matE_d.val, matE_d.nnz * sizeof(double));
cudaMalloc((void**)&matC_d.row, matC_d.nnz * sizeof(int));
cudaMalloc((void**)&matC_d.col, matC_d.nnz * sizeof(int));
cudaMalloc((void**)&matC_d.val, matC_d.nnz * sizeof(double));
cudaMalloc((void**)&matB_d.row, matB_d.nnz * sizeof(int));
cudaMalloc((void**)&matB_d.col, matB_d.nnz * sizeof(int));
cudaMalloc((void**)&matB_d.val, matB_d.nnz * sizeof(double));
cudaMalloc((void**)&diff_d, matE_d.nrow*matE_d.ncol * sizeof(double));
cudaMalloc((void**)&inv_full_d, inv_nrow * inv_ncol * sizeof(double));
cudaMalloc((void**)&ipiv_d, matE_d.nrow * sizeof(int));
cudaMalloc((void**)&info_d, sizeof(int));
cudaMalloc((void**)&matB_full_d, matB_d.nrow*matB_d.ncol * sizeof(double));
cudaMalloc((void**)&matC_full_d, matC_d.nrow*matC_d.ncol * sizeof(double));
cudaMalloc((void**)&prod1_d, matC_d.nrow*matE_d.ncol * sizeof(double));
cudaMalloc((void**)&prod2_d, inv_nrow*matB_d.ncol * sizeof(double));
//----------------------------copy matrices to the device---------------------------------------------
cudaMemcpy(matA_d.row, sys->a.row, matA_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matA_d.col, sys->a.col, matA_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matA_d.val, sys->a.val, matA_d.nnz * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(matB_d.row, sys->b.row, matB_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matB_d.col, sys->b.col, matB_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matB_d.val, sys->b.val, matB_d.nnz * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(matC_d.row, sys->c.row, matC_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matC_d.col, sys->c.col, matC_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matC_d.val, sys->c.val, matC_d.nnz * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(matE_d.row, sys->e.row, matE_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matE_d.col, sys->e.col, matE_d.nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(matE_d.val, sys->e.val, matE_d.nnz * sizeof(double), cudaMemcpyHostToDevice);
//-----------------------computing diff=omega*E-matA and creating the sparse version of diff-----------------------
if (matE_d.nrow % THREADS == 0)
size_x = matE_d.nrow / THREADS;
else
size_x = (matE_d.nrow / THREADS) + 1;
size_y = size_x;
dim3 threads1(THREADS, THREADS, 1);
dim3 blocks1(size_x, size_y, 1);
cudaMemset(diff_d, 0, matE_d.nrow*matE_d.ncol * sizeof(double));
sparse2full_diff_kernel << <blocks1, threads1 >> > (matE_d.row, matE_d.col, matE_d.val, matE_d.nnz, matA_d.row, matA_d.col, matA_d.val, matA_d.nnz, diff_d, matE_d.nrow, matE_d.ncol, omega);
cudaDeviceSynchronize();
//------------------------------INVERSION of (omega*E - A) ---------------------------
//allocations
//set inv matrix as identity
if (matE_d.nrow % THREADS == 0)
size_x = matE_d.nrow / THREADS;
else
size_x = (matE_d.nrow / THREADS) + 1;
dim3 threads2(THREADS, 1, 1);
dim3 blocks2(size_x, 1, 1);
cudaMemset(inv_full_d, 0, matE_d.nrow*matE_d.ncol * sizeof(double));
create_identity_kernel << <blocks2, threads2 >> > (inv_full_d, matE_d.ncol);
cudaDeviceSynchronize();
//query working space of getrf
cusolverDnDgetrf_bufferSize(cusolverH, matE_d.nrow, matE_d.nrow, diff_d, lda, &lwork);
cudaMalloc((void**)&d_work, sizeof(double) * lwork);
//LU factorization of the matrix to invert
cusolverDnDgetrf(cusolverH, matE_d.nrow, matE_d.nrow, diff_d, lda, d_work, ipiv_d, info_d);
cudaFree(d_work);
//computation of the inverse
cusolverDnDgetrs(cusolverH, CUBLAS_OP_N, matE_d.nrow, matE_d.nrow, diff_d, lda, ipiv_d, inv_full_d, ldb, info_d);
//----------------------------------computing C*inv*B----------------------------------------------
//converting B,C into dense format
cudaMemset(matB_full_d, 0, matB_d.nrow*matB_d.ncol * sizeof(double));
cudaMemset(matC_full_d, 0, matC_d.nrow*matC_d.ncol * sizeof(double));
if (matB_d.nnz % THREADS == 0)
size_x = matB_d.nnz / THREADS;
else
size_x = (matB_d.nnz / THREADS) + 1;
dim3 threads3(THREADS, 1, 1);
dim3 blocks3(size_x, 1, 1);
sparse2full_kernel << <blocks3, threads3 >> > (matB_full_d, matB_d.row, matB_d.col, matB_d.val, matB_d.ncol, matB_d.nnz, 1);
if (matC_d.nnz % THREADS == 0)
size_x = matC_d.nnz / THREADS;
else
size_x = (matC_d.nnz / THREADS) + 1;
dim3 threads4(THREADS, 1, 1);
dim3 blocks4(size_x, 1, 1);
sparse2full_kernel << <blocks4, threads4 >> > (matC_full_d, matC_d.row, matC_d.col, matC_d.val, matC_d.ncol, matC_d.nnz, 1);
cudaDeviceSynchronize();
//computing prod1=C*inv
cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, matC_d.nrow, inv_ncol, matC_d.ncol, &alpha, matC_full_d, matC_d.nrow, inv_full_d, inv_nrow, &beta, prod1_d, matC_d.nrow);
//computing val=prod1*B
cublasDdot(cublas_handle, matB_d.nrow, prod1_d, incx, matB_full_d, incy, val);
//------------------------------------free----------------------------
cudaFree(matA_d.row);
cudaFree(matA_d.col);
cudaFree(matA_d.val);
cudaFree(matB_d.row);
cudaFree(matB_d.col);
cudaFree(matB_d.val);
cudaFree(matC_d.row);
cudaFree(matC_d.col);
cudaFree(matC_d.val);
cudaFree(matE_d.row);
cudaFree(matE_d.col);
cudaFree(matE_d.val);
cudaFree(matB_full_d);
cudaFree(matC_full_d);
cudaFree(diff_d);
cudaFree(inv_full_d);
cudaFree(ipiv_d);
cudaFree(info_d);
} |
c799e7c232bf8c7239434ca8ba5ef1de49764a57.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include "dense_help_func.hpp"
// cal offset from row col and ld , in row-major matrix, ld is the width of the matrix
#define OFFSET(row, col, ld) ((row) * (ld) + (col))
// transfer float4
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])
template <
const int BLOCK_SIZE_M, // width of block of C that each thread block calculate
const int BLOCK_SIZE_K, // height of block of A that each thread block load into shared memory
const int BLOCK_SIZE_N, // height of block of C that each thread block calculate
const int THREAD_SIZE_Y, // height of block of C that each thread calculate
const int THREAD_SIZE_X, // width of block of C that each thread calculate
const bool ENABLE_DOUBLE_BUFFER // whether enable double buffering or not
>
__global__ void MatrixMulCUDA6(
float * __restrict__ A,
float * __restrict__ B,
float * __restrict__ C,
const int K,
const int N) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// size of thread block
const int bszx = BLOCK_SIZE_N / THREAD_SIZE_X;
const int bszy = BLOCK_SIZE_M / THREAD_SIZE_Y;
const int THREAD_NUM_PER_BLOCK = bszy * bszx;
// thread id
const int tid = ty * bszx + tx;
// shared memory
__shared__ float As[BLOCK_SIZE_M][BLOCK_SIZE_K]; // avoid bank conflict
__shared__ float Bs[BLOCK_SIZE_K][BLOCK_SIZE_N];
// registers for C
float accum[THREAD_SIZE_Y][THREAD_SIZE_X] = {0};
// registers for A and B
float frag_a[THREAD_SIZE_Y];
float frag_b[THREAD_SIZE_X];
// threads needed to load one row of tile
// / 4 is because float4 is used
const int A_TILE_THREAD_PER_ROW = BLOCK_SIZE_K / 4;
const int B_TILE_THREAD_PER_ROW = BLOCK_SIZE_N / 4;
// row number and col number that needs to be loaded by this thread
const int A_TILE_ROW_START = tid / A_TILE_THREAD_PER_ROW;
const int B_TILE_ROW_START = tid / B_TILE_THREAD_PER_ROW;
const int A_TILE_COL = tid % A_TILE_THREAD_PER_ROW * 4;
const int B_TILE_COL = tid % B_TILE_THREAD_PER_ROW * 4;
// row stride that thread uses to load multiple rows of a tile
const int A_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / A_TILE_THREAD_PER_ROW;
const int B_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / B_TILE_THREAD_PER_ROW;
// can not unroll since K can not be determined at this point
for (int tile_idx = 0 ; tile_idx < K ; tile_idx += BLOCK_SIZE_K) {
// load A from global memory to shared memory
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) {
FETCH_FLOAT4(As[A_TILE_ROW_START + i][A_TILE_COL]) = FETCH_FLOAT4(A[OFFSET(
BLOCK_SIZE_M * by + A_TILE_ROW_START + i, // row
A_TILE_COL + tile_idx, // col
K )]);
}
// load B from global memory to shared memory
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) {
FETCH_FLOAT4(Bs[B_TILE_ROW_START + i][B_TILE_COL]) = FETCH_FLOAT4(B[OFFSET(
tile_idx + B_TILE_ROW_START + i, // row
B_TILE_COL + BLOCK_SIZE_N * bx, // col
K )]);
}
__syncthreads();
// compute c
#pragma unroll
for (int k = 0; k < BLOCK_SIZE_K; ++ k) {
// load A from shared memory to register
#pragma unroll
for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
frag_a[thread_y] = As[ty * THREAD_SIZE_Y + thread_y][k];
}
// load B from shared memory to register
#pragma unroll
for (int thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) {
FETCH_FLOAT4(frag_b[thread_x]) = FETCH_FLOAT4(Bs[k][THREAD_SIZE_X * tx + thread_x]);
}
#pragma unroll
for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
#pragma unroll
for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
accum[thread_y][thread_x] += frag_a[thread_y] * frag_b[thread_x];
}
}
}
__syncthreads();
}
// store back to C
#pragma unroll
for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
#pragma unroll
for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
C[OFFSET(
BLOCK_SIZE_M * by + ty * THREAD_SIZE_Y + thread_y,
BLOCK_SIZE_N * bx + tx * THREAD_SIZE_X + thread_x,
N)] = accum[thread_y][thread_x];
}
}
}
// TODO add shuffle to enable GPU write back col | c799e7c232bf8c7239434ca8ba5ef1de49764a57.cu | #include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include "dense_help_func.hpp"
// cal offset from row col and ld , in row-major matrix, ld is the width of the matrix
#define OFFSET(row, col, ld) ((row) * (ld) + (col))
// transfer float4
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])
// Tiled SGEMM kernel: C = A * B for row-major A (M x K), B (K x N), C (M x N).
//
// Grid layout: blockIdx.y indexes BLOCK_SIZE_M-row strips of C, blockIdx.x
// indexes BLOCK_SIZE_N-column strips; each thread computes a
// THREAD_SIZE_Y x THREAD_SIZE_X sub-tile of C in registers.
//
// Preconditions (no tail guards in this kernel):
//  - M % BLOCK_SIZE_M == 0, N % BLOCK_SIZE_N == 0, K % BLOCK_SIZE_K == 0
//  - BLOCK_SIZE_K, BLOCK_SIZE_N and THREAD_SIZE_X divisible by 4
//    (float4 vector loads), and A/B rows 16-byte aligned
template <
    const int BLOCK_SIZE_M,  // width of block of C that each thread block calculate
    const int BLOCK_SIZE_K,  // height of block of A that each thread block load into shared memory
    const int BLOCK_SIZE_N,  // height of block of C that each thread block calculate
    const int THREAD_SIZE_Y, // height of block of C that each thread calculate
    const int THREAD_SIZE_X, // width of block of C that each thread calculate
    const bool ENABLE_DOUBLE_BUFFER // whether enable double buffering or not
    >
__global__ void MatrixMulCUDA6(
    float * __restrict__ A,
    float * __restrict__ B,
    float * __restrict__ C,
    const int K,
    const int N) {
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // size of thread block
    const int bszx = BLOCK_SIZE_N / THREAD_SIZE_X;
    const int bszy = BLOCK_SIZE_M / THREAD_SIZE_Y;
    const int THREAD_NUM_PER_BLOCK = bszy * bszx;
    // thread id
    const int tid = ty * bszx + tx;
    // shared memory
    __shared__ float As[BLOCK_SIZE_M][BLOCK_SIZE_K]; // avoid bank conflict
    __shared__ float Bs[BLOCK_SIZE_K][BLOCK_SIZE_N];
    // registers for C
    float accum[THREAD_SIZE_Y][THREAD_SIZE_X] = {0};
    // registers for A and B
    float frag_a[THREAD_SIZE_Y];
    float frag_b[THREAD_SIZE_X];
    // threads needed to load one row of tile
    // / 4 is because float4 is used
    const int A_TILE_THREAD_PER_ROW = BLOCK_SIZE_K / 4;
    const int B_TILE_THREAD_PER_ROW = BLOCK_SIZE_N / 4;
    // row number and col number that needs to be loaded by this thread
    const int A_TILE_ROW_START = tid / A_TILE_THREAD_PER_ROW;
    const int B_TILE_ROW_START = tid / B_TILE_THREAD_PER_ROW;
    const int A_TILE_COL = tid % A_TILE_THREAD_PER_ROW * 4;
    const int B_TILE_COL = tid % B_TILE_THREAD_PER_ROW * 4;
    // row stride that thread uses to load multiple rows of a tile
    const int A_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / A_TILE_THREAD_PER_ROW;
    const int B_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / B_TILE_THREAD_PER_ROW;
    // can not unroll since K can not be determined at this point
    for (int tile_idx = 0 ; tile_idx < K ; tile_idx += BLOCK_SIZE_K) {
        // load A from global memory to shared memory
        // A is M x K row-major, so its leading dimension is K
        #pragma unroll
        for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) {
            FETCH_FLOAT4(As[A_TILE_ROW_START + i][A_TILE_COL]) = FETCH_FLOAT4(A[OFFSET(
                BLOCK_SIZE_M * by + A_TILE_ROW_START + i, // row
                A_TILE_COL + tile_idx, // col
                K )]);
        }
        // load B from global memory to shared memory
        // BUGFIX: B is K x N row-major, so its leading dimension is N
        // (the col index below ranges over N); the original used K, which
        // reads the wrong elements whenever K != N.
        #pragma unroll
        for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) {
            FETCH_FLOAT4(Bs[B_TILE_ROW_START + i][B_TILE_COL]) = FETCH_FLOAT4(B[OFFSET(
                tile_idx + B_TILE_ROW_START + i, // row
                B_TILE_COL + BLOCK_SIZE_N * bx, // col
                N )]);
        }
        __syncthreads();
        // compute c
        #pragma unroll
        for (int k = 0; k < BLOCK_SIZE_K; ++ k) {
            // load A from shared memory to register
            #pragma unroll
            for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
                frag_a[thread_y] = As[ty * THREAD_SIZE_Y + thread_y][k];
            }
            // load B from shared memory to register
            #pragma unroll
            for (int thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) {
                FETCH_FLOAT4(frag_b[thread_x]) = FETCH_FLOAT4(Bs[k][THREAD_SIZE_X * tx + thread_x]);
            }
            // rank-1 update of the per-thread accumulator tile
            #pragma unroll
            for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
                #pragma unroll
                for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
                    accum[thread_y][thread_x] += frag_a[thread_y] * frag_b[thread_x];
                }
            }
        }
        // barrier before the tiles are overwritten in the next iteration
        __syncthreads();
    }
    // store back to C (M x N row-major, leading dimension N)
    #pragma unroll
    for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
        #pragma unroll
        for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
            C[OFFSET(
                BLOCK_SIZE_M * by + ty * THREAD_SIZE_Y + thread_y,
                BLOCK_SIZE_N * bx + tx * THREAD_SIZE_X + thread_x,
                N)] = accum[thread_y][thread_x];
        }
    }
}
// TODO add shuffle to enable GPU write back col |
ecf35f45d4a7f7848d1d03abcfad5f2aa898499c.hip | // !!! This is a file automatically generated by hipify!!!
// fdk-ts-t.cu
// Threaded versions of FDK back-projection
// For detector index (t,s).
// Copyright 2008-10-09, Jeff Fessler, University of Michigan
#include "jf-cuda.h"
#include "def,fdk.h"
#include "jf,thread1.h"
#include "fdk-gpu.h"
// Argument bundle passed to the threaded FDK back-projection worker.
typedef struct {
	float *image; // [nz nx ny] <- trick!
	const cbct_ig *ig; // image geometry
	const cbct_cg *cg; // cone-beam CT system geometry
	int na; // # of views
	cfloat *proj; // [nt ns na] <- trick! projection views
	cdouble *beta; // [na] source angles [radians]
} fdk_ts_s;
//
// fdk_ts_back_init()
// interface routine for threaded versions
//
// Thread worker: back-projects all "na" views into pa->image.
// In the GPU build (fdk_gpu defined) the image, 2D mask, and one projection
// view at a time are staged in device memory and bound to textures; in the
// CPU build the host arrays are handed to fdk_ts_back1_gpu() directly.
// in: fdk_ts_s argument bundle; id: this thread's index (voxel partitioning);
// nthread: unused here.
static sof fdk_ts_back_init(void *in, cint id, cint nthread)
{
	fdk_ts_s *pa = (fdk_ts_s *) in;
	const cbct_ig *ig = pa->ig;
	const cbct_cg *cg = pa->cg;
	cint na = pa->na;
	cfloat *proj = pa->proj;
	cdouble *beta = pa->beta;
	cint nst = cg->ns * cg->nt; // detector samples per view
	(void) nthread;
#ifdef fdk_gpu
	cint nxyz = ig->nx * ig->ny * ig->nz;
	float *dev_img;
	jf_gpu_malloc(dev_img, nxyz) // image memory on device
	jf_gpu_memset(dev_img, 0, nxyz) // initialize device image to 0
	hipBindTexture( 0, tex_img, dev_img, nxyz*sizeof(float) );
	float *dev_proj;
	jf_gpu_malloc(dev_proj, nst) // one projection view on device
	byte *dev_mask2;
	cint nxy = ig->nx * ig->ny;
	jf_gpu_malloc(dev_mask2, nxy) // 2D mask
	jf_gpu_put(dev_mask2, ig->mask2, nxy)
	hipBindTexture( 0, tex_mask2, dev_mask2, nxy*sizeof(byte));
#endif
	for (int ia=0; ia < na; ++ia, proj += nst) { // each view
#ifdef fdk_gpu
	// copy this view to gpu and bind to texture
	jf_gpu_put(dev_proj, proj, nst)
	hipBindTexture( 0, tex_proj, dev_proj, nst*sizeof(float) );
#else
	float *dev_img = pa->image; // already zeroed
	cfloat *dev_proj = proj;
	cbyte *dev_mask2 = ig->mask2;
#endif
	// accumulate this view's contribution into the image
	if (!fdk_ts_back1_gpu(dev_img,
		ig->nx, ig->ny, ig->nz,
		ig->dx, ig->dy, ig->dz,
		ig->offset_x, ig->offset_y, ig->offset_z,
		dev_mask2, id + 1, // each thread does some voxels only
		cg->dso, cg->dsd, cg->dfs,
		cg->ns, cg->nt,
		cg->ds, cg->dt, cg->offset_s, cg->offset_t,
		dev_proj, beta[ia]))
		Fail("fdk_ts_back1_gpu()")
	}
#ifdef fdk_gpu
	hipUnbindTexture( tex_img );
	hipUnbindTexture( tex_proj );
	hipUnbindTexture( tex_mask2 );
	Note("Copying image to host")
	jf_gpu_get(pa->image, dev_img, nxyz) // caution: works only for 1 thread!
	Note("freeing dev_img memory")
	jf_gpu_free(dev_img)
	Note("freeing dev_proj memory\n")
	jf_gpu_free(dev_proj)
	// NOTE(review): dev_mask2 is never freed here — looks like a device
	// memory leak per call; confirm against jf_gpu_* conventions.
#endif
	Ok
}
//
// fdk_ts_back_t()
// entry point for threaded FDK back-projector
//
sof fdk_ts_back_t(
float *image, // [nz nx ny] <- trick!
const cbct_ig *ig,
const cbct_cg *cg,
cint na, // # of views
cfloat *proj, // [nt ns na] <- trick! projection views
cdouble *beta, // [na] source angles [radians]
cint nthread, // # of threads
cint chat)
{
	// Bundle the arguments for the threaded worker.
	fdk_ts_s st;
	st.image = image;
	st.ig = ig;
	st.cg = cg;
	st.na = na;
	st.proj = proj;
	st.beta = beta;
	// Start from a zeroed image volume, then fan out the back-projection.
	Bzero(image, ig->nx * ig->ny * ig->nz)
	Call(jf_thread1_top, (fdk_ts_back_init,
	NULL /* wrap up */, &st, nthread, Chat))
	Ok
}
| ecf35f45d4a7f7848d1d03abcfad5f2aa898499c.cu | // fdk-ts-t.cu
// Threaded versions of FDK back-projection
// For detector index (t,s).
// Copyright 2008-10-09, Jeff Fessler, University of Michigan
#include "jf-cuda.h"
#include "def,fdk.h"
#include "jf,thread1.h"
#include "fdk-gpu.h"
// Argument bundle passed to the threaded FDK back-projection worker.
typedef struct {
	float *image; // [nz nx ny] <- trick!
	const cbct_ig *ig; // image geometry
	const cbct_cg *cg; // cone-beam CT system geometry
	int na; // # of views
	cfloat *proj; // [nt ns na] <- trick! projection views
	cdouble *beta; // [na] source angles [radians]
} fdk_ts_s;
//
// fdk_ts_back_init()
// interface routine for threaded versions
//
// Thread worker: back-projects all "na" views into pa->image.
// In the GPU build (fdk_gpu defined) the image, 2D mask, and one projection
// view at a time are staged in device memory and bound to textures; in the
// CPU build the host arrays are handed to fdk_ts_back1_gpu() directly.
// in: fdk_ts_s argument bundle; id: this thread's index (voxel partitioning);
// nthread: unused here.
static sof fdk_ts_back_init(void *in, cint id, cint nthread)
{
	fdk_ts_s *pa = (fdk_ts_s *) in;
	const cbct_ig *ig = pa->ig;
	const cbct_cg *cg = pa->cg;
	cint na = pa->na;
	cfloat *proj = pa->proj;
	cdouble *beta = pa->beta;
	cint nst = cg->ns * cg->nt; // detector samples per view
	(void) nthread;
#ifdef fdk_gpu
	cint nxyz = ig->nx * ig->ny * ig->nz;
	float *dev_img;
	jf_gpu_malloc(dev_img, nxyz) // image memory on device
	jf_gpu_memset(dev_img, 0, nxyz) // initialize device image to 0
	cudaBindTexture( 0, tex_img, dev_img, nxyz*sizeof(float) );
	float *dev_proj;
	jf_gpu_malloc(dev_proj, nst) // one projection view on device
	byte *dev_mask2;
	cint nxy = ig->nx * ig->ny;
	jf_gpu_malloc(dev_mask2, nxy) // 2D mask
	jf_gpu_put(dev_mask2, ig->mask2, nxy)
	cudaBindTexture( 0, tex_mask2, dev_mask2, nxy*sizeof(byte));
#endif
	for (int ia=0; ia < na; ++ia, proj += nst) { // each view
#ifdef fdk_gpu
	// copy this view to gpu and bind to texture
	jf_gpu_put(dev_proj, proj, nst)
	cudaBindTexture( 0, tex_proj, dev_proj, nst*sizeof(float) );
#else
	float *dev_img = pa->image; // already zeroed
	cfloat *dev_proj = proj;
	cbyte *dev_mask2 = ig->mask2;
#endif
	// accumulate this view's contribution into the image
	if (!fdk_ts_back1_gpu(dev_img,
		ig->nx, ig->ny, ig->nz,
		ig->dx, ig->dy, ig->dz,
		ig->offset_x, ig->offset_y, ig->offset_z,
		dev_mask2, id + 1, // each thread does some voxels only
		cg->dso, cg->dsd, cg->dfs,
		cg->ns, cg->nt,
		cg->ds, cg->dt, cg->offset_s, cg->offset_t,
		dev_proj, beta[ia]))
		Fail("fdk_ts_back1_gpu()")
	}
#ifdef fdk_gpu
	cudaUnbindTexture( tex_img );
	cudaUnbindTexture( tex_proj );
	cudaUnbindTexture( tex_mask2 );
	Note("Copying image to host")
	jf_gpu_get(pa->image, dev_img, nxyz) // caution: works only for 1 thread!
	Note("freeing dev_img memory")
	jf_gpu_free(dev_img)
	Note("freeing dev_proj memory\n")
	jf_gpu_free(dev_proj)
	// NOTE(review): dev_mask2 is never freed here — looks like a device
	// memory leak per call; confirm against jf_gpu_* conventions.
#endif
	Ok
}
//
// fdk_ts_back_t()
// entry point for threaded FDK back-projector
//
// Entry point for the threaded FDK back-projector: zeroes the image volume,
// packs the arguments into an fdk_ts_s, and dispatches fdk_ts_back_init()
// across nthread worker threads via jf_thread1_top().
sof fdk_ts_back_t(
float *image, // [nz nx ny] <- trick!
const cbct_ig *ig,
const cbct_cg *cg,
cint na, // # of views
cfloat *proj, // [nt ns na] <- trick! projection views
cdouble *beta, // [na] source angles [radians]
cint nthread, // # of threads
cint chat)
{
	fdk_ts_s st;
	// copy each argument into the matching struct field
	#define put(arg) st.arg = arg;
	put(image)
	put(ig)
	put(cg)
	put(na)
	put(proj)
	put(beta)
	#undef put
	Bzero(image, ig->nx * ig->ny * ig->nz) // initialize image volume to 0
	Call(jf_thread1_top, (fdk_ts_back_init,
	NULL /* wrap up */, &st, nthread, Chat))
	Ok
}
|
6a19cd9453f98ab1d9a2ef1a66e4e12bbe3b1f9e.hip | // !!! This is a file automatically generated by hipify!!!
// cudafun.cu --
// CUDA memory allocation & shallow water kernel routines
// To compile (Cuda 3.2):
// nvcc -c --gpu_architecture sm_13 -I${CUDA_INSTALL_PATH}/include
// -Xcompiler -fpic
//
// ! Incomplete kernel call !
//
// Matthias Griessinger, University of Erlangen, 2011.
#include <stdio.h>
//#include <hip/hip_runtime_api.h>
#ifndef _CUDA_MACROS_H_
#define _CUDA_MACROS_H_
#define safecall(call) do{\
hipError_t err = call ;\
if( hipSuccess != err ){\
fprintf(stdout, "cuda error at %s:%d, %s\n",\
__FILE__, __LINE__, hipGetErrorString(err));\
fflush(stdout);\
}\
} while(0)
#define BLOCKDIM 96
#ifdef _SHARED_WRITEBACK_3_
#define SHARED_MEM_MULT 3
#else
#define SHARED_MEM_MULT 1
#endif
#endif
/* *********** DEVICE SELECTION ************************* */
extern "C" void getDeviceInfo( int rank, int size, const char* hostname) {
	/* Print the device count and the name of every GPU (rank 0 only). */
	int numDevices = 0;
	hipGetDeviceCount(&numDevices);
	if (rank != 0)
		return;
	printf("## rank %i/%i on %s --\t Device Test: No. Cards: %d\n",
		rank, size-1, hostname, numDevices);
	for (int dev = 0; dev < numDevices; ++dev) {
		hipDeviceProp_t props;
		hipGetDeviceProperties(&props, dev);
		printf("## rank %i/%i on %s --\t Device %d: %s\n",
			rank, size-1, hostname, dev, props.name);
	}
}
extern "C" int selectDevice( int rank, int size, const char* hostname ) {
	/* Select GPU device (for multiple cards);
	call before any GPU memory/kernel calls,
	otherwise no effect (default card selected)
	Maps MPI-style rank to a card round-robin (rank % deviceCount)
	and returns the device index actually selected.
	NOTE(review): prints "rank %i/%i" with `size`, while getDeviceInfo()
	uses `size-1` — confirm which convention is intended.
	*/
	int deviceCount, takedevice, device;
	hipDeviceProp_t deviceProp;
	hipGetDeviceCount(&deviceCount);
	takedevice = rank%deviceCount; // round-robin assignment
	hipSetDevice(takedevice);
	hipGetDevice(&device);
	hipGetDeviceProperties(&deviceProp, device);
	printf("rank %i/%i on %s --\t Selecting Device %d: %s\n",
		rank, size, hostname, device, deviceProp.name);
	return device;
}
/* *********** KERNEL LAUNCH PARAMETERS ***************** */
// Global kernel-launch configuration shared by all wrapper functions below.
typedef struct {
	int gridDim;   // number of thread blocks per launch
	int blockDim;  // threads per block
} KERNEL_LAUNCHER;
// Single process-wide launcher instance (not thread-safe by construction).
KERNEL_LAUNCHER _launcher_;
// Set the grid/block dimensions used by subsequent kernel launches.
extern "C" void setKernelDims( const int gridDim, const int blockDim ) {
	_launcher_.gridDim = gridDim;
	_launcher_.blockDim = blockDim;
}
// Print the currently configured launch dimensions (debugging aid).
extern "C" void printKernelDims() {
	printf(" kernel dims: %i x %i\n",
		_launcher_.gridDim, _launcher_.blockDim);
	fflush(stdout);
}
/* *********** CUDA MEMORY **************************** */
// Allocate bytesize bytes on the device; logs the allocation to stdout.
// Errors are reported (not fatal) via safecall.
extern "C" void* allocDeviceMemory( size_t bytesize ) {
	char* mem = NULL;
	safecall(hipMalloc( (void**)&mem, bytesize ));
	fprintf(stdout,"allocDevice: allocating %lu bytes at %p\n", bytesize, mem);fflush(stdout);
	return (void*)mem;
}
extern "C" void* allocHostMemory( size_t bytesize ) {
	/* returns aligned CPU memory for faster transfer */
	// pinned host allocation (flags = 0 -> default behavior)
	char* mem = NULL;
	safecall(hipHostMalloc( (void**)&mem, bytesize, 0 ));
	return (void*)mem;
}
extern "C" void copyDeviceToHost( void* hostmem, void* devicemem, size_t bytesize ) {
	/* copy bytesize bytes from GPU to host */
	safecall(hipMemcpy( hostmem, devicemem, bytesize, hipMemcpyDeviceToHost ));
}
extern "C" void copyHostToDevice( void* devmem, void* hostmem, size_t bytesize ) {
	/* copy bytesize bytes from host to GPU */
	safecall(hipMemcpy( devmem, hostmem, bytesize, hipMemcpyHostToDevice ));
}
// Release device memory obtained from allocDeviceMemory(); logs the pointer.
extern "C" void freeDeviceMemory( void* mem ) {
	fprintf(stdout,"freeDevice: freeing at %p\n", mem);fflush(stdout);
	safecall(hipFree( mem ));
}
// Release pinned host memory obtained from allocHostMemory().
extern "C" void freeHostMemory( void* mem ) {
	safecall(hipHostFree( mem ));
}
extern "C" void dummy( ) {
	/* Minimal device allocate/free round-trip with progress markers
	   flushed to stdout (debugging aid for library/linkage issues). */
	double* devbuf;
	fprintf(stdout, "dummy 1 at %s:%d\n",__FILE__, __LINE__);
	fflush(stdout);
	hipMalloc( (void**)&devbuf, 128*sizeof(double));
	fprintf(stdout, "dummy 2 at %s:%d\n",__FILE__, __LINE__);
	fflush(stdout);
	hipFree(devbuf);
	fprintf(stdout, "dummy 3 at %s:%d\n",__FILE__, __LINE__);
	fflush(stdout);
}
/* *********** GPU KERNELS ************************** */
// Per-edge flux result produced by __flux_function_central__().
typedef struct {
	double edgeflux_s; // stage (water level) flux across the edge
	double edgeflux_x; // x-momentum flux across the edge
	double edgeflux_y; // y-momentum flux across the edge
	double max_speed;  // max wave speed at the edge (for timestep control)
} Fluxes;
// Limited velocity triple returned by __compute_speed__().
typedef struct {
	double u;  // velocity
	double uh; // momentum (adjusted to be consistent with u and h)
	double h;  // depth (clamped to 0 when below epsilon)
} Velocity;
__global__ void __set_to_default__(double* edge,
		size_t N, double def) {
	/* Fill edge[0..N) with the value def using a grid-stride loop,
	   so any launch configuration covers the whole array. */
	const size_t stride = gridDim.x * blockDim.x;
	for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride)
		edge[idx] = def;
}
__global__ void __set_arrays_to_default__(double* edge,
		double* xmom,
		double* ymom,
		size_t N, double def) {
	/* Fill all three length-N arrays with the value def using a
	   grid-stride loop, so any launch configuration covers them. */
	const size_t stride = gridDim.x * blockDim.x;
	for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride) {
		edge[idx] = def;
		xmom[idx] = def;
		ymom[idx] = def;
	}
}
// Per-block reduction of the CFL timestep limit: each triangle k with
// tri_full_flag==1 and speed above epsilon contributes radii[k]/max_speed[k];
// the minimum over each thread block is written to times_out[blockIdx.x].
// Final reduction over blocks is done on the CPU.
// Requires dynamic shared memory of blockDim.x doubles; the loop-halving
// reduction assumes blockDim.x is a power of two.
__global__ void __compute_time_step__( const long* tri_full_flag,
		const double* max_speed_array,
		const double* radii,
		const size_t number_of_elements,
		const double epsilon,
		const double time0,
		double* times_out ) {
	/* Computes minimal timestep in each triangle k, and finds
	minimal timestep in each block of threads.
	Output is written to times_out, final reduction must
	be performed on CPU.*/
	// shared memory size defined at kernel launch,
	// set according to blockDim
	extern __shared__ double sdata[];
	unsigned int tid = threadIdx.x;
	// initialize thread with default (previous) timestep
	double mytime = time0;
	// For all triangles
	for( size_t k = blockIdx.x * blockDim.x + threadIdx.x; k < number_of_elements; k += gridDim.x*blockDim.x) {
		if( 1 == tri_full_flag[k] && max_speed_array[k] > epsilon ) {
			mytime = fmin( mytime, radii[k] / max_speed_array[k] );
		}
	}
	// each thread in block writes to shared memory
	sdata[tid] = mytime;
	__syncthreads();
	// Reduce to one value per thread block by successively
	// comparing value pairs; in first sweep, first half of
	// threads compares first half of values to second half and
	// writes min to first half; 2nd sweep, first fourth compares
	// first and second fourth a.s.o.
	for(unsigned int s=blockDim.x/2; s>0; s>>=1) {
		if(tid < s) {
			sdata[tid] = fmin( sdata[tid + s], sdata[tid]);
		}
		__syncthreads();
	}
	// Lead thread writes min for this block to global mem
	if (tid == 0) {
		times_out[blockIdx.x] = sdata[0];
	}
}
__device__ double2 __rotate__(double q1, double q2, double n1, double n2) {
	/* Rotate the momentum component (q1, q2) from x,y coordinates into
	   the frame of the normal vector (n1, n2); returns the rotated pair.
	   To rotate in the opposite direction, call with (q1, q2, n1, -n2). */
	double2 rotated;
	rotated.x = n1*q1 + n2*q2;
	rotated.y = n1*q2 - n2*q1;
	return rotated;
}
// Compute limited velocity u = uh/h with shallow-water regularisation:
// below limiting_threshold the division is damped by h0/h (and h below
// epsilon is clamped to dry, u = 0); momentum uh is adjusted so that
// uh == u*h remains consistent. Returns (u, uh, h).
__device__ Velocity __compute_speed__(
		double uh,
		double h,
		const double epsilon,
		const double h0,
		const double limiting_threshold) {
	Velocity result;
	if (h < limiting_threshold) {
		// Apply limiting of speeds according to the ANUGA manual
		if (h < epsilon) {
			h = 0.0; // Could have been negative
			result.u = 0.0;
		} else {
			// regularised division avoids blow-up as h -> 0
			result.u = uh/(h + h0/ h);
		}
		// Adjust momentum to be consistent with speed
		uh = result.u * h;
	} else {
		// We are in deep water - no need for limiting
		result.u = uh/ h;
	}
	result.uh = uh;
	result.h = h;
	return result;
}
// Central-upwind flux across one edge.  Double2 arguments pack left/right
// edge values: .x = this triangle's value, .y = the neighbour's value;
// n is the outward normal.  Returns the stage/x/y fluxes and max wave speed.
__device__ Fluxes __flux_function_central__(double2 stage, double2 xmom,
		double2 ymom, double2 z_lr,
		double2 n,
		const double epsilon,
		const double h0,
		const double limiting_threshold,
		const double g) {
		//double *edgeflux,
		//double *max_speed) {
	/*Compute fluxes between volumes for the shallow water wave equation
	cast in terms of the 'stage', w = h+z using
	the 'central scheme' as described in
	Kurganov, Noelle, Petrova. 'Semidiscrete Central-Upwind Schemes For
	Hyperbolic Conservation Laws and Hamilton-Jacobi Equations'.
	Siam J. Sci. Comput. Vol. 23, No. 3, pp. 707-740.
	The implemented formula is given in equation (3.15) on page 714
	*/
	double2 tmp;
	double h_left, uh_left, vh_left, u_left;
	double h_right, uh_right, vh_right, u_right;
	double s_min, s_max, soundspeed_left, soundspeed_right;
	double denom, inverse_denominator, z;
	// Cuda doesn't do static arrays
	double fs_l, fs_r;   // stage fluxes, left/right
	double2 fv_l, fv_r;  // momentum fluxes, left/right
	Velocity velos;
	Fluxes fluxes;
	// Align x- and y-momentum with x-axis
	// Do not be confused: xmom.x and xmom.y are
	// left and right momenti in x direction
	tmp = __rotate__( xmom.x, ymom.x, n.x, n.y);
	xmom.x = tmp.x; ymom.x = tmp.y;
	tmp = __rotate__( xmom.y, ymom.y, n.x, n.y);
	xmom.y = tmp.x; ymom.y = tmp.y;
	z = 0.5*(z_lr.x + z_lr.y); // Average elevation values.
	// Even though this will nominally allow
	// for discontinuities in the elevation data,
	// there is currently no numerical support for
	// this so results may be strange near
	// jumps in the bed.
	// Compute speeds in x-direction
	h_left = stage.x - z;
	uh_left = xmom.x; // q_left_rotated[1];
	velos = __compute_speed__(uh_left, h_left,
			epsilon, h0, limiting_threshold);
	u_left = velos.u;
	uh_left = velos.uh;
	h_left = velos.h;
	h_right = stage.y - z;
	uh_right = xmom.y; // q_right_rotated[1];
	velos = __compute_speed__(uh_right, h_right,
			epsilon, h0, limiting_threshold);
	u_right = velos.u;
	uh_right = velos.uh;
	h_right = velos.h;
	// Momentum in y-direction
	vh_left = ymom.x; //q_left_rotated[2];
	vh_right = ymom.y; //q_right_rotated[2];
	// Limit y-momentum if necessary
	// Leaving this out, improves speed significantly (Ole 27/5/2009)
	// All validation tests pass, so do we really need it anymore?
	velos = __compute_speed__(vh_left, h_left,
			epsilon, h0, limiting_threshold);
	vh_left = velos.uh;
	h_left = velos.h;
	velos = __compute_speed__(vh_right, h_right,
			epsilon, h0, limiting_threshold);
	vh_right = velos.uh;
	h_right = velos.h;
	// Maximal and minimal wave speeds
	soundspeed_left = sqrt(g*h_left);
	soundspeed_right = sqrt(g*h_right);
	s_max = fmax(u_left + soundspeed_left, u_right + soundspeed_right);
	if (s_max < 0.0)
	{
		s_max = 0.0;
	}
	s_min = fmin(u_left - soundspeed_left, u_right - soundspeed_right);
	if (s_min > 0.0)
	{
		s_min = 0.0;
	}
	// Flux formulas
	fs_l = u_left*h_left;
	fv_l.x = u_left*uh_left + 0.5*g*h_left*h_left;
	fv_l.y = u_left*vh_left;
	fs_r = u_right*h_right;
	fv_r.x = u_right*uh_right + 0.5*g*h_right*h_right;
	fv_r.y = u_right*vh_right;
	// Flux computation
	denom = s_max - s_min;
	if (denom < epsilon)
	{ // FIXME (Ole): Try using h0 here
		// stagnant edge: no flux
		fluxes.edgeflux_s = 0.0;
		fluxes.edgeflux_x = 0.0;
		fluxes.edgeflux_y = 0.0;
		fluxes.max_speed = 0.0;
	}
	else
	{
		// eq. (3.15): upwind-weighted flux plus diffusive correction
		inverse_denominator = 1.0/denom;
		fluxes.edgeflux_s = s_max*fs_l - s_min*fs_r;
		fluxes.edgeflux_s += s_max*s_min*(stage.y - stage.x);
		fluxes.edgeflux_s *= inverse_denominator;
		fluxes.edgeflux_x = s_max*fv_l.x - s_min*fv_r.x;
		fluxes.edgeflux_x += s_max*s_min*(xmom.y - xmom.x);
		fluxes.edgeflux_x *= inverse_denominator;
		fluxes.edgeflux_y = s_max*fv_l.y - s_min*fv_r.y;
		fluxes.edgeflux_y += s_max*s_min*(ymom.y - ymom.x);
		fluxes.edgeflux_y *= inverse_denominator;
		// Maximal wavespeed
		fluxes.max_speed = fmax(fabs(s_max), fabs(s_min));
		// Rotate back
		tmp = __rotate__( fluxes.edgeflux_x, fluxes.edgeflux_y, n.x, -n.y);
		fluxes.edgeflux_x = tmp.x; fluxes.edgeflux_y = tmp.y;
	}
	return fluxes;
}
// Edge-parallel shallow-water flux kernel: one thread per edge ki
// (3 edges per triangle), then a shared-memory sweep sums the three edge
// fluxes of each triangle into the explicit-update arrays.
// Requires blockDim.x to be a multiple of 3 and dynamic shared memory of
// blockDim.x * SHARED_MEM_MULT doubles.
// WARNING: marked "Incomplete kernel call" by the author — the actual flux
// computation (__flux_function_central__) is commented out below, so
// `fluxes` is used uninitialized.
__global__ void __compute_fluxes_central_kernel__(
		const int number_of_elements,
		double timestep,
		const double epsilon,
		const double H0,
		const double g,
		const long* neighbours,
		const long* neighbour_edges,
		const double* normals,
		double* edgelengths,
		const double* areas,
		const double* stage_edge_values,
		const double* xmom_edge_values,
		const double* ymom_edge_values,
		const double* bed_edge_values,
		const double* stage_boundary_values,
		const double* xmom_boundary_values,
		const double* ymom_boundary_values,
		double* stage_explicit_update,
		double* xmom_explicit_update,
		double* ymom_explicit_update,
		double* max_speed_array,
		const int optimise_dry_cells) {
	/* __global__: called by CPU and executed on GPU;
	must return void, scalar variables, structs (and scalar members)
	are copied automatically, arrays must be allocated on device.
	*/
	// Local variables
	double length; //, zl, zr;
	double h0 = H0*H0; // This ensures a good balance when h approaches H0.
	double limiting_threshold = 10 * H0; // Avoid applying limiter below this
	// threshold for performance reasons.
	// See ANUGA manual under flux limiting
	int i, k, m, n;
	int ki, nm = 0, ki2; // Index shorthands
	Fluxes fluxes;
	double2 stage, xmom, ymom, z_lr, normvec;
	// Shared memory for explicit update quantity reduction
	extern __shared__ double update_shared[]; // empty [] array:(byte)size defined by kernel call
	//__shared__ double update_shared[BLOCKDIM*SHARED_MEM_MULT]; // OR static size (not both)
	// For all edges;
	for( ki = blockIdx.x * blockDim.x + threadIdx.x; ki < 3*number_of_elements; ki += gridDim.x * blockDim.x ) {
		// Get left hand side values from triangle k, edge i
		stage.x = stage_edge_values[ki];
		xmom.x = xmom_edge_values[ki];
		ymom.x = ymom_edge_values[ki];
		z_lr.x = bed_edge_values[ki];
		// Get right hand side values either from neighbouring triangle
		// or from boundary array (Quantities at neighbour on nearest face).
		n = neighbours[ki];
		if (n < 0) {
			// Neighbour is a boundary condition
			m = -n - 1; // Convert negative flag to boundary index
			// Bad access order consider binding boundary_values to Texture cache
			stage.y = stage_boundary_values[m];
			xmom.y = xmom_boundary_values[m];
			ymom.y = ymom_boundary_values[m];
			z_lr.y = z_lr.x; // Extend bed elevation to boundary
		}
		else {
			// Neighbour is a real triangle
			m = neighbour_edges[ki];
			nm = n * 3 + m; // Linear index (triangle n, edge m)
			// Again, bind to Texture cache
			stage.y = stage_edge_values[nm];
			xmom.y = xmom_edge_values[nm];
			ymom.y = ymom_edge_values[nm];
			z_lr.y = bed_edge_values[nm];
		}
		// Now we have values for this edge - both from left and right side.
		/*if (optimise_dry_cells) {
			// Check if flux calculation is necessary across this edge
			// This check will exclude dry cells.
			// This will also optimise cases where zl != zr as
			// long as both are dry
			if (fabs(ql[0] - zl) < epsilon &&
				fabs(qr[0] - zr) < epsilon) {
				// Cell boundary is dry
				already_computed_flux[ki] = call; // #k Done
				if (n >= 0) {
					already_computed_flux[nm] = call; // #n Done
				}
				max_speed = 0.0;
				continue;
			}
		}*/
		/*if (fabs(zl-zr)>1.0e-10) {TODO:
			report_python_error(AT,"Discontinuous Elevation");
			return 0.0;
		}*/
		// Outward pointing normal vector (domain.normals[k, 2*i:2*i+2])
		ki2 = 2 * ki; //k*6 + i*2
		// Bad access order, use Texture or shared mem
		normvec.x = normals[ki2];
		normvec.y = normals[ki2+1];
		// Edge flux computation (triangle k, edge i);
		// TODO: subroutine causes unspecified launch failure
		//fluxes = __flux_function_central__( stage, xmom, ymom, z_lr, normvec,
		//			epsilon, h0, limiting_threshold, g);
		// Multiply edgeflux by edgelength
		length = edgelengths[ki];
		fluxes.edgeflux_s *= length;
		fluxes.edgeflux_x *= length;
		fluxes.edgeflux_y *= length;
		// Use shared memory to accumulate flux for one triangle;
		// requires blockDim.x to be multiple of 3 (should also be multiple of 32)
#ifdef _SHARED_WRITEBACK_3_
		// Accumulate all update arrays in one sweep;
		// if shared memory is large enough
		update_shared[threadIdx.x] = fluxes.edgeflux_s;
		update_shared[threadIdx.x+blockDim.x] = fluxes.edgeflux_x;
		update_shared[threadIdx.x+2*blockDim.x] = fluxes.edgeflux_y;
		__syncthreads();
		// Each third of the threads in block write back an update array
		// with contiguous access, each thread sums fluxes in a triangle
		if( threadIdx.x < blockDim.x/3) {
			// Calculate contiguous index
			i = threadIdx.x;
			k = blockIdx.x*(blockDim.x/3) + i;
			stage_explicit_update[k] = ( update_shared[3*i]
					+ update_shared[3*i+1]
					+ update_shared[3*i+2]) / areas[k];
		} else if( threadIdx.x < 2*(blockDim.x/3) ) {
			i = threadIdx.x - (blockDim.x/3);
			k = blockIdx.x*(blockDim.x/3) + i;
			xmom_explicit_update[k] = ( update_shared[blockDim.x+3*i]
					+ update_shared[blockDim.x+3*i+1]
					+ update_shared[blockDim.x+3*i+2]) / areas[k];
		} else {
			i = threadIdx.x - 2*(blockDim.x/3);
			k = blockIdx.x*(blockDim.x/3) + i;
			ymom_explicit_update[k] = ( update_shared[2*blockDim.x+3*i]
					+ update_shared[2*blockDim.x+3*i+1]
					+ update_shared[2*blockDim.x+3*i+2] ) / areas[k];
		}
		__syncthreads();
#else
		// Write each update array back by itself;
		// only first third of threads busy
		update_shared[threadIdx.x]= fluxes.edgeflux_s;
		__syncthreads();
		if( threadIdx.x < blockDim.x/3 ) {
			i = threadIdx.x;
			k = blockIdx.x*(blockDim.x/3) + i;
			stage_explicit_update[k] = ( update_shared[3*i]
					+ update_shared[3*i+1]
					+ update_shared[3*i+2] ) / areas[k];
		}
		__syncthreads();
		update_shared[threadIdx.x] = fluxes.edgeflux_x;
		__syncthreads();
		if( threadIdx.x < blockDim.x/3 ) {
			i = threadIdx.x;
			k = blockIdx.x*(blockDim.x/3) + i;
			xmom_explicit_update[k] = ( update_shared[3*i]
					+ update_shared[3*i+1]
					+ update_shared[3*i+2] ) / areas[k];
		}
		__syncthreads();
		update_shared[threadIdx.x] = fluxes.edgeflux_y;
		__syncthreads();
		if( threadIdx.x < blockDim.x/3 ) {
			i = threadIdx.x;
			k = blockIdx.x*(blockDim.x/3) + i;
			ymom_explicit_update[k] = ( update_shared[3*i]
					+ update_shared[3*i+1]
					+ update_shared[3*i+2] ) / areas[k];
		}
		__syncthreads();
#endif
		// Likewise, get and write maximum speed within triangle
		// NOTE(review): the store of fluxes.max_speed into shared memory
		// below is commented out, so the reduction that follows reads
		// whatever the previous sweep left behind — appears unfinished.
		// update_shared[threadIdx.x] = fluxes.max_speed;
		__syncthreads();
		if( threadIdx.x < blockDim.x/3 ) {
			i = threadIdx.x;
			k = blockIdx.x*(blockDim.x/3) + i;
			fluxes.max_speed = fmax( update_shared[3*i], update_shared[3*i+1] );
			max_speed_array[k] = fmax( fluxes.max_speed, update_shared[3*i+2] );
		}
	} // End edge ki
	// computation of timestep in seperate routine because of triangle-wise access
}
/* *********** KERNEL WRAPPER FUNCTIONS ************************** */
// Host wrapper: fill the three length-N device arrays with def using the
// globally configured launch dimensions, then synchronize (errors surface
// via safecall).
extern "C" void _set_to_default( double* edge, double* xmom, double* ymom, size_t N, double def) {
	hipLaunchKernelGGL(( __set_arrays_to_default__) , dim3(_launcher_.gridDim), dim3(_launcher_.blockDim) , 0, 0, edge, xmom, ymom, N, def);
	safecall(hipDeviceSynchronize());
}
/*extern "C" void _set_to_default( double* edge, size_t N, double def) {
__set_to_default__ <<< _launcher_.gridDim, _launcher_.blockDim >>> ( edge, N, def);
safecall(hipDeviceSynchronize());
}*/
// Host driver: launches the flux kernel, then the per-block timestep
// reduction kernel, finishes the timestep reduction on the CPU, and
// returns the resulting (possibly reduced) timestep.
// All array arguments are device pointers. Requires blockDim % 3 == 0.
extern "C" double _compute_fluxes_central(
		int number_of_elements,
		double timestep,
		double epsilon,
		double H0,
		double g,
		long* neighbours,
		long* neighbour_edges,
		double* normals,
		double* edgelengths,
		double* radii,
		double* areas,
		long* tri_full_flag,
		double* stage_edge_values,
		double* xmom_edge_values,
		double* ymom_edge_values,
		double* bed_edge_values,
		double* stage_boundary_values,
		double* xmom_boundary_values,
		double* ymom_boundary_values,
		double* stage_explicit_update,
		double* xmom_explicit_update,
		double* ymom_explicit_update,
		double* max_speed_array,
		int optimise_dry_cells) {
	static long call = 1; // Static local variable flagging already computed flux
	int i;
	// Start computation
	call++; // Flag 'id' of flux calculation for this timestep
	// prepare memory for timestep reduction (TODO: (de)allocate only once)
	// one partial minimum per thread block
	const size_t reduction_size = _launcher_.gridDim*sizeof(double);
	double* times_out = (double*)allocHostMemory( reduction_size );
	double* times_out_gpu = (double*)allocDeviceMemory( reduction_size );
	printf("shared mum mult: %i\n", SHARED_MEM_MULT);
	if( 0 != _launcher_.blockDim%3 ) {
		fprintf(stderr,"error: blockDim required to be multiple of 3!\n");
	}
	// launch flux kernel with dynamic shared memory for the write-back sweeps
	hipLaunchKernelGGL(( __compute_fluxes_central_kernel__) , dim3(_launcher_.gridDim), dim3(_launcher_.blockDim),
			_launcher_.blockDim*sizeof(double)*SHARED_MEM_MULT , 0,
	//__compute_fluxes_central_kernel__ <<< _launcher_.gridDim, BLOCKDIM >>> ( // for static shared memory size
			number_of_elements,
			timestep,
			epsilon,
			H0,
			g,
			neighbours,
			neighbour_edges,
			normals,
			edgelengths,
			areas,
			stage_edge_values,
			xmom_edge_values,
			ymom_edge_values,
			bed_edge_values,
			stage_boundary_values,
			xmom_boundary_values,
			ymom_boundary_values,
			stage_explicit_update,
			xmom_explicit_update,
			ymom_explicit_update,
			max_speed_array,
			optimise_dry_cells);
	safecall(hipDeviceSynchronize()); // prevents overlap of kernels
	// Some timestepping debug: (timestep 1.0)
	//printKernelDims();
	//_set_to_default( max_speed_array, radii, areas, number_of_elements, 3.3); //
	// per-block minimum of the CFL timestep limit
	hipLaunchKernelGGL(( __compute_time_step__) , dim3(_launcher_.gridDim), dim3(_launcher_.blockDim), _launcher_.blockDim*sizeof(double) , 0,
			tri_full_flag,
			max_speed_array,
			radii,
			number_of_elements,
			epsilon,
			timestep,
			times_out_gpu );
	safecall(hipDeviceSynchronize());
	copyDeviceToHost( times_out, times_out_gpu, reduction_size );
	// finish the reduction over blocks on the CPU
	for( i=0; i < _launcher_.gridDim; ++i) {
		timestep = min( timestep, times_out[i] );
	}
	//printf("\ntimestep = %f\n",timestep);
	//fflush(stdout);
	freeDeviceMemory( times_out_gpu );
	freeHostMemory( times_out );
	return timestep;
}
| 6a19cd9453f98ab1d9a2ef1a66e4e12bbe3b1f9e.cu | // cudafun.cu --
// CUDA memory allocation & shallow water kernel routines
// To compile (Cuda 3.2):
// nvcc -c --gpu_architecture sm_13 -I${CUDA_INSTALL_PATH}/include
// -Xcompiler -fpic
//
// ! Incomplete kernel call !
//
// Matthias Griessinger, University of Erlangen, 2011.
#include <stdio.h>
//#include <cuda_runtime_api.h>
#ifndef _CUDA_MACROS_H_
#define _CUDA_MACROS_H_
#define safecall(call) do{\
cudaError_t err = call ;\
if( cudaSuccess != err ){\
fprintf(stdout, "cuda error at %s:%d, %s\n",\
__FILE__, __LINE__, cudaGetErrorString(err));\
fflush(stdout);\
}\
} while(0)
#define BLOCKDIM 96
#ifdef _SHARED_WRITEBACK_3_
#define SHARED_MEM_MULT 3
#else
#define SHARED_MEM_MULT 1
#endif
#endif
/* *********** DEVICE SELECTION ************************* */
extern "C" void getDeviceInfo( int rank, int size, const char* hostname) {
	/* Print device information: device count and each device's name.
	Output is produced by rank 0 only; other ranks just query the count. */
	int deviceCount, device;
	cudaDeviceProp deviceProp;
	cudaGetDeviceCount(&deviceCount);
	if ( 0 == rank ) {
		printf("## rank %i/%i on %s --\t Device Test: No. Cards: %d\n",
			rank, size-1, hostname, deviceCount);
		for( device = 0; device < deviceCount; ++device) {
			cudaGetDeviceProperties(&deviceProp, device);
			printf("## rank %i/%i on %s --\t Device %d: %s\n",
				rank, size-1, hostname, device, deviceProp.name);
		}
	}
}
extern "C" int selectDevice( int rank, int size, const char* hostname ) {
	/* Select GPU device (for multiple cards);
	call before any GPU memory/kernel calls,
	otherwise no effect (default card selected)
	Maps MPI-style rank to a card round-robin (rank % deviceCount)
	and returns the device index actually selected.
	NOTE(review): prints "rank %i/%i" with `size`, while getDeviceInfo()
	uses `size-1` — confirm which convention is intended.
	*/
	int deviceCount, takedevice, device;
	cudaDeviceProp deviceProp;
	cudaGetDeviceCount(&deviceCount);
	takedevice = rank%deviceCount; // round-robin assignment
	cudaSetDevice(takedevice);
	cudaGetDevice(&device);
	cudaGetDeviceProperties(&deviceProp, device);
	printf("rank %i/%i on %s --\t Selecting Device %d: %s\n",
		rank, size, hostname, device, deviceProp.name);
	return device;
}
/* *********** KERNEL LAUNCH PARAMETERS ***************** */
/* Global kernel launch configuration: number of blocks (gridDim)
   and threads per block (blockDim) used by all kernel wrappers. */
typedef struct {
  int gridDim;
  int blockDim;
} KERNEL_LAUNCHER;
/* Single process-wide launcher instance; set via setKernelDims(). */
KERNEL_LAUNCHER _launcher_;
/* Store the launch configuration for all subsequent kernel calls. */
extern "C" void setKernelDims( const int gridDim, const int blockDim ) {
  _launcher_.gridDim = gridDim;
  _launcher_.blockDim = blockDim;
}
extern "C" void printKernelDims() {
  /* Report the currently configured launch dimensions (grid x block). */
  const int blocks  = _launcher_.gridDim;
  const int threads = _launcher_.blockDim;
  printf(" kernel dims: %i x %i\n", blocks, threads);
  fflush(stdout);
}
/* *********** CUDA MEMORY **************************** */
extern "C" void* allocDeviceMemory( size_t bytesize ) {
  /* Allocate bytesize bytes of GPU global memory and return the raw
     device pointer; errors are reported by safecall. Release with
     freeDeviceMemory(). */
  char* mem = NULL;
  safecall(cudaMalloc( (void**)&mem, bytesize ));
  /* %zu is the portable conversion for size_t (the original %lu is
     only correct where size_t == unsigned long); %p requires void*. */
  fprintf(stdout,"allocDevice: allocating %zu bytes at %p\n", bytesize, (void*)mem);fflush(stdout);
  return (void*)mem;
}
extern "C" void* allocHostMemory( size_t bytesize ) {
  /* Allocate page-locked (pinned) host memory, which speeds up
     host<->device transfers; release with freeHostMemory(). */
  char* buffer = NULL;
  safecall(cudaHostAlloc( (void**)&buffer, bytesize, 0 ));
  return (void*)buffer;
}
/* Blocking copy of bytesize bytes from device memory to host memory. */
extern "C" void copyDeviceToHost( void* hostmem, void* devicemem, size_t bytesize ) {
  /* copy bytesize bytes from GPU to host */
  safecall(cudaMemcpy( hostmem, devicemem, bytesize, cudaMemcpyDeviceToHost ));
}
/* Blocking copy of bytesize bytes from host memory to device memory. */
extern "C" void copyHostToDevice( void* devmem, void* hostmem, size_t bytesize ) {
  /* copy bytesize bytes from host to GPU */
  safecall(cudaMemcpy( devmem, hostmem, bytesize, cudaMemcpyHostToDevice ));
}
/* Release GPU memory obtained from allocDeviceMemory(). */
extern "C" void freeDeviceMemory( void* mem ) {
  fprintf(stdout,"freeDevice: freeing at %p\n", mem);fflush(stdout);
  safecall(cudaFree( mem ));
}
/* Release pinned host memory obtained from allocHostMemory(). */
extern "C" void freeHostMemory( void* mem ) {
  safecall(cudaFreeHost( mem ));
}
extern "C" void dummy( ) {
  /* Minimal allocate/free round trip with progress markers; used to
     smoke-test CUDA runtime linkage from the host side. */
  fprintf(stdout, "dummy 1 at %s:%d\n",__FILE__, __LINE__);
  fflush(stdout);
  double* scratch = NULL;
  cudaMalloc( (void**)&scratch, 128*sizeof(double));
  fprintf(stdout, "dummy 2 at %s:%d\n",__FILE__, __LINE__);
  fflush(stdout);
  cudaFree(scratch);
  fprintf(stdout, "dummy 3 at %s:%d\n",__FILE__, __LINE__);
  fflush(stdout);
}
/* *********** GPU KERNELS ************************** */
/* Per-edge flux result: stage flux (edgeflux_s), x/y momentum fluxes,
   and the maximal wave speed encountered at the edge. */
typedef struct {
  double edgeflux_s;
  double edgeflux_x;
  double edgeflux_y;
  double max_speed;
} Fluxes;
/* Limited flow state: velocity u, momentum uh and water depth h,
   as returned by __compute_speed__. */
typedef struct {
  double u;
  double uh;
  double h;
} Velocity;
__global__ void __set_to_default__(double* edge,
    size_t N, double def) {
  /* Fill edge[0..N) with the constant def. Grid-stride loop, so any
     launch configuration covers the whole array. */
  const size_t stride = gridDim.x * blockDim.x;
  for( size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride ) {
    edge[idx] = def;
  }
}
__global__ void __set_arrays_to_default__(double* edge,
    double* xmom,
    double* ymom,
    size_t N, double def) {
  /* Fill the three arrays edge, xmom, ymom (each of length N) with
     the constant def in one grid-stride sweep. */
  const size_t stride = gridDim.x * blockDim.x;
  for( size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride ) {
    edge[idx] = def;
    xmom[idx] = def;
    ymom[idx] = def;
  }
}
__global__ void __compute_time_step__( const long* tri_full_flag,
    const double* max_speed_array,
    const double* radii,
    const size_t number_of_elements,
    const double epsilon,
    const double time0,
    double* times_out ) {
  /* Computes minimal timestep in each triangle k, and finds
     minimal timestep in each block of threads.
     Output is written to times_out (one value per block); final
     reduction must be performed on CPU.
     NOTE(review): the tree reduction below halves blockDim.x each
     sweep, which is only exact for power-of-two block sizes --
     confirm the launcher always uses one. */
  // shared memory size defined at kernel launch,
  // set according to blockDim
  extern __shared__ double sdata[];
  unsigned int tid = threadIdx.x;
  // initialize thread with default (previous) timestep
  double mytime = time0;
  // For all triangles (grid-stride loop); only interior ("full")
  // triangles with a speed above epsilon constrain the timestep
  for( size_t k = blockIdx.x * blockDim.x + threadIdx.x; k < number_of_elements; k += gridDim.x*blockDim.x) {
    if( 1 == tri_full_flag[k] && max_speed_array[k] > epsilon ) {
      mytime = fmin( mytime, radii[k] / max_speed_array[k] );
    }
  }
  // each thread in block writes to shared memory
  sdata[tid] = mytime;
  __syncthreads();
  // Reduce to one value per thread block by successively
  // comparing value pairs; in first sweep, first half of
  // threads compares first half of values to second half and
  // writes min to first half; 2nd sweep, first fourth compares
  // first and second fourth a.s.o.
  for(unsigned int s=blockDim.x/2; s>0; s>>=1) {
    if(tid < s) {
      sdata[tid] = fmin( sdata[tid + s], sdata[tid]);
    }
    __syncthreads();
  }
  // Lead thread writes min for this block to global mem
  if (tid == 0) {
    times_out[blockIdx.x] = sdata[0];
  }
}
__device__ double2 __rotate__(double q1, double q2, double n1, double n2) {
  /* Rotate the momentum component (q1, q2) from x,y coordinates into
     the frame spanned by the normal vector (n1, n2); the rotated pair
     is returned as a double2. To rotate in the opposite direction,
     call again with (q1, q2, n1, -n2). */
  double2 rotated;
  rotated.x =  n1*q1 + n2*q2;
  rotated.y = -n2*q1 + n1*q2;
  return rotated;
}
__device__ Velocity __compute_speed__(
    double uh,
    double h,
    const double epsilon,
    const double h0,
    const double limiting_threshold) {
  /* Compute the flow velocity u = uh/h with limiting for shallow
     depths: below limiting_threshold the division is regularised with
     h0, and below epsilon the cell is treated as dry (u = 0, h = 0).
     Returns the (possibly adjusted) u, uh and h. */
  Velocity result;
  if (h < limiting_threshold) {
    // Apply limiting of speeds according to the ANUGA manual
    if (h < epsilon) {
      h = 0.0; // Could have been negative
      result.u = 0.0;
    } else {
      // regularised division: u = uh / (h + h0/h)
      result.u = uh/(h + h0/ h);
    }
    // Adjust momentum to be consistent with speed
    uh = result.u * h;
  } else {
    // We are in deep water - no need for limiting
    result.u = uh/ h;
  }
  result.uh = uh;
  result.h = h;
  return result;
}
__device__ Fluxes __flux_function_central__(double2 stage, double2 xmom,
    double2 ymom, double2 z_lr,
    double2 n,
    const double epsilon,
    const double h0,
    const double limiting_threshold,
    const double g) {
  //double *edgeflux,
  //double *max_speed) {
  /*Compute fluxes between volumes for the shallow water wave equation
    cast in terms of the 'stage', w = h+z using
    the 'central scheme' as described in
    Kurganov, Noelle, Petrova. 'Semidiscrete Central-Upwind Schemes For
    Hyperbolic Conservation Laws and Hamilton-Jacobi Equations'.
    Siam J. Sci. Comput. Vol. 23, No. 3, pp. 707-740.
    The implemented formula is given in equation (3.15) on page 714.
    The .x member of each double2 argument holds the left-edge value,
    the .y member the right-edge value; n is the edge normal.
    Returns the three edge fluxes and the maximal wave speed. */
  double2 tmp;
  double h_left, uh_left, vh_left, u_left;
  double h_right, uh_right, vh_right, u_right;
  double s_min, s_max, soundspeed_left, soundspeed_right;
  double denom, inverse_denominator, z;
  // Cuda doesn't do static arrays
  double fs_l, fs_r;
  double2 fv_l, fv_r;
  Velocity velos;
  Fluxes fluxes;
  // Align x- and y-momentum with x-axis
  // Do not be confused: xmom.x and xmom.y are
  // left and right momenti in x direction
  tmp = __rotate__( xmom.x, ymom.x, n.x, n.y);
  xmom.x = tmp.x; ymom.x = tmp.y;
  tmp = __rotate__( xmom.y, ymom.y, n.x, n.y);
  xmom.y = tmp.x; ymom.y = tmp.y;
  z = 0.5*(z_lr.x + z_lr.y); // Average elevation values.
  // Even though this will nominally allow
  // for discontinuities in the elevation data,
  // there is currently no numerical support for
  // this so results may be strange near
  // jumps in the bed.
  // Compute speeds in x-direction
  h_left = stage.x - z;
  uh_left = xmom.x; // q_left_rotated[1];
  velos = __compute_speed__(uh_left, h_left,
      epsilon, h0, limiting_threshold);
  u_left = velos.u;
  uh_left = velos.uh;
  h_left = velos.h;
  h_right = stage.y - z;
  uh_right = xmom.y; // q_right_rotated[1];
  velos = __compute_speed__(uh_right, h_right,
      epsilon, h0, limiting_threshold);
  u_right = velos.u;
  uh_right = velos.uh;
  h_right = velos.h;
  // Momentum in y-direction
  vh_left = ymom.x; //q_left_rotated[2];
  vh_right = ymom.y; //q_right_rotated[2];
  // Limit y-momentum if necessary
  // Leaving this out, improves speed significantly (Ole 27/5/2009)
  // All validation tests pass, so do we really need it anymore?
  velos = __compute_speed__(vh_left, h_left,
      epsilon, h0, limiting_threshold);
  vh_left = velos.uh;
  h_left = velos.h;
  velos = __compute_speed__(vh_right, h_right,
      epsilon, h0, limiting_threshold);
  vh_right = velos.uh;
  h_right = velos.h;
  // Maximal and minimal wave speeds
  soundspeed_left = sqrt(g*h_left);
  soundspeed_right = sqrt(g*h_right);
  s_max = fmax(u_left + soundspeed_left, u_right + soundspeed_right);
  if (s_max < 0.0)
  {
    s_max = 0.0;
  }
  s_min = fmin(u_left - soundspeed_left, u_right - soundspeed_right);
  if (s_min > 0.0)
  {
    s_min = 0.0;
  }
  // Flux formulas
  fs_l = u_left*h_left;
  fv_l.x = u_left*uh_left + 0.5*g*h_left*h_left;
  fv_l.y = u_left*vh_left;
  fs_r = u_right*h_right;
  fv_r.x = u_right*uh_right + 0.5*g*h_right*h_right;
  fv_r.y = u_right*vh_right;
  // Flux computation
  denom = s_max - s_min;
  if (denom < epsilon)
  { // FIXME (Ole): Try using h0 here
    // degenerate wave fan: no flux across the edge
    fluxes.edgeflux_s = 0.0;
    fluxes.edgeflux_x = 0.0;
    fluxes.edgeflux_y = 0.0;
    fluxes.max_speed = 0.0;
  }
  else
  {
    // central-upwind flux, eq. (3.15) of Kurganov et al.
    inverse_denominator = 1.0/denom;
    fluxes.edgeflux_s = s_max*fs_l - s_min*fs_r;
    fluxes.edgeflux_s += s_max*s_min*(stage.y - stage.x);
    fluxes.edgeflux_s *= inverse_denominator;
    fluxes.edgeflux_x = s_max*fv_l.x - s_min*fv_r.x;
    fluxes.edgeflux_x += s_max*s_min*(xmom.y - xmom.x);
    fluxes.edgeflux_x *= inverse_denominator;
    fluxes.edgeflux_y = s_max*fv_l.y - s_min*fv_r.y;
    fluxes.edgeflux_y += s_max*s_min*(ymom.y - ymom.x);
    fluxes.edgeflux_y *= inverse_denominator;
    // Maximal wavespeed
    fluxes.max_speed = fmax(fabs(s_max), fabs(s_min));
    // Rotate back
    tmp = __rotate__( fluxes.edgeflux_x, fluxes.edgeflux_y, n.x, -n.y);
    fluxes.edgeflux_x = tmp.x; fluxes.edgeflux_y = tmp.y;
  }
  return fluxes;
}
__global__ void __compute_fluxes_central_kernel__(
    const int number_of_elements,
    double timestep,
    const double epsilon,
    const double H0,
    const double g,
    const long* neighbours,
    const long* neighbour_edges,
    const double* normals,
    double* edgelengths,
    const double* areas,
    const double* stage_edge_values,
    const double* xmom_edge_values,
    const double* ymom_edge_values,
    const double* bed_edge_values,
    const double* stage_boundary_values,
    const double* xmom_boundary_values,
    const double* ymom_boundary_values,
    double* stage_explicit_update,
    double* xmom_explicit_update,
    double* ymom_explicit_update,
    double* max_speed_array,
    const int optimise_dry_cells) {
  /* __global__: called by CPU and executed on GPU;
     must return void, scalar variables, structs (and scalar members)
     are copied automatically, arrays must be allocated on device.
     One thread processes one edge ki = 3*k + i of triangle k; the
     per-triangle update sums are assembled in shared memory, which
     requires blockDim.x to be a multiple of 3.
     WARNING(review): the __flux_function_central__ call below is
     commented out ("Incomplete kernel call" per the file header), so
     `fluxes` is read uninitialized and the max-speed section reads
     stale shared memory -- the kernel's output is not meaningful
     until that call is restored. */
  // Local variables
  double length; //, zl, zr;
  double h0 = H0*H0; // This ensures a good balance when h approaches H0.
  double limiting_threshold = 10 * H0; // Avoid applying limiter below this
  // threshold for performance reasons.
  // See ANUGA manual under flux limiting
  int i, k, m, n;
  int ki, nm = 0, ki2; // Index shorthands
  Fluxes fluxes;
  double2 stage, xmom, ymom, z_lr, normvec;
  // Shared memory for explicit update quantity reduction
  extern __shared__ double update_shared[]; // empty [] array:(byte)size defined by kernel call
  //__shared__ double update_shared[BLOCKDIM*SHARED_MEM_MULT]; // OR static size (not both)
  // For all edges;
  for( ki = blockIdx.x * blockDim.x + threadIdx.x; ki < 3*number_of_elements; ki += gridDim.x * blockDim.x ) {
    // Get left hand side values from triangle k, edge i
    stage.x = stage_edge_values[ki];
    xmom.x = xmom_edge_values[ki];
    ymom.x = ymom_edge_values[ki];
    z_lr.x = bed_edge_values[ki];
    // Get right hand side values either from neighbouring triangle
    // or from boundary array (Quantities at neighbour on nearest face).
    n = neighbours[ki];
    if (n < 0) {
      // Neighbour is a boundary condition
      m = -n - 1; // Convert negative flag to boundary index
      // Bad access order consider binding boundary_values to Texture cache
      stage.y = stage_boundary_values[m];
      xmom.y = xmom_boundary_values[m];
      ymom.y = ymom_boundary_values[m];
      z_lr.y = z_lr.x; // Extend bed elevation to boundary
    }
    else {
      // Neighbour is a real triangle
      m = neighbour_edges[ki];
      nm = n * 3 + m; // Linear index (triangle n, edge m)
      // Again, bind to Texture cache
      stage.y = stage_edge_values[nm];
      xmom.y = xmom_edge_values[nm];
      ymom.y = ymom_edge_values[nm];
      z_lr.y = bed_edge_values[nm];
    }
    // Now we have values for this edge - both from left and right side.
    /*if (optimise_dry_cells) {
      // Check if flux calculation is necessary across this edge
      // This check will exclude dry cells.
      // This will also optimise cases where zl != zr as
      // long as both are dry
      if (fabs(ql[0] - zl) < epsilon &&
          fabs(qr[0] - zr) < epsilon) {
        // Cell boundary is dry
        already_computed_flux[ki] = call; // #k Done
        if (n >= 0) {
          already_computed_flux[nm] = call; // #n Done
        }
        max_speed = 0.0;
        continue;
      }
    }*/
    /*if (fabs(zl-zr)>1.0e-10) {TODO:
      report_python_error(AT,"Discontinuous Elevation");
      return 0.0;
    }*/
    // Outward pointing normal vector (domain.normals[k, 2*i:2*i+2])
    ki2 = 2 * ki; //k*6 + i*2
    // Bad access order, use Texture or shared mem
    normvec.x = normals[ki2];
    normvec.y = normals[ki2+1];
    // Edge flux computation (triangle k, edge i);
    // TODO: subroutine causes unspecified launch failure
    //fluxes = __flux_function_central__( stage, xmom, ymom, z_lr, normvec,
    //    epsilon, h0, limiting_threshold, g);
    // Multiply edgeflux by edgelength
    length = edgelengths[ki];
    fluxes.edgeflux_s *= length;
    fluxes.edgeflux_x *= length;
    fluxes.edgeflux_y *= length;
    // Use shared memory to accumulate flux for one triangle;
    // requires blockDim.x to be multiple of 3 (should also be multiple of 32)
#ifdef _SHARED_WRITEBACK_3_
    // Accumulate all update arrays in one sweep;
    // if shared memory is large enough
    update_shared[threadIdx.x] = fluxes.edgeflux_s;
    update_shared[threadIdx.x+blockDim.x] = fluxes.edgeflux_x;
    update_shared[threadIdx.x+2*blockDim.x] = fluxes.edgeflux_y;
    __syncthreads();
    // Each third of the threads in block write back an update array
    // with contiguous access, each thread sums fluxes in a triangle
    if( threadIdx.x < blockDim.x/3) {
      // Calculate contiguous index
      i = threadIdx.x;
      k = blockIdx.x*(blockDim.x/3) + i;
      stage_explicit_update[k] = ( update_shared[3*i]
          + update_shared[3*i+1]
          + update_shared[3*i+2]) / areas[k];
    } else if( threadIdx.x < 2*(blockDim.x/3) ) {
      i = threadIdx.x - (blockDim.x/3);
      k = blockIdx.x*(blockDim.x/3) + i;
      xmom_explicit_update[k] = ( update_shared[blockDim.x+3*i]
          + update_shared[blockDim.x+3*i+1]
          + update_shared[blockDim.x+3*i+2]) / areas[k];
    } else {
      i = threadIdx.x - 2*(blockDim.x/3);
      k = blockIdx.x*(blockDim.x/3) + i;
      ymom_explicit_update[k] = ( update_shared[2*blockDim.x+3*i]
          + update_shared[2*blockDim.x+3*i+1]
          + update_shared[2*blockDim.x+3*i+2] ) / areas[k];
    }
    __syncthreads();
#else
    // Write each update array back by itself;
    // only first third of threads busy
    update_shared[threadIdx.x]= fluxes.edgeflux_s;
    __syncthreads();
    if( threadIdx.x < blockDim.x/3 ) {
      i = threadIdx.x;
      k = blockIdx.x*(blockDim.x/3) + i;
      stage_explicit_update[k] = ( update_shared[3*i]
          + update_shared[3*i+1]
          + update_shared[3*i+2] ) / areas[k];
    }
    __syncthreads();
    update_shared[threadIdx.x] = fluxes.edgeflux_x;
    __syncthreads();
    if( threadIdx.x < blockDim.x/3 ) {
      i = threadIdx.x;
      k = blockIdx.x*(blockDim.x/3) + i;
      xmom_explicit_update[k] = ( update_shared[3*i]
          + update_shared[3*i+1]
          + update_shared[3*i+2] ) / areas[k];
    }
    __syncthreads();
    update_shared[threadIdx.x] = fluxes.edgeflux_y;
    __syncthreads();
    if( threadIdx.x < blockDim.x/3 ) {
      i = threadIdx.x;
      k = blockIdx.x*(blockDim.x/3) + i;
      ymom_explicit_update[k] = ( update_shared[3*i]
          + update_shared[3*i+1]
          + update_shared[3*i+2] ) / areas[k];
    }
    __syncthreads();
#endif
    // Likewise, get and write maximum speed within triangle
    // NOTE(review): the store of fluxes.max_speed is commented out,
    // so the reads below see whatever the previous section left in
    // shared memory.
    // update_shared[threadIdx.x] = fluxes.max_speed;
    __syncthreads();
    if( threadIdx.x < blockDim.x/3 ) {
      i = threadIdx.x;
      k = blockIdx.x*(blockDim.x/3) + i;
      fluxes.max_speed = fmax( update_shared[3*i], update_shared[3*i+1] );
      max_speed_array[k] = fmax( fluxes.max_speed, update_shared[3*i+2] );
    }
  } // End edge ki
  // computation of timestep in seperate routine because of triangle-wise access
}
/* *********** KERNEL WRAPPER FUNCTIONS ************************** */
extern "C" void _set_to_default( double* edge, double* xmom, double* ymom, size_t N, double def) {
  /* Host wrapper: fill the three arrays with def using the configured
     launch dimensions, then block until the kernel has finished. */
  __set_arrays_to_default__ <<< _launcher_.gridDim, _launcher_.blockDim >>> ( edge, xmom, ymom, N, def);
  /* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
     is the documented drop-in replacement. */
  safecall(cudaDeviceSynchronize());
}
/*extern "C" void _set_to_default( double* edge, size_t N, double def) {
__set_to_default__ <<< _launcher_.gridDim, _launcher_.blockDim >>> ( edge, N, def);
safecall(cudaThreadSynchronize());
}*/
extern "C" double _compute_fluxes_central(
    int number_of_elements,
    double timestep,
    double epsilon,
    double H0,
    double g,
    long* neighbours,
    long* neighbour_edges,
    double* normals,
    double* edgelengths,
    double* radii,
    double* areas,
    long* tri_full_flag,
    double* stage_edge_values,
    double* xmom_edge_values,
    double* ymom_edge_values,
    double* bed_edge_values,
    double* stage_boundary_values,
    double* xmom_boundary_values,
    double* ymom_boundary_values,
    double* stage_explicit_update,
    double* xmom_explicit_update,
    double* ymom_explicit_update,
    double* max_speed_array,
    int optimise_dry_cells) {
  /* Host driver: launches the central flux kernel, then the per-block
     timestep reduction kernel, and finishes the min-reduction over
     the per-block partial results on the CPU.
     All pointer arguments are device pointers.
     Returns the (possibly reduced) timestep. */
  static long call = 1; // Static local variable flagging already computed flux
  int i;
  // Start computation
  call++; // Flag 'id' of flux calculation for this timestep
  // (only consumed by the commented-out dry-cell optimisation)
  // prepare memory for timestep reduction (TODO: (de)allocate only once)
  const size_t reduction_size = _launcher_.gridDim*sizeof(double);
  double* times_out = (double*)allocHostMemory( reduction_size );
  double* times_out_gpu = (double*)allocDeviceMemory( reduction_size );
  printf("shared mem mult: %i\n", SHARED_MEM_MULT); // fixed "mum" typo in log message
  if( 0 != _launcher_.blockDim%3 ) {
    fprintf(stderr,"error: blockDim required to be multiple of 3!\n");
  }
  __compute_fluxes_central_kernel__ <<< _launcher_.gridDim, _launcher_.blockDim,
      _launcher_.blockDim*sizeof(double)*SHARED_MEM_MULT >>> (
  //__compute_fluxes_central_kernel__ <<< _launcher_.gridDim, BLOCKDIM >>> ( // for static shared memory size
      number_of_elements,
      timestep,
      epsilon,
      H0,
      g,
      neighbours,
      neighbour_edges,
      normals,
      edgelengths,
      areas,
      stage_edge_values,
      xmom_edge_values,
      ymom_edge_values,
      bed_edge_values,
      stage_boundary_values,
      xmom_boundary_values,
      ymom_boundary_values,
      stage_explicit_update,
      xmom_explicit_update,
      ymom_explicit_update,
      max_speed_array,
      optimise_dry_cells);
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
  // is the drop-in replacement.
  safecall(cudaDeviceSynchronize()); // prevents overlap of kernels
  // Some timestepping debug: (timestep 1.0)
  //printKernelDims();
  //_set_to_default( max_speed_array, radii, areas, number_of_elements, 3.3); //
  __compute_time_step__ <<< _launcher_.gridDim, _launcher_.blockDim, _launcher_.blockDim*sizeof(double) >>> (
      tri_full_flag,
      max_speed_array,
      radii,
      number_of_elements,
      epsilon,
      timestep,
      times_out_gpu );
  safecall(cudaDeviceSynchronize());
  copyDeviceToHost( times_out, times_out_gpu, reduction_size );
  // finish the min-reduction over the per-block results on the host
  for( i=0; i < _launcher_.gridDim; ++i) {
    timestep = min( timestep, times_out[i] );
  }
  //printf("\ntimestep = %f\n",timestep);
  //fflush(stdout);
  freeDeviceMemory( times_out_gpu );
  freeHostMemory( times_out );
  return timestep;
}
|
1d234990c8be5fd1c8a1a2a0b6020bb3c178ce41.hip | // !!! This is a file automatically generated by hipify!!!
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cudaflow.hpp>
#include <taskflow/cuda/algorithm/find.hpp>
// ----------------------------------------------------------------------------
// cuda_min_max_element
// ----------------------------------------------------------------------------
template <typename T>
void cuda_min_max_element() {
  // Exercises tf::cuda_min_element / tf::cuda_max_element over a sweep
  // of sizes n (1..100 stepwise, then roughly doubling up to 1234567)
  // and four comparators, validating each device result against
  // std::min_element / std::max_element on the same shared-memory data.
  tf::Taskflow taskflow;
  tf::Executor executor;
  for(int n=1; n<=1234567; n = (n<=100) ? n+1 : n*2 + 1) {
    taskflow.emplace([n](){
      tf::cudaStream stream;
      tf::cudaDefaultExecutionPolicy policy(stream);
      // gpu data (shared allocations, readable from host below)
      auto gdata = tf::cuda_malloc_shared<T>(n);
      auto min_i = tf::cuda_malloc_shared<unsigned>(1);
      auto max_i = tf::cuda_malloc_shared<unsigned>(1);
      // buffer
      // NOTE(review): sized with min_element_bufsz but also reused by
      // cuda_max_element -- presumably both need the same size;
      // confirm against the taskflow find-algorithm API.
      void* buff;
      hipMalloc(&buff, policy.min_element_bufsz<T>(n));
      for(int i=0; i<n; i++) {
        gdata[i] = rand() % 1000 - 500;
      }
      // --------------------------------------------------------------------------
      // GPU find
      // --------------------------------------------------------------------------
      tf::cudaStream s;
      tf::cudaDefaultExecutionPolicy p(s);
      tf::cuda_min_element(
        p, gdata, gdata+n, min_i, []__device__(T a, T b) { return a < b; }, buff
      );
      tf::cuda_max_element(
        p, gdata, gdata+n, max_i, []__device__(T a, T b) { return a < b; }, buff
      );
      s.synchronize();
      auto min_v = *std::min_element(gdata, gdata+n, [](T a, T b) { return a < b; });
      auto max_v = *std::max_element(gdata, gdata+n, [](T a, T b) { return a < b; });
      REQUIRE(gdata[*min_i] == min_v);
      REQUIRE(gdata[*max_i] == max_v);
      // change the comparator (reversed ordering)
      tf::cuda_min_element(
        p, gdata, gdata+n, min_i, []__device__(T a, T b) { return a > b; }, buff
      );
      tf::cuda_max_element(
        p, gdata, gdata+n, max_i, []__device__(T a, T b) { return a > b; }, buff
      );
      s.synchronize();
      min_v = *std::min_element(gdata, gdata+n, [](T a, T b) { return a > b; });
      max_v = *std::max_element(gdata, gdata+n, [](T a, T b) { return a > b; });
      REQUIRE(gdata[*min_i] == min_v);
      REQUIRE(gdata[*max_i] == max_v);
      // change the comparator (negated reversed ordering)
      tf::cuda_min_element(
        p, gdata, gdata+n, min_i, []__device__(T a, T b) { return -a > -b; }, buff
      );
      tf::cuda_max_element(
        p, gdata, gdata+n, max_i, []__device__(T a, T b) { return -a > -b; }, buff
      );
      s.synchronize();
      min_v = *std::min_element(gdata, gdata+n, [](T a, T b) { return -a > -b; });
      max_v = *std::max_element(gdata, gdata+n, [](T a, T b) { return -a > -b; });
      REQUIRE(gdata[*min_i] == min_v);
      REQUIRE(gdata[*max_i] == max_v);
      // change the comparator (by absolute value; ties compared via abs)
      tf::cuda_min_element(
        p, gdata, gdata+n, min_i,
        []__device__(T a, T b) { return std::abs(a) < std::abs(b); },
        buff
      );
      tf::cuda_max_element(
        p, gdata, gdata+n, max_i,
        []__device__(T a, T b) { return std::abs(a) < std::abs(b); },
        buff
      );
      s.synchronize();
      min_v = *std::min_element(
        gdata, gdata+n, [](T a, T b) { return std::abs(a) < std::abs(b); }
      );
      max_v = *std::max_element(
        gdata, gdata+n, [](T a, T b) { return std::abs(a) < std::abs(b); }
      );
      REQUIRE(std::abs(gdata[*min_i]) == std::abs(min_v));
      REQUIRE(std::abs(gdata[*max_i]) == std::abs(max_v));
      // deallocate the memory
      REQUIRE(hipFree(gdata) == hipSuccess);
      REQUIRE(hipFree(min_i) == hipSuccess);
      REQUIRE(hipFree(max_i) == hipSuccess);
      REQUIRE(hipFree(buff) == hipSuccess);
    });
  }
  executor.run(taskflow).wait();
}
// Instantiate the min/max element test for int and float payloads.
TEST_CASE("cuda_min_max_element.int" * doctest::timeout(300)) {
  cuda_min_max_element<int>();
}
TEST_CASE("cuda_min_max_element.float" * doctest::timeout(300)) {
  cuda_min_max_element<float>();
}
| 1d234990c8be5fd1c8a1a2a0b6020bb3c178ce41.cu | #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cudaflow.hpp>
#include <taskflow/cuda/algorithm/find.hpp>
// ----------------------------------------------------------------------------
// cuda_min_max_element
// ----------------------------------------------------------------------------
template <typename T>
void cuda_min_max_element() {
  // Exercises tf::cuda_min_element / tf::cuda_max_element over a sweep
  // of sizes n (1..100 stepwise, then roughly doubling up to 1234567)
  // and four comparators, validating each device result against
  // std::min_element / std::max_element on the same shared-memory data.
  tf::Taskflow taskflow;
  tf::Executor executor;
  for(int n=1; n<=1234567; n = (n<=100) ? n+1 : n*2 + 1) {
    taskflow.emplace([n](){
      tf::cudaStream stream;
      tf::cudaDefaultExecutionPolicy policy(stream);
      // gpu data (shared allocations, readable from host below)
      auto gdata = tf::cuda_malloc_shared<T>(n);
      auto min_i = tf::cuda_malloc_shared<unsigned>(1);
      auto max_i = tf::cuda_malloc_shared<unsigned>(1);
      // buffer
      // NOTE(review): sized with min_element_bufsz but also reused by
      // cuda_max_element -- presumably both need the same size;
      // confirm against the taskflow find-algorithm API.
      void* buff;
      cudaMalloc(&buff, policy.min_element_bufsz<T>(n));
      for(int i=0; i<n; i++) {
        gdata[i] = rand() % 1000 - 500;
      }
      // --------------------------------------------------------------------------
      // GPU find
      // --------------------------------------------------------------------------
      tf::cudaStream s;
      tf::cudaDefaultExecutionPolicy p(s);
      tf::cuda_min_element(
        p, gdata, gdata+n, min_i, []__device__(T a, T b) { return a < b; }, buff
      );
      tf::cuda_max_element(
        p, gdata, gdata+n, max_i, []__device__(T a, T b) { return a < b; }, buff
      );
      s.synchronize();
      auto min_v = *std::min_element(gdata, gdata+n, [](T a, T b) { return a < b; });
      auto max_v = *std::max_element(gdata, gdata+n, [](T a, T b) { return a < b; });
      REQUIRE(gdata[*min_i] == min_v);
      REQUIRE(gdata[*max_i] == max_v);
      // change the comparator (reversed ordering)
      tf::cuda_min_element(
        p, gdata, gdata+n, min_i, []__device__(T a, T b) { return a > b; }, buff
      );
      tf::cuda_max_element(
        p, gdata, gdata+n, max_i, []__device__(T a, T b) { return a > b; }, buff
      );
      s.synchronize();
      min_v = *std::min_element(gdata, gdata+n, [](T a, T b) { return a > b; });
      max_v = *std::max_element(gdata, gdata+n, [](T a, T b) { return a > b; });
      REQUIRE(gdata[*min_i] == min_v);
      REQUIRE(gdata[*max_i] == max_v);
      // change the comparator (negated reversed ordering)
      tf::cuda_min_element(
        p, gdata, gdata+n, min_i, []__device__(T a, T b) { return -a > -b; }, buff
      );
      tf::cuda_max_element(
        p, gdata, gdata+n, max_i, []__device__(T a, T b) { return -a > -b; }, buff
      );
      s.synchronize();
      min_v = *std::min_element(gdata, gdata+n, [](T a, T b) { return -a > -b; });
      max_v = *std::max_element(gdata, gdata+n, [](T a, T b) { return -a > -b; });
      REQUIRE(gdata[*min_i] == min_v);
      REQUIRE(gdata[*max_i] == max_v);
      // change the comparator (by absolute value; ties compared via abs)
      tf::cuda_min_element(
        p, gdata, gdata+n, min_i,
        []__device__(T a, T b) { return std::abs(a) < std::abs(b); },
        buff
      );
      tf::cuda_max_element(
        p, gdata, gdata+n, max_i,
        []__device__(T a, T b) { return std::abs(a) < std::abs(b); },
        buff
      );
      s.synchronize();
      min_v = *std::min_element(
        gdata, gdata+n, [](T a, T b) { return std::abs(a) < std::abs(b); }
      );
      max_v = *std::max_element(
        gdata, gdata+n, [](T a, T b) { return std::abs(a) < std::abs(b); }
      );
      REQUIRE(std::abs(gdata[*min_i]) == std::abs(min_v));
      REQUIRE(std::abs(gdata[*max_i]) == std::abs(max_v));
      // deallocate the memory
      REQUIRE(cudaFree(gdata) == cudaSuccess);
      REQUIRE(cudaFree(min_i) == cudaSuccess);
      REQUIRE(cudaFree(max_i) == cudaSuccess);
      REQUIRE(cudaFree(buff) == cudaSuccess);
    });
  }
  executor.run(taskflow).wait();
}
// Instantiate the min/max element test for int and float payloads.
TEST_CASE("cuda_min_max_element.int" * doctest::timeout(300)) {
  cuda_min_max_element<int>();
}
TEST_CASE("cuda_min_max_element.float" * doctest::timeout(300)) {
  cuda_min_max_element<float>();
}
|
a30b1caba1f019d8c5b426ae9b0305176ae830b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float* var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19) {
  // Auto-generated floating-point stress kernel: repeatedly overwrites
  // `comp` with expressions of the scalar inputs and prints the final
  // value. var_9 is written at indices [0, var_3); the caller
  // allocates it via initPointer (10 elements), so presumably
  // var_3 <= 10 -- the generated harness is expected to keep it
  // in range.
  for (int i=0; i < var_1; ++i) {
    for (int i=0; i < var_2; ++i) {
      comp = floorf((+1.4947E-37f - (var_4 + atanf((+0.0f + -1.8696E34f + (var_5 + var_6))))));
      comp = expf(+1.8374E19f - -1.7572E-42f * (var_7 / var_8));
      for (int i=0; i < var_3; ++i) {
        var_9[i] = var_10 - var_11;
        comp = var_9[i] - (var_12 - var_13 * var_14 / var_15 + var_16);
      }
      if (comp < (-1.5008E36f * -0.0f * +0.0f)) {
        comp += +1.5387E-41f / (-1.6833E-42f * (+1.8753E34f * -1.3057E-42f * (+1.4579E34f * +1.9198E-6f)));
        float tmp_1 = +1.2768E-41f;
        float tmp_2 = -1.5513E34f;
        comp = tmp_2 + tmp_1 - var_17 + (+1.9675E-42f - (var_18 / (var_19 / +1.8081E-42f)));
      }
    }
  }
  printf("%.17g\n", comp);
}
float* initPointer(float v) {
  /* Allocate a 10-element float array on the host and fill every slot
     with v; the caller owns the returned buffer. */
  const int kLen = 10;
  float *buf = (float*) malloc(sizeof(float)*kLen);
  for(int idx = 0; idx < kLen; ++idx) {
    buf[idx] = v;
  }
  return buf;
}
int main(int argc, char** argv) {
  /* Program variables: all 20 kernel inputs come from the command
     line; no argc validation -- the generated harness always supplies
     all 20 arguments. */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float* tmp_10 = initPointer( atof(argv[10]) );
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  // Single-thread launch: the kernel loops internally.
  // NOTE(review): tmp_10 comes from initPointer (host malloc) yet is
  // passed to the kernel; presumably valid only where host pointers
  // are device-accessible -- TODO confirm for the target platform.
  hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20);
  hipDeviceSynchronize();
  return 0;
}
| a30b1caba1f019d8c5b426ae9b0305176ae830b8.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float* var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
comp = floorf((+1.4947E-37f - (var_4 + atanf((+0.0f + -1.8696E34f + (var_5 + var_6))))));
comp = expf(+1.8374E19f - -1.7572E-42f * (var_7 / var_8));
for (int i=0; i < var_3; ++i) {
var_9[i] = var_10 - var_11;
comp = var_9[i] - (var_12 - var_13 * var_14 / var_15 + var_16);
}
if (comp < (-1.5008E36f * -0.0f * +0.0f)) {
comp += +1.5387E-41f / (-1.6833E-42f * (+1.8753E34f * -1.3057E-42f * (+1.4579E34f * +1.9198E-6f)));
float tmp_1 = +1.2768E-41f;
float tmp_2 = -1.5513E34f;
comp = tmp_2 + tmp_1 - var_17 + (+1.9675E-42f - (var_18 / (var_19 / +1.8081E-42f)));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
  /* Allocate a 10-element float array on the host and fill every slot
     with v; the caller owns the returned buffer. */
  const int kLen = 10;
  float *buf = (float*) malloc(sizeof(float)*kLen);
  for(int idx = 0; idx < kLen; ++idx) {
    buf[idx] = v;
  }
  return buf;
}
int main(int argc, char** argv) {
  /* Program variables: all 20 kernel inputs come from the command
     line; no argc validation -- the generated harness always supplies
     all 20 arguments. */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float* tmp_10 = initPointer( atof(argv[10]) );
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  // Single-thread launch: the kernel loops internally.
  // NOTE(review): tmp_10 comes from initPointer (host malloc) yet is
  // passed to the kernel; presumably valid only where host pointers
  // are device-accessible -- TODO confirm for the target platform.
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20);
  cudaDeviceSynchronize();
  return 0;
}
|
f5a9d314ff0b4ec337ae070d555559ee916390b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/activation/bias_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Element-wise bias add: out[i] = in[i] + b[channel(i)], where the
// channel index is recovered from the flat offset via
// (i / spatial_dim) % channels.
static __global__ void forward_kernel(const int count, const int channels, const int spatial_dim, const float* in, const float * b, float* out)
{
  CUDA_KERNEL_LOOP(i, count)
  {
    const int channel = (i / spatial_dim) % channels;
    out[i] = in[i] + b[channel];
  }
}
// Accumulates the bias gradient: one thread block per channel c. Each
// block sums top_diff over all (num x spatial_dim) positions of that
// channel via a shared-memory tree reduction, then adds the block
// total into b_diff[c].
static __global__ void backward_kernel_bias(int num, int channels, int spatial_dim, const float* top_diff, float* b_diff)
{
  __shared__ float buffer[CAFFE_CUDA_NUM_THREADS];
  const int tid = threadIdx.x;
  const int c = blockIdx.x;
  // load and accumulate data on each thread
  buffer[tid] = 0;
  for (int i = tid; i < num * spatial_dim; i += blockDim.x)
  {
    // flat offset of sample (i / spatial_dim), channel c,
    // spatial position (i % spatial_dim)
    const int index = i / spatial_dim * channels * spatial_dim + c * spatial_dim + i % spatial_dim;
    buffer[tid] += top_diff[index];
  }
  __syncthreads();
  // do tree reduction
  // NOTE(review): halving assumes blockDim.x is a power of two --
  // confirm the launch always uses CAFFE_CUDA_NUM_THREADS.
  for (int s = blockDim.x / 2; s > 0; s >>= 1)
  {
    if (tid < s)
    {
      buffer[tid] += buffer[tid + s];
    }
    __syncthreads();
  }
  // save the result back (+=: gradients accumulate across calls)
  if (tid == 0)
  {
    b_diff[c] += buffer[0];
  }
}
void BiasLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
  // Launch the element-wise bias addition over the whole bottom blob.
  // (Removed the unused local `num`; the kernel only needs count,
  // channels and the spatial size.)
  int channels = bottom[0]->channels();
  int height = bottom[0]->height();
  int width = bottom[0]->width();
  hipLaunchKernelGGL(( forward_kernel), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      bottom[0]->count(), channels, height*width, bottom[0]->gpu_data(), this->blobs_[0]->gpu_data(), top[0]->mutable_gpu_data());
}
void BiasLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
  // d(out)/d(in) is the identity for a bias layer, so the bottom diff
  // is a straight copy of the top diff; the bias gradient is only
  // accumulated when the parameter is learnable and not frozen.
  int num = bottom[0]->num();
  int channels = bottom[0]->channels();
  int height = bottom[0]->height();
  int width = bottom[0]->width();
  caffe_copy(bottom[0]->count(),top[0]->gpu_diff(),bottom[0]->mutable_gpu_diff());
  if (this->lr_mult()[0] > 0 && Caffe::frozen_param() == false)
  {
    // one block per channel; see backward_kernel_bias
    hipLaunchKernelGGL(( backward_kernel_bias), dim3(channels),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num, channels, height*width, top[0]->gpu_diff(), this->blobs_[0]->mutable_gpu_diff());
  }
}
void BiasLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
  // The bias layer adds a per-channel constant, so the second-order
  // signal passes through unchanged: forward sec_diff as-is.
  // (Removed four unused locals num/channels/height/width.)
  caffe_copy(bottom[0]->count(),bottom[0]->gpu_sec_diff(),top[0]->mutable_gpu_sec_diff());
}
} // namespace caffe
| f5a9d314ff0b4ec337ae070d555559ee916390b9.cu |
#include <vector>
#include "caffe/layers/activation/bias_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Adds a per-channel bias to every element of an NCHW tensor:
// out[i] = in[i] + b[c], where c is the channel of flat index i.
static __global__ void forward_kernel(const int count, const int channels, const int spatial_dim, const float* in, const float * b, float* out)
{
CUDA_KERNEL_LOOP(index, count)
{
// Recover the channel from the flat NCHW offset.
const int channel = (index / spatial_dim) % channels;
out[index] = in[index] + b[channel];
}
}
// Accumulates the bias gradient: for the channel handled by this block,
// b_diff[c] += sum over all samples and spatial positions of top_diff.
// Launched (see Backward_gpu below in this file) with grid = channels and
// block = CAFFE_CUDA_NUM_THREADS, so blockIdx.x selects the channel.
// NOTE(review): the tree reduction assumes blockDim.x is a power of two --
// true for the CAFFE_CUDA_NUM_THREADS launch; confirm before reusing.
static __global__ void backward_kernel_bias(int num, int channels, int spatial_dim, const float* top_diff, float* b_diff)
{
__shared__ float buffer[CAFFE_CUDA_NUM_THREADS];
const int tid = threadIdx.x;
const int c = blockIdx.x;
// load and accumulate data on each thread
buffer[tid] = 0;
for (int i = tid; i < num * spatial_dim; i += blockDim.x)
{
// Map (sample, spatial position) back to the NCHW offset of channel c.
const int index = i / spatial_dim * channels * spatial_dim + c * spatial_dim + i % spatial_dim;
buffer[tid] += top_diff[index];
}
__syncthreads();
// do tree reduction
for (int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
buffer[tid] += buffer[tid + s];
}
__syncthreads();
}
// save the result back
if (tid == 0)
{
// += (not =): gradients accumulate across calls.
b_diff[c] += buffer[0];
}
}
// Forward pass on the GPU: top = bottom + per-channel bias (blobs_[0]).
void BiasLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int height = bottom[0]->height();
const int width = bottom[0]->width();
const int count = bottom[0]->count();
const int spatial_dim = height * width;
forward_kernel<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>
(count, channels, spatial_dim, bottom[0]->gpu_data(), this->blobs_[0]->gpu_data(), top[0]->mutable_gpu_data());
}
// Backward pass on the GPU: the bias add is element-wise, so the bottom
// gradient is a straight copy of the top gradient; the per-channel bias
// gradient is accumulated unless learning for this parameter is disabled.
void BiasLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int height = bottom[0]->height();
const int width = bottom[0]->width();
caffe_copy(bottom[0]->count(), top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff());
// Guard clause: skip the parameter gradient when the bias is frozen or
// its learning-rate multiplier is non-positive.
if (this->lr_mult()[0] <= 0 || Caffe::frozen_param())
return;
backward_kernel_bias<<<channels, CAFFE_CUDA_NUM_THREADS>>>
(num, channels, height * width, top[0]->gpu_diff(), this->blobs_[0]->mutable_gpu_diff());
}
// Second-order forward pass: adding a bias is the identity on sec_diff,
// so the input sec_diff is simply forwarded to the top blob.
void BiasLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
// Shape queries kept for parity with the other *_gpu methods (unused here).
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int height = bottom[0]->height();
const int width = bottom[0]->width();
caffe_copy(bottom[0]->count(), bottom[0]->gpu_sec_diff(), top[0]->mutable_gpu_sec_diff());
}
} // namespace caffe
|
7ceb806cc7bc6e740eeb21af1dcc8d861064f80d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scanKernels.cu"
#include "support_kernels.cu"
//Helper functions
//Reorders data
// Gathers real4 particle data through a permutation:
// destination[i] = source[permutation[i]].
extern "C" __global__ void dataReorderR4(const int n_particles,
real4 *source,
real4 *destination,
uint *permutation) {
// Flatten the 2D grid / 2D block into one global thread index.
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx < n_particles)
destination[idx] = source[permutation[idx]];
}
// Gathers float2 particle data through a permutation:
// destination[i] = source[permutation[i]].
extern "C" __global__ void dataReorderF2(const int n_particles,
float2 *source,
float2 *destination,
uint *permutation) {
// Flatten the 2D grid / 2D block into one global thread index.
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx < n_particles)
destination[idx] = source[permutation[idx]];
}
// Gathers int particle data through a permutation:
// destination[i] = source[permutation[i]].
extern "C" __global__ void dataReorderI1(const int n_particles,
int *source,
int *destination,
uint *permutation) {
// Flatten the 2D grid / 2D block into one global thread index.
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx < n_particles)
destination[idx] = source[permutation[idx]];
}
//Convert a 64bit key uint2 key into a 96key with a permutation value build in
// Rewrites each key so its .w word carries the element's original index;
// this lets the permutation be recovered after the keys are sorted.
extern "C" __global__ void convertKey64to96(uint4 *keys, uint4 *newKeys, const int N)
{
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx >= N) return;
const uint4 key = keys[idx];
newKeys[idx] = (uint4){key.x, key.y, key.z, idx};
}
// Splits the combined key/index element: the four words are copied to
// `keys` and the stored index (.w) is written to `permutation`.
extern "C" __global__ void extractKeyAndPerm(uint4 *newKeys, uint4 *keys, uint *permutation, const int N)
{
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx >= N) return;
const uint4 combined = newKeys[idx];
keys[idx] = (uint4){combined.x, combined.y, combined.z, combined.w};
permutation[idx] = combined.w;
}
//Extract 1 of the 4 items of an uint4 key and move it into a 32bit array
// Copies word keyIdx (0 -> .x, 1 -> .y, 2 -> .z) of every uint4 key into a
// flat 32-bit array.
// Fixed: `simpleTemp` was read uninitialized when keyIdx was outside 0..2;
// it now defaults to 0 for out-of-range keyIdx.
extern "C" __global__ void extractInt(uint4 *keys, uint *simpleKeys, const int N, int keyIdx)
{
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= N) return;
const uint4 temp = keys[idx];
uint simpleTemp = 0;  // well-defined fallback for an invalid keyIdx
switch (keyIdx) {
case 0: simpleTemp = temp.x; break;
case 1: simpleTemp = temp.y; break;
case 2: simpleTemp = temp.z; break;
}
simpleKeys[idx] = simpleTemp;
}
//Create range of 0 to N
// Writes the identity sequence: sequence[i] = i for i in [0, N).
extern "C" __global__ void fillSequence(uint *sequence, const int N)
{
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx < N)
sequence[idx] = idx;
}
//Reorder the data in the arrays according to a given permutation
// Gathers keys through a permutation: keysDest[i] = keysSrc[permutation[i]].
extern "C" __global__ void reOrderKeysValues(uint4 *keysSrc, uint4 *keysDest, uint *permutation, const int N)
{
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx < N)
keysDest[idx] = keysSrc[permutation[idx]];
}
// Counting pass of a radix-sort split step: every logical "job slot" (bid,
// derived from blockIdx.x and threadIdx.y) counts, over its slice of `valid`,
// the elements whose bit `bitIdx` is 0, and stores the total in counts[bid].
// Work is distributed in 64-element chunks (read as 32 uint2 per warp);
// blocks below sParam.blocksWithExtraJobs take one extra chunk. The leftover
// tail of sParam.extraElements elements is counted by bid == 0 only and
// stored in counts[gridDim.x*blockDim.y].
// NOTE(review): the __syncthreads() calls inside the `if(bid == 0)` branch
// are reached only by the threadIdx.y == 0 warp of block 0 -- a divergent
// barrier. This appears to rely on pre-Volta warp-synchronous behaviour with
// blockDim.x == 32; confirm before running on newer architectures.
extern "C" __global__ void sort_count(volatile uint2 *valid, int *counts, const int N, setupParams sParam, int bitIdx/*, int2 *blaat*/)
{
const int tid = threadIdx.x;
const int bid = blockDim.y * blockIdx.x + threadIdx.y;
int totalNumThreads = gridDim.x*blockDim.y*blockDim.x; //120*4*32 // gridDim.x * blockDim.y; //2D !!!!
volatile __shared__ int shmemSC[128];
volatile __shared__ int shmemSCTEST[128];
//Determine the parameters and loop over the particles
int jobSize = (N / 2) / totalNumThreads;
int offSet = jobSize * bid;
int count = 0;
// Initial jobSize/offSet above are overwritten: the real distribution
// comes from sParam (jobs per slot, plus one extra for the first slots).
jobSize = sParam.jobs;
if(bid < sParam.blocksWithExtraJobs)
jobSize++;
if(bid <= sParam.blocksWithExtraJobs)
offSet = (sParam.jobs+1)*64*bid;
else
{
offSet = sParam.blocksWithExtraJobs*(sParam.jobs+1)*64;
offSet += (bid-sParam.blocksWithExtraJobs)*(sParam.jobs)*64;
}
offSet /= 2; //Divide by two since we do double loads (uint2)
for(int i=0; i < jobSize; i++)
{
// A zero bit at bitIdx counts as "valid" (goes to the left partition).
count += !(valid[offSet + tid].x & (1u<<bitIdx));
count += !(valid[offSet + tid].y & (1u<<bitIdx));
offSet += blockDim.x;
}
//Reduce to get the count of this block
shmemSC[32*threadIdx.y + tid] = count;
reduce_block2(tid, &shmemSC[32*threadIdx.y], count);
//Save the values / count of the current block
if(threadIdx.x == 0)
counts[bid] = shmemSC[32*threadIdx.y];
//Block 0 handles any extra elements that couldn't be divided equally
if(bid == 0)
{
//Here i use single element reads for ease of boundary conditions and steps
count = 0;
offSet = sParam.extraOffset;
uint* valid2 = (uint*) valid;
for(int i=0 ; i < sParam.extraElements; i += blockDim.x)
{
if((offSet + i + tid) < (N)) //Make sure we dont read more than there are items
{
count += !(valid2[offSet + i + tid] & (1u<<bitIdx));
}
}
//Reduce
shmemSCTEST[tid] = count;
__syncthreads();
// Unrolled warp-level reduction over 32 partial counts.
if(tid < 16){
shmemSCTEST[tid] = count = count + shmemSCTEST[tid+16];
shmemSCTEST[tid] = count = count + shmemSCTEST[tid+8];
shmemSCTEST[tid] = count = count + shmemSCTEST[tid+4];
shmemSCTEST[tid] = count = count + shmemSCTEST[tid+2];
shmemSCTEST[tid] = count = count + shmemSCTEST[tid+1];
}
//Save the count
if(tid == 0)
{
counts[gridDim.x*blockDim.y] = shmemSCTEST[0];
}
__syncthreads();
}//end if bid==0
}//end compact_count
// __device__ __forceinline__ int testTest(volatile unsigned int tmp[], uint val, const int idx, long test)
// {
// tmp[idx-16] = 0; tmp[idx] = val;
//
// // Since we set half the array to 0 we don't need ifs!
// tmp[idx] = val = tmp[idx - 1] + val;
// tmp[idx] = val = tmp[idx - 2] + val;
// tmp[idx] = val = tmp[idx - 4] + val;
// tmp[idx] = val = tmp[idx - 8] + val;
// tmp[idx] = val = tmp[idx - 16] + val;
//
// return (idx > 0) ? tmp[idx-1] : 0;
// }
/*
For sorting it turns out that the stage kernels works faster than the non-staged
Might depend on how much has to be sorted/moved, have to do timings in the actual code
*/
// Scatter pass of the radix-sort split step. Using the per-slot offsets
// produced by sort_count (in `counts`), each job slot stably partitions its
// slice of (valid, srcValues) pairs: elements with bit `bitIdx` == 0 are
// written to the left partition starting at counts[bid], the rest to the
// right partition; writes are staged through shared memory so the global
// stores stay coalesced. Slot 0 additionally handles the tail of
// sParam.extraElements elements.
// NOTE(review): `curCount` is read after each hillisSteele5 call and is
// presumably written by that helper (defined in scanKernels.cu) -- if it is
// not an output parameter this reads an uninitialized value; confirm.
extern "C" __global__ void sort_move_stage_key_value(uint2 *valid, int *output,
uint2 *srcValues, uint *valuesOut,
int *counts,
const int N, setupParams sParam, int bitIdx)
{
//Walk the values of this block
const int tid = threadIdx.x;
const int bid = blockDim.y * blockIdx.x + threadIdx.y;
volatile __shared__ unsigned int shmemSMSKV[192];
volatile __shared__ int stage[64*4];
volatile __shared__ int stage_values[64*4];
//Determine the parameters and loop over the particles
// Same 64-element chunk distribution as in sort_count.
int jobSize, offSet;
jobSize = sParam.jobs;
if(bid < sParam.blocksWithExtraJobs)
jobSize++;
if(bid <= sParam.blocksWithExtraJobs)
offSet = (sParam.jobs+1)*64*bid;
else
{
offSet = sParam.blocksWithExtraJobs*(sParam.jobs+1)*64;
offSet += (bid-sParam.blocksWithExtraJobs)*(sParam.jobs)*64;
}
int outputOffset = counts[bid];
//Get the start of the output offset of the invalid items
//this is calculated as follows:
//totalValidItems + startReadOffset - startOutputOffset
//startReadOffset - startOutputOffset <- is the total number of invalid items from any blocks
//before the current block
int rightOutputOffset = counts[gridDim.x*blockDim.y+1];
rightOutputOffset = rightOutputOffset + offSet - outputOffset;
offSet /= 2; //Divide by two since we do double loads (uint2) TODO what happens if offSet is uneven...?
int curCount;
int idx, ridx;
outputOffset += threadIdx.x;
rightOutputOffset += threadIdx.x;
//Do per step the prefix scan to determine the output locations
for(int i=0; i < jobSize; i++)
{
uint2 validBase = valid[offSet + tid];
uint2 valuesBase = srcValues[offSet + tid];
// Per-thread count of left-partition elements among this pair.
int value = !(validBase.x & (1u<<bitIdx));
value += !(validBase.y & (1u<<bitIdx));
// Exclusive prefix scan over the warp; curCount gets the warp total.
idx = hillisSteele5(&shmemSMSKV[48*threadIdx.y+16], curCount, value, threadIdx.x);
ridx = curCount + threadIdx.x*2 - idx; //lane*2 - idx , *2 since we read 2 items a time
if(!(validBase.x & (1u<<bitIdx)))
{
stage[idx + threadIdx.y*64] = validBase.x;
stage_values[idx++ + threadIdx.y*64] = valuesBase.x;
}
else
{
stage[ridx + threadIdx.y*64] = validBase.x;
stage_values[ridx++ + threadIdx.y*64] = valuesBase.x;
}
if(!(validBase.y & (1u<<bitIdx)))
{
stage[idx + threadIdx.y*64] = validBase.y;
stage_values[idx + threadIdx.y*64] = valuesBase.y;
}
else
{
stage[ridx + threadIdx.y*64] = validBase.y;
stage_values[ridx + threadIdx.y*64] = valuesBase.y;
}
//Reuse value as index
value = outputOffset;
//Flush output, first 32
if(threadIdx.x >= curCount)
value = rightOutputOffset-curCount;
output[value] = stage [threadIdx.x + threadIdx.y*64];
valuesOut[value] = stage_values[threadIdx.x + threadIdx.y*64];
//2nd 32
value = outputOffset + blockDim.x;
if(threadIdx.x + blockDim.x >= curCount)
value = rightOutputOffset + blockDim.x - curCount;
output[value] = stage [threadIdx.x + blockDim.x + threadIdx.y*64];
valuesOut[value] = stage_values[threadIdx.x + blockDim.x + threadIdx.y*64];
outputOffset += curCount; //Increase the output offset
rightOutputOffset += 64 - curCount; //64 (32*2) since we do 2 items a time
offSet += blockDim.x; //Step to the next N threads
}
//Block 0 handles any extra elements that couldn't be divided equally
if(bid == 0)
{
//Here i use single element reads for ease of boundary conditions and steps
offSet = sParam.extraOffset;
outputOffset = counts[gridDim.x*blockDim.y];
rightOutputOffset = counts[gridDim.x*blockDim.y+1];
rightOutputOffset = rightOutputOffset + offSet - outputOffset;
uint* valid2 = (uint*) valid;
uint* srcValues2 = (uint*) srcValues;
for(int i=0; i < sParam.extraElements; i += blockDim.x)
{
uint value = 0;
uint srcValueItem = 0;
if((offSet + i + tid) < (N)){ //Make sure we dont read more than there are items
value = valid2[offSet + i + tid];
srcValueItem = srcValues2[offSet + i + tid];
}
idx = hillisSteele5(&shmemSMSKV[48*threadIdx.y+16], curCount, !(value & (1u<<bitIdx)), threadIdx.x);
ridx = threadIdx.x - idx;
if((offSet + i + tid) < N)
if(!(value & (1u<<bitIdx)))
{
output[idx + outputOffset] = value;
valuesOut[idx + outputOffset] = srcValueItem;
}
else
{
output[ridx + rightOutputOffset] = value;
valuesOut[ridx + rightOutputOffset] = srcValueItem;
}
outputOffset += curCount; //Increase the output offset
rightOutputOffset += 32-curCount; //32 since we do only 1 at a time
}
}//end if bid==0
}//end sort_move_stage_key_value
| 7ceb806cc7bc6e740eeb21af1dcc8d861064f80d.cu | #include "scanKernels.cu"
#include "support_kernels.cu"
//Helper functions
//Reorders data
// Gathers real4 particle data through a permutation:
// destination[i] = source[permutation[i]].
extern "C" __global__ void dataReorderR4(const int n_particles,
real4 *source,
real4 *destination,
uint *permutation) {
// Flatten the 2D grid / 2D block into one global thread index.
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx < n_particles)
destination[idx] = source[permutation[idx]];
}
// Gathers float2 particle data through a permutation:
// destination[i] = source[permutation[i]].
extern "C" __global__ void dataReorderF2(const int n_particles,
float2 *source,
float2 *destination,
uint *permutation) {
// Flatten the 2D grid / 2D block into one global thread index.
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx < n_particles)
destination[idx] = source[permutation[idx]];
}
// Gathers int particle data through a permutation:
// destination[i] = source[permutation[i]].
extern "C" __global__ void dataReorderI1(const int n_particles,
int *source,
int *destination,
uint *permutation) {
// Flatten the 2D grid / 2D block into one global thread index.
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx < n_particles)
destination[idx] = source[permutation[idx]];
}
//Convert a 64bit key uint2 key into a 96key with a permutation value build in
// Rewrites each key so its .w word carries the element's original index;
// this lets the permutation be recovered after the keys are sorted.
extern "C" __global__ void convertKey64to96(uint4 *keys, uint4 *newKeys, const int N)
{
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx >= N) return;
const uint4 key = keys[idx];
newKeys[idx] = (uint4){key.x, key.y, key.z, idx};
}
// Splits the combined key/index element: the four words are copied to
// `keys` and the stored index (.w) is written to `permutation`.
extern "C" __global__ void extractKeyAndPerm(uint4 *newKeys, uint4 *keys, uint *permutation, const int N)
{
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx >= N) return;
const uint4 combined = newKeys[idx];
keys[idx] = (uint4){combined.x, combined.y, combined.z, combined.w};
permutation[idx] = combined.w;
}
//Extract 1 of the 4 items of an uint4 key and move it into a 32bit array
// Copies word keyIdx (0 -> .x, 1 -> .y, 2 -> .z) of every uint4 key into a
// flat 32-bit array.
// Fixed: `simpleTemp` was read uninitialized when keyIdx was outside 0..2;
// it now defaults to 0 for out-of-range keyIdx.
extern "C" __global__ void extractInt(uint4 *keys, uint *simpleKeys, const int N, int keyIdx)
{
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int dim = blockDim.x * blockDim.y;
int idx = bid * dim + tid;
if (idx >= N) return;
const uint4 temp = keys[idx];
uint simpleTemp = 0;  // well-defined fallback for an invalid keyIdx
switch (keyIdx) {
case 0: simpleTemp = temp.x; break;
case 1: simpleTemp = temp.y; break;
case 2: simpleTemp = temp.z; break;
}
simpleKeys[idx] = simpleTemp;
}
//Create range of 0 to N
// Writes the identity sequence: sequence[i] = i for i in [0, N).
extern "C" __global__ void fillSequence(uint *sequence, const int N)
{
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx < N)
sequence[idx] = idx;
}
//Reorder the data in the arrays according to a given permutation
// Gathers keys through a permutation: keysDest[i] = keysSrc[permutation[i]].
extern "C" __global__ void reOrderKeysValues(uint4 *keysSrc, uint4 *keysDest, uint *permutation, const int N)
{
const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
const int threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = blockId * (blockDim.x * blockDim.y) + threadInBlock;
if (idx < N)
keysDest[idx] = keysSrc[permutation[idx]];
}
// Counting pass of a radix-sort split step: every logical "job slot" (bid,
// derived from blockIdx.x and threadIdx.y) counts, over its slice of `valid`,
// the elements whose bit `bitIdx` is 0, and stores the total in counts[bid].
// Work is distributed in 64-element chunks (read as 32 uint2 per warp);
// blocks below sParam.blocksWithExtraJobs take one extra chunk. The leftover
// tail of sParam.extraElements elements is counted by bid == 0 only and
// stored in counts[gridDim.x*blockDim.y].
// NOTE(review): the __syncthreads() calls inside the `if(bid == 0)` branch
// are reached only by the threadIdx.y == 0 warp of block 0 -- a divergent
// barrier. This appears to rely on pre-Volta warp-synchronous behaviour with
// blockDim.x == 32; confirm before running on newer architectures.
extern "C" __global__ void sort_count(volatile uint2 *valid, int *counts, const int N, setupParams sParam, int bitIdx/*, int2 *blaat*/)
{
const int tid = threadIdx.x;
const int bid = blockDim.y * blockIdx.x + threadIdx.y;
int totalNumThreads = gridDim.x*blockDim.y*blockDim.x; //120*4*32 // gridDim.x * blockDim.y; //2D !!!!
volatile __shared__ int shmemSC[128];
volatile __shared__ int shmemSCTEST[128];
//Determine the parameters and loop over the particles
int jobSize = (N / 2) / totalNumThreads;
int offSet = jobSize * bid;
int count = 0;
// Initial jobSize/offSet above are overwritten: the real distribution
// comes from sParam (jobs per slot, plus one extra for the first slots).
jobSize = sParam.jobs;
if(bid < sParam.blocksWithExtraJobs)
jobSize++;
if(bid <= sParam.blocksWithExtraJobs)
offSet = (sParam.jobs+1)*64*bid;
else
{
offSet = sParam.blocksWithExtraJobs*(sParam.jobs+1)*64;
offSet += (bid-sParam.blocksWithExtraJobs)*(sParam.jobs)*64;
}
offSet /= 2; //Divide by two since we do double loads (uint2)
for(int i=0; i < jobSize; i++)
{
// A zero bit at bitIdx counts as "valid" (goes to the left partition).
count += !(valid[offSet + tid].x & (1u<<bitIdx));
count += !(valid[offSet + tid].y & (1u<<bitIdx));
offSet += blockDim.x;
}
//Reduce to get the count of this block
shmemSC[32*threadIdx.y + tid] = count;
reduce_block2(tid, &shmemSC[32*threadIdx.y], count);
//Save the values / count of the current block
if(threadIdx.x == 0)
counts[bid] = shmemSC[32*threadIdx.y];
//Block 0 handles any extra elements that couldn't be divided equally
if(bid == 0)
{
//Here i use single element reads for ease of boundary conditions and steps
count = 0;
offSet = sParam.extraOffset;
uint* valid2 = (uint*) valid;
for(int i=0 ; i < sParam.extraElements; i += blockDim.x)
{
if((offSet + i + tid) < (N)) //Make sure we dont read more than there are items
{
count += !(valid2[offSet + i + tid] & (1u<<bitIdx));
}
}
//Reduce
shmemSCTEST[tid] = count;
__syncthreads();
// Unrolled warp-level reduction over 32 partial counts.
if(tid < 16){
shmemSCTEST[tid] = count = count + shmemSCTEST[tid+16];
shmemSCTEST[tid] = count = count + shmemSCTEST[tid+8];
shmemSCTEST[tid] = count = count + shmemSCTEST[tid+4];
shmemSCTEST[tid] = count = count + shmemSCTEST[tid+2];
shmemSCTEST[tid] = count = count + shmemSCTEST[tid+1];
}
//Save the count
if(tid == 0)
{
counts[gridDim.x*blockDim.y] = shmemSCTEST[0];
}
__syncthreads();
}//end if bid==0
}//end compact_count
// __device__ __forceinline__ int testTest(volatile unsigned int tmp[], uint val, const int idx, long test)
// {
// tmp[idx-16] = 0; tmp[idx] = val;
//
// // Since we set half the array to 0 we don't need ifs!
// tmp[idx] = val = tmp[idx - 1] + val;
// tmp[idx] = val = tmp[idx - 2] + val;
// tmp[idx] = val = tmp[idx - 4] + val;
// tmp[idx] = val = tmp[idx - 8] + val;
// tmp[idx] = val = tmp[idx - 16] + val;
//
// return (idx > 0) ? tmp[idx-1] : 0;
// }
/*
For sorting it turns out that the stage kernels works faster than the non-staged
Might depend on how much has to be sorted/moved, have to do timings in the actual code
*/
// Scatter pass of the radix-sort split step. Using the per-slot offsets
// produced by sort_count (in `counts`), each job slot stably partitions its
// slice of (valid, srcValues) pairs: elements with bit `bitIdx` == 0 are
// written to the left partition starting at counts[bid], the rest to the
// right partition; writes are staged through shared memory so the global
// stores stay coalesced. Slot 0 additionally handles the tail of
// sParam.extraElements elements.
// NOTE(review): `curCount` is read after each hillisSteele5 call and is
// presumably written by that helper (defined in scanKernels.cu) -- if it is
// not an output parameter this reads an uninitialized value; confirm.
extern "C" __global__ void sort_move_stage_key_value(uint2 *valid, int *output,
uint2 *srcValues, uint *valuesOut,
int *counts,
const int N, setupParams sParam, int bitIdx)
{
//Walk the values of this block
const int tid = threadIdx.x;
const int bid = blockDim.y * blockIdx.x + threadIdx.y;
volatile __shared__ unsigned int shmemSMSKV[192];
volatile __shared__ int stage[64*4];
volatile __shared__ int stage_values[64*4];
//Determine the parameters and loop over the particles
// Same 64-element chunk distribution as in sort_count.
int jobSize, offSet;
jobSize = sParam.jobs;
if(bid < sParam.blocksWithExtraJobs)
jobSize++;
if(bid <= sParam.blocksWithExtraJobs)
offSet = (sParam.jobs+1)*64*bid;
else
{
offSet = sParam.blocksWithExtraJobs*(sParam.jobs+1)*64;
offSet += (bid-sParam.blocksWithExtraJobs)*(sParam.jobs)*64;
}
int outputOffset = counts[bid];
//Get the start of the output offset of the invalid items
//this is calculated as follows:
//totalValidItems + startReadOffset - startOutputOffset
//startReadOffset - startOutputOffset <- is the total number of invalid items from any blocks
//before the current block
int rightOutputOffset = counts[gridDim.x*blockDim.y+1];
rightOutputOffset = rightOutputOffset + offSet - outputOffset;
offSet /= 2; //Divide by two since we do double loads (uint2) TODO what happens if offSet is uneven...?
int curCount;
int idx, ridx;
outputOffset += threadIdx.x;
rightOutputOffset += threadIdx.x;
//Do per step the prefix scan to determine the output locations
for(int i=0; i < jobSize; i++)
{
uint2 validBase = valid[offSet + tid];
uint2 valuesBase = srcValues[offSet + tid];
// Per-thread count of left-partition elements among this pair.
int value = !(validBase.x & (1u<<bitIdx));
value += !(validBase.y & (1u<<bitIdx));
// Exclusive prefix scan over the warp; curCount gets the warp total.
idx = hillisSteele5(&shmemSMSKV[48*threadIdx.y+16], curCount, value, threadIdx.x);
ridx = curCount + threadIdx.x*2 - idx; //lane*2 - idx , *2 since we read 2 items a time
if(!(validBase.x & (1u<<bitIdx)))
{
stage[idx + threadIdx.y*64] = validBase.x;
stage_values[idx++ + threadIdx.y*64] = valuesBase.x;
}
else
{
stage[ridx + threadIdx.y*64] = validBase.x;
stage_values[ridx++ + threadIdx.y*64] = valuesBase.x;
}
if(!(validBase.y & (1u<<bitIdx)))
{
stage[idx + threadIdx.y*64] = validBase.y;
stage_values[idx + threadIdx.y*64] = valuesBase.y;
}
else
{
stage[ridx + threadIdx.y*64] = validBase.y;
stage_values[ridx + threadIdx.y*64] = valuesBase.y;
}
//Reuse value as index
value = outputOffset;
//Flush output, first 32
if(threadIdx.x >= curCount)
value = rightOutputOffset-curCount;
output[value] = stage [threadIdx.x + threadIdx.y*64];
valuesOut[value] = stage_values[threadIdx.x + threadIdx.y*64];
//2nd 32
value = outputOffset + blockDim.x;
if(threadIdx.x + blockDim.x >= curCount)
value = rightOutputOffset + blockDim.x - curCount;
output[value] = stage [threadIdx.x + blockDim.x + threadIdx.y*64];
valuesOut[value] = stage_values[threadIdx.x + blockDim.x + threadIdx.y*64];
outputOffset += curCount; //Increase the output offset
rightOutputOffset += 64 - curCount; //64 (32*2) since we do 2 items a time
offSet += blockDim.x; //Step to the next N threads
}
//Block 0 handles any extra elements that couldn't be divided equally
if(bid == 0)
{
//Here i use single element reads for ease of boundary conditions and steps
offSet = sParam.extraOffset;
outputOffset = counts[gridDim.x*blockDim.y];
rightOutputOffset = counts[gridDim.x*blockDim.y+1];
rightOutputOffset = rightOutputOffset + offSet - outputOffset;
uint* valid2 = (uint*) valid;
uint* srcValues2 = (uint*) srcValues;
for(int i=0; i < sParam.extraElements; i += blockDim.x)
{
uint value = 0;
uint srcValueItem = 0;
if((offSet + i + tid) < (N)){ //Make sure we dont read more than there are items
value = valid2[offSet + i + tid];
srcValueItem = srcValues2[offSet + i + tid];
}
idx = hillisSteele5(&shmemSMSKV[48*threadIdx.y+16], curCount, !(value & (1u<<bitIdx)), threadIdx.x);
ridx = threadIdx.x - idx;
if((offSet + i + tid) < N)
if(!(value & (1u<<bitIdx)))
{
output[idx + outputOffset] = value;
valuesOut[idx + outputOffset] = srcValueItem;
}
else
{
output[ridx + rightOutputOffset] = value;
valuesOut[ridx + rightOutputOffset] = srcValueItem;
}
outputOffset += curCount; //Increase the output offset
rightOutputOffset += 32-curCount; //32 since we do only 1 at a time
}
}//end if bid==0
}//end sort_move_stage_key_value
|
7cbbd8050b166f6d5b9b89443d43483cd4df2c65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reversi.cuh"
/**
* A Warp is a logical unit that shares a pc
* Keep thread blocks as large as possible without increasing divergence
*/
// Returns the 2-bit cell value stored at (row, column), or 3 when the board
// pointer is NULL. Cells are packed two bits per cell, row-major.
__device__ uint8_t board_get_cuda(board b, uint8_t row, uint8_t column) {
if(!b) return 3;
const uint8_t total_bit = (row * (b->width << 1)) + (column << 1);
const uint8_t byte = total_bit >> 3;
const uint8_t bit = total_bit % 8;
// Mask out the two bits of this cell and shift them down to 0..3.
return ((192 >> bit) & b->board[byte]) >> (6 - bit);
}
// Stores `player` (0 = empty, 1, or 2) in the 2-bit cell at (row, column).
// No-op when the board pointer is NULL.
__device__ void board_put_cuda(board b, uint8_t row, uint8_t column, uint8_t player) {
if(!b) return;
const uint8_t total_bit = (row * (b->width << 1)) + (column << 1);
const uint8_t byte = total_bit >> 3;
const uint8_t bit = total_bit % 8;
const uint8_t mask = 192 >> bit;  // the two bits of this cell
// Clear the cell, then set the player's bit pattern (1 -> 01, else -> 10).
b->board[byte] = (b->board[byte] | mask) ^ mask;
if(player) b->board[byte] |= ((player == 1) ? 64 : 128) >> bit;
}
/**
 * Returns nonzero when placing the current player's piece at (row, column)
 * would capture at least one opposing piece; 0 for a NULL board, an
 * out-of-range coordinate, or an occupied cell.
 *
 * Fixed: this was declared __global__, but CUDA/HIP kernels must return
 * void, so a value-returning function usable from device code has to be
 * __device__. Also dropped the unused local `operating`.
 */
__device__ uint8_t board_is_legal_move_cuda(board b, uint8_t row, uint8_t column) {
if(b && row >= 0 && row < b->height && column >= 0 && column < b->width) {
if(!board_get_cuda(b, row, column)) {
// Check each of the 8 directions going out from the requested coordinate
// Keep track of how many captures we have
int8_t counts = 0, cr, cc, count, bv;
for(int8_t rd = -1; rd < 2; rd++) {
for(int8_t cd = -1; cd < 2; cd++) {
// Avoid infinite loop when rd=cd=0
if(!rd && !cd) continue;
// Take a step in the current direction
cr = row + rd;
cc = column + cd;
count = 0;
while(cr >= 0 && cr < b->height && cc >= 0 && cc < b->width) {
bv = board_get_cuda(b, cr, cc);
if(bv && bv != b->player) {
// There is a possible capture
count++;
// Take another step in the current direction
cr += rd;
cc += cd;
if((cr == b->height && rd) ||
(cr < 0 && rd == -1) ||
(cc == b->width && cd) ||
(cc < 0 && cd < 0)) {
// We hit the edge of the board, this is not a capture
count = 0;
break;
}
}
else {
if(!bv)
// If we had any captures, they are in vain because our color isn't at the other end.
count = 0;
break;
}
}
counts += count;
}
}
// Return true if we capture at least 1 piece
return counts > 0;
}
}
// Either the board pointer was empty, or the space was already filled.
return 0;
} | 7cbbd8050b166f6d5b9b89443d43483cd4df2c65.cu | #include "reversi.cuh"
/**
* A Warp is a logical unit that shares a pc
* Keep thread blocks as large as possible without increasing divergence
*/
// Returns the 2-bit cell value stored at (row, column), or 3 when the board
// pointer is NULL. Cells are packed two bits per cell, row-major.
__device__ uint8_t board_get_cuda(board b, uint8_t row, uint8_t column) {
if(!b) return 3;
const uint8_t total_bit = (row * (b->width << 1)) + (column << 1);
const uint8_t byte = total_bit >> 3;
const uint8_t bit = total_bit % 8;
// Mask out the two bits of this cell and shift them down to 0..3.
return ((192 >> bit) & b->board[byte]) >> (6 - bit);
}
// Stores `player` (0 = empty, 1, or 2) in the 2-bit cell at (row, column).
// No-op when the board pointer is NULL.
__device__ void board_put_cuda(board b, uint8_t row, uint8_t column, uint8_t player) {
if(!b) return;
const uint8_t total_bit = (row * (b->width << 1)) + (column << 1);
const uint8_t byte = total_bit >> 3;
const uint8_t bit = total_bit % 8;
const uint8_t mask = 192 >> bit;  // the two bits of this cell
// Clear the cell, then set the player's bit pattern (1 -> 01, else -> 10).
b->board[byte] = (b->board[byte] | mask) ^ mask;
if(player) b->board[byte] |= ((player == 1) ? 64 : 128) >> bit;
}
/**
 * Returns nonzero when placing the current player's piece at (row, column)
 * would capture at least one opposing piece; 0 for a NULL board, an
 * out-of-range coordinate, or an occupied cell.
 *
 * Fixed: this was declared __global__, but CUDA kernels must return void,
 * so a value-returning function usable from device code has to be
 * __device__. Also dropped the unused local `operating`.
 */
__device__ uint8_t board_is_legal_move_cuda(board b, uint8_t row, uint8_t column) {
if(b && row >= 0 && row < b->height && column >= 0 && column < b->width) {
if(!board_get_cuda(b, row, column)) {
// Check each of the 8 directions going out from the requested coordinate
// Keep track of how many captures we have
int8_t counts = 0, cr, cc, count, bv;
for(int8_t rd = -1; rd < 2; rd++) {
for(int8_t cd = -1; cd < 2; cd++) {
// Avoid infinite loop when rd=cd=0
if(!rd && !cd) continue;
// Take a step in the current direction
cr = row + rd;
cc = column + cd;
count = 0;
while(cr >= 0 && cr < b->height && cc >= 0 && cc < b->width) {
bv = board_get_cuda(b, cr, cc);
if(bv && bv != b->player) {
// There is a possible capture
count++;
// Take another step in the current direction
cr += rd;
cc += cd;
if((cr == b->height && rd) ||
(cr < 0 && rd == -1) ||
(cc == b->width && cd) ||
(cc < 0 && cd < 0)) {
// We hit the edge of the board, this is not a capture
count = 0;
break;
}
}
else {
if(!bv)
// If we had any captures, they are in vain because our color isn't at the other end.
count = 0;
break;
}
}
counts += count;
}
}
// Return true if we capture at least 1 piece
return counts > 0;
}
}
// Either the board pointer was empty, or the space was already filled.
return 0;
} |
7479a2c2dbad77ad9342c71cfde5538f8e99044c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include "random.h"
using namespace std;
// In-place inclusive (Hillis-Steele) prefix sum over h_array on the CPU:
// after the call, h_array[i] == sum of the original elements [0..i].
// Fixed: the scratch buffer was leaked (new[] without delete[]).
void hillisSteeleScan(int *h_array,int numberOfElements)
{
int *h_tmpArray = new int[numberOfElements];
for(int i=0;i<numberOfElements;i++)
{
h_tmpArray[i] = h_array[i];
}
// Each pass adds the element j positions to the left, doubling j.
for(int j=1;j<numberOfElements;j=j*2)
{
for(int i=0;i<numberOfElements;i++)
{
if(i-j>=0)
{
h_tmpArray[i]= h_tmpArray[i] + h_array[i-j];
}
}
for(int i=0;i<numberOfElements;i++)
{
h_array[i] = h_tmpArray[i];
}
}
delete[] h_tmpArray;  // fix: release the scratch buffer
}
// One Hillis-Steele scan step: for every index >= moveIndex,
// d_array[index] += d_array[index - moveIndex], staged through d_tmpArray.
// Fixed: the bounds guard used `>`, so index == numberOfElements read and
// wrote one element past the end of both arrays.
// NOTE(review): threads read d_array[index - moveIndex] while other threads
// write d_array in the same launch; with more than one block this is a data
// race -- a robust fix needs double buffering across launches. Confirm
// against the host driver before relying on the results.
__global__ void hillisSteeleScanDevice(int *d_array , int numberOfElements, int *d_tmpArray,int moveIndex)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
if(index >= numberOfElements)
{
return;
}
if(index - moveIndex >=0)
{
d_tmpArray[index] = d_array[index];
d_tmpArray[index] = d_tmpArray[index] +d_array[index - moveIndex];
d_array[index] = d_tmpArray[index];
}
}
// Runs the inclusive Hillis-Steele scan on the GPU: copies h_array to the
// device, launches one kernel per scan step (j doubling each pass), and
// copies the result back in place.
// Fixed: both device buffers were leaked (no hipFree).
// NOTE(review): hip* return codes are ignored here, as in the original --
// consider checking them.
void hillisSteeleScanHost(int *h_array,int numberOfElements)
{
int *d_array;
hipMalloc(&d_array,sizeof(int)*numberOfElements);
hipMemcpy(d_array,h_array,sizeof(int)*numberOfElements,hipMemcpyHostToDevice);
int *d_tmpArray;
hipMalloc(&d_tmpArray,sizeof(int)*numberOfElements);
for(int j=1;j<numberOfElements;j= j*2)
{
hipLaunchKernelGGL(( hillisSteeleScanDevice), dim3(1600),dim3(500), 0, 0, d_array,numberOfElements,d_tmpArray, j);
}
hipMemcpy(h_array,d_array ,sizeof(int)*numberOfElements,hipMemcpyDeviceToHost);
hipFree(d_array);     // fix: release the device buffers
hipFree(d_tmpArray);
}
// Reads a count from stdin, fills an array with values (see random.h),
// scans it on the GPU, and prints the prefix sums one per line.
// Fixed: h_array was leaked.
int main()
{
cout<<"enter the number of numbers ";
int numberOfElements;
cin>>numberOfElements;
int *h_array = new int[numberOfElements];
class random a(h_array,numberOfElements);
hillisSteeleScanHost(h_array,numberOfElements);
for(int i=0;i<numberOfElements;i++)
{
cout<<h_array[i]<<"\n";
}
delete[] h_array;  // fix: release the host buffer
return 0;
}
| 7479a2c2dbad77ad9342c71cfde5538f8e99044c.cu | #include<iostream>
#include "random.h"
using namespace std;
// In-place inclusive (Hillis-Steele) prefix sum over h_array on the CPU:
// after the call, h_array[i] == sum of the original elements [0..i].
// Fixed: the scratch buffer was leaked (new[] without delete[]).
void hillisSteeleScan(int *h_array,int numberOfElements)
{
int *h_tmpArray = new int[numberOfElements];
for(int i=0;i<numberOfElements;i++)
{
h_tmpArray[i] = h_array[i];
}
// Each pass adds the element j positions to the left, doubling j.
for(int j=1;j<numberOfElements;j=j*2)
{
for(int i=0;i<numberOfElements;i++)
{
if(i-j>=0)
{
h_tmpArray[i]= h_tmpArray[i] + h_array[i-j];
}
}
for(int i=0;i<numberOfElements;i++)
{
h_array[i] = h_tmpArray[i];
}
}
delete[] h_tmpArray;  // fix: release the scratch buffer
}
// One Hillis-Steele scan step: for every index >= moveIndex,
// d_array[index] += d_array[index - moveIndex], staged through d_tmpArray.
// Fixed: the bounds guard used `>`, so index == numberOfElements read and
// wrote one element past the end of both arrays.
// NOTE(review): threads read d_array[index - moveIndex] while other threads
// write d_array in the same launch; with more than one block this is a data
// race -- a robust fix needs double buffering across launches. Confirm
// against the host driver before relying on the results.
__global__ void hillisSteeleScanDevice(int *d_array , int numberOfElements, int *d_tmpArray,int moveIndex)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
if(index >= numberOfElements)
{
return;
}
if(index - moveIndex >=0)
{
d_tmpArray[index] = d_array[index];
d_tmpArray[index] = d_tmpArray[index] +d_array[index - moveIndex];
d_array[index] = d_tmpArray[index];
}
}
// Runs the inclusive Hillis-Steele scan on the GPU: copies h_array to the
// device, launches one kernel per scan step (j doubling each pass), and
// copies the result back in place.
// Fixed: both device buffers were leaked (no cudaFree).
// NOTE(review): cuda* return codes are ignored here, as in the original --
// consider checking them.
void hillisSteeleScanHost(int *h_array,int numberOfElements)
{
int *d_array;
cudaMalloc(&d_array,sizeof(int)*numberOfElements);
cudaMemcpy(d_array,h_array,sizeof(int)*numberOfElements,cudaMemcpyHostToDevice);
int *d_tmpArray;
cudaMalloc(&d_tmpArray,sizeof(int)*numberOfElements);
for(int j=1;j<numberOfElements;j= j*2)
{
hillisSteeleScanDevice<<<1600,500>>>(d_array,numberOfElements,d_tmpArray, j);
}
cudaMemcpy(h_array,d_array ,sizeof(int)*numberOfElements,cudaMemcpyDeviceToHost);
cudaFree(d_array);     // fix: release the device buffers
cudaFree(d_tmpArray);
}
// Reads a count from stdin, fills an array with values (see random.h),
// scans it on the GPU, and prints the prefix sums one per line.
// Fixed: h_array was leaked.
int main()
{
cout<<"enter the number of numbers ";
int numberOfElements;
cin>>numberOfElements;
int *h_array = new int[numberOfElements];
class random a(h_array,numberOfElements);
hillisSteeleScanHost(h_array,numberOfElements);
for(int i=0;i<numberOfElements;i++)
{
cout<<h_array[i]<<"\n";
}
delete[] h_array;  // fix: release the host buffer
return 0;
}
|
c05edc42f68e60d20db73bc65c73f42ec8948f62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Samples one of {sinf, cosf, tanf, log10f} over a uniform grid: thread i
// writes the pair (x, f(x)) with x = _frange_start + i * _dx into
// out[2*i] / out[2*i + 1]. An unrecognised fnCode leaves f(x) unwritten.
__global__ void simple_sinf(float* out, const size_t _data_size, int fnCode, const float _dx, const float _frange_start) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= _data_size) return;
const float x = _frange_start + i * _dx;
const int idx = 2 * i;
out[idx] = x;
if (fnCode == 0) out[idx + 1] = sinf(x);
else if (fnCode == 1) out[idx + 1] = cosf(x);
else if (fnCode == 2) out[idx + 1] = tanf(x);
else if (fnCode == 3) out[idx + 1] = log10f(x);
} | c05edc42f68e60d20db73bc65c73f42ec8948f62.cu | #include "includes.h"
// Samples one of {sinf, cosf, tanf, log10f} over a uniform grid: thread i
// writes the pair (x, f(x)) with x = _frange_start + i * _dx into
// out[2*i] / out[2*i + 1]. An unrecognised fnCode leaves f(x) unwritten.
__global__ void simple_sinf(float* out, const size_t _data_size, int fnCode, const float _dx, const float _frange_start) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= _data_size) return;
const float x = _frange_start + i * _dx;
const int idx = 2 * i;
out[idx] = x;
if (fnCode == 0) out[idx + 1] = sinf(x);
else if (fnCode == 1) out[idx + 1] = cosf(x);
else if (fnCode == 2) out[idx + 1] = tanf(x);
else if (fnCode == 3) out[idx + 1] = log10f(x);
} |
103fc9136613dea5e2455cc952062e2d6959ef7c.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define C 32
#define N 32
#define H 56
#define W 56
#define R 3
#define S 3
using namespace std;
// Fail-fast wrapper for cuDNN calls: on any non-success status, prints the
// failing source line and cuDNN's error string, then exits the process.
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
// Abort the process with a readable message when a HIP runtime call fails.
inline void chkerr(hipError_t code)
{
    if (code == hipSuccess)
        return;
    std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
    exit(-1);
}
// Benchmark wrapper around cudnnConvolutionForward using the
// CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM algorithm. Owns the device
// filter/output/workspace buffers and every cuDNN handle/descriptor it
// creates in initialize(); nothing is ever released (one-shot benchmark).
class ConvGemm{
public:
// host-side staging buffer for the all-ones filter (freed after upload)
float *cpuKernel;
// scaling factors passed to cudnnConvolutionForward
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
// device output buffer returned by forward()
float *output;
// device filter buffer (N x C x R x S, all ones)
float *kernel;
void initialize();
float *forward(float *input);
};
// One-time setup: allocates device filter/output buffers, builds the cuDNN
// descriptors (1 x C x H x W input, N x C x R x S filter, pad 1, stride 1,
// cross-correlation), sizes the workspace for IMPLICIT_GEMM, and uploads an
// all-ones filter.
// NOTE(review): return codes of the hipMalloc/cudnn* calls here are not
// checked (checkCUDNN exists above but is only used in forward) — verify
// before reusing outside this benchmark.
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
// Output dims are queried but unused below; with a 3x3 filter, pad 1 and
// stride 1 they match H x W.
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
// Upload an all-ones filter so results are comparable across algorithms.
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
// Runs one forward convolution of `input` (device pointer, 1 x C x H x W)
// into this object's device `output` buffer and returns that buffer (still
// owned by this object). The output is zeroed first; with beta = 0 cuDNN
// should overwrite it anyway, so the memset is belt-and-braces.
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
// Benchmark wrapper around cudnnConvolutionForward using the
// CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED algorithm; structurally a
// mirror of ConvGemm above with only the algorithm enum changed.
class ConvWinogradeNon{
public:
// host-side staging buffer for the all-ones filter (freed after upload)
float *cpuKernel;
// scaling factors passed to cudnnConvolutionForward
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
// device output buffer returned by forward()
float *output;
// device filter buffer (N x C x R x S, all ones)
float *kernel;
void initialize();
float *forward(float *input);
};
// One-time setup identical to ConvGemm::initialize except the workspace is
// sized for the WINOGRAD_NONFUSED algorithm.
// NOTE(review): hipMalloc/cudnn* return codes unchecked; resources never
// released — one-shot benchmark only.
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
// Queried output dims are unused below (3x3/pad 1/stride 1 preserves HxW).
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
// Upload an all-ones filter so results are comparable across algorithms.
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
// One forward convolution of device `input` into the object-owned device
// `output` buffer (zeroed first); returns that buffer.
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
// Benchmark wrapper around cudnnConvolutionForward using the
// CUDNN_CONVOLUTION_FWD_ALGO_FFT algorithm; structurally a mirror of
// ConvGemm above with only the algorithm enum changed.
class ConvFFT{
public:
// host-side staging buffer for the all-ones filter (freed after upload)
float *cpuKernel;
// scaling factors passed to cudnnConvolutionForward
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
// device output buffer returned by forward()
float *output;
// device filter buffer (N x C x R x S, all ones)
float *kernel;
void initialize();
float *forward(float *input);
};
// One-time setup identical to ConvGemm::initialize except the workspace is
// sized for the FFT algorithm.
// NOTE(review): hipMalloc/cudnn* return codes unchecked; resources never
// released — one-shot benchmark only.
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
// Queried output dims are unused below (3x3/pad 1/stride 1 preserves HxW).
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
// Upload an all-ones filter so results are comparable across algorithms.
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
// One forward convolution of device `input` into the object-owned device
// `output` buffer (zeroed first); returns that buffer.
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
// Auto-generated (TVM-style) tiled 3x3 convolution for the fixed problem
// C=32 in / N=32 out channels, 56x56 image, pad 1, stride 1. Zero padding is
// applied implicitly by the boundary predicate in the tile load, so `data`
// is the UNPADDED input. Launched from main() with grid(1,56,4) and
// block(56,1,4); reading the index arithmetic against that launch:
// blockIdx.y = output row, blockIdx.z = group of 8 output channels,
// threadIdx.x = output column, threadIdx.z = 2-output-channel slot, so each
// thread accumulates 2 output values.
// NOTE(review): geometry description above is inferred from the indexing and
// the launch in main() — confirm before reusing with other launch configs.
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[2];
// Input tile: 16 input channels x one 58-wide (padded) row = 928 floats.
__shared__ float pad_temp_shared[928];
// Filter tile: 8 output channels x 16 input channels x 3 taps = 384 floats.
__shared__ float kernel_shared[384];
float pad_temp_shared_local[12];
float kernel_shared_local[24];
#pragma unroll
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
compute_local[(ff_c_init)] = 0.000000e+00f;
}
// rc_outer: the two 16-input-channel halves; ry_outer: the 3 filter rows.
for (int rc_outer = 0; rc_outer < 2; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
// Cooperative load of the input row tile, zero-filling out-of-image taps.
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 5; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 58)) < 16) {
if ((((((int)threadIdx.z) * 232) + (((int)threadIdx.x) * 5)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 928) {
if (((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 232) {
pad_temp_shared[((((((int)threadIdx.z) * 232) + (((int)threadIdx.x) * 5)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= (((int)blockIdx.y) + ry_outer)) && ((((int)blockIdx.y) + ry_outer) < 57)) && (1 <= (((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 58))) && ((((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 58) < 57)) ? data[((((((((rc_outer * 50176) + (((int)threadIdx.z) * 12544)) + ((((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 58) * 3136)) + (((int)blockIdx.y) * 56)) + (ry_outer * 56)) + (((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 58)) - 57))] : 0.000000e+00f);
}
}
}
}
// Cooperative load of the filter row tile for this (rc_outer, ry_outer).
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 2; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 3)) < 128) {
if ((((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 384) {
if (((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 96) {
kernel_shared[((((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[((((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 576)) + ((((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 48) * 288)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) % 3)))];
}
}
}
}
}
__syncthreads();
// Stage shared-memory operands into registers, then multiply-accumulate.
#pragma unroll
for (int rc_inner_outer = 0; rc_inner_outer < 4; ++rc_inner_outer) {
#pragma unroll
for (int ax1 = 0; ax1 < 4; ++ax1) {
#pragma unroll
for (int ax3 = 0; ax3 < 3; ++ax3) {
pad_temp_shared_local[(((ax1 * 3) + ax3))] = pad_temp_shared[(((((rc_inner_outer * 232) + (ax1 * 58)) + ax3) + ((int)threadIdx.x)))];
}
}
#pragma unroll
for (int ax0 = 0; ax0 < 2; ++ax0) {
#pragma unroll
for (int ax11 = 0; ax11 < 4; ++ax11) {
#pragma unroll
for (int ax31 = 0; ax31 < 3; ++ax31) {
kernel_shared_local[((((ax0 * 12) + (ax11 * 3)) + ax31))] = kernel_shared[((((((((int)threadIdx.z) * 96) + (ax0 * 48)) + (rc_inner_outer * 12)) + (ax11 * 3)) + ax31))];
}
}
}
#pragma unroll
for (int rc_inner_inner = 0; rc_inner_inner < 4; ++rc_inner_inner) {
#pragma unroll
for (int rx_inner_inner = 0; rx_inner_inner < 3; ++rx_inner_inner) {
#pragma unroll
for (int ff_c = 0; ff_c < 2; ++ff_c) {
compute_local[(ff_c)] = (compute_local[(ff_c)] + (pad_temp_shared_local[(((rc_inner_inner * 3) + rx_inner_inner))] * kernel_shared_local[((((ff_c * 12) + (rc_inner_inner * 3)) + rx_inner_inner))]));
}
}
}
}
}
}
// Write the two accumulated output channels back to global memory.
#pragma unroll
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
compute[((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)blockIdx.y) * 56)) + ((int)threadIdx.x)))] = compute_local[(ff_inner_inner_inner)];
}
}
// Sum of absolute element-wise differences (L1 distance) between x and y.
// Used to compare the hand-written kernel's output against the cuDNN
// reference result. Returns 0 for size == 0.
float check_diff(float *x, float *y, unsigned int size){
    float diff = 0.0f;
    #pragma omp parallel for reduction(+ : diff)
    for(unsigned int i=0;i<size;++i){
        float d = x[i] - y[i];
        // Bug fix: the original called abs(), which with `using namespace
        // std` and no <cmath> can resolve to the integer overload and
        // silently truncate fractional differences to whole numbers.
        diff += (d < 0.0f) ? -d : d;
    }
    return diff;
}
// Copy the C x H x W tensor x into y with a one-pixel zero border per
// channel; y has shape C x (H+2) x (W+2), NCHW layout.
void pad_input(float * x, float *y){
    const unsigned int HP = H + 2;
    const unsigned int WP = W + 2;
    // Zero the whole destination first so the border stays zero.
    #pragma omp parallel for
    for(unsigned int i = 0; i < C * HP * WP; ++i){
        y[i] = 0.0f;
    }
    // Copy each input row into the interior of the padded image.
    #pragma omp parallel for
    for(unsigned int c = 0; c < C; ++c){
        for(unsigned int h = 0; h < H; ++h){
            const float *src = x + (c * H + h) * W;
            float *dst = y + (c * HP + (h + 1)) * WP + 1;
            for(unsigned int w = 0; w < W; ++w){
                dst[w] = src[w];
            }
        }
    }
}
// Benchmark driver: builds a random input, runs the three cuDNN algorithms
// (GEMM / Winograd-nonfused / FFT) and the hand-written TVM kernel, times
// each with HIP events, and prints one CSV line of sizes, times, and
// speedup ratios. Buffers are deliberately never freed (process exits).
int main(void){
// Random C x H x W input with integer values 0..9.
float *input = new float[C*H*W];
time_t t;
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
// Host-side padded copy; see NOTE at the kernel launch below.
float * padded_input = new float[C*(H+2)*(W+2)];
pad_input(input, padded_input);
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
// All-ones filter matching what the cuDNN wrappers upload internally.
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
// Warm-up passes; the GEMM result is kept as the host-side reference.
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
// Timed runs of the three cuDNN algorithms.
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
// Launch geometry expected by default_function_kernel0.
dim3 grid(1,56,4);
dim3 block(56,1,4);
// NOTE(review): paddedInputDevice is allocated and filled but the kernel
// below is launched with the UNPADDED device_input — the kernel pads
// implicitly via its boundary predicate, so this upload appears dead.
float * paddedInputDevice;
chkerr(hipMalloc(&paddedInputDevice, C * (H + 2) * (W + 2) * sizeof(float)));
chkerr(hipMemcpy(paddedInputDevice, padded_input, C * (H + 2) * (W + 2) * sizeof(float), hipMemcpyHostToDevice));
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
// NOTE(review): `difference` is computed but never printed or checked.
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
// CSV: N,C,H,W, FFT/Winograd/GEMM/TDC times, then cuDNN-vs-TDC ratios.
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<endl;
return 0;
}
| 103fc9136613dea5e2455cc952062e2d6959ef7c.cu | #include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define C 32
#define N 32
#define H 56
#define W 56
#define R 3
#define S 3
using namespace std;
// Fail-fast wrapper for cuDNN calls: on any non-success status, prints the
// failing source line and cuDNN's error string, then exits the process.
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
// Abort the process with a readable message when a CUDA runtime call fails.
inline void chkerr(cudaError_t code)
{
    if (code == cudaSuccess)
        return;
    std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
    exit(-1);
}
// Benchmark wrapper around cudnnConvolutionForward using the
// CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM algorithm. Owns the device
// filter/output/workspace buffers and every cuDNN handle/descriptor it
// creates in initialize(); nothing is ever released (one-shot benchmark).
class ConvGemm{
public:
// host-side staging buffer for the all-ones filter (freed after upload)
float *cpuKernel;
// scaling factors passed to cudnnConvolutionForward
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
// device output buffer returned by forward()
float *output;
// device filter buffer (N x C x R x S, all ones)
float *kernel;
void initialize();
float *forward(float *input);
};
// One-time setup: allocates device filter/output buffers, builds the cuDNN
// descriptors (1 x C x H x W input, N x C x R x S filter, pad 1, stride 1,
// cross-correlation), sizes the workspace for IMPLICIT_GEMM, and uploads an
// all-ones filter.
// NOTE(review): return codes of the cudaMalloc/cudnn* calls here are not
// checked (checkCUDNN exists above but is only used in forward) — verify
// before reusing outside this benchmark.
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
// Output dims are queried but unused below; with a 3x3 filter, pad 1 and
// stride 1 they match H x W.
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
// Upload an all-ones filter so results are comparable across algorithms.
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
// Runs one forward convolution of `input` (device pointer, 1 x C x H x W)
// into this object's device `output` buffer and returns that buffer (still
// owned by this object). The output is zeroed first; with beta = 0 cuDNN
// should overwrite it anyway, so the memset is belt-and-braces.
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
// Benchmark wrapper around cudnnConvolutionForward using the
// CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED algorithm; structurally a
// mirror of ConvGemm above with only the algorithm enum changed.
class ConvWinogradeNon{
public:
// host-side staging buffer for the all-ones filter (freed after upload)
float *cpuKernel;
// scaling factors passed to cudnnConvolutionForward
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
// device output buffer returned by forward()
float *output;
// device filter buffer (N x C x R x S, all ones)
float *kernel;
void initialize();
float *forward(float *input);
};
// One-time setup identical to ConvGemm::initialize except the workspace is
// sized for the WINOGRAD_NONFUSED algorithm.
// NOTE(review): cudaMalloc/cudnn* return codes unchecked; resources never
// released — one-shot benchmark only.
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
// Queried output dims are unused below (3x3/pad 1/stride 1 preserves HxW).
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
// Upload an all-ones filter so results are comparable across algorithms.
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
// One forward convolution of device `input` into the object-owned device
// `output` buffer (zeroed first); returns that buffer.
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
// Benchmark wrapper around cudnnConvolutionForward using the
// CUDNN_CONVOLUTION_FWD_ALGO_FFT algorithm; structurally a mirror of
// ConvGemm above with only the algorithm enum changed.
class ConvFFT{
public:
// host-side staging buffer for the all-ones filter (freed after upload)
float *cpuKernel;
// scaling factors passed to cudnnConvolutionForward
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
// device output buffer returned by forward()
float *output;
// device filter buffer (N x C x R x S, all ones)
float *kernel;
void initialize();
float *forward(float *input);
};
// One-time setup identical to ConvGemm::initialize except the workspace is
// sized for the FFT algorithm.
// NOTE(review): cudaMalloc/cudnn* return codes unchecked; resources never
// released — one-shot benchmark only.
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
// Queried output dims are unused below (3x3/pad 1/stride 1 preserves HxW).
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
// Upload an all-ones filter so results are comparable across algorithms.
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
// One forward convolution of device `input` into the object-owned device
// `output` buffer (zeroed first); returns that buffer.
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
// Auto-generated (TVM-style) tiled 3x3 convolution for the fixed problem
// C=32 in / N=32 out channels, 56x56 image, pad 1, stride 1. Zero padding is
// applied implicitly by the boundary predicate in the tile load, so `data`
// is the UNPADDED input. Launched from main() with grid(1,56,4) and
// block(56,1,4); reading the index arithmetic against that launch:
// blockIdx.y = output row, blockIdx.z = group of 8 output channels,
// threadIdx.x = output column, threadIdx.z = 2-output-channel slot, so each
// thread accumulates 2 output values.
// NOTE(review): geometry description above is inferred from the indexing and
// the launch configuration in main() — confirm before reusing elsewhere.
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[2];
// Input tile: 16 input channels x one 58-wide (padded) row = 928 floats.
__shared__ float pad_temp_shared[928];
// Filter tile: 8 output channels x 16 input channels x 3 taps = 384 floats.
__shared__ float kernel_shared[384];
float pad_temp_shared_local[12];
float kernel_shared_local[24];
#pragma unroll
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
compute_local[(ff_c_init)] = 0.000000e+00f;
}
// rc_outer: the two 16-input-channel halves; ry_outer: the 3 filter rows.
for (int rc_outer = 0; rc_outer < 2; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
// Cooperative load of the input row tile, zero-filling out-of-image taps.
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 5; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 58)) < 16) {
if ((((((int)threadIdx.z) * 232) + (((int)threadIdx.x) * 5)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 928) {
if (((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 232) {
pad_temp_shared[((((((int)threadIdx.z) * 232) + (((int)threadIdx.x) * 5)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= (((int)blockIdx.y) + ry_outer)) && ((((int)blockIdx.y) + ry_outer) < 57)) && (1 <= (((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 58))) && ((((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 58) < 57)) ? data[((((((((rc_outer * 50176) + (((int)threadIdx.z) * 12544)) + ((((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 58) * 3136)) + (((int)blockIdx.y) * 56)) + (ry_outer * 56)) + (((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 58)) - 57))] : 0.000000e+00f);
}
}
}
}
// Cooperative load of the filter row tile for this (rc_outer, ry_outer).
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 2; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 48)) < 8) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 3)) < 128) {
if ((((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 384) {
if (((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 96) {
kernel_shared[((((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[((((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 576)) + ((((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 48) * 288)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) % 3)))];
}
}
}
}
}
__syncthreads();
// Stage shared-memory operands into registers, then multiply-accumulate.
#pragma unroll
for (int rc_inner_outer = 0; rc_inner_outer < 4; ++rc_inner_outer) {
#pragma unroll
for (int ax1 = 0; ax1 < 4; ++ax1) {
#pragma unroll
for (int ax3 = 0; ax3 < 3; ++ax3) {
pad_temp_shared_local[(((ax1 * 3) + ax3))] = pad_temp_shared[(((((rc_inner_outer * 232) + (ax1 * 58)) + ax3) + ((int)threadIdx.x)))];
}
}
#pragma unroll
for (int ax0 = 0; ax0 < 2; ++ax0) {
#pragma unroll
for (int ax11 = 0; ax11 < 4; ++ax11) {
#pragma unroll
for (int ax31 = 0; ax31 < 3; ++ax31) {
kernel_shared_local[((((ax0 * 12) + (ax11 * 3)) + ax31))] = kernel_shared[((((((((int)threadIdx.z) * 96) + (ax0 * 48)) + (rc_inner_outer * 12)) + (ax11 * 3)) + ax31))];
}
}
}
#pragma unroll
for (int rc_inner_inner = 0; rc_inner_inner < 4; ++rc_inner_inner) {
#pragma unroll
for (int rx_inner_inner = 0; rx_inner_inner < 3; ++rx_inner_inner) {
#pragma unroll
for (int ff_c = 0; ff_c < 2; ++ff_c) {
compute_local[(ff_c)] = (compute_local[(ff_c)] + (pad_temp_shared_local[(((rc_inner_inner * 3) + rx_inner_inner))] * kernel_shared_local[((((ff_c * 12) + (rc_inner_inner * 3)) + rx_inner_inner))]));
}
}
}
}
}
}
// Write the two accumulated output channels back to global memory.
#pragma unroll
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
compute[((((((((int)blockIdx.z) * 25088) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)blockIdx.y) * 56)) + ((int)threadIdx.x)))] = compute_local[(ff_inner_inner_inner)];
}
}
// Sum of absolute element-wise differences (L1 distance) between x and y.
// Used to compare the hand-written kernel's output against the cuDNN
// reference result. Returns 0 for size == 0.
float check_diff(float *x, float *y, unsigned int size){
    float diff = 0.0f;
    #pragma omp parallel for reduction(+ : diff)
    for(unsigned int i=0;i<size;++i){
        float d = x[i] - y[i];
        // Bug fix: the original called abs(), which with `using namespace
        // std` and no <cmath> can resolve to the integer overload and
        // silently truncate fractional differences to whole numbers.
        diff += (d < 0.0f) ? -d : d;
    }
    return diff;
}
// Copy the C x H x W tensor x into y with a one-pixel zero border per
// channel; y has shape C x (H+2) x (W+2), NCHW layout.
void pad_input(float * x, float *y){
    const unsigned int HP = H + 2;
    const unsigned int WP = W + 2;
    // Zero the whole destination first so the border stays zero.
    #pragma omp parallel for
    for(unsigned int i = 0; i < C * HP * WP; ++i){
        y[i] = 0.0f;
    }
    // Copy each input row into the interior of the padded image.
    #pragma omp parallel for
    for(unsigned int c = 0; c < C; ++c){
        for(unsigned int h = 0; h < H; ++h){
            const float *src = x + (c * H + h) * W;
            float *dst = y + (c * HP + (h + 1)) * WP + 1;
            for(unsigned int w = 0; w < W; ++w){
                dst[w] = src[w];
            }
        }
    }
}
// Benchmark driver: builds a random input, runs the three cuDNN algorithms
// (GEMM / Winograd-nonfused / FFT) and the hand-written TVM kernel, times
// each with CUDA events, and prints one CSV line of sizes, times, and
// speedup ratios. Buffers are deliberately never freed (process exits).
int main(void){
// Random C x H x W input with integer values 0..9.
float *input = new float[C*H*W];
time_t t;
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
// Host-side padded copy; see NOTE at the kernel launch below.
float * padded_input = new float[C*(H+2)*(W+2)];
pad_input(input, padded_input);
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
// All-ones filter matching what the cuDNN wrappers upload internally.
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
// Warm-up passes; the GEMM result is kept as the host-side reference.
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
// Timed runs of the three cuDNN algorithms.
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
// Launch geometry expected by default_function_kernel0.
dim3 grid(1,56,4);
dim3 block(56,1,4);
// NOTE(review): paddedInputDevice is allocated and filled but the kernel
// below is launched with the UNPADDED device_input — the kernel pads
// implicitly via its boundary predicate, so this upload appears dead.
float * paddedInputDevice;
chkerr(cudaMalloc(&paddedInputDevice, C * (H + 2) * (W + 2) * sizeof(float)));
chkerr(cudaMemcpy(paddedInputDevice, padded_input, C * (H + 2) * (W + 2) * sizeof(float), cudaMemcpyHostToDevice));
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
// NOTE(review): `difference` is computed but never printed or checked.
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
// CSV: N,C,H,W, FFT/Winograd/GEMM/TDC times, then cuDNN-vs-TDC ratios.
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<endl;
return 0;
}
|
b1997079efe2fc474a315c813f32796a4c17e79b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dividKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
const float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
dividKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
dividKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
dividKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b1997079efe2fc474a315c813f32796a4c17e79b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dividKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    // Benchmark harness: sweeps dividKernel over the matrix sizes in matrices_
    // and the 20 block shapes in blocks_, printing one line per configuration:
    //   [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]
    // argv[1] = number of entries of matrices_ to sweep.
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *c = NULL;
            // FIX: original allocated XSIZE*YSIZE BYTES, not elements; the
            // kernel operates on XSIZE*YSIZE floats, so size must be in bytes.
            cudaMalloc(&c, XSIZE*YSIZE*sizeof(float));
            const float *a = NULL;
            cudaMalloc(&a, XSIZE*YSIZE*sizeof(float));
            const float *b = NULL;
            cudaMalloc(&b, XSIZE*YSIZE*sizeof(float));
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy context creation before timing
            dividKernel<<<gridBlock,threadBlock>>>(c,a,b);
            cudaDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                dividKernel<<<gridBlock,threadBlock>>>(c,a,b);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                dividKernel<<<gridBlock,threadBlock>>>(c,a,b);
            }
            // FIX: kernel launches are asynchronous; without this sync the
            // loop above measured only launch overhead, not execution time.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: release the per-configuration buffers; the original leaked
            // device memory on every iteration of the sweep.
            cudaFree(c);
            cudaFree((void*)a);
            cudaFree((void*)b);
        }
    }
}
cf7f77edb9eb63b17e634ecbc4364b417e62fd1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
extern "C" {
// SAXPY kernel: y[i] = a * x[i] + y[i] for the first n elements.
// One element per thread over a 1-D grid; the bounds check guards the
// tail block when n is not a multiple of the block size.
__global__ void saxpy(int n, float a, float *x, float *y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a * x[i] + y[i];
}
}
int main(void) {
  // Host driver: runs SAXPY (y = 2*x + y) on 1M elements with x=1, y=2,
  // so every result should equal 4.0f; prints the maximum deviation.
  int N = 1 << 20;
  float *x, *y, *d_x, *d_y;
  x = (float *)malloc(N * sizeof(float));
  y = (float *)malloc(N * sizeof(float));
  hipMalloc(&d_x, N * sizeof(float));
  hipMalloc(&d_y, N * sizeof(float));
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  hipMemcpy(d_x, x, N * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_y, y, N * sizeof(float), hipMemcpyHostToDevice);
  // Perform SAXPY on 1M elements; ceil-div grid covers the tail block.
  hipLaunchKernelGGL(( saxpy), dim3((N + 255) / 256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
  // Blocking copy-back also serves as the synchronization point.
  hipMemcpy(y, d_y, N * sizeof(float), hipMemcpyDeviceToHost);
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = max(maxError, abs(y[i] - 4.0f));
  printf("Max error: %f\n", maxError);
  // FIX: release device and host buffers; the original leaked all four.
  hipFree(d_x);
  hipFree(d_y);
  free(x);
  free(y);
  return 0;
}
| cf7f77edb9eb63b17e634ecbc4364b417e62fd1a.cu | #include <stdio.h>
extern "C" {
// SAXPY: y[i] = a * x[i] + y[i] over the first n elements.
// One element per thread; lanes beyond the array bound exit early.
__global__ void saxpy(int n, float a, float *x, float *y) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n) return;  // guard the partial tail block
    y[idx] = a * x[idx] + y[idx];
}
}
int main(void) {
  // Host driver: runs SAXPY (y = 2*x + y) on 1M elements with x=1, y=2,
  // so every result should equal 4.0f; prints the maximum deviation.
  int N = 1 << 20;
  float *x, *y, *d_x, *d_y;
  x = (float *)malloc(N * sizeof(float));
  y = (float *)malloc(N * sizeof(float));
  cudaMalloc(&d_x, N * sizeof(float));
  cudaMalloc(&d_y, N * sizeof(float));
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice);
  // Perform SAXPY on 1M elements; ceil-div grid covers the tail block.
  saxpy<<<(N + 255) / 256, 256>>>(N, 2.0f, d_x, d_y);
  // Blocking copy-back also serves as the synchronization point.
  cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = max(maxError, abs(y[i] - 4.0f));
  printf("Max error: %f\n", maxError);
  // FIX: release device and host buffers; the original leaked all four.
  cudaFree(d_x);
  cudaFree(d_y);
  free(x);
  free(y);
  return 0;
}
|
89c42dffcaca9943fe1b3b1d9ba118dbd4e3b999.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
// Forward pass of the cuDNN convolution layer: for each bottom/top blob pair,
// runs cudnnConvolutionForward (plus cudnnAddTensor for the bias) once per
// convolution group, each group on its own cuDNN handle/stream, then joins
// the streams with an empty kernel on the null stream.
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters: top[g] = conv(bottom[g], weight[g]); zero beta overwrites top.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias: top[g] += bias[g] (alpha = beta = one accumulates in place).
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
// Backward pass of the cuDNN convolution layer: per top blob and per group,
// computes bias, filter, and bottom-data gradients on three separate handle
// banks (handle_[0..3*group_)), accumulating parameter gradients (beta = one)
// and overwriting bottom diffs (beta = zero), then joins streams via an
// empty kernel on the null stream.
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
//LOG(INFO) << "CONV BACKWARD" ;
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
// NOTE(review): `count` is assigned below but never used afterwards.
int count = 0 ;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
count = this->blobs_[0]->count();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias (beta = one: accumulates into bias_diff).
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights (beta = one: accumulates into weight_diff).
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data (beta = zero: overwrites bottom_diff).
if (propagate_down[i]) {
// Weights may not have been fetched above if param gradients are off.
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| 89c42dffcaca9943fe1b3b1d9ba118dbd4e3b999.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
//LOG(INFO) << "CONV BACKWARD" ;
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
int count = 0 ;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
count = this->blobs_[0]->count();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
e9ac995ccb36692d752ae6f285578ff29c9bb967.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *dest = NULL;
hipMalloc(&dest, XSIZE*YSIZE);
int a = 2;
int b = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
sum), dim3(gridBlock),dim3(threadBlock), 0, 0, dest,a,b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sum), dim3(gridBlock),dim3(threadBlock), 0, 0, dest,a,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sum), dim3(gridBlock),dim3(threadBlock), 0, 0, dest,a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e9ac995ccb36692d752ae6f285578ff29c9bb967.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    // Benchmark harness: sweeps the `sum` kernel over the matrix sizes in
    // matrices_ and the 20 block shapes in blocks_, printing one line per
    // configuration: [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
    // argv[1] = number of entries of matrices_ to sweep.
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int *dest = NULL;
            // FIX: original allocated XSIZE*YSIZE BYTES, not elements; the
            // output buffer holds XSIZE*YSIZE ints, so size must be in bytes.
            cudaMalloc(&dest, XSIZE*YSIZE*sizeof(int));
            int a = 2;
            int b = 2;
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy context creation before timing
            sum<<<gridBlock,threadBlock>>>(dest,a,b);
            cudaDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                sum<<<gridBlock,threadBlock>>>(dest,a,b);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                sum<<<gridBlock,threadBlock>>>(dest,a,b);
            }
            // FIX: kernel launches are asynchronous; without this sync the
            // loop above measured only launch overhead, not execution time.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: free the per-configuration buffer; the original leaked
            // device memory on every iteration of the sweep.
            cudaFree(dest);
        }
    }
}
34a62c5c77dac9ecbdcec0d26cf5bbf31b5667d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Second (corrector-style) half-step of a 1-D wave/advection update —
// presumably MacCormack, judging by the name (TODO confirm):
//   f_next[i] = 0.5 * (f_in[i] + f_tmp1[i] - u*(dt/dx)*(f_tmp1[i] - f_tmp1[i-1]))
// with the left neighbour wrapping periodically to N-1 at i == 0.
// One grid point per thread over a 1-D launch.
__global__ void wave1Dmac2(double * f_next, double * f_tmp1,
double * f_in, double u, double dt,
double dx, int N){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N) return;                      // guard the tail block
    int left = (i == 0) ? (N - 1) : (i - 1); // periodic left neighbour
    double center = f_tmp1[i];
    double correction = u * (dt / dx) * (center - f_tmp1[left]);
    f_next[i] = 0.5 * (f_in[i] + center - correction);
}
| 34a62c5c77dac9ecbdcec0d26cf5bbf31b5667d2.cu | __global__ void wave1Dmac2(double * f_next, double * f_tmp1,
double * f_in, double u, double dt,
double dx, int N){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if(tid<N){
int x_m = tid-1;
if(x_m <0) x_m = N-1;
double ft1_tmp = f_tmp1[tid];
f_next[tid]=0.5*(f_in[tid]+ft1_tmp - u*(dt/dx)*(ft1_tmp-f_tmp1[x_m]));
}
}
|
cd627a7473ee27c45ef5d7f746b83d8b234163bd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "Convolutions_kernels.cuh"
#include "cudamat.cuh"
extern "C"
{
// Returns true iff an error is pending on the HIP runtime. Note that
// hipGetLastError() also CLEARS the sticky error state, so this call both
// tests and resets it. The diagnostic print is intentionally disabled.
inline bool checkCUDAError()
{
hipError_t err = hipGetLastError();
//if (hipSuccess != err)
//printf("%s\n", hipGetErrorString( err));
return hipSuccess != err;
}
// Correlates signal1 against each column of signal2 at kernelWidth shifts,
// writing a (kernelWidth x numKernels) result into target. Two-phase launch:
// KernPartialConvolve produces per-block partial sums into scratchPad, then
// KernPartialConvolveSum reduces them. Returns 0 on success or a cudamat
// error code; validates device residency, shapes, and transposition first.
extern int ShiftedConvolution(cudamat *signal1, cudamat *signal2, cudamat *target, int kernelWidth,
cudamat *scratchPad)
{
if (!signal1->on_device || !target->on_device || !signal2->on_device || !scratchPad->on_device)
return ERROR_NOT_ON_DEVICE;
// signal1 must be a vector (one of its dimensions equal to 1).
if (signal1->size[0] != 1 && signal1->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
int signalLength = signal1->size[0] * signal1->size[1] ;
int numKernels = signal2->size[1] ;
if (signal2->size[0] != signalLength)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
if (target->size[0] != kernelWidth || target->size[1] != numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (signal1->is_trans)
return ERROR_TRANSPOSED;
if (signal2->is_trans)
return ERROR_TRANSPOSED;
if (target->is_trans)
return ERROR_TRANSPOSED;
// Do calculation on device:
int numThreadsPerBlock = 256 ;
const int numPtsPerBlock = 512 ;
// Ceil-divide the signal into 512-point tiles, one block per tile.
int numBlocksPerKernel = signalLength/numPtsPerBlock + (signalLength%numPtsPerBlock == 0 ? 0:1);
// scratchPad must hold one partial result per (shift, tile, kernel) triple.
if (scratchPad->size[0]*scratchPad->size[1] < kernelWidth*numBlocksPerKernel*numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
// NOTE(review): the literal 4 is presumably sizeof(float) — confirm.
int sharedMemSize = 4*(2*numPtsPerBlock+kernelWidth-1) ;
// NOTE(review): throwing a C string from an extern "C" entry point will
// terminate the process rather than reach a C caller — consider an error code.
if (sharedMemSize > 16*1024)
throw "Specified parameters require kernel with shared memory greater than 16KB. Exiting" ;
dim3 gDim(numKernels, numBlocksPerKernel, 1) ;
hipLaunchKernelGGL(( KernPartialConvolve), dim3(gDim),dim3(numThreadsPerBlock),sharedMemSize, 0, signal1->data_device,
signal2->data_device,
scratchPad->data_device,
signalLength,
kernelWidth,
numPtsPerBlock) ;
// Reduction: one block per (kernel, shift) pair sums the partials.
dim3 gDimSum(numKernels, kernelWidth,1) ;
hipLaunchKernelGGL(( KernPartialConvolveSum), dim3(gDimSum), dim3(numThreadsPerBlock), sizeof(float)*numThreadsPerBlock, 0, scratchPad->data_device,
target->data_device, kernelWidth, numBlocksPerKernel, numKernels) ;
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0 ;
}
// Use this convolution only when kernelWidth is small compared to signalLength because it
// does one convolution per thread. If both are long, consider coding (/using NVIDIA's fft sample)
// with fft coefficient products.
// Convolves a single signal vector with each kernel column, writing a
// (signalLength x numKernels) result into target. One convolution output
// point per thread, tiled 128 points per block with the signal tile and the
// kernel staged in shared memory. Returns 0 or a cudamat error code.
extern int Convolve(cudamat *signal, cudamat *kernels, cudamat *target)
{
if (!signal->on_device || !target->on_device || !kernels->on_device)
return ERROR_NOT_ON_DEVICE;
// signal must be a vector (one of its dimensions equal to 1).
if (signal->size[0] != 1 && signal->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
int signalLength = signal->size[0] * signal->size[1] ;
int kernelWidth = kernels->size[0] ;
int numKernels = kernels->size[1] ;
if (target->size[0] != signalLength || target->size[1] != numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
if (signal->is_trans)
return ERROR_TRANSPOSED ;
if (kernels->is_trans)
return ERROR_TRANSPOSED ;
if (target->is_trans)
return ERROR_TRANSPOSED ;
// Do calculation on device:
int block_size = 128 ;
int numPtsPerBlock = 128 ;
// Ceil-divide the signal into tiles; grid is (kernel, tile).
int numBlocksPerSignal = signalLength/numPtsPerBlock + (signalLength%numPtsPerBlock == 0 ? 0:1);
dim3 gridD(numKernels, numBlocksPerSignal,1) ;
// Shared memory: one haloed signal tile plus one kernel.
int sharedMemSize = sizeof(float)*((numPtsPerBlock+kernelWidth-1) + kernelWidth) ;
hipLaunchKernelGGL(( KernConvolve) , dim3(gridD), dim3(block_size), sharedMemSize , 0, signal->data_device,
kernels->data_device,
target->data_device,
signalLength,
kernelWidth,
numPtsPerBlock) ;
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0 ;
}
// Applies KernReverseConvolve to each column of convolvedSignals with the
// matching kernel column, writing a (signalLength x numKernels) result into
// reverseConvolvedSignals. Returns 0 or a cudamat error code.
extern int ReverseConvolve(cudamat *convolvedSignals, cudamat *kernels, cudamat * reverseConvolvedSignals)
{
int kernelWidth = kernels->size[0] ;
int numKernels = kernels->size[1] ;
int signalLength = convolvedSignals->size[0] ;
if (!convolvedSignals->on_device || !kernels->on_device || !reverseConvolvedSignals->on_device)
return ERROR_NOT_ON_DEVICE;
if (reverseConvolvedSignals->size[0] != signalLength || reverseConvolvedSignals->size[1] != numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
if (convolvedSignals->size[1] != numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
// Do calculation on device:
int numThreadsPerBlock = 32 ;
int numPtsPerThread = 1 ;
int numPtsPerBlock = numThreadsPerBlock*numPtsPerThread ;
// Ceil-divide the signal; grid is (kernel, tile).
int numBlocks = signalLength/numPtsPerBlock + (signalLength%numPtsPerBlock == 0 ? 0:1);
dim3 gridD(numKernels, numBlocks,1) ;
hipLaunchKernelGGL(( KernReverseConvolve) , dim3(gridD), dim3(numThreadsPerBlock) , 0, 0, convolvedSignals->data_device,
kernels->data_device,
reverseConvolvedSignals->data_device,
signalLength, kernelWidth, numKernels,
numPtsPerBlock, numPtsPerThread) ;
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0 ;
}
// Reverse-convolves every column of convolvedSignals (into the
// reverseConvolvedSignals scratch buffer) and then sums the per-kernel
// signals elementwise into `reconstruction`. Returns 0 or a cudamat
// error code.
// NOTE(review): unlike the other entry points, `reconstruction` is not
// checked for on_device/shape — confirm callers guarantee it.
extern int Reconstruct(cudamat *convolvedSignals,
cudamat *kernels,
cudamat *reverseConvolvedSignals,
cudamat *reconstruction)
{
int kernelWidth = kernels->size[0] ;
int numKernels = kernels->size[1] ;
int signalLength = convolvedSignals->size[0] ;
if (!convolvedSignals->on_device || !kernels->on_device || !reverseConvolvedSignals->on_device)
return ERROR_NOT_ON_DEVICE;
// The scratch buffer must hold one full signal per kernel.
if (reverseConvolvedSignals->size[0]*reverseConvolvedSignals->size[1] < signalLength * numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
if (convolvedSignals->size[1] != numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
// Do calculation on device:
int numThreadsPerBlock = 256 ;
int numPtsPerThread = 1 ;
int numPtsPerBlock = numThreadsPerBlock*numPtsPerThread ;
// Ceil-divide the signal; grid is (kernel, tile).
int numBlocks = signalLength/numPtsPerBlock + (signalLength%numPtsPerBlock == 0 ? 0:1);
dim3 gridD(numKernels, numBlocks,1) ;
hipLaunchKernelGGL(( KernReverseConvolve) , dim3(gridD), dim3(numThreadsPerBlock) , 0, 0, convolvedSignals->data_device,
kernels->data_device,
reverseConvolvedSignals->data_device,
signalLength, kernelWidth, numKernels,
numPtsPerBlock, numPtsPerThread) ;
// Elementwise sum across the numKernels reverse-convolved signals.
hipLaunchKernelGGL(( KernAddSignals) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, reverseConvolvedSignals->data_device,
reconstruction->data_device,
signalLength,
numKernels,
numPtsPerBlock,
numPtsPerThread) ;
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0 ;
}
}
| cd627a7473ee27c45ef5d7f746b83d8b234163bd.cu | #include <cuda.h>
#include "Convolutions_kernels.cuh"
#include "cudamat.cuh"
extern "C"
{
// Reports whether the most recent CUDA call left an error pending.
// cudaGetLastError() clears the sticky error state as a side effect, so this
// both tests and resets it. Diagnostic printing stays disabled, as before.
inline bool checkCUDAError()
{
    const cudaError_t status = cudaGetLastError();
    // if (status != cudaSuccess)
    //     printf("%s\n", cudaGetErrorString(status));
    return status != cudaSuccess;
}
extern int ShiftedConvolution(cudamat *signal1, cudamat *signal2, cudamat *target, int kernelWidth,
cudamat *scratchPad)
{
if (!signal1->on_device || !target->on_device || !signal2->on_device || !scratchPad->on_device)
return ERROR_NOT_ON_DEVICE;
if (signal1->size[0] != 1 && signal1->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
int signalLength = signal1->size[0] * signal1->size[1] ;
int numKernels = signal2->size[1] ;
if (signal2->size[0] != signalLength)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
if (target->size[0] != kernelWidth || target->size[1] != numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (signal1->is_trans)
return ERROR_TRANSPOSED;
if (signal2->is_trans)
return ERROR_TRANSPOSED;
if (target->is_trans)
return ERROR_TRANSPOSED;
// Do calculation on device:
int numThreadsPerBlock = 256 ;
const int numPtsPerBlock = 512 ;
int numBlocksPerKernel = signalLength/numPtsPerBlock + (signalLength%numPtsPerBlock == 0 ? 0:1);
if (scratchPad->size[0]*scratchPad->size[1] < kernelWidth*numBlocksPerKernel*numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
int sharedMemSize = 4*(2*numPtsPerBlock+kernelWidth-1) ;
if (sharedMemSize > 16*1024)
throw "Specified parameters require kernel with shared memory greater than 16KB. Exiting" ;
dim3 gDim(numKernels, numBlocksPerKernel, 1) ;
KernPartialConvolve<<<gDim,numThreadsPerBlock,sharedMemSize>>>(signal1->data_device,
signal2->data_device,
scratchPad->data_device,
signalLength,
kernelWidth,
numPtsPerBlock) ;
dim3 gDimSum(numKernels, kernelWidth,1) ;
KernPartialConvolveSum<<<gDimSum, numThreadsPerBlock, sizeof(float)*numThreadsPerBlock>>>(scratchPad->data_device,
target->data_device, kernelWidth, numBlocksPerKernel, numKernels) ;
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0 ;
}
// Use this convolution only when kernelWidth is small compared to signalLength because it
// does one convolution per thread. If both are long, consider coding (/using NVIDIA's fft sample)
// with fft coefficient products.
extern int Convolve(cudamat *signal, cudamat *kernels, cudamat *target)
{
if (!signal->on_device || !target->on_device || !kernels->on_device)
return ERROR_NOT_ON_DEVICE;
if (signal->size[0] != 1 && signal->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
int signalLength = signal->size[0] * signal->size[1] ;
int kernelWidth = kernels->size[0] ;
int numKernels = kernels->size[1] ;
if (target->size[0] != signalLength || target->size[1] != numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
if (signal->is_trans)
return ERROR_TRANSPOSED ;
if (kernels->is_trans)
return ERROR_TRANSPOSED ;
if (target->is_trans)
return ERROR_TRANSPOSED ;
// Do calculation on device:
int block_size = 128 ;
int numPtsPerBlock = 128 ;
int numBlocksPerSignal = signalLength/numPtsPerBlock + (signalLength%numPtsPerBlock == 0 ? 0:1);
dim3 gridD(numKernels, numBlocksPerSignal,1) ;
int sharedMemSize = sizeof(float)*((numPtsPerBlock+kernelWidth-1) + kernelWidth) ;
KernConvolve <<< gridD, block_size, sharedMemSize >>>(signal->data_device,
kernels->data_device,
target->data_device,
signalLength,
kernelWidth,
numPtsPerBlock) ;
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0 ;
}
extern int ReverseConvolve(cudamat *convolvedSignals, cudamat *kernels, cudamat * reverseConvolvedSignals)
{
int kernelWidth = kernels->size[0] ;
int numKernels = kernels->size[1] ;
int signalLength = convolvedSignals->size[0] ;
if (!convolvedSignals->on_device || !kernels->on_device || !reverseConvolvedSignals->on_device)
return ERROR_NOT_ON_DEVICE;
if (reverseConvolvedSignals->size[0] != signalLength || reverseConvolvedSignals->size[1] != numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
if (convolvedSignals->size[1] != numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
// Do calculation on device:
int numThreadsPerBlock = 32 ;
int numPtsPerThread = 1 ;
int numPtsPerBlock = numThreadsPerBlock*numPtsPerThread ;
int numBlocks = signalLength/numPtsPerBlock + (signalLength%numPtsPerBlock == 0 ? 0:1);
dim3 gridD(numKernels, numBlocks,1) ;
KernReverseConvolve <<< gridD, numThreadsPerBlock >>>(convolvedSignals->data_device,
kernels->data_device,
reverseConvolvedSignals->data_device,
signalLength, kernelWidth, numKernels,
numPtsPerBlock, numPtsPerThread) ;
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0 ;
}
extern int Reconstruct(cudamat *convolvedSignals,
cudamat *kernels,
cudamat *reverseConvolvedSignals,
cudamat *reconstruction)
{
int kernelWidth = kernels->size[0] ;
int numKernels = kernels->size[1] ;
int signalLength = convolvedSignals->size[0] ;
if (!convolvedSignals->on_device || !kernels->on_device || !reverseConvolvedSignals->on_device)
return ERROR_NOT_ON_DEVICE;
if (reverseConvolvedSignals->size[0]*reverseConvolvedSignals->size[1] < signalLength * numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
if (convolvedSignals->size[1] != numKernels)
return ERROR_INCOMPATIBLE_DIMENSIONS ;
// Do calculation on device:
int numThreadsPerBlock = 256 ;
int numPtsPerThread = 1 ;
int numPtsPerBlock = numThreadsPerBlock*numPtsPerThread ;
int numBlocks = signalLength/numPtsPerBlock + (signalLength%numPtsPerBlock == 0 ? 0:1);
dim3 gridD(numKernels, numBlocks,1) ;
KernReverseConvolve <<< gridD, numThreadsPerBlock >>>(convolvedSignals->data_device,
kernels->data_device,
reverseConvolvedSignals->data_device,
signalLength, kernelWidth, numKernels,
numPtsPerBlock, numPtsPerThread) ;
KernAddSignals <<< numBlocks, numThreadsPerBlock >>>(reverseConvolvedSignals->data_device,
reconstruction->data_device,
signalLength,
numKernels,
numPtsPerBlock,
numPtsPerThread) ;
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0 ;
}
}
|
c4d842776a8407fdc64bc793e252f31deb92bfd9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define PRECISION_z
//#if (GPUSHMEM < 200)
#define zdotc_max_bs 512 // 512 is max threads for 1.x cards
//#else
//#define zdotc_max_bs 1024
//#endif
void zpotf2_zdscal(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx);
void zpotf2_zdotc(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx);
#if defined(PRECISION_z) || defined(PRECISION_c)
void zlacgv(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx);
#endif
/**
Purpose
-------
zpotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@ingroup magma_zposv_aux
********************************************************************/
// Unblocked Cholesky factorization of an n x n matrix held on the device.
// Processes one column (upper) or one row (lower) per iteration using the
// custom zdotc / zdscal / zlacgv kernels defined below; requires
// n <= zdotc_max_bs because kernel_zdotc reduces a whole column in one block.
// See the block comment above for the full argument contract.
extern "C" magma_int_t
magma_zpotf2_gpu(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t *info )
{
// Element (i_, j_) of the column-major matrix dA.
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t j;
*info = 0;
// Argument checks mirror LAPACK zpotf2 (negative *info = bad argument index).
if ( uplo != MagmaUpper && uplo != MagmaLower) {
*info = -1;
} else if (n < 0 || n > zdotc_max_bs) {
*info = -2;
} else if (ldda < max(1,n)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (n == 0) {
return *info;
}
magmaDoubleComplex alpha = MAGMA_Z_NEG_ONE;
magmaDoubleComplex beta = MAGMA_Z_ONE;
if (uplo == MagmaUpper) {
// Factor A = U**H * U, one column j at a time.
for(j = 0; j < n; j++) {
zpotf2_zdotc(j, dA(0,j), 1); // including zdotc product and update a(j,j)
if (j < n) { // NOTE(review): always true inside this loop; guarded calls are no-ops when j == n-1
// Conjugate column j so the gemv computes a true dot with conj (z/c only).
#if defined(PRECISION_z) || defined(PRECISION_c)
zlacgv(j, dA(0, j), 1);
#endif
// Trailing-row update: a(j, j+1:n-1) -= A(0:j-1, j+1:n-1)^T * a(0:j-1, j).
magma_zgemv( MagmaTrans, j, n-j-1,
alpha, dA(0, j+1), ldda,
dA(0, j), 1,
beta, dA(j, j+1), ldda);
#if defined(PRECISION_z) || defined(PRECISION_c)
zlacgv(j, dA(0, j), 1);
#endif
// Scale the updated row by 1 / a(j,j).
zpotf2_zdscal(n-j, dA(j,j), ldda);
}
}
}
else {
// Factor A = L * L**H, one row j at a time (mirror of the upper case).
for(j = 0; j < n; j++) {
zpotf2_zdotc(j, dA(j,0), ldda); // including zdotc product and update a(j,j)
if (j < n) { // NOTE(review): always true; see upper branch
#if defined(PRECISION_z) || defined(PRECISION_c)
zlacgv(j, dA(j, 0), ldda);
#endif
magma_zgemv( MagmaNoTrans, n-j-1, j,
alpha, dA(j+1, 0), ldda,
dA(j,0), ldda,
beta, dA(j+1, j), 1 );
#if defined(PRECISION_z) || defined(PRECISION_c)
zlacgv(j, dA(j, 0), ldda);
#endif
zpotf2_zdscal(n-j, dA(j,j), 1);
}
}
}
return *info;
}
#define zdscal_bs 32
#define zdotc_bs 512
#define zlacgv_bs 512
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ double shared_data[];
// Single-block kernel: computes sum_{i<n} |x[i*incx]|^2 with a shared-memory
// tree reduction, then overwrites x[n*incx] with sqrt(Re(x[n*incx]) - sum)
// (the Cholesky diagonal update). Launched with blockDim.x == threadSize
// (>= 64) and threadSize*sizeof(double) bytes of dynamic shared memory.
__global__ void kernel_zdotc(int n, magmaDoubleComplex *x, int incx, int threadSize)
{
int tx = threadIdx.x;
double *sdata = shared_data;
magmaDoubleComplex res = MAGMA_Z_ZERO;
// Threads past the end of the vector contribute zero to the sum.
if (tx < n) {
res = x[tx*incx];
}
sdata[tx] = MAGMA_Z_REAL(res * MAGMA_Z_CNJG(res));
__syncthreads();
// Tree reduction down to 64 partial sums.
for(int s = blockDim.x/2; s > 32; s >>= 1 ) {
if (tx < s) {
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
// Warp-synchronous tail; requires blockDim.x >= 64 so sdata[tx+32] exists.
// NOTE(review): relies on implicit lock-step execution within a warp -- on
// Volta+ this idiom needs __syncwarp() between steps; confirm target arch.
if (tx < 32) {
volatile double* smem = sdata;
smem[tx] += smem[tx+32];
smem[tx] += smem[tx+16];
smem[tx] += smem[tx+8];
smem[tx] += smem[tx+4];
smem[tx] += smem[tx+2];
smem[tx] += smem[tx+1];
}
// Thread 0 writes the new diagonal: a(j,j) = sqrt(Re(a(j,j)) - sum).
if (tx == 0) {
double xreal = MAGMA_Z_REAL(x[n*incx]);
x[n*incx] = MAGMA_Z_MAKE( sqrt(xreal - sdata[0]), 0 );
}
}
// Host wrapper for kernel_zdotc: picks the smallest power-of-two block size
// covering n (minimum 64, because the reduction tail reads sdata[tx+32]) and
// launches one block on magma_stream.
void zpotf2_zdotc(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx)
{
/*
Specialized Zdotc
1) performs zdotc sum = x[0:n-1]*conj(x[0:n-1])
2) updates x[n] = sqrt(x[n]-sum);
*/
if (n > zdotc_max_bs) {
fprintf( stderr, "n = %d > %d is not supported in zpotf2_zdotc\n", (int) n, (int) zdotc_max_bs);
return;
}
int threadSize;
// NOTE(review): the 1024 branch is unreachable while zdotc_max_bs == 512;
// it is kept for the commented-out 1024-thread configuration above.
if (n <= 1024 && n > 512) {
threadSize = 1024;
}
else if (n <= 512 && n > 256 ) {
threadSize = 512;
}
else if (n <= 256 && n > 128) {
threadSize = 256;
}
else if (n <= 128 && n > 64) {
threadSize = 128;
}
else {
threadSize = 64;
}
// One block, dynamic shared memory sized to one double per thread.
hipLaunchKernelGGL(( kernel_zdotc), dim3(1), dim3(threadSize), threadSize * sizeof(double), magma_stream, n, x, incx, threadSize);
}
// Scale x[1..n-1] (stride incx) by 1 / Re(x[0]); x[0] itself -- the freshly
// computed diagonal entry -- is left untouched (guard `id > 0`).
__global__ void kernel_zdscal(int n, magmaDoubleComplex *x, int incx)
{
int id = blockIdx.x * zdscal_bs + threadIdx.x;
// One thread broadcasts the reciprocal of the diagonal via shared memory
// so x[0] is read exactly once per block.
__shared__ magmaDoubleComplex factor;
if (threadIdx.x == 0) {
factor = MAGMA_Z_MAKE(1.0/MAGMA_Z_REAL(x[0]), 0.0);
}
__syncthreads();
if ( id < n && id >0) {
x[id*incx] = x[id*incx] * factor;
}
}
// Host wrapper for kernel_zdscal: divides x[1:n-1] by Re(x[0]) on
// magma_stream, covering n elements with zdscal_bs-wide blocks.
void zpotf2_zdscal(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx)
{
/*
Specialized Zdscal perform x[1:n-1]/x[0]
*/
dim3 threads(zdscal_bs, 1, 1);
// Ceiling division; the kernel bounds-checks id < n.
int num_blocks = (n - 1)/zdscal_bs + 1;
dim3 grid(num_blocks,1);
hipLaunchKernelGGL(( kernel_zdscal), dim3(grid), dim3(threads), 0, magma_stream , n, x, incx);
}
#if defined(PRECISION_z) || defined(PRECISION_c)
// Conjugate each of the n elements of x (stride incx) in place.
__global__ void kernel_zlacgv(int n, magmaDoubleComplex *x, int incx)
{
int id = blockIdx.x * zlacgv_bs + threadIdx.x;
if ( id < n ) {
x[id*incx] = MAGMA_Z_CNJG(x[id*incx]);
}
}
/**
Purpose
-------
ZLACGV conjugates a complex vector of length N.
Arguments
---------
@param[in]
n INTEGER
The length of the vector X. N >= 0.
@param[in,out]
x COMPLEX*16 array, dimension
(1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
@param[in]
incx INTEGER
The spacing between successive elements of X.
@ingroup magma_zposv_aux
********************************************************************/
// Host wrapper launching kernel_zlacgv on magma_stream; see the contract in
// the block comment above. The kernel bounds-checks, so the n == 0 case
// (which still launches one block) is harmless.
void zlacgv(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx)
{
dim3 threads(zlacgv_bs, 1, 1);
// Ceiling division over zlacgv_bs-wide blocks.
int num_blocks = (n - 1)/zlacgv_bs + 1;
dim3 grid(num_blocks,1);
hipLaunchKernelGGL(( kernel_zlacgv), dim3(grid), dim3(threads), 0, magma_stream , n, x, incx);
}
#endif // defined(PRECISION_z) || defined(PRECISION_c)
| c4d842776a8407fdc64bc793e252f31deb92bfd9.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define PRECISION_z
//#if (GPUSHMEM < 200)
#define zdotc_max_bs 512 // 512 is max threads for 1.x cards
//#else
//#define zdotc_max_bs 1024
//#endif
void zpotf2_zdscal(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx);
void zpotf2_zdotc(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx);
#if defined(PRECISION_z) || defined(PRECISION_c)
void zlacgv(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx);
#endif
/**
Purpose
-------
zpotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@ingroup magma_zposv_aux
********************************************************************/
extern "C" magma_int_t
magma_zpotf2_gpu(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t *info )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t j;
*info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
*info = -1;
} else if (n < 0 || n > zdotc_max_bs) {
*info = -2;
} else if (ldda < max(1,n)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (n == 0) {
return *info;
}
magmaDoubleComplex alpha = MAGMA_Z_NEG_ONE;
magmaDoubleComplex beta = MAGMA_Z_ONE;
if (uplo == MagmaUpper) {
for(j = 0; j < n; j++) {
zpotf2_zdotc(j, dA(0,j), 1); // including zdotc product and update a(j,j)
if (j < n) {
#if defined(PRECISION_z) || defined(PRECISION_c)
zlacgv(j, dA(0, j), 1);
#endif
magma_zgemv( MagmaTrans, j, n-j-1,
alpha, dA(0, j+1), ldda,
dA(0, j), 1,
beta, dA(j, j+1), ldda);
#if defined(PRECISION_z) || defined(PRECISION_c)
zlacgv(j, dA(0, j), 1);
#endif
zpotf2_zdscal(n-j, dA(j,j), ldda);
}
}
}
else {
for(j = 0; j < n; j++) {
zpotf2_zdotc(j, dA(j,0), ldda); // including zdotc product and update a(j,j)
if (j < n) {
#if defined(PRECISION_z) || defined(PRECISION_c)
zlacgv(j, dA(j, 0), ldda);
#endif
magma_zgemv( MagmaNoTrans, n-j-1, j,
alpha, dA(j+1, 0), ldda,
dA(j,0), ldda,
beta, dA(j+1, j), 1 );
#if defined(PRECISION_z) || defined(PRECISION_c)
zlacgv(j, dA(j, 0), ldda);
#endif
zpotf2_zdscal(n-j, dA(j,j), 1);
}
}
}
return *info;
}
#define zdscal_bs 32
#define zdotc_bs 512
#define zlacgv_bs 512
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ double shared_data[];
__global__ void kernel_zdotc(int n, magmaDoubleComplex *x, int incx, int threadSize)
{
int tx = threadIdx.x;
double *sdata = shared_data;
magmaDoubleComplex res = MAGMA_Z_ZERO;
if (tx < n) {
res = x[tx*incx];
}
sdata[tx] = MAGMA_Z_REAL(res * MAGMA_Z_CNJG(res));
__syncthreads();
for(int s = blockDim.x/2; s > 32; s >>= 1 ) {
if (tx < s) {
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if (tx < 32) {
volatile double* smem = sdata;
smem[tx] += smem[tx+32];
smem[tx] += smem[tx+16];
smem[tx] += smem[tx+8];
smem[tx] += smem[tx+4];
smem[tx] += smem[tx+2];
smem[tx] += smem[tx+1];
}
if (tx == 0) {
double xreal = MAGMA_Z_REAL(x[n*incx]);
x[n*incx] = MAGMA_Z_MAKE( sqrt(xreal - sdata[0]), 0 );
}
}
void zpotf2_zdotc(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx)
{
/*
Specialized Zdotc
1) performs zdotc sum = x[0:n-1]*conj(x[0:n-1])
2) updates x[n] = sqrt(x[n]-sum);
*/
if (n > zdotc_max_bs) {
fprintf( stderr, "n = %d > %d is not supported in zpotf2_zdotc\n", (int) n, (int) zdotc_max_bs);
return;
}
int threadSize;
if (n <= 1024 && n > 512) {
threadSize = 1024;
}
else if (n <= 512 && n > 256 ) {
threadSize = 512;
}
else if (n <= 256 && n > 128) {
threadSize = 256;
}
else if (n <= 128 && n > 64) {
threadSize = 128;
}
else {
threadSize = 64;
}
kernel_zdotc<<< 1, threadSize, threadSize * sizeof(double), magma_stream>>> (n, x, incx, threadSize);
}
__global__ void kernel_zdscal(int n, magmaDoubleComplex *x, int incx)
{
int id = blockIdx.x * zdscal_bs + threadIdx.x;
__shared__ magmaDoubleComplex factor;
if (threadIdx.x == 0) {
factor = MAGMA_Z_MAKE(1.0/MAGMA_Z_REAL(x[0]), 0.0);
}
__syncthreads();
if ( id < n && id >0) {
x[id*incx] = x[id*incx] * factor;
}
}
void zpotf2_zdscal(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx)
{
/*
Specialized Zdscal perform x[1:n-1]/x[0]
*/
dim3 threads(zdscal_bs, 1, 1);
int num_blocks = (n - 1)/zdscal_bs + 1;
dim3 grid(num_blocks,1);
kernel_zdscal<<< grid, threads, 0, magma_stream >>> (n, x, incx);
}
#if defined(PRECISION_z) || defined(PRECISION_c)
__global__ void kernel_zlacgv(int n, magmaDoubleComplex *x, int incx)
{
int id = blockIdx.x * zlacgv_bs + threadIdx.x;
if ( id < n ) {
x[id*incx] = MAGMA_Z_CNJG(x[id*incx]);
}
}
/**
Purpose
-------
ZLACGV conjugates a complex vector of length N.
Arguments
---------
@param[in]
n INTEGER
The length of the vector X. N >= 0.
@param[in,out]
x COMPLEX*16 array, dimension
(1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
@param[in]
incx INTEGER
The spacing between successive elements of X.
@ingroup magma_zposv_aux
********************************************************************/
void zlacgv(magma_int_t n, magmaDoubleComplex *x, magma_int_t incx)
{
dim3 threads(zlacgv_bs, 1, 1);
int num_blocks = (n - 1)/zlacgv_bs + 1;
dim3 grid(num_blocks,1);
kernel_zlacgv<<< grid, threads, 0, magma_stream >>> (n, x, incx);
}
#endif // defined(PRECISION_z) || defined(PRECISION_c)
|
a964db3785f32954a07bc721471f117db93ec3bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Grid-stride partial reduction: each thread sums every stride-th element of
// d_array (stride = numberOfBlocks * numberOfThreadsPerBlock, passed in by
// the host instead of being read from gridDim/blockDim) and stores its
// partial sum in d_global[index]. d_global must hold one int per launched
// thread; the host is expected to finish the reduction over d_global.
// NOTE(review): elementsPerThread is never used and `j` is incremented but
// never read -- both look like leftovers; confirm with callers before removal.
__global__ void parallelReduction(int *d_array , int numberOfElements, int elementsPerThread,int numberOfThreadsPerBlock,int numberOfBlocks,int *d_global)
{
int index = blockIdx.x * blockDim.x + threadIdx.x ;
int sum = 0;
int j=0;
for(int i=index;i<numberOfElements;i = i+(numberOfBlocks*numberOfThreadsPerBlock))
{
sum = sum + d_array[i];
j++;
}
d_global[index] = sum;
} | a964db3785f32954a07bc721471f117db93ec3bc.cu | #include "includes.h"
// Grid-stride partial reduction (CUDA twin of the HIP version above): each
// thread accumulates every stride-th element of d_array, where the stride
// numberOfBlocks * numberOfThreadsPerBlock must match the actual launch
// configuration, and writes one partial sum per thread into d_global.
// NOTE(review): elementsPerThread and the local `j` are dead -- candidates
// for cleanup once callers are checked.
__global__ void parallelReduction(int *d_array , int numberOfElements, int elementsPerThread,int numberOfThreadsPerBlock,int numberOfBlocks,int *d_global)
{
int index = blockIdx.x * blockDim.x + threadIdx.x ;
int sum = 0;
int j=0;
for(int i=index;i<numberOfElements;i = i+(numberOfBlocks*numberOfThreadsPerBlock))
{
sum = sum + d_array[i];
j++;
}
d_global[index] = sum;
} |
00e12cfc133fdbe001dfdeecd12a54f059bcf01f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, softwareg
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nms_with_mask_impl.cuh"
#include <limits>
#include <algorithm>
// Smallest power of two that is >= v (v itself when already a power of two).
// Bit-smearing trick: after OR-ing in right shifts of 1, 2, 4, 8 and 16,
// every bit below the highest set bit of (v - 1) is set, so adding one
// lands exactly on the next power of two.
int NmsRoundUpPower2(int v) {
  v -= 1;
  for (int shift = 1; shift <= 16; shift <<= 1) {
    v |= v >> shift;
  }
  return v + 1;
}
// Device-side helper: exchange the values pointed to by lhs and rhs.
// Used by the bitonic sort below to swap keys and their indices in tandem.
template <typename T>
__inline__ __device__ void Swap(T *lhs, T *rhs) {
T tmp = lhs[0];
lhs[0] = rhs[0];
rhs[0] = tmp;
}
// Initialize per row mask array to all true
// Grid-stride fill: set all numSq entries of row_mask (the flattened
// num x num suppression matrix) to true before NmsPass refines them.
__global__ void MaskInit(int numSq, bool *row_mask) {
for (int mat_pos = blockIdx.x * blockDim.x + threadIdx.x; mat_pos < numSq; mat_pos += blockDim.x * gridDim.x) {
row_mask[mat_pos] = true;
}
}
// copy data from input to output array sorted by indices returned from bitonic sort
// flips boxes if asked to, default - false -> if (x1/y1 > x2/y2)
// Gather boxes from data_in into data_out ordered by index_buff read back to
// front, i.e. descending score after the ascending bitonic sort. Each box is
// a box_size-wide record; fields 0..4 are copied (coordinates + score).
// With flip_mode set, coordinate pairs are reordered so x1 <= x2 and
// y1 <= y2. NOTE(review): both paths copy exactly 5 fields -- appears to
// assume box_size == 5; confirm before using other record widths.
template <typename T>
__global__ void PopulateOutput(T *data_in, T *data_out, int *index_buff, const int num, int box_size,
bool flip_mode = false) {
for (int box_num = blockIdx.x * blockDim.x + threadIdx.x; box_num < num; box_num += blockDim.x * gridDim.x) {
int correct_index = index_buff[(num - 1) - box_num]; // flip the array around
int correct_arr_start = correct_index * box_size;
int current_arr_start = box_num * box_size;
if (flip_mode) { // flip boxes
// check x: emit the smaller coordinate first
if (data_in[correct_arr_start + 0] > data_in[correct_arr_start + 2]) {
data_out[current_arr_start + 0] = data_in[correct_arr_start + 2];
data_out[current_arr_start + 2] = data_in[correct_arr_start + 0];
} else {
data_out[current_arr_start + 0] = data_in[correct_arr_start + 0];
data_out[current_arr_start + 2] = data_in[correct_arr_start + 2];
}
// check y: same normalization for the vertical pair
if (data_in[correct_arr_start + 1] > data_in[correct_arr_start + 3]) {
data_out[current_arr_start + 1] = data_in[correct_arr_start + 3];
data_out[current_arr_start + 3] = data_in[correct_arr_start + 1];
} else {
data_out[current_arr_start + 1] = data_in[correct_arr_start + 1];
data_out[current_arr_start + 3] = data_in[correct_arr_start + 3];
}
data_out[current_arr_start + 4] = data_in[correct_arr_start + 4];
} else { // default behaviour, don't flip
for (int x = 0; x < 5; x++) {
data_out[current_arr_start + x] = data_in[correct_arr_start + x];
}
}
}
}
// Compare two boxes stored in `output` at offsets box_A_start / box_B_start
// (layout per PopulateOutput: coordinate pairs at fields 0/2 and 1/3) and
// return true when their IoU does NOT exceed IOU_value, i.e. box B is not
// suppressed by box A. box_A_ix / box_B_ix are unused -- kept for the
// existing call signature.
template <typename T>
__inline__ __device__ bool IouDecision(T *output, int box_A_ix, int box_B_ix, int box_A_start, int box_B_start,
float IOU_value) {
// Intersection rectangle; width/height clamp to zero for disjoint boxes.
T x_1 = max(output[box_A_start + 0], output[box_B_start + 0]);
T y_1 = max(output[box_A_start + 1], output[box_B_start + 1]);
T x_2 = min(output[box_A_start + 2], output[box_B_start + 2]);
T y_2 = min(output[box_A_start + 3], output[box_B_start + 3]);
T width = max(x_2 - x_1, T(0)); // in case of no overlap
T height = max(y_2 - y_1, T(0));
T area1 = (output[box_A_start + 2] - output[box_A_start + 0]) * (output[box_A_start + 3] - output[box_A_start + 1]);
T area2 = (output[box_B_start + 2] - output[box_B_start + 0]) * (output[box_B_start + 3] - output[box_B_start + 1]);
T combined_area = area1 + area2;
// IoU = inter / (area1 + area2 - inter); box survives when IoU <= threshold.
return !(((width * height) / (combined_area - (width * height))) > IOU_value);
}
// populated return mask (init to all true) and return index array
// Grid-stride init of the NMS outputs: sel_idx becomes the identity
// permutation and every box starts marked as kept.
// NOTE(review): `output` and `box_size` are unused here -- kept only for
// signature symmetry with the sibling kernels.
template <typename T>
__global__ void Preprocess(const int num, int *sel_idx, bool *sel_boxes, T *output, int box_size) {
for (int box_num = blockIdx.x * blockDim.x + threadIdx.x; box_num < num; box_num += blockDim.x * gridDim.x) {
sel_idx[box_num] = box_num;
sel_boxes[box_num] = true;
}
}
// Run parallel NMS pass
// Every position in the row_mask array is updated wit correct IOU decision after being init to all True
// One IoU decision per cell of the num x num row_mask matrix. Only the
// strict upper triangle (box_j > box_i) is evaluated: boxes are sorted by
// descending score, so a box can only be suppressed by an earlier, higher
// scoring one. The diagonal and lower triangle keep the `true` written by
// MaskInit. NOTE(review): `sel_boxes` is unused in this kernel.
template <typename T>
__global__ void NmsPass(const int num, const float IOU_value, T *output, bool *sel_boxes, int box_size,
bool *row_mask) {
int box_i, box_j, box_i_start_index, box_j_start_index; // actual input data indexing
for (int mask_index = blockIdx.x * blockDim.x + threadIdx.x; mask_index < num * num;
mask_index += blockDim.x * gridDim.x) {
box_i = mask_index / num; // row in 2d row_mask array
box_j = mask_index % num; // col in 2d row_mask array
if (box_j > box_i) { // skip when box_j index lower/equal to box_i - will remain true
box_i_start_index = box_i * box_size; // adjust starting indices
box_j_start_index = box_j * box_size;
// row_mask[i][j] == true means box_j is NOT suppressed by box_i.
row_mask[mask_index] = IouDecision(output, box_i, box_j, box_i_start_index, box_j_start_index, IOU_value);
}
}
}
// Collapse row_mask into the final per-box keep flags. A group of
// threads_per_warp consecutive threads cooperates on one box (warp_id):
// each lane scans a strided slice of column warp_id, then the per-lane
// verdicts are AND-reduced across the group with shuffles and lane 0 writes
// the result. Instantiated as ReducePass<32> and launched with 512 threads
// per block (see CalNms), so each group maps onto one hardware warp and the
// full 0xffffffff participation mask is valid.
template <int threads_per_warp>
__global__ void ReducePass(const int num, bool *sel_boxes, bool *row_mask) {
for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num * threads_per_warp; tid += blockDim.x * gridDim.x) {
int warp_id = tid / threads_per_warp;
// All lanes of a group share warp_id, so the whole group skips together.
if (!sel_boxes[warp_id]) {
continue;
}
int lane_id = tid % threads_per_warp;
int t_mask = sel_boxes[warp_id] ? 1 : 0;
if (t_mask > 0) {
// Lane verdict: box warp_id stays only if no box j left it suppressed.
for (int j = lane_id; j < num; j += threads_per_warp) {
if (!row_mask[j * num + warp_id]) {
t_mask = 0;
break;
}
}
}
__syncwarp();
// Warp-wide AND reduction of the lane verdicts into lane 0.
for (int offset = threads_per_warp / 2; offset > 0; offset /= 2) {
t_mask &= __shfl_down_sync(0xffffffff, t_mask, offset);
}
if (lane_id == 0) {
sel_boxes[warp_id] = t_mask > 0;
}
}
}
// Sorting function based on BitonicSort from TopK kernel
// Single-block bitonic sort of the `inner` box scores (field 4 of each
// box_size-wide record in `input`), ascending. Keys land in data_buff and
// the matching permutation in index_buff; slots past `inner` are padded with
// +infinity so they sort to the end. ceil_power2 must be the power-of-two
// padding of `inner` (see NmsRoundUpPower2) and fit in one block's strided
// loop. NOTE(review): `outer` is unused -- kept for the exported signature.
template <typename T>
__global__ void NmsBitonicSortByKeyKernel(const int outer, const int inner, const int ceil_power2, T *input,
T *data_buff, int *index_buff, int box_size) {
// Load keys (scores) and identity indices; pad the tail with max<T>.
for (int i = threadIdx.x; i < ceil_power2; i += blockDim.x) {
data_buff[i] = (i < inner) ? input[(i * box_size) + 4] : std::numeric_limits<T>::max();
index_buff[i] = i;
}
__syncthreads();
// Standard bitonic network: stage width i, partner distance j.
for (size_t i = 2; i <= ceil_power2; i <<= 1) {
for (size_t j = (i >> 1); j > 0; j >>= 1) {
for (size_t tid = threadIdx.x; tid < ceil_power2; tid += blockDim.x) {
size_t tid_comp = tid ^ j;
// Each XOR pair is handled exactly once, by its lower index.
if (tid_comp > tid) {
if ((tid & i) == 0) {
if (data_buff[tid] > data_buff[tid_comp]) {
Swap(&data_buff[tid], &data_buff[tid_comp]);
Swap(&index_buff[tid], &index_buff[tid_comp]);
}
} else {
if (data_buff[tid] < data_buff[tid_comp]) {
Swap(&data_buff[tid], &data_buff[tid_comp]);
Swap(&index_buff[tid], &index_buff[tid_comp]);
}
}
}
}
// Barrier between compare-exchange stages (uniform across the block).
__syncthreads();
}
}
}
// Host wrapper, NMS stage 2 (after CalSort): reset the num x num suppression
// mask, gather boxes from `input` into `output` in descending-score order
// using index_buff, and initialise sel_idx / sel_boxes. All launches are
// asynchronous on cuda_stream.
template <typename T>
void CalPreprocess(const int num, int *sel_idx, bool *sel_boxes, T *input, T *output, int *index_buff, int box_size,
bool *row_mask, hipStream_t cuda_stream) {
int total_val = num * num;
hipLaunchKernelGGL(( MaskInit), dim3(GET_BLOCKS(total_val)), dim3(GET_THREADS), 0, cuda_stream, total_val, row_mask);
// default for flipping boxes -> false (provision available to flip if API updated)
hipLaunchKernelGGL(( PopulateOutput), dim3(GET_BLOCKS(num)), dim3(GET_THREADS), 0, cuda_stream, input, output, index_buff, num, box_size, false);
hipLaunchKernelGGL(( Preprocess), dim3(GET_BLOCKS(num)), dim3(GET_THREADS), 0, cuda_stream, num, sel_idx, sel_boxes, output, box_size);
}
// Host wrapper, NMS stage 1: single-block bitonic sort of the num box
// scores (ascending), leaving sorted keys in data_buff and the permutation
// in index_buff. The block size is capped at GET_THREADS; the kernel's
// strided loops cover the rest. NOTE(review): `data_out` is unused -- kept
// for the exported signature.
template <typename T>
void CalSort(const int &num, T *data_in, T *data_out, int *index_buff, T *data_buff, int box_size,
hipStream_t stream) {
int ceil_p_2 = NmsRoundUpPower2(num);
int thread = ::min(ceil_p_2, GET_THREADS);
hipLaunchKernelGGL(( NmsBitonicSortByKeyKernel), dim3(1), dim3(thread), 0, stream, 1, num, ceil_p_2, data_in, data_buff, index_buff, box_size);
}
// Host wrapper, NMS stage 3: fill row_mask with pairwise IoU decisions over
// the score-sorted boxes in `output`, then reduce each column into
// sel_boxes (true = box survives suppression). Asynchronous on cuda_stream.
template <typename T>
void CalNms(const int num, const float IOU_value, T *output, bool *sel_boxes, int box_size, bool *row_mask,
hipStream_t cuda_stream) {
// run kernel for every position in row_mask array = (num * num) size
int row_mask_size = num * num;
hipLaunchKernelGGL(( NmsPass), dim3(GET_BLOCKS(row_mask_size)), dim3(GET_THREADS), 0, cuda_stream, num, IOU_value, output, sel_boxes, box_size,
row_mask);
// 32 threads (one warp) per box; 512-thread blocks match ReducePass<32>.
hipLaunchKernelGGL(( ReducePass<32>), dim3(GET_BLOCKS(32 * num)), dim3(512), 0, cuda_stream, num, sel_boxes, row_mask);
}
template void CalSort<float>(const int &inner, float *data_in, float *data_out, int *index_buff, float *data_buff,
int box_size, hipStream_t stream);
template void CalPreprocess<float>(const int num, int *sel_idx, bool *sel_boxes, float *input, float *output,
int *index_buff, int box_size, bool *row_mask, hipStream_t cuda_stream);
template void CalNms<float>(const int num, const float IOU_value, float *output, bool *sel_boxes, int box_size,
bool *row_mask, hipStream_t cuda_stream);
| 00e12cfc133fdbe001dfdeecd12a54f059bcf01f.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, softwareg
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nms_with_mask_impl.cuh"
#include <limits>
#include <algorithm>
// Round v up to the next power of two; a power of two maps to itself.
// Saturates every bit below the highest set bit of (v - 1) by OR-ing in
// right shifts of 1, 2, 4, 8 and 16, then increments.
int NmsRoundUpPower2(int v) {
  int bits = v - 1;
  bits |= bits >> 1;
  bits |= bits >> 2;
  bits |= bits >> 4;
  bits |= bits >> 8;
  bits |= bits >> 16;
  return bits + 1;
}
template <typename T>
__inline__ __device__ void Swap(T *lhs, T *rhs) {
T tmp = lhs[0];
lhs[0] = rhs[0];
rhs[0] = tmp;
}
// Initialize per row mask array to all true
__global__ void MaskInit(int numSq, bool *row_mask) {
for (int mat_pos = blockIdx.x * blockDim.x + threadIdx.x; mat_pos < numSq; mat_pos += blockDim.x * gridDim.x) {
row_mask[mat_pos] = true;
}
}
// copy data from input to output array sorted by indices returned from bitonic sort
// flips boxes if asked to, default - false -> if (x1/y1 > x2/y2)
template <typename T>
__global__ void PopulateOutput(T *data_in, T *data_out, int *index_buff, const int num, int box_size,
bool flip_mode = false) {
for (int box_num = blockIdx.x * blockDim.x + threadIdx.x; box_num < num; box_num += blockDim.x * gridDim.x) {
int correct_index = index_buff[(num - 1) - box_num]; // flip the array around
int correct_arr_start = correct_index * box_size;
int current_arr_start = box_num * box_size;
if (flip_mode) { // flip boxes
// check x
if (data_in[correct_arr_start + 0] > data_in[correct_arr_start + 2]) {
data_out[current_arr_start + 0] = data_in[correct_arr_start + 2];
data_out[current_arr_start + 2] = data_in[correct_arr_start + 0];
} else {
data_out[current_arr_start + 0] = data_in[correct_arr_start + 0];
data_out[current_arr_start + 2] = data_in[correct_arr_start + 2];
}
// check y
if (data_in[correct_arr_start + 1] > data_in[correct_arr_start + 3]) {
data_out[current_arr_start + 1] = data_in[correct_arr_start + 3];
data_out[current_arr_start + 3] = data_in[correct_arr_start + 1];
} else {
data_out[current_arr_start + 1] = data_in[correct_arr_start + 1];
data_out[current_arr_start + 3] = data_in[correct_arr_start + 3];
}
data_out[current_arr_start + 4] = data_in[correct_arr_start + 4];
} else { // default behaviour, don't flip
for (int x = 0; x < 5; x++) {
data_out[current_arr_start + x] = data_in[correct_arr_start + x];
}
}
}
}
template <typename T>
__inline__ __device__ bool IouDecision(T *output, int box_A_ix, int box_B_ix, int box_A_start, int box_B_start,
float IOU_value) {
T x_1 = max(output[box_A_start + 0], output[box_B_start + 0]);
T y_1 = max(output[box_A_start + 1], output[box_B_start + 1]);
T x_2 = min(output[box_A_start + 2], output[box_B_start + 2]);
T y_2 = min(output[box_A_start + 3], output[box_B_start + 3]);
T width = max(x_2 - x_1, T(0)); // in case of no overlap
T height = max(y_2 - y_1, T(0));
T area1 = (output[box_A_start + 2] - output[box_A_start + 0]) * (output[box_A_start + 3] - output[box_A_start + 1]);
T area2 = (output[box_B_start + 2] - output[box_B_start + 0]) * (output[box_B_start + 3] - output[box_B_start + 1]);
T combined_area = area1 + area2;
return !(((width * height) / (combined_area - (width * height))) > IOU_value);
}
// populated return mask (init to all true) and return index array
template <typename T>
__global__ void Preprocess(const int num, int *sel_idx, bool *sel_boxes, T *output, int box_size) {
for (int box_num = blockIdx.x * blockDim.x + threadIdx.x; box_num < num; box_num += blockDim.x * gridDim.x) {
sel_idx[box_num] = box_num;
sel_boxes[box_num] = true;
}
}
// Run parallel NMS pass
// Every position in the row_mask array is updated wit correct IOU decision after being init to all True
template <typename T>
__global__ void NmsPass(const int num, const float IOU_value, T *output, bool *sel_boxes, int box_size,
bool *row_mask) {
int box_i, box_j, box_i_start_index, box_j_start_index; // actual input data indexing
for (int mask_index = blockIdx.x * blockDim.x + threadIdx.x; mask_index < num * num;
mask_index += blockDim.x * gridDim.x) {
box_i = mask_index / num; // row in 2d row_mask array
box_j = mask_index % num; // col in 2d row_mask array
if (box_j > box_i) { // skip when box_j index lower/equal to box_i - will remain true
box_i_start_index = box_i * box_size; // adjust starting indices
box_j_start_index = box_j * box_size;
row_mask[mask_index] = IouDecision(output, box_i, box_j, box_i_start_index, box_j_start_index, IOU_value);
}
}
}
template <int threads_per_warp>
__global__ void ReducePass(const int num, bool *sel_boxes, bool *row_mask) {
for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num * threads_per_warp; tid += blockDim.x * gridDim.x) {
int warp_id = tid / threads_per_warp;
if (!sel_boxes[warp_id]) {
continue;
}
int lane_id = tid % threads_per_warp;
int t_mask = sel_boxes[warp_id] ? 1 : 0;
if (t_mask > 0) {
for (int j = lane_id; j < num; j += threads_per_warp) {
if (!row_mask[j * num + warp_id]) {
t_mask = 0;
break;
}
}
}
__syncwarp();
for (int offset = threads_per_warp / 2; offset > 0; offset /= 2) {
t_mask &= __shfl_down_sync(0xffffffff, t_mask, offset);
}
if (lane_id == 0) {
sel_boxes[warp_id] = t_mask > 0;
}
}
}
// Sorting function based on BitonicSort from TopK kernel
template <typename T>
__global__ void NmsBitonicSortByKeyKernel(const int outer, const int inner, const int ceil_power2, T *input,
T *data_buff, int *index_buff, int box_size) {
for (int i = threadIdx.x; i < ceil_power2; i += blockDim.x) {
data_buff[i] = (i < inner) ? input[(i * box_size) + 4] : std::numeric_limits<T>::max();
index_buff[i] = i;
}
__syncthreads();
for (size_t i = 2; i <= ceil_power2; i <<= 1) {
for (size_t j = (i >> 1); j > 0; j >>= 1) {
for (size_t tid = threadIdx.x; tid < ceil_power2; tid += blockDim.x) {
size_t tid_comp = tid ^ j;
if (tid_comp > tid) {
if ((tid & i) == 0) {
if (data_buff[tid] > data_buff[tid_comp]) {
Swap(&data_buff[tid], &data_buff[tid_comp]);
Swap(&index_buff[tid], &index_buff[tid_comp]);
}
} else {
if (data_buff[tid] < data_buff[tid_comp]) {
Swap(&data_buff[tid], &data_buff[tid_comp]);
Swap(&index_buff[tid], &index_buff[tid_comp]);
}
}
}
}
__syncthreads();
}
}
}
template <typename T>
void CalPreprocess(const int num, int *sel_idx, bool *sel_boxes, T *input, T *output, int *index_buff, int box_size,
bool *row_mask, cudaStream_t cuda_stream) {
int total_val = num * num;
MaskInit<<<GET_BLOCKS(total_val), GET_THREADS, 0, cuda_stream>>>(total_val, row_mask);
// default for flipping boxes -> false (provision available to flip if API updated)
PopulateOutput<<<GET_BLOCKS(num), GET_THREADS, 0, cuda_stream>>>(input, output, index_buff, num, box_size, false);
Preprocess<<<GET_BLOCKS(num), GET_THREADS, 0, cuda_stream>>>(num, sel_idx, sel_boxes, output, box_size);
}
template <typename T>
void CalSort(const int &num, T *data_in, T *data_out, int *index_buff, T *data_buff, int box_size,
cudaStream_t stream) {
int ceil_p_2 = NmsRoundUpPower2(num);
int thread = std::min(ceil_p_2, GET_THREADS);
NmsBitonicSortByKeyKernel<<<1, thread, 0, stream>>>(1, num, ceil_p_2, data_in, data_buff, index_buff, box_size);
}
template <typename T>
void CalNms(const int num, const float IOU_value, T *output, bool *sel_boxes, int box_size, bool *row_mask,
cudaStream_t cuda_stream) {
// run kernel for every position in row_mask array = (num * num) size
int row_mask_size = num * num;
NmsPass<<<GET_BLOCKS(row_mask_size), GET_THREADS, 0, cuda_stream>>>(num, IOU_value, output, sel_boxes, box_size,
row_mask);
ReducePass<32><<<GET_BLOCKS(32 * num), 512, 0, cuda_stream>>>(num, sel_boxes, row_mask);
}
template void CalSort<float>(const int &inner, float *data_in, float *data_out, int *index_buff, float *data_buff,
int box_size, cudaStream_t stream);
template void CalPreprocess<float>(const int num, int *sel_idx, bool *sel_boxes, float *input, float *output,
int *index_buff, int box_size, bool *row_mask, cudaStream_t cuda_stream);
template void CalNms<float>(const int num, const float IOU_value, float *output, bool *sel_boxes, int box_size,
bool *row_mask, cudaStream_t cuda_stream);
|
2378815f45b76d106105f5f4bd24f8ff050eca36.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#define DGEMM dgemm_
#define DSPEV dspev_
#define PRINTF printf
#define EXIT exit
#define CLOCKS_PER_SEC_C 1000000
#define MAXTIME 2147.48
#define MAX_BLOCKS 65521 // Max dimension size of a grid size (x,y,z): (2147483647, 65535, 65535)
#define THREADS_PER_BLOCK 1024
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a HIP API failure with its source location; optionally terminate
// the process with the error code (used via the gpuErrchk macro above).
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
   if (code == hipSuccess) return;  // fast path: nothing to report
   fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
   if (abort) exit(code);
}
// Device kernels (defined later in this file).
__global__ void tolerance_check(double *tol_now, double *d_Titer, double *d_mat3, int nstate_sq);
__global__ void initialize_mat1(double * mat1, double * S, int nstate_sq);
__global__ void initialize_identity(double * mat, double scalar, int nstate);
// Host helpers: CPU timer and the three T = S^{-1/2} construction strategies.
void cputime(double *);
void get_iter_Tmat(double *,double *,int );
void get_diag_Tmat(double *,double *,int );
void get_unit_Tmat(double *,int );
// Fortran BLAS dgemm (column-major), linked in for the CPU reference path.
extern "C" { void DGEMM (char *, char *, int *, int *, int *,double *,double *, int *, double *, int *, double *, double *, int * ); }
//============================================================================
// Host-callable GEMM wrapper: Z = ALPHA*op(X)*op(Y) + BETA*Z via hipBLAS.
// ITYPE_* == 1 means use the operand as-is, == 0 means use its transpose
// (the inorm/itransp convention of the callers). Result is NRZ x NCZ with
// inner dimension NXY; matrices are column-major. Allocates device buffers,
// copies operands over, runs the GEMM, copies Z back, and frees everything.
// NOTE(review): LDX/LDY/LDZ are accepted but unused; m and k are used as the
// leading dimensions, which is only safe for the square matrices used here.
//============================================================================
void matmul(double *X, int *LDX, int *ITYPE_X,
            double *Y, int *LDY, int *ITYPE_Y,
            double *Z, int *LDZ, int *NRZ, int *NCZ, int *NXY,
            double *ALPHA, double *BETA)
{
  int m = *NRZ;
  int n = *NCZ;
  int k = *NXY;
  // BUG FIX: the flags were tested as pointers ("ITYPE_X"), which is always
  // non-NULL, so the transpose branch could never be taken. Dereference to
  // read the caller's 0/1 flag.
  hipblasOperation_t MATX = (*ITYPE_X) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasOperation_t MATY = (*ITYPE_Y) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasHandle_t handle; // hipBLAS context
  // Step 1: Allocate memory on the device:
  double *d_X, *d_Y, *d_Z;
  hipMalloc(&d_X, (m*k)*sizeof(double)); // X is an m x k matrix
  hipMalloc(&d_Y, (k*n)*sizeof(double)); // Y is a k x n matrix
  hipMalloc(&d_Z, (m*n)*sizeof(double)); // Z is an m x n matrix
  hipblasCreate(&handle); // initialize hipBLAS context
  // Step 2: Initialize device memory from host:
  hipblasSetMatrix(m, k, sizeof(double), X, m, d_X, m);
  hipblasSetMatrix(k, n, sizeof(double), Y, k, d_Y, k);
  hipblasSetMatrix(m, n, sizeof(double), Z, m, d_Z, m);
  // Step 3: Perform the GEMM on the device:
  hipblasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m);
  // Step 4: Copy the result back to the host:
  hipblasGetMatrix(m, n, sizeof(double), d_Z, m, Z, m);
  // Step 5: Clean up
  hipFree(d_X);
  hipFree(d_Y);
  hipFree(d_Z);
  hipblasDestroy(handle);
}
//============================================================================
// Device-resident GEMM: d_Z = ALPHA*op(d_X)*op(d_Y) + BETA*d_Z using an
// existing hipBLAS handle (no allocation and no host<->device traffic).
// ITYPE_* == 1 selects the untransposed operand, == 0 its transpose.
// NOTE(review): LDX/LDY/LDZ are unused; m/k serve as leading dimensions,
// valid only for the square operands used in this file.
//============================================================================
void device_matmul(double *d_X, int *LDX, int *ITYPE_X,
                   double *d_Y, int *LDY, int *ITYPE_Y,
                   double *d_Z, int *LDZ, int *NRZ, int *NCZ, int *NXY,
                   double *ALPHA, double *BETA, hipblasHandle_t handle)
{
  int m = *NRZ;
  int n = *NCZ;
  int k = *NXY;
  // BUG FIX: flags were tested as pointers (always true), so OP_T was never
  // selected; dereference to read the intended 0/1 flag value.
  hipblasOperation_t MATX = (*ITYPE_X) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasOperation_t MATY = (*ITYPE_Y) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m);
}
#define _USE_LAPACK_
#ifdef _USE_LAPACK_
extern "C" {void DSPEV(char *, char *, int *, double [], double [], double [], int *, double [], int *);}
#endif
//=======================================================================
//ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
//=======================================================================
//=======================================================================
// Driver: build a random symmetric test matrix S, run the iterative
// S^{-1/2} solver three times (timing), and report error/symmetry stats.
//=======================================================================
int main()
//=======================================================================
{// begin routine
//=======================================================================
// I) Read the problem size and allocate the matrices.
  int nstate;
  PRINTF("\n============================================\n");
  PRINTF("Enter the matrix size : ");
  if(scanf("%d",&nstate)){}
  else{printf("%s\n", "Please enter an integer");}
  int nstate_sq = nstate*nstate;
  // Value-initialize (zero) the buffers: Tdiag is read by the error check
  // below even when get_diag_Tmat() is left commented out, and the old
  // plain `new double[...]` left it uninitialized (undefined behavior).
  double *S = new double[nstate_sq]();
  double *Tunit = new double[nstate_sq]();
  double *Tdiag = new double[nstate_sq]();
  double *Titer = new double[nstate_sq]();
//=======================================================================
// II) Build S = 2*I + small symmetric random perturbation (|delta|<=1e-3).
  PRINTF("Using random input\n\n");
  for(int i=0;i<nstate_sq;i++){S[i]=0.0;}
  for(int i=0;i<nstate;i++){int ind =i+nstate*i;S[ind]=2.0;}
  double seed=14571.0;
  srand48((long) seed);
  for(int i=0;i<nstate;i++){
    // BUG FIX: the inner loop used to test and increment i ("j=i;i<nstate;i++"),
    // which walked i off the end of the outer loop and perturbed only part
    // of the matrix; it must iterate over j.
    for(int j=i;j<nstate;j++){
      int ind = i+nstate*j;
      int indt = j+nstate*i;
      double rand=drand48();
      S[ind] += (rand-0.5)*2.0e-3;
      S[indt] = S[ind];  // keep S exactly symmetric
    }
  }//endfor
//=======================================================================
// III) Run the methods (diagonalization reference currently disabled).
//  get_unit_Tmat(Tunit,nstate);
//  get_diag_Tmat(S,Tdiag,nstate);
  get_iter_Tmat(S,Titer,nstate);
  get_iter_Tmat(S,Titer,nstate);
  get_iter_Tmat(S,Titer,nstate);
//=======================================================================
// IV) Check the iterative result against Tdiag (all zeros unless the
//     get_diag_Tmat call above is re-enabled) and check symmetry.
  double err=0.0;
  for(int i=0;i<nstate_sq;i++){
    double tmp=Tdiag[i]-Titer[i];
    tmp = tmp*tmp;
    err = (err > tmp ? err : tmp);
  }//endfor
  err = sqrt(err);
  PRINTF("Maximum error in any element : %g\n",err);
  err=0.0;
  for(int i=0;i<nstate;i++){
    for(int j=i;j<nstate;j++){
      int ind = i + j*nstate;
      int indt = j + i*nstate;
      double tmp=Titer[ind]-Titer[indt];
      tmp = tmp*tmp;
      err = (err > tmp ? err : tmp);
    }
  }//endfor
  err = sqrt(err);
  PRINTF("Deviation from symmetric : %g\n",err);
  PRINTF("============================================\n\n");
  // Release host buffers (previously leaked).
  delete [] S;
  delete [] Tunit;
  delete [] Tdiag;
  delete [] Titer;
  return 0;
//=======================================================================
}//end routine
//=======================================================================
//============================================================================
//cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
//============================================================================
// Diagonalize S and construct T=S^{-1/2} using eigenvalues and eigenvectors
//============================================================================
// Diagonalize the symmetric matrix S with LAPACK dspev and construct
// T = U * diag(sqrt(2/lambda_i)) * U^T, i.e. sqrt(2) * S^{-1/2}
// (the factor 2 is the deliberate occupation-number "HACK" noted below).
// S and T are nstate x nstate column-major; S is not modified.
void get_diag_Tmat(double *S,double *T,int nstate)
//============================================================================
{//begin routine
//============================================================================
// I) Get some scratch
double cpu1,cpu2;
cputime(&cpu1);
int nstate_sq = nstate*nstate;
double *umat = new double[nstate_sq];      // eigenvectors of S (from dspev)
double *scr_mat1 = new double[nstate_sq];  // packed S, later diag(sqrt(2/lambda))
double *scr_mat2 = new double[nstate_sq];  // intermediate GEMM product
double *s_eigs = new double[nstate];       // eigenvalues of S
double *scr1 = new double[3*nstate];       // dspev workspace (3n doubles)
double *scr2 = new double[3*nstate];       // NOTE(review): allocated but unused
//==========================================================================
// II. Diagonalize S using rs_ FORTRAN diagonalization routine
int ifound = 0;
int ierr = 0;
//----------------------------------------------------------------------
// Use LAPACK : Captain Jack is Happy.
#ifdef _USE_LAPACK_
ifound ++;
// Pack the lower triangle of column-major S into LAPACK 'L' packed storage:
// element (i,j), j <= i (1-based), lives at offset (i-1) + (j-1)*(2n-j)/2.
for(int i = 1; i <= nstate; i++){
for(int j = 1; j <= i; j++){
int ind = (i-1) + (j-1)*nstate;
int ind2 = (i-1) + (j-1)*(2*nstate-j)/2;
scr_mat1[ind2] = S[ind];
}}//endfor
char Vstuff ='V';  // compute eigenvectors as well as eigenvalues
char Lstuff ='L';  // packed storage holds the lower triangle
DSPEV(&Vstuff,&Lstuff,&nstate,scr_mat1,s_eigs,umat,&nstate,scr1,&ierr);
#endif
if(ifound!=1 || ierr != 0){
PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
PRINTF("Error trying to diagonalize S : %d %d\n",ifound,ierr);
PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
EXIT(1);
}//endif
//==========================================================================
// III. Compute inverse square root of eigenvalues: Occupation numbers
// are HACKED!!!!!
//----------------------------------------------------------------------
// A) Construct diagonal matrix using eigenvalues : sqrt(2/lamba)
for(int i = 0; i < nstate; i++){s_eigs[i] = sqrt(2.0/s_eigs[i]);}
memset(scr_mat1,0,sizeof(double)*nstate_sq);
for(int i = 0; i < nstate; i++){
int ind = i*nstate+i;
scr_mat1[ind]=s_eigs[i];
}/* endfor */
//------------------------------------------------------------------------
// B) Transform matrix back to original representation using eigenvectors:
//    scr_mat2 = diag * op(U) (second operand flagged transposed via itransp),
//    then T = U * scr_mat2. Verify matmul honors the transpose flag so this
//    computes U * D * U^T as intended.
double alpha = 1.0; double beta = 0.0;
int itransp = 0; int inorm = 1;
matmul(scr_mat1,&nstate,&inorm,umat,&nstate,&itransp,scr_mat2,
&nstate,&nstate,&nstate,&nstate,&alpha,&beta);
matmul(umat,&nstate,&inorm,scr_mat2,&nstate,&inorm,T,
&nstate,&nstate,&nstate,&nstate,&alpha,&beta);
//============================================================================
// IV) Free allocated temporary memory
delete [] umat;
delete [] scr_mat1;
delete [] scr_mat2;
delete [] s_eigs;
delete [] scr1;
delete [] scr2;
cputime(&cpu2);
PRINTF("nstate %d : cpu time diag : %g\n\n",nstate,cpu2-cpu1);
//============================================================================
} /* End function */
//============================================================================
//============================================================================
//cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
//============================================================================
// Set Tmax to the Unit matrix : remove cputime overhead of diag to test
// parallel performance
//============================================================================
// Fill Tunit with the nstate x nstate identity matrix (stand-in for the
// diagonalization result when benchmarking the parallel path).
void get_unit_Tmat(double *Tunit,int nstate){
  const int total = nstate*nstate;
  memset(Tunit, 0, total*sizeof(double));
  // Diagonal entry (i,i) of a column-major matrix sits at index i*(nstate+1).
  for(int d = 0; d < nstate; d++){Tunit[d*(nstate+1)] = 1.0;}
}
//============================================================================
/*==========================================================================*/
/*cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc*/
/*==========================================================================*/
/* Kernel to check tolerance directly on device */
/*==========================================================================*/
__global__
void tolerance_check(double *d_tol_now, double *d_Titer, double *d_mat3, int nstate_sq)
//============================================================================
{//begin routine
//============================================================================
// Per-block squared-difference reduction:
//   d_mat3[i] = (d_mat3[i] - d_Titer[i])^2, then each block folds its
//   elements in place and thread 0 writes its partial sum to
//   d_tol_now[blockIdx.x]. Launch with blockDim.x == THREADS_PER_BLOCK.
  int global_idx = threadIdx.x + blockDim.x * blockIdx.x;
  int local_idx = threadIdx.x;
  // I) d_mat3 = (d_mat3 - d_Titer) ^ 2
  if(global_idx < nstate_sq){ // last block may be partial
    double tmp = d_mat3[global_idx] - d_Titer[global_idx];
    d_mat3[global_idx] = tmp * tmp;
  }
  __syncthreads();
  // II) Number of valid elements owned by this block.
  unsigned int length = THREADS_PER_BLOCK;
  if((blockIdx.x + 1) == gridDim.x){ // last block - may not be full
    length = nstate_sq - THREADS_PER_BLOCK * (gridDim.x - 1); // THREADS_PER_BLOCK == blockDim.x
  }
  unsigned int global_start_idx = blockDim.x * blockIdx.x;
  // III) Pairwise in-place fold of this block's elements.
  // BUG FIX: __syncthreads() previously sat inside "if(global_idx < nstate_sq)",
  // so out-of-range threads of a partial last block skipped the barrier
  // (undefined behavior). The loop below runs uniformly for every thread in
  // the block; only the accumulation itself is predicated.
  while(length > 1){
    unsigned int next_length = (length + 1) / 2;
    unsigned int block2_size = length / 2;
    unsigned int global_split_idx = global_start_idx + next_length;
    unsigned int global_end_idx = global_start_idx + length;
    if(global_split_idx <= (unsigned int)global_idx && (unsigned int)global_idx < global_end_idx){
      d_mat3[global_idx - block2_size] += d_mat3[global_idx];
    }
    __syncthreads(); // now reached by ALL threads in the block
    length = next_length;
  }
  // IV) Thread 0 of each block publishes the block-local sum.
  if(local_idx == 0 && global_idx < nstate_sq){
    d_tol_now[blockIdx.x] = d_mat3[global_idx];
  }
}
/*==========================================================================*/
__global__
void initialize_mat1(double * mat1, double * S, int nstate_sq)
{
  // One thread per element: mat1 = S / 2 (out-of-range threads do nothing).
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= nstate_sq) return;
  mat1[idx] = S[idx] / 2.0;
}
__global__
void initialize_identity(double * mat, double scalar, int nstate)
{
  // One thread per element of the nstate x nstate matrix: write `scalar` on
  // the diagonal and 0 elsewhere (the buffer may hold stale data, so every
  // in-range element is written).
  const int idx = threadIdx.x + blockDim.x * blockIdx.x;
  if (idx < nstate * nstate) {
    const int row = idx % nstate;
    const int col = idx / nstate;
    mat[idx] = (row == col) ? scalar : 0.0;
  }
}
//============================================================================
//cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
//============================================================================
// Schulz iteration for inverse sqrt root : quadratic convergence!
//============================================================================
// Schulz (Newton) iteration for the inverse square root, run on the GPU:
//   mat2  = 3*I - Titer*mat1
//   mat1  = 0.5*mat1*mat2
//   Titer = 0.5*mat2*Titer
// with mat1 seeded as S/2 and Titer as I, so on convergence
// Titer*(S/2)*Titer = I (matching the sqrt(2/lambda) scaling used by
// get_diag_Tmat). Converges quadratically; capped at 10 iterations.
// NOTE(review): the tolerance kernel uses grid_size blocks, so nstate_sq
// must not exceed MAX_BLOCKS*THREADS_PER_BLOCK or d_tol_now overflows --
// confirm callers respect this.
void get_iter_Tmat(double *S,double *Titer,int nstate)
//============================================================================
{//begin routine
//============================================================================
// I) Get some scratch on the host
double cpu1,cpu2,cpu3,cpu4, cpuA, cpuB;
cputime(&cpu1);
int nstate_sq = nstate*nstate;
double *tol_now_ptr = new double[MAX_BLOCKS];  // host copy of per-block partial sums
//============================================================================
// II) Set up CUBLAS context
hipblasHandle_t handle; // CUBLAS context
//============================================================================
// III) Allocate memory on the device
double *d_Titer, *d_mat1, *d_mat2, *d_mat3, *d_tol_now;
hipMalloc(&d_Titer, nstate_sq*sizeof(double));
hipMalloc(&d_mat1, nstate_sq*sizeof(double));
hipMalloc(&d_mat2, nstate_sq*sizeof(double));
hipMalloc(&d_mat3, nstate_sq*sizeof(double));
hipMalloc(&d_tol_now, MAX_BLOCKS*sizeof(double));
hipblasCreate(&handle); // initialize CUBLAS context
//============================================================================
// IV) Schulz iteration
//--------------------------------------------------------------------
// A) Initailize d_mat1 and d_Titer on device
cputime(&cpu3);
// d_mat1 = S/2 (S is staged through d_mat2 to avoid an extra buffer)
hipblasSetMatrix(nstate, nstate, sizeof(double), S, nstate, d_mat2, nstate);
hipLaunchKernelGGL(( initialize_mat1), dim3((nstate_sq+1023)/1024), dim3(1024), 0, 0, d_mat1, d_mat2, nstate_sq);
// d_Titer = I = unit matrix
hipLaunchKernelGGL(( initialize_identity), dim3((nstate_sq+1023)/1024), dim3(1024), 0, 0, d_Titer, 1.0, nstate);
//--------------------------------------------------------------------
// B) Iterate
double copy_time = 0, tol_time = 0;
int itransp = 0; int inorm = 1;
double alpha0 = -1.0; double beta0 = 1.0;   // mat2 = 3I - Titer*mat1 (gemm beta=1 accumulates into 3I)
double alpha1 = 0.5; double beta1 = 0.0;    // plain 0.5 * product
int iter = 0;
double tol_now = 1.0;
while (tol_now > 1.0e-15 && iter<10){
iter++;
//--------------------------------
// d_mat2 = 3*I - d_Titer*d_mat1
hipLaunchKernelGGL(( initialize_identity), dim3((nstate_sq+1023)/1024), dim3(1024), 0, 0, d_mat2, 3.0, nstate);
device_matmul(d_Titer,&nstate,&inorm,d_mat1,&nstate,&itransp,d_mat2,
&nstate,&nstate,&nstate,&nstate,&alpha0,&beta0,handle);
//--------------------------------
// d_mat1 = 0.5*d_mat1*d_mat2 = 0.5*d_mat3*d_mat2 // Run this step concurently with the next step ?
// (d_mat1 is copied to d_mat3 first because gemm cannot alias input/output.)
hipMemcpy(d_mat3,d_mat1,nstate_sq*sizeof(double),hipMemcpyDeviceToDevice);
device_matmul(d_mat3,&nstate,&inorm,d_mat2,&nstate,&itransp,d_mat1,
&nstate,&nstate,&nstate,&nstate,&alpha1,&beta1,handle);
//--------------------------------
// d_Titer = 0.5*d_mat2*d_Titer = 0.5*d_mat2*d_mat3
// if(iter >= 4){
hipMemcpy(d_mat3,d_Titer,nstate_sq*sizeof(double),hipMemcpyDeviceToDevice); // Only needed for tolerance check
//}
device_matmul(d_mat2,&nstate,&inorm,d_mat3,&nstate,&itransp,d_Titer,
&nstate,&nstate,&nstate,&nstate,&alpha1,&beta1,handle);
//--------------------------------
// Tolerance = RMS change in Titer this sweep; d_mat3 still holds the old
// Titer, so the kernel reduces (Titer_new - Titer_old)^2 per block and the
// host sums the per-block partials.
// Launch kernel to check tolerance only if iter >= 4
// if(iter >= 4){ tol_now = 0; } //TESTING code with out a real tolerance check
hipDeviceSynchronize();
cputime(&cpuA);
int grid_size = (nstate_sq + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
hipLaunchKernelGGL(( tolerance_check), dim3(grid_size), dim3(THREADS_PER_BLOCK), 0, 0, d_tol_now, d_Titer, d_mat3, nstate_sq);
hipMemcpy(tol_now_ptr, d_tol_now, grid_size*sizeof(double), hipMemcpyDeviceToHost);
tol_now = 0.0;
for(int i = 0; i < grid_size; i ++){
tol_now += tol_now_ptr[i];
}
tol_now = sqrt(tol_now / nstate_sq);
PRINTF("iter %d : tol %g\n",iter,tol_now);
hipDeviceSynchronize();
cputime(&cpuB);
copy_time += 0;
tol_time += cpuB - cpuA;
printf("tol_time %g\n", tol_time);
}//endwhile
if(tol_now>1.0e-15){
PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
PRINTF("Iterative computation of S^{-1/2} failed\n");
PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
EXIT(1);
}//endif
/*==========================================================================*/
// V) Copy the result back to the host
hipblasGetMatrix(nstate, nstate, sizeof(double), d_Titer, nstate, Titer, nstate);
cputime(&cpu4);
/*==========================================================================*/
// VI) Clean up device
hipFree(d_Titer);
hipFree(d_mat1);
hipFree(d_mat2);
hipFree(d_mat3);
hipFree(d_tol_now);
hipblasDestroy(handle);
// VII) Clean up host
delete [] tol_now_ptr;
cputime(&cpu2);
printf("copy time %g : tolerance check time %g\n", copy_time, tol_time);
PRINTF("nstate %d : cpu time iter : %g cpu time without hipMalloc or hipFree : %g\n\n",nstate,cpu2-cpu1, cpu4-cpu3);
/*==========================================================================*/
}//end routine
/*==========================================================================*/
/*==========================================================================*/
/*cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc*/
/*==========================================================================*/
/* subroutine to time processes */
/*==========================================================================*/
/*==========================================================================*/
/* Return the process CPU clock in seconds via *time, compensating for     */
/* sign wrap-around of the underlying integer clock() counter (MAXTIME is  */
/* the wrap period in seconds). Keeps the previous raw reading in `to`.    */
/*==========================================================================*/
void cputime(double *time)
/*==========================================================================*/
{
  static double to=0.,tn=0.;
  int itime = clock();
  tn = (double)((double)itime/(double)CLOCKS_PER_SEC_C);
  if(tn >= 0){
    *time = (to >= 0) ? tn : tn + MAXTIME;       // normal, or just wrapped back positive
  }else{
    *time = (to >= 0) ? MAXTIME*2.0 + tn : MAXTIME + tn;  // counter went negative
  }
  to = tn;
}
/*==========================================================================*/
| 2378815f45b76d106105f5f4bd24f8ff050eca36.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#define DGEMM dgemm_
#define DSPEV dspev_
#define PRINTF printf
#define EXIT exit
#define CLOCKS_PER_SEC_C 1000000
#define MAXTIME 2147.48
#define MAX_BLOCKS 65521 // Max dimension size of a grid size (x,y,z): (2147483647, 65535, 65535)
#define THREADS_PER_BLOCK 1024
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA API failure with its source location; optionally terminate
// the process with the error code (used via the gpuErrchk macro above).
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code == cudaSuccess) return;  // fast path: nothing to report
   fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
   if (abort) exit(code);
}
// Device kernels (defined later in this file).
__global__ void tolerance_check(double *tol_now, double *d_Titer, double *d_mat3, int nstate_sq);
__global__ void initialize_mat1(double * mat1, double * S, int nstate_sq);
__global__ void initialize_identity(double * mat, double scalar, int nstate);
// Host helpers: CPU timer and the three T = S^{-1/2} construction strategies.
void cputime(double *);
void get_iter_Tmat(double *,double *,int );
void get_diag_Tmat(double *,double *,int );
void get_unit_Tmat(double *,int );
// Fortran BLAS dgemm (column-major), linked in for the CPU reference path.
extern "C" { void DGEMM (char *, char *, int *, int *, int *,double *,double *, int *, double *, int *, double *, double *, int * ); }
//============================================================================
// Host-callable GEMM wrapper: Z = ALPHA*op(X)*op(Y) + BETA*Z via cuBLAS.
// ITYPE_* == 1 means use the operand as-is, == 0 means use its transpose
// (the inorm/itransp convention of the callers). Result is NRZ x NCZ with
// inner dimension NXY; matrices are column-major. Allocates device buffers,
// copies operands over, runs the GEMM, copies Z back, and frees everything.
// NOTE(review): LDX/LDY/LDZ are accepted but unused; m and k are used as the
// leading dimensions, which is only safe for the square matrices used here.
//============================================================================
void matmul(double *X, int *LDX, int *ITYPE_X,
            double *Y, int *LDY, int *ITYPE_Y,
            double *Z, int *LDZ, int *NRZ, int *NCZ, int *NXY,
            double *ALPHA, double *BETA)
{
  int m = *NRZ;
  int n = *NCZ;
  int k = *NXY;
  // BUG FIX: the flags were tested as pointers ("ITYPE_X"), which is always
  // non-NULL, so the transpose branch could never be taken. Dereference to
  // read the caller's 0/1 flag.
  cublasOperation_t MATX = (*ITYPE_X) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasOperation_t MATY = (*ITYPE_Y) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasHandle_t handle; // CUBLAS context
  // Step 1: Allocate memory on the device:
  double *d_X, *d_Y, *d_Z;
  cudaMalloc(&d_X, (m*k)*sizeof(double)); // X is an m x k matrix
  cudaMalloc(&d_Y, (k*n)*sizeof(double)); // Y is a k x n matrix
  cudaMalloc(&d_Z, (m*n)*sizeof(double)); // Z is an m x n matrix
  cublasCreate(&handle); // initialize CUBLAS context
  // Step 2: Initialize device memory from host:
  cublasSetMatrix(m, k, sizeof(double), X, m, d_X, m);
  cublasSetMatrix(k, n, sizeof(double), Y, k, d_Y, k);
  cublasSetMatrix(m, n, sizeof(double), Z, m, d_Z, m);
  // Step 3: Perform the GEMM on the device:
  cublasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m);
  // Step 4: Copy the result back to the host:
  cublasGetMatrix(m, n, sizeof(double), d_Z, m, Z, m);
  // Step 5: Clean up
  cudaFree(d_X);
  cudaFree(d_Y);
  cudaFree(d_Z);
  cublasDestroy(handle);
}
//============================================================================
// Device-resident GEMM: d_Z = ALPHA*op(d_X)*op(d_Y) + BETA*d_Z using an
// existing cuBLAS handle (no allocation and no host<->device traffic).
// ITYPE_* == 1 selects the untransposed operand, == 0 its transpose.
// NOTE(review): LDX/LDY/LDZ are unused; m/k serve as leading dimensions,
// valid only for the square operands used in this file.
//============================================================================
void device_matmul(double *d_X, int *LDX, int *ITYPE_X,
                   double *d_Y, int *LDY, int *ITYPE_Y,
                   double *d_Z, int *LDZ, int *NRZ, int *NCZ, int *NXY,
                   double *ALPHA, double *BETA, cublasHandle_t handle)
{
  int m = *NRZ;
  int n = *NCZ;
  int k = *NXY;
  // BUG FIX: flags were tested as pointers (always true), so OP_T was never
  // selected; dereference to read the intended 0/1 flag value.
  cublasOperation_t MATX = (*ITYPE_X) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasOperation_t MATY = (*ITYPE_Y) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m);
}
#define _USE_LAPACK_
#ifdef _USE_LAPACK_
extern "C" {void DSPEV(char *, char *, int *, double [], double [], double [], int *, double [], int *);}
#endif
//=======================================================================
//ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
//=======================================================================
//=======================================================================
// Driver: build a random symmetric test matrix S, run the iterative
// S^{-1/2} solver three times (timing), and report error/symmetry stats.
//=======================================================================
int main()
//=======================================================================
{// begin routine
//=======================================================================
// I) Read the problem size and allocate the matrices.
  int nstate;
  PRINTF("\n============================================\n");
  PRINTF("Enter the matrix size : ");
  if(scanf("%d",&nstate)){}
  else{printf("%s\n", "Please enter an integer");}
  int nstate_sq = nstate*nstate;
  // Value-initialize (zero) the buffers: Tdiag is read by the error check
  // below even when get_diag_Tmat() is left commented out, and the old
  // plain `new double[...]` left it uninitialized (undefined behavior).
  double *S = new double[nstate_sq]();
  double *Tunit = new double[nstate_sq]();
  double *Tdiag = new double[nstate_sq]();
  double *Titer = new double[nstate_sq]();
//=======================================================================
// II) Build S = 2*I + small symmetric random perturbation (|delta|<=1e-3).
  PRINTF("Using random input\n\n");
  for(int i=0;i<nstate_sq;i++){S[i]=0.0;}
  for(int i=0;i<nstate;i++){int ind =i+nstate*i;S[ind]=2.0;}
  double seed=14571.0;
  srand48((long) seed);
  for(int i=0;i<nstate;i++){
    // BUG FIX: the inner loop used to test and increment i ("j=i;i<nstate;i++"),
    // which walked i off the end of the outer loop and perturbed only part
    // of the matrix; it must iterate over j.
    for(int j=i;j<nstate;j++){
      int ind = i+nstate*j;
      int indt = j+nstate*i;
      double rand=drand48();
      S[ind] += (rand-0.5)*2.0e-3;
      S[indt] = S[ind];  // keep S exactly symmetric
    }
  }//endfor
//=======================================================================
// III) Run the methods (diagonalization reference currently disabled).
//  get_unit_Tmat(Tunit,nstate);
//  get_diag_Tmat(S,Tdiag,nstate);
  get_iter_Tmat(S,Titer,nstate);
  get_iter_Tmat(S,Titer,nstate);
  get_iter_Tmat(S,Titer,nstate);
//=======================================================================
// IV) Check the iterative result against Tdiag (all zeros unless the
//     get_diag_Tmat call above is re-enabled) and check symmetry.
  double err=0.0;
  for(int i=0;i<nstate_sq;i++){
    double tmp=Tdiag[i]-Titer[i];
    tmp = tmp*tmp;
    err = (err > tmp ? err : tmp);
  }//endfor
  err = sqrt(err);
  PRINTF("Maximum error in any element : %g\n",err);
  err=0.0;
  for(int i=0;i<nstate;i++){
    for(int j=i;j<nstate;j++){
      int ind = i + j*nstate;
      int indt = j + i*nstate;
      double tmp=Titer[ind]-Titer[indt];
      tmp = tmp*tmp;
      err = (err > tmp ? err : tmp);
    }
  }//endfor
  err = sqrt(err);
  PRINTF("Deviation from symmetric : %g\n",err);
  PRINTF("============================================\n\n");
  // Release host buffers (previously leaked).
  delete [] S;
  delete [] Tunit;
  delete [] Tdiag;
  delete [] Titer;
  return 0;
//=======================================================================
}//end routine
//=======================================================================
//============================================================================
//cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
//============================================================================
// Diagonalize S and construct T=S^{-1/2} using eigenvalues and eigenvectors
//============================================================================
// Diagonalize the symmetric matrix S with LAPACK dspev and construct
// T = U * diag(sqrt(2/lambda_i)) * U^T, i.e. sqrt(2) * S^{-1/2}
// (the factor 2 is the deliberate occupation-number "HACK" noted below).
// S and T are nstate x nstate column-major; S is not modified.
void get_diag_Tmat(double *S,double *T,int nstate)
//============================================================================
{//begin routine
//============================================================================
// I) Get some scratch
double cpu1,cpu2;
cputime(&cpu1);
int nstate_sq = nstate*nstate;
double *umat = new double[nstate_sq];      // eigenvectors of S (from dspev)
double *scr_mat1 = new double[nstate_sq];  // packed S, later diag(sqrt(2/lambda))
double *scr_mat2 = new double[nstate_sq];  // intermediate GEMM product
double *s_eigs = new double[nstate];       // eigenvalues of S
double *scr1 = new double[3*nstate];       // dspev workspace (3n doubles)
double *scr2 = new double[3*nstate];       // NOTE(review): allocated but unused
//==========================================================================
// II. Diagonalize S using rs_ FORTRAN diagonalization routine
int ifound = 0;
int ierr = 0;
//----------------------------------------------------------------------
// Use LAPACK : Captain Jack is Happy.
#ifdef _USE_LAPACK_
ifound ++;
// Pack the lower triangle of column-major S into LAPACK 'L' packed storage:
// element (i,j), j <= i (1-based), lives at offset (i-1) + (j-1)*(2n-j)/2.
for(int i = 1; i <= nstate; i++){
for(int j = 1; j <= i; j++){
int ind = (i-1) + (j-1)*nstate;
int ind2 = (i-1) + (j-1)*(2*nstate-j)/2;
scr_mat1[ind2] = S[ind];
}}//endfor
char Vstuff ='V';  // compute eigenvectors as well as eigenvalues
char Lstuff ='L';  // packed storage holds the lower triangle
DSPEV(&Vstuff,&Lstuff,&nstate,scr_mat1,s_eigs,umat,&nstate,scr1,&ierr);
#endif
if(ifound!=1 || ierr != 0){
PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
PRINTF("Error trying to diagonalize S : %d %d\n",ifound,ierr);
PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
EXIT(1);
}//endif
//==========================================================================
// III. Compute inverse square root of eigenvalues: Occupation numbers
// are HACKED!!!!!
//----------------------------------------------------------------------
// A) Construct diagonal matrix using eigenvalues : sqrt(2/lamba)
for(int i = 0; i < nstate; i++){s_eigs[i] = sqrt(2.0/s_eigs[i]);}
memset(scr_mat1,0,sizeof(double)*nstate_sq);
for(int i = 0; i < nstate; i++){
int ind = i*nstate+i;
scr_mat1[ind]=s_eigs[i];
}/* endfor */
//------------------------------------------------------------------------
// B) Transform matrix back to original representation using eigenvectors:
//    scr_mat2 = diag * op(U) (second operand flagged transposed via itransp),
//    then T = U * scr_mat2. Verify matmul honors the transpose flag so this
//    computes U * D * U^T as intended.
double alpha = 1.0; double beta = 0.0;
int itransp = 0; int inorm = 1;
matmul(scr_mat1,&nstate,&inorm,umat,&nstate,&itransp,scr_mat2,
&nstate,&nstate,&nstate,&nstate,&alpha,&beta);
matmul(umat,&nstate,&inorm,scr_mat2,&nstate,&inorm,T,
&nstate,&nstate,&nstate,&nstate,&alpha,&beta);
//============================================================================
// IV) Free allocated temporary memory
delete [] umat;
delete [] scr_mat1;
delete [] scr_mat2;
delete [] s_eigs;
delete [] scr1;
delete [] scr2;
cputime(&cpu2);
PRINTF("nstate %d : cpu time diag : %g\n\n",nstate,cpu2-cpu1);
//============================================================================
} /* End function */
//============================================================================
//============================================================================
//cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
//============================================================================
// Set Tmax to the Unit matrix : remove cputime overhead of diag to test
// parallel performance
//============================================================================
// Fill Tunit with the nstate x nstate identity matrix (stand-in for the
// diagonalization result when benchmarking the parallel path).
void get_unit_Tmat(double *Tunit,int nstate){
  const int total = nstate*nstate;
  memset(Tunit, 0, total*sizeof(double));
  // Diagonal entry (i,i) of a column-major matrix sits at index i*(nstate+1).
  for(int d = 0; d < nstate; d++){Tunit[d*(nstate+1)] = 1.0;}
}
//============================================================================
/*==========================================================================*/
/*cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc*/
/*==========================================================================*/
/* Kernel to check tolerance directly on device */
/*==========================================================================*/
__global__
void tolerance_check(double *d_tol_now, double *d_Titer, double *d_mat3, int nstate_sq)
//============================================================================
{//begin routine
//============================================================================
// Per-block squared-difference reduction:
//   d_mat3[i] = (d_mat3[i] - d_Titer[i])^2, then each block folds its
//   elements in place and thread 0 writes its partial sum to
//   d_tol_now[blockIdx.x]. Launch with blockDim.x == THREADS_PER_BLOCK.
  int global_idx = threadIdx.x + blockDim.x * blockIdx.x;
  int local_idx = threadIdx.x;
  // I) d_mat3 = (d_mat3 - d_Titer) ^ 2
  if(global_idx < nstate_sq){ // last block may be partial
    double tmp = d_mat3[global_idx] - d_Titer[global_idx];
    d_mat3[global_idx] = tmp * tmp;
  }
  __syncthreads();
  // II) Number of valid elements owned by this block.
  unsigned int length = THREADS_PER_BLOCK;
  if((blockIdx.x + 1) == gridDim.x){ // last block - may not be full
    length = nstate_sq - THREADS_PER_BLOCK * (gridDim.x - 1); // THREADS_PER_BLOCK == blockDim.x
  }
  unsigned int global_start_idx = blockDim.x * blockIdx.x;
  // III) Pairwise in-place fold of this block's elements.
  // BUG FIX: __syncthreads() previously sat inside "if(global_idx < nstate_sq)",
  // so out-of-range threads of a partial last block skipped the barrier
  // (undefined behavior). The loop below runs uniformly for every thread in
  // the block; only the accumulation itself is predicated.
  while(length > 1){
    unsigned int next_length = (length + 1) / 2;
    unsigned int block2_size = length / 2;
    unsigned int global_split_idx = global_start_idx + next_length;
    unsigned int global_end_idx = global_start_idx + length;
    if(global_split_idx <= (unsigned int)global_idx && (unsigned int)global_idx < global_end_idx){
      d_mat3[global_idx - block2_size] += d_mat3[global_idx];
    }
    __syncthreads(); // now reached by ALL threads in the block
    length = next_length;
  }
  // IV) Thread 0 of each block publishes the block-local sum.
  if(local_idx == 0 && global_idx < nstate_sq){
    d_tol_now[blockIdx.x] = d_mat3[global_idx];
  }
}
/*==========================================================================*/
__global__
void initialize_mat1(double * mat1, double * S, int nstate_sq)
{
  // One thread per element: mat1 = S / 2 (out-of-range threads do nothing).
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= nstate_sq) return;
  mat1[idx] = S[idx] / 2.0;
}
__global__
void initialize_identity(double * mat, double scalar, int nstate)
{
  // One thread per element of the nstate x nstate matrix: write `scalar` on
  // the diagonal and 0 elsewhere (the buffer may hold stale data, so every
  // in-range element is written).
  const int idx = threadIdx.x + blockDim.x * blockIdx.x;
  if (idx < nstate * nstate) {
    const int row = idx % nstate;
    const int col = idx / nstate;
    mat[idx] = (row == col) ? scalar : 0.0;
  }
}
//============================================================================
//cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
//============================================================================
// Schulz iteration for inverse sqrt root : quadratic convergence!
//============================================================================
void get_iter_Tmat(double *S,double *Titer,int nstate)
//============================================================================
// GPU coupled Newton-Schulz iteration for Titer = S^{-1/2}:
//   mat2  = 3*I - Titer*mat1
//   mat1  = 0.5 * mat1 * mat2
//   Titer = 0.5 * mat2 * Titer
// starting from Titer = I, with mat1 built from S by initialize_mat1()
// (per the original comment mat1 = S/2 -- confirm against that kernel).
// S and Titer are nstate x nstate host arrays (cuBLAS column-major).
// Aborts via EXIT(1) if the per-element RMS update is still above 1e-15
// after 10 iterations. Converges quadratically for suitably scaled S.
//============================================================================
{//begin routine
//============================================================================
// I) Get some scratch on the host
  double cpu1,cpu2,cpu3,cpu4, cpuA, cpuB;
  cputime(&cpu1);
  int nstate_sq = nstate*nstate;
  double *tol_now_ptr = new double[MAX_BLOCKS];  // per-block partial sums from tolerance_check
//============================================================================
// II) Set up CUBLAS context
  cublasHandle_t handle; // CUBLAS context
//============================================================================
// III) Allocate memory on the device
  double *d_Titer, *d_mat1, *d_mat2, *d_mat3, *d_tol_now;
  cudaMalloc(&d_Titer, nstate_sq*sizeof(double));
  cudaMalloc(&d_mat1, nstate_sq*sizeof(double));
  cudaMalloc(&d_mat2, nstate_sq*sizeof(double));
  cudaMalloc(&d_mat3, nstate_sq*sizeof(double));
  cudaMalloc(&d_tol_now, MAX_BLOCKS*sizeof(double));
  cublasCreate(&handle); // initialize CUBLAS context
//============================================================================
// IV) Schulz iteration
//--------------------------------------------------------------------
// A) Initialize d_mat1 and d_Titer on device
  cputime(&cpu3);
  // Stage S into d_mat2; initialize_mat1 then derives d_mat1 from it.
  // (The original comment "d_mat1 = S/2" described the *result* of the
  // two calls below, not the cublasSetMatrix line itself.)
  cublasSetMatrix(nstate, nstate, sizeof(double), S, nstate, d_mat2, nstate);
  initialize_mat1<<<(nstate_sq+1023)/1024, 1024>>>(d_mat1, d_mat2, nstate_sq);
  // d_Titer = I = unit matrix
  initialize_identity<<<(nstate_sq+1023)/1024, 1024>>>(d_Titer, 1.0, nstate);
//--------------------------------------------------------------------
// B) Iterate
  double copy_time = 0, tol_time = 0;  // copy_time kept for the final report (always 0 here)
  int itransp = 0; int inorm = 1;
  double alpha0 = -1.0; double beta0 = 1.0;
  double alpha1 = 0.5; double beta1 = 0.0;
  int iter = 0;
  double tol_now = 1.0;
  while (tol_now > 1.0e-15 && iter<10){
    iter++;
    //--------------------------------
    // d_mat2 = 3*I - d_Titer*d_mat1
    initialize_identity<<<(nstate_sq+1023)/1024, 1024>>>(d_mat2, 3.0, nstate);
    device_matmul(d_Titer,&nstate,&inorm,d_mat1,&nstate,&itransp,d_mat2,
                  &nstate,&nstate,&nstate,&nstate,&alpha0,&beta0,handle);
    //--------------------------------
    // d_mat1 = 0.5*d_mat1*d_mat2, computed as 0.5*d_mat3*d_mat2 where
    // d_mat3 is a copy of the old d_mat1 (GEMM output must not alias an input).
    cudaMemcpy(d_mat3,d_mat1,nstate_sq*sizeof(double),cudaMemcpyDeviceToDevice);
    device_matmul(d_mat3,&nstate,&inorm,d_mat2,&nstate,&itransp,d_mat1,
                  &nstate,&nstate,&nstate,&nstate,&alpha1,&beta1,handle);
    //--------------------------------
    // d_Titer = 0.5*d_mat2*d_Titer = 0.5*d_mat2*d_mat3; d_mat3 keeps the
    // previous iterate, which the tolerance check below also needs.
    cudaMemcpy(d_mat3,d_Titer,nstate_sq*sizeof(double),cudaMemcpyDeviceToDevice);
    device_matmul(d_mat2,&nstate,&inorm,d_mat3,&nstate,&itransp,d_Titer,
                  &nstate,&nstate,&nstate,&nstate,&alpha1,&beta1,handle);
    //--------------------------------
    // Tolerance: RMS of (new - old) iterate, block-reduced on the device and
    // finished on the host. cudaDeviceSynchronize() replaces the deprecated
    // cudaThreadSynchronize() (removed in CUDA 12).
    // NOTE(review): assumes grid_size <= MAX_BLOCKS; verify for large nstate,
    // otherwise tolerance_check writes past d_tol_now.
    cudaDeviceSynchronize();
    cputime(&cpuA);
    int grid_size = (nstate_sq + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
    tolerance_check<<<grid_size, THREADS_PER_BLOCK>>>(d_tol_now, d_Titer, d_mat3, nstate_sq);
    cudaMemcpy(tol_now_ptr, d_tol_now, grid_size*sizeof(double), cudaMemcpyDeviceToHost);
    tol_now = 0.0;
    for(int i = 0; i < grid_size; i ++){
      tol_now += tol_now_ptr[i];
    }
    tol_now = sqrt(tol_now / nstate_sq);
    PRINTF("iter %d : tol %g\n",iter,tol_now);
    cudaDeviceSynchronize();
    cputime(&cpuB);
    tol_time += cpuB - cpuA;
    printf("tol_time %g\n", tol_time);
  }//endwhile
  if(tol_now>1.0e-15){
    PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
    PRINTF("Iterative computation of S^{-1/2} failed\n");
    PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
    EXIT(1);
  }//endif
/*==========================================================================*/
// V) Copy the result back to the host
  cublasGetMatrix(nstate, nstate, sizeof(double), d_Titer, nstate, Titer, nstate);
  cputime(&cpu4);
/*==========================================================================*/
// VI) Clean up device
  cudaFree(d_Titer);
  cudaFree(d_mat1);
  cudaFree(d_mat2);
  cudaFree(d_mat3);
  cudaFree(d_tol_now);
  cublasDestroy(handle);
// VII) Clean up host
  delete [] tol_now_ptr;
  cputime(&cpu2);
  printf("copy time %g : tolerance check time %g\n", copy_time, tol_time);
  PRINTF("nstate %d : cpu time iter : %g cpu time without cudaMalloc or cudaFree : %g\n\n",nstate,cpu2-cpu1, cpu4-cpu3);
/*==========================================================================*/
}//end routine
/*==========================================================================*/
/*==========================================================================*/
/*cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc*/
/*==========================================================================*/
/* subroutine to time processes */
/*==========================================================================*/
/* Stores the current process CPU time, in seconds, into *time.
   clock() can wrap to negative values on long runs; the sign tests against
   the previous sample `to` unwrap one wrap by adding MAXTIME (project macro)
   so successive readings stay monotonic.
   NOTE(review): itime is int while clock() returns clock_t (possibly 64-bit);
   the truncation appears intentional for the wrap handling -- confirm. */
void cputime(double *time)
/*==========================================================================*/
{
  int itime;                      /* raw clock() sample, truncated to int */
  static double to=0.,tn=0.;      /* previous / current sample, persist across calls */
  itime = clock();
  tn = (double)((double)itime/(double)CLOCKS_PER_SEC_C);
  *time = tn;
  /* wrap-around correction based on the signs of current and previous samples */
  if(tn >= 0 && to >= 0){*time=tn;}
  if(tn < 0 && to >= 0){*time=MAXTIME*2.0+tn;}
  if(tn >= 0 && to < 0){*time=tn+MAXTIME;}
  if(tn < 0 && to < 0){*time=MAXTIME+tn;}
  to = tn;
}
/*==========================================================================*/
|
b9db4ea731d7dff72fd2a098aa1bdc2764f75aa8.hip | // !!! This is a file automatically generated by hipify!!!
#include "custom_cuda.h"
#define DEBUG 1
// Checks a HIP API status; on failure prints the call site and the runtime's
// error string, then terminates. Returns true when Status == hipSuccess.
// Fix: the original called exit( 0 ), reporting *success* to the shell on an
// error path; a non-zero code now signals the failure.
bool ERROR_CHECK(hipError_t Status, const char * file, int line)
{
	if(Status != hipSuccess)
	{
		printf("(EE) \n");
		printf("(EE) Error detected in the LDPC decoder (%s : %d)\n", file, line);
		printf("(EE) MSG: %s\n", hipGetErrorString(Status));
		printf("(EE) \n");
		exit( 1 );
		return false;  // unreachable; kept to satisfy the bool signature
	}
	return true;
}
// Pinned (page-locked) host allocation helpers, one overload per element type.
// Fix: nbytes is now size_t -- the original `int nbytes` truncated the
// size_t product nbElements * sizeof(T), overflowing for buffers >= 2 GiB.
// Each overload aborts via ERROR_CHECK on allocation failure.
void CUDA_MALLOC_HOST(float** ptr, int nbElements){
	hipError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(float);
	Status = hipHostMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Host Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(double** ptr, int nbElements){
	hipError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(double);
	Status = hipHostMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Host Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(int** ptr, int nbElements){
	hipError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(int);
	Status = hipHostMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Host Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(unsigned int** ptr, int nbElements){
	hipError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(unsigned int);
	Status = hipHostMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Host Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(char** ptr, int nbElements){
	hipError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(char);
	Status = hipHostMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Host Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
// Device (GPU global memory) allocation helpers, one overload per type.
// Fix: nbytes is now size_t -- the original `int nbytes` truncated the
// size_t product nbElements * sizeof(T), overflowing for buffers >= 2 GiB.
// Each overload aborts via ERROR_CHECK on allocation failure.
void CUDA_MALLOC_DEVICE(float** ptr, int nbElements){
	hipError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(float);
	Status = hipMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Device Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(double** ptr, int nbElements){
	hipError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(double);
	Status = hipMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Device Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(int** ptr, int nbElements){
	hipError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(int);
	Status = hipMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Device Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(unsigned int** ptr, int nbElements){
	hipError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(unsigned int);
	Status = hipMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Device Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(char** ptr, int nbElements){
	hipError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(char);
	Status = hipMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Device Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
| b9db4ea731d7dff72fd2a098aa1bdc2764f75aa8.cu | #include "custom_cuda.h"
#define DEBUG 1
// Checks a CUDA API status; on failure prints the call site and the runtime's
// error string, then terminates. Returns true when Status == cudaSuccess.
// Fix: the original called exit( 0 ), reporting *success* to the shell on an
// error path; a non-zero code now signals the failure.
bool ERROR_CHECK(cudaError_t Status, const char * file, int line)
{
	if(Status != cudaSuccess)
	{
		printf("(EE) \n");
		printf("(EE) Error detected in the LDPC decoder (%s : %d)\n", file, line);
		printf("(EE) MSG: %s\n", cudaGetErrorString(Status));
		printf("(EE) \n");
		exit( 1 );
		return false;  // unreachable; kept to satisfy the bool signature
	}
	return true;
}
// Pinned (page-locked) host allocation helpers, one overload per element type.
// Fix: nbytes is now size_t -- the original `int nbytes` truncated the
// size_t product nbElements * sizeof(T), overflowing for buffers >= 2 GiB.
// Each overload aborts via ERROR_CHECK on allocation failure.
void CUDA_MALLOC_HOST(float** ptr, int nbElements){
	cudaError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(float);
	Status = cudaMallocHost(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Host Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(double** ptr, int nbElements){
	cudaError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(double);
	Status = cudaMallocHost(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Host Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(int** ptr, int nbElements){
	cudaError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(int);
	Status = cudaMallocHost(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Host Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(unsigned int** ptr, int nbElements){
	cudaError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(unsigned int);
	Status = cudaMallocHost(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Host Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(char** ptr, int nbElements){
	cudaError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(char);
	Status = cudaMallocHost(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Host Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
// Device (GPU global memory) allocation helpers, one overload per type.
// Fix: nbytes is now size_t -- the original `int nbytes` truncated the
// size_t product nbElements * sizeof(T), overflowing for buffers >= 2 GiB.
// Each overload aborts via ERROR_CHECK on allocation failure.
void CUDA_MALLOC_DEVICE(float** ptr, int nbElements){
	cudaError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(float);
	Status = cudaMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Device Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(double** ptr, int nbElements){
	cudaError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(double);
	Status = cudaMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Device Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(int** ptr, int nbElements){
	cudaError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(int);
	Status = cudaMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Device Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(unsigned int** ptr, int nbElements){
	cudaError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(unsigned int);
	Status = cudaMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Device Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(char** ptr, int nbElements){
	cudaError_t Status;
	size_t nbytes = (size_t)nbElements * sizeof(char);
	Status = cudaMalloc(ptr, nbytes);
#if DEBUG == 1
	//printf("(II) + Allocating Device Memory, %d elements (%d bytes) adr [0x%8.8X, 0x%8.8X]\n", nbElements, nbytes, *ptr, *ptr+nbElements-1);
#endif
	ERROR_CHECK(Status, __FILE__, __LINE__);
}
|
639cf1f49b5d8528d9a62039bebfe3c4e1cae543.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <ctime>
#include <random>
#include <thread>
#include <atomic>
#include <mutex>
#include "../cudautil.cuh"
#include "../cudamem.h"
#include "boids.h"
#include "path_group.h"
#include "../opengl/sceneManager.h"
#include "../opengl/demos/demo_boids.h"
#include "../opengl/CoreHeaders/sceneGUI.h"
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtx/quaternion.hpp>
#include <cuda_gl_interop.h>
#define BOID_COUNT (500)
#define THREADS_PER_BLOCK (256)
#define USE_SHARED_MEM
#define CHECK_VIEW_RANGE
#define VISUALIZE
#define SIMULATE
//#define DEBUG_DRAW
double boidsSeparationFactor = 1.0;
double boidsCohesionFactor = 0.7;
double boidsAlignmentFactor = 1.0;
double boidsGoalFactor = 0.2;
glm::vec3 boidGoal{ 10.0f, 0.0f, 0.0f };
double boidsSeparationNeighbourhood = 1.0f;
double boidsCohesionNeighbourhood = 4.0f;
double boidsAlignmentNeighbourhood = 1.0f;
double boidsMaxVelocity = 0.01f;
double boidsViewAngle = 135.0f;
static PathGroup pathGroup(0.5f, {
boidGoal,
glm::vec3(5.0f, 5.0f, 0.0f),
glm::vec3(0.0f, 0.0f, 5.0f)
});
static glm::vec3 flockCenter{ 0.0f, 0.0f, 0.0f };
/// Test
double boidTestDir[3] = { 0.0, 0.0, 1.0 };
/// CUDA
// Exact component-wise float3 equality; used by isInViewRange to recognise
// "same boid" (positions are copied verbatim, so exact compare is sufficient).
static __device__ bool operator==(const float3& vec1, const float3& vec2)
{
	return vec1.x == vec2.x && vec1.y == vec2.y && vec1.z == vec2.z;
}
// Limits the magnitude of vec to at most max, preserving direction.
// Zero-length vectors pass through unchanged (no division by zero).
static __device__ float3 vecClamp(const float3& vec, float max)
{
	float len = length(vec);
	if (len != 0.0f && len > max)
	{
		return vec * (max / len);
	}
	return vec;
}
// Normalizes vec to unit length; the zero vector is returned as-is.
static __device__ float3 vecNormalize(const float3& vec)
{
	float len = length(vec);
	if (len == 0.0f) return vec;
	return vec / len;
}
// Separation rule: repulsive vector pointing from the neighbour towards this
// boid, scaled by 1/distance so nearer neighbours repel more strongly.
// Contributes (and increments count) only when the neighbour lies strictly
// inside separationNeighbourhood and is not at the same position.
static __device__ float3 updateSeparation(const float3& position, const float3& otherPosition, int& count, FlockConfig* config)
{
	float3 vec = position - otherPosition;
	float len = length(vec);
	if (len == 0 || len >= config->separationNeighbourhood)
	{
		return make_float3(0.0f, 0.0f, 0.0f);
	}
	count++;
	return vecNormalize(vec) / len;
}
// Cohesion rule: returns the neighbour's position so the caller can average
// all contributions into a local centre of mass. Contributes only inside
// cohesionNeighbourhood.
static __device__ float3 updateCohesion(const float3& position, const float3& otherPosition, int& count, FlockConfig* config)
{
	float3 vec = position - otherPosition;
	float len = length(vec);
	if (len == 0 || len >= config->cohesionNeighbourhood)
	{
		return make_float3(0.0f, 0.0f, 0.0f);
	}
	count++;
	return otherPosition;
}
// Alignment rule: returns the neighbour's direction so the caller can average
// headings. Contributes only inside alignmentNeighbourhood.
static __device__ float3 updateAlignment(const float3& position, const float3& otherPosition, const float3& otherDirection, int& count, FlockConfig* config)
{
	float3 vec = position - otherPosition;
	float len = length(vec);
	if (len == 0 || len >= config->alignmentNeighbourhood)
	{
		return make_float3(0.0f, 0.0f, 0.0f);
	}
	count++;
	return otherDirection;
}
// Accumulates all three flocking terms (and their neighbour counts) for one
// boid/neighbour pair into `force`.
static __device__ void updateFlock(Force& force, const float3& position, const float3& otherPosition, const float3& otherDirection, FlockConfig* config)
{
	force.separation += updateSeparation(position, otherPosition, force.separationCount, config);
	force.cohesion += updateCohesion(position, otherPosition, force.cohesionCount, config);
	force.alignment += updateAlignment(position, otherPosition, otherDirection, force.alignmentCount, config);
}
// Field-of-view test: the neighbour is visible when the angle between this
// boid's heading and the vector to the neighbour is below viewAngle (in
// radians; the host converts from degrees via glm::radians). A boid never
// sees itself (identical position). Compiled to `true` when
// CHECK_VIEW_RANGE is undefined.
static __device__ bool isInViewRange(const float3& position, const float3& direction, const float3& otherPosition, float viewAngle)
{
#ifdef CHECK_VIEW_RANGE
	if (position == otherPosition) return false;
	float3 toTarget = vecNormalize(otherPosition - position);
	float angle = atan2(length(cross(toTarget, direction)), dot(toTarget, direction));
	return angle < viewAngle;
#else
	return true;
#endif
}
// One thread per boid: O(N^2) all-pairs pass producing the four acceleration
// terms (separation / cohesion / alignment / goal) in outAccelerations.
// With USE_SHARED_MEM the boid array is streamed through shared memory in
// blockDim.x-sized tiles; threads with boidId >= size are clamped to the
// last boid so every thread participates in the cooperative tile loads and
// barriers, and are filtered out only just before the final global write.
// Launch: 1D grid/block; blockDim.x must equal THREADS_PER_BLOCK (the
// shared tile size).
static __global__ void calculateAccelerations(Boid* __restrict__ boids, Acceleration* __restrict__ outAccelerations, const int size, FlockConfig* config)
{
#pragma region Init
#ifdef USE_SHARED_MEM
	__shared__ Boid sharedBoids[THREADS_PER_BLOCK];
#endif
	const int tileSize = blockDim.x;
	const int tileCount = gridDim.x;
	const int boidId = blockDim.x * blockIdx.x + threadIdx.x;
#ifdef USE_SHARED_MEM
	// Clamp out-of-range threads to the last boid instead of returning, so
	// all threads can still reach the __syncthreads() barriers below.
	float3 position = boids[min(boidId, size - 1)].position;
	float3 direction = vecNormalize(boids[min(boidId, size - 1)].direction);
#else
	if (boidId >= size) return;
	float3 position = boids[boidId].position;
	float3 direction = vecNormalize(boids[boidId].direction);
#endif
	Force force = { 0 };
#ifdef USE_SHARED_MEM
	int boidsLeft = size;
	// Full tiles: each thread loads one boid, then the whole block scans the tile.
	for (int tile = 0; tile < tileCount - 1; tile++)
	{
		int tid = tile * tileSize + threadIdx.x;
		sharedBoids[threadIdx.x] = boids[tid];
		__syncthreads(); // tile fully loaded before anyone reads it
		for (int i = 0; i < tileSize; i++)
		{
			if (isInViewRange(position, direction, sharedBoids[i].position, config->viewAngle))
			{
				updateFlock(force, position, sharedBoids[i].position, sharedBoids[i].direction, config);
			}
		}
		boidsLeft -= tileSize;
		__syncthreads(); // done reading before the next tile overwrites
	}
	// Last (possibly partial) tile: only boidsLeft entries are valid.
	int tid = (tileCount - 1) * tileSize + threadIdx.x;
	if (tid < size)
	{
		sharedBoids[threadIdx.x] = boids[tid];
	}
	__syncthreads();
	for (int i = 0; i < boidsLeft; i++)
	{
		if (isInViewRange(position, direction, sharedBoids[i].position, config->viewAngle))
		{
			updateFlock(force, position, sharedBoids[i].position, sharedBoids[i].direction, config);
		}
	}
	__syncthreads();
	if (boidId >= size) return; // padding threads write nothing
#else
	for (int i = 0; i < size; i++)
	{
		if (isInViewRange(position, direction, boids[i].position, config->viewAngle))
		{
			updateFlock(force, position, boids[i].position, boids[i].direction, config);
		}
	}
#endif
#pragma region Create force vector
	// Average each accumulated term over the neighbours that contributed.
	if (force.cohesionCount > 0)
	{
		force.cohesion /= force.cohesionCount; // center of mass
	}
	if (force.alignmentCount > 0)
	{
		force.alignment /= force.alignmentCount;
	}
	if (force.separationCount > 0)
	{
		force.separation /= force.separationCount;
	}
	// Scale each term by its configured weight; cohesion steers towards the
	// local centre of mass, goal steers towards the current path target.
	Acceleration acc;
	acc.alignment = force.alignment * config->alignmentFactor;
	acc.separation = force.separation * config->separationFactor;
	if (force.cohesionCount > 0)
	{
		acc.cohesion = (force.cohesion - position) * config->cohesionFactor;
	}
	else acc.cohesion = make_float3(0.0f, 0.0f, 0.0f);
	acc.goal = vecNormalize(config->goal - position) * config->goalFactor;
	outAccelerations[boidId] = acc;
#pragma endregion
}
// One thread per boid: adds the summed acceleration terms into the boid's
// direction (its velocity), clamps the speed to config->maxVelocity, then
// advances the position by one step of that velocity.
static __global__ void calculatePositions(Boid* boids, Acceleration* accelerations, size_t size, FlockConfig* config)
{
	const int boidId = blockDim.x * blockIdx.x + threadIdx.x;
	if (boidId >= size) return;
	float3 acc = accelerations[boidId].separation + accelerations[boidId].cohesion + accelerations[boidId].alignment + accelerations[boidId].goal;
	boids[boidId].direction += acc;
	boids[boidId].direction = vecClamp(boids[boidId].direction, config->maxVelocity);
	boids[boidId].position += boids[boidId].direction;
}
/// C++
// Half-width of the cube in which boids are spawned; grows logarithmically
// with the flock size (double log10 result narrowed to float).
static float getInitBoidRange()
{
	return log10(BOID_COUNT);
}
// Creates `count` boids with positions uniform in [-range, range]^3.
// NOTE(review): dirDist(0.01f, 0.01f) is a degenerate range, so every
// direction component is exactly 0.01f -- possibly meant to be
// (-0.01f, 0.01f); confirm before changing.
static std::vector<Boid> initBoids(int count)
{
	std::random_device rd;
	std::mt19937 engine(rd());
	std::uniform_real_distribution<float> posDist(-getInitBoidRange(), getInitBoidRange());
	std::uniform_real_distribution<float> dirDist(0.01f, 0.01f);
	std::vector<Boid> boids;
	for (int i = 0; i < count; i++)
	{
		boids.emplace_back(
			make_float3(posDist(engine), posDist(engine), posDist(engine)),
			make_float3(dirDist(engine), dirDist(engine), dirDist(engine))
		);
	}
	return boids;
}
// Copies boid state and accelerations back to the host and pushes the
// transforms into the demo's render objects. Also recomputes the flock's
// centre of mass, advances the path group towards it, and refreshes the
// global goal from the current path target.
static void copyTransformsFromCuda(DemoBoids* demo, CudaMemory<Boid>& boids, CudaMemory<Acceleration>& accelerations)
{
	std::vector<Boid> cpuBoids(BOID_COUNT);
	boids.load(*cpuBoids.data(), BOID_COUNT);
	std::vector<Acceleration> cpuAccelerations(BOID_COUNT);
	accelerations.load(*cpuAccelerations.data(), BOID_COUNT);
	SceneManager* manager = SceneManager::GetInstance(); // NOTE(review): unused local
	flockCenter = glm::vec3(0.0f, 0.0f, 0.0f);
	for (int i = 0; i < cpuBoids.size(); i++)
	{
		demo->boids[i]->setTransforms(cpuBoids[i].position, cpuBoids[i].direction, cpuAccelerations[i]);
		demo->boids[i]->setViewAngle(boidsViewAngle);
#ifdef DEBUG_DRAW
		demo->boids[i]->setDrawHelper(true);
#endif
		flockCenter += glm::vec3(cpuBoids[i].position.x, cpuBoids[i].position.y, cpuBoids[i].position.z);
	}
	flockCenter /= cpuBoids.size();
	pathGroup.update(flockCenter);
	boidGoal = pathGroup.getCurrentTarget();
}
// Snapshots the tweakable global parameters into a FlockConfig POD that is
// uploaded to the device each frame. viewAngle is converted from degrees to
// radians here (the device-side visibility test expects radians).
static FlockConfig update_config()
{
	FlockConfig config = { 0 };
	config.separationFactor = boidsSeparationFactor;
	config.cohesionFactor = boidsCohesionFactor;
	config.alignmentFactor = boidsAlignmentFactor;
	config.goalFactor = boidsGoalFactor;
	config.goal = make_float3(boidGoal.x, boidGoal.y, boidGoal.z);
	config.cohesionNeighbourhood = boidsCohesionNeighbourhood;
	config.separationNeighbourhood = boidsSeparationNeighbourhood;
	config.alignmentNeighbourhood = boidsAlignmentNeighbourhood;
	config.maxVelocity = boidsMaxVelocity;
	config.viewAngle = glm::radians(boidsViewAngle);
	return config;
}
// Unprojects the last mouse position (at depth 1.0, i.e. the far plane)
// through camera 0's view/projection matrices into world space.
static glm::vec3 getMousePos(Mouse* mouse)
{
	SceneData* sceneData = SceneManager::GetInstance()->m_sceneData;
	unsigned int* screen = SceneManager::GetInstance()->m_sceneSetting->m_screen;
	glm::vec3 position = glm::vec3(mouse->m_lastPosition[0], screen[1] - mouse->m_lastPosition[1], 1.0f);
	return glm::unProject(position, sceneData->cameras[0]->getVM(), sceneData->cameras[0]->getProjectionMatrix(), glm::vec4(0, 0, screen[0], screen[1]));
}
// Consumes a pending mouse click: builds a ray from the camera through the
// click point, scales it to the flock centre's current distance along that
// ray, and uses the resulting world point as the new goal, moving the goal
// marker model there.
static void updateTarget(DemoBoids* demo)
{
	SceneData* sceneData = SceneManager::GetInstance()->m_sceneData;
	Mouse* mouse = sceneData->mouse;
	if (!mouse->clickPending) return;
	mouse->clickPending = false;
	glm::vec3 pos = getMousePos(mouse);
	glm::vec3 cameraPosition = sceneData->cameras[0]->getPosition();
	glm::vec3 toFlock = flockCenter - cameraPosition;
	glm::vec3 toTarget = glm::normalize(pos - cameraPosition);
	toTarget *= glm::dot(toFlock, toTarget);
	toTarget += cameraPosition;
	boidGoal = toTarget;
	demo->modelObjects[0]->setPosition(boidGoal.x, boidGoal.y, boidGoal.z);
}
// Main simulation driver: sets up the (optional) OpenGL scene, uploads the
// initial flock, then loops forever: refresh config -> acceleration kernel
// -> integration kernel -> copy back & render. Never returns.
// NOTE(review): Sleep() implies a Windows-only build, and `demo` is never
// deleted -- the process runs until killed; confirm both are intentional.
static void boids_body(int argc, char** argv)
{
	srand((unsigned int) time(nullptr));
#ifdef VISUALIZE
	SceneManager* sceneManager = SceneManager::GetInstance();
	DemoBoids* demo = new DemoBoids(sceneManager->m_sceneData, BOID_COUNT);
	sceneManager->Init(argc, argv, demo);
	hipGLSetGLDevice(0);
#endif
	// Device-side state: boid array plus one Acceleration record per boid.
	std::vector<Boid> boids = initBoids(BOID_COUNT);
	CudaMemory<Boid> cudaBoids(boids.size(), boids.data());
	CudaMemory<Acceleration> cudaAccelerations(BOID_COUNT);
	dim3 blockDim(THREADS_PER_BLOCK, 1);
	dim3 gridDim(getNumberOfParts(BOID_COUNT, THREADS_PER_BLOCK), 1);
	FlockConfig flockConfig = update_config();
	CudaMemory<FlockConfig> flockConfigCuda(1, &flockConfig);
	CudaTimer timer;
	while (true)
	{
#ifdef VISUALIZE
		// Re-upload the config every frame so GUI tweaks take effect.
		flockConfigCuda.store(update_config());
#endif
#ifdef SIMULATE
		timer.start();
		calculateAccelerations << <gridDim, blockDim >> > (cudaBoids.device(), cudaAccelerations.device(), BOID_COUNT, flockConfigCuda.device());
		timer.stop_wait();
#ifndef VISUALIZE
		timer.print("Update directions: ");
#endif
#endif
#ifdef SIMULATE
		timer.start();
		calculatePositions << <gridDim, blockDim >> > (cudaBoids.device(), cudaAccelerations.device(), BOID_COUNT, flockConfigCuda.device());
		timer.stop_wait();
#ifndef VISUALIZE
		timer.print("Update positions: ");
#endif
#endif
#ifdef VISUALIZE
		copyTransformsFromCuda(demo, cudaBoids, cudaAccelerations);
		updateTarget(demo);
		sceneManager->Refresh();
		Sleep(5);
#endif
	}
}
// Public entry point; simply forwards to the driver loop above.
void boids(int argc, char** argv)
{
	boids_body(argc, argv);
}
| 639cf1f49b5d8528d9a62039bebfe3c4e1cae543.cu | #include <vector>
#include <ctime>
#include <random>
#include <thread>
#include <atomic>
#include <mutex>
#include "../cudautil.cuh"
#include "../cudamem.h"
#include "boids.h"
#include "path_group.h"
#include "../opengl/sceneManager.h"
#include "../opengl/demos/demo_boids.h"
#include "../opengl/CoreHeaders/sceneGUI.h"
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtx/quaternion.hpp>
#include <cuda_gl_interop.h>
#define BOID_COUNT (500)
#define THREADS_PER_BLOCK (256)
#define USE_SHARED_MEM
#define CHECK_VIEW_RANGE
#define VISUALIZE
#define SIMULATE
//#define DEBUG_DRAW
double boidsSeparationFactor = 1.0;
double boidsCohesionFactor = 0.7;
double boidsAlignmentFactor = 1.0;
double boidsGoalFactor = 0.2;
glm::vec3 boidGoal{ 10.0f, 0.0f, 0.0f };
double boidsSeparationNeighbourhood = 1.0f;
double boidsCohesionNeighbourhood = 4.0f;
double boidsAlignmentNeighbourhood = 1.0f;
double boidsMaxVelocity = 0.01f;
double boidsViewAngle = 135.0f;
static PathGroup pathGroup(0.5f, {
boidGoal,
glm::vec3(5.0f, 5.0f, 0.0f),
glm::vec3(0.0f, 0.0f, 5.0f)
});
static glm::vec3 flockCenter{ 0.0f, 0.0f, 0.0f };
/// Test
double boidTestDir[3] = { 0.0, 0.0, 1.0 };
/// CUDA
// Exact component-wise float3 equality; used by isInViewRange to recognise
// "same boid" (positions are copied verbatim, so exact compare is sufficient).
static __device__ bool operator==(const float3& vec1, const float3& vec2)
{
	return vec1.x == vec2.x && vec1.y == vec2.y && vec1.z == vec2.z;
}
// Limits the magnitude of vec to at most max, preserving direction.
// Zero-length vectors pass through unchanged (no division by zero).
static __device__ float3 vecClamp(const float3& vec, float max)
{
	float len = length(vec);
	if (len != 0.0f && len > max)
	{
		return vec * (max / len);
	}
	return vec;
}
// Normalizes vec to unit length; the zero vector is returned as-is.
static __device__ float3 vecNormalize(const float3& vec)
{
	float len = length(vec);
	if (len == 0.0f) return vec;
	return vec / len;
}
// Separation rule: repulsive vector pointing from the neighbour towards this
// boid, scaled by 1/distance so nearer neighbours repel more strongly.
// Contributes (and increments count) only when the neighbour lies strictly
// inside separationNeighbourhood and is not at the same position.
static __device__ float3 updateSeparation(const float3& position, const float3& otherPosition, int& count, FlockConfig* config)
{
	float3 vec = position - otherPosition;
	float len = length(vec);
	if (len == 0 || len >= config->separationNeighbourhood)
	{
		return make_float3(0.0f, 0.0f, 0.0f);
	}
	count++;
	return vecNormalize(vec) / len;
}
// Cohesion rule: returns the neighbour's position so the caller can average
// all contributions into a local centre of mass. Contributes only inside
// cohesionNeighbourhood.
static __device__ float3 updateCohesion(const float3& position, const float3& otherPosition, int& count, FlockConfig* config)
{
	float3 vec = position - otherPosition;
	float len = length(vec);
	if (len == 0 || len >= config->cohesionNeighbourhood)
	{
		return make_float3(0.0f, 0.0f, 0.0f);
	}
	count++;
	return otherPosition;
}
// Alignment rule: returns the neighbour's direction so the caller can average
// headings. Contributes only inside alignmentNeighbourhood.
static __device__ float3 updateAlignment(const float3& position, const float3& otherPosition, const float3& otherDirection, int& count, FlockConfig* config)
{
	float3 vec = position - otherPosition;
	float len = length(vec);
	if (len == 0 || len >= config->alignmentNeighbourhood)
	{
		return make_float3(0.0f, 0.0f, 0.0f);
	}
	count++;
	return otherDirection;
}
// Accumulates all three flocking terms (and their neighbour counts) for one
// boid/neighbour pair into `force`.
static __device__ void updateFlock(Force& force, const float3& position, const float3& otherPosition, const float3& otherDirection, FlockConfig* config)
{
	force.separation += updateSeparation(position, otherPosition, force.separationCount, config);
	force.cohesion += updateCohesion(position, otherPosition, force.cohesionCount, config);
	force.alignment += updateAlignment(position, otherPosition, otherDirection, force.alignmentCount, config);
}
// Field-of-view test: the neighbour is visible when the angle between this
// boid's heading and the vector to the neighbour is below viewAngle (in
// radians; the host converts from degrees via glm::radians). A boid never
// sees itself (identical position). Compiled to `true` when
// CHECK_VIEW_RANGE is undefined.
static __device__ bool isInViewRange(const float3& position, const float3& direction, const float3& otherPosition, float viewAngle)
{
#ifdef CHECK_VIEW_RANGE
	if (position == otherPosition) return false;
	float3 toTarget = vecNormalize(otherPosition - position);
	float angle = atan2(length(cross(toTarget, direction)), dot(toTarget, direction));
	return angle < viewAngle;
#else
	return true;
#endif
}
// One thread per boid: O(N^2) all-pairs pass producing the four acceleration
// terms (separation / cohesion / alignment / goal) in outAccelerations.
// With USE_SHARED_MEM the boid array is streamed through shared memory in
// blockDim.x-sized tiles; threads with boidId >= size are clamped to the
// last boid so every thread participates in the cooperative tile loads and
// barriers, and are filtered out only just before the final global write.
// Launch: 1D grid/block; blockDim.x must equal THREADS_PER_BLOCK (the
// shared tile size).
static __global__ void calculateAccelerations(Boid* __restrict__ boids, Acceleration* __restrict__ outAccelerations, const int size, FlockConfig* config)
{
#pragma region Init
#ifdef USE_SHARED_MEM
	__shared__ Boid sharedBoids[THREADS_PER_BLOCK];
#endif
	const int tileSize = blockDim.x;
	const int tileCount = gridDim.x;
	const int boidId = blockDim.x * blockIdx.x + threadIdx.x;
#ifdef USE_SHARED_MEM
	// Clamp out-of-range threads to the last boid instead of returning, so
	// all threads can still reach the __syncthreads() barriers below.
	float3 position = boids[min(boidId, size - 1)].position;
	float3 direction = vecNormalize(boids[min(boidId, size - 1)].direction);
#else
	if (boidId >= size) return;
	float3 position = boids[boidId].position;
	float3 direction = vecNormalize(boids[boidId].direction);
#endif
	Force force = { 0 };
#ifdef USE_SHARED_MEM
	int boidsLeft = size;
	// Full tiles: each thread loads one boid, then the whole block scans the tile.
	for (int tile = 0; tile < tileCount - 1; tile++)
	{
		int tid = tile * tileSize + threadIdx.x;
		sharedBoids[threadIdx.x] = boids[tid];
		__syncthreads(); // tile fully loaded before anyone reads it
		for (int i = 0; i < tileSize; i++)
		{
			if (isInViewRange(position, direction, sharedBoids[i].position, config->viewAngle))
			{
				updateFlock(force, position, sharedBoids[i].position, sharedBoids[i].direction, config);
			}
		}
		boidsLeft -= tileSize;
		__syncthreads(); // done reading before the next tile overwrites
	}
	// Last (possibly partial) tile: only boidsLeft entries are valid.
	int tid = (tileCount - 1) * tileSize + threadIdx.x;
	if (tid < size)
	{
		sharedBoids[threadIdx.x] = boids[tid];
	}
	__syncthreads();
	for (int i = 0; i < boidsLeft; i++)
	{
		if (isInViewRange(position, direction, sharedBoids[i].position, config->viewAngle))
		{
			updateFlock(force, position, sharedBoids[i].position, sharedBoids[i].direction, config);
		}
	}
	__syncthreads();
	if (boidId >= size) return; // padding threads write nothing
#else
	for (int i = 0; i < size; i++)
	{
		if (isInViewRange(position, direction, boids[i].position, config->viewAngle))
		{
			updateFlock(force, position, boids[i].position, boids[i].direction, config);
		}
	}
#endif
#pragma region Create force vector
	// Average each accumulated term over the neighbours that contributed.
	if (force.cohesionCount > 0)
	{
		force.cohesion /= force.cohesionCount; // center of mass
	}
	if (force.alignmentCount > 0)
	{
		force.alignment /= force.alignmentCount;
	}
	if (force.separationCount > 0)
	{
		force.separation /= force.separationCount;
	}
	// Scale each term by its configured weight; cohesion steers towards the
	// local centre of mass, goal steers towards the current path target.
	Acceleration acc;
	acc.alignment = force.alignment * config->alignmentFactor;
	acc.separation = force.separation * config->separationFactor;
	if (force.cohesionCount > 0)
	{
		acc.cohesion = (force.cohesion - position) * config->cohesionFactor;
	}
	else acc.cohesion = make_float3(0.0f, 0.0f, 0.0f);
	acc.goal = vecNormalize(config->goal - position) * config->goalFactor;
	outAccelerations[boidId] = acc;
#pragma endregion
}
// One thread per boid: adds the summed acceleration terms into the boid's
// direction (its velocity), clamps the speed to config->maxVelocity, then
// advances the position by one step of that velocity.
static __global__ void calculatePositions(Boid* boids, Acceleration* accelerations, size_t size, FlockConfig* config)
{
	const int boidId = blockDim.x * blockIdx.x + threadIdx.x;
	if (boidId >= size) return;
	float3 acc = accelerations[boidId].separation + accelerations[boidId].cohesion + accelerations[boidId].alignment + accelerations[boidId].goal;
	boids[boidId].direction += acc;
	boids[boidId].direction = vecClamp(boids[boidId].direction, config->maxVelocity);
	boids[boidId].position += boids[boidId].direction;
}
/// C++
// Half-width of the cube in which boids are spawned; grows logarithmically
// with the flock size (double log10 result narrowed to float).
static float getInitBoidRange()
{
	return log10(BOID_COUNT);
}
// Creates `count` boids with positions uniform in [-range, range]^3.
// NOTE(review): dirDist(0.01f, 0.01f) is a degenerate range, so every
// direction component is exactly 0.01f -- possibly meant to be
// (-0.01f, 0.01f); confirm before changing.
static std::vector<Boid> initBoids(int count)
{
	std::random_device rd;
	std::mt19937 engine(rd());
	std::uniform_real_distribution<float> posDist(-getInitBoidRange(), getInitBoidRange());
	std::uniform_real_distribution<float> dirDist(0.01f, 0.01f);
	std::vector<Boid> boids;
	for (int i = 0; i < count; i++)
	{
		boids.emplace_back(
			make_float3(posDist(engine), posDist(engine), posDist(engine)),
			make_float3(dirDist(engine), dirDist(engine), dirDist(engine))
		);
	}
	return boids;
}
// Copies boid state and accelerations back to the host and pushes the
// transforms into the demo's render objects. Also recomputes the flock's
// centre of mass, advances the path group towards it, and refreshes the
// global goal from the current path target.
static void copyTransformsFromCuda(DemoBoids* demo, CudaMemory<Boid>& boids, CudaMemory<Acceleration>& accelerations)
{
	std::vector<Boid> cpuBoids(BOID_COUNT);
	boids.load(*cpuBoids.data(), BOID_COUNT);
	std::vector<Acceleration> cpuAccelerations(BOID_COUNT);
	accelerations.load(*cpuAccelerations.data(), BOID_COUNT);
	SceneManager* manager = SceneManager::GetInstance(); // NOTE(review): unused local
	flockCenter = glm::vec3(0.0f, 0.0f, 0.0f);
	for (int i = 0; i < cpuBoids.size(); i++)
	{
		demo->boids[i]->setTransforms(cpuBoids[i].position, cpuBoids[i].direction, cpuAccelerations[i]);
		demo->boids[i]->setViewAngle(boidsViewAngle);
#ifdef DEBUG_DRAW
		demo->boids[i]->setDrawHelper(true);
#endif
		flockCenter += glm::vec3(cpuBoids[i].position.x, cpuBoids[i].position.y, cpuBoids[i].position.z);
	}
	flockCenter /= cpuBoids.size();
	pathGroup.update(flockCenter);
	boidGoal = pathGroup.getCurrentTarget();
}
// Snapshots the tweakable global parameters into a FlockConfig POD that is
// uploaded to the device each frame. viewAngle is converted from degrees to
// radians here (the device-side visibility test expects radians).
static FlockConfig update_config()
{
	FlockConfig config = { 0 };
	config.separationFactor = boidsSeparationFactor;
	config.cohesionFactor = boidsCohesionFactor;
	config.alignmentFactor = boidsAlignmentFactor;
	config.goalFactor = boidsGoalFactor;
	config.goal = make_float3(boidGoal.x, boidGoal.y, boidGoal.z);
	config.cohesionNeighbourhood = boidsCohesionNeighbourhood;
	config.separationNeighbourhood = boidsSeparationNeighbourhood;
	config.alignmentNeighbourhood = boidsAlignmentNeighbourhood;
	config.maxVelocity = boidsMaxVelocity;
	config.viewAngle = glm::radians(boidsViewAngle);
	return config;
}
// Unprojects the last mouse position (at depth 1.0, i.e. the far plane)
// through camera 0's view/projection matrices into world space.
static glm::vec3 getMousePos(Mouse* mouse)
{
	SceneData* sceneData = SceneManager::GetInstance()->m_sceneData;
	unsigned int* screen = SceneManager::GetInstance()->m_sceneSetting->m_screen;
	glm::vec3 position = glm::vec3(mouse->m_lastPosition[0], screen[1] - mouse->m_lastPosition[1], 1.0f);
	return glm::unProject(position, sceneData->cameras[0]->getVM(), sceneData->cameras[0]->getProjectionMatrix(), glm::vec4(0, 0, screen[0], screen[1]));
}
// Consumes a pending mouse click: builds a ray from the camera through the
// click point, scales it to the flock centre's current distance along that
// ray, and uses the resulting world point as the new goal, moving the goal
// marker model there.
static void updateTarget(DemoBoids* demo)
{
	SceneData* sceneData = SceneManager::GetInstance()->m_sceneData;
	Mouse* mouse = sceneData->mouse;
	if (!mouse->clickPending) return;
	mouse->clickPending = false;
	glm::vec3 pos = getMousePos(mouse);
	glm::vec3 cameraPosition = sceneData->cameras[0]->getPosition();
	glm::vec3 toFlock = flockCenter - cameraPosition;
	glm::vec3 toTarget = glm::normalize(pos - cameraPosition);
	toTarget *= glm::dot(toFlock, toTarget);
	toTarget += cameraPosition;
	boidGoal = toTarget;
	demo->modelObjects[0]->setPosition(boidGoal.x, boidGoal.y, boidGoal.z);
}
// Demo driver: sets up the (optional) renderer and the device-side boid
// buffers, then runs the simulate/visualize loop. The loop has no exit
// condition; it runs until the process is terminated.
static void boids_body(int argc, char** argv)
{
// Seed the host RNG used by initBoids for initial placement.
srand((unsigned int) time(nullptr));
#ifdef VISUALIZE
SceneManager* sceneManager = SceneManager::GetInstance();
DemoBoids* demo = new DemoBoids(sceneManager->m_sceneData, BOID_COUNT);
sceneManager->Init(argc, argv, demo);
cudaGLSetGLDevice(0);
#endif
// Host-side initial state, mirrored into device buffers below.
std::vector<Boid> boids = initBoids(BOID_COUNT);
CudaMemory<Boid> cudaBoids(boids.size(), boids.data());
CudaMemory<Acceleration> cudaAccelerations(BOID_COUNT);
// 1-D launch configuration: one thread per boid.
dim3 blockDim(THREADS_PER_BLOCK, 1);
dim3 gridDim(getNumberOfParts(BOID_COUNT, THREADS_PER_BLOCK), 1);
FlockConfig flockConfig = update_config();
CudaMemory<FlockConfig> flockConfigCuda(1, &flockConfig);
CudaTimer timer;
while (true)
{
#ifdef VISUALIZE
// Re-upload the config every frame so UI tweaks take effect immediately.
flockConfigCuda.store(update_config());
#endif
#ifdef SIMULATE
timer.start();
// Phase 1: per-boid steering accelerations from the flocking rules.
calculateAccelerations << <gridDim, blockDim >> > (cudaBoids.device(), cudaAccelerations.device(), BOID_COUNT, flockConfigCuda.device());
timer.stop_wait();
#ifndef VISUALIZE
timer.print("Update directions: ");
#endif
#endif
#ifdef SIMULATE
timer.start();
// Phase 2: integrate velocities/positions from those accelerations.
calculatePositions << <gridDim, blockDim >> > (cudaBoids.device(), cudaAccelerations.device(), BOID_COUNT, flockConfigCuda.device());
timer.stop_wait();
#ifndef VISUALIZE
timer.print("Update positions: ");
#endif
#endif
#ifdef VISUALIZE
copyTransformsFromCuda(demo, cudaBoids, cudaAccelerations);
updateTarget(demo);
sceneManager->Refresh();
// Windows-only throttle on the render loop.
Sleep(5);
#endif
}
}
// Public entry point for the boids demo; forwards command-line arguments
// to the internal driver.
void boids(int argc, char** argv)
{
boids_body(argc, argv);
}
|
47b6a21d4f0250ab81c6fe230fbe6eb995c898fb.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/host_vector.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <algorithm>
#include <omp.h>
#define KEPLER 0
#include "ErrorCheck.h"
#include "include/encode.cuh"
#include "include/decode.cuh"
#include "include/cuZFP.cuh"
#include "array3d.h"
#include "zfparray3.h"
enum ENGHS_t{ N_LEFT, N_RIGHT, N_UP, N_DOWN, N_NEAR, N_FAR } enghs;
using namespace thrust;
using namespace std;
#define index(x, y, z) ((x) + 4 * ((y) + 4 * (z)))
const size_t nx = 64;
const size_t ny = 64;
const size_t nz = 64;
const int nt = 0;
const double pi = 3.14159265358979323846;
//BSIZE is the length of the array in class Bit
//It's tied to MAXBITS such that
//MAXBITS = sizeof(Word) * BSIZE
//which is really
//MAXBITS = wsize * BSIZE
//e.g. if we match bits one-to-one, double -> unsigned long long
// then BSIZE = 64 and MAXPBITS = 4096
#define BSIZE 16
uint MAXBITS = BSIZE*64;
uint MAXPREC = 64;
int MINEXP = -1074;
const double rate = BSIZE;
size_t blksize = 0;
uint size = 64;
int EBITS = 11; /* number of exponent bits */
const int EBIAS = 1023;
const int intprec = 64;
static const unsigned char
perm[64] = {
index(0, 0, 0), // 0 : 0
index(1, 0, 0), // 1 : 1
index(0, 1, 0), // 2 : 1
index(0, 0, 1), // 3 : 1
index(0, 1, 1), // 4 : 2
index(1, 0, 1), // 5 : 2
index(1, 1, 0), // 6 : 2
index(2, 0, 0), // 7 : 2
index(0, 2, 0), // 8 : 2
index(0, 0, 2), // 9 : 2
index(1, 1, 1), // 10 : 3
index(2, 1, 0), // 11 : 3
index(2, 0, 1), // 12 : 3
index(0, 2, 1), // 13 : 3
index(1, 2, 0), // 14 : 3
index(1, 0, 2), // 15 : 3
index(0, 1, 2), // 16 : 3
index(3, 0, 0), // 17 : 3
index(0, 3, 0), // 18 : 3
index(0, 0, 3), // 19 : 3
index(2, 1, 1), // 20 : 4
index(1, 2, 1), // 21 : 4
index(1, 1, 2), // 22 : 4
index(0, 2, 2), // 23 : 4
index(2, 0, 2), // 24 : 4
index(2, 2, 0), // 25 : 4
index(3, 1, 0), // 26 : 4
index(3, 0, 1), // 27 : 4
index(0, 3, 1), // 28 : 4
index(1, 3, 0), // 29 : 4
index(1, 0, 3), // 30 : 4
index(0, 1, 3), // 31 : 4
index(1, 2, 2), // 32 : 5
index(2, 1, 2), // 33 : 5
index(2, 2, 1), // 34 : 5
index(3, 1, 1), // 35 : 5
index(1, 3, 1), // 36 : 5
index(1, 1, 3), // 37 : 5
index(3, 2, 0), // 38 : 5
index(3, 0, 2), // 39 : 5
index(0, 3, 2), // 40 : 5
index(2, 3, 0), // 41 : 5
index(2, 0, 3), // 42 : 5
index(0, 2, 3), // 43 : 5
index(2, 2, 2), // 44 : 6
index(3, 2, 1), // 45 : 6
index(3, 1, 2), // 46 : 6
index(1, 3, 2), // 47 : 6
index(2, 3, 1), // 48 : 6
index(2, 1, 3), // 49 : 6
index(1, 2, 3), // 50 : 6
index(0, 3, 3), // 51 : 6
index(3, 0, 3), // 52 : 6
index(3, 3, 0), // 53 : 6
index(3, 2, 2), // 54 : 7
index(2, 3, 2), // 55 : 7
index(2, 2, 3), // 56 : 7
index(1, 3, 3), // 57 : 7
index(3, 1, 3), // 58 : 7
index(3, 3, 1), // 59 : 7
index(2, 3, 3), // 60 : 8
index(3, 2, 3), // 61 : 8
index(3, 3, 2), // 62 : 8
index(3, 3, 3), // 63 : 9
};
// Bytes required for one 64-value ZFP block compressed at `rate` bits per
// value, rounded up to whole bytes.
static size_t block_size(double rate)
{
	const long bits = lrint(64 * rate);      // total bits for the block
	return (bits + CHAR_BIT - 1) / CHAR_BIT; // ceil-divide to bytes
}
// Uploads the per-run ZFP compression constants into device __constant__
// memory. Must be called before any encode/decode kernel launch.
//
// perm     - 64-entry block-ordering permutation for 4x4x4 blocks
// maxbits_ - maximum bits per compressed block
// maxprec_ - maximum bit-plane precision
// minexp_  - minimum exponent for fixed-accuracy truncation
// ebits_   - exponent field width of Scalar
// ebias_   - exponent bias of Scalar
template<class Scalar>
void setupConst(const unsigned char *perm,
uint maxbits_,
uint maxprec_,
int minexp_,
int ebits_,
int ebias_
)
{
ErrorCheck ec;
ec.chk("setupConst start");
hipMemcpyToSymbol(c_perm, perm, sizeof(unsigned char) * 64, 0); ec.chk("setupConst: c_perm");
// BUG FIX: this previously copied the global MAXBITS, silently ignoring
// the maxbits_ parameter; use the parameter like every other constant.
hipMemcpyToSymbol(c_maxbits, &maxbits_, sizeof(uint)); ec.chk("setupConst: c_maxbits");
const uint sizeof_scalar = sizeof(Scalar);
hipMemcpyToSymbol(c_sizeof_scalar, &sizeof_scalar, sizeof(uint)); ec.chk("setupConst: c_sizeof_scalar");
hipMemcpyToSymbol(c_maxprec, &maxprec_, sizeof(uint)); ec.chk("setupConst: c_maxprec");
hipMemcpyToSymbol(c_minexp, &minexp_, sizeof(int)); ec.chk("setupConst: c_minexp");
hipMemcpyToSymbol(c_ebits, &ebits_, sizeof(int)); ec.chk("setupConst: c_ebits");
hipMemcpyToSymbol(c_ebias, &ebias_, sizeof(int)); ec.chk("setupConst: c_ebias");
ec.chk("setupConst finished");
}
// Functor for thrust::transform: maps a global index to a small
// pseudo-random float in [0, 1e-4), deterministic for a given index.
struct RandGen
{
	RandGen() {}
	__device__ float operator () (const uint idx)
	{
		thrust::default_random_engine engine;
		thrust::uniform_real_distribution<float> dist(0.0, 0.0001);
		// Skip ahead so every index draws an independent value.
		engine.discard(idx);
		return dist(engine);
	}
};
// Reports the root-mean-square error of u against the analytic heat-kernel
// (Green's function) solution of a unit point source at (x0,y0,z0), plus
// the field sum. Only interior grid points are compared.
//
// dx,dy,dz - grid spacings; k - diffusion constant; t - evaluation time.
template<typename Array>
void rme
(
const Array &u,
int x0,
int y0,
int z0,
const double dx,
const double dy,
const double dz,
const double k,
double t
)
{
// compute root mean square error with respect to exact solution
double e = 0;
double sum = 0;
for (int z = 1; z < nz - 1; z++){
double pz = dz * (z - z0);
for (int y = 1; y < ny - 1; y++) {
double py = dy * (y - y0);
for (int x = 1; x < nx - 1; x++) {
double px = dx * (x - x0);
double f = u(x, y, z);
//http://nptel.ac.in/courses/105103026/34
// Analytic 3-D heat kernel scaled by cell volume. std::pow replaces
// powf, which truncated the double arguments to float.
double g = dx * dy * dz * std::exp(-(px * px + py * py + pz * pz) / (4 * k * t)) / std::pow(4 * pi * k * t, 3.0 / 2.0);
e += (f - g) * (f - g);
sum += f;
}
}
}
// Normalize by the number of interior samples. The previous code divided
// by (nx-2)*(ny-2) only, overstating the RMS error for 3-D grids.
e = std::sqrt(e / ((nx - 2) * (ny - 2) * (nz - 2)));
std::cerr.unsetf(std::ios::fixed);
std::cerr << "rate=" << rate << " sum=" << std::fixed << sum << " error=" << std::setprecision(6) << std::scientific << e << std::endl;
}
// Flattens 3-D launch-domain coordinates into a linear index (row-major:
// x fastest, then y, then z) over the full grid covered by the launch.
__device__
static inline
int idx(int x, int y, int z)
{
	const int rowLen = blockDim.x * gridDim.x;            // points along x
	const int slabLen = rowLen * (blockDim.y * gridDim.y); // points per z-slab
	return x + y * rowLen + z * slabLen;
}
// One explicit finite-difference diffusion step: du = dt * k * laplacian(u).
// Expects a 3-D launch covering the whole grid, one thread per point.
// Neighbour indices are clamped at the domain edges. tfinal is unused here;
// it is kept for signature parity with the call site.
template<typename Scalar>
__global__
void cudaDiffusion
(
const Scalar *u,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k,
const Scalar tfinal,
Scalar *du
)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int z = threadIdx.z + blockIdx.z * blockDim.z;
// Second central differences; max/min clamp boundary neighbours in place.
Scalar uxx = (u[idx(max(0, x - 1), y, z)] - 2 * u[idx(x, y, z)] + u[idx(min(blockDim.x*gridDim.x - 1, x + 1), y, z)]) / (dx * dx);
Scalar uyy = (u[idx(x, max(0, y - 1), z)] - 2 * u[idx(x, y, z)] + u[idx(x, min(blockDim.y*gridDim.y - 1, y + 1), z)]) / (dy * dy);
Scalar uzz = (u[idx(x, y, max(0, z - 1))] - 2 * u[idx(x, y, z)] + u[idx(x, y, min(blockDim.z*gridDim.z-1, z + 1))]) / (dz * dz);
du[idx(x, y, z)] = dt * k * (uxx + uyy + uzz);
}
// Forward-Euler accumulation: u += du, one grid point per thread.
// Uses the same 3-D launch layout as cudaDiffusion.
template<typename Scalar>
__global__
void cudaSum
(
Scalar *u,
const Scalar *du
)
{
	const int gx = blockIdx.x * blockDim.x + threadIdx.x;
	const int gy = blockIdx.y * blockDim.y + threadIdx.y;
	const int gz = blockIdx.z * blockDim.z + threadIdx.z;
	const int i = idx(gx, gy, gz);
	u[i] += du[i];
}
// Half-compressed diffusion step: reads u uncompressed from global memory,
// applies the clamped 7-point Laplacian, and encodes du = dt*k*lap(u) into
// ZFP-compressed words (bsize words per 4x4x4 block). One 4x4x4 thread
// block per ZFP block; dynamic shared memory must cover the tile buffers
// plus cuZFP encode scratch (see launch site for the size expression).
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
__global__
void
__launch_bounds__(64, 5)
cudaZFPDiffusion
(
const Scalar *u,
Word *du,
uint size,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k
)
{
uint x = threadIdx.x;
uint y = threadIdx.y;
uint z = threadIdx.z;
// Linear thread id within the 4x4x4 block and linear block id in the grid.
uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y;
uint bidx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x);
uint bdim = blockDim.x*blockDim.y*blockDim.z;
// tbidx, x, y, z are currently unused in this overload.
uint tbidx = bidx*bdim;
// Carve dynamic shared memory into tile buffers; the remainder (new_smem)
// is scratch for the cuZFP encoder.
extern __shared__ unsigned char smem[];
__shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext;
s_u = (Scalar*)&smem[0];
s_du = (Scalar*)&s_u[64];
s_u_ext = (Scalar*)&s_du[64];
s_nghs = (Scalar*)&s_u_ext[216];
unsigned char *new_smem = (unsigned char*)&s_nghs[64];
//cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + bidx*bsize, new_smem, tid, s_u);
//cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + bidx*bsize, new_smem, tid, s_du);
//__syncthreads();
// Global grid coordinates of this thread's point.
int3 utid = make_int3(threadIdx.x + blockDim.x * blockIdx.x, threadIdx.y + blockDim.y * blockIdx.y, threadIdx.z + blockDim.z * blockIdx.z);
// Clamped second central differences, same stencil as cudaDiffusion.
Scalar uxx = (u[idx(max(0, utid.x - 1), utid.y, utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(min(blockDim.x*gridDim.x - 1, utid.x + 1), utid.y, utid.z)]) / (dx * dx);
Scalar uyy = (u[idx(utid.x, max(0, utid.y - 1), utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, min(blockDim.y*gridDim.y - 1, utid.y + 1), utid.z)]) / (dy * dy);
Scalar uzz = (u[idx(utid.x, utid.y, max(0, utid.z - 1))] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, utid.y, min(blockDim.z*gridDim.z - 1, utid.z + 1))]) / (dz * dz);
s_du[tid] = dt*k * (uxx + uyy + uzz);
__syncthreads();
// Compress the 64 du values of this block into du at word offset bidx*bsize.
cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(
s_du,
size,
new_smem,
bidx * bsize,
du
);
}
// Fully-compressed diffusion step. Each 4x4x4 thread block owns one ZFP
// block of u: it decodes that block plus the touching faces of its six
// neighbouring blocks into a 6x6x6 halo tile (s_u_ext), evaluates the
// 7-point Laplacian on the tile, and encodes du = dt*k*lap(u) back to
// compressed words. Missing neighbours (domain boundary) leave zeros in
// the halo, so boundary behaviour differs from the clamped uncompressed
// kernel above. Dynamic shared memory must cover s_u(64) + s_du(64) +
// s_u_ext(216) + s_nghs(64) Scalars plus cuZFP scratch (see launch site).
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
__global__
void
__launch_bounds__(64, 5)
cudaZFPDiffusion
(
const Word *u,
Word *du,
uint size,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k
)
{
uint x = threadIdx.x;
uint y = threadIdx.y;
uint z = threadIdx.z;
uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y;
// NOTE(review): this local 'idx' (linear ZFP-block index) shadows the
// idx() device helper defined earlier in the file.
uint idx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x);
uint bdim = blockDim.x*blockDim.y*blockDim.z;
uint bidx = idx*bdim;
// Carve dynamic shared memory; new_smem is cuZFP decode/encode scratch.
extern __shared__ unsigned char smem[];
__shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext;
s_u = (Scalar*)&smem[0];
s_du = (Scalar*)&s_u[64];
s_u_ext = (Scalar*)&s_du[64];
s_nghs = (Scalar*)&s_u_ext[216];
unsigned char *new_smem = (unsigned char*)&s_nghs[64];
// Decode this block's own 64 values of u.
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + idx*bsize, new_smem, tid, s_u);
//cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + idx*bsize, new_smem, tid, s_du);
// Zero the 6x6x6 halo tile: 216 entries cleared by 64 threads
// (3 full passes of 64 plus a final 24-entry pass).
for (int i = 0; i < 3; i++){
s_u_ext[i * 64 + tid] = 0;
}
if (tid < 24)
s_u_ext[192 + tid] = 0;
__syncthreads();
//left
s_nghs[tid] = 0;
if (blockIdx.x > 0){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + ((blockIdx.x-1) + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[(i+1) * 6 + (j+1) * 36] = s_nghs[3 + i * blockDim.x + j * blockDim.x * blockDim.y];
// }
// }
//}
// The z==0 plane of threads (16 of them) copies the neighbour's x=3 face
// into the tile's x=0 face; halo tile is indexed [x + y*6 + z*36].
if (z == 0){
s_u_ext[(x + 1) * 6 + (y + 1) * 36] = s_nghs[3 + x * blockDim.x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//right
s_nghs[tid] = 0;
if (blockIdx.x+1 < gridDim.x){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (1 + blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[5 + (i+1) * 6 + (j+1) * 36] = s_nghs[i*blockDim.x + j * blockDim.x * blockDim.y];
// }
// }
//}
// Neighbour's x=0 face -> tile's x=5 face.
if (z == 0){
s_u_ext[5 + (x + 1) * 6 + (y + 1) * 36] = s_nghs[x*blockDim.x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//down
s_nghs[tid] = 0;
if (blockIdx.y > 0){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y - 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + (j+1) * 36] = s_nghs[i + 3*blockDim.x + j * blockDim.x * blockDim.y];
// }
// }
//}
// Neighbour's y=3 face -> tile's y=0 face.
if (z == 0){
s_u_ext[1 + x + (y + 1) * 36] = s_nghs[x + 3 * blockDim.x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//up
s_nghs[tid] = 0;
if (blockIdx.y + 1 < gridDim.y){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y + 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + 5*6 + (j+1) * 36] = s_nghs[i + j * blockDim.x * blockDim.y];
// }
// }
//}
// Neighbour's y=0 face -> tile's y=5 face.
if (z == 0){
s_u_ext[1 + x + 5 * 6 + (y + 1) * 36] = s_nghs[x + y * blockDim.x * blockDim.y];
}
__syncthreads();
//near
s_nghs[tid] = 0;
if (blockIdx.z > 0){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z - 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + (j + 1) * 6] = s_nghs[i + (j)*blockDim.x + 3 * blockDim.x * blockDim.y];
// }
// }
//}
// Neighbour's z=3 face -> tile's z=0 face.
if (z == 0){
s_u_ext[1 + x + (y + 1) * 6] = s_nghs[x + (y)*blockDim.x + 3 * blockDim.x * blockDim.y];
}
__syncthreads();
//far
s_nghs[tid] = 0;
if (blockIdx.z + 1 < gridDim.z){
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z + 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
}
__syncthreads();
//if (tid == 0){
// for (int i = 0; i < 4; i++){
// for (int j = 0; j < 4; j++){
// s_u_ext[1 + i + (j + 1) * 6 + 5 * 36] = s_nghs[i + (j)*blockDim.x ];
// }
// }
//}
// Neighbour's z=0 face -> tile's z=5 face.
if (z == 0){
s_u_ext[1 + x + (y + 1) * 6 + 5 * 36] = s_nghs[x + (y)*blockDim.x];
}
__syncthreads();
// Finally the 4x4x4 interior of the tile <- this block's own values.
s_u_ext[1 + x + (y + 1) * 6 + (z + 1) * 36] = s_u[tid];
__syncthreads();
// 7-point Laplacian evaluated on the 6x6x6 halo tile.
Scalar uxx = (s_u_ext[x + (y + 1) * 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 2 + (y + 1) * 6 + (z + 1) * 36]) / (dx * dx);
Scalar uyy = (s_u_ext[x + 1 + (y)* 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 2) * 6 + (z + 1) * 36]) / (dy * dy);
Scalar uzz = (s_u_ext[x + 1 + (y + 1) * 6 + (z)* 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 1) * 6 + (z + 2) * 36]) / (dz * dz);
s_du[tid] = dt*k * (uxx + uyy + uzz);
__syncthreads();
//if (uxx < 0 || uyy < 0 || uzz < 0){
// printf("%d, %f, %f, %f, %f %f %f %d %d %d %d\n", tid, dt, k, s_du[tid], uxx, uyy, uzz, threadIdx.x + blockIdx.x * blockDim.x, threadIdx.y + blockIdx.y * blockDim.y, threadIdx.z + blockIdx.z * blockDim.z, threadIdx.x + blockIdx.x * blockDim.x + (threadIdx.y + blockIdx.y * blockDim.y)*gridDim.x * blockDim.x + (threadIdx.z + blockIdx.z * blockDim.z)*gridDim.x * blockDim.x * gridDim.y * blockDim.y);
//}
// Compress this block's 64 du values back to words at offset idx*bsize.
cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(
s_du,
size,
new_smem,
idx * bsize,
du
);
//out[(threadIdx.z + blockIdx.z * 4)*gridDim.x * gridDim.y * blockDim.x * blockDim.y + (threadIdx.y + blockIdx.y * 4)*gridDim.x * blockDim.x + (threadIdx.x + blockIdx.x * 4)] = s_dblock[tid];
}
// Host driver for one compressed time step: launches the fully-compressed
// diffusion kernel (u, du both ZFP-encoded, bsize words per block), then
// accumulates u += du in compressed space via cuZFP::transform.
// df_u is an uncompressed scratch field; it is unused in the active path
// (kept for the commented-out half-compressed variant below).
// tfinal is unused here; kept for signature symmetry with the callers.
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
void gpuZFPDiffusion
(
int nx, int ny, int nz,
device_vector<Word > &u, // ZFP-compressed field
device_vector<Word > &du, // ZFP-compressed update, overwritten each step
device_vector<Scalar> &df_u, //uncompressed scratch (unused in active path)
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k,
const Scalar tfinal
)
{
// One 4x4x4 thread block per ZFP block.
dim3 block_size = dim3(4, 4, 4);
dim3 grid_size = dim3(nx, ny, nz);
grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z;
// Dynamic shared memory: cuZFP scratch per thread plus the four Scalar
// tile buffers used by the kernel (64+64+64 values and the 216-entry halo).
cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> >
(
thrust::raw_pointer_cast(u.data()),
thrust::raw_pointer_cast(du.data()),
size,
dx,dy,dz,dt,k
);
// cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(
// nx, ny, nz,
// u, df_u,
// group_count
// );
//cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> >
// (
// thrust::raw_pointer_cast(df_u.data()),
// thrust::raw_pointer_cast(du.data()),
// size,
// dx,dy,dz,dt,k
// );
// Forward Euler step in compressed space: u <- u (+) du.
cuZFP::transform <Int, UInt, Scalar, bsize, intprec>
(
nx,ny,nz,
size,
u,
du,
thrust::plus<Scalar>()
);
//Scalar sum_u = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u);
//Scalar sum_du = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, du);
//cout << "post-transform du: " << sum_du << " u: " << sum_u << endl;
}
// Uncompressed GPU reference solver: forward-Euler diffusion of a unit
// point source at (x0,y0,z0), timed with events, followed by an RMS-error
// report against the analytic solution. Grid extents come from the file
// globals nx/ny/nz and must be divisible by 4.
template<typename Scalar>
void gpu_discrete_solution
(
const int x0,
const int y0,
const int z0,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k,
const Scalar tfinal
)
{
thrust::host_vector<Scalar> h_u(nx*ny*nz, 0);
thrust::device_vector<Scalar> u(nx*ny*nz);
thrust::device_vector<Scalar> du(nx*ny*nz);
ErrorCheck ec;
hipEvent_t start, stop;
float millisecs;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Unit heat source at the requested grid point.
h_u[x0 + y0 * nx + z0 * nx * ny] = 1;
u = h_u;
// One thread per grid point, 4x4x4 blocks.
dim3 block_size(4, 4, 4);
dim3 grid_size;
grid_size.x = nx / block_size.x;
grid_size.y = ny / block_size.y;
grid_size.z = nz / block_size.z;
double t;
for (t = 0; t < tfinal; t += dt) {
std::cerr << "gpu t=" << std::fixed << t << std::endl;
cudaDiffusion << <grid_size, block_size >> >
(
thrust::raw_pointer_cast(u.data()),
dx,dy,dz,
dt,
k,
tfinal,
thrust::raw_pointer_cast(du.data())
);
hipStreamSynchronize(0);
ec.chk("cudaDiffusion");
// Forward Euler step: u += du.
cudaSum << < grid_size, block_size >> >
(
thrust::raw_pointer_cast(u.data()),
thrust::raw_pointer_cast(du.data())
);
}
h_u = u;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&millisecs, start, stop);
// Release the timing events (they were previously leaked on every call).
hipEventDestroy(start);
hipEventDestroy(stop);
ec.chk("gpu_discrete_solution");
cout << "Diffusion GPU in time: " << millisecs << endl;
// Copy into an array3d so rme() can index it with operator().
array3d out(nx, ny, nz, 0);
for (int i = 0; i < u.size(); i++){
out[i] = h_u[i];
}
rme(out, x0, y0, z0, dx, dy, dz, k, tfinal - dt);
}
// Compressed (cuZFP) GPU diffusion solver: the field u and its update du
// live in ZFP-compressed form (bsize words per 4x4x4 block) and are
// decoded/encoded on the fly by gpuZFPDiffusion every time step.
// h_u is reused as scratch for the initial condition; its previous contents
// are discarded (the function zero-fills it first).
template<class Int, class UInt, class Scalar, uint bsize>
void gpuDiffusion ( int x0, int y0, int z0,
Scalar dx,Scalar dy, Scalar dz, Scalar dt, Scalar k, Scalar tfinal,
host_vector<Scalar> &h_u)
{
host_vector<Scalar> h_du(nx*ny*nz, 0.0);
thrust::fill(h_u.begin(), h_u.end(), 0.0);
device_vector<Scalar> d_du;
// Unit heat source at (x0, y0, z0).
h_u[x0 + nx*y0 + nx*ny*z0] = 1;
device_vector<Scalar> d_u;
d_du = h_du;
d_u = h_u;
ErrorCheck ec;
hipEvent_t start, stop;
float millisecs;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
dim3 emax_size(nx / 4, ny / 4, nz / 4);
// Compressed storage: bsize words per 4x4x4 block.
device_vector<Word > u(emax_size.x * emax_size.y * emax_size.z * bsize);
device_vector<Word > du(emax_size.x * emax_size.y * emax_size.z * bsize);
cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, d_u, u, group_count, size);
cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, d_du, du, group_count, size);
// u <- u (+) du; du is all zeros here, this primes the compressed state.
cuZFP::transform<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, size, u, du, thrust::plus<Scalar>());
cout << "start: " << cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u) << endl;
d_du.clear();
d_du.shrink_to_fit();
hipStreamSynchronize(0);
ec.chk("cudaEncode");
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&millisecs, start, stop);
ec.chk("cudaencode");
cout << "encode diffusion GPU in time: " << millisecs << endl;
// Reuse the same events for the solver loop; the original code created a
// second pair here and leaked the first.
hipEventRecord(start, 0);
for (double t = 0; t < tfinal; t += dt){
std::cerr << "compressed gpu t=" << std::fixed << t << std::endl;
gpuZFPDiffusion<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u, du, d_u, dx, dy, dz, dt, k, tfinal);
hipStreamSynchronize(0);
ec.chk("gpuZFPDiffusion");
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&millisecs, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout << "Diffusion GPU ZFP in time: " << millisecs << endl;
// Decode the final compressed field back to d_u for error analysis.
cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u, d_u, group_count);
host_vector<Scalar> h_out = d_u;
array3d out(nx, ny, nz, rate);
for (int i = 0; i < h_out.size(); i++){
out[i] = h_out[i];
}
rme(out, x0, y0, z0, dx, dy, dz, k, tfinal - dt);
}
// Reference CPU solver: forward-Euler diffusion of a unit point source on
// array type Array (plain array3d or zfp::array3d), timed with the OpenMP
// wall clock, followed by an RMS-error report. u is updated in place and
// must be zero-initialized by its constructor.
template<typename Array>
void discrete_solution
(
Array &u,
int x0, int y0, int z0,
const double dx,
const double dy,
const double dz,
const double dt,
const double k,
const double tfinal
)
{
// initialize u (constructor zero-initializes)
double start_time = omp_get_wtime();
u(x0, y0, z0) = 1;
// iterate until final time
std::cerr.precision(6);
double t;
for (t = 0; t < tfinal; t += dt) {
std::cerr << "cpu t=" << std::fixed << t << std::endl;
// compute du/dt over interior points; boundary values stay at zero
Array du(nx, ny, nz, rate);
for (int z = 1; z < nz - 1; z++){
for (int y = 1; y < ny - 1; y++) {
for (int x = 1; x < nx - 1; x++) {
double uxx = (u(x - 1, y, z) - 2 * u(x, y, z) + u(x + 1, y, z)) / (dx * dx);
double uyy = (u(x, y - 1, z) - 2 * u(x, y, z) + u(x, y + 1, z)) / (dy * dy);
double uzz = (u(x, y, z - 1) - 2 * u(x, y, z) + u(x, y, z + 1)) / (dz * dz);
du(x, y, z) = dt * k * (uxx + uyy + uzz);
}
}
}
// take forward Euler step (removed an unused running-sum local here)
for (uint i = 0; i < u.size(); i++){
u[i] += du[i];
}
}
double time = omp_get_wtime() - start_time;
cout << "discrete time: " << time << endl;
rme(u, x0, y0, z0, dx, dy, dz, k, tfinal - dt);
}
// Runs the uncompressed GPU reference solver followed by the compressed
// cuZFP solver on a 64^3 grid with a centred unit heat source; each solver
// prints its timing and RMS error. The CPU reference paths are kept but
// disabled in the block comment below.
int main()
{
host_vector<double> h_vec_in(nx*ny*nz, 0);
//device_vector<double> d_vec_in(nx*ny*nz);
// thrust::counting_iterator<uint> index_sequence_begin(0);
//thrust::transform(
// index_sequence_begin,
// index_sequence_begin + nx*ny*nz,
// d_vec_in.begin(),
// RandGen());
//h_vec_in = d_vec_in;
//d_vec_in.clear();
//d_vec_in.shrink_to_fit();
// location of point heat source
int x0 = (nx - 1) / 2;
int y0 = (ny - 1) / 2;
int z0 = (nz - 1) / 2;
// constants used in the solution
const double k = 0.04;
// Grid spacings span [-1, 1] across the largest dimension.
const double dx = 2.0 / (::max(nz,::max(nx, ny)) - 1);
const double dy = 2.0 / (::max(nz, ::max(nx, ny)) - 1);
const double dz = 2.0 / (::max(nz, ::max(nx, ny)) - 1);
// Time step chosen for explicit-scheme stability.
const double dt = 0.5 * (dx * dx + dy * dy) / (8 * k);
// nt == 0 means "integrate to t = 1" rather than a fixed step count.
const double tfinal = nt ? nt * dt : 1;
/*
cout << "cpu diffusion start" << endl;
array3d u(nx, ny, nz, rate);
discrete_solution<array3d>(u, x0, y0, z0, dx,dy,dz,dt,k, tfinal);
cout << "compressed cpu diffusion start" << endl;
zfp::array3d u2(nx, ny, nz, rate);
discrete_solution<zfp::array3d>(u2, x0, y0, z0, dx, dy, dz, dt, k, tfinal);
*/
cout << "GPU discete diffusion start" << endl;
gpu_discrete_solution<double>(x0, y0, z0, dx, dy, dz, dt, k, tfinal);
cout << "GPU ZFP diffusion start" << endl;
// The ZFP kernels are shared-memory heavy; prefer shared over L1.
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
setupConst<double>(perm, MAXBITS, MAXPREC, MINEXP, EBITS, EBIAS);
cout << "Begin gpuDiffusion" << endl;
gpuDiffusion<long long, unsigned long long, double, BSIZE>(x0,y0,z0, dx, dy, dz, dt, k, tfinal, h_vec_in);
cout << "Finish gpuDiffusion" << endl;
}
| 47b6a21d4f0250ab81c6fe230fbe6eb995c898fb.cu | #include <iostream>
#include <iomanip>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/host_vector.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <algorithm>
#include <omp.h>
#define KEPLER 0
#include "ErrorCheck.h"
#include "include/encode.cuh"
#include "include/decode.cuh"
#include "include/cuZFP.cuh"
#include "array3d.h"
#include "zfparray3.h"
enum ENGHS_t{ N_LEFT, N_RIGHT, N_UP, N_DOWN, N_NEAR, N_FAR } enghs;
using namespace thrust;
using namespace std;
#define index(x, y, z) ((x) + 4 * ((y) + 4 * (z)))
const size_t nx = 64;
const size_t ny = 64;
const size_t nz = 64;
const int nt = 0;
const double pi = 3.14159265358979323846;
//BSIZE is the length of the array in class Bit
//It's tied to MAXBITS such that
//MAXBITS = sizeof(Word) * BSIZE
//which is really
//MAXBITS = wsize * BSIZE
//e.g. if we match bits one-to-one, double -> unsigned long long
// then BSIZE = 64 and MAXPBITS = 4096
#define BSIZE 16
uint MAXBITS = BSIZE*64;
uint MAXPREC = 64;
int MINEXP = -1074;
const double rate = BSIZE;
size_t blksize = 0;
uint size = 64;
int EBITS = 11; /* number of exponent bits */
const int EBIAS = 1023;
const int intprec = 64;
static const unsigned char
perm[64] = {
index(0, 0, 0), // 0 : 0
index(1, 0, 0), // 1 : 1
index(0, 1, 0), // 2 : 1
index(0, 0, 1), // 3 : 1
index(0, 1, 1), // 4 : 2
index(1, 0, 1), // 5 : 2
index(1, 1, 0), // 6 : 2
index(2, 0, 0), // 7 : 2
index(0, 2, 0), // 8 : 2
index(0, 0, 2), // 9 : 2
index(1, 1, 1), // 10 : 3
index(2, 1, 0), // 11 : 3
index(2, 0, 1), // 12 : 3
index(0, 2, 1), // 13 : 3
index(1, 2, 0), // 14 : 3
index(1, 0, 2), // 15 : 3
index(0, 1, 2), // 16 : 3
index(3, 0, 0), // 17 : 3
index(0, 3, 0), // 18 : 3
index(0, 0, 3), // 19 : 3
index(2, 1, 1), // 20 : 4
index(1, 2, 1), // 21 : 4
index(1, 1, 2), // 22 : 4
index(0, 2, 2), // 23 : 4
index(2, 0, 2), // 24 : 4
index(2, 2, 0), // 25 : 4
index(3, 1, 0), // 26 : 4
index(3, 0, 1), // 27 : 4
index(0, 3, 1), // 28 : 4
index(1, 3, 0), // 29 : 4
index(1, 0, 3), // 30 : 4
index(0, 1, 3), // 31 : 4
index(1, 2, 2), // 32 : 5
index(2, 1, 2), // 33 : 5
index(2, 2, 1), // 34 : 5
index(3, 1, 1), // 35 : 5
index(1, 3, 1), // 36 : 5
index(1, 1, 3), // 37 : 5
index(3, 2, 0), // 38 : 5
index(3, 0, 2), // 39 : 5
index(0, 3, 2), // 40 : 5
index(2, 3, 0), // 41 : 5
index(2, 0, 3), // 42 : 5
index(0, 2, 3), // 43 : 5
index(2, 2, 2), // 44 : 6
index(3, 2, 1), // 45 : 6
index(3, 1, 2), // 46 : 6
index(1, 3, 2), // 47 : 6
index(2, 3, 1), // 48 : 6
index(2, 1, 3), // 49 : 6
index(1, 2, 3), // 50 : 6
index(0, 3, 3), // 51 : 6
index(3, 0, 3), // 52 : 6
index(3, 3, 0), // 53 : 6
index(3, 2, 2), // 54 : 7
index(2, 3, 2), // 55 : 7
index(2, 2, 3), // 56 : 7
index(1, 3, 3), // 57 : 7
index(3, 1, 3), // 58 : 7
index(3, 3, 1), // 59 : 7
index(2, 3, 3), // 60 : 8
index(3, 2, 3), // 61 : 8
index(3, 3, 2), // 62 : 8
index(3, 3, 3), // 63 : 9
};
// Bytes required for one 64-value ZFP block compressed at `rate` bits per
// value, rounded up to whole bytes.
static size_t block_size(double rate)
{
	const long bits = lrint(64 * rate);      // total bits for the block
	return (bits + CHAR_BIT - 1) / CHAR_BIT; // ceil-divide to bytes
}
// Uploads the per-run ZFP compression constants into device __constant__
// memory. Must be called before any encode/decode kernel launch.
//
// perm     - 64-entry block-ordering permutation for 4x4x4 blocks
// maxbits_ - maximum bits per compressed block
// maxprec_ - maximum bit-plane precision
// minexp_  - minimum exponent for fixed-accuracy truncation
// ebits_   - exponent field width of Scalar
// ebias_   - exponent bias of Scalar
template<class Scalar>
void setupConst(const unsigned char *perm,
uint maxbits_,
uint maxprec_,
int minexp_,
int ebits_,
int ebias_
)
{
ErrorCheck ec;
ec.chk("setupConst start");
cudaMemcpyToSymbol(c_perm, perm, sizeof(unsigned char) * 64, 0); ec.chk("setupConst: c_perm");
// BUG FIX: this previously copied the global MAXBITS, silently ignoring
// the maxbits_ parameter; use the parameter like every other constant.
cudaMemcpyToSymbol(c_maxbits, &maxbits_, sizeof(uint)); ec.chk("setupConst: c_maxbits");
const uint sizeof_scalar = sizeof(Scalar);
cudaMemcpyToSymbol(c_sizeof_scalar, &sizeof_scalar, sizeof(uint)); ec.chk("setupConst: c_sizeof_scalar");
cudaMemcpyToSymbol(c_maxprec, &maxprec_, sizeof(uint)); ec.chk("setupConst: c_maxprec");
cudaMemcpyToSymbol(c_minexp, &minexp_, sizeof(int)); ec.chk("setupConst: c_minexp");
cudaMemcpyToSymbol(c_ebits, &ebits_, sizeof(int)); ec.chk("setupConst: c_ebits");
cudaMemcpyToSymbol(c_ebias, &ebias_, sizeof(int)); ec.chk("setupConst: c_ebias");
ec.chk("setupConst finished");
}
// Functor for thrust::transform: maps a global index to a small
// pseudo-random float in [0, 1e-4), deterministic for a given index.
struct RandGen
{
	RandGen() {}
	__device__ float operator () (const uint idx)
	{
		thrust::default_random_engine engine;
		thrust::uniform_real_distribution<float> dist(0.0, 0.0001);
		// Skip ahead so every index draws an independent value.
		engine.discard(idx);
		return dist(engine);
	}
};
// Reports the root-mean-square error of u against the analytic heat-kernel
// (Green's function) solution of a unit point source at (x0,y0,z0), plus
// the field sum. Only interior grid points are compared.
//
// dx,dy,dz - grid spacings; k - diffusion constant; t - evaluation time.
template<typename Array>
void rme
(
const Array &u,
int x0,
int y0,
int z0,
const double dx,
const double dy,
const double dz,
const double k,
double t
)
{
// compute root mean square error with respect to exact solution
double e = 0;
double sum = 0;
for (int z = 1; z < nz - 1; z++){
double pz = dz * (z - z0);
for (int y = 1; y < ny - 1; y++) {
double py = dy * (y - y0);
for (int x = 1; x < nx - 1; x++) {
double px = dx * (x - x0);
double f = u(x, y, z);
//http://nptel.ac.in/courses/105103026/34
// Analytic 3-D heat kernel scaled by cell volume. std::pow replaces
// powf, which truncated the double arguments to float.
double g = dx * dy * dz * std::exp(-(px * px + py * py + pz * pz) / (4 * k * t)) / std::pow(4 * pi * k * t, 3.0 / 2.0);
e += (f - g) * (f - g);
sum += f;
}
}
}
// Normalize by the number of interior samples. The previous code divided
// by (nx-2)*(ny-2) only, overstating the RMS error for 3-D grids.
e = std::sqrt(e / ((nx - 2) * (ny - 2) * (nz - 2)));
std::cerr.unsetf(std::ios::fixed);
std::cerr << "rate=" << rate << " sum=" << std::fixed << sum << " error=" << std::setprecision(6) << std::scientific << e << std::endl;
}
// Flattens 3-D launch-domain coordinates into a linear index (row-major:
// x fastest, then y, then z) over the full grid covered by the launch.
__device__
static inline
int idx(int x, int y, int z)
{
	const int rowLen = blockDim.x * gridDim.x;            // points along x
	const int slabLen = rowLen * (blockDim.y * gridDim.y); // points per z-slab
	return x + y * rowLen + z * slabLen;
}
// One explicit finite-difference diffusion step: du = dt * k * laplacian(u).
// Expects a 3-D launch covering the whole grid, one thread per point.
// Neighbour indices are clamped at the domain edges. tfinal is unused here;
// it is kept for signature parity with the call site.
template<typename Scalar>
__global__
void cudaDiffusion
(
const Scalar *u,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k,
const Scalar tfinal,
Scalar *du
)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int z = threadIdx.z + blockIdx.z * blockDim.z;
// Second central differences; max/min clamp boundary neighbours in place.
Scalar uxx = (u[idx(max(0, x - 1), y, z)] - 2 * u[idx(x, y, z)] + u[idx(min(blockDim.x*gridDim.x - 1, x + 1), y, z)]) / (dx * dx);
Scalar uyy = (u[idx(x, max(0, y - 1), z)] - 2 * u[idx(x, y, z)] + u[idx(x, min(blockDim.y*gridDim.y - 1, y + 1), z)]) / (dy * dy);
Scalar uzz = (u[idx(x, y, max(0, z - 1))] - 2 * u[idx(x, y, z)] + u[idx(x, y, min(blockDim.z*gridDim.z-1, z + 1))]) / (dz * dz);
du[idx(x, y, z)] = dt * k * (uxx + uyy + uzz);
}
// Forward-Euler accumulation: u += du, one grid point per thread.
// Uses the same 3-D launch layout as cudaDiffusion.
template<typename Scalar>
__global__
void cudaSum
(
Scalar *u,
const Scalar *du
)
{
	const int gx = blockIdx.x * blockDim.x + threadIdx.x;
	const int gy = blockIdx.y * blockDim.y + threadIdx.y;
	const int gz = blockIdx.z * blockDim.z + threadIdx.z;
	const int i = idx(gx, gy, gz);
	u[i] += du[i];
}
// Half-compressed diffusion step: reads u uncompressed from global memory,
// applies the clamped 7-point Laplacian, and encodes du = dt*k*lap(u) into
// ZFP-compressed words (bsize words per 4x4x4 block). One 4x4x4 thread
// block per ZFP block; dynamic shared memory must cover the tile buffers
// plus cuZFP encode scratch (see launch site for the size expression).
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
__global__
void
__launch_bounds__(64, 5)
cudaZFPDiffusion
(
const Scalar *u,
Word *du,
uint size,
const Scalar dx,
const Scalar dy,
const Scalar dz,
const Scalar dt,
const Scalar k
)
{
uint x = threadIdx.x;
uint y = threadIdx.y;
uint z = threadIdx.z;
// Linear thread id within the 4x4x4 block and linear block id in the grid.
uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y;
uint bidx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x);
uint bdim = blockDim.x*blockDim.y*blockDim.z;
// tbidx, x, y, z are currently unused in this overload.
uint tbidx = bidx*bdim;
// Carve dynamic shared memory into tile buffers; the remainder (new_smem)
// is scratch for the cuZFP encoder.
extern __shared__ unsigned char smem[];
__shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext;
s_u = (Scalar*)&smem[0];
s_du = (Scalar*)&s_u[64];
s_u_ext = (Scalar*)&s_du[64];
s_nghs = (Scalar*)&s_u_ext[216];
unsigned char *new_smem = (unsigned char*)&s_nghs[64];
//cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + bidx*bsize, new_smem, tid, s_u);
//cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + bidx*bsize, new_smem, tid, s_du);
//__syncthreads();
// Global grid coordinates of this thread's point.
int3 utid = make_int3(threadIdx.x + blockDim.x * blockIdx.x, threadIdx.y + blockDim.y * blockIdx.y, threadIdx.z + blockDim.z * blockIdx.z);
// Clamped second central differences, same stencil as cudaDiffusion.
Scalar uxx = (u[idx(max(0, utid.x - 1), utid.y, utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(min(blockDim.x*gridDim.x - 1, utid.x + 1), utid.y, utid.z)]) / (dx * dx);
Scalar uyy = (u[idx(utid.x, max(0, utid.y - 1), utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, min(blockDim.y*gridDim.y - 1, utid.y + 1), utid.z)]) / (dy * dy);
Scalar uzz = (u[idx(utid.x, utid.y, max(0, utid.z - 1))] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, utid.y, min(blockDim.z*gridDim.z - 1, utid.z + 1))]) / (dz * dz);
s_du[tid] = dt*k * (uxx + uyy + uzz);
__syncthreads();
// Compress the 64 du values of this block into du at word offset bidx*bsize.
cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(
s_du,
size,
new_smem,
bidx * bsize,
du
);
}
// Diffusion step operating entirely on ZFP-COMPRESSED data: decodes this
// block's 4x4x4 tile plus the six face-neighbour tiles into a 6x6x6 halo
// buffer (s_u_ext), applies the 7-point Laplacian, and re-encodes the update.
// One 4x4x4 thread block per ZFP block; blockDim must be (4,4,4).
// Shared-memory layout: [s_u:64][s_du:64][s_u_ext:216][s_nghs:64][scratch...].
// Halo cells not covered by a neighbour (domain boundary) stay 0, which acts
// as a Dirichlet-zero boundary here — unlike the clamped boundary of the
// uncompressed overload above (NOTE(review): confirm this asymmetry is
// intended).
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
__global__
void
__launch_bounds__(64, 5)
cudaZFPDiffusion
(
    const Word *u,
    Word *du,
    uint size,
    const Scalar dx,
    const Scalar dy,
    const Scalar dz,
    const Scalar dt,
    const Scalar k
)
{
    // Intra-block coordinates and linearized thread id in [0, 64).
    uint x = threadIdx.x;
    uint y = threadIdx.y;
    uint z = threadIdx.z;
    uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y;
    // Linear index of this ZFP block within the grid of blocks.
    uint idx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x);
    uint bdim = blockDim.x*blockDim.y*blockDim.z;
    uint bidx = idx*bdim;
    extern __shared__ unsigned char smem[];
    __shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext;
    s_u = (Scalar*)&smem[0];
    s_du = (Scalar*)&s_u[64];
    s_u_ext = (Scalar*)&s_du[64];
    s_nghs = (Scalar*)&s_u_ext[216];
    unsigned char *new_smem = (unsigned char*)&s_nghs[64];
    // Decode this block's own tile into s_u.
    cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + idx*bsize, new_smem, tid, s_u);
    //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + idx*bsize, new_smem, tid, s_du);
    // Zero the 6x6x6 halo buffer (216 entries: 3*64 + 24) cooperatively.
    for (int i = 0; i < 3; i++){
        s_u_ext[i * 64 + tid] = 0;
    }
    if (tid < 24)
        s_u_ext[192 + tid] = 0;
    __syncthreads();
    //left
    // Each neighbour pass: zero the staging tile, decode the neighbour (if it
    // exists), then the z==0 plane of threads copies its facing slab into the
    // matching face of s_u_ext.
    s_nghs[tid] = 0;
    if (blockIdx.x > 0){
        cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + ((blockIdx.x-1) + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
    }
    __syncthreads();
    //if (tid == 0){
    // for (int i = 0; i < 4; i++){
    // for (int j = 0; j < 4; j++){
    // s_u_ext[(i+1) * 6 + (j+1) * 36] = s_nghs[3 + i * blockDim.x + j * blockDim.x * blockDim.y];
    // }
    // }
    //}
    if (z == 0){
        s_u_ext[(x + 1) * 6 + (y + 1) * 36] = s_nghs[3 + x * blockDim.x + y * blockDim.x * blockDim.y];
    }
    __syncthreads();
    //right
    s_nghs[tid] = 0;
    if (blockIdx.x+1 < gridDim.x){
        cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (1 + blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
    }
    __syncthreads();
    //if (tid == 0){
    // for (int i = 0; i < 4; i++){
    // for (int j = 0; j < 4; j++){
    // s_u_ext[5 + (i+1) * 6 + (j+1) * 36] = s_nghs[i*blockDim.x + j * blockDim.x * blockDim.y];
    // }
    // }
    //}
    if (z == 0){
        s_u_ext[5 + (x + 1) * 6 + (y + 1) * 36] = s_nghs[x*blockDim.x + y * blockDim.x * blockDim.y];
    }
    __syncthreads();
    //down
    s_nghs[tid] = 0;
    if (blockIdx.y > 0){
        cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y - 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
    }
    __syncthreads();
    //if (tid == 0){
    // for (int i = 0; i < 4; i++){
    // for (int j = 0; j < 4; j++){
    // s_u_ext[1 + i + (j+1) * 36] = s_nghs[i + 3*blockDim.x + j * blockDim.x * blockDim.y];
    // }
    // }
    //}
    if (z == 0){
        s_u_ext[1 + x + (y + 1) * 36] = s_nghs[x + 3 * blockDim.x + y * blockDim.x * blockDim.y];
    }
    __syncthreads();
    //up
    s_nghs[tid] = 0;
    if (blockIdx.y + 1 < gridDim.y){
        cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y + 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
    }
    __syncthreads();
    //if (tid == 0){
    // for (int i = 0; i < 4; i++){
    // for (int j = 0; j < 4; j++){
    // s_u_ext[1 + i + 5*6 + (j+1) * 36] = s_nghs[i + j * blockDim.x * blockDim.y];
    // }
    // }
    //}
    if (z == 0){
        s_u_ext[1 + x + 5 * 6 + (y + 1) * 36] = s_nghs[x + y * blockDim.x * blockDim.y];
    }
    __syncthreads();
    //near
    s_nghs[tid] = 0;
    if (blockIdx.z > 0){
        cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z - 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
    }
    __syncthreads();
    //if (tid == 0){
    // for (int i = 0; i < 4; i++){
    // for (int j = 0; j < 4; j++){
    // s_u_ext[1 + i + (j + 1) * 6] = s_nghs[i + (j)*blockDim.x + 3 * blockDim.x * blockDim.y];
    // }
    // }
    //}
    if (z == 0){
        s_u_ext[1 + x + (y + 1) * 6] = s_nghs[x + (y)*blockDim.x + 3 * blockDim.x * blockDim.y];
    }
    __syncthreads();
    //far
    s_nghs[tid] = 0;
    if (blockIdx.z + 1 < gridDim.z){
        cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z + 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs);
    }
    __syncthreads();
    //if (tid == 0){
    // for (int i = 0; i < 4; i++){
    // for (int j = 0; j < 4; j++){
    // s_u_ext[1 + i + (j + 1) * 6 + 5 * 36] = s_nghs[i + (j)*blockDim.x ];
    // }
    // }
    //}
    if (z == 0){
        s_u_ext[1 + x + (y + 1) * 6 + 5 * 36] = s_nghs[x + (y)*blockDim.x];
    }
    __syncthreads();
    // Finally place this block's own tile in the interior of the halo buffer.
    s_u_ext[1 + x + (y + 1) * 6 + (z + 1) * 36] = s_u[tid];
    __syncthreads();
    // 7-point Laplacian on the 6x6x6 halo buffer (stride 1/6/36 for x/y/z).
    Scalar uxx = (s_u_ext[x + (y + 1) * 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 2 + (y + 1) * 6 + (z + 1) * 36]) / (dx * dx);
    Scalar uyy = (s_u_ext[x + 1 + (y)* 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 2) * 6 + (z + 1) * 36]) / (dy * dy);
    Scalar uzz = (s_u_ext[x + 1 + (y + 1) * 6 + (z)* 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 1) * 6 + (z + 2) * 36]) / (dz * dz);
    s_du[tid] = dt*k * (uxx + uyy + uzz);
    __syncthreads();  // all 64 updates staged before block-encode
    //if (uxx < 0 || uyy < 0 || uzz < 0){
    // printf("%d, %f, %f, %f, %f %f %f %d %d %d %d\n", tid, dt, k, s_du[tid], uxx, uyy, uzz, threadIdx.x + blockIdx.x * blockDim.x, threadIdx.y + blockIdx.y * blockDim.y, threadIdx.z + blockIdx.z * blockDim.z, threadIdx.x + blockIdx.x * blockDim.x + (threadIdx.y + blockIdx.y * blockDim.y)*gridDim.x * blockDim.x + (threadIdx.z + blockIdx.z * blockDim.z)*gridDim.x * blockDim.x * gridDim.y * blockDim.y);
    //}
    cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(
        s_du,
        size,
        new_smem,
        idx * bsize,
        du
    );
    //out[(threadIdx.z + blockIdx.z * 4)*gridDim.x * gridDim.y * blockDim.x * blockDim.y + (threadIdx.y + blockIdx.y * 4)*gridDim.x * blockDim.x + (threadIdx.x + blockIdx.x * 4)] = s_dblock[tid];
}
// Host wrapper for one compressed diffusion step:
//   du <- encode(dt * k * laplacian(decode(u)))  (kernel)
//   u  <- u + du                                 (cuZFP::transform in the
//                                                 compressed domain)
// nx/ny/nz must each be divisible by 4 (one 4x4x4 thread block per ZFP block).
// The third launch argument is the dynamic shared-memory budget: the four
// staging buffers plus the cuZFP encoder scratch — it must match the layout
// carved out inside cudaZFPDiffusion.
// NOTE(review): `size` here is a file-scope global, not a parameter; df_u and
// tfinal are currently unused (decode path is commented out).
template<class Int, class UInt, class Scalar, uint bsize, int intprec>
void gpuZFPDiffusion
(
    int nx, int ny, int nz,
    device_vector<Word > &u, //Compressed??
    device_vector<Word > &du, //Compressed??
    device_vector<Scalar> &df_u, //uncompressed
    const Scalar dx,
    const Scalar dy,
    const Scalar dz,
    const Scalar dt,
    const Scalar k,
    const Scalar tfinal
)
{
    dim3 block_size = dim3(4, 4, 4);
    dim3 grid_size = dim3(nx, ny, nz);
    grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z;
    cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> >
        (
        thrust::raw_pointer_cast(u.data()),
        thrust::raw_pointer_cast(du.data()),
        size,
        dx,dy,dz,dt,k
        );
    // cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(
    // nx, ny, nz,
    // u, df_u,
    // group_count
    // );
    //cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> >
    // (
    // thrust::raw_pointer_cast(df_u.data()),
    // thrust::raw_pointer_cast(du.data()),
    // size,
    // dx,dy,dz,dt,k
    // );
    // Forward Euler step performed directly on the compressed streams.
    cuZFP::transform <Int, UInt, Scalar, bsize, intprec>
        (
        nx,ny,nz,
        size,
        u,
        du,
        thrust::plus<Scalar>()
        );
    //Scalar sum_u = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u);
    //Scalar sum_du = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, du);
    //cout << "post-transform du: " << sum_du << " u: " << sum_u << endl;
}
// Reference GPU solver for the 3-D heat equation on an uncompressed field.
// Places a unit point source at (x0, y0, z0), forward-Euler steps the
// diffusion stencil until tfinal, then reports the error via rme().
// Relies on file-scope grid dimensions nx/ny/nz; each must be divisible by 4
// (the thread-block edge), since the kernels have no bounds guard.
// Fixes vs. original: cudaSum launches are now synchronized and
// error-checked, CUDA events are destroyed (they leaked before), and the
// final ErrorCheck tag no longer reads "cudaencode" (copy-paste leftover).
template<typename Scalar>
void gpu_discrete_solution
(
    const int x0,
    const int y0,
    const int z0,
    const Scalar dx,
    const Scalar dy,
    const Scalar dz,
    const Scalar dt,
    const Scalar k,
    const Scalar tfinal
)
{
    thrust::host_vector<Scalar> h_u(nx*ny*nz, 0);
    thrust::device_vector<Scalar> u(nx*ny*nz);
    thrust::device_vector<Scalar> du(nx*ny*nz);
    ErrorCheck ec;
    cudaEvent_t start, stop;
    float millisecs;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // Unit heat source at the requested grid point.
    h_u[x0 + y0 * nx + z0 * nx * ny] = 1;
    u = h_u;
    dim3 block_size(4, 4, 4);
    dim3 grid_size;
    grid_size.x = nx / block_size.x;
    grid_size.y = ny / block_size.y;
    grid_size.z = nz / block_size.z;
    double t;
    for (t = 0; t < tfinal; t += dt) {
        std::cerr << "gpu t=" << std::fixed << t << std::endl;
        // du = dt * k * laplacian(u)
        cudaDiffusion << <grid_size, block_size >> >
            (
            thrust::raw_pointer_cast(u.data()),
            dx, dy, dz,
            dt,
            k,
            tfinal,
            thrust::raw_pointer_cast(du.data())
            );
        cudaStreamSynchronize(0);
        ec.chk("cudaDiffusion");
        // u += du (forward Euler step)
        cudaSum << < grid_size, block_size >> >
            (
            thrust::raw_pointer_cast(u.data()),
            thrust::raw_pointer_cast(du.data())
            );
        cudaStreamSynchronize(0);
        ec.chk("cudaSum");  // previously unchecked: launch errors were dropped
    }
    h_u = u;
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&millisecs, start, stop);
    // Release timing events (leaked in the original).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    ec.chk("gpu_discrete_solution");
    cout << "Diffusion GPU in time: " << millisecs << endl;
    array3d out(nx, ny, nz, 0);
    for (size_t i = 0; i < u.size(); i++){
        out[i] = h_u[i];
    }
    rme(out, x0, y0, z0, dx, dy, dz, k, tfinal - dt);
}
// Compressed-domain GPU diffusion solver. Initializes a unit point source,
// ZFP-encodes the field and the (zero) update buffer, then steps the
// compressed diffusion until tfinal and reports the error via rme().
// h_u is overwritten with the initial condition (zeros plus the point source);
// the evolved field is decoded into a separate buffer for the error check.
// Relies on file-scope globals nx/ny/nz, size, group_count, rate and intprec.
// Fixes vs. original: the timing events are reused instead of being created a
// second time (which leaked the first pair), and are destroyed at the end;
// the dead tot_sum/max_diff/min_diff locals (only referenced by commented-out
// diagnostics) are removed.
template<class Int, class UInt, class Scalar, uint bsize>
void gpuDiffusion ( int x0, int y0, int z0,
    Scalar dx,Scalar dy, Scalar dz, Scalar dt, Scalar k, Scalar tfinal,
    host_vector<Scalar> &h_u)
{
    host_vector<Scalar> h_du(nx*ny*nz, 0.0);
    thrust::fill(h_u.begin(), h_u.end(), 0.0);
    device_vector<Scalar> d_du;
    // Unit heat source at the requested grid point.
    h_u[x0 + nx*y0 + nx*ny*z0] = 1;
    device_vector<Scalar> d_u;
    d_du = h_du;
    d_u = h_u;
    ErrorCheck ec;
    cudaEvent_t start, stop;
    float millisecs;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // One ZFP block per 4x4x4 tile of the grid.
    dim3 emax_size(nx / 4, ny / 4, nz / 4);
    // Compressed streams for the field and its update.
    device_vector<Word > u(emax_size.x * emax_size.y * emax_size.z * bsize);
    device_vector<Word > du(emax_size.x * emax_size.y * emax_size.z * bsize);
    cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, d_u, u, group_count, size);
    cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, d_du, du, group_count, size);
    cuZFP::transform<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, size, u, du, thrust::plus<Scalar>());
    cout << "start: " << cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u) << endl;
    d_du.clear();
    d_du.shrink_to_fit();
    cudaStreamSynchronize(0);
    ec.chk("cudaEncode");
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&millisecs, start, stop);
    ec.chk("cudaencode");
    cout << "encode diffusion GPU in time: " << millisecs << endl;
    // Reuse the same events for the time-stepping phase (the original created
    // a fresh pair here, leaking the first one).
    cudaEventRecord(start, 0);
    for (double t = 0; t < tfinal; t += dt){
        std::cerr << "compressed gpu t=" << std::fixed << t << std::endl;
        gpuZFPDiffusion<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u, du, d_u, dx, dy, dz, dt, k, tfinal);
        cudaStreamSynchronize(0);
        ec.chk("gpuZFPDiffusion");
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&millisecs, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout << "Diffusion GPU ZFP in time: " << millisecs << endl;
    // Decode the evolved field and hand it to the error metric.
    cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u, d_u, group_count);
    host_vector<Scalar> h_out = d_u;
    array3d out(nx, ny, nz, rate);
    for (size_t i = 0; i < h_out.size(); i++){
        out[i] = h_out[i];
    }
    rme(out, x0, y0, z0, dx, dy, dz, k, tfinal - dt);
}
// CPU reference solver for the 3-D heat equation, parameterized over the
// storage type (plain array3d or zfp::array3d). Places a unit point source at
// (x0, y0, z0), forward-Euler steps the interior 7-point stencil until tfinal
// (boundary cells of du stay 0, i.e. zero-update boundary), then reports the
// error via rme(). Relies on file-scope nx/ny/nz and rate.
// Fix vs. original: removed the unused local `double sum = 0;`.
template<typename Array>
void discrete_solution
(
    Array &u,
    int x0, int y0, int z0,
    const double dx,
    const double dy,
    const double dz,
    const double dt,
    const double k,
    const double tfinal
)
{
    // initialize u (constructor zero-initializes)
    //rate = u.rate();
    double start_time = omp_get_wtime();
    u(x0, y0, z0) = 1;
    // iterate until final time
    std::cerr.precision(6);
    double t;
    for (t = 0; t < tfinal; t += dt) {
        std::cerr << "cpu t=" << std::fixed << t << std::endl;
        // compute du/dt over the interior; du is freshly zero-initialized each
        // step so the boundary contributes no update
        Array du(nx, ny, nz, rate);
        for (int z = 1; z < nz - 1; z++){
            for (int y = 1; y < ny - 1; y++) {
                for (int x = 1; x < nx - 1; x++) {
                    double uxx = (u(x - 1, y, z) - 2 * u(x, y, z) + u(x + 1, y, z)) / (dx * dx);
                    double uyy = (u(x, y - 1, z) - 2 * u(x, y, z) + u(x, y + 1, z)) / (dy * dy);
                    double uzz = (u(x, y, z - 1) - 2 * u(x, y, z) + u(x, y, z + 1)) / (dz * dz);
                    du(x, y, z) = dt * k * (uxx + uyy + uzz);
                }
            }
        }
        // take forward Euler step
        for (uint i = 0; i < u.size(); i++){
            u[i] += du[i];
        }
    }
    double time = omp_get_wtime() - start_time;
    cout << "discrete time: " << time << endl;
    rme(u, x0, y0, z0, dx, dy, dz, k, tfinal - dt);
}
// Driver: sets up a point heat source in the middle of the nx*ny*nz grid and
// runs the uncompressed GPU solver followed by the compressed (ZFP) one.
// Fixes vs. original: the identical std::max(nz, std::max(nx, ny)) expression
// was evaluated three times for dx/dy/dz — hoisted into one constant; typo in
// the "GPU discrete diffusion" banner corrected.
int main()
{
    host_vector<double> h_vec_in(nx*ny*nz, 0);
    //device_vector<double> d_vec_in(nx*ny*nz);
    // thrust::counting_iterator<uint> index_sequence_begin(0);
    //thrust::transform(
    // index_sequence_begin,
    // index_sequence_begin + nx*ny*nz,
    // d_vec_in.begin(),
    // RandGen());
    //h_vec_in = d_vec_in;
    //d_vec_in.clear();
    //d_vec_in.shrink_to_fit();
    // location of point heat source
    int x0 = (nx - 1) / 2;
    int y0 = (ny - 1) / 2;
    int z0 = (nz - 1) / 2;
    // constants used in the solution
    const double k = 0.04;
    // Uniform spacing: the domain spans [-1, 1] along the largest dimension.
    const int n_max = std::max(nz, std::max(nx, ny));
    const double dx = 2.0 / (n_max - 1);
    const double dy = dx;
    const double dz = dx;
    // Time step chosen for stability of the explicit scheme.
    const double dt = 0.5 * (dx * dx + dy * dy) / (8 * k);
    const double tfinal = nt ? nt * dt : 1;
    /*
    cout << "cpu diffusion start" << endl;
    array3d u(nx, ny, nz, rate);
    discrete_solution<array3d>(u, x0, y0, z0, dx,dy,dz,dt,k, tfinal);
    cout << "compressed cpu diffusion start" << endl;
    zfp::array3d u2(nx, ny, nz, rate);
    discrete_solution<zfp::array3d>(u2, x0, y0, z0, dx, dy, dz, dt, k, tfinal);
    */
    cout << "GPU discrete diffusion start" << endl;
    gpu_discrete_solution<double>(x0, y0, z0, dx, dy, dz, dt, k, tfinal);
    cout << "GPU ZFP diffusion start" << endl;
    cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
    setupConst<double>(perm, MAXBITS, MAXPREC, MINEXP, EBITS, EBIAS);
    cout << "Begin gpuDiffusion" << endl;
    gpuDiffusion<long long, unsigned long long, double, BSIZE>(x0,y0,z0, dx, dy, dz, dt, k, tfinal, h_vec_in);
    cout << "Finish gpuDiffusion" << endl;
}
|
b72f60b684fdc79c6f94f703325d48c3dbf0eab8.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "WarpSelectImpl_hip.cuh"
namespace faiss { namespace gpu {
#ifdef FAISS_USE_FLOAT16
WARP_SELECT_IMPL(half, true, 512, 8);
#endif
} } // namespace
| b72f60b684fdc79c6f94f703325d48c3dbf0eab8.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "WarpSelectImpl.cuh"
namespace faiss { namespace gpu {
#ifdef FAISS_USE_FLOAT16
WARP_SELECT_IMPL(half, true, 512, 8);
#endif
} } // namespace
|
3ac22b30cf572b01ded259faafae8ab76611afe8.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
//
// This is a block-based algorithm.
// Blocks are 2x2 sized, with internal pixels named as:
// +---+
// |a b|
// |c d|
// +---+
//
// Neighbour blocks of block X are named as:
// +-+-+-+
// |P|Q|R|
// +-+-+-+
// |S|X|
// +-+-+
//
enum class Info : unsigned char { a = 0, b = 1, c = 2, d = 3, P = 4, Q = 5, R = 6, S = 7 };
// Tests whether the bit named by `pos` is set in `bitmap`; returns 0 or 1.
// Only use it with unsigned numeric types (the right shift must be logical).
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, Info pos) {
    const unsigned char shift = static_cast<unsigned char>(pos);
    return (bitmap >> shift) & 1;
}
// Overload taking a raw bit index instead of an Info enumerator.
// Only use it with unsigned numeric types.
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) {
    const T shifted = bitmap >> pos;
    return shifted & 1;
}
// Sets (in place) the bit named by `pos` in the 8-bit flag word `bitmap`.
// Only use it with unsigned numeric types.
__device__ __forceinline__ void SetBit(unsigned char &bitmap, Info pos) {
    const unsigned char shift = static_cast<unsigned char>(pos);
    bitmap = bitmap | (1 << shift);
}
// Returns the root index of the UFTree containing n: follows parent links in
// s_buf until it reaches a self-parented node. Read-only (no path compression).
__device__ unsigned Find(const int *s_buf, unsigned n) {
    for (unsigned parent = s_buf[n]; parent != n; parent = s_buf[n]) {
        n = parent;
    }
    return n;
}
// Merges the UFTrees of a and b, linking one root to the other.
// Lock-free: the larger root is atomically linked under the smaller one with
// atomicMin. If another thread raced us, atomicMin returns an even smaller
// value and the loop retries from there, so concurrent unions converge.
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
    bool done;
    do {
        a = Find(s_buf, a);
        b = Find(s_buf, b);
        if (a < b) {
            int old = atomicMin(s_buf + b, a);
            done = (old == b);  // b was still a root: link installed
            b = old;            // otherwise chase the value that beat us
        }
        else if (b < a) {
            int old = atomicMin(s_buf + a, b);
            done = (old == a);
            a = old;
        }
        else {
            done = true;  // already in the same tree
        }
    } while (!done);
}
// BKE phase 1: per-2x2-block initialization. Each thread owns one 2x2 block
// of the binary image: it classifies the four pixels (bits a..d of `info`),
// decides which neighbour blocks (P/Q/R/S) are 8-connected to this one,
// writes a provisional union-find parent link into `labels`, and stashes
// `info` in a spare byte of the labels buffer for the Merge/FinalLabeling
// phases. `last_pixel` is passed by value, so the reassignments below only
// redirect this thread's write target; the caller-provided byte is used only
// by the single bottom-right block that has no spare labels slot.
__global__ void InitLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char *last_pixel) {
    unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
    unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
    unsigned img_index = row * img.step + col;
    unsigned labels_index = row * (labels.step / labels.elem_size) + col;
    if (row < labels.rows && col < labels.cols) {
        unsigned P = 0;
        // Bitmask representing two kinds of information
        // Bits 0, 1, 2, 3 are set if pixel a, b, c, d are foreground, respectively
        // Bits 4, 5, 6, 7 are set if block P, Q, R, S need to be merged to X in Merge phase
        unsigned char info = 0;
        char buffer[4];
        *(reinterpret_cast<int*>(buffer)) = 0;
        // Read pairs of consecutive values in memory at once
        if (col + 1 < img.cols) {
            // This does not depend on endianness
            *(reinterpret_cast<int16_t*>(buffer)) = *(reinterpret_cast<int16_t*>(img.data + img_index));
            if (row + 1 < img.rows) {
                *(reinterpret_cast<int16_t*>(buffer + 2)) = *(reinterpret_cast<int16_t*>(img.data + img_index + img.step));
            }
        }
        else {
            buffer[0] = img.data[img_index];
            if (row + 1 < img.rows) {
                buffer[2] = img.data[img_index + img.step];
            }
        }
        // P accumulates, per foreground pixel, the 16-bit neighbourhood mask of
        // positions that must be probed in the surrounding blocks.
        if (buffer[0]) {
            P |= 0x777;
            SetBit(info, Info::a);
        }
        if (buffer[1]) {
            P |= (0x777 << 1);
            SetBit(info, Info::b);
        }
        if (buffer[2]) {
            P |= (0x777 << 4);
            SetBit(info, Info::c);
        }
        if (buffer[3]) {
            SetBit(info, Info::d);
        }
        // Clip the probe mask at the image borders.
        if (col == 0) {
            P &= 0xEEEE;
        }
        if (col + 1 >= img.cols) {
            P &= 0x3333;
        }
        else if (col + 2 >= img.cols) {
            P &= 0x7777;
        }
        if (row == 0) {
            P &= 0xFFF0;
        }
        if (row + 1 >= img.rows) {
            P &= 0x00FF;
        }
        else if (row + 2 >= img.rows) {
            P &= 0x0FFF;
        }
        // P is now ready to be used to find neighbour blocks
        // P value avoids range errors
        // The first connected neighbour becomes the provisional UF parent; any
        // further connected neighbours are deferred to Merge via info bits.
        int father_offset = 0;
        // P square
        if (HasBit(P, 0) && img.data[img_index - img.step - 1]) {
            father_offset = -(2 * (labels.step / labels.elem_size) + 2);
        }
        // Q square
        if ((HasBit(P, 1) && img.data[img_index - img.step]) || (HasBit(P, 2) && img.data[img_index + 1 - img.step])) {
            if (!father_offset) {
                father_offset = -(2 * (labels.step / labels.elem_size));
            }
            else {
                SetBit(info, Info::Q);
            }
        }
        // R square
        if (HasBit(P, 3) && img.data[img_index + 2 - img.step]) {
            if (!father_offset) {
                father_offset = -(2 * (labels.step / labels.elem_size) - 2);
            }
            else {
                SetBit(info, Info::R);
            }
        }
        // S square
        if ((HasBit(P, 4) && img.data[img_index - 1]) || (HasBit(P, 8) && img.data[img_index + img.step - 1])) {
            if (!father_offset) {
                father_offset = -2;
            }
            else {
                SetBit(info, Info::S);
            }
        }
        labels.data[labels_index] = labels_index + father_offset;
        // Stash `info` where FinalLabeling/Merge expect it: the right or lower
        // neighbour slot of this block's label, or the caller's fallback byte.
        if (col + 1 < labels.cols) {
            last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + 1);
        }
        else if (row + 1 < labels.rows) {
            last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + labels.step / labels.elem_size);
        }
        *last_pixel = info;
    }
}
// BKE phase 2: merge. Re-reads the `info` byte stashed by InitLabeling (same
// slot-selection logic; `last_pixel` is only the fallback for the single
// bottom-right block) and unions this block with each neighbour (Q/R/S) that
// InitLabeling flagged as connected but could not use as the parent link.
__global__ void Merge(cuda::PtrStepSzi labels, unsigned char *last_pixel) {
    unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
    unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
    unsigned labels_index = row * (labels.step / labels.elem_size) + col;
    if (row < labels.rows && col < labels.cols) {
        if (col + 1 < labels.cols) {
            last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + 1);
        }
        else if (row + 1 < labels.rows) {
            last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + labels.step / labels.elem_size);
        }
        unsigned char info = *last_pixel;
        if (HasBit(info, Info::Q)) {
            Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size));
        }
        if (HasBit(info, Info::R)) {
            Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2);
        }
        if (HasBit(info, Info::S)) {
            Union(labels.data, labels_index, labels_index - 2);
        }
    }
}
// Path-compression pass: replaces each block's parent link with its tree root
// so later phases resolve labels in O(1). Only links pointing backwards
// (label < labels_index) are touched; roots stay self-parented.
__global__ void Compression(cuda::PtrStepSzi labels) {
    unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
    unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
    unsigned labels_index = row * (labels.step / labels.elem_size) + col;
    if (row < labels.rows && col < labels.cols) {
        unsigned label = labels.data[labels_index];
        if (label < labels_index) {
            labels[labels_index] = Find(labels.data, label);
        }
    }
}
// BKE phase 3: expand block labels to pixel labels. Reads the block's root
// label and the stashed `info` byte (label and info live in adjacent 32-bit
// slots, fetched with one 64-bit load when possible), then writes `label+1`
// to each foreground pixel of the 2x2 block and 0 to background pixels.
// Paired pixels on a row are stored with a single 64-bit write.
// NOTE(review): the `img` parameter is unused here — foreground-ness comes
// entirely from the a/b/c/d bits of `info`.
__global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char *last_pixel) {
    unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
    unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
    unsigned labels_index = row * (labels.step / labels.elem_size) + col;
    if (row < labels.rows && col < labels.cols) {
        int label;
        unsigned char info;
        unsigned long long buffer;
        if (col + 1 < labels.cols) {
            // Label and info fetched together with one 64-bit load.
            buffer = *reinterpret_cast<unsigned long long *>(labels.data + labels_index);
            label = (buffer & (0xFFFFFFFF)) + 1;
            info = (buffer >> 32) & 0xFFFFFFFF;
        }
        else {
            label = labels[labels_index] + 1;
            if (row + 1 < labels.rows) {
                info = labels[labels_index + labels.step / labels.elem_size];
            }
            else {
                // 1x1 bottom-right block: info lives in the caller's byte.
                info = *last_pixel;
            }
        }
        if (col + 1 < labels.cols) {
            *reinterpret_cast<unsigned long long *>(labels.data + labels_index) =
                (static_cast<unsigned long long>(HasBit(info, Info::b) * label) << 32) | (HasBit(info, Info::a) * label);
            if (row + 1 < labels.rows) {
                *reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.step / labels.elem_size) =
                    (static_cast<unsigned long long>(HasBit(info, Info::d) * label) << 32) | (HasBit(info, Info::c) * label);
            }
        }
        else {
            labels[labels_index] = HasBit(info, Info::a) * label;
            if (row + 1 < labels.rows) {
                labels[labels_index + (labels.step / labels.elem_size)] = HasBit(info, Info::c) * label;
            }
        }
    }
}
}
// BKE (block-based Komura-equivalence) 8-connectivity labeling algorithm.
// Pipeline: InitLabeling -> Compression -> Merge -> Compression ->
// FinalLabeling, one thread per 2x2 block.
// `last_pixel_` is the spare byte used to stash the bottom-right block's info:
// normally it aliases an unused slot inside d_img_labels_; only for degenerate
// 1xN / Nx1 images of even length is a separate device byte allocated
// (tracked by `last_pixel_allocated_`).
// Fix vs. original: Alloc() never reset `last_pixel_allocated_`, so after a
// degenerate image the stale `true` (or an uninitialized value on first use)
// could make Dealloc() hipFree a pointer into the labels buffer. The flag is
// now initialized in the constructor and reset at the top of Alloc().
class BKE : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
    dim3 grid_size_;
    dim3 block_size_;
    unsigned char *last_pixel_;
    bool last_pixel_allocated_;
public:
    BKE() : last_pixel_(nullptr), last_pixel_allocated_(false) {}
    void PerformLabeling() {
        d_img_labels_.create(d_img_.size(), CV_32SC1);
        last_pixel_allocated_ = false;
        if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) {
            // Degenerate strip of even length: no spare in-buffer slot exists.
            hipMalloc(&last_pixel_, sizeof(unsigned char));
            last_pixel_allocated_ = true;
        }
        else {
            // Reuse an unused byte inside the labels buffer itself.
            last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize();
        }
        grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
        block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
        InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
        Compression << <grid_size_, block_size_ >> > (d_img_labels_);
        Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_);
        Compression << <grid_size_, block_size_ >> > (d_img_labels_);
        FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
        if (last_pixel_allocated_) {
            hipFree(last_pixel_);
        }
        hipDeviceSynchronize();
    }
private:
    void Alloc() {
        d_img_labels_.create(d_img_.size(), CV_32SC1);
        last_pixel_allocated_ = false;  // was missing: stale flag caused bad hipFree in Dealloc
        if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) {
            hipMalloc(&last_pixel_, sizeof(unsigned char));
            last_pixel_allocated_ = true;
        }
        else {
            last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize();
        }
    }
    void Dealloc() {
        if (last_pixel_allocated_) {
            hipFree(last_pixel_);
        }
    }
    double MemoryTransferHostToDevice() {
        perf_.start();
        d_img_.upload(img_);
        perf_.stop();
        return perf_.last();
    }
    void MemoryTransferDeviceToHost() {
        d_img_labels_.download(img_labels_);
    }
    // Same pipeline as PerformLabeling, timed as a single step (buffers are
    // managed by Alloc/Dealloc around it).
    void AllScans() {
        grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
        block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
        InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
        Compression << <grid_size_, block_size_ >> > (d_img_labels_);
        Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_);
        Compression << <grid_size_, block_size_ >> > (d_img_labels_);
        FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
        hipDeviceSynchronize();
    }
public:
    void PerformLabelingWithSteps()
    {
        perf_.start();
        Alloc();
        perf_.stop();
        double alloc_timing = perf_.last();
        perf_.start();
        AllScans();
        perf_.stop();
        perf_.store(Step(StepType::ALL_SCANS), perf_.last());
        perf_.start();
        Dealloc();
        perf_.stop();
        double dealloc_timing = perf_.last();
        perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
    }
};
REGISTER_LABELING(BKE);
| 3ac22b30cf572b01ded259faafae8ab76611afe8.cu | // Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
//
// This is a block-based algorithm.
// Blocks are 2x2 sized, with internal pixels named as:
// +---+
// |a b|
// |c d|
// +---+
//
// Neighbour blocks of block X are named as:
// +-+-+-+
// |P|Q|R|
// +-+-+-+
// |S|X|
// +-+-+
//
enum class Info : unsigned char { a = 0, b = 1, c = 2, d = 3, P = 4, Q = 5, R = 6, S = 7 };
// Tests whether the bit named by `pos` is set in `bitmap`; returns 0 or 1.
// Only use it with unsigned numeric types (the right shift must be logical).
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, Info pos) {
    const unsigned char shift = static_cast<unsigned char>(pos);
    return (bitmap >> shift) & 1;
}
// Overload taking a raw bit index instead of an Info enumerator.
// Only use it with unsigned numeric types.
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) {
    const T shifted = bitmap >> pos;
    return shifted & 1;
}
// Sets (in place) the bit named by `pos` in the 8-bit flag word `bitmap`.
// Only use it with unsigned numeric types.
__device__ __forceinline__ void SetBit(unsigned char &bitmap, Info pos) {
    const unsigned char shift = static_cast<unsigned char>(pos);
    bitmap = bitmap | (1 << shift);
}
// Returns the root index of the UFTree containing n: follows parent links in
// s_buf until it reaches a self-parented node. Read-only (no path compression).
__device__ unsigned Find(const int *s_buf, unsigned n) {
    for (unsigned parent = s_buf[n]; parent != n; parent = s_buf[n]) {
        n = parent;
    }
    return n;
}
// Merges the UFTrees of a and b, linking one root to the other.
// Lock-free: the larger root is atomically linked under the smaller one with
// atomicMin. If another thread raced us, atomicMin returns an even smaller
// value and the loop retries from there, so concurrent unions converge.
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
    bool done;
    do {
        a = Find(s_buf, a);
        b = Find(s_buf, b);
        if (a < b) {
            int old = atomicMin(s_buf + b, a);
            done = (old == b);  // b was still a root: link installed
            b = old;            // otherwise chase the value that beat us
        }
        else if (b < a) {
            int old = atomicMin(s_buf + a, b);
            done = (old == a);
            a = old;
        }
        else {
            done = true;  // already in the same tree
        }
    } while (!done);
}
// BKE phase 1: per-2x2-block initialization. Each thread owns one 2x2 block
// of the binary image: it classifies the four pixels (bits a..d of `info`),
// decides which neighbour blocks (P/Q/R/S) are 8-connected to this one,
// writes a provisional union-find parent link into `labels`, and stashes
// `info` in a spare byte of the labels buffer for the Merge/FinalLabeling
// phases. `last_pixel` is passed by value, so the reassignments below only
// redirect this thread's write target; the caller-provided byte is used only
// by the single bottom-right block that has no spare labels slot.
__global__ void InitLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char *last_pixel) {
    unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
    unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
    unsigned img_index = row * img.step + col;
    unsigned labels_index = row * (labels.step / labels.elem_size) + col;
    if (row < labels.rows && col < labels.cols) {
        unsigned P = 0;
        // Bitmask representing two kinds of information
        // Bits 0, 1, 2, 3 are set if pixel a, b, c, d are foreground, respectively
        // Bits 4, 5, 6, 7 are set if block P, Q, R, S need to be merged to X in Merge phase
        unsigned char info = 0;
        char buffer[4];
        *(reinterpret_cast<int*>(buffer)) = 0;
        // Read pairs of consecutive values in memory at once
        if (col + 1 < img.cols) {
            // This does not depend on endianness
            *(reinterpret_cast<int16_t*>(buffer)) = *(reinterpret_cast<int16_t*>(img.data + img_index));
            if (row + 1 < img.rows) {
                *(reinterpret_cast<int16_t*>(buffer + 2)) = *(reinterpret_cast<int16_t*>(img.data + img_index + img.step));
            }
        }
        else {
            buffer[0] = img.data[img_index];
            if (row + 1 < img.rows) {
                buffer[2] = img.data[img_index + img.step];
            }
        }
        // P accumulates, per foreground pixel, the 16-bit neighbourhood mask of
        // positions that must be probed in the surrounding blocks.
        if (buffer[0]) {
            P |= 0x777;
            SetBit(info, Info::a);
        }
        if (buffer[1]) {
            P |= (0x777 << 1);
            SetBit(info, Info::b);
        }
        if (buffer[2]) {
            P |= (0x777 << 4);
            SetBit(info, Info::c);
        }
        if (buffer[3]) {
            SetBit(info, Info::d);
        }
        // Clip the probe mask at the image borders.
        if (col == 0) {
            P &= 0xEEEE;
        }
        if (col + 1 >= img.cols) {
            P &= 0x3333;
        }
        else if (col + 2 >= img.cols) {
            P &= 0x7777;
        }
        if (row == 0) {
            P &= 0xFFF0;
        }
        if (row + 1 >= img.rows) {
            P &= 0x00FF;
        }
        else if (row + 2 >= img.rows) {
            P &= 0x0FFF;
        }
        // P is now ready to be used to find neighbour blocks
        // P value avoids range errors
        // The first connected neighbour becomes the provisional UF parent; any
        // further connected neighbours are deferred to Merge via info bits.
        int father_offset = 0;
        // P square
        if (HasBit(P, 0) && img.data[img_index - img.step - 1]) {
            father_offset = -(2 * (labels.step / labels.elem_size) + 2);
        }
        // Q square
        if ((HasBit(P, 1) && img.data[img_index - img.step]) || (HasBit(P, 2) && img.data[img_index + 1 - img.step])) {
            if (!father_offset) {
                father_offset = -(2 * (labels.step / labels.elem_size));
            }
            else {
                SetBit(info, Info::Q);
            }
        }
        // R square
        if (HasBit(P, 3) && img.data[img_index + 2 - img.step]) {
            if (!father_offset) {
                father_offset = -(2 * (labels.step / labels.elem_size) - 2);
            }
            else {
                SetBit(info, Info::R);
            }
        }
        // S square
        if ((HasBit(P, 4) && img.data[img_index - 1]) || (HasBit(P, 8) && img.data[img_index + img.step - 1])) {
            if (!father_offset) {
                father_offset = -2;
            }
            else {
                SetBit(info, Info::S);
            }
        }
        labels.data[labels_index] = labels_index + father_offset;
        // Stash `info` where FinalLabeling/Merge expect it: the right or lower
        // neighbour slot of this block's label, or the caller's fallback byte.
        if (col + 1 < labels.cols) {
            last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + 1);
        }
        else if (row + 1 < labels.rows) {
            last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + labels.step / labels.elem_size);
        }
        *last_pixel = info;
    }
}
// BKE merge phase: re-derives the location where InitLabeling stashed this
// block's info byte (pixel b's cell, pixel c's cell, or *last_pixel) and
// performs the deferred union-find merges with the Q (above), R (above-right)
// and S (left) neighbour blocks that InitLabeling flagged.
__global__ void Merge(cuda::PtrStepSzi labels, unsigned char *last_pixel) {
unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
// Same stash-location logic as InitLabeling; the parameter value is only
// used when neither spare labels cell exists (1xN / Nx1 corner case).
if (col + 1 < labels.cols) {
last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + 1);
}
else if (row + 1 < labels.rows) {
last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + labels.step / labels.elem_size);
}
unsigned char info = *last_pixel;
if (HasBit(info, Info::Q)) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size));
}
if (HasBit(info, Info::R)) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2);
}
if (HasBit(info, Info::S)) {
Union(labels.data, labels_index, labels_index - 2);
}
}
}
// BKE path-compression phase: each 2x2 block flattens its union-find chain so
// its label points directly at the tree root (only non-root entries, i.e.
// those whose father is a smaller index, need updating).
__global__ void Compression(cuda::PtrStepSzi labels) {
unsigned y = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
unsigned x = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
if (y >= labels.rows || x >= labels.cols)
return;
unsigned idx = y * (labels.step / labels.elem_size) + x;
unsigned father = labels.data[idx];
if (father < idx)
labels.data[idx] = Find(labels.data, father);
}
// BKE final phase: converts per-block union-find roots into per-pixel labels.
// The block's root index becomes a 1-based label (so background stays 0) and
// is assigned to every foreground pixel of the 2x2 block; the info byte
// written by InitLabeling tells foreground (bits a..d) from background.
// When two columns are available, the block's label and the stashed info byte
// are fetched with a single 64-bit load, and each output row of the block is
// written with a single 64-bit store.
// NOTE(review): the img parameter is unused here — presumably kept for
// signature compatibility with the (commented-out) debug call sites.
__global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char *last_pixel) {
unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
int label;
unsigned char info;
unsigned long long buffer;
if (col + 1 < labels.cols) {
// Low 32 bits: block root; high 32 bits: cell holding the info byte
buffer = *reinterpret_cast<unsigned long long *>(labels.data + labels_index);
label = (buffer & (0xFFFFFFFF)) + 1;
info = (buffer >> 32) & 0xFFFFFFFF;
}
else {
// Single-column block: info was stashed one row below, or in *last_pixel
label = labels[labels_index] + 1;
if (row + 1 < labels.rows) {
info = labels[labels_index + labels.step / labels.elem_size];
}
else {
info = *last_pixel;
}
}
if (col + 1 < labels.cols) {
// Foreground pixels get the block label, background pixels get 0
*reinterpret_cast<unsigned long long *>(labels.data + labels_index) =
(static_cast<unsigned long long>(HasBit(info, Info::b) * label) << 32) | (HasBit(info, Info::a) * label);
if (row + 1 < labels.rows) {
*reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.step / labels.elem_size) =
(static_cast<unsigned long long>(HasBit(info, Info::d) * label) << 32) | (HasBit(info, Info::c) * label);
}
}
else {
labels[labels_index] = HasBit(info, Info::a) * label;
if (row + 1 < labels.rows) {
labels[labels_index + (labels.step / labels.elem_size)] = HasBit(info, Info::c) * label;
}
}
}
}
}
// Block-based Komura Equivalence (BKE) connected-components labeling,
// 8-connectivity. Kernels operate on 2x2 pixel blocks in the pipeline:
// InitLabeling -> Compression -> Merge -> Compression -> FinalLabeling.
class BKE : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
    dim3 grid_size_;
    dim3 block_size_;
    // Device byte used by the kernels to stash the bottom-right block's info
    // flags. Normally it aliases a spare cell inside d_img_labels_; a
    // dedicated device allocation is made only for the degenerate
    // single-row/column, even-pixel-count case.
    unsigned char *last_pixel_;
    bool last_pixel_allocated_;

public:
    // BUGFIX: last_pixel_allocated_ was previously left uninitialized until
    // PerformLabeling() ran; the Alloc()/Dealloc() path (used by
    // PerformLabelingWithSteps) could then cudaFree a pointer that was never
    // cudaMalloc'ed. Initialize members here.
    BKE() : last_pixel_(nullptr), last_pixel_allocated_(false) {}

    void PerformLabeling() {
        d_img_labels_.create(d_img_.size(), CV_32SC1);
        SetupLastPixel();
        // One thread per 2x2 block of the input image.
        grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
        block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
        InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
        Compression << <grid_size_, block_size_ >> > (d_img_labels_);
        Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_);
        Compression << <grid_size_, block_size_ >> > (d_img_labels_);
        FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
        FreeLastPixel();
        cudaDeviceSynchronize();
    }

private:
    // Point last_pixel_ at scratch space for the bottom-right block's info
    // byte. For 1xN / Nx1 images with an even pixel count there is no spare
    // labels cell, so a dedicated device byte is allocated instead.
    void SetupLastPixel() {
        last_pixel_allocated_ = false;
        if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) {
            cudaMalloc(&last_pixel_, sizeof(unsigned char));
            last_pixel_allocated_ = true;
        }
        else {
            last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize();
        }
    }
    // Release the dedicated scratch byte, if one was allocated.
    void FreeLastPixel() {
        if (last_pixel_allocated_) {
            cudaFree(last_pixel_);
            last_pixel_allocated_ = false;
        }
    }
    void Alloc() {
        d_img_labels_.create(d_img_.size(), CV_32SC1);
        SetupLastPixel();
    }
    void Dealloc() {
        FreeLastPixel();
    }
    double MemoryTransferHostToDevice() {
        perf_.start();
        d_img_.upload(img_);
        perf_.stop();
        return perf_.last();
    }
    void MemoryTransferDeviceToHost() {
        d_img_labels_.download(img_labels_);
    }
    // Same kernel pipeline as PerformLabeling(), without (de)allocation, so
    // the scan phase can be timed separately.
    void AllScans() {
        grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
        block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
        InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
        Compression << <grid_size_, block_size_ >> > (d_img_labels_);
        Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_);
        Compression << <grid_size_, block_size_ >> > (d_img_labels_);
        FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_);
        cudaDeviceSynchronize();
    }

public:
    // Timed variant: records alloc/dealloc and scan phases separately.
    void PerformLabelingWithSteps()
    {
        perf_.start();
        Alloc();
        perf_.stop();
        double alloc_timing = perf_.last();
        perf_.start();
        AllScans();
        perf_.stop();
        perf_.store(Step(StepType::ALL_SCANS), perf_.last());
        perf_.start();
        Dealloc();
        perf_.stop();
        double dealloc_timing = perf_.last();
        perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
    }
};
REGISTER_LABELING(BKE);
|
a1062c6d125cb87381729d0fde02d38e5d007307.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define NUMTHREADS 16
#define THREADWORK 32
__global__ void gpuKendall(const float * a, size_t na, const float * b, size_t nb, size_t sampleSize, double * results)
{
size_t
i, j, tests,
tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y,
rowa = bx * sampleSize, rowb = by * sampleSize;
float
discordant, concordant = 0.f,
numer, denom;
__shared__ float threadSums[NUMTHREADS*NUMTHREADS];
for(i = tx; i < sampleSize; i += NUMTHREADS) {
for(j = i+1+ty; j < sampleSize; j += NUMTHREADS) {
tests = ((a[rowa+j] > a[rowa+i]) && (b[rowb+j] > b[rowb+i]))
+ ((a[rowa+j] < a[rowa+i]) && (b[rowb+j] < b[rowb+i]))
+ ((a[rowa+j] == a[rowa+i]) && (b[rowb+j] == b[rowb+i]));
concordant = concordant + (float)tests;
}
}
threadSums[tx*NUMTHREADS+ty] = concordant;
__syncthreads();
for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
if(ty < i)
threadSums[tx*NUMTHREADS+ty] += threadSums[tx*NUMTHREADS+ty+i];
__syncthreads();
}
for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
if((tx < i) && (ty == 0))
threadSums[tx*NUMTHREADS] += threadSums[(tx+i)*NUMTHREADS];
__syncthreads();
}
if((tx == 0) && (ty == 0)) {
concordant = threadSums[0];
denom = (float)sampleSize;
denom = (denom * (denom - 1.f)) / 2.f; discordant = denom - concordant;
numer = concordant - discordant;
results[by*na+bx] = ((double)numer)/((double)denom);
}
} | a1062c6d125cb87381729d0fde02d38e5d007307.cu | #include "includes.h"
#define NUMTHREADS 16
#define THREADWORK 32
// Computes a Kendall-style rank correlation between row blockIdx.x of matrix a
// and row blockIdx.y of matrix b (each row holds sampleSize observations) and
// writes tau = (concordant - discordant) / (n*(n-1)/2) to results[by*na+bx].
// One NUMTHREADS x NUMTHREADS thread block handles one (bx, by) pair: each
// thread accumulates a partial pair count, then the partials are tree-reduced
// in shared memory (first over ty, then over tx).
// NOTE(review): pairs tied in BOTH a and b are counted toward "concordant",
// and the nb argument is unused — confirm both against the host-side caller.
__global__ void gpuKendall(const float * a, size_t na, const float * b, size_t nb, size_t sampleSize, double * results)
{
size_t
i, j, tests,
tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y,
rowa = bx * sampleSize, rowb = by * sampleSize;
float
discordant, concordant = 0.f,
numer, denom;
__shared__ float threadSums[NUMTHREADS*NUMTHREADS];
// Each thread strides over the (i, j>i) pair grid and counts concordant
// pairs (plus double ties, see NOTE above) for its subset.
for(i = tx; i < sampleSize; i += NUMTHREADS) {
for(j = i+1+ty; j < sampleSize; j += NUMTHREADS) {
tests = ((a[rowa+j] > a[rowa+i]) && (b[rowb+j] > b[rowb+i]))
+ ((a[rowa+j] < a[rowa+i]) && (b[rowb+j] < b[rowb+i]))
+ ((a[rowa+j] == a[rowa+i]) && (b[rowb+j] == b[rowb+i]));
concordant = concordant + (float)tests;
}
}
threadSums[tx*NUMTHREADS+ty] = concordant;
__syncthreads();
// First reduction: fold the ty dimension for each fixed tx.
for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
if(ty < i)
threadSums[tx*NUMTHREADS+ty] += threadSums[tx*NUMTHREADS+ty+i];
__syncthreads();
}
// Second reduction: fold the per-tx sums down to threadSums[0].
for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
if((tx < i) && (ty == 0))
threadSums[tx*NUMTHREADS] += threadSums[(tx+i)*NUMTHREADS];
__syncthreads();
}
// Thread (0,0) finishes: discordant = total pair count - concordant.
if((tx == 0) && (ty == 0)) {
concordant = threadSums[0];
denom = (float)sampleSize;
denom = (denom * (denom - 1.f)) / 2.f; discordant = denom - concordant;
numer = concordant - discordant;
results[by*na+bx] = ((double)numer)/((double)denom);
}
}
b1c98bd38f18d663ef26d6a31ff234ea1fecd9b9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
// CUDA runtime
#include <rocblas.h>
#include <hip/hip_runtime.h>
// helper functions
#include "helper_string.h"
#include "helper_cuda.h"
#include "hip/device_functions.h"
#include "cuda_util.h"
///////////////
void AllocDevice(Matrix& dev_a)
{
dev_a.elements =NULL;
int batchSize = dev_a.size;
REAL** dev_ptr= (REAL **)malloc( batchSize * sizeof(REAL*));
for(int i=0; i < batchSize;i++)
checkCudaErrors(hipMalloc((void**) &(dev_ptr[i]), dev_a.height * dev_a.width * sizeof(REAL*)));
checkCudaErrors(hipMalloc( (void**) &(dev_a.elements), batchSize * sizeof(REAL*)));
checkCudaErrors(hipMemcpy( dev_a.elements, dev_ptr , batchSize * sizeof(REAL*), hipMemcpyHostToDevice));
free(dev_ptr);
}
void FreeDevice(Matrix& dev_a)
{
int batchSize = dev_a.size;
REAL** host_ptr= (REAL **)malloc( batchSize * sizeof(REAL*));
checkCudaErrors(hipMemcpy(host_ptr, dev_a.elements , batchSize * sizeof(REAL*), hipMemcpyDeviceToHost));
for(int i=0; i < batchSize;i++)
hipFree( host_ptr[i]);
checkCudaErrors(hipFree(dev_a.elements));
free(host_ptr);
}
void Copy2Device(Matrix& dev_a,Matrix& host_A)
{
if(dev_a.size != host_A.size)
{
printf("error: can't copy!");
return;
}
int batchSize = host_A.size;
REAL** host_ptr= (REAL **)malloc( batchSize * sizeof(REAL*));
checkCudaErrors(hipMemcpy(host_ptr, dev_a.elements , batchSize * sizeof(REAL*), hipMemcpyDeviceToHost));
//ShowMatrixByRow2(host_A);
for(int i=0; i < batchSize;i++)
{
// printf("host_ptr==%p\n ",host_ptr[i]);
checkCudaErrors(hipMemcpy(host_ptr[i], host_A.elements[i], dev_a.height * dev_a.width*sizeof(REAL), hipMemcpyHostToDevice));
}
free(host_ptr);
}
void CopyBack2Host(Matrix& host_A,Matrix& dev_a)
{
if(dev_a.size != host_A.size)
{
printf("error: can't copy to host!");
return;
}
int batchSize = dev_a.size;
REAL** dev_ptr= (REAL **)malloc( batchSize * sizeof(REAL*));
checkCudaErrors(hipMemcpy(dev_ptr, dev_a.elements , batchSize * sizeof(REAL*), hipMemcpyDeviceToHost));
for(int i=0; i < batchSize;i++)
{
checkCudaErrors(hipMemcpy(host_A.elements[i], dev_ptr[i], dev_a.height * dev_a.width*sizeof(REAL), hipMemcpyDeviceToHost));
}
free(dev_ptr);
}
| b1c98bd38f18d663ef26d6a31ff234ea1fecd9b9.cu | #include <stdio.h>
// CUDA runtime
#include <cublas_v2.h>
#include <cuda_runtime.h>
// helper functions
#include "helper_string.h"
#include "helper_cuda.h"
#include "device_functions.h"
#include "cuda_util.h"
///////// Utility functions //////
// Allocates a batched matrix on the device: one height*width buffer of REAL
// per batch entry, plus a device-side array holding the batch of buffer
// pointers in dev_a.elements. Mirror of FreeDevice().
void AllocDevice(Matrix& dev_a)
{
    dev_a.elements = NULL;
    int batchSize = dev_a.size;
    // Temporary host-side array of the per-matrix device pointers.
    REAL** dev_ptr = (REAL **)malloc(batchSize * sizeof(REAL*));
    for (int i = 0; i < batchSize; i++)
        // BUGFIX: element buffers must be sized with sizeof(REAL), not
        // sizeof(REAL*) — the original over-allocated (2x on 64-bit hosts).
        // The copy routines in this file all transfer height*width*sizeof(REAL).
        checkCudaErrors(cudaMalloc((void**)&(dev_ptr[i]), dev_a.height * dev_a.width * sizeof(REAL)));
    checkCudaErrors(cudaMalloc((void**)&(dev_a.elements), batchSize * sizeof(REAL*)));
    checkCudaErrors(cudaMemcpy(dev_a.elements, dev_ptr, batchSize * sizeof(REAL*), cudaMemcpyHostToDevice));
    free(dev_ptr);
}
// Releases a batched matrix allocated with AllocDevice: copies the batch of
// device buffer pointers back to the host (the pointers themselves live in
// device memory), frees each per-matrix buffer, then the pointer array.
void FreeDevice(Matrix& dev_a)
{
int batchSize = dev_a.size;
REAL** host_ptr= (REAL **)malloc( batchSize * sizeof(REAL*));
checkCudaErrors(cudaMemcpy(host_ptr, dev_a.elements , batchSize * sizeof(REAL*), cudaMemcpyDeviceToHost));
for(int i=0; i < batchSize;i++)
cudaFree( host_ptr[i]);
checkCudaErrors(cudaFree(dev_a.elements));
free(host_ptr);
}
// Uploads a host batched matrix into a device batched matrix of the same
// batch size (dev_a must already be allocated via AllocDevice). Each
// height*width buffer is copied individually through the device pointer
// table. On a batch-size mismatch it prints an error and returns.
void Copy2Device(Matrix& dev_a,Matrix& host_A)
{
if(dev_a.size != host_A.size)
{
printf("error: can't copy!");
return;
}
int batchSize = host_A.size;
// Host-side copy of the device pointer table, fetched so each per-matrix
// destination buffer can be addressed from the host.
REAL** host_ptr= (REAL **)malloc( batchSize * sizeof(REAL*));
checkCudaErrors(cudaMemcpy(host_ptr, dev_a.elements , batchSize * sizeof(REAL*), cudaMemcpyDeviceToHost));
//ShowMatrixByRow2(host_A);
for(int i=0; i < batchSize;i++)
{
// printf("host_ptr==%p\n ",host_ptr[i]);
checkCudaErrors(cudaMemcpy(host_ptr[i], host_A.elements[i], dev_a.height * dev_a.width*sizeof(REAL), cudaMemcpyHostToDevice));
}
free(host_ptr);
}
// Downloads a device batched matrix into a host batched matrix of the same
// batch size (inverse of Copy2Device). On a batch-size mismatch it prints an
// error and returns.
void CopyBack2Host(Matrix& host_A,Matrix& dev_a)
{
if(dev_a.size != host_A.size)
{
printf("error: can't copy to host!");
return;
}
int batchSize = dev_a.size;
// Host-side copy of the device pointer table (source buffers).
REAL** dev_ptr= (REAL **)malloc( batchSize * sizeof(REAL*));
checkCudaErrors(cudaMemcpy(dev_ptr, dev_a.elements , batchSize * sizeof(REAL*), cudaMemcpyDeviceToHost));
for(int i=0; i < batchSize;i++)
{
checkCudaErrors(cudaMemcpy(host_A.elements[i], dev_ptr[i], dev_a.height * dev_a.width*sizeof(REAL), cudaMemcpyDeviceToHost));
}
free(dev_ptr);
}
|
8f12eaae138a61e7fd981e9f7372a1e0d7d661ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#define N 10
__global__ void add( int *a, int *b, int *c ) {
/**
* threadIdx.x contm o Id da thread (no bloco) a ser executada
*/
int tid = threadIdx.x; // this thread handles the data at its thread id
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
hipMalloc( (void**)&dev_a, N * sizeof(int));
hipMalloc( (void**)&dev_b, N * sizeof(int));
hipMalloc( (void**)&dev_c, N * sizeof(int));
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<N; i++) {
a[i] = -i;
b[i] = i * i;
}
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy( dev_a, a, N * sizeof(int),hipMemcpyHostToDevice);
hipMemcpy( dev_b, b, N * sizeof(int),hipMemcpyHostToDevice);
/**
* N o no. de threads a ser executada em paralelo pela GPU
*/
hipLaunchKernelGGL(( add), dim3(1),dim3(N), 0, 0, dev_a,dev_b,dev_c );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy(c, dev_c, N * sizeof(int),hipMemcpyDeviceToHost);
// display the results
for (int i=0; i<N; i++) {
printf( "[%d] %d + %d = %d\n",i, a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
getchar();
return 0;
}
| 8f12eaae138a61e7fd981e9f7372a1e0d7d661ef.cu | #include <stdlib.h>
#include <stdio.h>
#define N 10
__global__ void add( int *a, int *b, int *c ) {
    // Each thread sums one element pair; threadIdx.x (the lane within the
    // single launched block) is used directly as the element index.
    const int idx = threadIdx.x;
    if (idx >= N)
        return; // guard: ignore threads beyond the N elements
    c[idx] = a[idx] + b[idx];
}
// Abort-with-message wrapper for CUDA runtime calls; keeps this sample's
// error handling compact (fprintf/exit come from the already-included
// stdio/stdlib headers).
static void checkCudaOrDie(cudaError_t err, const char *what) {
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Vector-addition demo: fills two N-element arrays on the CPU, adds them on
// the GPU with a single block of N threads, and prints the results.
// IMPROVED: every CUDA call is now checked instead of being silently ignored,
// and kernel-launch errors are surfaced via cudaGetLastError().
int main( void ) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    // allocate the memory on the GPU
    checkCudaOrDie(cudaMalloc((void**)&dev_a, N * sizeof(int)), "cudaMalloc dev_a");
    checkCudaOrDie(cudaMalloc((void**)&dev_b, N * sizeof(int)), "cudaMalloc dev_b");
    checkCudaOrDie(cudaMalloc((void**)&dev_c, N * sizeof(int)), "cudaMalloc dev_c");
    // fill the arrays 'a' and 'b' on the CPU
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }
    // copy the arrays 'a' and 'b' to the GPU
    checkCudaOrDie(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy a");
    checkCudaOrDie(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy b");
    // N threads in a single block, one element per thread
    add<<<1,N>>>(dev_a,dev_b,dev_c );
    checkCudaOrDie(cudaGetLastError(), "kernel launch");
    // copy the array 'c' back from the GPU to the CPU; the blocking
    // cudaMemcpy also synchronizes with the kernel
    checkCudaOrDie(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy c");
    // display the results
    for (int i = 0; i < N; i++) {
        printf( "[%d] %d + %d = %d\n",i, a[i], b[i], c[i] );
    }
    // free the memory allocated on the GPU
    checkCudaOrDie(cudaFree(dev_a), "cudaFree dev_a");
    checkCudaOrDie(cudaFree(dev_b), "cudaFree dev_b");
    checkCudaOrDie(cudaFree(dev_c), "cudaFree dev_c");
    getchar();
    return 0;
}
|
59bcccb1f74c46f6e285f52d32e5d8f0773589cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "device.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include < vector >
#include < time.h >
#include < stdio.h >
#include < cassert >
#include < cstdlib >
#include < iostream >
#include < algorithm >
#include < functional >
#include < immintrin.h >
#include < string >
// CTR+M CTR+O
#define NUM_STREAM 4
#define BLOCK_SIZE 32
#define MATRIX_SIZE 512 * 16 * 2
#define BASE_TYPE float
#define LOOP_I(_loop) for(int i=0; i < _loop; i++)
using std::vector;
using std::cout;
using std::generate;
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
typedef struct {
int row = MATRIX_SIZE;
int col = MATRIX_SIZE;
float* elements;
int stride = 2;
} Matrix;
void MatrixMul(float* c, const float* a, const float* b);
int MatrixBlock(float* c, const float* a, const float* b);
void MatrixBlock1(float* c, const float* a, const float* b);
int MatrixBank(float* c, const float* a, const float* b);
void MatrixTiled(float* c, const float* a, const float* b);
void MatrixSub(Matrix C, const Matrix A, const Matrix B);
void MatrixPinned(float* c, const float* a, const float* b);
void MatrixMulStream(float* c, const float* a, const float* b);
void matrixDeviceBuffA(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b);
void matrixDeviceBuffB(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b);
void MatrixMuld(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b);
void matrixSmemm(void smem(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b), BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b, const int stream);
// Reference O(N^3) host implementation: c = a * b for square row-major
// MATRIX_SIZE x MATRIX_SIZE matrices (classic i-j-k loop order).
void matrixHost(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    for (size_t row = 0; row < MATRIX_SIZE; ++row)
    {
        for (size_t col = 0; col < MATRIX_SIZE; ++col)
        {
            BASE_TYPE acc = 0;
            for (size_t k = 0; k < MATRIX_SIZE; ++k)
                acc += a[row * MATRIX_SIZE + k] * b[k * MATRIX_SIZE + col];
            c[row * MATRIX_SIZE + col] = acc;
        }
    }
}
// Cache-friendlier host multiply: same result as matrixHost, but with i-k-j
// loop order so the inner loop streams contiguously over a row of B and a row
// of C instead of striding down a column of B.
void matrixHostImproved(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
for (size_t i = 0; i < MATRIX_SIZE; ++i)
{
// C points at output row i; zero it before accumulation.
float* C = c + i * MATRIX_SIZE;
for (size_t j = 0; j < MATRIX_SIZE; ++j)
{
C[j] = 0;
}
for (size_t k = 0; k < MATRIX_SIZE; ++k)
{
// Rank-1 update: row i of C += a[i][k] * row k of B.
const float* B = b + k * MATRIX_SIZE;
float A = a[i * MATRIX_SIZE + k];
for (size_t j = 0; j < MATRIX_SIZE; j++)
{
C[j] += A * B[j];
}
}
}
}
void matrixVectorise(float* C, const float* A, float* B)
{
for (int i = 0; i < MATRIX_SIZE; ++i)
{
float* c = C + i * MATRIX_SIZE;
for (int j = 0; j < MATRIX_SIZE; j += 8)
_mm256_storeu_ps(c + j + 0, _mm256_setzero_ps());
for (int k = 0; k < MATRIX_SIZE; ++k)
{
const float* b = B + k * MATRIX_SIZE;
__m256 a = _mm256_set1_ps(A[i * MATRIX_SIZE + k]);
for (int j = 0; j < MATRIX_SIZE; j += 16)
{
_mm256_storeu_ps(c + j + 0, _mm256_fmadd_ps(a, _mm256_loadu_ps(b + j + 0), _mm256_loadu_ps(c + j + 0)));
_mm256_storeu_ps(c + j + 8, _mm256_fmadd_ps(a, _mm256_loadu_ps(b + j + 8), _mm256_loadu_ps(c + j + 8)));
}
}
}
}
void micro_6x161(int K, const float* A, int lda, int step, const float* B, int ldb, float* C, int ldc)
{
__m256 c00 = _mm256_setzero_ps();
__m256 c10 = _mm256_setzero_ps();
__m256 c20 = _mm256_setzero_ps();
__m256 c30 = _mm256_setzero_ps();
__m256 c40 = _mm256_setzero_ps();
__m256 c50 = _mm256_setzero_ps();
__m256 c01 = _mm256_setzero_ps();
__m256 c11 = _mm256_setzero_ps();
__m256 c21 = _mm256_setzero_ps();
__m256 c31 = _mm256_setzero_ps();
__m256 c41 = _mm256_setzero_ps();
__m256 c51 = _mm256_setzero_ps();
const int offset0 = lda * 0;
const int offset1 = lda * 1;
const int offset2 = lda * 2;
const int offset3 = lda * 3;
const int offset4 = lda * 4;
const int offset5 = lda * 5;
__m256 b0, b1, b2, a0, a1, a2;
for (int k = 0; k < K; k++)
{
b0 = _mm256_loadu_ps(B + 0);
b1 = _mm256_loadu_ps(B + 8);
a0 = _mm256_set1_ps(A[offset0]);
a1 = _mm256_set1_ps(A[offset1]);
c00 = _mm256_fmadd_ps(a0, b0, c00);
c01 = _mm256_fmadd_ps(a0, b1, c01);
c10 = _mm256_fmadd_ps(a1, b0, c10);
c11 = _mm256_fmadd_ps(a1, b1, c11);
a0 = _mm256_set1_ps(A[offset2]);
a1 = _mm256_set1_ps(A[offset3]);
c20 = _mm256_fmadd_ps(a0, b0, c20);
c21 = _mm256_fmadd_ps(a0, b1, c21);
c30 = _mm256_fmadd_ps(a1, b0, c30);
c31 = _mm256_fmadd_ps(a1, b1, c31);
a0 = _mm256_set1_ps(A[offset4]);
a1 = _mm256_set1_ps(A[offset5]);
c40 = _mm256_fmadd_ps(a0, b0, c40);
c41 = _mm256_fmadd_ps(a0, b1, c41);
c50 = _mm256_fmadd_ps(a1, b0, c50);
c51 = _mm256_fmadd_ps(a1, b1, c51);
B += ldb; A += step;
}
_mm256_storeu_ps(C + 0, _mm256_add_ps(c00, _mm256_loadu_ps(C + 0)));
_mm256_storeu_ps(C + 8, _mm256_add_ps(c01, _mm256_loadu_ps(C + 8)));
C += ldc;
_mm256_storeu_ps(C + 0, _mm256_add_ps(c10, _mm256_loadu_ps(C + 0)));
_mm256_storeu_ps(C + 8, _mm256_add_ps(c11, _mm256_loadu_ps(C + 8)));
C += ldc;
_mm256_storeu_ps(C + 0, _mm256_add_ps(c20, _mm256_loadu_ps(C + 0)));
_mm256_storeu_ps(C + 8, _mm256_add_ps(c21, _mm256_loadu_ps(C + 8)));
C += ldc;
_mm256_storeu_ps(C + 0, _mm256_add_ps(c30, _mm256_loadu_ps(C + 0)));
_mm256_storeu_ps(C + 8, _mm256_add_ps(c31, _mm256_loadu_ps(C + 8)));
C += ldc;
_mm256_storeu_ps(C + 0, _mm256_add_ps(c40, _mm256_loadu_ps(C + 0)));
_mm256_storeu_ps(C + 8, _mm256_add_ps(c41, _mm256_loadu_ps(C + 8)));
C += ldc;
_mm256_storeu_ps(C + 0, _mm256_add_ps(c50, _mm256_loadu_ps(C + 0)));
_mm256_storeu_ps(C + 8, _mm256_add_ps(c51, _mm256_loadu_ps(C + 8)));
}
void init_c1(int M, int N, float* C, int ldc)
{
for (int i = 0; i < M; ++i, C += ldc)
for (int j = 0; j < N; j += 8)
_mm256_storeu_ps(C + j, _mm256_setzero_ps());
}
// Host multiply built from 6x16 micro-panels of C, each computed by an
// AVX2/FMA micro-kernel; requires MATRIX_SIZE divisible by 6 and 16.
// NOTE(review): init_c and micro_6x16 are not defined in this translation
// unit (only init_c1 / micro_6x161 are visible here) — confirm they are
// declared in "device.h", or that these names are a deduplication artifact.
void matrixCore(BASE_TYPE* C, const BASE_TYPE* A, const BASE_TYPE* B)
{
for (int i = 0; i < MATRIX_SIZE; i += 6)
{
for (int j = 0; j < MATRIX_SIZE; j += 16)
{
init_c(6, 16, C + i * MATRIX_SIZE + j, MATRIX_SIZE);
micro_6x16(MATRIX_SIZE, A + i * MATRIX_SIZE, MATRIX_SIZE, 1, B + j, MATRIX_SIZE, C + i * MATRIX_SIZE + j, MATRIX_SIZE);
}
}
}
struct buf_t
{
float* p;
int n;
buf_t(int size) : n(size), p((BASE_TYPE*)_mm_malloc(size * 4, 64)) {}
~buf_t() { _mm_free(p); }
};
void reorder_b_161(int K, const float* B, int ldb, float* bufB)
{
for (int k = 0; k < K; ++k, B += ldb, bufB += 16)
{
_mm256_storeu_ps(bufB + 0, _mm256_loadu_ps(B + 0));
_mm256_storeu_ps(bufB + 8, _mm256_loadu_ps(B + 8));
}
}
void matrixBuf(float* C,const float* A,const float* B)
{
for (int j = 0; j < MATRIX_SIZE; j += 16)
{
buf_t bufB(16 * MATRIX_SIZE);
reorder_b_16(MATRIX_SIZE, B + j, MATRIX_SIZE, bufB.p);
for (int i = 0; i < MATRIX_SIZE; i += 6)
{
init_c(6, 16, C + i * MATRIX_SIZE + j, MATRIX_SIZE);
micro_6x16(MATRIX_SIZE, A + i * MATRIX_SIZE, MATRIX_SIZE, 1, bufB.p, 16, C + i * MATRIX_SIZE + j, MATRIX_SIZE);
}
}
}
void macro(BASE_TYPE* C, int K, int ldc, const BASE_TYPE* A, int lda, const BASE_TYPE* B, int ldb, BASE_TYPE* bufB)
{
for (int j = 0; j < MATRIX_SIZE; j += 16)
{
reorder_b_16(K, B + j, ldb, bufB);
for (int i = 0; i < MATRIX_SIZE; i += 6)
micro_6x16(K, A + i * lda, lda, 1, bufB, 16, C + i * ldc + j, ldc);
}
}
void matrixL1(BASE_TYPE* C, const BASE_TYPE* A, const BASE_TYPE* B, int M, int N, int K)
{
const int L1 = 384 * 1024;
int mK = ::min(L1 / 4 / 16, K);
buf_t bufB(16 * mK);
for (int k = 0; k < K; k += mK)
{
int dK = ::min(K, k + mK) - k;
if (k == 0)
init_c(M, N, C, N);
macro(C, dK, N, A + k, K, B + k * N, N, bufB.p);
}
}
//
// Naive GPU matrix multiply: one thread per output element.
// NOTE(review): a and c are indexed with an (i*MATRIX_SIZE)/2 row offset
// while b uses the full MATRIX_SIZE stride and k runs over MATRIX_SIZE —
// this appears tuned for a half-split of the work (cf. matrixDeviceStream);
// verify against the launch configuration before reuse.
// BUGFIX: the accumulator was declared int, truncating the BASE_TYPE (float)
// products on every addition; accumulate in BASE_TYPE instead. The redundant
// pre-zeroing store of c was also dropped (the final store overwrites it).
__global__ void matrixDevice(BASE_TYPE*c, const BASE_TYPE*a, const BASE_TYPE*b)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    BASE_TYPE sum = 0;
    for (size_t k = 0; k < MATRIX_SIZE; k++)
    {
        sum += a[i * MATRIX_SIZE/2 + k] * b[j + MATRIX_SIZE * k];
    }
    c[i * MATRIX_SIZE/2 + j] = sum;
}
__global__ void matrixDeviceStream(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b, const int stream)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int sum = 0;
c[i * stream + j] = 0;
for (size_t k = 0; k < MATRIX_SIZE; k++)
{
sum += a[i * stream + k] * b[j + MATRIX_SIZE * k];
}
c[i * stream + j] = sum;
}
__global__ void matrixDeviceV1(float* c, const float* a, const float* b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
float* C = c + i * MATRIX_SIZE;
for (size_t m = j; m < j+MATRIX_SIZE; ++m)
{
C[m] = 0;
}
int sum = 0;
for (size_t k = 0; k < MATRIX_SIZE; k++)
{
const float* B = b + k * MATRIX_SIZE;
float A = a[i * MATRIX_SIZE + k];
for (size_t m = j; m < j+MATRIX_SIZE; ++m)
{
C[m] += A * B[m];
}
}
}
__device__ float GetElement(const Matrix A, int row, int col ) {
return A.elements[row * A.stride + col];
}
__device__ void SetElement(Matrix A, int row, int col, float value)
{
A.elements[row * A.stride + col] = value;
}
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix ASub;
ASub.row = BLOCK_SIZE;
ASub.col = BLOCK_SIZE;
ASub.stride = A.stride;
ASub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
return ASub;
}
__global__ void matrixDeviceSub(Matrix A, Matrix B, Matrix C);
void MatrixMulCublas(float* c, const float* a, const float* b);
//
// Tiled matrix multiply using the Matrix/GetSubMatrix helpers (pattern of the
// CUDA Programming Guide shared-memory example): each BLOCK_SIZE x BLOCK_SIZE
// thread block computes one sub-matrix Csub of C, staging matching tiles of A
// and B in shared memory and accumulating one element per thread.
// Requires Matrix.stride to be set to the real row pitch by the caller (the
// struct's default stride of 2 would be wrong here — verify at call sites),
// and A.row divisible by BLOCK_SIZE.
// NOTE(review): the forward declaration lists parameters as (A, B, C) while
// this definition uses (C, A, B); the types are identical so it compiles, but
// positional callers must pass C first.
__global__ void matrixDeviceSub(Matrix C, Matrix A, Matrix B) {
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Output tile owned by this thread block.
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
float Cvalue = 0;
int row = threadIdx.y;
int col = threadIdx.x;
// Walk the tile pairs along A's block-row and B's block-column.
for (size_t m = 0; m < (A.row / BLOCK_SIZE); ++m)
{
Matrix Asub = GetSubMatrix(A, blockRow, m);
Matrix Bsub = GetSubMatrix(B, m, blockCol);
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Each thread stages one element of each tile.
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Tiles must be fully loaded before any thread reads them.
__syncthreads();
for (size_t e = 0; e < BLOCK_SIZE; ++e)
{
Cvalue += As[row][e] * Bs[e][col];
}
// All reads must finish before the tiles are overwritten next iteration.
__syncthreads();
}
SetElement(Csub, row, col, Cvalue);
}
__global__ void matrixDevicBlock(BASE_TYPE* C, BASE_TYPE* A, BASE_TYPE* B)
{
BASE_TYPE CValue = 0;
int Row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int Col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
__shared__ BASE_TYPE As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ BASE_TYPE Bs[BLOCK_SIZE][BLOCK_SIZE];
for (int k = 0; k < (BLOCK_SIZE + MATRIX_SIZE - 1) / BLOCK_SIZE; k++) {
if (k * BLOCK_SIZE + threadIdx.x < MATRIX_SIZE && Row < MATRIX_SIZE)
As[threadIdx.y][threadIdx.x] = A[Row * MATRIX_SIZE + k * BLOCK_SIZE + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
if (k * BLOCK_SIZE + threadIdx.y < MATRIX_SIZE && Col < MATRIX_SIZE)
Bs[threadIdx.y][threadIdx.x] = B[(k * BLOCK_SIZE + threadIdx.y) * MATRIX_SIZE + Col];
else
Bs[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int n = 0; n < BLOCK_SIZE; ++n)
CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
__syncthreads();
}
if (Row < MATRIX_SIZE && Col < MATRIX_SIZE)
C[((blockIdx.y * blockDim.y + threadIdx.y) * MATRIX_SIZE) + blockIdx.x * blockDim.x + threadIdx.x] = CValue;
}
__global__ void Muld(float* C, const float* A, const float* B)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = MATRIX_SIZE * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + MATRIX_SIZE - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * MATRIX_SIZE;
// The element of the block sub-matrix that is computed
// by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B required to
// compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Shared memory for the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Shared memory for the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from global memory to shared memory;
// each thread loads one element of each matrix
As[ty][tx] = A[a + MATRIX_SIZE * ty + tx];
Bs[ty][tx] = B[b + MATRIX_SIZE * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to global memory;
// each thread writes one element
int c = MATRIX_SIZE * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + MATRIX_SIZE * ty + tx] = Csub;
}
__global__ void matrixPin(float* __restrict c, const float* __restrict a, const float* __restrict b, int N) {
// Compute each thread's global row and column index
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Iterate over row, and down column
c[row * N + col] = 0;
for (int k = 0; k < N; k++) {
// Accumulate results for a single element
c[row * N + col] += a[row * N + k] * b[k * N + col];
}
}
__global__ void function(float* dA, float* dB, float* dC, int size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) dC[i] = dA[i] + dB[i];
}
// Pull out matrix and shared memory tile size
const int N = 1024;
const int SHMEM_SIZE = 1024;
// Shared memory bank conflicts
// Tiled shared-memory matrix multiply (the file's bank-conflict demonstration
// variant: the shared tiles are unpadded, unlike matrixSmem2's +1-padded
// arrays). Each BLOCK_SIZE x BLOCK_SIZE thread block walks matching tiles of
// A (rightward along a block-row) and B (downward along a block-column).
// NOTE(review): the original non-ASCII comments were stripped to empty "//"
// markers; the notes below are reconstructed from the code itself.
__global__ void matrixMultBank(BASE_TYPE* C, const BASE_TYPE* A, const BASE_TYPE* B)
{
// Index of the first element of A processed by this block
int aBegin = MATRIX_SIZE * blockDim.y * blockIdx.y;
// Index of the last element of that row band of A
int aEnd = aBegin + MATRIX_SIZE - 1;
// Step to the next tile of A (one tile width to the right)
int aStep = blockDim.x;
// Index of the first element of B processed by this block
int bBegin = blockDim.x * blockIdx.x;
// Step to the next tile of B (one tile height down)
int bStep = blockDim.y * MATRIX_SIZE;
// Shared-memory tiles of A and B
__shared__ BASE_TYPE as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ BASE_TYPE bs[BLOCK_SIZE][BLOCK_SIZE];
// Accumulator for this thread's single output element
BASE_TYPE sum = 0.0;
for (int ia = aBegin, ib = bBegin; ia < aEnd; ia +=
aStep, ib += bStep)
{
// Each thread stages one element of the A tile
// and one element of the B tile
as[threadIdx.y][threadIdx.x] = A[ia + MATRIX_SIZE * threadIdx.y + threadIdx.x];
bs[threadIdx.y][threadIdx.x] = B[ib + MATRIX_SIZE * threadIdx.y + threadIdx.x];
// Wait until both tiles are fully staged
__syncthreads();
// Partial dot product over the current tile pair
for (int k = 0; k < blockDim.x; k++)
sum += as[threadIdx.y][k] *
bs[k][threadIdx.x];
// Wait before the tiles are overwritten in the next iteration
__syncthreads();
}
// Global index of this thread's output element
int ind = MATRIX_SIZE * (blockDim.y * blockIdx.y + threadIdx.y) + blockDim.x * blockIdx.x + threadIdx.x;
// Store the result
C[ind] = sum;
}
// Tiled multiply c = a * b (square, row-major). The tiles are stored
// TRANSPOSED in shared memory (as[tx][ty]), so the inner loop walks a
// column of `as` / row of `bs`; this is the bank-conflict-prone layout
// that matrixSmem2 fixes with +1 padding.
__global__ void matrixSmem1(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    // First/last index of this block's row of A-tiles, plus tile strides.
    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by, aEnd = aBegin + MATRIX_SIZE - 1;
    int bBegin = BLOCK_SIZE * bx, aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * MATRIX_SIZE;
    BASE_TYPE sum = 0.0f;
    __shared__ BASE_TYPE as[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ BASE_TYPE bs[BLOCK_SIZE][BLOCK_SIZE];
    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        // Stage one element of each tile, transposed: tile(x, y) = global(y, x).
        as[tx][ty] = a[ia + MATRIX_SIZE * ty + tx];
        bs[tx][ty] = b[ib + MATRIX_SIZE * ty + tx];
        __syncthreads();
        // sum += A(ty, k) * B(k, tx) over this tile.
        for (int k = 0; k < BLOCK_SIZE; k++) sum += as[k][ty] * bs[tx][k];
        __syncthreads();
    }
    // Write this thread's element of the output tile.
    c[aBegin + bBegin + ty * MATRIX_SIZE + tx] = sum;
}
// Same transposed-tile multiply as matrixSmem1, but the inner dimension of
// both tiles is padded by +1 so the strided accesses as[k][ty] / bs[tx][k]
// land in different shared-memory banks (bank-conflict fix).
__global__ void matrixSmem2(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    // First/last index of this block's row of A-tiles, plus tile strides.
    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by, aEnd = aBegin + MATRIX_SIZE - 1;
    int bBegin = BLOCK_SIZE * bx, aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * MATRIX_SIZE;
    BASE_TYPE sum = 0.0f;
    // +1 padding shifts each row onto a different bank alignment.
    __shared__ BASE_TYPE as[BLOCK_SIZE][BLOCK_SIZE + 1];
    __shared__ BASE_TYPE bs[BLOCK_SIZE][BLOCK_SIZE + 1];
    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        // Stage one element of each tile, transposed.
        as[tx][ty] = a[ia + MATRIX_SIZE * ty + tx];
        bs[tx][ty] = b[ib + MATRIX_SIZE * ty + tx];
        __syncthreads();
        // sum += A(ty, k) * B(k, tx) over this tile.
        for (int k = 0; k < BLOCK_SIZE; k++) sum += as[k][ty] * bs[tx][k];
        __syncthreads();
    }
    c[aBegin + bBegin + ty * MATRIX_SIZE + tx] = sum;
}
// Classic shared-memory tiled multiply of square row-major matrices:
// each BLOCK_SIZE x BLOCK_SIZE thread block computes one output tile of
// c = a * b, streaming matching tiles of a and b through shared memory.
__global__ void matrixSmem3(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    const int col = threadIdx.x;
    const int row = threadIdx.y;
    // First/last element of this block's row of A-tiles, and the strides
    // used to walk the tiles of A (rightwards) and B (downwards).
    const int aFirst = MATRIX_SIZE * BLOCK_SIZE * blockIdx.y;
    const int aLast = aFirst + MATRIX_SIZE - 1;
    const int bFirst = BLOCK_SIZE * blockIdx.x;
    const int strideA = BLOCK_SIZE;
    const int strideB = BLOCK_SIZE * MATRIX_SIZE;

    __shared__ BASE_TYPE tileA[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ BASE_TYPE tileB[BLOCK_SIZE][BLOCK_SIZE];

    BASE_TYPE acc = 0.0f;
    int ia = aFirst;
    int ib = bFirst;
    while (ia <= aLast)
    {
        // Each thread stages one element of each tile.
        tileA[row][col] = a[ia + MATRIX_SIZE * row + col];
        tileB[row][col] = b[ib + MATRIX_SIZE * row + col];
        __syncthreads();  // tiles fully loaded before use
        for (int k = 0; k < BLOCK_SIZE; k++)
            acc += tileA[row][k] * tileB[k][col];
        __syncthreads();  // all reads done before the next load
        ia += strideA;
        ib += strideB;
    }
    c[aFirst + bFirst + row * MATRIX_SIZE + col] = acc;
}
// Like matrixSmem3 but each thread computes TWO output rows (ty and ty+16),
// halving the thread count per block. The hard-coded +16 offsets assume the
// kernel is launched with blockDim.y == 16, i.e. BLOCK_SIZE == 32 (the host
// wrapper matrixSmemm passes stream = 2 for this kernel) -- TODO confirm
// BLOCK_SIZE.
__global__ void matrixSmem4(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    // First/last index of this block's row of A-tiles, plus tile strides.
    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by, aEnd = aBegin + MATRIX_SIZE - 1;
    int bBegin = BLOCK_SIZE * bx, aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * MATRIX_SIZE;
    // One accumulator per output row handled by this thread.
    BASE_TYPE sum1 = 0.0f, sum2 = 0.0f;
    __shared__ BASE_TYPE as[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ BASE_TYPE bs[BLOCK_SIZE][BLOCK_SIZE];
    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        // Each thread stages two rows of each tile.
        as[ty][tx] = a[ia + MATRIX_SIZE * ty + tx];
        bs[ty][tx] = b[ib + MATRIX_SIZE * ty + tx];
        as[ty + 16][tx] = a[ia + MATRIX_SIZE * (ty + 16) + tx];
        bs[ty + 16][tx] = b[ib + MATRIX_SIZE * (ty + 16) + tx];
        __syncthreads();
        // Two dot products per k step, sharing the bs[k][tx] load.
        for (int k = 0; k < BLOCK_SIZE; k++) {
            sum1 += as[ty][k] * bs[k][tx];
            sum2 += as[ty + 16][k] * bs[k][tx];
        }
        __syncthreads();
    }
    // Write both output elements.
    c[aBegin + bBegin + ty * MATRIX_SIZE + tx] = sum1;
    c[aBegin + bBegin + (ty + 16) * MATRIX_SIZE + tx] = sum2;
}
// Like matrixSmem4 but each thread computes FOUR output rows (ty, ty+8,
// ty+16, ty+24). The hard-coded offsets assume blockDim.y == 8, i.e.
// BLOCK_SIZE == 32 (matrixSmemm passes stream = 4 for this kernel) -- TODO
// confirm BLOCK_SIZE.
__global__ void matrixSmem5(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    // First/last index of this block's row of A-tiles, plus tile strides.
    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by, aEnd = aBegin + MATRIX_SIZE - 1;
    int bBegin = BLOCK_SIZE * bx, aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * MATRIX_SIZE;
    // One accumulator per output row handled by this thread.
    BASE_TYPE sum1 = 0.0f, sum2 = 0.0f, sum3 = 0.0f, sum4 = 0.0f;
    __shared__ BASE_TYPE as[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ BASE_TYPE bs[BLOCK_SIZE][BLOCK_SIZE];
    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        // Each thread stages four rows of each tile.
        as[ty][tx] = a[ia + MATRIX_SIZE * ty + tx];
        bs[ty][tx] = b[ib + MATRIX_SIZE * ty + tx];
        as[ty + 8][tx] = a[ia + MATRIX_SIZE * (ty + 8) + tx];
        bs[ty + 8][tx] = b[ib + MATRIX_SIZE * (ty + 8) + tx];
        as[ty + 16][tx] = a[ia + MATRIX_SIZE * (ty + 16) + tx];
        bs[ty + 16][tx] = b[ib + MATRIX_SIZE * (ty + 16) + tx];
        as[ty + 24][tx] = a[ia + MATRIX_SIZE * (ty + 24) + tx];
        bs[ty + 24][tx] = b[ib + MATRIX_SIZE * (ty + 24) + tx];
        __syncthreads();
        // Four dot products per k step, sharing the bs[k][tx] load.
        for (int k = 0; k < BLOCK_SIZE; k++) {
            sum1 += as[ty][k] * bs[k][tx];
            sum2 += as[ty + 8][k] * bs[k][tx];
            sum3 += as[ty + 16][k] * bs[k][tx];
            sum4 += as[ty + 24][k] * bs[k][tx];
        }
        __syncthreads();
    }
    // Write all four output elements.
    c[aBegin + bBegin + ty * MATRIX_SIZE + tx] = sum1;
    c[aBegin + bBegin + (ty + 8) * MATRIX_SIZE + tx] = sum2;
    c[aBegin + bBegin + (ty + 16) * MATRIX_SIZE + tx] = sum3;
    c[aBegin + bBegin + (ty + 24) * MATRIX_SIZE + tx] = sum4;
}
// Element-wise vector addition c[i] = a[i] + b[i], one element per thread,
// guarded against the grid tail when the grid overshoots N.
__global__ void vectorAdd(float* c, const float* a, const float* b, int N) {
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
// Naive global-memory multiply c = a * b (square, row-major): one thread per
// output element, no shared-memory tiling.
__global__ void kernel_global(float* c, const float* a, const float* b)
{
    int bx = blockIdx.x; // block index, x
    int by = blockIdx.y; // block index, y
    int tx = threadIdx.x; // thread index within block, x
    int ty = threadIdx.y; // thread index within block, y
    float sum = 0.0f;
    int ia = MATRIX_SIZE * (BLOCK_SIZE * by + ty); // start of this thread's row of A
    int ib = BLOCK_SIZE * bx + tx; // this thread's column of B
    int ic = ia + ib; // linear index of the output element
    // Dot product of a row of A with a column of B.
    for (int k = 0; k < MATRIX_SIZE; k++) sum += a[ia + k] * b[ib + k * MATRIX_SIZE];
    c[ic] = sum;
}
// Tiled multiply c = a * b with blockDim-sized tiles held in statically
// allocated shared arrays. Assumes a square launch (blockDim.x == blockDim.y),
// blockDim.x * blockDim.y <= SHMEM_SIZE, and MATRIX_SIZE divisible by
// blockDim.x.
__global__ void matrixMulTiled(BASE_TYPE* c, const BASE_TYPE* a,const BASE_TYPE* b) {
    // Compute each thread's global row and column index
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Statically allocated shared memory
    __shared__ BASE_TYPE s_a[SHMEM_SIZE];
    __shared__ BASE_TYPE s_b[SHMEM_SIZE];
    // BUGFIX: the accumulator was declared `int`, truncating every BASE_TYPE
    // (floating-point) partial product; accumulate in BASE_TYPE instead.
    BASE_TYPE tmp = 0;
    // Sweep tile across matrix
    for (int i = 0; i < MATRIX_SIZE; i += blockDim.x) {
        // Load in elements for this tile
        s_a[threadIdx.y * blockDim.x + threadIdx.x] = a[row * MATRIX_SIZE + i + threadIdx.x];
        s_b[threadIdx.y * blockDim.x + threadIdx.x] = b[i * MATRIX_SIZE + threadIdx.y * MATRIX_SIZE + col];
        // Wait for both tiles to be loaded in before doing computation
        __syncthreads();
        // Do matrix multiplication on the small matrix
        for (int j = 0; j < blockDim.x; j++) {
            tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x];
        }
        // Wait for all threads to finish using current tiles before loading in new
        // ones
        __syncthreads();
    }
    // Write back results
    c[row * MATRIX_SIZE + col] = tmp;
}
// Runs `function(c, a, b)` testCount times, prints ten sample output elements
// and the average wall-clock time, and returns the TOTAL elapsed time in
// seconds.
double experiment(void function(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b),
    const std::string type, const std::string description, BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    const int testCount = 10;
    // BUGFIX: `seconds` was read uninitialized by the `+=` below.
    double seconds = 0.0;
    clock_t start, end;
    LOOP_I(testCount) {
        start = clock();
        function(c, a, b);
        end = clock();
        seconds += (double)(end - start) / CLOCKS_PER_SEC;
    }
    // Spot-check a handful of output elements.
    for (size_t i = 200; i < 210; i++)
    {
        std::cout << c[i] << " ";
    }
    // BUGFIX: passing std::string through printf's %s is undefined behavior;
    // use c_str(). Convert the per-run average to milliseconds to match the
    // label. (Also removed a pointless `c = new float[...]` that only leaked
    // memory -- it reassigned the local pointer copy.)
    printf("time %s: %.2f ms - %s\n", type.c_str(), seconds / testCount * 1000.0, description.c_str());
    return seconds;
}
// Thread-block dimensions for the shared-memory transpose kernels
// (transpose / transposeBuffB).
const int TRX = 16;
const int TRY = 16;
// Minimal RAII wrapper around a device buffer of `n` floats; allocation and
// release failures abort via assert.
struct gpu_buf_t
{
    float* p;  // device pointer (asserted non-failing at construction)
    int n;     // element count
    gpu_buf_t(int size)
        : p(0)      // BUGFIX: initializer order now matches the declaration
        , n(size)   // order (p before n), silencing -Wreorder surprises.
    {
        hipError_t error = hipMalloc(&p, n * sizeof(float));
        assert(error == hipSuccess);
    }
    // Non-copyable: a shallow copy would double-free the device buffer.
    gpu_buf_t(const gpu_buf_t&) = delete;
    gpu_buf_t& operator=(const gpu_buf_t&) = delete;
    ~gpu_buf_t()
    {
        if (p)
        {
            hipError_t error = hipFree(p);
            assert(error == hipSuccess);
            p = 0;
        }
    }
};
// Tile / work-partition constants for the register-tiled gemm kernel below.
const int TSM = 128;          // output-tile height (M) per block
const int TSN = 128;          // output-tile width (N) per block
const int TSK = 16;           // K-depth of each shared-memory tile
const int WPTM = 8;           // output rows computed per thread (M)
const int WPTN = 8;           // output cols computed per thread (N)
const int RTSM = TSM / WPTM;  // threads per block in M (blockDim.y)
const int RTSN = TSN / WPTN;  // threads per block in N (blockDim.x)
const int LPTA = TSK * WPTM * WPTN / TSN;  // A-tile loads per thread per step
// Tiled transpose: dst (P x Q) = src^T, where src is a Q x P row-major
// matrix. Staged through shared memory so both the read and the write are
// coalesced. Expected launch: TRX x TRY threads per block.
// (Byte-for-byte duplicate of transposeBuffB below.)
__global__ void transpose(int P, int Q, const float* src, float* dst)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Source coordinates read by this thread.
    const int ID0 = blockIdx.x * TRX + tx;
    const int ID1 = blockIdx.y * TRY + ty;
    __shared__ float buf[TRX][TRY];
    if (ID0 < P && ID1 < Q)
        buf[ty][tx] = src[ID1 * P + ID0];
    // All tile writes must land before the transposed reads below.
    __syncthreads();
    // Destination coordinates: block offsets swapped and thread roles swapped,
    // so buf[tx][ty] was written by the matching in-range thread of this block.
    const int newID0 = blockIdx.y * TRY + tx;
    const int newID1 = blockIdx.x * TRX + ty;
    if (newID0 < Q && newID1 < P)
        dst[newID1 * Q + newID0] = buf[tx][ty];
}
// Register-tiled GEMM: C (M x N, row-major) = A * B, where A is indexed
// A[k * M + m] (column-major / pre-transposed layout) and B is indexed
// B[k * N + n]. Each block computes a TSM x TSN output tile; each thread
// computes a WPTM x WPTN register sub-tile. Expected launch: RTSN x RTSM
// threads per block; M, N, K divisible by the tile sizes.
__global__ void gemm(int M, int N, int K, const float* A, const float* B, float* C)
{
    const int tidm = threadIdx.y;
    const int tidn = threadIdx.x;
    const int offsetM = TSM * blockIdx.y;
    const int offsetN = TSN * blockIdx.x;
    // Shared tiles: sA is TSK x TSM, sB is TSN x TSK.
    __shared__ float sA[TSK][TSM];
    __shared__ float sB[TSN][TSK];
    // Per-thread register caches: one A value, a strip of B values, and the
    // WPTM x WPTN output accumulators.
    float rA;
    float rB[WPTN];
    float rC[WPTM][WPTN];
#pragma unroll
    for (int wm = 0; wm < WPTM; wm++)
    {
#pragma unroll
        for (int wn = 0; wn < WPTN; wn++)
            rC[wm][wn] = 0.0f;
    }
    for (int k0 = 0; k0 < K; k0 += TSK)
    {
        // Cooperative load of the next A/B tiles; each thread does LPTA loads.
        // NOTE(review): the sB store pairs sB[row][col] with an
        // `offsetN + row` load -- consistent only because TSM == TSN here.
#pragma unroll
        for (int la = 0; la < LPTA; la++)
        {
            int tid = tidn * RTSM + tidm;
            int id = la * RTSN * RTSM + tid;
            int row = id % TSM;
            int col = id / TSM;
            int tiledIndex = k0 + col;
#if __CUDA_ARCH__ >= 320
            sA[col][row] = __ldg(&A[tiledIndex * M + offsetM + row]);
            sB[row][col] = __ldg(&B[tiledIndex * N + offsetN + row]);
#else
            sA[col][row] = A[tiledIndex * M + offsetM + row];
            sB[row][col] = B[tiledIndex * N + offsetN + row];
#endif
        }
        __syncthreads();
        for (int k = 0; k < TSK; k++)
        {
            // Cache this k's strip of B in registers.
#pragma unroll
            for (int wn = 0; wn < WPTN; wn++)
            {
                int col = tidn + wn * RTSN;
                rB[wn] = sB[col][k];
            }
            // Rank-1 update of the register tile.
#pragma unroll
            for (int wm = 0; wm < WPTM; wm++)
            {
                int row = tidm + wm * RTSM;
                rA = sA[k][row];
#pragma unroll
                for (int wn = 0; wn < WPTN; wn++) {
                    rC[wm][wn] += rA * rB[wn];
                }
            }
        }
        __syncthreads();
    }
    // Write the register tile back to row-major C.
#pragma unroll
    for (int wm = 0; wm < WPTM; wm++)
    {
        int globalRow = offsetM + tidm + wm * RTSM;
#pragma unroll
        for (int wn = 0; wn < WPTN; wn++)
        {
            int globalCol = offsetN + tidn + wn * RTSN;
            C[globalCol + globalRow * N] = rC[wm][wn];
        }
    }
}
// Tiling constants for MatrixBuffB: TS x TS tiles, WPT output elements per
// thread, hence TS x PTS threads per block.
const int TS = 32;
const int WPT = 8;
const int PTS = TS / WPT;
// Tiled transpose of a Q x P row-major matrix `src` into a P x Q matrix
// `dst`, staged through shared memory so both the global read and write are
// coalesced. Expected launch: TRX x TRY threads per block.
__global__ void transposeBuffB(int P, int Q, const float* src, float* dst)
{
    __shared__ float tile[TRX][TRY];

    const int x = threadIdx.x;
    const int y = threadIdx.y;

    // Coordinates of the element this thread reads from src.
    const int srcCol = TRX * blockIdx.x + x;
    const int srcRow = TRY * blockIdx.y + y;
    if (srcCol < P && srcRow < Q)
        tile[y][x] = src[srcRow * P + srcCol];
    __syncthreads();  // whole tile staged before any transposed read

    // Swapped block offsets plus swapped thread roles give the transposed
    // target location.
    const int dstCol = TRY * blockIdx.y + x;
    const int dstRow = TRX * blockIdx.x + y;
    if (dstCol < Q && dstRow < P)
        dst[dstRow * Q + dstCol] = tile[x][y];
}
// Multiplies A (M x K, row-major) by a PRE-TRANSPOSED second operand: the
// host wrapper runs transposeBuffB first, so B here is indexed B[j * K + k].
// Each block computes a TS x TS output tile with TS x PTS threads; each
// thread produces WPT output rows (at offsets w * PTS).
__global__ void MatrixBuffB(int M, int N, int K, const float* A, const float* B, float* C)
{
    // Base output row for this thread, and its output column.
    int i0 = TS * blockIdx.y + threadIdx.y;
    int j = TS * blockIdx.x + threadIdx.x;
    // One accumulator per output row handled by this thread.
    float c[WPT];
    for (int w = 0; w < WPT; w++)
        c[w] = 0.0f;
    __shared__ float sA[TS][TS];
    __shared__ float sB[TS][TS];
    for (int k0 = 0; k0 < K; k0 += TS)
    {
        // Stage TS x TS tiles of A and transposed-B, WPT rows per thread.
        for (int w = 0; w < WPT; w++)
        {
            sA[threadIdx.y + w * PTS][threadIdx.x] = A[(i0 + w * PTS) * K + (k0 + threadIdx.x)];
            sB[threadIdx.y + w * PTS][threadIdx.x] = B[(j)*K + (k0 + threadIdx.y + w * PTS)];
        }
        __syncthreads();
        for (int k = 0; k < TS; ++k)
        {
            // b is reused across the WPT accumulations for this k.
            float b = sB[k][threadIdx.x];
            for (int w = 0; w < WPT; w++)
                c[w] += sA[threadIdx.y + w * PTS][k] * b;
        }
        __syncthreads();
    }
    // Write this thread's WPT output elements.
    for (int w = 0; w < WPT; w++)
        C[(i0 + w * PTS) * N + j] = c[w];
}
int main()
{
    // Host driver: fills two MATRIX_SIZE x MATRIX_SIZE matrices with small
    // random integers, then benchmarks the GPU matrix-multiply variants.
    double seconds;
    clock_t start, end;
    float* a = new float[MATRIX_SIZE * MATRIX_SIZE];
    float* b = new float[MATRIX_SIZE * MATRIX_SIZE];
    float* c = new float[MATRIX_SIZE * MATRIX_SIZE];
    for (size_t i = 0; i < MATRIX_SIZE * MATRIX_SIZE; i++)
    {
        a[i] = rand() % 5;
        b[i] = rand() % 5;
    }
    // TEST
    {
        // Plain global-memory kernel with pageable host buffers.
        experiment(MatrixMul, "GPU", "Standart", c, a, b);
        hipDeviceReset();
        // Same kernel, host buffers in pinned (page-locked) memory.
        {
            float* a_pin, * b_pin, * c_pin;
            unsigned int mem_size = sizeof(float) * MATRIX_SIZE * MATRIX_SIZE;
            hipHostMalloc((void**)&a_pin, mem_size, hipHostMallocDefault);
            hipHostMalloc((void**)&b_pin, mem_size, hipHostMallocDefault);
            hipHostMalloc((void**)&c_pin, mem_size, hipHostMallocDefault);
            for (size_t i = 0; i < MATRIX_SIZE * MATRIX_SIZE; i++)
            {
                a_pin[i] = a[i];
                b_pin[i] = b[i];
            }
            experiment(MatrixMul, "GPU", "Standart+Pinned", c_pin, a_pin, b_pin);
            hipDeviceReset();
            hipHostFree(a_pin);
            hipHostFree(b_pin);
            hipHostFree(c_pin);
        }
        // Pinned host memory plus multi-stream copy/compute overlap.
        {
            float* a_pin, * b_pin, * c_pin;
            unsigned int mem_size = sizeof(float) * MATRIX_SIZE * MATRIX_SIZE;
            hipHostMalloc((void**)&a_pin, mem_size, hipHostMallocDefault);
            hipHostMalloc((void**)&b_pin, mem_size, hipHostMallocDefault);
            hipHostMalloc((void**)&c_pin, mem_size, hipHostMallocDefault);
            for (int i = 0; i < MATRIX_SIZE * MATRIX_SIZE; i++) {
                a_pin[i] = a[i];
                b_pin[i] = b[i];
            }
            experiment(MatrixMulStream, "GPU", "Standart+Stream", c_pin, a_pin, b_pin);
            hipDeviceReset();
            hipHostFree(a_pin);
            hipHostFree(b_pin);
            hipHostFree(c_pin);
        }
        // Shared-memory variants Smem1..Smem5; the third argument of
        // matrixSmemm is the number of output rows computed per thread.
        auto timeSmem = [&](void (*kern)(BASE_TYPE*, const BASE_TYPE*, const BASE_TYPE*),
                            int rowsPerThread, const char* label) {
            start = clock();
            matrixSmemm(kern, c, a, b, rowsPerThread);
            end = clock();
            for (size_t i = 200; i < 210; i++)
            {
                std::cout << c[i] << " ";
            }
            seconds = (double)(end - start) / CLOCKS_PER_SEC;
            printf("time GPU: %.2f ms - %s \n", seconds, label);
            hipDeviceReset();
        };
        timeSmem(matrixSmem1, 1, "Smem1");
        timeSmem(matrixSmem2, 1, "Smem2");
        timeSmem(matrixSmem3, 1, "Smem3");
        timeSmem(matrixSmem4, 2, "Smem4");
        timeSmem(matrixSmem5, 4, "Smem5");
        experiment(MatrixMuld, "GPU", "MatrixMuldSub", c, a, b);
        hipDeviceReset();
        experiment(matrixDeviceBuffA, "GPU", "BuffA", c, a, b);
        hipDeviceReset();
        experiment(matrixDeviceBuffB, "GPU", "BuffB", c, a, b);
        hipDeviceReset();
        experiment(MatrixBlock1, "GPU", "Block Mult", c, a, b);
        hipDeviceReset();
        experiment(MatrixTiled, "GPU", "Tiled Mult", c, a, b);
        hipDeviceReset();
    }
    // NOTE(review): large blocks of commented-out CPU (gemm_v2..v6), cuBLAS,
    // event-timing, pinned and block-multiply experiments were removed here;
    // recover them from version history if needed. Unused locals (t1, t2, t,
    // mem_size, Matrix d) were dropped as well.
    // BUGFIX: memory obtained with new[] must be released with delete[];
    // plain `delete` on an array is undefined behavior.
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
// Launch helper for the matrixSmem* kernels. `stream` is the number of output
// rows each thread computes (1 for Smem1-3, 2 for Smem4, 4 for Smem5): the
// block's y extent shrinks by that factor while each block still covers
// BLOCK_SIZE output rows.
void matrixSmemm(void smem(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b), BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b, const int stream)
{
    int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(BASE_TYPE);
    BASE_TYPE* dev_a = 0;
    BASE_TYPE* dev_b = 0;
    BASE_TYPE* dev_c = 0;
    checkCuda(hipSetDevice(0));
    checkCuda(hipMalloc((void**)&dev_c, numBytes));
    checkCuda(hipMalloc((void**)&dev_a, numBytes));
    checkCuda(hipMalloc((void**)&dev_b, numBytes));
    checkCuda(hipMemcpy(dev_a, a, numBytes, hipMemcpyHostToDevice));
    checkCuda(hipMemcpy(dev_b, b, numBytes, hipMemcpyHostToDevice));
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE / stream);
    // BUGFIX: every block covers BLOCK_SIZE (= threads.y * stream) output
    // rows, so the grid's y extent must divide by that; the previous
    // N / threads.y launched `stream` times too many row-blocks and made
    // Smem4/Smem5 index past the end of the matrices.
    dim3 blocks(N / threads.x, N / (threads.y * stream));
    smem << <blocks, threads >> > (dev_c, dev_a, dev_b);
    checkCuda(hipDeviceSynchronize());
    checkCuda(hipGetLastError());
    checkCuda(hipMemcpy(c, dev_c, numBytes, hipMemcpyDeviceToHost));
    checkCuda(hipFree(dev_c));
    checkCuda(hipFree(dev_a));
    checkCuda(hipFree(dev_b));
}
// Square multiply c = a * b on the device via the matrixDevice kernel,
// launched with 32 x 32 thread blocks. Assumes MATRIX_SIZE is a multiple
// of 32.
// BUGFIX: the previous version staged the float inputs through vector<int>
// (silently truncating every element) and copied the result into a local
// vector instead of the `c` output parameter, so callers never saw the
// product. (Its pinned-memory code was entirely commented out.)
void MatrixPinned(float* c, const float* a, const float* b)
{
    int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    float* dev_a = 0;
    float* dev_b = 0;
    float* dev_c = 0;
    // Threads per block dimension, and blocks per grid dimension.
    int THREADS = 32;
    int BLOCKS = MATRIX_SIZE / THREADS;
    dim3 threads(THREADS, THREADS);
    dim3 blocks(BLOCKS, BLOCKS);
    checkCuda(hipSetDevice(0));
    checkCuda(hipMalloc((void**)&dev_c, numBytes));
    checkCuda(hipMalloc((void**)&dev_a, numBytes));
    checkCuda(hipMalloc((void**)&dev_b, numBytes));
    checkCuda(hipMemcpy(dev_a, a, numBytes, hipMemcpyHostToDevice));
    checkCuda(hipMemcpy(dev_b, b, numBytes, hipMemcpyHostToDevice));
    matrixDevice << <blocks, threads >> > (dev_c, dev_a, dev_b);
    checkCuda(hipDeviceSynchronize());
    checkCuda(hipGetLastError());
    // Return the product to the caller through `c`.
    checkCuda(hipMemcpy(c, dev_c, numBytes, hipMemcpyDeviceToHost));
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);
}
// Multiplies A * B into C on the device via the matrixDeviceSub kernel,
// using the Matrix struct (elements / col / row / stride). numBytes is
// computed from A only, so all three matrices are assumed to share the same
// dimensions.
void MatrixSub(Matrix C, const Matrix A, const Matrix B)
{
    size_t numBytes = A.col * A.row * sizeof(float);
    Matrix dev_a;
    Matrix dev_b;
    Matrix dev_c;
    // Load A and B to device memory
    dev_a.col = dev_a.stride = A.col; dev_a.row = A.row;
    checkCuda( hipMalloc((void**)&dev_a.elements, numBytes)) ;
    checkCuda( hipMemcpy(dev_a.elements, A.elements, numBytes, hipMemcpyHostToDevice) );
    // NOTE(review): dev_b takes its width from A.col, not B.col -- harmless
    // for the square matrices used here, but verify if shapes ever diverge.
    dev_b.col = dev_b.stride = A.col; dev_b.row = B.row;
    checkCuda( hipMalloc((void**)&dev_b.elements, numBytes) );
    checkCuda( hipMemcpy(dev_b.elements, B.elements, numBytes, hipMemcpyHostToDevice) );
    dev_c.col = dev_c.stride = C.col; dev_c.row = C.row;
    checkCuda( hipMalloc((void**)&dev_c.elements, numBytes) );
    checkCuda( hipMemcpy(dev_c.elements, C.elements, numBytes, hipMemcpyHostToDevice) );
    dim3 dimBLock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid (MATRIX_SIZE / dimBLock.x, MATRIX_SIZE / dimBLock.y);
    hipLaunchKernelGGL(( matrixDeviceSub) , dim3(dimGrid), dim3(dimBLock), 0, 0, dev_c, dev_a, dev_b);
    checkCuda( hipGetLastError() );
    checkCuda( hipDeviceSynchronize() );
    // Copy the product back into the caller's C.elements.
    checkCuda( hipMemcpy(C.elements, dev_c.elements, numBytes, hipMemcpyDeviceToHost) );
    hipFree(dev_c.elements);
    hipFree(dev_a.elements);
    hipFree(dev_b.elements);
}
// Host wrapper for the matrixDevicBlock kernel (HIP launch-macro twin of
// MatrixBlock1): stages a and b on the device, runs the kernel over a
// BLOCK_SIZE x BLOCK_SIZE layout, copies the product into c. Returns 0;
// errors abort inside checkCuda.
int MatrixBlock(float* c, const float* a, const float* b)
{
    int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    float* dev_a = 0;
    float* dev_b = 0;
    float* dev_c = 0;
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(MATRIX_SIZE / block.x, MATRIX_SIZE / block.y);
    checkCuda( hipSetDevice(0) );
    checkCuda( hipMalloc((void**)&dev_c, numBytes) );
    checkCuda( hipMalloc((void**)&dev_a, numBytes) );
    checkCuda( hipMalloc((void**)&dev_b, numBytes) );
    checkCuda( hipMemcpy(dev_a, a, numBytes, hipMemcpyHostToDevice) );
    checkCuda( hipMemcpy(dev_b, b, numBytes, hipMemcpyHostToDevice) );
    hipLaunchKernelGGL(( matrixDevicBlock) , dim3(grid), dim3(block), 0, 0, dev_c, dev_a, dev_b);
    checkCuda( hipDeviceSynchronize() );
    checkCuda( hipGetLastError() );
    checkCuda( hipMemcpy(c, dev_c, numBytes, hipMemcpyDeviceToHost) );
    hipFree( dev_c );
    hipFree( dev_a );
    hipFree( dev_b );
    return 0;
}
// Host wrapper: copies a and b to the device, runs matrixDevicBlock over a
// BLOCK_SIZE x BLOCK_SIZE thread layout, and copies the product back into c.
void MatrixBlock1(float* c, const float* a, const float* b)
{
    const int bytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    float* d_a = 0;
    float* d_b = 0;
    float* d_c = 0;
    checkCuda(hipSetDevice(0));
    checkCuda(hipMalloc((void**)&d_c, bytes));
    checkCuda(hipMalloc((void**)&d_a, bytes));
    checkCuda(hipMalloc((void**)&d_b, bytes));
    checkCuda(hipMemcpy(d_a, a, bytes, hipMemcpyHostToDevice));
    checkCuda(hipMemcpy(d_b, b, bytes, hipMemcpyHostToDevice));
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocks(MATRIX_SIZE / threads.x, MATRIX_SIZE / threads.y);
    matrixDevicBlock << <blocks, threads >> > (d_c, d_a, d_b);
    checkCuda(hipDeviceSynchronize());
    checkCuda(hipGetLastError());
    checkCuda(hipMemcpy(c, d_c, bytes, hipMemcpyDeviceToHost));
    hipFree(d_c);
    hipFree(d_a);
    hipFree(d_b);
}
// Host wrapper for the matrixMulTiled kernel: stages a and b on the device,
// launches with BLOCK_SIZE x BLOCK_SIZE blocks, copies the product into c.
// Errors abort inside checkCuda.
void MatrixTiled(float* c, const float* a, const float* b)
{
    int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    float* dev_a = 0;
    float* dev_b = 0;
    float* dev_c = 0;
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(MATRIX_SIZE / block.x, MATRIX_SIZE / block.y);
    checkCuda( hipSetDevice(0) );
    checkCuda( hipMalloc((void**)&dev_c, numBytes) );
    checkCuda( hipMalloc((void**)&dev_a, numBytes) );
    checkCuda( hipMalloc((void**)&dev_b, numBytes) );
    checkCuda( hipMemcpy(dev_a, a, numBytes, hipMemcpyHostToDevice) );
    checkCuda( hipMemcpy(dev_b, b, numBytes, hipMemcpyHostToDevice) );
    hipLaunchKernelGGL(( matrixMulTiled) , dim3(grid), dim3(block), 0, 0, dev_c, dev_a, dev_b);
    checkCuda( hipDeviceSynchronize() );
    checkCuda( hipGetLastError() );
    checkCuda( hipMemcpy(c, dev_c, numBytes, hipMemcpyDeviceToHost) );
    hipFree( dev_c );
    hipFree( dev_a );
    hipFree( dev_b );
}
// Host wrapper for the matrixMultBank kernel: stages a and b on the device,
// launches with BLOCK_SIZE x BLOCK_SIZE blocks, copies the product into c.
// Returns 0; errors abort inside checkCuda.
int MatrixBank(float* c, const float* a, const float* b)
{
    int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    float* dev_a = 0;
    float* dev_b = 0;
    float* dev_c = 0;
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(MATRIX_SIZE / block.x, MATRIX_SIZE / block.y);
    checkCuda(hipSetDevice(0));
    checkCuda(hipMalloc((void**)&dev_c, numBytes));
    checkCuda(hipMalloc((void**)&dev_a, numBytes));
    checkCuda(hipMalloc((void**)&dev_b, numBytes));
    checkCuda(hipMemcpy(dev_a, a, numBytes, hipMemcpyHostToDevice));
    checkCuda(hipMemcpy(dev_b, b, numBytes, hipMemcpyHostToDevice));
    matrixMultBank << <grid, block >> > (dev_c, dev_a, dev_b);
    checkCuda(hipDeviceSynchronize());
    checkCuda(hipGetLastError());
    checkCuda(hipMemcpy(c, dev_c, numBytes, hipMemcpyDeviceToHost));
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);
    return 0;
}
// Splits the multiply across nStream streams: each stream asynchronously
// copies its row slice of a (and slice of b) to the device, launches
// matrixDeviceStream on that slice, and copies its slice of c back.
// PRECONDITION: a, b and c must be pinned host memory (the caller allocates
// them with hipHostMalloc) or the async copies degrade to synchronous ones.
void MatrixMulStream(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    const int nStream = 4; // number of HIP/CUDA streams
    int sizeMatrixStream = MATRIX_SIZE * MATRIX_SIZE / nStream; // elements per slice
    int numBytes = sizeMatrixStream * sizeof(float);
    BASE_TYPE* dev_a = 0;
    BASE_TYPE* dev_b = 0;
    BASE_TYPE* dev_c = 0;
    checkCuda( hipSetDevice(0));
    checkCuda( hipMalloc((void**)&dev_c, numBytes * nStream));
    checkCuda( hipMalloc((void**)&dev_a, numBytes * nStream));
    checkCuda( hipMalloc((void**)&dev_b, numBytes * nStream));
    hipStream_t stream[nStream];
    for (size_t i = 0; i < nStream; ++i)
        hipStreamCreate(&stream[i]);
    for (size_t i = 0; i < nStream; ++i)
    {
        hipMemcpyAsync(dev_a + i * sizeMatrixStream, a + i * sizeMatrixStream, numBytes, hipMemcpyHostToDevice, stream[i]);
        hipMemcpyAsync(dev_b + i * sizeMatrixStream, b + i * sizeMatrixStream, numBytes, hipMemcpyHostToDevice, stream[i]);
    }
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(MATRIX_SIZE / block.x , MATRIX_SIZE / block.y / nStream);
    for (size_t i = 0; i < nStream; ++i) // one row slice per stream
    {
        matrixDeviceStream << <grid, block, 0, stream[i] >> > (&dev_c[i * sizeMatrixStream], &dev_a[i * sizeMatrixStream], dev_b, MATRIX_SIZE / nStream);
    }
    checkCuda( hipDeviceSynchronize());
    checkCuda( hipGetLastError());
    for (size_t i = 0; i < nStream; ++i)
        hipMemcpyAsync(c + i * sizeMatrixStream, dev_c + i * sizeMatrixStream, numBytes, hipMemcpyDeviceToHost, stream[i]);
    // BUGFIX: the device buffers were freed (and the streams destroyed) while
    // the asynchronous device-to-host copies above could still be in flight;
    // drain every stream before releasing resources.
    for (size_t i = 0; i < nStream; ++i)
        checkCuda( hipStreamSynchronize(stream[i]));
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);
    for (size_t i = 0; i < nStream; ++i)
        hipStreamDestroy(stream[i]);
}
// Baseline host wrapper: stages a and b on the device, runs the matrixDevice
// kernel over a BLOCK_SIZE x BLOCK_SIZE layout, and copies the product back
// into c. Errors abort inside checkCuda.
void MatrixMul(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    const int byteCount = MATRIX_SIZE * MATRIX_SIZE * sizeof(BASE_TYPE);
    BASE_TYPE* d_a = 0;
    BASE_TYPE* d_b = 0;
    BASE_TYPE* d_c = 0;
    checkCuda(hipSetDevice(0));
    checkCuda(hipMalloc((void**)&d_c, byteCount));
    checkCuda(hipMalloc((void**)&d_a, byteCount));
    checkCuda(hipMalloc((void**)&d_b, byteCount));
    checkCuda(hipMemcpy(d_a, a, byteCount, hipMemcpyHostToDevice));
    checkCuda(hipMemcpy(d_b, b, byteCount, hipMemcpyHostToDevice));
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocks(MATRIX_SIZE / threads.x, MATRIX_SIZE / threads.y);
    matrixDevice << <blocks, threads >> > (d_c, d_a, d_b);
    checkCuda(hipDeviceSynchronize());
    checkCuda(hipGetLastError());
    checkCuda(hipMemcpy(c, d_c, byteCount, hipMemcpyDeviceToHost));
    checkCuda(hipFree(d_c));
    checkCuda(hipFree(d_a));
    checkCuda(hipFree(d_b));
}
// Host wrapper for the Muld kernel (sub-matrix variant): stages a and b on
// the device, launches with BLOCK_SIZE x BLOCK_SIZE blocks, copies the
// product into c. Errors abort inside checkCuda.
void MatrixMuld(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(BASE_TYPE);
    BASE_TYPE* dev_a = 0;
    BASE_TYPE* dev_b = 0;
    BASE_TYPE* dev_c = 0;
    checkCuda(hipSetDevice(0));
    checkCuda(hipMalloc((void**)&dev_c, numBytes));
    checkCuda(hipMalloc((void**)&dev_a, numBytes));
    checkCuda(hipMalloc((void**)&dev_b, numBytes));
    checkCuda(hipMemcpy(dev_a, a, numBytes, hipMemcpyHostToDevice));
    checkCuda(hipMemcpy(dev_b, b, numBytes, hipMemcpyHostToDevice));
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(MATRIX_SIZE / block.x, MATRIX_SIZE / block.y);
    Muld << <grid, block >> > (dev_c, dev_a, dev_b);
    checkCuda(hipDeviceSynchronize());
    checkCuda(hipGetLastError());
    checkCuda(hipMemcpy(c, dev_c, numBytes, hipMemcpyDeviceToHost));
    checkCuda(hipFree(dev_c));
    checkCuda(hipFree(dev_a));
    checkCuda(hipFree(dev_b));
}
// Intended cuBLAS/hipBLAS-based multiply; the handle setup and the
// hipblasSgemm call are currently commented out, so no computation runs.
// NOTE(review): as written, dev_c is never written on the device, so `c`
// receives uninitialized device memory -- re-enable the BLAS calls before
// using this path.
void MatrixMulCublas(float* c, const float* a, const float* b)
{
    int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    // hipblasHandle_t handle;
    //hipblasCreate(&handle);
    float* dev_a = 0;
    float* dev_b = 0;
    float* dev_c = 0;
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(MATRIX_SIZE / block.x, MATRIX_SIZE / block.y);
    checkCuda( hipSetDevice(0));
    checkCuda( hipMalloc((void**)&dev_c, numBytes));
    checkCuda( hipMalloc((void**)&dev_a, numBytes));
    checkCuda( hipMalloc((void**)&dev_b, numBytes));
    checkCuda( hipMemcpy(dev_a, a, numBytes, hipMemcpyHostToDevice));
    checkCuda( hipMemcpy(dev_b, b, numBytes, hipMemcpyHostToDevice));
    float alpha = 1.0f;
    float beta = 0.0f;
    //hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, MATRIX_SIZE, MATRIX_SIZE, MATRIX_SIZE, &alpha, dev_a, MATRIX_SIZE, dev_b, MATRIX_SIZE, &beta, dev_c, MATRIX_SIZE);
    //hipblasDestroy(handle);
    checkCuda( hipDeviceSynchronize());
    checkCuda( hipGetLastError());
    checkCuda( hipMemcpy(c, dev_c, numBytes, hipMemcpyDeviceToHost));
    hipFree( dev_c );
    hipFree( dev_a );
    hipFree( dev_b );
}
// Computes c = a * b: b is transposed on the device first, then the
// WPT-outputs-per-thread tiled kernel MatrixBuffB consumes the transposed
// copy for coalesced loads. Launch geometry is identical to the original
// version (the old `blockT`/`gridT` names were swapped relative to their use).
void matrixDeviceBuffB(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    const int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(BASE_TYPE);
    BASE_TYPE* devA = 0;
    BASE_TYPE* devB = 0;
    BASE_TYPE* devC = 0;
    checkCuda(hipSetDevice(0));
    checkCuda(hipMalloc((void**)&devC, numBytes));
    checkCuda(hipMalloc((void**)&devA, numBytes));
    checkCuda(hipMalloc((void**)&devB, numBytes));
    checkCuda(hipMemcpy(devA, a, numBytes, hipMemcpyHostToDevice));
    checkCuda(hipMemcpy(devB, b, numBytes, hipMemcpyHostToDevice));
    // Device scratch buffer receiving the transpose of b.
    gpu_buf_t bT(MATRIX_SIZE * MATRIX_SIZE);
    // Transpose launch: TRX x TRY threads per block, one tile per block.
    dim3 tThreads(TRX, TRY);
    dim3 tBlocks((MATRIX_SIZE + TRX - 1) / TRX, (MATRIX_SIZE + TRY - 1) / TRY);
    // Multiply launch: TS x (TS / WPT) threads per block.
    dim3 mThreads(TS, TS / WPT);
    dim3 mBlocks((MATRIX_SIZE + TS - 1) / TS, (MATRIX_SIZE + TS - 1) / TS);
    transposeBuffB << <tBlocks, tThreads >> > (MATRIX_SIZE, MATRIX_SIZE, devB, bT.p);
    MatrixBuffB << <mBlocks, mThreads >> > (MATRIX_SIZE, MATRIX_SIZE, MATRIX_SIZE, devA, bT.p, devC);
    checkCuda(hipDeviceSynchronize());
    checkCuda(hipGetLastError());
    checkCuda(hipMemcpy(c, devC, numBytes, hipMemcpyDeviceToHost));
    checkCuda(hipFree(devC));
    checkCuda(hipFree(devA));
    checkCuda(hipFree(devB));
}
// Computes c = a * b for the register-blocked `gemm` kernel, which expects a
// pre-transposed A (it reads A as A[k * M + m]); the `transpose` kernel
// produces that copy first. Launch geometry is identical to the original
// version (the old `blockT`/`gridT` names were swapped relative to their use).
void matrixDeviceBuffA(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    const int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(BASE_TYPE);
    BASE_TYPE* devA = 0;
    BASE_TYPE* devB = 0;
    BASE_TYPE* devC = 0;
    checkCuda(hipSetDevice(0));
    checkCuda(hipMalloc((void**)&devC, numBytes));
    checkCuda(hipMalloc((void**)&devA, numBytes));
    checkCuda(hipMalloc((void**)&devB, numBytes));
    checkCuda(hipMemcpy(devA, a, numBytes, hipMemcpyHostToDevice));
    checkCuda(hipMemcpy(devB, b, numBytes, hipMemcpyHostToDevice));
    // Device scratch buffer receiving the transpose of a.
    gpu_buf_t aT(MATRIX_SIZE * MATRIX_SIZE);
    // Transpose launch: TRX x TRY threads per block.
    dim3 tThreads(TRX, TRY);
    dim3 tBlocks((MATRIX_SIZE + TRX - 1) / TRX, (MATRIX_SIZE + TRY - 1) / TRY);
    // GEMM launch: each block computes a TSM x TSN tile with
    // (TSM/WPTM) x (TSN/WPTN) threads.
    dim3 mThreads(TSM / WPTM, TSN / WPTN);
    dim3 mBlocks(MATRIX_SIZE / TSM, MATRIX_SIZE / TSN);
    transpose << <tBlocks, tThreads >> > (MATRIX_SIZE, MATRIX_SIZE, devA, aT.p);
    gemm << <mBlocks, mThreads >> > (MATRIX_SIZE, MATRIX_SIZE, MATRIX_SIZE, aT.p, devB, devC);
    checkCuda(hipDeviceSynchronize());
    checkCuda(hipGetLastError());
    checkCuda(hipMemcpy(c, devC, numBytes, hipMemcpyDeviceToHost));
    checkCuda(hipFree(devC));
    checkCuda(hipFree(devA));
    checkCuda(hipFree(devB));
}
//
//void df() {
// // Size (in bytes) of matrix
// size_t bytes = N * N * sizeof(int);
//
// // Host vectors
// vector<int> h_a(N * N);
// vector<int> h_b(N * N);
// vector<int> h_c(N * N);
//
// // Initialize matrices
// generate(h_a.begin(), h_a.end(), []() { return rand() % 100; });
// generate(h_b.begin(), h_b.end(), []() { return rand() % 100; });
//
// // Allocate device memory
// int* d_a, * d_b, * d_c;
// hipMalloc(&d_a, bytes);
// hipMalloc(&d_b, bytes);
// hipMalloc(&d_c, bytes);
//
// // Copy data to the device
// hipMemcpy(d_a, h_a.data(), bytes, hipMemcpyHostToDevice);
// hipMemcpy(d_b, h_b.data(), bytes, hipMemcpyHostToDevice);
//
// // Threads per CTA dimension
// int THREADS = 32;
//
// // Blocks per grid dimension (assumes THREADS divides N evenly)
// int BLOCKS = N / THREADS;
//
// // Use dim3 structs for block and grid dimensions
// dim3 threads(THREADS, THREADS);
// dim3 blocks(BLOCKS, BLOCKS);
//
// // Launch kernel
// matrixMul << <blocks, threads >> > (d_a, d_b, d_c);
//
// // Copy back to the host
// hipMemcpy(h_c.data(), d_c, bytes, hipMemcpyDeviceToHost);
//
// // Check result
// verify_result(h_a, h_b, h_c);
//
// cout << "COMPLETED SUCCESSFULLY\n";
//
// // Free memory on device
// hipFree(d_a);
// hipFree(d_b);
// hipFree(d_c);
//} | 59bcccb1f74c46f6e285f52d32e5d8f0773589cc.cu | #include "device.h"
// CUDA / vendor headers
#include "curand.h"
#include "cublas_v2.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Standard library. Note: the previous "#include < vector >" spelling (spaces
// inside the angle brackets) is MSVC-only — GCC/Clang treat the spaces as part
// of the header name and fail to find it.
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <immintrin.h>
#include <iostream>
#include <stdio.h>
#include <string>
#include <time.h>
#include <vector>
// Visual Studio outlining shortcuts: CTRL+M, CTRL+O
// Benchmark-wide configuration.
#define NUM_STREAM 4
#define BLOCK_SIZE 32
// NOTE(review): the expansion is unparenthesized (512 * 16 * 2 = 16384).
// Safe for the usages in this file, but fragile — prefer (512 * 16 * 2).
#define MATRIX_SIZE 512 * 16 * 2
#define BASE_TYPE float
// Repeats the following statement/block _loop times with loop index i.
#define LOOP_I(_loop) for(int i=0; i < _loop; i++)
using std::vector;
using std::cout;
using std::generate;
// Pass-through wrapper used around every CUDA runtime call in this file.
// In debug builds (DEBUG/_DEBUG) it prints the error string and asserts;
// in release builds the check compiles away and errors are silently ignored.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
    if (result != cudaSuccess) {
        fprintf(stderr, "CUDA Runtime Error: %s\n",
            cudaGetErrorString(result));
        assert(result == cudaSuccess);
    }
#endif
    return result;
}
// Lightweight matrix descriptor used by the sub-matrix kernels
// (GetSubMatrix / matrixDeviceSub). `elements` points at device memory and is
// not owned; `stride` is the row pitch in elements.
typedef struct {
    int row = MATRIX_SIZE;
    int col = MATRIX_SIZE;
    float* elements;  // device pointer; caller allocates and frees
    int stride = 2;   // NOTE(review): default 2 looks wrong for a row pitch — callers appear expected to overwrite it
} Matrix;
// Forward declarations of the host-side benchmark wrappers used by main().
// All operate on dense MATRIX_SIZE x MATRIX_SIZE row-major buffers.
void MatrixMul(float* c, const float* a, const float* b);
int MatrixBlock(float* c, const float* a, const float* b);
void MatrixBlock1(float* c, const float* a, const float* b);
int MatrixBank(float* c, const float* a, const float* b);
void MatrixTiled(float* c, const float* a, const float* b);
void MatrixSub(Matrix C, const Matrix A, const Matrix B);
void MatrixPinned(float* c, const float* a, const float* b);
void MatrixMulStream(float* c, const float* a, const float* b);
void matrixDeviceBuffA(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b);
void matrixDeviceBuffB(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b);
void MatrixMuld(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b);
// Runs one of the matrixSmemN kernels; `stream` scales the y block dimension.
void matrixSmemm(void smem(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b), BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b, const int stream);
// Reference CPU implementation: c = a * b, naive ijk triple loop over
// row-major MATRIX_SIZE x MATRIX_SIZE matrices.
void matrixHost(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    for (size_t row = 0; row < MATRIX_SIZE; ++row)
    {
        const BASE_TYPE* aRow = a + row * MATRIX_SIZE;
        BASE_TYPE* cRow = c + row * MATRIX_SIZE;
        for (size_t col = 0; col < MATRIX_SIZE; ++col)
        {
            // Dot product of row `row` of a with column `col` of b,
            // accumulated in a local before the single store.
            BASE_TYPE acc = 0;
            for (size_t k = 0; k < MATRIX_SIZE; ++k)
                acc += aRow[k] * b[k * MATRIX_SIZE + col];
            cRow[col] = acc;
        }
    }
}
// Cache-friendlier CPU multiply in ikj order: the middle loop walks k so the
// innermost loop streams one contiguous row of b while updating one row of c.
void matrixHostImproved(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    for (size_t i = 0; i < MATRIX_SIZE; ++i)
    {
        BASE_TYPE* cRow = c + i * MATRIX_SIZE;
        // Clear the output row before accumulating into it.
        for (size_t j = 0; j < MATRIX_SIZE; ++j)
            cRow[j] = 0;
        for (size_t k = 0; k < MATRIX_SIZE; ++k)
        {
            const BASE_TYPE aik = a[i * MATRIX_SIZE + k];
            const BASE_TYPE* bRow = b + k * MATRIX_SIZE;
            for (size_t j = 0; j < MATRIX_SIZE; ++j)
                cRow[j] += aik * bRow[j];
        }
    }
}
// AVX/FMA CPU multiply in ikj order: broadcasts one element of A and FMAs it
// against a row of B, 16 floats per inner step. Requires MATRIX_SIZE % 16 == 0;
// uses unaligned loads/stores, so buffers need no special alignment.
// NOTE(review): B is taken as non-const although it is only read.
void matrixVectorise(float* C, const float* A, float* B)
{
    for (int i = 0; i < MATRIX_SIZE; ++i)
    {
        float* c = C + i * MATRIX_SIZE;
        // Zero this output row, 8 floats at a time.
        for (int j = 0; j < MATRIX_SIZE; j += 8)
            _mm256_storeu_ps(c + j + 0, _mm256_setzero_ps());
        for (int k = 0; k < MATRIX_SIZE; ++k)
        {
            const float* b = B + k * MATRIX_SIZE;
            __m256 a = _mm256_set1_ps(A[i * MATRIX_SIZE + k]);  // broadcast A(i,k)
            for (int j = 0; j < MATRIX_SIZE; j += 16)
            {
                _mm256_storeu_ps(c + j + 0, _mm256_fmadd_ps(a, _mm256_loadu_ps(b + j + 0), _mm256_loadu_ps(c + j + 0)));
                _mm256_storeu_ps(c + j + 8, _mm256_fmadd_ps(a, _mm256_loadu_ps(b + j + 8), _mm256_loadu_ps(c + j + 8)));
            }
        }
    }
}
// 6x16 register-blocked FMA micro-kernel: C[0..5][0..15] += A-panel * B-panel
// over K steps. Twelve __m256 accumulators c<row><half> (half 0 = columns
// 0-7, half 1 = columns 8-15) keep the whole 6x16 tile in registers.
// A advances by `step` floats per k with `lda` between its six rows; B
// advances by a full `ldb` row per k. Requires AVX2/FMA.
// NOTE(review): experimental "1"-suffixed copy of micro_6x16 (declared
// elsewhere, apparently in device.h).
void micro_6x161(int K, const float* A, int lda, int step, const float* B, int ldb, float* C, int ldc)
{
    __m256 c00 = _mm256_setzero_ps();
    __m256 c10 = _mm256_setzero_ps();
    __m256 c20 = _mm256_setzero_ps();
    __m256 c30 = _mm256_setzero_ps();
    __m256 c40 = _mm256_setzero_ps();
    __m256 c50 = _mm256_setzero_ps();
    __m256 c01 = _mm256_setzero_ps();
    __m256 c11 = _mm256_setzero_ps();
    __m256 c21 = _mm256_setzero_ps();
    __m256 c31 = _mm256_setzero_ps();
    __m256 c41 = _mm256_setzero_ps();
    __m256 c51 = _mm256_setzero_ps();
    // Per-row offsets into the six-row A panel.
    const int offset0 = lda * 0;
    const int offset1 = lda * 1;
    const int offset2 = lda * 2;
    const int offset3 = lda * 3;
    const int offset4 = lda * 4;
    const int offset5 = lda * 5;
    __m256 b0, b1, b2, a0, a1, a2;  // NOTE(review): b2 and a2 are never used
    for (int k = 0; k < K; k++)
    {
        // Load one 16-float row of B, then broadcast A elements two rows at a time.
        b0 = _mm256_loadu_ps(B + 0);
        b1 = _mm256_loadu_ps(B + 8);
        a0 = _mm256_set1_ps(A[offset0]);
        a1 = _mm256_set1_ps(A[offset1]);
        c00 = _mm256_fmadd_ps(a0, b0, c00);
        c01 = _mm256_fmadd_ps(a0, b1, c01);
        c10 = _mm256_fmadd_ps(a1, b0, c10);
        c11 = _mm256_fmadd_ps(a1, b1, c11);
        a0 = _mm256_set1_ps(A[offset2]);
        a1 = _mm256_set1_ps(A[offset3]);
        c20 = _mm256_fmadd_ps(a0, b0, c20);
        c21 = _mm256_fmadd_ps(a0, b1, c21);
        c30 = _mm256_fmadd_ps(a1, b0, c30);
        c31 = _mm256_fmadd_ps(a1, b1, c31);
        a0 = _mm256_set1_ps(A[offset4]);
        a1 = _mm256_set1_ps(A[offset5]);
        c40 = _mm256_fmadd_ps(a0, b0, c40);
        c41 = _mm256_fmadd_ps(a0, b1, c41);
        c50 = _mm256_fmadd_ps(a1, b0, c50);
        c51 = _mm256_fmadd_ps(a1, b1, c51);
        B += ldb; A += step;
    }
    // Flush the accumulators: C row r, columns 0-7 then 8-15, row pitch ldc.
    _mm256_storeu_ps(C + 0, _mm256_add_ps(c00, _mm256_loadu_ps(C + 0)));
    _mm256_storeu_ps(C + 8, _mm256_add_ps(c01, _mm256_loadu_ps(C + 8)));
    C += ldc;
    _mm256_storeu_ps(C + 0, _mm256_add_ps(c10, _mm256_loadu_ps(C + 0)));
    _mm256_storeu_ps(C + 8, _mm256_add_ps(c11, _mm256_loadu_ps(C + 8)));
    C += ldc;
    _mm256_storeu_ps(C + 0, _mm256_add_ps(c20, _mm256_loadu_ps(C + 0)));
    _mm256_storeu_ps(C + 8, _mm256_add_ps(c21, _mm256_loadu_ps(C + 8)));
    C += ldc;
    _mm256_storeu_ps(C + 0, _mm256_add_ps(c30, _mm256_loadu_ps(C + 0)));
    _mm256_storeu_ps(C + 8, _mm256_add_ps(c31, _mm256_loadu_ps(C + 8)));
    C += ldc;
    _mm256_storeu_ps(C + 0, _mm256_add_ps(c40, _mm256_loadu_ps(C + 0)));
    _mm256_storeu_ps(C + 8, _mm256_add_ps(c41, _mm256_loadu_ps(C + 8)));
    C += ldc;
    _mm256_storeu_ps(C + 0, _mm256_add_ps(c50, _mm256_loadu_ps(C + 0)));
    _mm256_storeu_ps(C + 8, _mm256_add_ps(c51, _mm256_loadu_ps(C + 8)));
}
// Zeroes an M x N tile of C with row pitch ldc. N is processed in chunks of
// 8 floats, matching the original 8-wide AVX store: a full chunk is written
// for every j < N, so N is expected to be a multiple of 8 (a non-multiple
// would over-write past N, exactly as the vectorized version did). The
// fixed-length scalar inner loop is trivially auto-vectorized.
void init_c1(int M, int N, float* C, int ldc)
{
    for (int i = 0; i < M; ++i, C += ldc)
        for (int j = 0; j < N; j += 8)
            for (int lane = 0; lane < 8; ++lane)
                C[j + lane] = 0.0f;
}
// Drives the 6x16 register-blocked micro-kernel across the full matrix:
// zeroes each 6x16 tile of C and accumulates A*B into it.
// NOTE(review): calls init_c / micro_6x16, which are not defined in this file
// (only init_c1 / micro_6x161 are) — presumably declared in device.h.
// NOTE(review): MATRIX_SIZE = 512*16*2 = 16384 is not a multiple of 6, so the
// final i iteration (i = 16380) touches rows past the end of C — out of
// bounds. Confirm the intended matrix size or pad the row count.
void matrixCore(BASE_TYPE* C, const BASE_TYPE* A, const BASE_TYPE* B)
{
    for (int i = 0; i < MATRIX_SIZE; i += 6)
    {
        for (int j = 0; j < MATRIX_SIZE; j += 16)
        {
            init_c(6, 16, C + i * MATRIX_SIZE + j, MATRIX_SIZE);
            micro_6x16(MATRIX_SIZE, A + i * MATRIX_SIZE, MATRIX_SIZE, 1, B + j, MATRIX_SIZE, C + i * MATRIX_SIZE + j, MATRIX_SIZE);
        }
    }
}
// RAII owner of a 64-byte-aligned float buffer used as packing scratch by the
// CPU micro-kernels. Fixes over the previous version: the element size was a
// magic "* 4" (now sizeof(float)); the allocation was cast through the
// BASE_TYPE macro although p is declared float*; the init list was out of
// declaration order (-Wreorder); and the type was copyable despite owning a
// raw pointer, which would have double-freed in the destructors.
struct buf_t
{
    float* p;  // owned, 64-byte aligned
    int n;     // element count
    buf_t(int size) : p((float*)_mm_malloc((size_t)size * sizeof(float), 64)), n(size) {}
    ~buf_t() { _mm_free(p); }
    // Owning raw pointer: copying would free the same block twice.
    buf_t(const buf_t&) = delete;
    buf_t& operator=(const buf_t&) = delete;
};
// Packs a K x 16 panel of B (row pitch ldb) into a contiguous K*16 buffer,
// one 16-float row per k — the layout micro_6x161 consumes. Equivalent to the
// original pair of 8-wide unaligned AVX copies; the fixed-length scalar copy
// is trivially auto-vectorized by the compiler.
void reorder_b_161(int K, const float* B, int ldb, float* bufB)
{
    for (int k = 0; k < K; ++k, B += ldb, bufB += 16)
    {
        for (int t = 0; t < 16; ++t)
            bufB[t] = B[t];
    }
}
// Cache-blocked CPU multiply: for each 16-column strip of B, packs the strip
// into a contiguous buffer once, then runs the 6x16 micro-kernel down the rows.
// NOTE(review): calls reorder_b_16 / init_c / micro_6x16, which are not
// defined in this file (only the "1"-suffixed copies are) — presumably from
// device.h. As in matrixCore, MATRIX_SIZE = 16384 is not a multiple of 6, so
// the last row block overruns C.
void matrixBuf(float* C,const float* A,const float* B)
{
    for (int j = 0; j < MATRIX_SIZE; j += 16)
    {
        // Fresh packing buffer per strip (allocated/freed each iteration).
        buf_t bufB(16 * MATRIX_SIZE);
        reorder_b_16(MATRIX_SIZE, B + j, MATRIX_SIZE, bufB.p);
        for (int i = 0; i < MATRIX_SIZE; i += 6)
        {
            init_c(6, 16, C + i * MATRIX_SIZE + j, MATRIX_SIZE);
            micro_6x16(MATRIX_SIZE, A + i * MATRIX_SIZE, MATRIX_SIZE, 1, bufB.p, 16, C + i * MATRIX_SIZE + j, MATRIX_SIZE);
        }
    }
}
// Middle blocking level used by matrixL1: multiplies one K-deep slice of the
// matrices, packing each 16-column strip of B into caller-provided scratch.
// NOTE(review): the i/j loops run over MATRIX_SIZE rather than bounds derived
// from the arguments — only valid for full MATRIX_SIZE matrices. Calls
// reorder_b_16 / micro_6x16, which are not defined in this file.
void macro(BASE_TYPE* C, int K, int ldc, const BASE_TYPE* A, int lda, const BASE_TYPE* B, int ldb, BASE_TYPE* bufB)
{
    for (int j = 0; j < MATRIX_SIZE; j += 16)
    {
        reorder_b_16(K, B + j, ldb, bufB);
        for (int i = 0; i < MATRIX_SIZE; i += 6)
            micro_6x16(K, A + i * lda, lda, 1, bufB, 16, C + i * ldc + j, ldc);
    }
}
// Outer (L1-sized) blocking: splits K into chunks small enough that one packed
// 16 x mK strip of B stays resident in a 384 KB L1 budget, zeroing C on the
// first chunk and accumulating on the rest.
// NOTE(review): M and N are accepted but the inner `macro` loops over
// MATRIX_SIZE — confirm before using with M, N != MATRIX_SIZE.
void matrixL1(BASE_TYPE* C, const BASE_TYPE* A, const BASE_TYPE* B, int M, int N, int K)
{
    const int L1 = 384 * 1024;            // assumed L1 budget, bytes
    int mK = std::min(L1 / 4 / 16, K);    // k-depth whose packed strip fits the budget
    buf_t bufB(16 * mK);
    for (int k = 0; k < K; k += mK)
    {
        int dK = std::min(K, k + mK) - k; // depth of this chunk (last may be short)
        if (k == 0)
            init_c(M, N, C, N);           // zero C once, before the first accumulation
        macro(C, dK, N, A + k, K, B + k * N, N, bufB.p);
    }
}
// Naive version, no shared memory
// Naive global-memory kernel: each thread computes one element of c = a * b
// for row-major MATRIX_SIZE x MATRIX_SIZE matrices. Launch with a grid
// covering MATRIX_SIZE x MATRIX_SIZE threads (x = row, y = column).
// Fixes over the previous version:
//  * the accumulator was an int, truncating float products;
//  * a and c used a row pitch of MATRIX_SIZE/2 while b and the k-loop used
//    MATRIX_SIZE, so output rows overlapped and the result was wrong;
//  * the redundant pre-zero store of c was removed and a bounds guard added
//    so partial grids are safe.
__global__ void matrixDevice(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if (i >= MATRIX_SIZE || j >= MATRIX_SIZE)
        return;
    BASE_TYPE sum = 0;
    for (size_t k = 0; k < MATRIX_SIZE; k++)
    {
        sum += a[i * MATRIX_SIZE + k] * b[j + MATRIX_SIZE * k];
    }
    c[i * MATRIX_SIZE + j] = sum;
}
// Variant of matrixDevice used by the multi-stream benchmark: `stream` is the
// row pitch applied to a and c (each stream appears to process a slice whose
// rows are `stream` elements long — TODO confirm against MatrixMulStream).
// Fixes over the previous version: the accumulator was an int, truncating
// float products (now BASE_TYPE), and the redundant pre-zero store of c was
// removed (it was always overwritten by the final store).
__global__ void matrixDeviceStream(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b, const int stream)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    BASE_TYPE sum = 0;
    for (size_t k = 0; k < MATRIX_SIZE; k++)
    {
        sum += a[i * stream + k] * b[j + MATRIX_SIZE * k];
    }
    c[i * stream + j] = sum;
}
// GPU port of matrixHostImproved (ikj order).
// NOTE(review): this kernel races as written — every thread sharing a row i
// but with different j performs non-atomic read-modify-writes on the
// overlapping range C[j .. j+MATRIX_SIZE), and the zeroing loop clears
// elements other threads may already have accumulated into. Results are only
// well-defined with a single thread along y. `sum` is declared but unused.
__global__ void matrixDeviceV1(float* c, const float* a, const float* b)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    float* C = c + i * MATRIX_SIZE;
    // Zero this thread's window of the output row.
    for (size_t m = j; m < j+MATRIX_SIZE; ++m)
    {
        C[m] = 0;
    }
    int sum = 0;  // NOTE(review): never used
    for (size_t k = 0; k < MATRIX_SIZE; k++)
    {
        const float* B = b + k * MATRIX_SIZE;
        float A = a[i * MATRIX_SIZE + k];
        for (size_t m = j; m < j+MATRIX_SIZE; ++m)
        {
            C[m] += A * B[m];
        }
    }
}
// Reads A[row][col] using A's row pitch (stride).
__device__ float GetElement(const Matrix A, int row, int col ) {
    return A.elements[row * A.stride + col];
}
// Writes A[row][col] = value using A's row pitch (stride).
__device__ void SetElement(Matrix A, int row, int col, float value)
{
    A.elements[row * A.stride + col] = value;
}
// Returns a BLOCK_SIZE x BLOCK_SIZE view into A at block coordinates
// (row, col); the view aliases A's storage (no copy) and keeps A's stride.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix ASub;
    ASub.row = BLOCK_SIZE;
    ASub.col = BLOCK_SIZE;
    ASub.stride = A.stride;
    ASub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
    return ASub;
}
// NOTE(review): this declaration names its parameters (A, B, C) while the
// definition below uses (C, A, B); all three types match so it compiles, but
// the names are misleading at call sites — the definition's order is the one
// that matters.
__global__ void matrixDeviceSub(Matrix A, Matrix B, Matrix C);
void MatrixMulCublas(float* c, const float* a, const float* b);
// Shared-memory version
// Shared-memory tiled multiply over Matrix descriptors (CUDA Programming
// Guide style): each block computes one BLOCK_SIZE x BLOCK_SIZE sub-matrix of
// C, streaming tile pairs of A and B through shared memory.
// Preconditions: BLOCK_SIZE x BLOCK_SIZE thread block, A.row a multiple of
// BLOCK_SIZE, and each Matrix.stride set to its real row pitch.
__global__ void matrixDeviceSub(Matrix C, Matrix A, Matrix B) {
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    // View of the output tile owned by this block.
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
    float Cvalue = 0;
    int row = threadIdx.y;
    int col = threadIdx.x;
    for (size_t m = 0; m < (A.row / BLOCK_SIZE); ++m)
    {
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        Matrix Bsub = GetSubMatrix(B, m, blockCol);
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Each thread stages one element of each tile.
        As[row][col] = GetElement(Asub, row, col);
        Bs[row][col] = GetElement(Bsub, row, col);
        __syncthreads();  // tiles fully resident before use
        for (size_t e = 0; e < BLOCK_SIZE; ++e)
        {
            Cvalue += As[row][e] * Bs[e][col];
        }
        __syncthreads();  // done with tiles before they are overwritten
    }
    SetElement(Csub, row, col, Cvalue);
}
// Edge-guarded tiled multiply: unlike the other variants in this file, this
// one handles MATRIX_SIZE that is not a multiple of BLOCK_SIZE by
// zero-padding out-of-range tile elements and guarding the final store.
// Launch with BLOCK_SIZE x BLOCK_SIZE threads.
__global__ void matrixDevicBlock(BASE_TYPE* C, BASE_TYPE* A, BASE_TYPE* B)
{
    BASE_TYPE CValue = 0;
    int Row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int Col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    __shared__ BASE_TYPE As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ BASE_TYPE Bs[BLOCK_SIZE][BLOCK_SIZE];
    // ceil(MATRIX_SIZE / BLOCK_SIZE) tile passes.
    for (int k = 0; k < (BLOCK_SIZE + MATRIX_SIZE - 1) / BLOCK_SIZE; k++) {
        // Load this thread's element of the A tile, zero-padding past the edge.
        if (k * BLOCK_SIZE + threadIdx.x < MATRIX_SIZE && Row < MATRIX_SIZE)
            As[threadIdx.y][threadIdx.x] = A[Row * MATRIX_SIZE + k * BLOCK_SIZE + threadIdx.x];
        else
            As[threadIdx.y][threadIdx.x] = 0.0;
        // Same for the B tile.
        if (k * BLOCK_SIZE + threadIdx.y < MATRIX_SIZE && Col < MATRIX_SIZE)
            Bs[threadIdx.y][threadIdx.x] = B[(k * BLOCK_SIZE + threadIdx.y) * MATRIX_SIZE + Col];
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0;
        __syncthreads();
        for (int n = 0; n < BLOCK_SIZE; ++n)
            CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
        __syncthreads();
    }
    // Guarded store: threads past the matrix edge write nothing.
    if (Row < MATRIX_SIZE && Col < MATRIX_SIZE)
        C[((blockIdx.y * blockDim.y + threadIdx.y) * MATRIX_SIZE) + blockIdx.x * blockDim.x + threadIdx.x] = CValue;
}
// Shared-memory tiled multiply following the classic CUDA C Programming Guide
// "Muld" sample: C = A * B with BLOCK_SIZE x BLOCK_SIZE tiles.
// Preconditions: BLOCK_SIZE x BLOCK_SIZE thread block and MATRIX_SIZE a
// multiple of BLOCK_SIZE (no edge guards).
__global__ void Muld(float* C, const float* A, const float* B)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Index of the first sub-matrix of A processed by the block
    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd = aBegin + MATRIX_SIZE - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep = BLOCK_SIZE * MATRIX_SIZE;
    // The element of the block sub-matrix that is computed
    // by the thread
    float Csub = 0;
    // Loop over all the sub-matrices of A and B required to
    // compute the block sub-matrix
    for (int a = aBegin, b = bBegin;
        a <= aEnd;
        a += aStep, b += bStep) {
        // Shared memory for the sub-matrix of A
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        // Shared memory for the sub-matrix of B
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Load the matrices from global memory to shared memory;
        // each thread loads one element of each matrix
        As[ty][tx] = A[a + MATRIX_SIZE * ty + tx];
        Bs[ty][tx] = B[b + MATRIX_SIZE * ty + tx];
        // Synchronize to make sure the matrices are loaded
        __syncthreads();
        // Multiply the two matrices together;
        // each thread computes one element
        // of the block sub-matrix
        for (int k = 0; k < BLOCK_SIZE; ++k)
            Csub += As[ty][k] * Bs[k][tx];
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write the block sub-matrix to global memory;
    // each thread writes one element
    int c = MATRIX_SIZE * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + MATRIX_SIZE * ty + tx] = Csub;
}
// One output element per thread over N x N row-major matrices; named for its
// use with pinned host buffers in the benchmark.
// Fixes over the previous version: the dot product was accumulated directly
// in global memory (N read-modify-writes of c per element); it now uses a
// register and a single store, and a bounds guard makes partial grids safe.
// The final result for in-range threads is unchanged.
__global__ void matrixPin(float* __restrict c, const float* __restrict a, const float* __restrict b, int N) {
    // Compute each thread's global row and column index
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= N || col >= N)
        return;
    // Accumulate the dot product of row `row` of a and column `col` of b.
    float acc = 0.0f;
    for (int k = 0; k < N; k++) {
        acc += a[row * N + k] * b[k * N + col];
    }
    c[row * N + col] = acc;
}
// Element-wise vector add: dC[i] = dA[i] + dB[i] for every i < size.
__global__ void function(float* dA, float* dB, float* dC, int size)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    dC[idx] = dA[idx] + dB[idx];
}
// Pull out matrix and shared memory tile size
const int N = 1024;           // problem size used by the commented-out df() demo
const int SHMEM_SIZE = 1024;  // elements per shared tile in matrixMulTiled (fits a 32x32 float tile)
// Shared memory bank conflicts
// Tiled multiply in the style of Muld, with tile geometry taken from
// blockDim rather than BLOCK_SIZE constants (the static tiles still assume
// blockDim <= BLOCK_SIZE in each dimension). Comments translated from Russian.
__global__ void matrixMultBank(BASE_TYPE* C, const BASE_TYPE* A, const BASE_TYPE* B)
{
    // index of the first sub-matrix of A processed by this block
    int aBegin = MATRIX_SIZE * blockDim.y * blockIdx.y;
    // index of the last sub-matrix of A processed by this block
    int aEnd = aBegin + MATRIX_SIZE - 1;
    // step used to iterate over the sub-matrices of A
    int aStep = blockDim.x;
    // index of the first sub-matrix of B processed by this block
    int bBegin = blockDim.x * blockIdx.x;
    // step used to iterate over the sub-matrices of B
    int bStep = blockDim.y * MATRIX_SIZE;
    // shared-memory tiles for the sub-matrices
    __shared__ BASE_TYPE as[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ BASE_TYPE bs[BLOCK_SIZE][BLOCK_SIZE];
    // accumulator for this thread's output element
    BASE_TYPE sum = 0.0;
    for (int ia = aBegin, ib = bBegin; ia < aEnd; ia +=
        aStep, ib += bStep)
    {
        // load the A and B tiles from global memory into shared memory
        as[threadIdx.y][threadIdx.x] = A[ia + MATRIX_SIZE * threadIdx.y + threadIdx.x];
        bs[threadIdx.y][threadIdx.x] = B[ib + MATRIX_SIZE * threadIdx.y + threadIdx.x];
        // wait until the whole tile is resident
        __syncthreads();
        // multiply the two tiles
        for (int k = 0; k < blockDim.x; k++)
            sum += as[threadIdx.y][k] *
            bs[k][threadIdx.x];
        // wait before the tiles are overwritten on the next pass
        __syncthreads();
    }
    // linear index of the output element in global memory
    int ind = MATRIX_SIZE * (blockDim.y * blockIdx.y + threadIdx.y) + blockDim.x * blockIdx.x + threadIdx.x;
    // store the result
    C[ind] = sum;
}
// Shared-memory tiled multiply, variant 1: tiles are stored TRANSPOSED
// (as[tx][ty]) so the inner-product reads walk a tile column — the
// bank-conflict baseline that the Smem2/Smem3 variants are measured against.
// Requires a BLOCK_SIZE x BLOCK_SIZE block and MATRIX_SIZE % BLOCK_SIZE == 0.
__global__ void matrixSmem1(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    // first/last linear index of the A tiles consumed by this block
    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by, aEnd = aBegin + MATRIX_SIZE - 1;
    int bBegin = BLOCK_SIZE * bx, aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * MATRIX_SIZE;
    BASE_TYPE sum = 0.0f;
    __shared__ BASE_TYPE as[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ BASE_TYPE bs[BLOCK_SIZE][BLOCK_SIZE];
    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        // transposed tile store: [tx][ty]
        as[tx][ty] = a[ia + MATRIX_SIZE * ty + tx];
        bs[tx][ty] = b[ib + MATRIX_SIZE * ty + tx];
        __syncthreads();
        for (int k = 0; k < BLOCK_SIZE; k++) sum += as[k][ty] * bs[tx][k];
        __syncthreads();
    }
    c[aBegin + bBegin + ty * MATRIX_SIZE + tx] = sum;
}
// Variant 2: identical to matrixSmem1 except the shared tiles carry one
// element of padding on the minor dimension ([BLOCK_SIZE + 1]), which shifts
// consecutive rows into different banks and removes the conflicts of the
// transposed access pattern. Same preconditions as matrixSmem1.
__global__ void matrixSmem2(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by, aEnd = aBegin + MATRIX_SIZE - 1;
    int bBegin = BLOCK_SIZE * bx, aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * MATRIX_SIZE;
    BASE_TYPE sum = 0.0f;
    // +1 padding column breaks the bank conflicts of the [tx][ty] layout
    __shared__ BASE_TYPE as[BLOCK_SIZE][BLOCK_SIZE + 1];
    __shared__ BASE_TYPE bs[BLOCK_SIZE][BLOCK_SIZE + 1];
    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        as[tx][ty] = a[ia + MATRIX_SIZE * ty + tx];
        bs[tx][ty] = b[ib + MATRIX_SIZE * ty + tx];
        __syncthreads();
        for (int k = 0; k < BLOCK_SIZE; k++) sum += as[k][ty] * bs[tx][k];
        __syncthreads();
    }
    c[aBegin + bBegin + ty * MATRIX_SIZE + tx] = sum;
}
// Variant 3: tiles stored row-major (as[ty][tx]) with the conventional
// as[ty][k] * bs[k][tx] inner product — broadcast reads on the A tile and
// conflict-free stride-1 reads on the B tile. Same preconditions as Smem1.
__global__ void matrixSmem3(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by, aEnd = aBegin + MATRIX_SIZE - 1;
    int bBegin = BLOCK_SIZE * bx, aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * MATRIX_SIZE;
    BASE_TYPE sum = 0.0f;
    __shared__ BASE_TYPE as[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ BASE_TYPE bs[BLOCK_SIZE][BLOCK_SIZE];
    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        as[ty][tx] = a[ia + MATRIX_SIZE * ty + tx];
        bs[ty][tx] = b[ib + MATRIX_SIZE * ty + tx];
        __syncthreads();
        for (int k = 0; k < BLOCK_SIZE; k++)
            sum += as[ty][k] * bs[k][tx];
        __syncthreads();
    }
    c[aBegin + bBegin + ty * MATRIX_SIZE + tx] = sum;
}
// Variant 4: like matrixSmem3 but each thread produces TWO output elements
// (rows ty and ty + 16 of the tile). The hard-coded +16 offsets assume
// blockDim.y == 16 with BLOCK_SIZE == 32 — matches the matrixSmemm(..., 2)
// launch in main (TODO confirm matrixSmemm's geometry; its definition is not
// in this file).
__global__ void matrixSmem4(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by, aEnd = aBegin + MATRIX_SIZE - 1;
    int bBegin = BLOCK_SIZE * bx, aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * MATRIX_SIZE;
    BASE_TYPE sum1 = 0.0f, sum2 = 0.0f;
    __shared__ BASE_TYPE as[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ BASE_TYPE bs[BLOCK_SIZE][BLOCK_SIZE];
    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        // each thread stages two tile rows: ty and ty + 16
        as[ty][tx] = a[ia + MATRIX_SIZE * ty + tx];
        bs[ty][tx] = b[ib + MATRIX_SIZE * ty + tx];
        as[ty + 16][tx] = a[ia + MATRIX_SIZE * (ty + 16) + tx];
        bs[ty + 16][tx] = b[ib + MATRIX_SIZE * (ty + 16) + tx];
        __syncthreads();
        for (int k = 0; k < BLOCK_SIZE; k++) {
            sum1 += as[ty][k] * bs[k][tx];
            sum2 += as[ty + 16][k] * bs[k][tx];
        }
        __syncthreads();
    }
    c[aBegin + bBegin + ty * MATRIX_SIZE + tx] = sum1;
    c[aBegin + bBegin + (ty + 16) * MATRIX_SIZE + tx] = sum2;
}
// Variant 5: four output elements per thread (tile rows ty, ty+8, ty+16,
// ty+24). The hard-coded offsets assume blockDim.y == 8 with BLOCK_SIZE == 32
// — matches the matrixSmemm(..., 4) launch in main (TODO confirm
// matrixSmemm's geometry; its definition is not in this file).
__global__ void matrixSmem5(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;
    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by, aEnd = aBegin + MATRIX_SIZE - 1;
    int bBegin = BLOCK_SIZE * bx, aStep = BLOCK_SIZE, bStep = BLOCK_SIZE * MATRIX_SIZE;
    BASE_TYPE sum1 = 0.0f, sum2 = 0.0f, sum3 = 0.0f, sum4 = 0.0f;
    __shared__ BASE_TYPE as[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ BASE_TYPE bs[BLOCK_SIZE][BLOCK_SIZE];
    for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
    {
        // each thread stages four tile rows, 8 apart
        as[ty][tx] = a[ia + MATRIX_SIZE * ty + tx];
        bs[ty][tx] = b[ib + MATRIX_SIZE * ty + tx];
        as[ty + 8][tx] = a[ia + MATRIX_SIZE * (ty + 8) + tx];
        bs[ty + 8][tx] = b[ib + MATRIX_SIZE * (ty + 8) + tx];
        as[ty + 16][tx] = a[ia + MATRIX_SIZE * (ty + 16) + tx];
        bs[ty + 16][tx] = b[ib + MATRIX_SIZE * (ty + 16) + tx];
        as[ty + 24][tx] = a[ia + MATRIX_SIZE * (ty + 24) + tx];
        bs[ty + 24][tx] = b[ib + MATRIX_SIZE * (ty + 24) + tx];
        __syncthreads();
        for (int k = 0; k < BLOCK_SIZE; k++) {
            sum1 += as[ty][k] * bs[k][tx];
            sum2 += as[ty + 8][k] * bs[k][tx];
            sum3 += as[ty + 16][k] * bs[k][tx];
            sum4 += as[ty + 24][k] * bs[k][tx];
        }
        __syncthreads();
    }
    c[aBegin + bBegin + ty * MATRIX_SIZE + tx] = sum1;
    c[aBegin + bBegin + (ty + 8) * MATRIX_SIZE + tx] = sum2;
    c[aBegin + bBegin + (ty + 16) * MATRIX_SIZE + tx] = sum3;
    c[aBegin + bBegin + (ty + 24) * MATRIX_SIZE + tx] = sum4;
}
// Element-wise vector sum: c[i] = a[i] + b[i] for every i < N.
__global__ void vectorAdd(float* c, const float* a, const float* b, int N) {
    // Flat global thread index.
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // Tail threads past N do nothing.
    if (gid >= N)
        return;
    c[gid] = a[gid] + b[gid];
}
// Global-memory-only multiply: one output element per thread, no shared
// memory. Launch with BLOCK_SIZE x BLOCK_SIZE threads and MATRIX_SIZE a
// multiple of BLOCK_SIZE. Comments translated from Russian.
__global__ void kernel_global(float* c, const float* a, const float* b)
{
    int bx = blockIdx.x; // block index along x
    int by = blockIdx.y; // block index along y
    int tx = threadIdx.x; // thread index within the block, x
    int ty = threadIdx.y; // thread index within the block, y
    float sum = 0.0f;
    int ia = MATRIX_SIZE * (BLOCK_SIZE * by + ty); // start of this thread's row of A
    int ib = BLOCK_SIZE * bx + tx; // this thread's column of B
    int ic = ia + ib; // linear index of the output element of C
    // dot product of the A row and the B column
    for (int k = 0; k < MATRIX_SIZE; k++) sum += a[ia + k] * b[ib + k * MATRIX_SIZE];
    c[ic] = sum;
}
// Tiled shared-memory multiply with square tiles sized by blockDim
// (requires blockDim.x == blockDim.y, blockDim.x * blockDim.y <= SHMEM_SIZE,
// and MATRIX_SIZE a multiple of blockDim.x).
// Fix over the previous version: the accumulator was declared int, which
// truncated every float product before adding; it now uses the element type.
__global__ void matrixMulTiled(BASE_TYPE* c, const BASE_TYPE* a,const BASE_TYPE* b) {
    // Compute each thread's global row and column index
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Statically allocated shared memory
    __shared__ BASE_TYPE s_a[SHMEM_SIZE];
    __shared__ BASE_TYPE s_b[SHMEM_SIZE];
    // Accumulate in a register of the element type (was: int, truncating floats)
    BASE_TYPE tmp = 0;
    // Sweep tile across matrix
    for (int i = 0; i < MATRIX_SIZE; i += blockDim.x) {
        // Load in elements for this tile
        s_a[threadIdx.y * blockDim.x + threadIdx.x] = a[row * MATRIX_SIZE + i + threadIdx.x];
        s_b[threadIdx.y * blockDim.x + threadIdx.x] = b[i * MATRIX_SIZE + threadIdx.y * MATRIX_SIZE + col];
        // Wait for both tiles to be loaded in before doing computation
        __syncthreads();
        // Do matrix multiplication on the small matrix
        for (int j = 0; j < blockDim.x; j++) {
            tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x];
        }
        // Wait for all threads to finish using current tiles before loading in new ones
        __syncthreads();
    }
    // Write back results
    c[row * MATRIX_SIZE + col] = tmp;
}
// Benchmarks `function` over testCount runs: prints a sample of the output
// (also keeps the work observable) and the mean wall-clock time per run, and
// returns the TOTAL elapsed time in seconds.
// Fixes over the previous version:
//  * `seconds` was read uninitialized by the first `+=` (undefined behavior);
//  * std::string objects were passed to printf %s (undefined behavior) — now c_str();
//  * the printed value was in seconds but labeled "ms" — now converted to ms;
//  * a float[MATRIX_SIZE^2] was new'd into the local pointer copy and leaked — removed.
double experiment(void function(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b),
    const std::string type, const std::string description, BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    const int testCount = 10;
    double seconds = 0.0;
    clock_t start, end;
    LOOP_I(testCount) {
        start = clock();
        function(c, a, b);
        end = clock();
        seconds += (double)(end - start) / CLOCKS_PER_SEC;
    }
    // Spot-check a few output elements (and keep the compiler from eliding the work).
    for (size_t i = 200; i < 210; i++)
    {
        std::cout << c[i] << " ";
    }
    printf("time %s: %.2f ms - %s\n", type.c_str(), seconds / testCount * 1000.0, description.c_str());
    return seconds;
}
// Thread-block dimensions for the transpose kernels (TRX x TRY threads).
const int TRX = 16;
const int TRY = 16;
// RAII owner of a device-memory float buffer of `n` elements (used as the
// transpose scratch in matrixDeviceBuffA/B).
// Fixes over the previous version: the init list was out of declaration order
// (-Wreorder), and the type was copyable despite owning a raw device pointer,
// which would have cudaFree'd the same pointer twice.
// NOTE(review): allocation/free errors are only checked via assert, which
// compiles away in release builds.
struct gpu_buf_t
{
    float* p;  // device pointer, owned
    int n;     // element count
    gpu_buf_t(int size)
        : p(0)
        , n(size)
    {
        cudaError_t error = cudaMalloc(&p, n * sizeof(float));
        assert(error == cudaSuccess);
    }
    ~gpu_buf_t()
    {
        if (p)
        {
            cudaError_t error = cudaFree(p);
            assert(error == cudaSuccess);
            p = 0;
        }
    }
    // Owning raw pointer: copying would double-free device memory.
    gpu_buf_t(const gpu_buf_t&) = delete;
    gpu_buf_t& operator=(const gpu_buf_t&) = delete;
};
// Tile/blocking constants for the register-blocked `gemm` kernel.
const int TSM = 128;  // tile size in M (rows of C per block)
const int TSN = 128;  // tile size in N (columns of C per block)
const int TSK = 16;   // tile size in K (depth per shared-memory pass)
const int WPTM = 8;   // outputs per thread in M
const int WPTN = 8;   // outputs per thread in N
const int RTSM = TSM / WPTM;  // threads per block in y
const int RTSN = TSN / WPTN;  // threads per block in x
const int LPTA = TSK * WPTM * WPTN / TSN;  // loads per thread per tile pass
// Out-of-place transpose through a shared-memory tile: src is Q rows x P
// columns (row-major); dst receives the P x Q transpose. Launch with
// TRX x TRY threads per block and enough blocks to cover P x Q.
// NOTE(review): on partial edge tiles buf[tx][ty] can be read without having
// been written (the read guard is not the mirror of the write guard); with
// P, Q multiples of the tile size — as used in this file — every read is covered.
__global__ void transpose(int P, int Q, const float* src, float* dst)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    const int ID0 = blockIdx.x * TRX + tx;  // source column
    const int ID1 = blockIdx.y * TRY + ty;  // source row
    __shared__ float buf[TRX][TRY];
    if (ID0 < P && ID1 < Q)
        buf[ty][tx] = src[ID1 * P + ID0];
    __syncthreads();  // whole tile staged before the transposed read-out
    const int newID0 = blockIdx.y * TRY + tx;  // destination column
    const int newID1 = blockIdx.x * TRX + ty;  // destination row
    if (newID0 < Q && newID1 < P)
        dst[newID1 * Q + newID0] = buf[tx][ty];
}
// Register-blocked tiled GEMM: each block computes a TSM x TSN tile of C and
// each thread a WPTM x WPTN sub-tile, staging TSK-deep panels of A and B in
// shared memory. Expects A pre-transposed so it is read as A[k * M + m] —
// see matrixDeviceBuffA, which runs `transpose` on A first.
// Launch: grid (M/TSM, N/TSN), block (TSM/WPTM, TSN/WPTN). Requires M, N, K
// to be multiples of the corresponding tile constants (no edge guards).
__global__ void gemm(int M, int N, int K, const float* A, const float* B, float* C)
{
    const int tidm = threadIdx.y;
    const int tidn = threadIdx.x;
    const int offsetM = TSM * blockIdx.y;  // first C row owned by this block
    const int offsetN = TSN * blockIdx.x;  // first C column owned by this block
    __shared__ float sA[TSK][TSM];
    __shared__ float sB[TSN][TSK];
    float rA;
    float rB[WPTN];
    float rC[WPTM][WPTN];  // per-thread output sub-tile held in registers
#pragma unroll
    for (int wm = 0; wm < WPTM; wm++)
    {
#pragma unroll
        for (int wn = 0; wn < WPTN; wn++)
            rC[wm][wn] = 0.0f;
    }
    for (int k0 = 0; k0 < K; k0 += TSK)
    {
        // Cooperative load of one TSK-deep panel of A and B into shared memory.
#pragma unroll
        for (int la = 0; la < LPTA; la++)
        {
            int tid = tidn * RTSM + tidm;
            int id = la * RTSN * RTSM + tid;
            int row = id % TSM;
            int col = id / TSM;
            int tiledIndex = k0 + col;
#if __CUDA_ARCH__ >= 320
            // __ldg routes the read-only loads through the texture/read-only cache.
            sA[col][row] = __ldg(&A[tiledIndex * M + offsetM + row]);
            sB[row][col] = __ldg(&B[tiledIndex * N + offsetN + row]);
#else
            sA[col][row] = A[tiledIndex * M + offsetM + row];
            sB[row][col] = B[tiledIndex * N + offsetN + row];
#endif
        }
        __syncthreads();
        for (int k = 0; k < TSK; k++)
        {
            // Cache one strip of B in registers, then FMA it against WPTM A values.
#pragma unroll
            for (int wn = 0; wn < WPTN; wn++)
            {
                int col = tidn + wn * RTSN;
                rB[wn] = sB[col][k];
            }
#pragma unroll
            for (int wm = 0; wm < WPTM; wm++)
            {
                int row = tidm + wm * RTSM;
                rA = sA[k][row];
#pragma unroll
                for (int wn = 0; wn < WPTN; wn++) {
                    rC[wm][wn] += rA * rB[wn];
                }
            }
        }
        __syncthreads();
    }
    // Write the accumulated sub-tile back to global memory.
#pragma unroll
    for (int wm = 0; wm < WPTM; wm++)
    {
        int globalRow = offsetM + tidm + wm * RTSM;
#pragma unroll
        for (int wn = 0; wn < WPTN; wn++)
        {
            int globalCol = offsetN + tidn + wn * RTSN;
            C[globalCol + globalRow * N] = rC[wm][wn];
        }
    }
}
// Tile constants for the MatrixBuffB kernel.
const int TS = 32;         // square tile edge
const int WPT = 8;         // output elements (rows) per thread
const int PTS = TS / WPT;  // thread rows per block (blockDim.y)
// Byte-for-byte duplicate of `transpose` above, kept as a separate entry
// point for the BuffB pipeline.
// NOTE(review): consider sharing one kernel (or a common __device__ helper)
// instead of maintaining two identical copies; the edge-tile caveat noted on
// `transpose` applies here too.
__global__ void transposeBuffB(int P, int Q, const float* src, float* dst)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    const int ID0 = blockIdx.x * TRX + tx;  // source column
    const int ID1 = blockIdx.y * TRY + ty;  // source row
    __shared__ float buf[TRX][TRY];
    if (ID0 < P && ID1 < Q)
        buf[ty][tx] = src[ID1 * P + ID0];
    __syncthreads();
    const int newID0 = blockIdx.y * TRY + tx;  // destination column
    const int newID1 = blockIdx.x * TRX + ty;  // destination row
    if (newID0 < Q && newID1 < P)
        dst[newID1 * Q + newID0] = buf[tx][ty];
}
// Tiled multiply that consumes a PRE-TRANSPOSED B (see transposeBuffB): B is
// indexed as B[j * K + k], i.e. one row of the transposed copy per output
// column, keeping its loads coalesced. Each thread produces WPT output
// elements spaced PTS rows apart. Launch with TS x (TS/WPT) threads — see
// matrixDeviceBuffB. Requires M, K multiples of TS (no edge guards).
__global__ void MatrixBuffB(int M, int N, int K, const float* A, const float* B, float* C)
{
    int i0 = TS * blockIdx.y + threadIdx.y;  // first of this thread's output rows
    int j = TS * blockIdx.x + threadIdx.x;   // this thread's output column
    float c[WPT];  // per-thread accumulators, one per output row
    for (int w = 0; w < WPT; w++)
        c[w] = 0.0f;
    __shared__ float sA[TS][TS];
    __shared__ float sB[TS][TS];
    for (int k0 = 0; k0 < K; k0 += TS)
    {
        // Each thread stages WPT rows of both tiles.
        for (int w = 0; w < WPT; w++)
        {
            sA[threadIdx.y + w * PTS][threadIdx.x] = A[(i0 + w * PTS) * K + (k0 + threadIdx.x)];
            sB[threadIdx.y + w * PTS][threadIdx.x] = B[(j)*K + (k0 + threadIdx.y + w * PTS)];
        }
        __syncthreads();
        for (int k = 0; k < TS; ++k)
        {
            // One B value feeds all WPT partial sums of this thread.
            float b = sB[k][threadIdx.x];
            for (int w = 0; w < WPT; w++)
                c[w] += sA[threadIdx.y + w * PTS][k] * b;
        }
        __syncthreads();
    }
    for (int w = 0; w < WPT; w++)
        C[(i0 + w * PTS) * N + j] = c[w];
}
// Benchmark driver: fills two MATRIX_SIZE x MATRIX_SIZE matrices with small
// random integers (so float products stay exact) and times the various GPU
// matrix-multiplication implementations. Returns 0 on completion.
// Fixes vs. original: `delete` on new[]-allocated arrays replaced with
// `delete[]` (plain delete is undefined behavior); unused locals removed.
int main()
{
    double seconds;
    double t1;           // t1/t2/t kept for the commented-out CPU benchmarks below
    double t2;
    double t;
    clock_t start, end;
    float* a = new float[MATRIX_SIZE * MATRIX_SIZE];
    float* b = new float[MATRIX_SIZE * MATRIX_SIZE];
    float* c = new float[MATRIX_SIZE * MATRIX_SIZE];
    for (size_t i = 0; i < MATRIX_SIZE * MATRIX_SIZE; i++)
    {
        a[i] = rand() % 5;
        b[i] = rand() % 5;
    }
    // TEST
    {
        // Standart
        experiment(MatrixMul, "GPU", "Standart", c, a, b);
        cudaDeviceReset();
        //
        // Cuda standart + pinned
        //
        {
            float* a_pin, * b_pin, * c_pin;
            unsigned int mem_size = sizeof(float) * MATRIX_SIZE * MATRIX_SIZE;
            cudaHostAlloc((void**)&a_pin, mem_size, cudaHostAllocDefault);
            cudaHostAlloc((void**)&b_pin, mem_size, cudaHostAllocDefault);
            cudaHostAlloc((void**)&c_pin, mem_size, cudaHostAllocDefault);
            for (size_t i = 0; i < MATRIX_SIZE * MATRIX_SIZE; i++)
            {
                a_pin[i] = a[i];
                b_pin[i] = b[i];
            }
            experiment(MatrixMul, "GPU", "Standart+Pinned", c_pin, a_pin, b_pin);
            cudaDeviceReset();
            cudaFreeHost(a_pin);
            cudaFreeHost(b_pin);
            cudaFreeHost(c_pin);
        }
        // Cuda Pinned + Stream
        {
            float* a_pin, * b_pin, * c_pin;
            unsigned int mem_size = sizeof(float) * MATRIX_SIZE * MATRIX_SIZE;
            cudaHostAlloc((void**)&a_pin, mem_size, cudaHostAllocDefault);
            cudaHostAlloc((void**)&b_pin, mem_size, cudaHostAllocDefault);
            cudaHostAlloc((void**)&c_pin, mem_size, cudaHostAllocDefault);
            for (int i = 0; i < MATRIX_SIZE * MATRIX_SIZE; i++) {
                a_pin[i] = a[i];
                b_pin[i] = b[i];
            }
            experiment(MatrixMulStream, "GPU", "Standart+Stream", c_pin, a_pin, b_pin);
            cudaDeviceReset();
            cudaFreeHost(a_pin);
            cudaFreeHost(b_pin);
            cudaFreeHost(c_pin);
        }
        //
        // Shared-memory kernel variants.
        // NOTE(review): the printed value is seconds, despite the "ms" label.
        start = clock();
        matrixSmemm(matrixSmem1, c, a, b, 1);
        end = clock();
        for (size_t i = 200; i < 210; i++)
        {
            std::cout << c[i] << " ";
        }
        seconds = (double)(end - start) / CLOCKS_PER_SEC;
        printf("time GPU: %.2f ms - Smem1 \n", seconds);
        cudaDeviceReset();
        start = clock();
        matrixSmemm(matrixSmem2, c, a, b, 1);
        end = clock();
        for (size_t i = 200; i < 210; i++)
        {
            std::cout << c[i] << " ";
        }
        seconds = (double)(end - start) / CLOCKS_PER_SEC;
        printf("time GPU: %.2f ms - Smem2 \n", seconds);
        cudaDeviceReset();
        start = clock();
        matrixSmemm(matrixSmem3, c, a, b, 1);
        end = clock();
        for (size_t i = 200; i < 210; i++)
        {
            std::cout << c[i] << " ";
        }
        seconds = (double)(end - start) / CLOCKS_PER_SEC;
        printf("time GPU: %.2f ms - Smem3 \n", seconds);
        cudaDeviceReset();
        start = clock();
        matrixSmemm(matrixSmem4, c, a, b, 2);
        end = clock();
        for (size_t i = 200; i < 210; i++)
        {
            std::cout << c[i] << " ";
        }
        seconds = (double)(end - start) / CLOCKS_PER_SEC;
        printf("time GPU: %.2f ms - Smem4 \n", seconds);
        cudaDeviceReset();
        start = clock();
        matrixSmemm(matrixSmem5, c, a, b, 4);
        end = clock();
        for (size_t i = 200; i < 210; i++)
        {
            std::cout << c[i] << " ";
        }
        seconds = (double)(end - start) / CLOCKS_PER_SEC;
        printf("time GPU: %.2f ms - Smem5 \n", seconds);
        cudaDeviceReset();
        experiment(MatrixMuld, "GPU", "MatrixMuldSub", c, a, b);
        cudaDeviceReset();
        experiment(matrixDeviceBuffA, "GPU", "BuffA", c, a, b);
        cudaDeviceReset();
        experiment(matrixDeviceBuffB, "GPU", "BuffB", c, a, b);
        cudaDeviceReset();
        experiment(MatrixBlock1, "GPU", "Block Mult", c, a, b);
        cudaDeviceReset();
        experiment(MatrixTiled, "GPU", "Tiled Mult", c, a, b);
        cudaDeviceReset();
    }
    // Baseline
    //experiment(matrixHost, "CPU", "Standart", c, a, b
    // gemm
    //experiment(matrixDevice, "GPU", "gemm2", c, a, b);
    //{
    //start = clock();
    //gemm_v2(MATRIX_SIZE, MATRIX_SIZE, MATRIX_SIZE, a, b, c);
    //end = clock();
    //for (size_t i = 200; i < 210; i++)
    //{
    //std::cout << c[i] << " ";
    //}
    //t1 = (double)(end - start) / CLOCKS_PER_SEC;
    //printf("time CPU: %.2f ms - SIMD\n", t1);
    //c = new float[MATRIX_SIZE * MATRIX_SIZE];
    //start = clock();
    //gemm_v3(MATRIX_SIZE - 6, MATRIX_SIZE, MATRIX_SIZE, a, b, c);
    //end = clock();
    //for (size_t i = 200; i < 210; i++)
    //{
    //std::cout << c[i] << " ";
    //}
    //t2 = (double)(end - start) / CLOCKS_PER_SEC;
    //printf("time CPU: %.2f ms - Core\n", t2);
    //c = new float[MATRIX_SIZE * MATRIX_SIZE];
    //start = clock();
    //gemm_v4(MATRIX_SIZE - 6, MATRIX_SIZE, MATRIX_SIZE, a, b, c);
    //end = clock();
    //for (size_t i = 200; i < 210; i++)
    //{
    //std::cout << c[i] << " ";
    //}
    //seconds = (double)(end - start) / CLOCKS_PER_SEC;
    //printf("time CPU: %.2f ms - BuffB\n", seconds);
    //c = new float[MATRIX_SIZE * MATRIX_SIZE];
    //start = clock();
    //gemm_v5(MATRIX_SIZE - 6, MATRIX_SIZE, MATRIX_SIZE, a, b, c);
    //end = clock();
    //for (size_t i = 200; i < 210; i++)
    //{
    //std::cout << c[i] << " ";
    //}
    //seconds = (double)(end - start) / CLOCKS_PER_SEC;
    //printf("time CPU: %.2f ms - L1 \n", seconds);
    //c = new float[MATRIX_SIZE * MATRIX_SIZE];
    //start = clock();
    //gemm_v6(MATRIX_SIZE - 6, MATRIX_SIZE, MATRIX_SIZE, a, b, c);
    //end = clock();
    //for (size_t i = 200; i < 210; i++)
    //{
    //std::cout << c[i] << " ";
    //}
    //seconds = (double)(end - start) / CLOCKS_PER_SEC;
    //printf("time CPU: %.2f ms - L2 \n\n", seconds);
    //c = new float[MATRIX_SIZE * MATRIX_SIZE];
    //start = clock();
    //gemm_v6(MATRIX_SIZE - 6, MATRIX_SIZE, MATRIX_SIZE, a, b, c);
    //end = clock();
    //for (size_t i = 200; i < 210; i++)
    //{
    //std::cout << c[i] << " ";
    //}
    //seconds = (double)(end - start) / CLOCKS_PER_SEC;
    //printf("time CPU: %.2f ms - L3\n\n", seconds);
    //}
    //
    // Cuda Standart
    //
    {
        // for (size_t i = 200; i < 210; i++)
        //{
        // std::cout << c[i] << " ";
        //}
        //t = (double)(end - start) / CLOCKS_PER_SEC;
        //printf("time GPU: %.2f standart\n", t);
        //printf("GPU vs CPU: %.2f\n", t1 + t);
        //printf("GPU vs CPU: %.2f\n", t2);
    }
    //cudaDeviceReset();
    //
    //experiment(MatrixBank, "GPU", "Bank Mult", c, a, b);
    //cudaDeviceReset();
    //printf("\n----------------------experiment-----------------------\n");
    //experiment(MatrixBlock, "GPU", "Block Mult", c, a, b);
    //cudaDeviceReset();
    //
    // Cuda BLock
    //{
    //start = clock();
    //MatrixBlock1(c, a, b);
    //end = clock();
    //for (size_t i = 200; i < 210; i++)
    //{
    // std::cout << c[i] << " ";
    //}
    //t = (double)(end - start) / CLOCKS_PER_SEC;
    //printf("time GPU: %.2f ms - Block Mult\n", t);
    //cudaDeviceReset();
    //}
    ////printf("----------------------experiment-----------------------\n\n");
    ////
    //// Cuda Sub
    ////
    //{
    // c = new float[MATRIX_SIZE * MATRIX_SIZE];
    // int numBytes = sizeof(float) * MATRIX_SIZE * MATRIX_SIZE;
    // Matrix A, B, C;
    // A.elements = a;
    // B.elements = b;
    // C.elements = c;
    // start = clock();
    // MatrixSub(C, A, B);
    // end = clock();
    // for (size_t i = 200; i < 210; i++)
    // {
    // std::cout << C.elements[i] << " ";
    // }
    // seconds = (double)(end - start) / CLOCKS_PER_SEC;
    // printf("time GPU: %.2f ms - Sub Mult\n", seconds);
    // cudaDeviceReset();
    // c = new float[MATRIX_SIZE * MATRIX_SIZE];
    //}
    //
    //// Cuda Pinnet
    //
    //{
    // start = clock();
    // MatrixPinned(c, a, b);
    // end = clock();
    // for (size_t i = 200; i < 210; i++)
    // {
    // std::cout << c[i] << " ";
    // }
    // seconds = (double)(end - start) / CLOCKS_PER_SEC;
    // //printf("time GPU: %.2f ms - Pinned\n", seconds);
    // cudaDeviceReset();
    //}
    //{
    // float timerValueGPU, timerValueCPU;
    // cudaEvent_t start, stop;
    // cudaEventCreate(&start);
    // cudaEventCreate(&stop);
    // float* hA, * hB, * hC, * dA, * dB, * dC;
    // int size = MATRIX_SIZE * MATRIX_SIZE; // size of each array
    // int N_thread = MATRIX_SIZE; // threads per block
    // int N_blocks, i;
    // // set up host arrays hA, hB, hC
    // unsigned int mem_size = sizeof(float) * size;
    // hA = (float*)malloc(mem_size);
    // hB = (float*)malloc(mem_size);
    // hC = (float*)malloc(mem_size);
    // // allocate device memory for hA, hB, hC
    // cudaMalloc((void**)&dA, mem_size);
    // cudaMalloc((void**)&dB, mem_size);
    // cudaMalloc((void**)&dC, mem_size);
    // // fill hA, hB and zero hC
    // for (i = 0; i < size; i++)
    // {
    // hA[i] = a[i];
    // hB[i] = b[i];
    // hC[i] = 0.0f;
    // }
    // // compute the number of blocks
    // if ((size % N_thread) == 0)
    // {
    // N_blocks = size / N_thread;
    // }
    // else
    // {
    // N_blocks = (int)(size / N_thread) + 1;
    // }
    // dim3 blocks(N_blocks);
    // dim3 dimBLock(BLOCK_SIZE, BLOCK_SIZE);
    // dim3 dimGrid(MATRIX_SIZE / dimBLock.x, MATRIX_SIZE / dimBLock.y);
    // // ---------------------- GPU variant -------------------
    // // start the timer
    // cudaEventRecord(start, 0);
    // // copy the arrays host -> device
    // cudaMemcpy(dA, a, mem_size, cudaMemcpyHostToDevice);
    // cudaMemcpy(dB, b, mem_size, cudaMemcpyHostToDevice);
    // // launch the kernel
    // function << < dimBLock, dimGrid >> > (dA, dB, dC, size);
    // // copy the result device -> host
    // cudaMemcpy(hC, dC, mem_size, cudaMemcpyDeviceToHost);
    // for (size_t i = 200; i < 210; i++)
    // {
    // std::cout << ":" << hC[i] << " ";
    // }
    // // stop the timer and print the GPU elapsed time
    // cudaEventRecord(stop, 0);
    // cudaEventSynchronize(stop);
    // cudaEventElapsedTime(&timerValueGPU, start, stop);
    // printf("\n GPU calculation time: %f ms\n", timerValueGPU);
    //}
    ////
    //// Cublas
    ////
    //{
    // start = clock();
    // MatrixMulCublas(c, a, b);
    // end = clock();
    // for (size_t i = 200; i < 210; i++)
    // {
    // std::cout << c[i] << " ";
    // }
    // seconds = (double)(end - start) / CLOCKS_PER_SEC;
    // printf("time GPU: %.2f ms - Cublas \n", seconds);
    // cudaDeviceReset();
    //}
    //
    delete[] a;  // BUG FIX: was `delete a;` — new[] arrays require delete[] (plain delete is UB)
    delete[] b;
    delete[] c;
    return 0;
}
// Generic launcher for the shared-memory multiplication kernels.
// `smem` is the kernel to run; `stream` shrinks the block's y-dimension
// (used by the work-per-thread variants, where one thread computes several
// output rows). Copies a/b to the device, launches, and copies the product
// back into `c`. Errors abort via checkCuda.
void matrixSmemm(void smem(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b), BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b, const int stream)
{
    const int byteCount = MATRIX_SIZE * MATRIX_SIZE * sizeof(BASE_TYPE);
    BASE_TYPE* dA = 0;
    BASE_TYPE* dB = 0;
    BASE_TYPE* dC = 0;
    checkCuda(cudaSetDevice(0));
    checkCuda(cudaMalloc((void**)&dC, byteCount));
    checkCuda(cudaMalloc((void**)&dA, byteCount));
    checkCuda(cudaMalloc((void**)&dB, byteCount));
    checkCuda(cudaMemcpy(dA, a, byteCount, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(dB, b, byteCount, cudaMemcpyHostToDevice));
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE / stream);
    dim3 blocks(N / threads.x, N / threads.y);
    smem << <blocks, threads >> > (dC, dA, dB);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaGetLastError());
    checkCuda(cudaMemcpy(c, dC, byteCount, cudaMemcpyDeviceToHost));
    checkCuda(cudaFree(dC));
    checkCuda(cudaFree(dA));
    checkCuda(cudaFree(dB));
}
// Multiplies a*b through std::vector staging buffers and the plain
// matrixDevice kernel, writing the product into `c`.
// Fixes vs. original: the staging buffers were vector<int>, and their raw
// bytes were cudaMemcpy'd into float device buffers — the kernel then
// reinterpreted int bit patterns as floats (garbage). They are now
// vector<float>. The result is also copied back into the caller's `c`,
// which the original filled into a local vector and discarded.
void MatrixPinned(float* c, const float* a, const float* b)
{
    int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    constexpr int N = MATRIX_SIZE;
    // Host staging buffers; must be float so the byte-wise cudaMemcpy below
    // lands valid float data in the float device buffers.
    vector<float> h_a(MATRIX_SIZE * MATRIX_SIZE);
    vector<float> h_b(MATRIX_SIZE * MATRIX_SIZE);
    vector<float> h_c(MATRIX_SIZE * MATRIX_SIZE);
    for (int i = 0; i < MATRIX_SIZE * MATRIX_SIZE; i++) {
        h_a[i] = a[i];
        h_b[i] = b[i];
    }
    float* dev_a = 0;
    float* dev_b = 0;
    float* dev_c = 0;
    // Threads per CTA dimension
    int THREADS = 32;
    // Blocks per grid dimension (assumes THREADS divides N evenly)
    int BLOCKS = N / THREADS;
    dim3 threads(THREADS, THREADS);
    dim3 blocks(BLOCKS, BLOCKS);
    checkCuda(cudaSetDevice(0));
    checkCuda(cudaMalloc((void**)&dev_c, numBytes));
    checkCuda(cudaMalloc((void**)&dev_a, numBytes));
    checkCuda(cudaMalloc((void**)&dev_b, numBytes));
    checkCuda(cudaMemcpy(dev_a, h_a.data(), numBytes, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(dev_b, h_b.data(), numBytes, cudaMemcpyHostToDevice));
    matrixDevice << <blocks, threads >> > (dev_c, dev_a, dev_b);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaGetLastError());
    checkCuda(cudaMemcpy(h_c.data(), dev_c, numBytes, cudaMemcpyDeviceToHost));
    // Publish the result to the caller's buffer.
    for (int i = 0; i < MATRIX_SIZE * MATRIX_SIZE; i++)
        c[i] = h_c[i];
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
}
// Host wrapper for the sub-matrix kernel: stages A, B and C on the device,
// runs matrixDeviceSub and copies the product back into C.elements.
void MatrixSub(Matrix C, const Matrix A, const Matrix B)
{
    const size_t byteCount = A.col * A.row * sizeof(float);
    Matrix dA, dB, dC;
    // Device copy of A.
    dA.col = dA.stride = A.col;
    dA.row = A.row;
    checkCuda(cudaMalloc((void**)&dA.elements, byteCount));
    checkCuda(cudaMemcpy(dA.elements, A.elements, byteCount, cudaMemcpyHostToDevice));
    // Device copy of B (stride/col taken from A, as in the original layout).
    dB.col = dB.stride = A.col;
    dB.row = B.row;
    checkCuda(cudaMalloc((void**)&dB.elements, byteCount));
    checkCuda(cudaMemcpy(dB.elements, B.elements, byteCount, cudaMemcpyHostToDevice));
    // Device copy of C (uploaded too, though the kernel overwrites it).
    dC.col = dC.stride = C.col;
    dC.row = C.row;
    checkCuda(cudaMalloc((void**)&dC.elements, byteCount));
    checkCuda(cudaMemcpy(dC.elements, C.elements, byteCount, cudaMemcpyHostToDevice));
    const dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocksPerGrid(MATRIX_SIZE / threadsPerBlock.x, MATRIX_SIZE / threadsPerBlock.y);
    matrixDeviceSub <<<blocksPerGrid, threadsPerBlock>>> (dC, dA, dB);
    checkCuda(cudaGetLastError());
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaMemcpy(C.elements, dC.elements, byteCount, cudaMemcpyDeviceToHost));
    cudaFree(dC.elements);
    cudaFree(dA.elements);
    cudaFree(dB.elements);
}
// Host wrapper for matrixDevicBlock. Returns 0 (errors abort inside checkCuda).
int MatrixBlock(float* c, const float* a, const float* b)
{
    const int bytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    float* dA = 0;
    float* dB = 0;
    float* dC = 0;
    const dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocksPerGrid(MATRIX_SIZE / threadsPerBlock.x, MATRIX_SIZE / threadsPerBlock.y);
    checkCuda(cudaSetDevice(0));
    checkCuda(cudaMalloc((void**)&dC, bytes));
    checkCuda(cudaMalloc((void**)&dA, bytes));
    checkCuda(cudaMalloc((void**)&dB, bytes));
    checkCuda(cudaMemcpy(dA, a, bytes, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(dB, b, bytes, cudaMemcpyHostToDevice));
    matrixDevicBlock <<<blocksPerGrid, threadsPerBlock>>> (dC, dA, dB);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaGetLastError());
    checkCuda(cudaMemcpy(c, dC, bytes, cudaMemcpyDeviceToHost));
    cudaFree(dC);
    cudaFree(dA);
    cudaFree(dB);
    return 0;
}
// Void variant of MatrixBlock: stage inputs, run matrixDevicBlock, copy the
// product back into `c`.
void MatrixBlock1(float* c, const float* a, const float* b)
{
    const int bytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    float* devA = 0;
    float* devB = 0;
    float* devC = 0;
    const dim3 blockDims(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 gridDims(MATRIX_SIZE / blockDims.x, MATRIX_SIZE / blockDims.y);
    checkCuda(cudaSetDevice(0));
    checkCuda(cudaMalloc((void**)&devC, bytes));
    checkCuda(cudaMalloc((void**)&devA, bytes));
    checkCuda(cudaMalloc((void**)&devB, bytes));
    checkCuda(cudaMemcpy(devA, a, bytes, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(devB, b, bytes, cudaMemcpyHostToDevice));
    matrixDevicBlock << <gridDims, blockDims >> > (devC, devA, devB);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaGetLastError());
    checkCuda(cudaMemcpy(c, devC, bytes, cudaMemcpyDeviceToHost));
    cudaFree(devC);
    cudaFree(devA);
    cudaFree(devB);
}
// Host wrapper for the tiled shared-memory kernel matrixMulTiled.
void MatrixTiled(float* c, const float* a, const float* b)
{
    const int bytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    float* devA = 0;
    float* devB = 0;
    float* devC = 0;
    const dim3 blockDims(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 gridDims(MATRIX_SIZE / blockDims.x, MATRIX_SIZE / blockDims.y);
    checkCuda(cudaSetDevice(0));
    checkCuda(cudaMalloc((void**)&devC, bytes));
    checkCuda(cudaMalloc((void**)&devA, bytes));
    checkCuda(cudaMalloc((void**)&devB, bytes));
    checkCuda(cudaMemcpy(devA, a, bytes, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(devB, b, bytes, cudaMemcpyHostToDevice));
    matrixMulTiled <<<gridDims, blockDims>>> (devC, devA, devB);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaGetLastError());
    checkCuda(cudaMemcpy(c, devC, bytes, cudaMemcpyDeviceToHost));
    cudaFree(devC);
    cudaFree(devA);
    cudaFree(devB);
}
// Host wrapper for the bank-conflict-aware kernel matrixMultBank.
// Returns 0 (errors abort inside checkCuda).
int MatrixBank(float* c, const float* a, const float* b)
{
    const int bytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    float* devA = 0;
    float* devB = 0;
    float* devC = 0;
    const dim3 blockDims(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 gridDims(MATRIX_SIZE / blockDims.x, MATRIX_SIZE / blockDims.y);
    checkCuda(cudaSetDevice(0));
    checkCuda(cudaMalloc((void**)&devC, bytes));
    checkCuda(cudaMalloc((void**)&devA, bytes));
    checkCuda(cudaMalloc((void**)&devB, bytes));
    checkCuda(cudaMemcpy(devA, a, bytes, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(devB, b, bytes, cudaMemcpyHostToDevice));
    matrixMultBank << <gridDims, blockDims >> > (devC, devA, devB);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaGetLastError());
    checkCuda(cudaMemcpy(c, devC, bytes, cudaMemcpyDeviceToHost));
    cudaFree(devC);
    cudaFree(devA);
    cudaFree(devB);
    return 0;
}
// Multiplies in nStream horizontal slices, overlapping the H2D copies of A
// with the slice kernels on separate CUDA streams. Host buffers should be
// pinned (caller passes cudaHostAlloc'd memory) for the async copies to
// actually be asynchronous.
// Fixes vs. original:
//  (1) every slice kernel receives the FULL dev_b, but each stream only
//      uploaded its own slice of B asynchronously — a kernel on stream i
//      could read parts of B other streams had not copied yet. B is now
//      uploaded in full, synchronously, before any kernel launches.
//  (2) the device buffers were freed and the streams destroyed while the
//      final async device-to-host copies could still be in flight; a
//      cudaDeviceSynchronize now precedes the cleanup.
void MatrixMulStream(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    const int nStream = 4; // number of CUDA streams
    int sizeMatrixStream = MATRIX_SIZE * MATRIX_SIZE / nStream;
    int numBytes = sizeMatrixStream * sizeof(float);
    BASE_TYPE* dev_a = 0;
    BASE_TYPE* dev_b = 0;
    BASE_TYPE* dev_c = 0;
    checkCuda( cudaSetDevice(0));
    checkCuda( cudaMalloc((void**)&dev_c, numBytes * nStream));
    checkCuda( cudaMalloc((void**)&dev_a, numBytes * nStream));
    checkCuda( cudaMalloc((void**)&dev_b, numBytes * nStream));
    cudaStream_t stream[nStream];
    for (size_t i = 0; i < nStream; ++i)
        cudaStreamCreate(&stream[i]);
    // B is read in full by every slice kernel: upload it once, synchronously,
    // so it is complete before any kernel can start.
    checkCuda( cudaMemcpy(dev_b, b, numBytes * nStream, cudaMemcpyHostToDevice));
    // A is sliced per stream; each slice only feeds its own kernel, so the
    // per-stream async copy is safe and overlaps with other streams' work.
    for (size_t i = 0; i < nStream; ++i)
    {
        cudaMemcpyAsync(dev_a + i * sizeMatrixStream, a + i * sizeMatrixStream, numBytes, cudaMemcpyHostToDevice, stream[i]);
    }
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(MATRIX_SIZE / block.x , MATRIX_SIZE / block.y / nStream);
    for (size_t i = 0; i < nStream; ++i) // launch one slice kernel per stream
    {
        matrixDeviceStream << <grid, block, 0, stream[i] >> > (&dev_c[i * sizeMatrixStream], &dev_a[i * sizeMatrixStream], dev_b, MATRIX_SIZE / nStream);
    }
    checkCuda( cudaDeviceSynchronize());
    checkCuda( cudaGetLastError());
    for (size_t i = 0; i < nStream; ++i)
        cudaMemcpyAsync(c + i * sizeMatrixStream, dev_c + i * sizeMatrixStream, numBytes, cudaMemcpyDeviceToHost, stream[i]);
    // Wait for the async D2H copies before freeing dev_c / destroying streams.
    checkCuda( cudaDeviceSynchronize());
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    for (size_t i = 0; i < nStream; ++i)
        cudaStreamDestroy(stream[i]);
}
// Baseline host wrapper: copy A and B to the device, run the naive
// matrixDevice kernel on a BLOCK_SIZE x BLOCK_SIZE block layout, copy the
// product back into `c`. Errors abort via checkCuda.
void MatrixMul(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    const int byteCount = MATRIX_SIZE * MATRIX_SIZE * sizeof(BASE_TYPE);
    BASE_TYPE* dA = 0;
    BASE_TYPE* dB = 0;
    BASE_TYPE* dC = 0;
    checkCuda(cudaSetDevice(0));
    checkCuda(cudaMalloc((void**)&dC, byteCount));
    checkCuda(cudaMalloc((void**)&dA, byteCount));
    checkCuda(cudaMalloc((void**)&dB, byteCount));
    checkCuda(cudaMemcpy(dA, a, byteCount, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(dB, b, byteCount, cudaMemcpyHostToDevice));
    const dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocksPerGrid(MATRIX_SIZE / threadsPerBlock.x, MATRIX_SIZE / threadsPerBlock.y);
    matrixDevice << <blocksPerGrid, threadsPerBlock >> > (dC, dA, dB);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaGetLastError());
    checkCuda(cudaMemcpy(c, dC, byteCount, cudaMemcpyDeviceToHost));
    checkCuda(cudaFree(dC));
    checkCuda(cudaFree(dA));
    checkCuda(cudaFree(dB));
}
// Host wrapper for the sub-matrix (Muld) kernel; same staging pattern as
// MatrixMul but launching Muld.
void MatrixMuld(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    const int byteCount = MATRIX_SIZE * MATRIX_SIZE * sizeof(BASE_TYPE);
    BASE_TYPE* dA = 0;
    BASE_TYPE* dB = 0;
    BASE_TYPE* dC = 0;
    checkCuda(cudaSetDevice(0));
    checkCuda(cudaMalloc((void**)&dC, byteCount));
    checkCuda(cudaMalloc((void**)&dA, byteCount));
    checkCuda(cudaMalloc((void**)&dB, byteCount));
    checkCuda(cudaMemcpy(dA, a, byteCount, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(dB, b, byteCount, cudaMemcpyHostToDevice));
    const dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocksPerGrid(MATRIX_SIZE / threadsPerBlock.x, MATRIX_SIZE / threadsPerBlock.y);
    Muld << <blocksPerGrid, threadsPerBlock >> > (dC, dA, dB);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaGetLastError());
    checkCuda(cudaMemcpy(c, dC, byteCount, cudaMemcpyDeviceToHost));
    checkCuda(cudaFree(dC));
    checkCuda(cudaFree(dA));
    checkCuda(cudaFree(dB));
}
// Intended cuBLAS SGEMM wrapper.
// NOTE(review): the cublasCreate/cublasSgemm/cublasDestroy calls are all
// commented out, so dev_c is never written and the bytes copied back into
// `c` are uninitialized device memory. Re-enable the cuBLAS calls (and make
// sure cuBLAS is included/linked) before using this path. Also note cuBLAS
// is column-major, so the commented CUBLAS_OP_N/CUBLAS_OP_N call computes
// the transposed product in row-major terms — verify before re-enabling.
void MatrixMulCublas(float* c, const float* a, const float* b)
{
    int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    // cublasHandle_t handle;
    //cublasCreate(&handle);
    float* dev_a = 0;
    float* dev_b = 0;
    float* dev_c = 0;
    // Launch geometry — unused while the cuBLAS call is disabled.
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(MATRIX_SIZE / block.x, MATRIX_SIZE / block.y);
    checkCuda( cudaSetDevice(0));
    checkCuda( cudaMalloc((void**)&dev_c, numBytes));
    checkCuda( cudaMalloc((void**)&dev_a, numBytes));
    checkCuda( cudaMalloc((void**)&dev_b, numBytes));
    checkCuda( cudaMemcpy(dev_a, a, numBytes, cudaMemcpyHostToDevice));
    checkCuda( cudaMemcpy(dev_b, b, numBytes, cudaMemcpyHostToDevice));
    float alpha = 1.0f;
    float beta = 0.0f;
    //cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, MATRIX_SIZE, MATRIX_SIZE, MATRIX_SIZE, &alpha, dev_a, MATRIX_SIZE, dev_b, MATRIX_SIZE, &beta, dev_c, MATRIX_SIZE);
    //cublasDestroy(handle);
    checkCuda( cudaDeviceSynchronize());
    checkCuda( cudaGetLastError());
    checkCuda( cudaMemcpy(c, dev_c, numBytes, cudaMemcpyDeviceToHost));
    cudaFree( dev_c );
    cudaFree( dev_a );
    cudaFree( dev_b );
}
// Multiply via a transposed-B kernel: B is first transposed into a device
// scratch buffer (gpu_buf_t — presumably an owning device allocation with a
// raw pointer member `p`; confirm its definition), then MatrixBuffB reads it.
// NOTE(review): the dim3 names are swapped relative to convention — `block`
// and `blockT` hold the NUMBER OF BLOCKS (the grid) while `grid`/`gridT`
// hold the THREADS PER BLOCK; the launches pass them in the correct
// <<<numBlocks, threadsPerBlock>>> order, so the code is right even though
// the names mislead.
void matrixDeviceBuffB(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(BASE_TYPE);
    BASE_TYPE* dev_a = 0;
    BASE_TYPE* dev_b = 0;
    BASE_TYPE* dev_c = 0;
    checkCuda(cudaSetDevice(0));
    checkCuda(cudaMalloc((void**)&dev_c, numBytes));
    checkCuda(cudaMalloc((void**)&dev_a, numBytes));
    checkCuda(cudaMalloc((void**)&dev_b, numBytes));
    checkCuda(cudaMemcpy(dev_a, a, numBytes, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(dev_b, b, numBytes, cudaMemcpyHostToDevice));
    //dim3 grid(TS, TS);
    //dim3 block((MATRIX_SIZE + TS - 1) / TS, (MATRIX_SIZE + TS - 1) / TS);
    gpu_buf_t tB(MATRIX_SIZE * MATRIX_SIZE);  // device scratch for B^T
    dim3 gridT(TRX, TRY);
    dim3 blockT((MATRIX_SIZE + TRX - 1) / TRX, (MATRIX_SIZE + TRY - 1) / TRY);
    dim3 grid(TS, TS / WPT);
    dim3 block((MATRIX_SIZE + TS - 1) / TS, (MATRIX_SIZE + TS - 1) / TS);
    //matrixTest << <block, grid>> > (MATRIX_SIZE, MATRIX_SIZE, MATRIX_SIZE, dev_a, dev_b, dev_c);
    // Both launches use the default stream, so the transpose completes
    // before MatrixBuffB reads tB.p.
    transposeBuffB << <blockT, gridT >> > (MATRIX_SIZE, MATRIX_SIZE, dev_b, tB.p);
    MatrixBuffB << <block, grid >> > (MATRIX_SIZE, MATRIX_SIZE, MATRIX_SIZE, dev_a, tB.p, dev_c);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaGetLastError());
    checkCuda(cudaMemcpy(c, dev_c, numBytes, cudaMemcpyDeviceToHost));
    checkCuda(cudaFree(dev_c));
    checkCuda(cudaFree(dev_a));
    checkCuda(cudaFree(dev_b));
}
// Multiply via a transposed-A kernel: A is transposed into a device scratch
// buffer (gpu_buf_t, see note on matrixDeviceBuffB), then a register-tiled
// gemm kernel (TSM/TSN tiles, WPTM/WPTN work per thread) computes the product.
// NOTE(review): as in matrixDeviceBuffB, the dim3 named `block`/`blockT` is
// the grid (block count) and `grid`/`gridT` is the threads-per-block; the
// launch argument order is correct.
void matrixDeviceBuffA(BASE_TYPE* c, const BASE_TYPE* a, const BASE_TYPE* b)
{
    int numBytes = MATRIX_SIZE * MATRIX_SIZE * sizeof(BASE_TYPE);
    BASE_TYPE* dev_a = 0;
    BASE_TYPE* dev_b = 0;
    BASE_TYPE* dev_c = 0;
    checkCuda(cudaSetDevice(0));
    checkCuda(cudaMalloc((void**)&dev_c, numBytes));
    checkCuda(cudaMalloc((void**)&dev_a, numBytes));
    checkCuda(cudaMalloc((void**)&dev_b, numBytes));
    checkCuda(cudaMemcpy(dev_a, a, numBytes, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(dev_b, b, numBytes, cudaMemcpyHostToDevice));
    gpu_buf_t tA(MATRIX_SIZE * MATRIX_SIZE);  // device scratch for A^T
    dim3 gridT(TRX, TRY);
    dim3 blockT((MATRIX_SIZE + TRX - 1) / TRX, (MATRIX_SIZE + TRY - 1) / TRY);
    dim3 grid(TSM / WPTM, TSN / WPTN);
    dim3 block(MATRIX_SIZE / TSM, MATRIX_SIZE / TSN);
    // Default stream orders the transpose before the gemm.
    transpose << <blockT, gridT >> > (MATRIX_SIZE, MATRIX_SIZE, dev_a, tA.p);
    gemm << <block, grid >> > (MATRIX_SIZE, MATRIX_SIZE, MATRIX_SIZE, tA.p, dev_b, dev_c);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaGetLastError());
    checkCuda(cudaMemcpy(c, dev_c, numBytes, cudaMemcpyDeviceToHost));
    checkCuda(cudaFree(dev_c));
    checkCuda(cudaFree(dev_a));
    checkCuda(cudaFree(dev_b));
}
//
//void df() {
// // Size (in bytes) of matrix
// size_t bytes = N * N * sizeof(int);
//
// // Host vectors
// vector<int> h_a(N * N);
// vector<int> h_b(N * N);
// vector<int> h_c(N * N);
//
// // Initialize matrices
// generate(h_a.begin(), h_a.end(), []() { return rand() % 100; });
// generate(h_b.begin(), h_b.end(), []() { return rand() % 100; });
//
// // Allocate device memory
// int* d_a, * d_b, * d_c;
// cudaMalloc(&d_a, bytes);
// cudaMalloc(&d_b, bytes);
// cudaMalloc(&d_c, bytes);
//
// // Copy data to the device
// cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice);
//
// // Threads per CTA dimension
// int THREADS = 32;
//
// // Blocks per grid dimension (assumes THREADS divides N evenly)
// int BLOCKS = N / THREADS;
//
// // Use dim3 structs for block and grid dimensions
// dim3 threads(THREADS, THREADS);
// dim3 blocks(BLOCKS, BLOCKS);
//
// // Launch kernel
// matrixMul << <blocks, threads >> > (d_a, d_b, d_c);
//
// // Copy back to the host
// cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
//
// // Check result
// verify_result(h_a, h_b, h_c);
//
// cout << "COMPLETED SUCCESSFULLY\n";
//
// // Free memory on device
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_c);
//} |
e369997db811f61082f054543289152cf415c9c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Used in cupy based on example from here: https://github.com/cupy/cupy/tree/master/examples/gemm
// TODO: Add 2D functions
#include <cupy/complex.cuh>
// 2D gridding (non-uniform -> Cartesian scatter): each thread takes one
// k-space sample, weights it by the density compensation and a separable
// tabulated kernel, and atomically accumulates it into the real (kr) and
// imaginary (ki) output grids.
//   data    : n_points complex samples
//   kr, ki  : output grid, N[0] x N[1], real / imaginary parts
//   traj    : 3 floats per sample; components assumed in [-0.5, 0.5)
//   dens    : per-sample density compensation weights
//   krad    : kernel radius in grid cells
//   kernel  : lookup table, grid_mod entries per krad of distance
// NOTE(review): the kernel[ri + 1] read assumes the table carries a guard
// entry past index grid_mod for the distance == krad case — confirm on the
// host side.
extern "C" __global__
void grid2d(complex<float> *data, float *kr, float *ki, float *traj, float *dens,
            long n_points, long *N, float krad, long grid_mod, float *kernel)
{
    // Grid-stride loop over samples.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n_points;
         i += blockDim.x * gridDim.x)
    {
        if (i < n_points) {  // redundant: already implied by the loop condition
            // Normalized trajectory -> grid coordinates.
            float x = (traj[i * 3 + 0] + 0.5) * N[1];
            float y = (traj[i * 3 + 1] + 0.5) * N[0];
            // Bounding box of grid cells within the kernel radius.
            int xmin = floor(x - krad);
            int ymin = floor(y - krad);
            int xmax = ceil(x + krad);
            int ymax = ceil(y + krad);
            // Kernel lookup variables (TODO: give clearer names)
            float kii, kdi;
            int ri;
            for (int iy = ymin; iy <= ymax; iy++)
            {
                if ((iy >= 0) && (iy < N[0]))
                {
                    // Linear interpolation into the kernel lookup table (y axis).
                    float dy = abs(y - iy);
                    kii = dy / krad * grid_mod;
                    ri = floor(kii);
                    kdi = kii - ri;
                    float kvy = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                    for (int ix = xmin; ix <= xmax; ix++)
                    {
                        if ((ix >= 0) && (ix < N[1]))
                        {
                            // Linear interpolation (x axis).
                            float dx = abs(x - ix);
                            kii = dx / krad * grid_mod;
                            ri = floor(kii);
                            kdi = kii - ri;
                            float kvx = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                            // Weighted sample, scattered atomically (many samples
                            // may hit the same grid cell concurrently).
                            float dr = data[i].real() * dens[i] * kvx * kvy;
                            float di = data[i].imag() * dens[i] * kvx * kvy;
                            atomicAdd(&kr[iy * N[1] + ix], dr);
                            atomicAdd(&ki[iy * N[1] + ix], di);
                        }
                    }
                }
            }
        }
    }
}
// 3D gridding (non-uniform -> Cartesian scatter): 3D analogue of grid2d.
// Each thread handles one sample and atomically accumulates its weighted
// contribution into the N[0] x N[1] x N[2] real/imag grids (kr, ki).
// Trajectory layout: traj[i*3 + {0,1,2}] maps to (x, y, z) against
// (N[2], N[1], N[0]). See grid2d for the parameter description and the
// NOTE(review) about the kernel-table guard entry.
extern "C" __global__
void grid3d(complex<float> *data, float *kr, float *ki, float *traj, float *dens,
            long n_points, long *N, float krad, long grid_mod, float *kernel)
{
    // Grid-stride loop over samples.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n_points;
         i += blockDim.x * gridDim.x)
    {
        if (i < n_points) {  // redundant: already implied by the loop condition
            // Normalized trajectory -> grid coordinates.
            float x = (traj[i * 3 + 0] + 0.5) * N[2];
            float y = (traj[i * 3 + 1] + 0.5) * N[1];
            float z = (traj[i * 3 + 2] + 0.5) * N[0];
            // Bounding box of grid cells within the kernel radius.
            int xmin = floor(x - krad);
            int ymin = floor(y - krad);
            int zmin = floor(z - krad);
            int xmax = ceil(x + krad);
            int ymax = ceil(y + krad);
            int zmax = ceil(z + krad);
            // Kernel lookup variables (TODO: give clearer names)
            float kii, kdi;
            int ri;
            for (int iz = zmin; iz <= zmax; iz++)
            {
                if ((iz >= 0) && (iz < N[0]))
                {
                    // Linear interpolation into the kernel table (z axis).
                    float dz = abs(z - iz);
                    kii = dz / krad * grid_mod;
                    ri = floor(kii);
                    kdi = kii - ri;
                    float kvz = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                    for (int iy = ymin; iy <= ymax; iy++)
                    {
                        if ((iy >= 0) && (iy < N[1]))
                        {
                            // Linear interpolation (y axis).
                            float dy = abs(y - iy);
                            kii = dy / krad * grid_mod;
                            ri = floor(kii);
                            kdi = kii - ri;
                            float kvy = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                            for (int ix = xmin; ix <= xmax; ix++)
                            {
                                if ((ix >= 0) && (ix < N[2]))
                                {
                                    // Linear interpolation (x axis).
                                    float dx = abs(x - ix);
                                    kii = dx / krad * grid_mod;
                                    ri = floor(kii);
                                    kdi = kii - ri;
                                    float kvx = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                                    // Weighted sample, scattered atomically.
                                    float dr = data[i].real() * dens[i] * kvx * kvy * kvz;
                                    float di = data[i].imag() * dens[i] * kvx * kvy * kvz;
                                    atomicAdd(&kr[iz * N[2] * N[1] + iy * N[2] + ix], dr);
                                    atomicAdd(&ki[iz * N[2] * N[1] + iy * N[2] + ix], di);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
// 2D inverse gridding (Cartesian -> non-uniform gather): for each sample,
// interpolate from the Cartesian k-space grid with the same separable
// tabulated kernel as grid2d and accumulate into data[i].
// NOTE(review): `dens` is accepted but never used here; contributions are
// added with +=, so data is assumed to be zero-initialized by the caller —
// confirm both against the host-side pipeline.
extern "C" __global__
void igrid2d(complex<float> *data, complex<float> *kspace, float *traj, float *dens,
             long n_points, long *N, float krad, long grid_mod, float *kernel)
{
    // Grid-stride loop over samples; this is a gather, so no atomics needed.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n_points;
         i += blockDim.x * gridDim.x)
    {
        if (i < n_points) {  // redundant: already implied by the loop condition
            // Normalized trajectory -> grid coordinates.
            float x = (traj[i * 3 + 0] + 0.5) * N[1];
            float y = (traj[i * 3 + 1] + 0.5) * N[0];
            // Bounding box of grid cells within the kernel radius.
            int xmin = floor(x - krad);
            int ymin = floor(y - krad);
            int xmax = ceil(x + krad);
            int ymax = ceil(y + krad);
            // Kernel lookup variables (TODO: give clearer names)
            float kii, kdi;
            int ri;
            for (int iy = ymin; iy <= ymax; iy++)
            {
                if ((iy >= 0) && (iy < N[0]))
                {
                    // Linear interpolation into the kernel table (y axis).
                    float dy = abs(y - iy);
                    kii = dy / krad * grid_mod;
                    ri = floor(kii);
                    kdi = kii - ri;
                    float kvy = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                    for (int ix = xmin; ix <= xmax; ix++)
                    {
                        if ((ix >= 0) && (ix < N[1]))
                        {
                            // Linear interpolation (x axis).
                            float dx = abs(x - ix);
                            kii = dx / krad * grid_mod;
                            ri = floor(kii);
                            kdi = kii - ri;
                            float kvx = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                            data[i] += kspace[iy * N[1] + ix] * kvx * kvy;
                        }
                    }
                }
            }
        }
    }
}
// 3D inverse gridding (Cartesian -> non-uniform gather): 3D analogue of
// igrid2d. Each thread gathers the kernel-weighted neighborhood of one
// sample from the N[0] x N[1] x N[2] kspace grid into data[i].
// NOTE(review): `dens` is unused here, and data[i] is accumulated with +=
// (assumes zero-initialized input) — confirm against the host pipeline.
extern "C" __global__
void igrid3d(complex<float> *data, complex<float> *kspace, float *traj, float *dens,
             long n_points, long *N, float krad, long grid_mod, float *kernel)
{
    // Grid-stride loop over samples; gather, so no atomics needed.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n_points;
         i += blockDim.x * gridDim.x)
    {
        if (i < n_points) {  // redundant: already implied by the loop condition
            // Normalized trajectory -> grid coordinates.
            float x = (traj[i * 3 + 0] + 0.5) * N[2];
            float y = (traj[i * 3 + 1] + 0.5) * N[1];
            float z = (traj[i * 3 + 2] + 0.5) * N[0];
            // Bounding box of grid cells within the kernel radius.
            int xmin = floor(x - krad);
            int ymin = floor(y - krad);
            int zmin = floor(z - krad);
            int xmax = ceil(x + krad);
            int ymax = ceil(y + krad);
            int zmax = ceil(z + krad);
            // Kernel lookup variables (TODO: give clearer names)
            float kii, kdi;
            int ri;
            for (int iz = zmin; iz <= zmax; iz++)
            {
                if ((iz >= 0) && (iz < N[0]))
                {
                    // Linear interpolation into the kernel table (z axis).
                    float dz = abs(z - iz);
                    kii = dz / krad * grid_mod;
                    ri = floor(kii);
                    kdi = kii - ri;
                    float kvz = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                    for (int iy = ymin; iy <= ymax; iy++)
                    {
                        if ((iy >= 0) && (iy < N[1]))
                        {
                            // Linear interpolation (y axis).
                            float dy = abs(y - iy);
                            kii = dy / krad * grid_mod;
                            ri = floor(kii);
                            kdi = kii - ri;
                            float kvy = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                            for (int ix = xmin; ix <= xmax; ix++)
                            {
                                if ((ix >= 0) && (ix < N[2]))
                                {
                                    // Linear interpolation (x axis).
                                    float dx = abs(x - ix);
                                    kii = dx / krad * grid_mod;
                                    ri = floor(kii);
                                    kdi = kii - ri;
                                    float kvx = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                                    data[i] += kspace[iz * N[2] * N[1] + iy * N[2] + ix] * kvx * kvy * kvz;
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
// 3D separable correction: every voxel of the N[0] x N[1] x N[2] volume A is
// divided in place by D0[z] * D1[y] * D2[x] (presumably deapodization after
// gridding — confirm against the host-side pipeline).
extern "C" __global__
void deapp3(complex<float> *A, long *N, float *D0, float *D1, float *D2)
{
    // Grid-stride loops over all three dimensions, so any launch geometry
    // covers the full volume.
    for (int z = blockIdx.z * blockDim.z + threadIdx.z; z < N[0]; z += blockDim.z * gridDim.z)
    {
        for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < N[1]; y += blockDim.y * gridDim.y)
        {
            for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < N[2]; x += blockDim.x * gridDim.x)
            {
                A[z * N[2] * N[1] + y * N[2] + x] /= (D0[z] * D1[y] * D2[x]);
            }
        }
    }
}
// 2D separable correction: every element of the N[0] x N[1] image A is
// divided in place by D0[j] * D1[i] (presumably deapodization — confirm
// against the host-side pipeline).
extern "C" __global__
void deapp2(complex<float> *A, long *N, float *D0, float *D1)
{
    // Grid-stride loops over both dimensions.
    for (int j = blockIdx.y * blockDim.y + threadIdx.y;
         j < N[0];
         j += blockDim.y * gridDim.y)
    {
        for (int i = blockIdx.x * blockDim.x + threadIdx.x;
             i < N[1];
             i += blockDim.x * gridDim.x)
        {
            A[j*N[1] + i] /= (D0[j] * D1[i]);
        }}
} | e369997db811f61082f054543289152cf415c9c7.cu | // Used in cupy based on example from here: https://github.com/cupy/cupy/tree/master/examples/gemm
// TODO: Add 2D functions
#include <cupy/complex.cuh>
// 2D gridding (non-uniform -> Cartesian scatter). This is a byte-identical
// duplicate of the grid2d kernel in the paired .hip row of this dataset
// chunk; see that copy for the full parameter description.
// NOTE(review): kernel[ri + 1] assumes a guard entry past index grid_mod —
// confirm on the host side.
extern "C" __global__
void grid2d(complex<float> *data, float *kr, float *ki, float *traj, float *dens,
            long n_points, long *N, float krad, long grid_mod, float *kernel)
{
    // Grid-stride loop over samples.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n_points;
         i += blockDim.x * gridDim.x)
    {
        if (i < n_points) {  // redundant: already implied by the loop condition
            // Normalized trajectory -> grid coordinates.
            float x = (traj[i * 3 + 0] + 0.5) * N[1];
            float y = (traj[i * 3 + 1] + 0.5) * N[0];
            int xmin = floor(x - krad);
            int ymin = floor(y - krad);
            int xmax = ceil(x + krad);
            int ymax = ceil(y + krad);
            // Kernel lookup variables (TODO: give clearer names)
            float kii, kdi;
            int ri;
            for (int iy = ymin; iy <= ymax; iy++)
            {
                if ((iy >= 0) && (iy < N[0]))
                {
                    // Linear interpolation into the kernel table (y axis).
                    float dy = abs(y - iy);
                    kii = dy / krad * grid_mod;
                    ri = floor(kii);
                    kdi = kii - ri;
                    float kvy = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                    for (int ix = xmin; ix <= xmax; ix++)
                    {
                        if ((ix >= 0) && (ix < N[1]))
                        {
                            // Linear interpolation (x axis).
                            float dx = abs(x - ix);
                            kii = dx / krad * grid_mod;
                            ri = floor(kii);
                            kdi = kii - ri;
                            float kvx = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                            // Weighted sample, scattered atomically.
                            float dr = data[i].real() * dens[i] * kvx * kvy;
                            float di = data[i].imag() * dens[i] * kvx * kvy;
                            atomicAdd(&kr[iy * N[1] + ix], dr);
                            atomicAdd(&ki[iy * N[1] + ix], di);
                        }
                    }
                }
            }
        }
    }
}
// 3D gridding (non-uniform -> Cartesian scatter). Byte-identical duplicate
// of the grid3d kernel in the paired .hip row of this dataset chunk; see
// that copy for the parameter description. Trajectory components map to
// (x, y, z) against (N[2], N[1], N[0]).
extern "C" __global__
void grid3d(complex<float> *data, float *kr, float *ki, float *traj, float *dens,
            long n_points, long *N, float krad, long grid_mod, float *kernel)
{
    // Grid-stride loop over samples.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n_points;
         i += blockDim.x * gridDim.x)
    {
        if (i < n_points) {  // redundant: already implied by the loop condition
            float x = (traj[i * 3 + 0] + 0.5) * N[2];
            float y = (traj[i * 3 + 1] + 0.5) * N[1];
            float z = (traj[i * 3 + 2] + 0.5) * N[0];
            int xmin = floor(x - krad);
            int ymin = floor(y - krad);
            int zmin = floor(z - krad);
            int xmax = ceil(x + krad);
            int ymax = ceil(y + krad);
            int zmax = ceil(z + krad);
            // Kernel lookup variables (TODO: give clearer names)
            float kii, kdi;
            int ri;
            for (int iz = zmin; iz <= zmax; iz++)
            {
                if ((iz >= 0) && (iz < N[0]))
                {
                    // Linear interpolation into the kernel table (z axis).
                    float dz = abs(z - iz);
                    kii = dz / krad * grid_mod;
                    ri = floor(kii);
                    kdi = kii - ri;
                    float kvz = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                    for (int iy = ymin; iy <= ymax; iy++)
                    {
                        if ((iy >= 0) && (iy < N[1]))
                        {
                            // Linear interpolation (y axis).
                            float dy = abs(y - iy);
                            kii = dy / krad * grid_mod;
                            ri = floor(kii);
                            kdi = kii - ri;
                            float kvy = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                            for (int ix = xmin; ix <= xmax; ix++)
                            {
                                if ((ix >= 0) && (ix < N[2]))
                                {
                                    // Linear interpolation (x axis).
                                    float dx = abs(x - ix);
                                    kii = dx / krad * grid_mod;
                                    ri = floor(kii);
                                    kdi = kii - ri;
                                    float kvx = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
                                    // Weighted sample, scattered atomically.
                                    float dr = data[i].real() * dens[i] * kvx * kvy * kvz;
                                    float di = data[i].imag() * dens[i] * kvx * kvy * kvz;
                                    atomicAdd(&kr[iz * N[2] * N[1] + iy * N[2] + ix], dr);
                                    atomicAdd(&ki[iz * N[2] * N[1] + iy * N[2] + ix], di);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
// Inverse gridding (interpolation) in 2-D: for each non-Cartesian sample,
// gather kernel-weighted values from the Cartesian k-space grid into data[i].
// Accumulates with += — data is presumably zeroed by the caller (confirm).
// N = {N[0], N[1]} with N[1] the fastest-varying (x) axis.
// NOTE(review): `dens` is accepted but never used in this kernel — density
// compensation is presumably applied only in the forward/spreading path; confirm.
extern "C" __global__
void igrid2d(complex<float> *data, complex<float> *kspace, float *traj, float *dens,
long n_points, long *N, float krad, long grid_mod, float *kernel)
{
// Grid-stride loop over sample points.
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n_points;
i += blockDim.x * gridDim.x)
{
// (guard is redundant with the loop condition above; kept as-is)
if (i < n_points) {
// Map normalized trajectory coordinates to (fractional) grid coordinates.
float x = (traj[i * 3 + 0] + 0.5) * N[1];
float y = (traj[i * 3 + 1] + 0.5) * N[0];
// Bounding box of grid cells possibly within the kernel radius.
int xmin = floor(x - krad);
int ymin = floor(y - krad);
int xmax = ceil(x + krad);
int ymax = ceil(y + krad);
// Kernel-table lookup temporaries: kii = fractional table index,
// ri = integer part, kdi = linear-interpolation weight.
float kii, kdi;
int ri;
for (int iy = ymin; iy <= ymax; iy++)
{
if ((iy >= 0) && (iy < N[0]))
{
// NOTE(review): at boundary cells dy can slightly exceed krad, so
// kernel[ri + 1] may read past entry grid_mod — confirm table padding.
float dy = abs(y - iy);
kii = dy / krad * grid_mod;
ri = floor(kii);
kdi = kii - ri;
float kvy = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
for (int ix = xmin; ix <= xmax; ix++)
{
if ((ix >= 0) && (ix < N[1]))
{
float dx = abs(x - ix);
kii = dx / krad * grid_mod;
ri = floor(kii);
kdi = kii - ri;
float kvx = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
// Gather: no atomics needed, each thread owns data[i].
data[i] += kspace[iy * N[1] + ix] * kvx * kvy;
}
}
}
}
}
}
}
// Inverse gridding (interpolation) in 3-D: for each non-Cartesian sample,
// gather kernel-weighted values from the Cartesian k-space volume into data[i].
// Accumulates with += — data is presumably zeroed by the caller (confirm).
// N = {N[0], N[1], N[2]} with N[2] the fastest-varying (x) axis.
// NOTE(review): `dens` is accepted but never used in this kernel — confirm
// density compensation is intentionally applied elsewhere.
extern "C" __global__
void igrid3d(complex<float> *data, complex<float> *kspace, float *traj, float *dens,
long n_points, long *N, float krad, long grid_mod, float *kernel)
{
// Grid-stride loop over sample points.
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n_points;
i += blockDim.x * gridDim.x)
{
// (guard is redundant with the loop condition above; kept as-is)
if (i < n_points) {
// Map normalized trajectory coordinates to (fractional) grid coordinates.
float x = (traj[i * 3 + 0] + 0.5) * N[2];
float y = (traj[i * 3 + 1] + 0.5) * N[1];
float z = (traj[i * 3 + 2] + 0.5) * N[0];
// Bounding box of cells possibly within the kernel radius.
int xmin = floor(x - krad);
int ymin = floor(y - krad);
int zmin = floor(z - krad);
int xmax = ceil(x + krad);
int ymax = ceil(y + krad);
int zmax = ceil(z + krad);
// Kernel-table lookup temporaries: kii = fractional table index,
// ri = integer part, kdi = linear-interpolation weight.
float kii, kdi;
int ri;
for (int iz = zmin; iz <= zmax; iz++)
{
if ((iz >= 0) && (iz < N[0]))
{
// NOTE(review): at boundary cells the distance can slightly exceed krad,
// so kernel[ri + 1] may read past entry grid_mod — confirm table padding.
float dz = abs(z - iz);
kii = dz / krad * grid_mod;
ri = floor(kii);
kdi = kii - ri;
float kvz = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
for (int iy = ymin; iy <= ymax; iy++)
{
if ((iy >= 0) && (iy < N[1]))
{
float dy = abs(y - iy);
kii = dy / krad * grid_mod;
ri = floor(kii);
kdi = kii - ri;
float kvy = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
for (int ix = xmin; ix <= xmax; ix++)
{
if ((ix >= 0) && (ix < N[2]))
{
float dx = abs(x - ix);
kii = dx / krad * grid_mod;
ri = floor(kii);
kdi = kii - ri;
float kvx = (kernel[ri] * (1 - kdi) + kernel[ri + 1] * kdi);
// Gather: no atomics needed, each thread owns data[i].
data[i] += kspace[iz * N[2] * N[1] + iy * N[2] + ix] * kvx * kvy * kvz;
}
}
}
}
}
}
}
}
}
// Deapodization of a 3-D volume: divide every voxel by the separable
// correction product D0[z] * D1[y] * D2[x].
// N = {N[0], N[1], N[2]} with N[2] the fastest-varying (x) dimension.
// Grid-stride loops in all three axes so any launch geometry covers the volume.
extern "C" __global__
void deapp3(complex<float> *A, long *N, float *D0, float *D1, float *D2)
{
    const long nz = N[0];
    const long ny = N[1];
    const long nx = N[2];
    for (long z = blockIdx.z * blockDim.z + threadIdx.z; z < nz;
         z += (long)blockDim.z * gridDim.z)
    {
        for (long y = blockIdx.y * blockDim.y + threadIdx.y; y < ny;
             y += (long)blockDim.y * gridDim.y)
        {
            for (long x = blockIdx.x * blockDim.x + threadIdx.x; x < nx;
                 x += (long)blockDim.x * gridDim.x)
            {
                // Row-major index: (z * ny + y) * nx + x == z*N[1]*N[2] + y*N[2] + x.
                A[(z * ny + y) * nx + x] /= (D0[z] * D1[y] * D2[x]);
            }
        }
    }
}
// Deapodization of a 2-D image: divide each pixel by the separable
// correction product D0[row] * D1[col]. N = {rows, cols}; grid-stride
// loops so any launch geometry covers the full image.
extern "C" __global__
void deapp2(complex<float> *A, long *N, float *D0, float *D1)
{
    const long rows = N[0];
    const long cols = N[1];
    for (long r = blockIdx.y * blockDim.y + threadIdx.y; r < rows;
         r += (long)blockDim.y * gridDim.y)
    {
        for (long c = blockIdx.x * blockDim.x + threadIdx.x; c < cols;
             c += (long)blockDim.x * gridDim.x)
        {
            A[r * cols + c] /= (D0[r] * D1[c]);
        }
    }
}
357020b823418a7f2cb98043dd76cc44e2557d2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/unique.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
// Binary predicate: two keys compare "equal" when they fall in the same
// decade (integer division by 10). Used to exercise the user-supplied
// predicate overloads of unique_by_key / unique_by_key_copy.
// Fix: both operands are now taken by const reference — the original mixed
// a by-value `x` with a by-reference `y` for no reason.
template<typename T>
struct is_equal_div_10_unique
{
__host__ __device__
bool operator()(const T& x, const T& y) const { return ((int) x / 10) == ((int) y / 10); }
};
// Load the fixed 9-element key fixture. Adjacent-duplicate runs are
// (11,11 | 21 | 20 | 21,21,21 | 37,37), so plain unique_by_key keeps
// five keys and the div-10 predicate keeps three.
template<typename Vector>
void initialize_keys(Vector& keys)
{
    const int key_data[9] = {11, 11, 21, 20, 21, 21, 21, 37, 37};
    keys.resize(9);
    for (int i = 0; i < 9; ++i)
        keys[i] = key_data[i];
}
// Load the value fixture: values[i] == i, so surviving values after
// unique_by_key identify the original positions of the kept keys.
template<typename Vector>
void initialize_values(Vector& values)
{
    values.resize(9);
    for (int i = 0; i < 9; ++i)
        values[i] = i;
}
// Single-thread device driver: runs the sequential unique_by_key on-device
// and stores the resulting (keys_end, values_end) iterator pair via `result`.
template<typename KeyIter, typename ValueIter, typename ResultIter>
__global__
void unique_by_key_kernel(KeyIter keys_first, KeyIter keys_last, ValueIter values_first, ResultIter result)
{
    result[0] = thrust::unique_by_key(thrust::seq, keys_first, keys_last, values_first);
}
// Predicate overload of the single-thread device driver for unique_by_key.
template<typename KeyIter, typename ValueIter, typename Predicate, typename ResultIter>
__global__
void unique_by_key_kernel(KeyIter keys_first, KeyIter keys_last, ValueIter values_first, Predicate pred, ResultIter result)
{
    result[0] = thrust::unique_by_key(thrust::seq, keys_first, keys_last, values_first, pred);
}
// Exercises thrust::unique_by_key with thrust::seq from inside a kernel
// (single thread). Fixture keys {11,11,21,20,21,21,21,37,37} with values 0..8.
void TestUniqueByKeyDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::value_type T;
Vector keys;
Vector values;
// The kernel returns the new (keys_end, values_end) pair through device memory.
typedef thrust::pair<typename Vector::iterator, typename Vector::iterator> iter_pair;
thrust::device_vector<iter_pair> new_last_vec(1);
iter_pair new_last;
// basic test: default equality collapses adjacent duplicates only,
// leaving keys 11,21,20,21,37 and the first value of each run: 0,2,3,4,7.
initialize_keys(keys); initialize_values(values);
hipLaunchKernelGGL(( unique_by_key_kernel), dim3(1),dim3(1), 0, 0, keys.begin(), keys.end(), values.begin(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last.first - keys.begin(), 5);
ASSERT_EQUAL(new_last.second - values.begin(), 5);
ASSERT_EQUAL(keys[0], 11);
ASSERT_EQUAL(keys[1], 21);
ASSERT_EQUAL(keys[2], 20);
ASSERT_EQUAL(keys[3], 21);
ASSERT_EQUAL(keys[4], 37);
ASSERT_EQUAL(values[0], 0);
ASSERT_EQUAL(values[1], 2);
ASSERT_EQUAL(values[2], 3);
ASSERT_EQUAL(values[3], 4);
ASSERT_EQUAL(values[4], 7);
// test BinaryPredicate: div-10 equality merges 21/20/21 runs into one decade,
// leaving keys 11,21,37 with first values 0,2,7.
initialize_keys(keys); initialize_values(values);
hipLaunchKernelGGL(( unique_by_key_kernel), dim3(1),dim3(1), 0, 0, keys.begin(), keys.end(), values.begin(), is_equal_div_10_unique<T>(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last.first - keys.begin(), 3);
ASSERT_EQUAL(new_last.second - values.begin(), 3);
ASSERT_EQUAL(keys[0], 11);
ASSERT_EQUAL(keys[1], 21);
ASSERT_EQUAL(keys[2], 37);
ASSERT_EQUAL(values[0], 0);
ASSERT_EQUAL(values[1], 2);
ASSERT_EQUAL(values[2], 7);
}
DECLARE_UNITTEST(TestUniqueByKeyDeviceSeq);
// Single-thread device driver for the copying variant: writes compacted
// keys/values to separate outputs and stores the end-iterator pair via `result`.
template<typename KeyIter, typename ValueIter, typename KeyOutIter, typename ValueOutIter, typename ResultIter>
__global__
void unique_by_key_copy_kernel(KeyIter keys_first, KeyIter keys_last, ValueIter values_first, KeyOutIter keys_result, ValueOutIter values_result, ResultIter result)
{
    result[0] = thrust::unique_by_key_copy(thrust::seq, keys_first, keys_last, values_first, keys_result, values_result);
}
// Predicate overload of the single-thread device driver for unique_by_key_copy.
template<typename KeyIter, typename ValueIter, typename KeyOutIter, typename ValueOutIter, typename Predicate, typename ResultIter>
__global__
void unique_by_key_copy_kernel(KeyIter keys_first, KeyIter keys_last, ValueIter values_first, KeyOutIter keys_result, ValueOutIter values_result, Predicate pred, ResultIter result)
{
    result[0] = thrust::unique_by_key_copy(thrust::seq, keys_first, keys_last, values_first, keys_result, values_result, pred);
}
// Exercises thrust::unique_by_key_copy with thrust::seq from inside a kernel
// (single thread). Same fixture and expectations as TestUniqueByKeyDeviceSeq,
// but the input stays intact and results land in separate output vectors.
void TestUniqueCopyByKeyDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::value_type T;
Vector keys;
Vector values;
// The kernel returns the new (keys_end, values_end) pair through device memory.
typedef thrust::pair<typename Vector::iterator, typename Vector::iterator> iter_pair;
thrust::device_vector<iter_pair> new_last_vec(1);
iter_pair new_last;
// basic test: adjacent duplicates collapse to keys 11,21,20,21,37 / values 0,2,3,4,7.
initialize_keys(keys); initialize_values(values);
Vector output_keys(keys.size());
Vector output_values(values.size());
hipLaunchKernelGGL(( unique_by_key_copy_kernel), dim3(1),dim3(1), 0, 0, keys.begin(), keys.end(), values.begin(), output_keys.begin(), output_values.begin(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last.first - output_keys.begin(), 5);
ASSERT_EQUAL(new_last.second - output_values.begin(), 5);
ASSERT_EQUAL(output_keys[0], 11);
ASSERT_EQUAL(output_keys[1], 21);
ASSERT_EQUAL(output_keys[2], 20);
ASSERT_EQUAL(output_keys[3], 21);
ASSERT_EQUAL(output_keys[4], 37);
ASSERT_EQUAL(output_values[0], 0);
ASSERT_EQUAL(output_values[1], 2);
ASSERT_EQUAL(output_values[2], 3);
ASSERT_EQUAL(output_values[3], 4);
ASSERT_EQUAL(output_values[4], 7);
// test BinaryPredicate: div-10 equality leaves keys 11,21,37 / values 0,2,7.
initialize_keys(keys); initialize_values(values);
hipLaunchKernelGGL(( unique_by_key_copy_kernel), dim3(1),dim3(1), 0, 0, keys.begin(), keys.end(), values.begin(), output_keys.begin(), output_values.begin(), is_equal_div_10_unique<T>(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last.first - output_keys.begin(), 3);
ASSERT_EQUAL(new_last.second - output_values.begin(), 3);
ASSERT_EQUAL(output_keys[0], 11);
ASSERT_EQUAL(output_keys[1], 21);
ASSERT_EQUAL(output_keys[2], 37);
ASSERT_EQUAL(output_values[0], 0);
ASSERT_EQUAL(output_values[1], 2);
ASSERT_EQUAL(output_values[2], 7);
}
DECLARE_UNITTEST(TestUniqueCopyByKeyDeviceSeq);
| 357020b823418a7f2cb98043dd76cc44e2557d2c.cu | #include <unittest/unittest.h>
#include <thrust/unique.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
// Binary predicate: two keys compare "equal" when they fall in the same
// decade (integer division by 10). Used to exercise the user-supplied
// predicate overloads of unique_by_key / unique_by_key_copy.
// Fix: both operands are now taken by const reference — the original mixed
// a by-value `x` with a by-reference `y` for no reason.
template<typename T>
struct is_equal_div_10_unique
{
__host__ __device__
bool operator()(const T& x, const T& y) const { return ((int) x / 10) == ((int) y / 10); }
};
// Load the fixed 9-element key fixture. Adjacent-duplicate runs are
// (11,11 | 21 | 20 | 21,21,21 | 37,37), so plain unique_by_key keeps
// five keys and the div-10 predicate keeps three.
template<typename Vector>
void initialize_keys(Vector& keys)
{
    const int key_data[9] = {11, 11, 21, 20, 21, 21, 21, 37, 37};
    keys.resize(9);
    for (int i = 0; i < 9; ++i)
        keys[i] = key_data[i];
}
// Load the value fixture: values[i] == i, so surviving values after
// unique_by_key identify the original positions of the kept keys.
template<typename Vector>
void initialize_values(Vector& values)
{
    values.resize(9);
    for (int i = 0; i < 9; ++i)
        values[i] = i;
}
// Single-thread device driver: runs the sequential unique_by_key on-device
// and stores the resulting (keys_end, values_end) iterator pair via `result`.
template<typename KeyIter, typename ValueIter, typename ResultIter>
__global__
void unique_by_key_kernel(KeyIter keys_first, KeyIter keys_last, ValueIter values_first, ResultIter result)
{
    result[0] = thrust::unique_by_key(thrust::seq, keys_first, keys_last, values_first);
}
// Predicate overload of the single-thread device driver for unique_by_key.
template<typename KeyIter, typename ValueIter, typename Predicate, typename ResultIter>
__global__
void unique_by_key_kernel(KeyIter keys_first, KeyIter keys_last, ValueIter values_first, Predicate pred, ResultIter result)
{
    result[0] = thrust::unique_by_key(thrust::seq, keys_first, keys_last, values_first, pred);
}
// Exercises thrust::unique_by_key with thrust::seq from inside a kernel
// (single thread). Fixture keys {11,11,21,20,21,21,21,37,37} with values 0..8.
void TestUniqueByKeyDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::value_type T;
Vector keys;
Vector values;
// The kernel returns the new (keys_end, values_end) pair through device memory.
typedef thrust::pair<typename Vector::iterator, typename Vector::iterator> iter_pair;
thrust::device_vector<iter_pair> new_last_vec(1);
iter_pair new_last;
// basic test: default equality collapses adjacent duplicates only,
// leaving keys 11,21,20,21,37 and the first value of each run: 0,2,3,4,7.
initialize_keys(keys); initialize_values(values);
unique_by_key_kernel<<<1,1>>>(keys.begin(), keys.end(), values.begin(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last.first - keys.begin(), 5);
ASSERT_EQUAL(new_last.second - values.begin(), 5);
ASSERT_EQUAL(keys[0], 11);
ASSERT_EQUAL(keys[1], 21);
ASSERT_EQUAL(keys[2], 20);
ASSERT_EQUAL(keys[3], 21);
ASSERT_EQUAL(keys[4], 37);
ASSERT_EQUAL(values[0], 0);
ASSERT_EQUAL(values[1], 2);
ASSERT_EQUAL(values[2], 3);
ASSERT_EQUAL(values[3], 4);
ASSERT_EQUAL(values[4], 7);
// test BinaryPredicate: div-10 equality merges the 21/20/21 runs into one
// decade, leaving keys 11,21,37 with first values 0,2,7.
initialize_keys(keys); initialize_values(values);
unique_by_key_kernel<<<1,1>>>(keys.begin(), keys.end(), values.begin(), is_equal_div_10_unique<T>(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last.first - keys.begin(), 3);
ASSERT_EQUAL(new_last.second - values.begin(), 3);
ASSERT_EQUAL(keys[0], 11);
ASSERT_EQUAL(keys[1], 21);
ASSERT_EQUAL(keys[2], 37);
ASSERT_EQUAL(values[0], 0);
ASSERT_EQUAL(values[1], 2);
ASSERT_EQUAL(values[2], 7);
}
DECLARE_UNITTEST(TestUniqueByKeyDeviceSeq);
// Single-thread device driver for the copying variant: writes compacted
// keys/values to separate outputs and stores the end-iterator pair via `result`.
template<typename KeyIter, typename ValueIter, typename KeyOutIter, typename ValueOutIter, typename ResultIter>
__global__
void unique_by_key_copy_kernel(KeyIter keys_first, KeyIter keys_last, ValueIter values_first, KeyOutIter keys_result, ValueOutIter values_result, ResultIter result)
{
    result[0] = thrust::unique_by_key_copy(thrust::seq, keys_first, keys_last, values_first, keys_result, values_result);
}
// Predicate overload of the single-thread device driver for unique_by_key_copy.
template<typename KeyIter, typename ValueIter, typename KeyOutIter, typename ValueOutIter, typename Predicate, typename ResultIter>
__global__
void unique_by_key_copy_kernel(KeyIter keys_first, KeyIter keys_last, ValueIter values_first, KeyOutIter keys_result, ValueOutIter values_result, Predicate pred, ResultIter result)
{
    result[0] = thrust::unique_by_key_copy(thrust::seq, keys_first, keys_last, values_first, keys_result, values_result, pred);
}
// Exercises thrust::unique_by_key_copy with thrust::seq from inside a kernel
// (single thread). Same fixture and expectations as TestUniqueByKeyDeviceSeq,
// but the input stays intact and results land in separate output vectors.
void TestUniqueCopyByKeyDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::value_type T;
Vector keys;
Vector values;
// The kernel returns the new (keys_end, values_end) pair through device memory.
typedef thrust::pair<typename Vector::iterator, typename Vector::iterator> iter_pair;
thrust::device_vector<iter_pair> new_last_vec(1);
iter_pair new_last;
// basic test: adjacent duplicates collapse to keys 11,21,20,21,37 / values 0,2,3,4,7.
initialize_keys(keys); initialize_values(values);
Vector output_keys(keys.size());
Vector output_values(values.size());
unique_by_key_copy_kernel<<<1,1>>>(keys.begin(), keys.end(), values.begin(), output_keys.begin(), output_values.begin(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last.first - output_keys.begin(), 5);
ASSERT_EQUAL(new_last.second - output_values.begin(), 5);
ASSERT_EQUAL(output_keys[0], 11);
ASSERT_EQUAL(output_keys[1], 21);
ASSERT_EQUAL(output_keys[2], 20);
ASSERT_EQUAL(output_keys[3], 21);
ASSERT_EQUAL(output_keys[4], 37);
ASSERT_EQUAL(output_values[0], 0);
ASSERT_EQUAL(output_values[1], 2);
ASSERT_EQUAL(output_values[2], 3);
ASSERT_EQUAL(output_values[3], 4);
ASSERT_EQUAL(output_values[4], 7);
// test BinaryPredicate: div-10 equality leaves keys 11,21,37 / values 0,2,7.
initialize_keys(keys); initialize_values(values);
unique_by_key_copy_kernel<<<1,1>>>(keys.begin(), keys.end(), values.begin(), output_keys.begin(), output_values.begin(), is_equal_div_10_unique<T>(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last.first - output_keys.begin(), 3);
ASSERT_EQUAL(new_last.second - output_values.begin(), 3);
ASSERT_EQUAL(output_keys[0], 11);
ASSERT_EQUAL(output_keys[1], 21);
ASSERT_EQUAL(output_keys[2], 37);
ASSERT_EQUAL(output_values[0], 0);
ASSERT_EQUAL(output_values[1], 2);
ASSERT_EQUAL(output_values[2], 7);
}
DECLARE_UNITTEST(TestUniqueCopyByKeyDeviceSeq);
|
fc722038e7b6722a05e712b0697169237a441a72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Forward pass of a linear polynom layer:
//   out[dim * batchSize + b] += sum over polynoms of probs[p][b] * values[p][dim].
// Launch layout implied by the indexing below:
//   blockIdx.x strides over polynoms (partial sums -> atomicAdd at the end),
//   blockIdx.y selects the output dimension,
//   threadIdx.x selects the batch element (batchSize must fit in one block).
// probs is [polynomCount x batchSize] row-major; values is
// [polynomCount x outputDim] row-major (implied by the pointer strides).
__global__ void LinearPolynomForwardImpl( const float* probs, int batchSize, const float* values, int polynomCount, int outputDim, float* out) {
// NOTE(review): the indexing below writes out[dim * batchSize + batch]
// (dim-major), which contradicts the original comment claiming a
// batch-major layout — verify against the caller:
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
int polynomId = blockIdx.x;
const int dimId = blockIdx.y;
int tid = threadIdx.x;
// Threads beyond the batch have no sample to process.
if (tid >= batchSize) {
return;
}
float sum = 0;
probs += threadIdx.x;
values += dimId;
// Each x-block accumulates a gridDim.x-strided subset of the polynoms.
while (polynomId < polynomCount) {
const float polynomProb = __ldg(probs + polynomId * batchSize); // includes x
const float v = __ldg(values + polynomId * outputDim);
sum += polynomProb * v;
polynomId += gridDim.x;
}
// Different blockIdx.x values write the same out element -> atomic.
atomicAdd(out + dimId * batchSize + threadIdx.x, sum);
} | fc722038e7b6722a05e712b0697169237a441a72.cu | #include "includes.h"
// Forward pass of a linear polynom layer (CUDA twin of the HIP version above):
//   out[dim * batchSize + b] += sum over polynoms of probs[p][b] * values[p][dim].
// blockIdx.x strides over polynoms (partial sums -> atomicAdd), blockIdx.y is
// the output dimension, threadIdx.x the batch element (batchSize <= blockDim.x).
__global__ void LinearPolynomForwardImpl( const float* probs, int batchSize, const float* values, int polynomCount, int outputDim, float* out) {
// NOTE(review): the indexing below writes out[dim * batchSize + batch]
// (dim-major), which contradicts the original comment claiming a
// batch-major layout — verify against the caller:
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
int polynomId = blockIdx.x;
const int dimId = blockIdx.y;
int tid = threadIdx.x;
// Threads beyond the batch have no sample to process.
if (tid >= batchSize) {
return;
}
float sum = 0;
probs += threadIdx.x;
values += dimId;
// Each x-block accumulates a gridDim.x-strided subset of the polynoms.
while (polynomId < polynomCount) {
const float polynomProb = __ldg(probs + polynomId * batchSize); // includes x
const float v = __ldg(values + polynomId * outputDim);
sum += polynomProb * v;
polynomId += gridDim.x;
}
// Different blockIdx.x values write the same out element -> atomic.
atomicAdd(out + dimId * batchSize + threadIdx.x, sum);
} |
713ada9abe8f3dff6dab0626da1940de5c6750d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/LaunchUtils.h>
#include <ATen/AccumulateType.h>
#include <THH/THHReduceApplyUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHNumerics.cuh>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
namespace at { namespace native {
namespace {
#define MAX_NUM_BLOCKS 64
// Normalizes the L1 norm of every row to 1; used by multinomial
// In-place L1 row normalization: each block owns rows blockIdx.x,
// blockIdx.x + gridDim.x, ...; threads cooperatively sum a row (asserting no
// negative/NaN entries), block-reduce the partials in dynamic shared memory,
// then divide the row through by its sum. Rows summing to zero are left
// untouched. Requires blockDim.x * sizeof(scalar_t) dynamic shared memory.
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void renormRowsL1(scalar_t* dist, long rows, long cols) {
extern __shared__ unsigned char my_smem[];
scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem);
scalar_t zero = static_cast<scalar_t>(0);
scalar_t val;
for (int64_t row = blockIdx.x; row < rows; row += gridDim.x) {
scalar_t sum = static_cast<scalar_t>(0);
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
val = dist[row * cols + col];
CUDA_KERNEL_ASSERT(!THCNumerics<scalar_t>::lt(val, zero)); // ! < 0 for NaN handling
sum = sum + val;
}
sum = reduceBlock(smem, blockDim.x, sum, ReduceAdd<scalar_t>(), zero);
if (threadIdx.x == 0) {
// NOTE(review): this re-asserts only thread 0's last-read val, which was
// already checked in the loop above — looks redundant; confirm intent.
CUDA_KERNEL_ASSERT(!THCNumerics<scalar_t>::lt(val, zero)); // ! < 0 for NaN handling
smem[0] = sum;
}
__syncthreads();
// Broadcast the row sum to every thread via shared memory.
sum = smem[0];
if (sum > zero) {
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
dist[row * cols + col] = dist[row * cols + col] / sum;
}
}
}
}
// Host launcher for renormRowsL1: validates t is 2-D, caps the grid at
// 4 blocks per SM and the block at min(cols, maxThreadsPerBlock), and
// dispatches over floating dtypes (including half).
void renormRows(Tensor& t) {
TORCH_CHECK(t.dim() == 2);
int64_t rows = t.size(0);
int64_t cols = t.size(1);
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(cols < maxThreads ? cols : maxThreads);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(t.scalar_type(), "renormRows_cuda", [&] {
hipLaunchKernelGGL(( renormRowsL1<scalar_t>)
, dim3(grid), dim3(block), block.x * sizeof(scalar_t),
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), t.data_ptr<scalar_t>(),
rows, cols);
});
}
// Lower-bound binary search over the inclusive prefix sum `cumdist`:
// finds the smallest index whose cumulative mass is >= val, then walks
// backwards over trailing zero-probability categories (using the raw `dist`)
// so a zero-mass bucket is never returned. `size` is the category count.
template <typename scalar_t>
__device__ int binarySearchForMultinomial(scalar_t* cumdist,
scalar_t* dist,
int size,
scalar_t val) {
int start = 0;
int end = size;
// cumdist[size - 1] = 0 => all zero prob dist
CUDA_KERNEL_ASSERT(cumdist[size - 1] > static_cast<scalar_t>(0));
while (end - start > 0) {
int mid = start + (end - start) / 2;
scalar_t midVal = cumdist[mid];
if (midVal < val) {
start = mid + 1;
} else {
end = mid;
}
}
if (start == size) {
// No probability mass or precision problems; just return the
// first non-zero element by setting start to size-1 here,
// the code below will move it to the last non-zero probability
// this actually can happen when the random number is 1
// (github pytorch issue #4858).
start = size - 1;
}
// Skip trailing zero-probability categories.
while(start >= 1 && dist[start] == 0) start--;
return start;
}
// Multinomial sampling with replacement: each thread draws a Philox uniform
// and binary-searches the row's normalized prefix sum to pick a category.
// blockIdx.y strides over distributions; the x-dimension strides over the
// totalSamples requested per distribution. `seeds` is the (seed, offset)
// pair reserved by the host launcher under the generator lock.
template <typename scalar_t>
__global__ void
sampleMultinomialWithReplacement(std::pair<uint64_t, uint64_t> seeds,
int totalSamples,
int64_t* dest,
int64_t distributions,
int categories,
scalar_t* normDistPrefixSum,
scalar_t* normDist) {
// At the moment, each warp computes one sample value in the binary
// search due to divergence. It seems possible to compute multiple
// values and limit divergence though later on.
// global index formula for 2D grid of 1D blocks
int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(seeds.first, idx, seeds.second, &state);
// The block determines the distribution for which we generate a point
for (int64_t curDist = blockIdx.y;
curDist < distributions;
curDist += gridDim.y) {
for (int sample = blockIdx.x*blockDim.x + threadIdx.x;
sample < totalSamples; sample += blockDim.x*gridDim.x) {
//we are losing 3 out of 4 generated numbers but it's ok
//this kernel is not very efficient anyway
auto rand = hiprand_uniform4(&state);
scalar_t r = static_cast<scalar_t>(rand.x);
// Find the bucket that a uniform sample lies in
int choice = binarySearchForMultinomial<scalar_t>(
normDistPrefixSum + curDist * categories,
normDist + curDist * categories,
categories,
r);
dest[curDist * totalSamples + sample] = choice;
}
}
}
// Single-sample multinomial without a prefix-sum tensor: one block per
// distribution. Pass 1 block-reduces the (asserted non-negative, finite)
// row sum; the pre-drawn uniform `sampled[curDist]` is then located by
// scanning the normalized probabilities chunk by chunk with an in-shared-
// memory inclusive prefix sum; the thread whose bucket straddles the sample
// writes the category. A final backward scan by thread 0 covers the rare
// rounding case where no bucket matched.
// Dynamic shared memory: blockDim.x * (sizeof(scalar_t) + sizeof(accscalar_t)).
template <typename scalar_t, typename accscalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void
sampleMultinomialOnce(int64_t* dest,
int64_t distributions,
int categories,
scalar_t* sampled,
scalar_t* dist,
int stride_dist, // dist->stride(0)
int stride_categories // dist->stride(1)
) {
extern __shared__ unsigned char my_smem[];
__shared__ bool found;
// Shared Memory hold blockdim.x T for holding the cumulative sum,
// blockDim.x AccT for normalizing the probabilities,
scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem);
accscalar_t *asmem = reinterpret_cast<accscalar_t *>(&my_smem[blockDim.x * sizeof(scalar_t)]);
accscalar_t accZero = static_cast<accscalar_t>(0);
scalar_t zero = static_cast<scalar_t>(0);
for (int64_t curDist = blockIdx.x;
curDist < distributions; curDist += gridDim.x) {
// Each block handles one distribution
// First pass, find the total sum of the distribution
accscalar_t sum = accZero;
scalar_t val;
for (int cat = threadIdx.x; cat < categories; cat += blockDim.x) {
val = dist[curDist * stride_dist + cat * stride_categories];
CUDA_KERNEL_ASSERT(val >= zero);
CUDA_KERNEL_ASSERT(!THCNumerics<scalar_t>::isinf(val));
CUDA_KERNEL_ASSERT(!THCNumerics<scalar_t>::isnan(val));
sum = sum + static_cast<accscalar_t>(val);
}
// threadIdx.x == 0 has the sum value from this
sum = reduceBlock(asmem, blockDim.x, sum, ReduceAdd<accscalar_t>(), accZero);
// Broadcast sum and sample value
if (threadIdx.x == 0) {
// Make sure the sum of our distribution didn't overflow
CUDA_KERNEL_ASSERT(!THCNumerics<accscalar_t>::isinf(sum));
CUDA_KERNEL_ASSERT(sum > accZero);
asmem[0] = sum;
smem[0] = sampled[curDist];
}
__syncthreads();
sum = asmem[0];
scalar_t sample = smem[0];
__syncthreads();
if (sum == accZero) {
// Choose the first element
if (threadIdx.x == 0) {
dest[curDist] = 0;
}
continue;
}
// Walk the categories in blockDim.x-sized chunks, carrying the running
// cumulative probability across chunks in prevHighProb.
int chunks = (categories + (int)blockDim.x - 1) / blockDim.x;
scalar_t prevHighProb = zero;
found = false;
for (int chunk = 0; chunk < chunks && !found; ++chunk) {
// All threads in bounds load a value
int cat = chunk * blockDim.x + threadIdx.x;
accscalar_t a_dist_val = cat < categories ?
static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum :
accZero;
scalar_t dist_val = static_cast<scalar_t>(a_dist_val);
smem[threadIdx.x] = dist_val;
__syncthreads();
// Perform an inclusive prefix sum of the shared memory contents
for (int offset = 1; offset < blockDim.x; offset *= 2) {
scalar_t val = zero;
if (threadIdx.x >= offset) {
val = smem[threadIdx.x - offset] + smem[threadIdx.x];
}
__syncthreads();
if (threadIdx.x >= offset) {
smem[threadIdx.x] = val;
}
__syncthreads();
}
// Each thread will check to see if the sample falls in its
// bucket
scalar_t curBucket = smem[threadIdx.x] + prevHighProb;
scalar_t prevBucket =
threadIdx.x == 0 ? prevHighProb :
smem[threadIdx.x - 1] + prevHighProb;
// At most one thread can satisfy this (buckets partition [0, 1)).
bool inBucket =
(cat < categories) &&
(!(sample >= curBucket) &&
(sample >= prevBucket) &&
(dist_val > zero));
if (inBucket) {
// We're done; we have the sample
// Torch indices are 1-based
dest[curDist] = cat;
found = true;
}
// Store the previous scan's high value for future use
prevHighProb = prevHighProb + smem[blockDim.x - 1];
__syncthreads();
}
if (threadIdx.x == 0 && !found) {
// This should address a rare bug where we don't select a valid index. This likely occurs when
// due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1, but
// and our uniform sample is greater than this value. In this case we likely have unitialized memory
// in dest[curDist]. So basically we will loop through the distribution and pick the largest index
// where the distribution is non-zero. This is obviously terribly inefficient, but due to the
// rarity in which this occurs, this should not be an issue.
for (int cat = categories - 1; cat >= 0; --cat) {
if (dist[curDist * stride_dist + cat * stride_categories] > zero) {
dest[curDist] = cat;
break;
}
}
}
}
}
// CUDA entry point for torch.multinomial. A 1-D input is viewed as [1, C];
// result is resized to [numDist, n_sample] (squeezed back for 1-D input).
// Fast path: n_sample == 1 and the per-block shared memory fits ->
// allocation-light sampleMultinomialOnce driven by pre-drawn uniforms.
// Slow path: renormalize rows, build an inclusive prefix sum, and (for
// with_replacement) launch sampleMultinomialWithReplacement with Philox
// state reserved under the generator lock.
// NOTE(review): in the slow path nothing is launched when with_replacement
// is false — sampling without replacement is presumably decomposed upstream;
// confirm before relying on this entry point directly.
void multinomial_kernel_impl(Tensor& result, const Tensor& self, const int64_t n_sample, const bool with_replacement, c10::optional<Generator> generator) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(generator, cuda::detail::getDefaultCUDAGenerator());
int inputSize = self.dim();
int64_t numDist =
inputSize == 1 ? 1 : self.size(0);
int numCategories =
inputSize == 1 ? self.size(0) : self.size(1);
// Restructure data for 2d
auto self_v = inputSize == 1 ? self.view({numDist, numCategories}) : self;
result.resize_({numDist, n_sample});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self_v.scalar_type(), "multinomial_kernel_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
// sampleMultinomialOnce needs blockDim.x * (scalar + accumulator) bytes.
int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads)
* (sizeof(scalar_t) + sizeof(accscalar_t));
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
Tensor sampled = native::empty_cuda({numDist, n_sample}, optTypeMetaToScalarType(self_v.options().dtype_opt()),
self_v.options().layout_opt(), self_v.options().device_opt(),
self_v.options().pinned_memory_opt());
at::native::uniform_(sampled, 0.0, 1.0, generator);
dim3 block(numCategories < maxThreads ? numCategories : maxThreads);
dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4);
hipLaunchKernelGGL(( sampleMultinomialOnce<scalar_t, accscalar_t>)
, dim3(grid), dim3(block),
requiredShared,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<int64_t>(),
numDist,
numCategories,
sampled.data_ptr<scalar_t>(),
self_v.data_ptr<scalar_t>(),
self_v.stride(0),
self_v.stride(1)
);
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
Tensor origDist = native::empty_like(self_v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
origDist.copy_(self_v);
Tensor normDist = native::empty_like(self_v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor prefixSum = native::empty_like(self_v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// Renorm along rows
normDist.copy_(origDist);
renormRows(normDist);
// Prefix sum along rows
at::_cumsum_out(prefixSum, normDist, 1);
std::pair<uint64_t, uint64_t> rng_engine_inputs;
if (with_replacement) {
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(128);
// Each block will generate a sample from one
// distribution concurrently.
int grid_y=std::min<int>(numDist, at::cuda::getCurrentDeviceProperties()->maxGridSize[1]);
dim3 grid((n_sample-1)/block.x+1, grid_y);
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
// each thread generates a single sample for (numdist/numblocks.y) distributions, however, since we have to use
// hiprand_uniform4 (See Note [Register spilling in hiprand call for CUDA < 10]),
// offset is 4 times that.
auto offset = ((numDist-1)/grid.y+1)*4;
rng_engine_inputs = gen->philox_engine_inputs(offset);
}
// Sample with replacement
hipLaunchKernelGGL(( sampleMultinomialWithReplacement)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
rng_engine_inputs,
n_sample,
result.data_ptr<int64_t>(),
numDist, numCategories,
prefixSum.data_ptr<scalar_t>(),
normDist.data_ptr<scalar_t>());
}
}
});
AT_CUDA_CHECK(hipGetLastError());
if (inputSize == 1) {
result.resize_({n_sample});
}
}
}
REGISTER_DISPATCH(multinomial_stub, &multinomial_kernel_impl);
}}
| 713ada9abe8f3dff6dab0626da1940de5c6750d9.cu | #include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/LaunchUtils.h>
#include <ATen/AccumulateType.h>
#include <THC/THCReduceApplyUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCNumerics.cuh>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
namespace at { namespace native {
namespace {
#define MAX_NUM_BLOCKS 200
// Normalizes the L1 norm of every row to 1; used by multinomial
// In-place L1 row normalization (CUDA twin of the HIP version above):
// one block per gridDim.x-strided set of rows; block-reduce the row sum in
// dynamic shared memory, then divide through. Zero-sum rows are untouched.
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void renormRowsL1(scalar_t* dist, long rows, long cols) {
extern __shared__ unsigned char my_smem[];
scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem);
scalar_t zero = static_cast<scalar_t>(0);
scalar_t val;
for (int64_t row = blockIdx.x; row < rows; row += gridDim.x) {
scalar_t sum = static_cast<scalar_t>(0);
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
val = dist[row * cols + col];
CUDA_KERNEL_ASSERT(!THCNumerics<scalar_t>::lt(val, zero)); // ! < 0 for NaN handling
sum = sum + val;
}
sum = reduceBlock(smem, blockDim.x, sum, ReduceAdd<scalar_t>(), zero);
if (threadIdx.x == 0) {
// NOTE(review): re-asserts only thread 0's last-read val — looks redundant.
CUDA_KERNEL_ASSERT(!THCNumerics<scalar_t>::lt(val, zero)); // ! < 0 for NaN handling
smem[0] = sum;
}
__syncthreads();
sum = smem[0];
if (sum > zero) {
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
dist[row * cols + col] = dist[row * cols + col] / sum;
}
}
}
}
// Host launcher for renormRowsL1 (CUDA twin): grid capped at 4 blocks/SM,
// block at min(cols, maxThreadsPerBlock); dispatches over floating dtypes.
void renormRows(Tensor& t) {
TORCH_CHECK(t.dim() == 2);
int64_t rows = t.size(0);
int64_t cols = t.size(1);
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(cols < maxThreads ? cols : maxThreads);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(t.scalar_type(), "renormRows_cuda", [&] {
renormRowsL1<scalar_t>
<<<grid, block, block.x * sizeof(scalar_t),
at::cuda::getCurrentCUDAStream()>>>(t.data_ptr<scalar_t>(),
rows, cols);
});
}
// Lower-bound binary search over the inclusive prefix sum `cumdist` (CUDA
// twin): smallest index with cumulative mass >= val, stepping back over
// trailing zero-probability categories via the raw `dist`.
template <typename scalar_t>
__device__ int binarySearchForMultinomial(scalar_t* cumdist,
scalar_t* dist,
int size,
scalar_t val) {
int start = 0;
int end = size;
// cumdist[size - 1] = 0 => all zero prob dist
CUDA_KERNEL_ASSERT(cumdist[size - 1] > static_cast<scalar_t>(0));
while (end - start > 0) {
int mid = start + (end - start) / 2;
scalar_t midVal = cumdist[mid];
if (midVal < val) {
start = mid + 1;
} else {
end = mid;
}
}
if (start == size) {
// No probability mass or precision problems; just return the
// first non-zero element by setting start to size-1 here,
// the code below will move it to the last non-zero probability
// this actually can happen when the random number is 1
// (github pytorch issue #4858).
start = size - 1;
}
// Skip trailing zero-probability categories.
while(start >= 1 && dist[start] == 0) start--;
return start;
}
// Multinomial sampling with replacement (CUDA twin): one Philox uniform per
// drawn sample, located in the row's normalized prefix sum by binary search.
// blockIdx.y strides over distributions, the x-dimension over samples.
template <typename scalar_t>
__global__ void
sampleMultinomialWithReplacement(std::pair<uint64_t, uint64_t> seeds,
int totalSamples,
int64_t* dest,
int64_t distributions,
int categories,
scalar_t* normDistPrefixSum,
scalar_t* normDist) {
// At the moment, each warp computes one sample value in the binary
// search due to divergence. It seems possible to compute multiple
// values and limit divergence though later on.
// global index formula for 2D grid of 1D blocks
int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(seeds.first, idx, seeds.second, &state);
// The block determines the distribution for which we generate a point
for (int64_t curDist = blockIdx.y;
curDist < distributions;
curDist += gridDim.y) {
for (int sample = blockIdx.x*blockDim.x + threadIdx.x;
sample < totalSamples; sample += blockDim.x*gridDim.x) {
//we are losing 3 out of 4 generated numbers but it's ok
//this kernel is not very efficient anyway
auto rand = curand_uniform4(&state);
scalar_t r = static_cast<scalar_t>(rand.x);
// Find the bucket that a uniform sample lies in
int choice = binarySearchForMultinomial<scalar_t>(
normDistPrefixSum + curDist * categories,
normDist + curDist * categories,
categories,
r);
dest[curDist * totalSamples + sample] = choice;
}
}
}
// Draw exactly ONE sample per distribution without materializing a
// prefix-sum tensor. Each block owns one distribution: it reduces the
// total probability mass, then scans the (normalized) pdf in
// blockDim.x-wide chunks, building an inclusive cdf chunk in shared
// memory, until the pre-drawn uniform value in `sampled[curDist]` lands
// in some thread's bucket. Shared memory layout: blockDim.x scalar_t
// (cdf chunk) followed by blockDim.x accscalar_t (reduction scratch).
template <typename scalar_t, typename accscalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void
sampleMultinomialOnce(int64_t* dest,
int64_t distributions,
int categories,
scalar_t* sampled,
scalar_t* dist,
int stride_dist, // dist->stride(0)
int stride_categories // dist->stride(1)
) {
extern __shared__ unsigned char my_smem[];
__shared__ bool found;
// Shared memory holds blockDim.x scalar_t for the cumulative sum and
// blockDim.x accscalar_t for normalizing the probabilities.
scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem);
accscalar_t *asmem = reinterpret_cast<accscalar_t *>(&my_smem[blockDim.x * sizeof(scalar_t)]);
accscalar_t accZero = static_cast<accscalar_t>(0);
scalar_t zero = static_cast<scalar_t>(0);
for (int64_t curDist = blockIdx.x;
curDist < distributions; curDist += gridDim.x) {
// Each block handles one distribution
// First pass, find the total sum of the distribution
accscalar_t sum = accZero;
scalar_t val;
for (int cat = threadIdx.x; cat < categories; cat += blockDim.x) {
val = dist[curDist * stride_dist + cat * stride_categories];
// Probabilities must be finite and non-negative.
CUDA_KERNEL_ASSERT(val >= zero);
CUDA_KERNEL_ASSERT(!THCNumerics<scalar_t>::isinf(val));
CUDA_KERNEL_ASSERT(!THCNumerics<scalar_t>::isnan(val));
sum = sum + static_cast<accscalar_t>(val);
}
// threadIdx.x == 0 has the sum value from this
sum = reduceBlock(asmem, blockDim.x, sum, ReduceAdd<accscalar_t>(), accZero);
// Broadcast sum and sample value
if (threadIdx.x == 0) {
// Make sure the sum of our distribution didn't overflow
CUDA_KERNEL_ASSERT(!THCNumerics<accscalar_t>::isinf(sum));
CUDA_KERNEL_ASSERT(sum > accZero);
asmem[0] = sum;
smem[0] = sampled[curDist];
}
__syncthreads();
sum = asmem[0];
scalar_t sample = smem[0];
__syncthreads();
if (sum == accZero) {
// Choose the first element
if (threadIdx.x == 0) {
dest[curDist] = 0;
}
continue;
}
// Scan the pdf chunk by chunk until the sample's bucket is found.
int chunks = (categories + (int)blockDim.x - 1) / blockDim.x;
scalar_t prevHighProb = zero;
// NOTE(review): `found` is reset here without an explicit barrier after
// the previous distribution's fallback read; ordering presumably relies
// on the barriers inside reduceBlock — verify.
found = false;
for (int chunk = 0; chunk < chunks && !found; ++chunk) {
// All threads in bounds load a value
int cat = chunk * blockDim.x + threadIdx.x;
accscalar_t a_dist_val = cat < categories ?
static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum :
accZero;
scalar_t dist_val = static_cast<scalar_t>(a_dist_val);
smem[threadIdx.x] = dist_val;
__syncthreads();
// Perform an inclusive prefix sum of the shared memory contents
for (int offset = 1; offset < blockDim.x; offset *= 2) {
scalar_t val = zero;
if (threadIdx.x >= offset) {
val = smem[threadIdx.x - offset] + smem[threadIdx.x];
}
__syncthreads();
if (threadIdx.x >= offset) {
smem[threadIdx.x] = val;
}
__syncthreads();
}
// Each thread will check to see if the sample falls in its
// bucket: prevBucket <= sample < curBucket, with non-zero mass.
scalar_t curBucket = smem[threadIdx.x] + prevHighProb;
scalar_t prevBucket =
threadIdx.x == 0 ? prevHighProb :
smem[threadIdx.x - 1] + prevHighProb;
bool inBucket =
(cat < categories) &&
(!(sample >= curBucket) &&
(sample >= prevBucket) &&
(dist_val > zero));
if (inBucket) {
// We're done; we have the sample
// Torch indices are 1-based
dest[curDist] = cat;
found = true;
}
// Store the previous scan's high value for future use
prevHighProb = prevHighProb + smem[blockDim.x - 1];
__syncthreads();
}
if (threadIdx.x == 0 && !found) {
// This should address a rare bug where we don't select a valid index. This likely occurs when
// due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1, but
// and our uniform sample is greater than this value. In this case we likely have unitialized memory
// in dest[curDist]. So basically we will loop through the distribution and pick the largest index
// where the distribution is non-zero. This is obviously terribly inefficient, but due to the
// rarity in which this occurs, this should not be an issue.
for (int cat = categories - 1; cat >= 0; --cat) {
if (dist[curDist * stride_dist + cat * stride_categories] > zero) {
dest[curDist] = cat;
break;
}
}
}
}
}
// CUDA entry point for torch.multinomial. Chooses between:
//  * an allocation-free single-sample kernel (n_sample == 1 and the
//    required shared memory fits on the device), and
//  * a generic path that L1-renormalizes rows, builds a prefix sum, and
//    samples with replacement via per-thread binary search.
// NOTE(review): for n_sample > 1 without replacement no kernel is
// launched in this visible chunk — presumably that case is decomposed
// before reaching this stub; verify against the dispatcher.
void multinomial_kernel_impl(Tensor& result, const Tensor& self, const int64_t n_sample, const bool with_replacement, c10::optional<Generator> generator) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(generator, cuda::detail::getDefaultCUDAGenerator());
int inputSize = self.dim();
int64_t numDist =
inputSize == 1 ? 1 : self.size(0);
int numCategories =
inputSize == 1 ? self.size(0) : self.size(1);
// Restructure data for 2d
auto self_v = inputSize == 1 ? self.view({numDist, numCategories}) : self;
result.resize_({numDist, n_sample});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self_v.scalar_type(), "multinomial_kernel_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
// sampleMultinomialOnce needs one scalar_t + one accscalar_t per thread.
int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads)
* (sizeof(scalar_t) + sizeof(accscalar_t));
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
Tensor sampled = native::empty_cuda({numDist, n_sample}, optTypeMetaToScalarType(self_v.options().dtype_opt()),
self_v.options().layout_opt(), self_v.options().device_opt(),
self_v.options().pinned_memory_opt());
at::native::uniform_(sampled, 0.0, 1.0, generator);
dim3 block(numCategories < maxThreads ? numCategories : maxThreads);
dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4);
sampleMultinomialOnce<scalar_t, accscalar_t>
<<<grid, block,
requiredShared,
at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<int64_t>(),
numDist,
numCategories,
sampled.data_ptr<scalar_t>(),
self_v.data_ptr<scalar_t>(),
self_v.stride(0),
self_v.stride(1)
);
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
Tensor origDist = native::empty_like(self_v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
origDist.copy_(self_v);
Tensor normDist = native::empty_like(self_v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor prefixSum = native::empty_like(self_v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// Renorm along rows
normDist.copy_(origDist);
renormRows(normDist);
// Prefix sum along rows
at::_cumsum_out(prefixSum, normDist, 1);
std::pair<uint64_t, uint64_t> rng_engine_inputs;
if (with_replacement) {
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(128);
// Each block will generate a sample from one
// distribution concurrently.
int grid_y=std::min<int>(numDist, at::cuda::getCurrentDeviceProperties()->maxGridSize[1]);
dim3 grid((n_sample-1)/block.x+1, grid_y);
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
// each thread generates a single sample for (numdist/numblocks.y) distributions, however, since we have to use
// curand_uniform4 (See Note [Register spilling in curand call for CUDA < 10]),
// offset is 4 times that.
auto offset = ((numDist-1)/grid.y+1)*4;
rng_engine_inputs = gen->philox_engine_inputs(offset);
}
// Sample with replacement
sampleMultinomialWithReplacement
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
rng_engine_inputs,
n_sample,
result.data_ptr<int64_t>(),
numDist, numCategories,
prefixSum.data_ptr<scalar_t>(),
normDist.data_ptr<scalar_t>());
}
}
});
AT_CUDA_CHECK(cudaGetLastError());
// A 1-D input yields a 1-D result.
if (inputSize == 1) {
result.resize_({n_sample});
}
}
}
REGISTER_DISPATCH(multinomial_stub, &multinomial_kernel_impl);
}}
|
0930eb96219f651bb23555af7da079517baf39b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "util.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define CHUNK 1024
#define SIZE_X 4194304
#define MASK_SIZE 7
#define PARTE 2
// Report (and clear) the most recent HIP runtime error. Intended to be
// called right after a kernel launch, since launches return no status
// directly; hipGetLastError() also resets the sticky error state.
void cudaCheck()
{
    // FIX: the hipified original declared a variable literally named
    // `hipError_t`, shadowing the very type it was declared with; use a
    // normal identifier instead.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
    {
        printf(" hipGetLastError() returned %d: %s\n", err, hipGetErrorString(err));
    }
    else
    {
        //printf(" todo ok\n" );
    }
}
#if PARTE == 3
// TODO (parte 3): 1-D convolution reading the mask from __constant__
// memory; body not yet implemented in this exercise skeleton.
__global__ void Kernel_Convolucion_Constante(int * inputArray, int* outputArray)
{
}
#endif
// TODO (parte 4): tiled 1-D convolution staging the input through shared
// memory; body not yet implemented in this exercise skeleton.
__global__ void Kernel_Convolucion_Shared(int * inputArray, int* outputArray, int* mask)
{
}
// Naive 1-D convolution: one output element per thread, reading the mask
// from global memory. Out-of-range neighbours are treated as zero
// (zero padding at both ends).
__global__ void Kernel_Convolucion_Simple(int * inputArray, int* outputArray, int* mask)
{
    int ratio = MASK_SIZE / 2;
    int pValor = 0;
    int pos = threadIdx.x + blockIdx.x * blockDim.x;
    // FIX: guard against a grid that overshoots the array. With the
    // current constants SIZE_X is an exact multiple of CHUNK so the launch
    // is exact, but the kernel would write out of bounds if they change.
    if (pos >= SIZE_X) return;
    for (int j = 0; j < MASK_SIZE; j++) {
        int neighbour = pos + j - ratio;
        if (neighbour >= 0 && neighbour < SIZE_X) {
            pValor = pValor + inputArray[neighbour] * mask[j];
        }
    }
    // outputArray is zeroed on the host, so adding stores the full result;
    // each thread owns a distinct pos, the atomic is purely defensive.
    atomicAdd(&outputArray[pos], pValor);
}
// CPU reference implementation of the zero-padded 1-D convolution:
// ouputArray[i] += sum_j inputArray[i - MASK_SIZE/2 + j] * mask[j].
// Assumes ouputArray arrives zero-initialised (results are accumulated).
void Convolucion_C(int * inputArray, int* ouputArray, int * mask)
{
    const int radius = MASK_SIZE / 2;
    for (int i = 0; i < SIZE_X; i++)
    {
        for (int j = 0; j < MASK_SIZE; j++)
        {
            const int position = i - radius + j;
            if (position >= 0 && position < SIZE_X)
            {
                ouputArray[i] += inputArray[position] * mask[j];
            }
        }
    }
}
// Driver: builds a SIZE_X ramp input and an all-ones MASK_SIZE mask, runs
// the convolution on CPU and on the GPU variant selected by PARTE, then
// compares the two results.
int main() {
    // Host buffers: input, CPU result, GPU result, convolution mask.
    int* inputArray = (int*)malloc(sizeof(int) * SIZE_X);
    int* outputArray = (int*)malloc(sizeof(int) * SIZE_X);
    int* outputArray_GPU = (int*)malloc(sizeof(int) * SIZE_X);
    int* mask = (int*)malloc(sizeof(int) * MASK_SIZE);
    int i;
    int * inputArray_k;
    int * outputArray_k;
#if PARTE != 3
    int* mask_k;
#endif
    // Device allocations for input and output.
    size_t size = SIZE_X * sizeof(int);
    hipMalloc(&(inputArray_k), size);
    hipMalloc(&(outputArray_k), size);
#if PARTE != 3
    // Device allocation for the mask (parte 3 keeps it in constant memory).
    size_t sizeMask = MASK_SIZE * sizeof(int);
    hipMalloc(&mask_k, sizeMask);
#endif
    for (i = 0; i < SIZE_X; i++)
    {
        inputArray[i] = i;
        outputArray[i] = 0;
    }
    for (i = 0; i < MASK_SIZE; i++)
    {
        mask[i] = 1;
    }
    clockStart();
    Convolucion_C(inputArray, outputArray, mask);
    clockStop("CPU");
    clockStart();
    // Copy the inputs to the GPU and zero the device output.
    hipMemcpy(inputArray_k, inputArray, size, hipMemcpyHostToDevice);
    hipMemset(outputArray_k, 0, sizeof(int) * SIZE_X);
#if PARTE == 3
    // copy to constant memory (not implemented yet)
#else
    hipMemcpy(mask_k, mask, sizeof(int) * MASK_SIZE, hipMemcpyHostToDevice);
#endif
    hipDeviceSynchronize();
    clockStop("Tranferencias a host");
    clockStart();
    // One thread per output element, CHUNK threads per block.
    int cantBloques = SIZE_X / CHUNK + (SIZE_X % CHUNK == 0 ? 0 : 1);
    int tamGrid = cantBloques;
    int tamBlock = CHUNK;
#if PARTE == 3
    hipLaunchKernelGGL(( Kernel_Convolucion_Constante), dim3(tamGrid), dim3(tamBlock), 0, 0, inputArray_k, outputArray_k);
#else
#if PARTE == 4
    hipLaunchKernelGGL(( Kernel_Convolucion_Shared), dim3(tamGrid), dim3(tamBlock), 0, 0, inputArray_k, outputArray_k, mask_k);
#else
    hipLaunchKernelGGL(( Kernel_Convolucion_Simple), dim3(tamGrid), dim3(tamBlock), 0, 0, inputArray_k, outputArray_k, mask_k);
#endif
#endif
    hipDeviceSynchronize();
    clockStop("GPU");
    cudaCheck();
    clockStart();
    hipMemcpy(outputArray_GPU, outputArray_k, size, hipMemcpyDeviceToHost);
    hipFree(inputArray_k);
    hipFree(outputArray_k);
#if PARTE != 3
    // FIX: mask_k only exists when PARTE != 3; the original freed it
    // unconditionally, which does not compile for PARTE == 3.
    hipFree(mask_k);
#endif
    clockStop("Tranferencias host a CPU");
    if (equal_arrays(outputArray_GPU, outputArray, SIZE_X))
        printf("Enhorabuena");
    else
        printf("Rayos y centellas");
    // Release all host buffers (the original leaked `mask`).
    free(mask);
    free(outputArray_GPU);
    free(outputArray);
    free(inputArray);
    char character;
    scanf("%c", &character);
    return 0;
}
| 0930eb96219f651bb23555af7da079517baf39b6.cu | #include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include "util.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define CHUNK 1024
#define SIZE_X 4194304
#define MASK_SIZE 7
#define PARTE 2
// Report (and clear) the most recent CUDA runtime error. Intended to be
// called right after a kernel launch, since launches return no status
// directly; cudaGetLastError() also resets the sticky error state.
void cudaCheck()
{
cudaError_t cudaError;
cudaError = cudaGetLastError();
if(cudaError != cudaSuccess)
{
printf(" cudaGetLastError() returned %d: %s\n", cudaError, cudaGetErrorString(cudaError));
}else{
//printf(" todo ok\n" );
}
}
#if PARTE == 3
// TODO (parte 3): 1-D convolution reading the mask from __constant__
// memory; body not yet implemented in this exercise skeleton.
__global__ void Kernel_Convolucion_Constante(int * inputArray, int* outputArray)
{
}
#endif
// TODO (parte 4): tiled 1-D convolution staging the input through shared
// memory; body not yet implemented in this exercise skeleton.
__global__ void Kernel_Convolucion_Shared(int * inputArray, int* outputArray, int* mask)
{
}
// Naive 1-D convolution: one output element per thread, reading the mask
// from global memory. Out-of-range neighbours are treated as zero
// (zero padding at both ends).
__global__ void Kernel_Convolucion_Simple(int * inputArray, int* outputArray, int* mask)
{
    int ratio = MASK_SIZE / 2;
    int pValor = 0;
    int pos = threadIdx.x + blockIdx.x * blockDim.x;
    // FIX: guard against a grid that overshoots the array. With the
    // current constants SIZE_X is an exact multiple of CHUNK so the launch
    // is exact, but the kernel would write out of bounds if they change.
    if (pos >= SIZE_X) return;
    for (int j = 0; j < MASK_SIZE; j++) {
        int neighbour = pos + j - ratio;
        if (neighbour >= 0 && neighbour < SIZE_X) {
            pValor = pValor + inputArray[neighbour] * mask[j];
        }
    }
    // outputArray is zeroed on the host, so adding stores the full result;
    // each thread owns a distinct pos, the atomic is purely defensive.
    atomicAdd(&outputArray[pos], pValor);
}
// CPU reference for the GPU kernels: zero-padded 1-D convolution,
// ouputArray[i] += sum_j inputArray[i - MASK_SIZE/2 + j] * mask[j].
// Assumes ouputArray arrives zero-initialised (results are accumulated).
void Convolucion_C(int * inputArray, int* ouputArray, int * mask)
{
int i, j;
for( i = 0; i<SIZE_X;i++)
{
for( j =0; j<MASK_SIZE;j++)
{
int position = i-(int)(MASK_SIZE/2) + j;
if(position>=0 && position<SIZE_X)
ouputArray[i] += inputArray[position] * mask[j];
}
}
}
// Driver: builds a SIZE_X ramp input and an all-ones MASK_SIZE mask, runs
// the convolution on CPU and on the GPU variant selected by PARTE, then
// compares the two results.
int main() {
    // Host buffers: input, CPU result, GPU result, convolution mask.
    int* inputArray = (int*)malloc(sizeof(int) * SIZE_X);
    int* outputArray = (int*)malloc(sizeof(int) * SIZE_X);
    int* outputArray_GPU = (int*)malloc(sizeof(int) * SIZE_X);
    int* mask = (int*)malloc(sizeof(int) * MASK_SIZE);
    int i;
    int * inputArray_k;
    int * outputArray_k;
#if PARTE != 3
    int* mask_k;
#endif
    // Device allocations for input and output.
    size_t size = SIZE_X * sizeof(int);
    cudaMalloc(&(inputArray_k), size);
    cudaMalloc(&(outputArray_k), size);
#if PARTE != 3
    // Device allocation for the mask (parte 3 keeps it in constant memory).
    size_t sizeMask = MASK_SIZE * sizeof(int);
    cudaMalloc(&mask_k, sizeMask);
#endif
    for (i = 0; i < SIZE_X; i++)
    {
        inputArray[i] = i;
        outputArray[i] = 0;
    }
    for (i = 0; i < MASK_SIZE; i++)
    {
        mask[i] = 1;
    }
    clockStart();
    Convolucion_C(inputArray, outputArray, mask);
    clockStop("CPU");
    clockStart();
    // Copy the inputs to the GPU and zero the device output.
    cudaMemcpy(inputArray_k, inputArray, size, cudaMemcpyHostToDevice);
    cudaMemset(outputArray_k, 0, sizeof(int) * SIZE_X);
#if PARTE == 3
    // copy to constant memory (not implemented yet)
#else
    cudaMemcpy(mask_k, mask, sizeof(int) * MASK_SIZE, cudaMemcpyHostToDevice);
#endif
    cudaDeviceSynchronize();
    clockStop("Tranferencias a host");
    clockStart();
    // One thread per output element, CHUNK threads per block.
    int cantBloques = SIZE_X / CHUNK + (SIZE_X % CHUNK == 0 ? 0 : 1);
    int tamGrid = cantBloques;
    int tamBlock = CHUNK;
#if PARTE == 3
    Kernel_Convolucion_Constante<<<tamGrid, tamBlock>>>(inputArray_k, outputArray_k);
#else
#if PARTE == 4
    Kernel_Convolucion_Shared<<<tamGrid, tamBlock>>>(inputArray_k, outputArray_k, mask_k);
#else
    Kernel_Convolucion_Simple<<<tamGrid, tamBlock>>>(inputArray_k, outputArray_k, mask_k);
#endif
#endif
    cudaDeviceSynchronize();
    clockStop("GPU");
    cudaCheck();
    clockStart();
    cudaMemcpy(outputArray_GPU, outputArray_k, size, cudaMemcpyDeviceToHost);
    cudaFree(inputArray_k);
    cudaFree(outputArray_k);
#if PARTE != 3
    // FIX: mask_k only exists when PARTE != 3; the original freed it
    // unconditionally, which does not compile for PARTE == 3.
    cudaFree(mask_k);
#endif
    clockStop("Tranferencias host a CPU");
    if (equal_arrays(outputArray_GPU, outputArray, SIZE_X))
        printf("Enhorabuena");
    else
        printf("Rayos y centellas");
    // Release all host buffers (the original leaked `mask`).
    free(mask);
    free(outputArray_GPU);
    free(outputArray);
    free(inputArray);
    char character;
    scanf("%c", &character);
    return 0;
}
|
66564b15831151a0f8488430a65e6be3c33a0c4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// *----------------------------------------------
// Author Contact Information:
// Hao Gao
// hao.gao@emory.edu || hao.gao.2012@gmail.com
// Department of Mathematics and Computer Science, Emory University
// Department of Radiology and Imaging Sciences, Emory University
//
// Copyright (c) Hao Gao 2012
// ----------------------------------------------*/
//
// If you find this code useful, you may cite the following reference:
// H. Gao. "Fast parallel algorithms for the X-ray transform and its adjoint", Medical Physics (2012).
// The full source codes are available at https://sites.google.com/site/fastxraytransform
#include <math.h>
#include <malloc.h>
#define ABS(a) (a>0?a:-(a))
#define BLOCK_SIZE_x 16
#define BLOCK_SIZE_y 16
extern "C" void Ax_fan_mf_gpu_new(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X,int nt);
// Fan-beam X-ray forward projection. One thread per (view iv, detector
// element id): the ray from the source (distance SO behind the origin on
// the rotated axis sd_phi[iv]) to detector element y_det[id] (distance OD
// in front) is traced through the nx-by-ny pixel grid of frame id_X[iv],
// and intersection lengths weight the pixel values accumulated into
// y[iv*nd+id]. Traversal marches along the dominant axis so each step
// touches at most two pixels.
__global__ void Ax_fan_mf_gpu_new_kernel(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X)
// Please note that this version has O(Nx) per thread, since GPU threads are already saturated.
// O(1) per thread can be achieved by parallelizing the "for" loop here, given sufficient number of GPU threads.
{
int bx=blockIdx.x;
int by=blockIdx.y;
int tx0=threadIdx.x;
int ty0=threadIdx.y;
int iv=bx*BLOCK_SIZE_x+tx0;
int id=by*BLOCK_SIZE_y+ty0;
if(iv<nv&&id<nd)
{
int n,nx2,ny2,ix,iy,c1,c2;
float *x,cos_phi,sin_phi,x1,y1,x2,y2,xx1,yy1,xx2,yy2,slope,l,d;
nx2=nx/2;ny2=ny/2;
n=nx*ny;
// Select the image frame for this view via the per-view index id_X.
x=&X[id_X[iv]*n];
cos_phi=(float)cos(sd_phi[iv]);sin_phi=(float)sin(sd_phi[iv]);
// Ray endpoints: source (x1,y1) and detector element (x2,y2) in the
// rotated coordinate frame of view iv.
x1=cos_phi*(-SO);
y1=sin_phi*(-SO);
x2=cos_phi*OD-sin_phi*y_det[id];
y2=sin_phi*OD+cos_phi*y_det[id];
y[iv*nd+id]=0;
// March along x when the ray is more horizontal than vertical,
// otherwise along y (mirrored logic below).
if(ABS(x1-x2)>ABS(y1-y2))
{ slope=(y2-y1)/(x2-x1);
for(ix=0;ix<nx;ix++)
{ xx1=(float)(ix-nx2);xx2=xx1+1;
if(slope>=0)
{ yy1=y1+slope*(xx1-x1)+ny2;
yy2=y1+slope*(xx2-x1)+ny2;
}
else
{ yy1=y1+slope*(xx2-x1)+ny2;
yy2=y1+slope*(xx1-x1)+ny2;
}
c1=(int)floor(yy1);
c2=(int)floor(yy2);
if(c2==c1)// c1 and c2 differs less than 1
{ if(c1>=0&&c1<=ny-1)
{ d=yy2-yy1;l=(float)sqrt(d*d+1);
iy=c1;y[iv*nd+id]+=l*x[iy*nx+ix];
}
}
else
{ // The segment crosses a pixel boundary: split the length l
// between rows c1 and c2 proportionally.
if(c2>0&&c2<ny)
{ d=yy2-yy1;l=(float)sqrt(d*d+1);
iy=c1;y[iv*nd+id]+=((c2-yy1)/d)*l*x[iy*nx+ix];
iy=c2;y[iv*nd+id]+=((yy2-c2)/d)*l*x[iy*nx+ix];
}
else
{ if(c2==0)
{ d=yy2-yy1;l=(float)sqrt(d*d+1);
iy=c2;y[iv*nd+id]+=((yy2-c2)/d)*l*x[iy*nx+ix];
}
if(c2==ny)
{ d=yy2-yy1;l=(float)sqrt(d*d+1);
iy=c1;y[iv*nd+id]+=((c2-yy1)/d)*l*x[iy*nx+ix];
}
}
}
}
}
else
{ slope=(x2-x1)/(y2-y1);
for(iy=0;iy<ny;iy++)
{ yy1=(float)(iy-ny2);yy2=yy1+1;
if(slope>=0)
{ xx1=x1+slope*(yy1-y1)+nx2;
xx2=x1+slope*(yy2-y1)+nx2;
}
else
{ xx1=x1+slope*(yy2-y1)+nx2;
xx2=x1+slope*(yy1-y1)+nx2;
}
c1=(int)floor(xx1);
c2=(int)floor(xx2);
if(c2==c1)// c1 and c2 differs less than 1
{ if(c1>=0&&c1<=nx-1)
{ d=xx2-xx1;l=(float)sqrt(d*d+1);
ix=c1;y[iv*nd+id]+=l*x[iy*nx+ix];
}
}
else
{ if(c2>0&&c2<nx)
{ d=xx2-xx1;l=(float)sqrt(d*d+1);
ix=c1;y[iv*nd+id]+=((c2-xx1)/d)*l*x[iy*nx+ix];
ix=c2;y[iv*nd+id]+=((xx2-c2)/d)*l*x[iy*nx+ix];
}
else
{ if(c2==0)
{ d=xx2-xx1;l=(float)sqrt(d*d+1);
ix=c2;y[iv*nd+id]+=((xx2-c2)/d)*l*x[iy*nx+ix];
}
// NOTE(review): this boundary test compares against ny in the
// x-marching branch; presumably nx was intended — verify.
if(c2==ny)
{ d=xx2-xx1;l=(float)sqrt(d*d+1);
ix=c1;y[iv*nd+id]+=((c2-xx1)/d)*l*x[iy*nx+ix];
}
}
}
}
}
y[iv*nd+id]*=scale;
}
}
// Host wrapper for the fan-beam forward projection: copies the image
// stack X (nt frames of nx*ny), the view angles sd_phi, the detector
// coordinates y_det and the per-view frame indices id_X to the device,
// launches one thread per (view, detector) pair, and copies the nv*nd
// sinogram back into y.
// NOTE(review): no status checks on hipMalloc/hipMemcpy/launch — a
// failure surfaces only as wrong output.
void Ax_fan_mf_gpu_new(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X,int nt)
// A new method for computing the X-ray transform (infinitely-narrow beam)
// The algorithm details are available in
// H. Gao. "Fast parallel algorithms for the X-ray transform and its adjoint", Medical Physics (2012).
{ float *y_d,*X_d,*sd_phi_d,*y_det_d;
int *id_X_d;
hipMalloc(&y_d,nv*nd*sizeof(float));
hipMalloc(&X_d,nx*ny*nt*sizeof(float));
hipMalloc(&sd_phi_d,nv*sizeof(float));
hipMalloc(&y_det_d,nd*sizeof(float));
hipMalloc(&id_X_d,nv*sizeof(int));
hipMemcpy(X_d,X,nx*ny*nt*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(sd_phi_d,sd_phi,nv*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(y_det_d,y_det,nd*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(id_X_d,id_X,nv*sizeof(int),hipMemcpyHostToDevice);
// 2-D launch: x covers views, y covers detector elements.
dim3 dimBlock(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid_t((nv+dimBlock.x-1)/dimBlock.x,(nd+dimBlock.y-1)/dimBlock.y);
hipLaunchKernelGGL(( Ax_fan_mf_gpu_new_kernel), dim3(dimGrid_t), dim3(dimBlock), 0, 0, X_d,y_d,SO,OD,scale,nx,ny,nv,sd_phi_d,nd,y_det_d,id_X_d);
// Blocking copy of the result also synchronizes with the kernel.
hipMemcpy(y,y_d,nv*nd*sizeof(float),hipMemcpyDeviceToHost);
hipFree(y_d);hipFree(X_d);hipFree(sd_phi_d);hipFree(y_det_d);hipFree(id_X_d);
}
| 66564b15831151a0f8488430a65e6be3c33a0c4c.cu | // *----------------------------------------------
// Author Contact Information:
// Hao Gao
// hao.gao@emory.edu || hao.gao.2012@gmail.com
// Department of Mathematics and Computer Science, Emory University
// Department of Radiology and Imaging Sciences, Emory University
//
// Copyright (c) Hao Gao 2012
// ----------------------------------------------*/
//
// If you find this code useful, you may cite the following reference:
// H. Gao. "Fast parallel algorithms for the X-ray transform and its adjoint", Medical Physics (2012).
// The full source codes are available at https://sites.google.com/site/fastxraytransform
#include <math.h>
#include <malloc.h>
#define ABS(a) (a>0?a:-(a))
#define BLOCK_SIZE_x 16
#define BLOCK_SIZE_y 16
extern "C" void Ax_fan_mf_gpu_new(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X,int nt);
// Fan-beam X-ray forward projection. One thread per (view iv, detector
// element id): the ray from the source (distance SO behind the origin on
// the rotated axis sd_phi[iv]) to detector element y_det[id] (distance OD
// in front) is traced through the nx-by-ny pixel grid of frame id_X[iv],
// and intersection lengths weight the pixel values accumulated into
// y[iv*nd+id]. Traversal marches along the dominant axis so each step
// touches at most two pixels.
__global__ void Ax_fan_mf_gpu_new_kernel(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X)
// Please note that this version has O(Nx) per thread, since GPU threads are already saturated.
// O(1) per thread can be achieved by parallelizing the "for" loop here, given sufficient number of GPU threads.
{
int bx=blockIdx.x;
int by=blockIdx.y;
int tx0=threadIdx.x;
int ty0=threadIdx.y;
int iv=bx*BLOCK_SIZE_x+tx0;
int id=by*BLOCK_SIZE_y+ty0;
if(iv<nv&&id<nd)
{
int n,nx2,ny2,ix,iy,c1,c2;
float *x,cos_phi,sin_phi,x1,y1,x2,y2,xx1,yy1,xx2,yy2,slope,l,d;
nx2=nx/2;ny2=ny/2;
n=nx*ny;
// Select the image frame for this view via the per-view index id_X.
x=&X[id_X[iv]*n];
cos_phi=(float)cos(sd_phi[iv]);sin_phi=(float)sin(sd_phi[iv]);
// Ray endpoints: source (x1,y1) and detector element (x2,y2) in the
// rotated coordinate frame of view iv.
x1=cos_phi*(-SO);
y1=sin_phi*(-SO);
x2=cos_phi*OD-sin_phi*y_det[id];
y2=sin_phi*OD+cos_phi*y_det[id];
y[iv*nd+id]=0;
// March along x when the ray is more horizontal than vertical,
// otherwise along y (mirrored logic below).
if(ABS(x1-x2)>ABS(y1-y2))
{ slope=(y2-y1)/(x2-x1);
for(ix=0;ix<nx;ix++)
{ xx1=(float)(ix-nx2);xx2=xx1+1;
if(slope>=0)
{ yy1=y1+slope*(xx1-x1)+ny2;
yy2=y1+slope*(xx2-x1)+ny2;
}
else
{ yy1=y1+slope*(xx2-x1)+ny2;
yy2=y1+slope*(xx1-x1)+ny2;
}
c1=(int)floor(yy1);
c2=(int)floor(yy2);
if(c2==c1)// c1 and c2 differs less than 1
{ if(c1>=0&&c1<=ny-1)
{ d=yy2-yy1;l=(float)sqrt(d*d+1);
iy=c1;y[iv*nd+id]+=l*x[iy*nx+ix];
}
}
else
{ // The segment crosses a pixel boundary: split the length l
// between rows c1 and c2 proportionally.
if(c2>0&&c2<ny)
{ d=yy2-yy1;l=(float)sqrt(d*d+1);
iy=c1;y[iv*nd+id]+=((c2-yy1)/d)*l*x[iy*nx+ix];
iy=c2;y[iv*nd+id]+=((yy2-c2)/d)*l*x[iy*nx+ix];
}
else
{ if(c2==0)
{ d=yy2-yy1;l=(float)sqrt(d*d+1);
iy=c2;y[iv*nd+id]+=((yy2-c2)/d)*l*x[iy*nx+ix];
}
if(c2==ny)
{ d=yy2-yy1;l=(float)sqrt(d*d+1);
iy=c1;y[iv*nd+id]+=((c2-yy1)/d)*l*x[iy*nx+ix];
}
}
}
}
}
else
{ slope=(x2-x1)/(y2-y1);
for(iy=0;iy<ny;iy++)
{ yy1=(float)(iy-ny2);yy2=yy1+1;
if(slope>=0)
{ xx1=x1+slope*(yy1-y1)+nx2;
xx2=x1+slope*(yy2-y1)+nx2;
}
else
{ xx1=x1+slope*(yy2-y1)+nx2;
xx2=x1+slope*(yy1-y1)+nx2;
}
c1=(int)floor(xx1);
c2=(int)floor(xx2);
if(c2==c1)// c1 and c2 differs less than 1
{ if(c1>=0&&c1<=nx-1)
{ d=xx2-xx1;l=(float)sqrt(d*d+1);
ix=c1;y[iv*nd+id]+=l*x[iy*nx+ix];
}
}
else
{ if(c2>0&&c2<nx)
{ d=xx2-xx1;l=(float)sqrt(d*d+1);
ix=c1;y[iv*nd+id]+=((c2-xx1)/d)*l*x[iy*nx+ix];
ix=c2;y[iv*nd+id]+=((xx2-c2)/d)*l*x[iy*nx+ix];
}
else
{ if(c2==0)
{ d=xx2-xx1;l=(float)sqrt(d*d+1);
ix=c2;y[iv*nd+id]+=((xx2-c2)/d)*l*x[iy*nx+ix];
}
// NOTE(review): this boundary test compares against ny in the
// x-marching branch; presumably nx was intended — verify.
if(c2==ny)
{ d=xx2-xx1;l=(float)sqrt(d*d+1);
ix=c1;y[iv*nd+id]+=((c2-xx1)/d)*l*x[iy*nx+ix];
}
}
}
}
}
y[iv*nd+id]*=scale;
}
}
// Host wrapper for the fan-beam forward projection: copies the image
// stack X (nt frames of nx*ny), the view angles sd_phi, the detector
// coordinates y_det and the per-view frame indices id_X to the device,
// launches one thread per (view, detector) pair, and copies the nv*nd
// sinogram back into y.
// NOTE(review): no status checks on cudaMalloc/cudaMemcpy/launch — a
// failure surfaces only as wrong output.
void Ax_fan_mf_gpu_new(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X,int nt)
// A new method for computing the X-ray transform (infinitely-narrow beam)
// The algorithm details are available in
// H. Gao. "Fast parallel algorithms for the X-ray transform and its adjoint", Medical Physics (2012).
{ float *y_d,*X_d,*sd_phi_d,*y_det_d;
int *id_X_d;
cudaMalloc(&y_d,nv*nd*sizeof(float));
cudaMalloc(&X_d,nx*ny*nt*sizeof(float));
cudaMalloc(&sd_phi_d,nv*sizeof(float));
cudaMalloc(&y_det_d,nd*sizeof(float));
cudaMalloc(&id_X_d,nv*sizeof(int));
cudaMemcpy(X_d,X,nx*ny*nt*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(sd_phi_d,sd_phi,nv*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(y_det_d,y_det,nd*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(id_X_d,id_X,nv*sizeof(int),cudaMemcpyHostToDevice);
// 2-D launch: x covers views, y covers detector elements.
dim3 dimBlock(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid_t((nv+dimBlock.x-1)/dimBlock.x,(nd+dimBlock.y-1)/dimBlock.y);
Ax_fan_mf_gpu_new_kernel<<<dimGrid_t, dimBlock>>>(X_d,y_d,SO,OD,scale,nx,ny,nv,sd_phi_d,nd,y_det_d,id_X_d);
// Blocking copy of the result also synchronizes with the kernel.
cudaMemcpy(y,y_d,nv*nd*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(y_d);cudaFree(X_d);cudaFree(sd_phi_d);cudaFree(y_det_d);cudaFree(id_X_d);
}
|
4e1d486d424b44a58e72095c06192d12d30ff4af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <time.h>
#include <stdio.h>
#define N (48*1024)
#define M (48*1024)
#define P 256
#define SIZE 4
// Uniform random sample in (-0.5, 0.5): rand() is mapped onto the open
// unit interval with the half-offset trick, then centred on zero.
float rand_unit_box() {
    double u = (rand() + 0.5) / (RAND_MAX + 1.0);
    return u - 0.5;
}
// Helper for obtaining a typed pointer to dynamically allocated shared
// memory from templated device code: `extern __shared__` arrays may only
// be declared with one type per kernel, so a raw int buffer is declared
// once and reinterpreted as T* here.
template<class T>
struct SharedMemory {
__device__ inline operator T *() {
extern __shared__ int __smem[];
return (T*) __smem;
}
__device__ inline operator const T *() const {
extern __shared__ int __smem[];
return (T*) __smem;
}
};
// Direct O(N*M) pairwise interaction kernel (gravity-style). Each thread
// owns one of the N target points in X and loops over all M source points
// in Y, tiled P sources at a time through dynamically allocated shared
// memory. For target i it accumulates the 3 components of
// sum_j -d_ij/|d_ij|^3 plus a -sum_j 1/|d_ij| term, written to G with
// stride SIZE+1 (the fifth slot per target is left untouched).
// Assumes blockDim.x == P (the launch in main uses <<<N/P, P>>>) and
// that N and M are exact multiples of P — there is no bounds guard.
__global__
void compute(float *__restrict__ G, const float *__restrict__ Y, const float *__restrict__ X) {
// const int i = threadIdx.x + blockIdx.x * P;
auto ys = SharedMemory<float>();
//
// for (int tile = 0; tile < M / P; tile++) {
// const int j0 = tile * P * SIZE;
// int base = threadIdx.x * SIZE;
// ys[base] = Y[j0 + base];
// ys[base + 1] = Y[j0 + base + 1];
// ys[base + 2] = Y[j0 + base + 2];
// __syncthreads();
//#pragma unroll 128
// for (int j = 0; j < P; j++) {
// const int i0 = i * SIZE;
// const int j0 = j * SIZE;
// const auto dx = X[i0] - ys[j0];
// const auto dy = X[i0 + 1] - ys[j0 + 1];
// const auto dz = X[i0 + 2] - ys[j0 + 2];
// const auto tmp = rsqrt(dx * dx + dy * dy + dz * dz);
// const auto r3inv = tmp * tmp * tmp;
// G[i0] += dx * r3inv ;
// G[i0 + 1] += dy * r3inv;
// G[i0 + 2] += dz * r3inv;
// }
// __syncthreads();
// }
//
const int i = threadIdx.x + blockIdx.x * P;
const auto G0 = (SIZE+1) * i;
float g1 = 0.0;
float g2 = 0.0;
float g3 = 0.0;
float g4 = 0.0;
for (int tile = 0; tile < M / P; tile++) {
const int j0 = tile * P * SIZE;
int base = threadIdx.x * SIZE;
// Stage this tile's source coordinates into shared memory; only the
// first 3 of SIZE components are loaded (component 3 is never read).
ys[base] = Y[j0 + base];
ys[base + 1] = Y[j0 + base + 1];
ys[base + 2] = Y[j0 + base + 2];
__syncthreads();
//if (i < N) {
const auto X0 = SIZE * i;
const auto x1 = X[X0];
const auto x2 = X[X0 + 1];
const auto x3 = X[X0 + 2];
for (int j = 0; j < P; j++) {
const auto Y0 = SIZE * j;
const auto dx1 = x1 - ys[Y0]; // 1 OP
const auto dx2 = x2 - ys[Y0 + 1]; // 1 OP
const auto dx3 = x3 - ys[Y0 + 2]; // 1 OP
const auto r2 = dx1 * dx1 + dx2 * dx2 + dx3 * dx3; // 5 OP
const auto rinv = rsqrt(r2); // 1 OP
const auto nrinv3 = -rinv * rinv * rinv; // 3 OP
g1 = g1 + dx1 * nrinv3; // 2 OP
g2 = g2 + dx2 * nrinv3; // 2 OP
g3 = g3 + dx3 * nrinv3; // 2 OP
g4 = g4 - rinv; // 1 OP
}
// Barrier before the next tile overwrites the shared buffer.
__syncthreads();
// }
}
G[G0] = g1;
G[G0 + 1] = g2;
G[G0 + 2] = g3;
G[G0 + 3] = g4;
}
// Benchmark driver using zero-copy (mapped, pinned) host memory: fills X
// (N targets) and Y (M sources) with random points, then runs the kernel
// 1000 times, printing cumulative throughput assuming 20 flops per pair.
// NOTE(review): the inner d-loops rewrite the same 3 components SIZE
// times and never initialize component 3 — harmless since the kernel
// never reads it, but likely unintended.
// NOTE(review): the pinned buffers are never freed and no HIP call is
// error-checked (cleanup happens at process exit).
int main() {
float *hostX;
float *hostY;
float *hostG;
float *deviceX;
float *deviceY;
float *deviceG;
// Mapped pinned allocations: the device pointers below alias host memory.
hipSetDeviceFlags (hipDeviceMapHost);
hipHostMalloc((void**) &hostG, (SIZE+1) * N * sizeof(float), hipHostMallocMapped | hipHostMallocPortable);
hipHostMalloc((void**) &hostX, (SIZE) * N * sizeof(float), hipHostMallocMapped | hipHostMallocPortable);
hipHostMalloc((void**) &hostY, (SIZE) * M * sizeof(float), hipHostMallocMapped | hipHostMallocPortable);
hipHostGetDevicePointer((void**) &deviceG, hostG, 0);
hipHostGetDevicePointer((void**) &deviceX, hostX, 0);
hipHostGetDevicePointer((void**) &deviceY, hostY, 0);
for (int i = 0; i < N; i++) {
for (int d = 0; d < SIZE; d++) {
hostX[SIZE * i] = rand_unit_box();
hostX[SIZE * i + 1] = rand_unit_box();
hostX[SIZE * i + 2] = rand_unit_box();
}
}
for (int i = 0; i < M; i++) {
for (int d = 0; d < SIZE; d++) {
hostY[SIZE * i] = rand_unit_box();
hostY[SIZE * i + 1] = rand_unit_box();
hostY[SIZE * i + 2] = rand_unit_box();
}
}
auto start = time(NULL);
for (int i = 0; i < 1000; i++) {
// One thread per target; dynamic shared memory holds one P-point tile.
hipLaunchKernelGGL(( compute), dim3(N/P),dim3(P),P * SIZE*sizeof(float), 0, deviceG,deviceX,deviceY);
hipDeviceSynchronize();
auto end = time(NULL);
double ops = (i + 1) * (double) N * (double) M * 20.0 / (1024.0 * 1024.0 * 1024.0 * 1024.0);
double t = (double) (end - start);
double flops = ops / t;
printf("%i %e TFLOP in %e seconds for %e TFLOPS\n", i, ops, t, flops);
}
}
| 4e1d486d424b44a58e72095c06192d12d30ff4af.cu | #include <time.h>
#include <stdio.h>
#define N (48*1024)
#define M (48*1024)
#define P 256
#define SIZE 4
// Uniform random sample in (-0.5, 0.5): rand() is mapped onto the open
// unit interval with the half-offset trick, then centred on zero.
float rand_unit_box() {
    double u = (rand() + 0.5) / (RAND_MAX + 1.0);
    return u - 0.5;
}
// Helper for obtaining a typed pointer to dynamically allocated shared
// memory from templated device code: `extern __shared__` arrays may only
// be declared with one type per kernel, so a raw int buffer is declared
// once and reinterpreted as T* here.
template<class T>
struct SharedMemory {
__device__ inline operator T *() {
extern __shared__ int __smem[];
return (T*) __smem;
}
__device__ inline operator const T *() const {
extern __shared__ int __smem[];
return (T*) __smem;
}
};
// Direct O(N*M) pairwise interaction kernel (gravity-style). Each thread
// owns one of the N target points in X and loops over all M source points
// in Y, tiled P sources at a time through dynamically allocated shared
// memory. For target i it accumulates the 3 components of
// sum_j -d_ij/|d_ij|^3 plus a -sum_j 1/|d_ij| term, written to G with
// stride SIZE+1 (the fifth slot per target is left untouched).
// Assumes blockDim.x == P (the launch in main uses <<<N/P, P>>>) and
// that N and M are exact multiples of P — there is no bounds guard.
__global__
void compute(float *__restrict__ G, const float *__restrict__ Y, const float *__restrict__ X) {
// const int i = threadIdx.x + blockIdx.x * P;
auto ys = SharedMemory<float>();
//
// for (int tile = 0; tile < M / P; tile++) {
// const int j0 = tile * P * SIZE;
// int base = threadIdx.x * SIZE;
// ys[base] = Y[j0 + base];
// ys[base + 1] = Y[j0 + base + 1];
// ys[base + 2] = Y[j0 + base + 2];
// __syncthreads();
//#pragma unroll 128
// for (int j = 0; j < P; j++) {
// const int i0 = i * SIZE;
// const int j0 = j * SIZE;
// const auto dx = X[i0] - ys[j0];
// const auto dy = X[i0 + 1] - ys[j0 + 1];
// const auto dz = X[i0 + 2] - ys[j0 + 2];
// const auto tmp = rsqrt(dx * dx + dy * dy + dz * dz);
// const auto r3inv = tmp * tmp * tmp;
// G[i0] += dx * r3inv ;
// G[i0 + 1] += dy * r3inv;
// G[i0 + 2] += dz * r3inv;
// }
// __syncthreads();
// }
//
const int i = threadIdx.x + blockIdx.x * P;
const auto G0 = (SIZE+1) * i;
float g1 = 0.0;
float g2 = 0.0;
float g3 = 0.0;
float g4 = 0.0;
for (int tile = 0; tile < M / P; tile++) {
const int j0 = tile * P * SIZE;
int base = threadIdx.x * SIZE;
// Stage this tile's source coordinates into shared memory; only the
// first 3 of SIZE components are loaded (component 3 is never read).
ys[base] = Y[j0 + base];
ys[base + 1] = Y[j0 + base + 1];
ys[base + 2] = Y[j0 + base + 2];
__syncthreads();
//if (i < N) {
const auto X0 = SIZE * i;
const auto x1 = X[X0];
const auto x2 = X[X0 + 1];
const auto x3 = X[X0 + 2];
for (int j = 0; j < P; j++) {
const auto Y0 = SIZE * j;
const auto dx1 = x1 - ys[Y0]; // 1 OP
const auto dx2 = x2 - ys[Y0 + 1]; // 1 OP
const auto dx3 = x3 - ys[Y0 + 2]; // 1 OP
const auto r2 = dx1 * dx1 + dx2 * dx2 + dx3 * dx3; // 5 OP
const auto rinv = rsqrt(r2); // 1 OP
const auto nrinv3 = -rinv * rinv * rinv; // 3 OP
g1 = g1 + dx1 * nrinv3; // 2 OP
g2 = g2 + dx2 * nrinv3; // 2 OP
g3 = g3 + dx3 * nrinv3; // 2 OP
g4 = g4 - rinv; // 1 OP
}
// Barrier before the next tile overwrites the shared buffer.
__syncthreads();
// }
}
G[G0] = g1;
G[G0 + 1] = g2;
G[G0 + 2] = g3;
G[G0 + 3] = g4;
}
// Benchmark driver using zero-copy (mapped, pinned) host memory: fills X
// (N targets) and Y (M sources) with random points, then runs the kernel
// 1000 times, printing cumulative throughput assuming 20 flops per pair.
// NOTE(review): the inner d-loops rewrite the same 3 components SIZE
// times and never initialize component 3 — harmless since the kernel
// never reads it, but likely unintended.
// NOTE(review): the pinned buffers are never freed and no CUDA call is
// error-checked (cleanup happens at process exit).
int main() {
float *hostX;
float *hostY;
float *hostG;
float *deviceX;
float *deviceY;
float *deviceG;
// Mapped pinned allocations: the device pointers below alias host memory.
cudaSetDeviceFlags (cudaDeviceMapHost);
cudaHostAlloc((void**) &hostG, (SIZE+1) * N * sizeof(float), cudaHostAllocMapped | cudaHostAllocPortable);
cudaHostAlloc((void**) &hostX, (SIZE) * N * sizeof(float), cudaHostAllocMapped | cudaHostAllocPortable);
cudaHostAlloc((void**) &hostY, (SIZE) * M * sizeof(float), cudaHostAllocMapped | cudaHostAllocPortable);
cudaHostGetDevicePointer((void**) &deviceG, hostG, 0);
cudaHostGetDevicePointer((void**) &deviceX, hostX, 0);
cudaHostGetDevicePointer((void**) &deviceY, hostY, 0);
for (int i = 0; i < N; i++) {
for (int d = 0; d < SIZE; d++) {
hostX[SIZE * i] = rand_unit_box();
hostX[SIZE * i + 1] = rand_unit_box();
hostX[SIZE * i + 2] = rand_unit_box();
}
}
for (int i = 0; i < M; i++) {
for (int d = 0; d < SIZE; d++) {
hostY[SIZE * i] = rand_unit_box();
hostY[SIZE * i + 1] = rand_unit_box();
hostY[SIZE * i + 2] = rand_unit_box();
}
}
auto start = time(NULL);
for (int i = 0; i < 1000; i++) {
// One thread per target; dynamic shared memory holds one P-point tile.
compute<<<N/P,P,P * SIZE*sizeof(float)>>>(deviceG,deviceX,deviceY);
cudaDeviceSynchronize();
auto end = time(NULL);
double ops = (i + 1) * (double) N * (double) M * 20.0 / (1024.0 * 1024.0 * 1024.0 * 1024.0);
double t = (double) (end - start);
double flops = ops / t;
printf("%i %e TFLOP in %e seconds for %e TFLOPS\n", i, ops, t, flops);
}
}
|
ec054c6ae02dfccbf77edfdede82a2c2894dd932.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <assert.h>
#include <vector>
#include "../utils.h"
// Transpose a rows_in x cols_in row-major matrix: array_out = array_in^T
// (array_out is cols_in x rows_in, row-major).
// Launch contract: blockDim == (32, numWarps); one block transposes one
// 32 x 32 tile, each warp staging 32/numWarps of its rows; the grid must
// have ceil(cols_in/32) x ceil(rows_in/32) blocks.
// Fix vs. the original stub: the body was empty and the output parameter
// was declared const (unimplementable as declared); array_out is now a
// mutable pointer — call sites already pass non-const buffers.
template<typename T, int numWarps>
__global__
void fastTranspose(const T *array_in, T *array_out,
                   int rows_in, int cols_in)
{
    const int tileDim = 32;  // == warp size; one warp spans one tile row
    // +1 padding column keeps the column-wise reads below free of
    // shared-memory bank conflicts.
    __shared__ T tile[tileDim][tileDim + 1];

    int col = blockIdx.x * tileDim + threadIdx.x;
    int row = blockIdx.y * tileDim + threadIdx.y;
    // Stage the input tile; guards cover sides that are not tile multiples.
    for (int i = 0; i < tileDim; i += numWarps) {
        if (col < cols_in && row + i < rows_in)
            tile[threadIdx.y + i][threadIdx.x] = array_in[(row + i) * cols_in + col];
    }
    __syncthreads();  // whole tile staged before any transposed read

    // Swap the tile coordinates and write the transposed tile; both the
    // global read above and the global write below remain coalesced.
    col = blockIdx.y * tileDim + threadIdx.x;
    row = blockIdx.x * tileDim + threadIdx.y;
    for (int i = 0; i < tileDim; i += numWarps) {
        if (col < rows_in && row + i < cols_in)
            array_out[(row + i) * rows_in + col] = tile[threadIdx.x][threadIdx.y + i];
    }
}
// Check that B holds the transpose of A, where both are side x side
// matrices stored row-major in flat vectors. The first mismatching element
// pair trips assert() (a no-op when compiled with NDEBUG).
template<typename T>
void isTranspose(const std::vector<T> &A,
                 const std::vector<T> &B,
                 int side)
{
    const int total = side * side;
    for (int idx = 0; idx < total; ++idx) {
        const int row = idx / side;
        const int col = idx % side;
        assert(A[row * side + col] == B[col * side + row]);
    }
}
// Host driver for the tiled transpose: fills a side x side matrix with
// random ints, transposes it on the GPU, and asserts the round-trip.
// Fix vs. the original: the grid dimensions were left blank
// ("gDim.x = ;//?"), which does not compile. One block covers one
// warpSize x warpSize tile, so the grid is the matrix side measured in
// tiles (ceil-div, so non-multiple sides are still covered).
int main(void) {
    const int side = 2048;
    std::vector<int> hIn (side * side);
    std::vector<int> hOut(side * side);
    for(int i = 0; i < side * side; ++i)
        hIn[i] = random() % 100;
    int *dIn, *dOut;
    checkCudaErrors(hipMalloc(&dIn, sizeof(int) * side * side));
    checkCudaErrors(hipMalloc(&dOut, sizeof(int) * side * side));
    checkCudaErrors(hipMemcpy(dIn, &hIn[0], sizeof(int) * side * side, hipMemcpyHostToDevice));
    const int warpSize = 32;
    const int numWarpsPerBlock = 4;
    dim3 bDim, gDim;
    bDim.x = warpSize;           // one warp spans one tile row
    bDim.y = numWarpsPerBlock;   // each warp handles warpSize/numWarps rows
    gDim.x = (side + warpSize - 1) / warpSize;  // tiles per row
    gDim.y = (side + warpSize - 1) / warpSize;  // tiles per column
    hipLaunchKernelGGL(( fastTranspose<int, numWarpsPerBlock>), dim3(gDim), dim3(bDim), 0, 0, dIn, dOut, side, side);
    hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipMemcpy(&hOut[0], dOut, sizeof(int) * side * side, hipMemcpyDeviceToHost));
    isTranspose(hIn, hOut, side);
    checkCudaErrors(hipFree(dIn));
    checkCudaErrors(hipFree(dOut));
    return 0;
}
| ec054c6ae02dfccbf77edfdede82a2c2894dd932.cu | #include <cstdio>
#include <cstdlib>
#include <assert.h>
#include <vector>
#include "../utils.h"
// Transpose a rows_in x cols_in row-major matrix: array_out = array_in^T
// (array_out is cols_in x rows_in, row-major).
// Launch contract: blockDim == (32, numWarps); one block transposes one
// 32 x 32 tile, each warp staging 32/numWarps of its rows; the grid must
// have ceil(cols_in/32) x ceil(rows_in/32) blocks.
// Fix vs. the original stub: the body was empty and the output parameter
// was declared const (unimplementable as declared); array_out is now a
// mutable pointer — call sites already pass non-const buffers.
template<typename T, int numWarps>
__global__
void fastTranspose(const T *array_in, T *array_out,
                   int rows_in, int cols_in)
{
    const int tileDim = 32;  // == warp size; one warp spans one tile row
    // +1 padding column keeps the column-wise reads below free of
    // shared-memory bank conflicts.
    __shared__ T tile[tileDim][tileDim + 1];

    int col = blockIdx.x * tileDim + threadIdx.x;
    int row = blockIdx.y * tileDim + threadIdx.y;
    // Stage the input tile; guards cover sides that are not tile multiples.
    for (int i = 0; i < tileDim; i += numWarps) {
        if (col < cols_in && row + i < rows_in)
            tile[threadIdx.y + i][threadIdx.x] = array_in[(row + i) * cols_in + col];
    }
    __syncthreads();  // whole tile staged before any transposed read

    // Swap the tile coordinates and write the transposed tile; both the
    // global read above and the global write below remain coalesced.
    col = blockIdx.y * tileDim + threadIdx.x;
    row = blockIdx.x * tileDim + threadIdx.y;
    for (int i = 0; i < tileDim; i += numWarps) {
        if (col < rows_in && row + i < cols_in)
            array_out[(row + i) * rows_in + col] = tile[threadIdx.x][threadIdx.y + i];
    }
}
// Check that B holds the transpose of A, where both are side x side
// matrices stored row-major in flat vectors. The first mismatching element
// pair trips assert() (a no-op when compiled with NDEBUG).
template<typename T>
void isTranspose(const std::vector<T> &A,
                 const std::vector<T> &B,
                 int side)
{
    const int total = side * side;
    for (int idx = 0; idx < total; ++idx) {
        const int row = idx / side;
        const int col = idx % side;
        assert(A[row * side + col] == B[col * side + row]);
    }
}
// Host driver for the tiled transpose: fills a side x side matrix with
// random ints, transposes it on the GPU, and asserts the round-trip.
// Fix vs. the original: the grid dimensions were left blank
// ("gDim.x = ;//?"), which does not compile. One block covers one
// warpSize x warpSize tile, so the grid is the matrix side measured in
// tiles (ceil-div, so non-multiple sides are still covered).
int main(void) {
    const int side = 2048;
    std::vector<int> hIn (side * side);
    std::vector<int> hOut(side * side);
    for(int i = 0; i < side * side; ++i)
        hIn[i] = random() % 100;
    int *dIn, *dOut;
    checkCudaErrors(cudaMalloc(&dIn, sizeof(int) * side * side));
    checkCudaErrors(cudaMalloc(&dOut, sizeof(int) * side * side));
    checkCudaErrors(cudaMemcpy(dIn, &hIn[0], sizeof(int) * side * side, cudaMemcpyHostToDevice));
    const int warpSize = 32;
    const int numWarpsPerBlock = 4;
    dim3 bDim, gDim;
    bDim.x = warpSize;           // one warp spans one tile row
    bDim.y = numWarpsPerBlock;   // each warp handles warpSize/numWarps rows
    gDim.x = (side + warpSize - 1) / warpSize;  // tiles per row
    gDim.y = (side + warpSize - 1) / warpSize;  // tiles per column
    fastTranspose<int, numWarpsPerBlock><<<gDim, bDim>>>(dIn, dOut, side, side);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaMemcpy(&hOut[0], dOut, sizeof(int) * side * side, cudaMemcpyDeviceToHost));
    isTranspose(hIn, hOut, side);
    checkCudaErrors(cudaFree(dIn));
    checkCudaErrors(cudaFree(dOut));
    return 0;
}
|
bcf11c02a8f58bfdd038d1ced000c72c487270c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
extern "C" __global__ void generate(const Int64 shIn0_2, const Int64 shIn0_1, const Int64 shIn0_0, const double* __restrict__ arrIn0_0, const Int64 shIn1_2, const Int64 shIn1_1, const Int64 shIn1_0, const double* __restrict__ arrIn1_3, const double* __restrict__ arrIn1_2, const double* __restrict__ arrIn1_1, const double* __restrict__ arrIn1_0, const Int64 shIn2_2, const Int64 shIn2_1, const Int64 shIn2_0, const double* __restrict__ arrIn2_2, const double* __restrict__ arrIn2_1, const double* __restrict__ arrIn2_0, const Int64 shIn3_2, const Int64 shIn3_1, const Int64 shIn3_0, const double* __restrict__ arrIn3_2, const double* __restrict__ arrIn3_1, const double* __restrict__ arrIn3_0, const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_5, double* __restrict__ arrOut_4, double* __restrict__ arrOut_3, double* __restrict__ arrOut_2, double* __restrict__ arrOut_1, double* __restrict__ arrOut_0)
{
const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 tmp_2 = tmp_1 / shOut_1;
const Int64 sh2 = tmp_2 % shOut_2;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
const Int64 v0 = (Int64) 1;
const Int64 v1 = (sh2 * shIn2_1 + sh1) * shIn2_0 + sh0;
const Int64 v2 = (sh2 * shIn2_1 + sh1) * shIn2_0 + (v0 + sh0);
const Int64 v3 = (sh2 * shIn2_1 + (v0 + sh1)) * shIn2_0 + (v0 + sh0);
const Int64 v4 = (sh2 * shIn2_1 + (v0 + sh1)) * shIn2_0 + sh0;
const Int64 v5 = ((v0 + sh2) * shIn2_1 + sh1) * shIn2_0 + sh0;
const Int64 v6 = ((v0 + sh2) * shIn2_1 + sh1) * shIn2_0 + (v0 + sh0);
const Int64 v7 = ((v0 + sh2) * shIn2_1 + (v0 + sh1)) * shIn2_0 + (v0 + sh0);
const Int64 v8 = ((v0 + sh2) * shIn2_1 + (v0 + sh1)) * shIn2_0 + sh0;
const double v9 = arrIn2_2[v1];
const double v10 = arrIn2_1[v1];
const double v11 = arrIn2_0[v1];
const double v12 = arrIn2_2[v2];
const double v13 = arrIn2_1[v2];
const double v14 = arrIn2_0[v2];
const double v15 = arrIn2_2[v3];
const double v16 = arrIn2_1[v3];
const double v17 = arrIn2_0[v3];
const double v18 = arrIn2_2[v4];
const double v19 = arrIn2_1[v4];
const double v20 = arrIn2_0[v4];
const double v21 = arrIn2_2[v5];
const double v22 = arrIn2_1[v5];
const double v23 = arrIn2_0[v5];
const double v24 = arrIn2_2[v6];
const double v25 = arrIn2_1[v6];
const double v26 = arrIn2_0[v6];
const double v27 = arrIn2_2[v7];
const double v28 = arrIn2_1[v7];
const double v29 = arrIn2_0[v7];
const double v30 = arrIn2_2[v8];
const double v31 = arrIn2_1[v8];
const double v32 = arrIn2_0[v8];
const double v33 = v21 + v30;
const double v34 = v22 + v31;
const double v35 = v23 + v32;
const double v36 = v33 + v27;
const double v37 = v34 + v28;
const double v38 = v35 + v29;
const double v39 = v36 + v24;
const double v40 = v37 + v25;
const double v41 = v38 + v26;
const double v42 = v9 + v12;
const double v43 = v10 + v13;
const double v44 = v11 + v14;
const double v45 = v42 + v15;
const double v46 = v43 + v16;
const double v47 = v44 + v17;
const double v48 = v45 + v18;
const double v49 = v46 + v19;
const double v50 = v47 + v20;
const double v51 = v39 - v48;
const double v52 = v40 - v49;
const double v53 = v41 - v50;
const double v54 = 0.25 * v51;
const double v55 = 0.25 * v52;
const double v56 = 0.25 * v53;
const double v57 = v15 + v27;
const double v58 = v16 + v28;
const double v59 = v17 + v29;
const double v60 = v57 + v30;
const double v61 = v58 + v31;
const double v62 = v59 + v32;
const double v63 = v60 + v18;
const double v64 = v61 + v19;
const double v65 = v62 + v20;
const double v66 = v9 + v21;
const double v67 = v10 + v22;
const double v68 = v11 + v23;
const double v69 = v66 + v24;
const double v70 = v67 + v25;
const double v71 = v68 + v26;
const double v72 = v69 + v12;
const double v73 = v70 + v13;
const double v74 = v71 + v14;
const double v75 = v63 - v72;
const double v76 = v64 - v73;
const double v77 = v65 - v74;
const double v78 = 0.25 * v75;
const double v79 = 0.25 * v76;
const double v80 = 0.25 * v77;
const double v81 = v12 + v24;
const double v82 = v13 + v25;
const double v83 = v14 + v26;
const double v84 = v81 + v27;
const double v85 = v82 + v28;
const double v86 = v83 + v29;
const double v87 = v84 + v15;
const double v88 = v85 + v16;
const double v89 = v86 + v17;
const double v90 = v18 + v30;
const double v91 = v19 + v31;
const double v92 = v20 + v32;
const double v93 = v90 + v21;
const double v94 = v91 + v22;
const double v95 = v92 + v23;
const double v96 = v93 + v9;
const double v97 = v94 + v10;
const double v98 = v95 + v11;
const double v99 = v87 - v96;
const double v100 = v88 - v97;
const double v101 = v89 - v98;
const double v102 = 0.25 * v99;
const double v103 = 0.25 * v100;
const double v104 = 0.25 * v101;
const double v105 = v79 * v56 - v80 * v55;
const double v106 = v80 * v54 - v78 * v56;
const double v107 = v78 * v55 - v79 * v54;
const double v108 = v55 * v104 - v56 * v103;
const double v109 = v56 * v102 - v54 * v104;
const double v110 = v54 * v103 - v55 * v102;
const double v111 = v103 * v80 - v104 * v79;
const double v112 = v104 * v78 - v102 * v80;
const double v113 = v102 * v79 - v103 * v78;
const double v116 = ({ const Int64 v114 = (sh2 * shIn1_1 + sh1) * shIn1_0 + sh0; ; arrIn1_3[v114]; }) * ({ const Int64 v115 = (sh2 * shIn0_1 + sh1) * shIn0_0 + sh0; ; arrIn0_0[v115]; });
const Int64 v117 = (sh2 * shIn3_1 + sh1) * shIn3_0 + sh0;
const Int64 v118 = (sh2 * shIn3_1 + sh1) * shIn3_0 + (v0 + sh0);
const Int64 v119 = (sh2 * shIn3_1 + (v0 + sh1)) * shIn3_0 + (v0 + sh0);
const Int64 v120 = (sh2 * shIn3_1 + (v0 + sh1)) * shIn3_0 + sh0;
const Int64 v121 = ((v0 + sh2) * shIn3_1 + sh1) * shIn3_0 + sh0;
const Int64 v122 = ((v0 + sh2) * shIn3_1 + sh1) * shIn3_0 + (v0 + sh0);
const Int64 v123 = ((v0 + sh2) * shIn3_1 + (v0 + sh1)) * shIn3_0 + (v0 + sh0);
const Int64 v124 = ((v0 + sh2) * shIn3_1 + (v0 + sh1)) * shIn3_0 + sh0;
const double v125 = arrIn3_2[v117];
const double v126 = arrIn3_1[v117];
const double v127 = arrIn3_0[v117];
const double v128 = arrIn3_2[v118];
const double v129 = arrIn3_1[v118];
const double v130 = arrIn3_0[v118];
const double v131 = arrIn3_2[v119];
const double v132 = arrIn3_1[v119];
const double v133 = arrIn3_0[v119];
const double v134 = arrIn3_2[v120];
const double v135 = arrIn3_1[v120];
const double v136 = arrIn3_0[v120];
const double v137 = arrIn3_2[v121];
const double v138 = arrIn3_1[v121];
const double v139 = arrIn3_0[v121];
const double v140 = arrIn3_2[v122];
const double v141 = arrIn3_1[v122];
const double v142 = arrIn3_0[v122];
const double v143 = arrIn3_2[v123];
const double v144 = arrIn3_1[v123];
const double v145 = arrIn3_0[v123];
const double v146 = arrIn3_2[v124];
const double v147 = arrIn3_1[v124];
const double v148 = arrIn3_0[v124];
const double v149 = 1.0 / v116;
arrOut_5[ix] = v116 / sqrt(v105 * v105 + v106 * v106 + v107 * v107);
arrOut_4[ix] = v116 / sqrt(v108 * v108 + v109 * v109 + v110 * v110);
arrOut_3[ix] = v116 / sqrt(v111 * v111 + v112 * v112 + v113 * v113);
arrOut_2[ix] = v149 * ({
const double v150 = v128 + v140;
const double v151 = v129 + v141;
const double v152 = v130 + v142;
const double v153 = v150 + v143;
const double v154 = v151 + v144;
const double v155 = v152 + v145;
const double v156 = v153 + v131;
const double v157 = v154 + v132;
const double v158 = v155 + v133;
const double v159 = v134 + v146;
const double v160 = v135 + v147;
const double v161 = v136 + v148;
const double v162 = v159 + v137;
const double v163 = v160 + v138;
const double v164 = v161 + v139;
const double v165 = v162 + v125;
const double v166 = v163 + v126;
const double v167 = v164 + v127;
const double v168 = v156 - v165;
const double v169 = v157 - v166;
const double v170 = v158 - v167;
const double v171 = 0.25 * v168;
const double v172 = 0.25 * v169;
const double v173 = 0.25 * v170;
;
v105 * v171 + v106 * v172 + v107 * v173;
});
arrOut_1[ix] = v149 * ({
const double v174 = v131 + v143;
const double v175 = v132 + v144;
const double v176 = v133 + v145;
const double v177 = v174 + v146;
const double v178 = v175 + v147;
const double v179 = v176 + v148;
const double v180 = v177 + v134;
const double v181 = v178 + v135;
const double v182 = v179 + v136;
const double v183 = v125 + v137;
const double v184 = v126 + v138;
const double v185 = v127 + v139;
const double v186 = v183 + v140;
const double v187 = v184 + v141;
const double v188 = v185 + v142;
const double v189 = v186 + v128;
const double v190 = v187 + v129;
const double v191 = v188 + v130;
const double v192 = v180 - v189;
const double v193 = v181 - v190;
const double v194 = v182 - v191;
const double v195 = 0.25 * v192;
const double v196 = 0.25 * v193;
const double v197 = 0.25 * v194;
;
v108 * v195 + v109 * v196 + v110 * v197;
});
arrOut_0[ix] = v149 * ({
const double v198 = v137 + v146;
const double v199 = v138 + v147;
const double v200 = v139 + v148;
const double v201 = v198 + v143;
const double v202 = v199 + v144;
const double v203 = v200 + v145;
const double v204 = v201 + v140;
const double v205 = v202 + v141;
const double v206 = v203 + v142;
const double v207 = v125 + v128;
const double v208 = v126 + v129;
const double v209 = v127 + v130;
const double v210 = v207 + v131;
const double v211 = v208 + v132;
const double v212 = v209 + v133;
const double v213 = v210 + v134;
const double v214 = v211 + v135;
const double v215 = v212 + v136;
const double v216 = v204 - v213;
const double v217 = v205 - v214;
const double v218 = v206 - v215;
const double v219 = 0.25 * v216;
const double v220 = 0.25 * v217;
const double v221 = 0.25 * v218;
;
v111 * v219 + v112 * v220 + v113 * v221;
});
}
}
| bcf11c02a8f58bfdd038d1ced000c72c487270c1.cu | #include <accelerate_cuda.h>
extern "C" __global__ void generate(const Int64 shIn0_2, const Int64 shIn0_1, const Int64 shIn0_0, const double* __restrict__ arrIn0_0, const Int64 shIn1_2, const Int64 shIn1_1, const Int64 shIn1_0, const double* __restrict__ arrIn1_3, const double* __restrict__ arrIn1_2, const double* __restrict__ arrIn1_1, const double* __restrict__ arrIn1_0, const Int64 shIn2_2, const Int64 shIn2_1, const Int64 shIn2_0, const double* __restrict__ arrIn2_2, const double* __restrict__ arrIn2_1, const double* __restrict__ arrIn2_0, const Int64 shIn3_2, const Int64 shIn3_1, const Int64 shIn3_0, const double* __restrict__ arrIn3_2, const double* __restrict__ arrIn3_1, const double* __restrict__ arrIn3_0, const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_5, double* __restrict__ arrOut_4, double* __restrict__ arrOut_3, double* __restrict__ arrOut_2, double* __restrict__ arrOut_1, double* __restrict__ arrOut_0)
{
const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 tmp_2 = tmp_1 / shOut_1;
const Int64 sh2 = tmp_2 % shOut_2;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
const Int64 v0 = (Int64) 1;
const Int64 v1 = (sh2 * shIn2_1 + sh1) * shIn2_0 + sh0;
const Int64 v2 = (sh2 * shIn2_1 + sh1) * shIn2_0 + (v0 + sh0);
const Int64 v3 = (sh2 * shIn2_1 + (v0 + sh1)) * shIn2_0 + (v0 + sh0);
const Int64 v4 = (sh2 * shIn2_1 + (v0 + sh1)) * shIn2_0 + sh0;
const Int64 v5 = ((v0 + sh2) * shIn2_1 + sh1) * shIn2_0 + sh0;
const Int64 v6 = ((v0 + sh2) * shIn2_1 + sh1) * shIn2_0 + (v0 + sh0);
const Int64 v7 = ((v0 + sh2) * shIn2_1 + (v0 + sh1)) * shIn2_0 + (v0 + sh0);
const Int64 v8 = ((v0 + sh2) * shIn2_1 + (v0 + sh1)) * shIn2_0 + sh0;
const double v9 = arrIn2_2[v1];
const double v10 = arrIn2_1[v1];
const double v11 = arrIn2_0[v1];
const double v12 = arrIn2_2[v2];
const double v13 = arrIn2_1[v2];
const double v14 = arrIn2_0[v2];
const double v15 = arrIn2_2[v3];
const double v16 = arrIn2_1[v3];
const double v17 = arrIn2_0[v3];
const double v18 = arrIn2_2[v4];
const double v19 = arrIn2_1[v4];
const double v20 = arrIn2_0[v4];
const double v21 = arrIn2_2[v5];
const double v22 = arrIn2_1[v5];
const double v23 = arrIn2_0[v5];
const double v24 = arrIn2_2[v6];
const double v25 = arrIn2_1[v6];
const double v26 = arrIn2_0[v6];
const double v27 = arrIn2_2[v7];
const double v28 = arrIn2_1[v7];
const double v29 = arrIn2_0[v7];
const double v30 = arrIn2_2[v8];
const double v31 = arrIn2_1[v8];
const double v32 = arrIn2_0[v8];
const double v33 = v21 + v30;
const double v34 = v22 + v31;
const double v35 = v23 + v32;
const double v36 = v33 + v27;
const double v37 = v34 + v28;
const double v38 = v35 + v29;
const double v39 = v36 + v24;
const double v40 = v37 + v25;
const double v41 = v38 + v26;
const double v42 = v9 + v12;
const double v43 = v10 + v13;
const double v44 = v11 + v14;
const double v45 = v42 + v15;
const double v46 = v43 + v16;
const double v47 = v44 + v17;
const double v48 = v45 + v18;
const double v49 = v46 + v19;
const double v50 = v47 + v20;
const double v51 = v39 - v48;
const double v52 = v40 - v49;
const double v53 = v41 - v50;
const double v54 = 0.25 * v51;
const double v55 = 0.25 * v52;
const double v56 = 0.25 * v53;
const double v57 = v15 + v27;
const double v58 = v16 + v28;
const double v59 = v17 + v29;
const double v60 = v57 + v30;
const double v61 = v58 + v31;
const double v62 = v59 + v32;
const double v63 = v60 + v18;
const double v64 = v61 + v19;
const double v65 = v62 + v20;
const double v66 = v9 + v21;
const double v67 = v10 + v22;
const double v68 = v11 + v23;
const double v69 = v66 + v24;
const double v70 = v67 + v25;
const double v71 = v68 + v26;
const double v72 = v69 + v12;
const double v73 = v70 + v13;
const double v74 = v71 + v14;
const double v75 = v63 - v72;
const double v76 = v64 - v73;
const double v77 = v65 - v74;
const double v78 = 0.25 * v75;
const double v79 = 0.25 * v76;
const double v80 = 0.25 * v77;
const double v81 = v12 + v24;
const double v82 = v13 + v25;
const double v83 = v14 + v26;
const double v84 = v81 + v27;
const double v85 = v82 + v28;
const double v86 = v83 + v29;
const double v87 = v84 + v15;
const double v88 = v85 + v16;
const double v89 = v86 + v17;
const double v90 = v18 + v30;
const double v91 = v19 + v31;
const double v92 = v20 + v32;
const double v93 = v90 + v21;
const double v94 = v91 + v22;
const double v95 = v92 + v23;
const double v96 = v93 + v9;
const double v97 = v94 + v10;
const double v98 = v95 + v11;
const double v99 = v87 - v96;
const double v100 = v88 - v97;
const double v101 = v89 - v98;
const double v102 = 0.25 * v99;
const double v103 = 0.25 * v100;
const double v104 = 0.25 * v101;
const double v105 = v79 * v56 - v80 * v55;
const double v106 = v80 * v54 - v78 * v56;
const double v107 = v78 * v55 - v79 * v54;
const double v108 = v55 * v104 - v56 * v103;
const double v109 = v56 * v102 - v54 * v104;
const double v110 = v54 * v103 - v55 * v102;
const double v111 = v103 * v80 - v104 * v79;
const double v112 = v104 * v78 - v102 * v80;
const double v113 = v102 * v79 - v103 * v78;
const double v116 = ({ const Int64 v114 = (sh2 * shIn1_1 + sh1) * shIn1_0 + sh0; ; arrIn1_3[v114]; }) * ({ const Int64 v115 = (sh2 * shIn0_1 + sh1) * shIn0_0 + sh0; ; arrIn0_0[v115]; });
const Int64 v117 = (sh2 * shIn3_1 + sh1) * shIn3_0 + sh0;
const Int64 v118 = (sh2 * shIn3_1 + sh1) * shIn3_0 + (v0 + sh0);
const Int64 v119 = (sh2 * shIn3_1 + (v0 + sh1)) * shIn3_0 + (v0 + sh0);
const Int64 v120 = (sh2 * shIn3_1 + (v0 + sh1)) * shIn3_0 + sh0;
const Int64 v121 = ((v0 + sh2) * shIn3_1 + sh1) * shIn3_0 + sh0;
const Int64 v122 = ((v0 + sh2) * shIn3_1 + sh1) * shIn3_0 + (v0 + sh0);
const Int64 v123 = ((v0 + sh2) * shIn3_1 + (v0 + sh1)) * shIn3_0 + (v0 + sh0);
const Int64 v124 = ((v0 + sh2) * shIn3_1 + (v0 + sh1)) * shIn3_0 + sh0;
const double v125 = arrIn3_2[v117];
const double v126 = arrIn3_1[v117];
const double v127 = arrIn3_0[v117];
const double v128 = arrIn3_2[v118];
const double v129 = arrIn3_1[v118];
const double v130 = arrIn3_0[v118];
const double v131 = arrIn3_2[v119];
const double v132 = arrIn3_1[v119];
const double v133 = arrIn3_0[v119];
const double v134 = arrIn3_2[v120];
const double v135 = arrIn3_1[v120];
const double v136 = arrIn3_0[v120];
const double v137 = arrIn3_2[v121];
const double v138 = arrIn3_1[v121];
const double v139 = arrIn3_0[v121];
const double v140 = arrIn3_2[v122];
const double v141 = arrIn3_1[v122];
const double v142 = arrIn3_0[v122];
const double v143 = arrIn3_2[v123];
const double v144 = arrIn3_1[v123];
const double v145 = arrIn3_0[v123];
const double v146 = arrIn3_2[v124];
const double v147 = arrIn3_1[v124];
const double v148 = arrIn3_0[v124];
const double v149 = 1.0 / v116;
arrOut_5[ix] = v116 / sqrt(v105 * v105 + v106 * v106 + v107 * v107);
arrOut_4[ix] = v116 / sqrt(v108 * v108 + v109 * v109 + v110 * v110);
arrOut_3[ix] = v116 / sqrt(v111 * v111 + v112 * v112 + v113 * v113);
arrOut_2[ix] = v149 * ({
const double v150 = v128 + v140;
const double v151 = v129 + v141;
const double v152 = v130 + v142;
const double v153 = v150 + v143;
const double v154 = v151 + v144;
const double v155 = v152 + v145;
const double v156 = v153 + v131;
const double v157 = v154 + v132;
const double v158 = v155 + v133;
const double v159 = v134 + v146;
const double v160 = v135 + v147;
const double v161 = v136 + v148;
const double v162 = v159 + v137;
const double v163 = v160 + v138;
const double v164 = v161 + v139;
const double v165 = v162 + v125;
const double v166 = v163 + v126;
const double v167 = v164 + v127;
const double v168 = v156 - v165;
const double v169 = v157 - v166;
const double v170 = v158 - v167;
const double v171 = 0.25 * v168;
const double v172 = 0.25 * v169;
const double v173 = 0.25 * v170;
;
v105 * v171 + v106 * v172 + v107 * v173;
});
arrOut_1[ix] = v149 * ({
const double v174 = v131 + v143;
const double v175 = v132 + v144;
const double v176 = v133 + v145;
const double v177 = v174 + v146;
const double v178 = v175 + v147;
const double v179 = v176 + v148;
const double v180 = v177 + v134;
const double v181 = v178 + v135;
const double v182 = v179 + v136;
const double v183 = v125 + v137;
const double v184 = v126 + v138;
const double v185 = v127 + v139;
const double v186 = v183 + v140;
const double v187 = v184 + v141;
const double v188 = v185 + v142;
const double v189 = v186 + v128;
const double v190 = v187 + v129;
const double v191 = v188 + v130;
const double v192 = v180 - v189;
const double v193 = v181 - v190;
const double v194 = v182 - v191;
const double v195 = 0.25 * v192;
const double v196 = 0.25 * v193;
const double v197 = 0.25 * v194;
;
v108 * v195 + v109 * v196 + v110 * v197;
});
arrOut_0[ix] = v149 * ({
const double v198 = v137 + v146;
const double v199 = v138 + v147;
const double v200 = v139 + v148;
const double v201 = v198 + v143;
const double v202 = v199 + v144;
const double v203 = v200 + v145;
const double v204 = v201 + v140;
const double v205 = v202 + v141;
const double v206 = v203 + v142;
const double v207 = v125 + v128;
const double v208 = v126 + v129;
const double v209 = v127 + v130;
const double v210 = v207 + v131;
const double v211 = v208 + v132;
const double v212 = v209 + v133;
const double v213 = v210 + v134;
const double v214 = v211 + v135;
const double v215 = v212 + v136;
const double v216 = v204 - v213;
const double v217 = v205 - v214;
const double v218 = v206 - v215;
const double v219 = 0.25 * v216;
const double v220 = 0.25 * v217;
const double v221 = 0.25 * v218;
;
v111 * v219 + v112 * v220 + v113 * v221;
});
}
}
|
33e68b71391175825f0d0fdca30e7d1d1a6c650b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zgemm_fermi.cu normal z -> d, Fri Jul 18 17:34:13 2014
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
gemm_stencil.cu defines the GPU kernel. It gets included
multiple times, once for each transpose version.
*/
#include "common_magma.h"
#include "commonblas_d.h"
#include <assert.h>
#define PRECISION_d
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "dgemm_fermi_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
DGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X**T or op( X ) = X**T,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
TRANSA CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = 'N': op( A ) = A.
- = 'T': op( A ) = A**T.
- = 'C': op( A ) = A**T.
@param[in]
TRANSB CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = 'N': op( B ) = B.
- = 'T': op( B ) = B**T.
- = 'C': op( B ) = B**T.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( d_A ) and of the matrix d_C. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( d_B ) and the number of columns of the matrix d_C. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( d_A ) and the number of rows of the matrix op( d_B ). K must
be at least zero.
@param[in]
alpha DOUBLE_PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_A DOUBLE_PRECISION array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = MagmaNoTrans, and is m otherwise.
Before entry with TRANSA = MagmaNoTrans, the leading m by k
part of the array d_A must contain the matrix d_A, otherwise
the leading k by m part of the array d_A must contain the
matrix d_A.
@param[in]
lda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
d_B DOUBLE_PRECISION array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = MagmaNoTrans, and is k otherwise.
Before entry with TRANSB = MagmaNoTrans, the leading k by n
part of the array d_B must contain the matrix d_B, otherwise
the leading n by k part of the array d_B must contain the
matrix d_B.
@param[in]
ldb INTEGER.
On entry, LDB specifies the first dimension of d_B as declared
in the calling (sub) program. When TRANSB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta DOUBLE_PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then d_C need not be set on input.
@param[in,out]
d_C DOUBLE_PRECISION array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array d_C must
contain the matrix d_C, except when beta is zero, in which
case d_C need not be set on entry.
On exit, the array d_C is overwritten by the m by n matrix
( alpha*op( d_A )*op( d_B ) + beta*d_C ).
@param[in]
ldc INTEGER.
On entry, LDC specifies the first dimension of d_C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@ingroup magma_dblas3
********************************************************************/
extern "C" void
magmablas_dgemm(
magma_trans_t TRANSA, magma_trans_t TRANSB, magma_int_t m, magma_int_t n, magma_int_t k,
double alpha,
const double *d_A, magma_int_t lda,
const double *d_B, magma_int_t ldb,
double beta,
double *d_C, magma_int_t ldc )
{
magma_int_t info = 0;
if ( TRANSA != MagmaNoTrans && TRANSA != MagmaTrans && TRANSA != Magma_ConjTrans )
info = -1;
else if ( TRANSB != MagmaNoTrans && TRANSB != MagmaTrans && TRANSB != Magma_ConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( TRANSA == MagmaNoTrans ? lda < m : lda < k )
info = -8;
else if ( TRANSB == MagmaNoTrans ? ldb < k : ldb < n )
info = -10;
else if ( ldc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
magma_dgemm(
TRANSA, TRANSB,
m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
#else
magmablas_dgemm_tesla(
TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
#endif
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 2, TransB = 2;
if ( TRANSA == MagmaTrans )
TransA = 1;
else if ( TRANSA == MagmaNoTrans )
TransA = 0;
if ( TRANSB == MagmaTrans )
TransB = 1;
else if ( TRANSB == MagmaNoTrans )
TransB = 0;
size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_dgemm( TRANSA, TRANSB, m, n, k, alpha,
d_A, lda, d_B, ldb,
beta, d_C, ldc );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = hipFilterModePoint;
tex_ref_A.addressMode[0] = hipAddressModeClamp;
tex_ref_B.normalized = false;
tex_ref_B.filterMode = hipFilterModePoint;
tex_ref_B.addressMode[0] = hipAddressModeClamp;
// Bind A and B to texture references
hipError_t err;
err = hipBindTexture(&offsetA, tex_ref_A, d_A, sizeA*sizeof(double));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err );
return;
}
err = hipBindTexture(&offsetB, tex_ref_B, d_B, sizeB*sizeof(double));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", hipGetErrorString(err), err );
hipUnbindTexture( tex_ref_A );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(d_A[0]);
offsetB = offsetB/sizeof(d_B[0]);
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_nn + 1,
(n - 1)/BLK_N_nn + 1 );
hipLaunchKernelGGL(( dgemm_kernel_fermi_nn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 );
hipLaunchKernelGGL(( dgemm_kernel_fermi_nt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 );
hipLaunchKernelGGL(( dgemm_kernel_fermi_nc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 );
hipLaunchKernelGGL(( dgemm_kernel_fermi_tn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_tt + 1,
(n - 1)/BLK_N_tt + 1 );
hipLaunchKernelGGL(( dgemm_kernel_fermi_tt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_tc + 1,
(n - 1)/BLK_N_tc + 1 );
hipLaunchKernelGGL(( dgemm_kernel_fermi_tc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 );
hipLaunchKernelGGL(( dgemm_kernel_fermi_cn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_ct + 1,
(n - 1)/BLK_N_ct + 1 );
hipLaunchKernelGGL(( dgemm_kernel_fermi_ct), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_cc + 1,
(n - 1)/BLK_N_cc + 1 );
hipLaunchKernelGGL(( dgemm_kernel_fermi_cc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
hipUnbindTexture( tex_ref_A );
hipUnbindTexture( tex_ref_B );
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| 33e68b71391175825f0d0fdca30e7d1d1a6c650b.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zgemm_fermi.cu normal z -> d, Fri Jul 18 17:34:13 2014
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
gemm_stencil.cu defines the GPU kernel. It gets included
multiple times, once for each transpose version.
*/
#include "common_magma.h"
#include "commonblas_d.h"
#include <assert.h>
#define PRECISION_d
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "dgemm_fermi_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
DGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X**T or op( X ) = X**T,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
TRANSA CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = 'N': op( A ) = A.
- = 'T': op( A ) = A**T.
- = 'C': op( A ) = A**T.
@param[in]
TRANSB CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = 'N': op( B ) = B.
- = 'T': op( B ) = B**T.
- = 'C': op( B ) = B**T.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( d_A ) and of the matrix d_C. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( d_B ) and the number of columns of the matrix d_C. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( d_A ) and the number of rows of the matrix op( d_B ). K must
be at least zero.
@param[in]
alpha DOUBLE_PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_A DOUBLE_PRECISION array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = MagmaNoTrans, and is m otherwise.
Before entry with TRANSA = MagmaNoTrans, the leading m by k
part of the array d_A must contain the matrix d_A, otherwise
the leading k by m part of the array d_A must contain the
matrix d_A.
@param[in]
lda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
d_B DOUBLE_PRECISION array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = MagmaNoTrans, and is k otherwise.
Before entry with TRANSB = MagmaNoTrans, the leading k by n
part of the array d_B must contain the matrix d_B, otherwise
the leading n by k part of the array d_B must contain the
matrix d_B.
@param[in]
ldb INTEGER.
On entry, LDB specifies the first dimension of d_B as declared
in the calling (sub) program. When TRANSB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta DOUBLE_PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then d_C need not be set on input.
@param[in,out]
d_C DOUBLE_PRECISION array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array d_C must
contain the matrix d_C, except when beta is zero, in which
case d_C need not be set on entry.
On exit, the array d_C is overwritten by the m by n matrix
( alpha*op( d_A )*op( d_B ) + beta*d_C ).
@param[in]
ldc INTEGER.
On entry, LDC specifies the first dimension of d_C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@ingroup magma_dblas3
********************************************************************/
extern "C" void
magmablas_dgemm(
    magma_trans_t TRANSA, magma_trans_t TRANSB, magma_int_t m, magma_int_t n, magma_int_t k,
    double alpha,
    const double *d_A, magma_int_t lda,
    const double *d_B, magma_int_t ldb,
    double beta,
    double *d_C, magma_int_t ldc )
{
    // Validate arguments BLAS-style: a negative info value encodes the
    // position of the first invalid parameter.
    magma_int_t info = 0;
    if ( TRANSA != MagmaNoTrans && TRANSA != MagmaTrans && TRANSA != Magma_ConjTrans )
        info = -1;
    else if ( TRANSB != MagmaNoTrans && TRANSB != MagmaTrans && TRANSB != Magma_ConjTrans )
        info = -2;
    else if ( m < 0 )
        info = -3;
    else if ( n < 0 )
        info = -4;
    else if ( k < 0 )
        info = -5;
    else if ( TRANSA == MagmaNoTrans ? lda < m : lda < k )
        info = -8;
    else if ( TRANSB == MagmaNoTrans ? ldb < k : ldb < n )
        info = -10;
    else if ( ldc < m )
        info = -13;
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;  //info;
    }

    magma_int_t arch = magma_getdevice_arch();
    if ( arch < 200 ) {
        // --------------------
        // call CUDA ARCH 1.x version
        // magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
        magma_dgemm(
            TRANSA, TRANSB,
            m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
#else
        magmablas_dgemm_tesla(
            TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
#endif
        return;
    }

    // --------------------
    // CUDA ARCH 2.x (Fermi) version
    // Quick return for empty products.
    // NOTE(review): unlike reference BLAS, k == 0 returns here without applying
    // the beta scaling of C -- confirm callers never rely on that case.
    if ( m <= 0 || n <= 0 || k <= 0 )
        return;

    size_t offsetA = 0;
    size_t offsetB = 0;

    // Encode the transpose ops as 0 = NoTrans, 1 = Trans, 2 = ConjTrans (the
    // default) to select the matching Fermi kernel below.
    int TransA = 2, TransB = 2;
    if ( TRANSA == MagmaTrans )
        TransA = 1;
    else if ( TRANSA == MagmaNoTrans )
        TransA = 0;
    if ( TRANSB == MagmaTrans )
        TransB = 1;
    else if ( TRANSB == MagmaNoTrans )
        TransB = 0;

    // Element counts of A and B: leading dimension times number of columns
    // (k or m for A, n or k for B, depending on the transpose op).
    size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
    size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);

    // Matrices too large to bind to a 1D texture are handed off to cuBLAS.
    size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
    if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
         sizeB >= CUBLAS_MAX_1DBUF_SIZE )
    {
        magma_dgemm( TRANSA, TRANSB, m, n, k, alpha,
                     d_A, lda, d_B, ldb,
                     beta, d_C, ldc );
        return;
    }

#ifdef TEXTURE_1D
    // Set textures parameters
    tex_ref_A.normalized = false;
    tex_ref_A.filterMode = cudaFilterModePoint;
    tex_ref_A.addressMode[0] = cudaAddressModeClamp;
    tex_ref_B.normalized = false;
    tex_ref_B.filterMode = cudaFilterModePoint;
    tex_ref_B.addressMode[0] = cudaAddressModeClamp;

    // Bind A and B to texture references
    cudaError_t err;
    err = cudaBindTexture(&offsetA, tex_ref_A, d_A, sizeA*sizeof(double));
    if ( err != cudaSuccess ) {
        fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err );
        return;
    }
    err = cudaBindTexture(&offsetB, tex_ref_B, d_B, sizeB*sizeof(double));
    if ( err != cudaSuccess ) {
        fprintf( stderr, "cannot bind B to texture: %s (%d)\n", cudaGetErrorString(err), err );
        cudaUnbindTexture( tex_ref_A );  // don't leak the A binding on failure
        return;
    }
#endif

    // Set up grids
    dim3 dimBlock(DIM_X, DIM_Y);

    // cudaBindTexture returned byte offsets; convert them to element offsets.
    offsetA = offsetA/sizeof(d_A[0]);
    offsetB = offsetB/sizeof(d_B[0]);

    // Dispatch to the kernel specialized for the (TransA, TransB) pair; each
    // variant has its own block tiling constants BLK_M_xy x BLK_N_xy.
    if ( TransA == 0 && TransB == 0 ) {
        dim3 dimGrid( (m - 1)/BLK_M_nn + 1,
                      (n - 1)/BLK_N_nn + 1 );
        dgemm_kernel_fermi_nn<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 0 && TransB == 1 ) {
        dim3 dimGrid( (m - 1)/BLK_M_nt + 1,
                      (n - 1)/BLK_N_nt + 1 );
        dgemm_kernel_fermi_nt<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 0 && TransB == 2 ) {
        dim3 dimGrid( (m - 1)/BLK_M_nc + 1,
                      (n - 1)/BLK_N_nc + 1 );
        dgemm_kernel_fermi_nc<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 1 && TransB == 0 ) {
        dim3 dimGrid( (m - 1)/BLK_M_tn + 1,
                      (n - 1)/BLK_N_tn + 1 );
        dgemm_kernel_fermi_tn<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 1 && TransB == 1 ) {
        dim3 dimGrid( (m - 1)/BLK_M_tt + 1,
                      (n - 1)/BLK_N_tt + 1 );
        dgemm_kernel_fermi_tt<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 1 && TransB == 2 ) {
        dim3 dimGrid( (m - 1)/BLK_M_tc + 1,
                      (n - 1)/BLK_N_tc + 1 );
        dgemm_kernel_fermi_tc<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 2 && TransB == 0 ) {
        dim3 dimGrid( (m - 1)/BLK_M_cn + 1,
                      (n - 1)/BLK_N_cn + 1 );
        dgemm_kernel_fermi_cn<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 2 && TransB == 1 ) {
        dim3 dimGrid( (m - 1)/BLK_M_ct + 1,
                      (n - 1)/BLK_N_ct + 1 );
        dgemm_kernel_fermi_ct<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }
    else if ( TransA == 2 && TransB == 2 ) {
        dim3 dimGrid( (m - 1)/BLK_M_cc + 1,
                      (n - 1)/BLK_N_cc + 1 );
        dgemm_kernel_fermi_cc<<< dimGrid, dimBlock, 0, magma_stream >>>(
            m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
            (int)offsetA, (int)offsetB );
    }

    // NOTE(review): these unbind calls sit outside the #ifdef TEXTURE_1D guard;
    // this compiles only if TEXTURE_1D is defined by dgemm_fermi_kernels.h -- confirm.
    cudaUnbindTexture( tex_ref_A );
    cudaUnbindTexture( tex_ref_B );
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
9bf2950d9860baf0700ce5df0f36b14e49e6f75f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.cuh"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// backward kernel function: for combiner=sum
template <typename TypeEmbeddingComp>
__global__ void backward_sum_kernel(int batch_size, int slot_num, int embedding_vec_size,
                                    const TypeEmbeddingComp *top_grad, TypeEmbeddingComp *wgrad) {
  // SUM combiner backward: the gradient passes through unchanged, so each
  // thread copies one embedding-vector element for every slot of its sample.
  const int vec_elem = threadIdx.x;  // element index within the embedding vector
  const int sample = blockIdx.x;     // one block per sample
  if (sample >= batch_size || vec_elem >= embedding_vec_size) {
    return;
  }
  for (int slot = 0; slot < slot_num; ++slot) {
    const size_t idx = (size_t)(sample * slot_num + slot) * embedding_vec_size + vec_elem;
    wgrad[idx] = top_grad[idx];
  }
}
// __half2 variant of backward_sum_kernel: the caller passes embedding_vec_size
// already halved, so each thread copies one __half2 (two packed elements).
__global__ void backward_sum_align2_kernel(int batch_size, int slot_num, int embedding_vec_size,
                                           const __half *top_grad, __half *wgrad) {
  int tid = threadIdx.x;  // __half2 element index within the embedding vector
  int bid = blockIdx.x;   // one block per sample
  if (bid < batch_size && tid < embedding_vec_size) {
    // Reinterpret as __half2 so one load/store moves two halves at once.
    const __half2 *top_grad2 = reinterpret_cast<const __half2 *>(top_grad);
    __half2 *wgrad2 = reinterpret_cast<__half2 *>(wgrad);
    for (int i = 0; i < slot_num; i++) {
      size_t feature_index = (size_t)(bid * slot_num + i) * embedding_vec_size + tid;
      wgrad2[feature_index] = top_grad2[feature_index];
    }
  }
}
// backward kernel function: for combiner=mean
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void backward_mean_kernel(int batch_size, int slot_num, int embedding_vec_size,
                                     const TypeKey *row_offset, const TypeEmbeddingComp *top_grad,
                                     TypeEmbeddingComp *wgrad) {
  int bid = blockIdx.x;   // one block per sample
  int tid = threadIdx.x;  // element index within the embedding vector
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      size_t feature_row_index = bid * slot_num + i;
      // Number of keys in this feature row (CSR-style offsets).
      // NOTE(review): the TypeKey difference is narrowed to int -- assumes
      // per-row key counts fit in int; confirm upstream guarantees this.
      int value_num = row_offset[feature_row_index + 1] - row_offset[feature_row_index];
      float scaler = 1.0f;
      if (value_num > 1) {
        scaler = 1.0f / value_num;  // partial derivative of MEAN
      }
      size_t feature_index = feature_row_index * embedding_vec_size + tid;
      // Scale in float so low-precision TypeEmbeddingComp keeps accuracy.
      float g = TypeConvertFunc<float, TypeEmbeddingComp>::convert(top_grad[feature_index]);
      g *= scaler;
      wgrad[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(g);
    }
  }
}
// __half2 variant of backward_mean_kernel: embedding_vec_size arrives already
// halved, and the 1/value_num scaling is applied with packed half2 arithmetic.
template <typename TypeKey>
__global__ void backward_mean_align2_kernel(int batch_size, int slot_num, int embedding_vec_size,
                                            const TypeKey *row_offset, const __half *top_grad,
                                            __half *wgrad) {
  int bid = blockIdx.x;   // one block per sample
  int tid = threadIdx.x;  // __half2 element index within the embedding vector
  if (bid < batch_size && tid < embedding_vec_size) {
    const __half2 *top_grad2 = reinterpret_cast<const __half2 *>(top_grad);
    __half2 *wgrad2 = reinterpret_cast<__half2 *>(wgrad);
    for (int i = 0; i < slot_num; i++) {
      size_t feature_row_index = bid * slot_num + i;
      int value_num = row_offset[feature_row_index + 1] - row_offset[feature_row_index];
      __half2 scaler = __float2half2_rn(1.0f);
      if (value_num > 1) {
        scaler = __float2half2_rn(1.0f / (float)value_num);  // partial derivative of MEAN
      }
      size_t feature_index = feature_row_index * embedding_vec_size + tid;
      wgrad2[feature_index] = __hmul2(scaler, top_grad2[feature_index]);
    }
  }
}
template <typename TypeEmbeddingComp>
void backward_sum(size_t batch_size, size_t slot_num, size_t embedding_vec_size,
                  const TypeEmbeddingComp *top_grad, TypeEmbeddingComp *wgrad,
                  hipStream_t stream) {
  // Launch layout: one block per sample, one thread per embedding-vector element.
  const dim3 grid(batch_size);
  const dim3 block(embedding_vec_size);
  hipLaunchKernelGGL((backward_sum_kernel), grid, block, 0, stream, batch_size, slot_num,
                     embedding_vec_size, top_grad, wgrad);
}
// __half specialization: uses the vectorized __half2 kernel when the embedding
// vector length is even, otherwise falls back to the scalar kernel.
template <>
void backward_sum<__half>(size_t batch_size, size_t slot_num, size_t embedding_vec_size,
                          const __half *top_grad, __half *wgrad, hipStream_t stream) {
  const size_t grid_size = batch_size;  // each block corresponds to a sample
  if (embedding_vec_size % 2 == 0) {
    const size_t block_size =
        embedding_vec_size / 2;  // each thread corresponds to one __half2 (two elements)
    hipLaunchKernelGGL(( backward_sum_align2_kernel), dim3(grid_size), dim3(block_size), 0, stream,
                       batch_size, slot_num, embedding_vec_size / 2, top_grad, wgrad);
  } else {
    const size_t block_size = embedding_vec_size;  // one thread per element
    hipLaunchKernelGGL(( backward_sum_kernel), dim3(grid_size), dim3(block_size), 0, stream, batch_size, slot_num,
                       embedding_vec_size, top_grad, wgrad);
  }
}
template <typename TypeKey, typename TypeEmbeddingComp>
void backward_mean(size_t batch_size, size_t slot_size, size_t embedding_vec_size,
                   const TypeKey *row_offset, const TypeEmbeddingComp *top_grad,
                   TypeEmbeddingComp *wgrad, hipStream_t stream) {
  // Launch layout: one block per sample, one thread per embedding-vector element.
  const dim3 grid(batch_size);
  const dim3 block(embedding_vec_size);
  hipLaunchKernelGGL((backward_mean_kernel), grid, block, 0, stream, batch_size, slot_size,
                     embedding_vec_size, row_offset, top_grad, wgrad);
}
// __half overload: uses the vectorized __half2 kernel when the embedding
// vector length is even, otherwise falls back to the scalar kernel.
template <typename TypeKey>
void backward_mean(size_t batch_size, size_t slot_size, size_t embedding_vec_size,
                   const TypeKey *row_offset, const __half *top_grad, __half *wgrad,
                   hipStream_t stream) {
  const size_t grid_size = batch_size;  // each block corresponds to a sample
  if (embedding_vec_size % 2 == 0) {
    const size_t block_size = embedding_vec_size / 2;  // one thread per __half2
    hipLaunchKernelGGL(( backward_mean_align2_kernel), dim3(grid_size), dim3(block_size), 0, stream,
                       batch_size, slot_size, embedding_vec_size / 2, row_offset, top_grad, wgrad);
  } else {
    const size_t block_size = embedding_vec_size;  // one thread per element
    hipLaunchKernelGGL(( backward_mean_kernel), dim3(grid_size), dim3(block_size), 0, stream,
                       batch_size, slot_size, embedding_vec_size, row_offset, top_grad, wgrad);
  }
}
} // namespace
/**
* backward propagation for DistributedSlotSparseEmbeddingHash
* The first step of backward propagation: computing the wgrad.
* @param batch_size batch size for the current mini-batch computation.
* @param slot_num the number of slots in hash table.
* @param embedding_vec_size embedding vector size.
* @param combiner combiner type: 0-sum, 1-mean
 * @param row_offset_allreduce_tensors row_offsets tensors after all_reduce of multiple GPUs
 * @param embedding_feature_tensors embedding features tensors of multiple GPUs, storing dgrad
 * from the top layer
 * @param wgrad_tensors wgrad tensors of multiple GPUs, the output of this function.
 * @param resource_manager all gpus' device resources, also used for switching the active device.
*/
template <typename TypeHashKey, typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::backward(size_t batch_size, size_t slot_num,
                                       size_t embedding_vec_size, int combiner,
                                       const Tensors2<TypeHashKey> &row_offset_allreduce_tensors,
                                       const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
                                       Tensors2<TypeEmbeddingComp> &wgrad_tensors,
                                       const ResourceManager &resource_manager) {
  size_t local_gpu_count = resource_manager.get_local_gpu_count();
  // NOTE(review): CudaDeviceContext presumably restores the prior device on
  // destruction -- confirm against its definition.
  CudaDeviceContext context;
  for (size_t id = 0; id < local_gpu_count; id++) {
    const auto &local_gpu = resource_manager.get_local_gpu(id);
    context.set_device(local_gpu->get_device_id());
    // Per-GPU raw pointers: incoming gradient, all-reduced row offsets, and
    // the wgrad output buffer.
    const TypeEmbeddingComp *top_grad = embedding_feature_tensors[id].get_ptr();
    const TypeHashKey *row_offset = row_offset_allreduce_tensors[id].get_ptr();
    TypeEmbeddingComp *wgrad = wgrad_tensors[id].get_ptr();
    if (combiner == 0)  // sum
    {
      backward_sum(batch_size, slot_num, embedding_vec_size, top_grad, wgrad,
                   local_gpu->get_stream());
    } else if (combiner == 1)  // mean
    {
      backward_mean(batch_size, slot_num, embedding_vec_size, row_offset, top_grad, wgrad,
                    local_gpu->get_stream());
    } else {
      CK_THROW_(Error_t::WrongInput, "Invalid combiner type ");
    }
  }
  return;
}
template <typename TypeHashKey, typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::backward(size_t batch_size,
                                       const std::vector<size_t> &slot_num_per_gpu,
                                       size_t embedding_vec_size, int combiner,
                                       const Tensors2<TypeHashKey> &row_offset_allreduce_tensors,
                                       const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
                                       Tensors2<TypeEmbeddingComp> &wgrad_tensors,
                                       const ResourceManager &resource_manager) {
  // Per-GPU slot-count variant: computes wgrad on every local GPU, skipping
  // GPUs that own no slots.
  const size_t gpu_count = resource_manager.get_local_gpu_count();
  CudaDeviceContext context;
  for (size_t gpu_idx = 0; gpu_idx < gpu_count; gpu_idx++) {
    if (slot_num_per_gpu[gpu_idx] == 0) {
      continue;
    }
    const auto &gpu = resource_manager.get_local_gpu(gpu_idx);
    context.set_device(gpu->get_device_id());
    const TypeEmbeddingComp *grad_in = embedding_feature_tensors[gpu_idx].get_ptr();
    const TypeHashKey *offsets = row_offset_allreduce_tensors[gpu_idx].get_ptr();
    TypeEmbeddingComp *grad_out = wgrad_tensors[gpu_idx].get_ptr();
    switch (combiner) {
      case 0:  // sum combiner: gradient passes through unchanged
        backward_sum(batch_size, slot_num_per_gpu[gpu_idx], embedding_vec_size, grad_in, grad_out,
                     gpu->get_stream());
        break;
      case 1:  // mean combiner: gradient scaled by 1/value_num
        backward_mean(batch_size, slot_num_per_gpu[gpu_idx], embedding_vec_size, offsets, grad_in,
                      grad_out, gpu->get_stream());
        break;
      default:
        CK_THROW_(Error_t::WrongInput, "Invalid combiner type ");
    }
  }
}
// Explicit instantiations for the supported key (unsigned int / long long)
// and gradient (float / __half) type combinations of both backward() overloads.
template void SparseEmbeddingFunctors::backward<unsigned int, float>(
    size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
    const Tensors2<unsigned int> &row_offset_allreduce_tensors,
    const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
    const ResourceManager &resource_manager);

template void SparseEmbeddingFunctors::backward<long long, float>(
    size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
    const Tensors2<long long> &row_offset_allreduce_tensors,
    const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
    const ResourceManager &resource_manager);

template void SparseEmbeddingFunctors::backward<unsigned int, __half>(
    size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
    const Tensors2<unsigned int> &row_offset_allreduce_tensors,
    const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
    const ResourceManager &resource_manager);

template void SparseEmbeddingFunctors::backward<long long, __half>(
    size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
    const Tensors2<long long> &row_offset_allreduce_tensors,
    const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
    const ResourceManager &resource_manager);

// Overload taking per-GPU slot counts.
template void SparseEmbeddingFunctors::backward<unsigned int, float>(
    size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
    int combiner, const Tensors2<unsigned int> &row_offset_allreduce_tensors,
    const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
    const ResourceManager &resource_manager);

template void SparseEmbeddingFunctors::backward<long long, float>(
    size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
    int combiner, const Tensors2<long long> &row_offset_allreduce_tensors,
    const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
    const ResourceManager &resource_manager);

template void SparseEmbeddingFunctors::backward<unsigned int, __half>(
    size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
    int combiner, const Tensors2<unsigned int> &row_offset_allreduce_tensors,
    const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
    const ResourceManager &resource_manager);

template void SparseEmbeddingFunctors::backward<long long, __half>(
    size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
    int combiner, const Tensors2<long long> &row_offset_allreduce_tensors,
    const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
    const ResourceManager &resource_manager);
} // namespace HugeCTR | 9bf2950d9860baf0700ce5df0f36b14e49e6f75f.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.cuh"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// backward kernel function: for combiner=sum
template <typename TypeEmbeddingComp>
__global__ void backward_sum_kernel(int batch_size, int slot_num, int embedding_vec_size,
                                    const TypeEmbeddingComp *top_grad, TypeEmbeddingComp *wgrad) {
  int tid = threadIdx.x;  // element index within the embedding vector
  int bid = blockIdx.x;   // one block per sample
  if (bid < batch_size && tid < embedding_vec_size) {
    // SUM combiner: the gradient passes through unchanged for every slot.
    for (int i = 0; i < slot_num; i++) {
      size_t feature_index = (size_t)(bid * slot_num + i) * embedding_vec_size + tid;
      wgrad[feature_index] = top_grad[feature_index];
    }
  }
}
__global__ void backward_sum_align2_kernel(int batch_size, int slot_num, int embedding_vec_size,
                                           const __half *top_grad, __half *wgrad) {
  // __half2 variant of backward_sum_kernel: the caller passes embedding_vec_size
  // already divided by 2, so each thread copies one __half2 (two elements).
  const int vec_elem = threadIdx.x;
  const int sample = blockIdx.x;
  if (sample >= batch_size || vec_elem >= embedding_vec_size) {
    return;
  }
  const __half2 *src = reinterpret_cast<const __half2 *>(top_grad);
  __half2 *dst = reinterpret_cast<__half2 *>(wgrad);
  for (int slot = 0; slot < slot_num; ++slot) {
    const size_t idx = (size_t)(sample * slot_num + slot) * embedding_vec_size + vec_elem;
    dst[idx] = src[idx];
  }
}
// backward kernel function: for combiner=mean
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void backward_mean_kernel(int batch_size, int slot_num, int embedding_vec_size,
                                     const TypeKey *row_offset, const TypeEmbeddingComp *top_grad,
                                     TypeEmbeddingComp *wgrad) {
  int bid = blockIdx.x;   // one block per sample
  int tid = threadIdx.x;  // element index within the embedding vector
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      size_t feature_row_index = bid * slot_num + i;
      // Number of keys in this feature row (CSR-style offsets).
      // NOTE(review): the TypeKey difference is narrowed to int -- assumes
      // per-row key counts fit in int; confirm upstream guarantees this.
      int value_num = row_offset[feature_row_index + 1] - row_offset[feature_row_index];
      float scaler = 1.0f;
      if (value_num > 1) {
        scaler = 1.0f / value_num;  // partial derivative of MEAN
      }
      size_t feature_index = feature_row_index * embedding_vec_size + tid;
      // Scale in float so low-precision TypeEmbeddingComp keeps accuracy.
      float g = TypeConvertFunc<float, TypeEmbeddingComp>::convert(top_grad[feature_index]);
      g *= scaler;
      wgrad[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(g);
    }
  }
}
// __half2 variant of backward_mean_kernel: embedding_vec_size arrives already
// halved, and the 1/value_num scaling is applied with packed half2 arithmetic.
template <typename TypeKey>
__global__ void backward_mean_align2_kernel(int batch_size, int slot_num, int embedding_vec_size,
                                            const TypeKey *row_offset, const __half *top_grad,
                                            __half *wgrad) {
  int bid = blockIdx.x;   // one block per sample
  int tid = threadIdx.x;  // __half2 element index within the embedding vector
  if (bid < batch_size && tid < embedding_vec_size) {
    const __half2 *top_grad2 = reinterpret_cast<const __half2 *>(top_grad);
    __half2 *wgrad2 = reinterpret_cast<__half2 *>(wgrad);
    for (int i = 0; i < slot_num; i++) {
      size_t feature_row_index = bid * slot_num + i;
      int value_num = row_offset[feature_row_index + 1] - row_offset[feature_row_index];
      __half2 scaler = __float2half2_rn(1.0f);
      if (value_num > 1) {
        scaler = __float2half2_rn(1.0f / (float)value_num);  // partial derivative of MEAN
      }
      size_t feature_index = feature_row_index * embedding_vec_size + tid;
      wgrad2[feature_index] = __hmul2(scaler, top_grad2[feature_index]);
    }
  }
}
// Launches backward_sum_kernel: one block per sample, one thread per
// embedding-vector element, on the given stream.
template <typename TypeEmbeddingComp>
void backward_sum(size_t batch_size, size_t slot_num, size_t embedding_vec_size,
                  const TypeEmbeddingComp *top_grad, TypeEmbeddingComp *wgrad,
                  cudaStream_t stream) {
  const size_t grid_size = batch_size;  // each block corresponds to a sample
  const size_t block_size = embedding_vec_size;  // one thread per vector element
  backward_sum_kernel<<<grid_size, block_size, 0, stream>>>(batch_size, slot_num,
                                                            embedding_vec_size, top_grad, wgrad);
}
// __half specialization: uses the vectorized __half2 kernel when the embedding
// vector length is even, otherwise falls back to the scalar kernel.
template <>
void backward_sum<__half>(size_t batch_size, size_t slot_num, size_t embedding_vec_size,
                          const __half *top_grad, __half *wgrad, cudaStream_t stream) {
  const size_t grid_size = batch_size;  // each block corresponds to a sample
  if (embedding_vec_size % 2 == 0) {
    const size_t block_size =
        embedding_vec_size / 2;  // each thread corresponds to one __half2 (two elements)
    backward_sum_align2_kernel<<<grid_size, block_size, 0, stream>>>(
        batch_size, slot_num, embedding_vec_size / 2, top_grad, wgrad);
  } else {
    const size_t block_size = embedding_vec_size;  // one thread per element
    backward_sum_kernel<<<grid_size, block_size, 0, stream>>>(batch_size, slot_num,
                                                              embedding_vec_size, top_grad, wgrad);
  }
}
// Launches backward_mean_kernel: one block per sample, one thread per
// embedding-vector element, on the given stream.
template <typename TypeKey, typename TypeEmbeddingComp>
void backward_mean(size_t batch_size, size_t slot_size, size_t embedding_vec_size,
                   const TypeKey *row_offset, const TypeEmbeddingComp *top_grad,
                   TypeEmbeddingComp *wgrad, cudaStream_t stream) {
  const size_t grid_size = batch_size;  // each block corresponds to a sample
  const size_t block_size = embedding_vec_size;  // one thread per vector element
  backward_mean_kernel<<<grid_size, block_size, 0, stream>>>(
      batch_size, slot_size, embedding_vec_size, row_offset, top_grad, wgrad);
}
// __half overload: uses the vectorized __half2 kernel when the embedding
// vector length is even, otherwise falls back to the scalar kernel.
template <typename TypeKey>
void backward_mean(size_t batch_size, size_t slot_size, size_t embedding_vec_size,
                   const TypeKey *row_offset, const __half *top_grad, __half *wgrad,
                   cudaStream_t stream) {
  const size_t grid_size = batch_size;  // each block corresponds to a sample
  if (embedding_vec_size % 2 == 0) {
    const size_t block_size = embedding_vec_size / 2;  // one thread per __half2
    backward_mean_align2_kernel<<<grid_size, block_size, 0, stream>>>(
        batch_size, slot_size, embedding_vec_size / 2, row_offset, top_grad, wgrad);
  } else {
    const size_t block_size = embedding_vec_size;  // one thread per element
    backward_mean_kernel<<<grid_size, block_size, 0, stream>>>(
        batch_size, slot_size, embedding_vec_size, row_offset, top_grad, wgrad);
  }
}
} // namespace
/**
* backward propagation for DistributedSlotSparseEmbeddingHash
* The first step of backward propagation: computing the wgrad.
* @param batch_size batch size for the current mini-batch computation.
* @param slot_num the number of slots in hash table.
* @param embedding_vec_size embedding vector size.
* @param combiner combiner type: 0-sum, 1-mean
 * @param row_offset_allreduce_tensors row_offsets tensors after all_reduce of multiple GPUs
 * @param embedding_feature_tensors embedding features tensors of multiple GPUs, storing dgrad
 * from the top layer
 * @param wgrad_tensors wgrad tensors of multiple GPUs, the output of this function.
 * @param resource_manager all gpus' device resources, also used for switching the active device.
*/
template <typename TypeHashKey, typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::backward(size_t batch_size, size_t slot_num,
                                       size_t embedding_vec_size, int combiner,
                                       const Tensors2<TypeHashKey> &row_offset_allreduce_tensors,
                                       const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
                                       Tensors2<TypeEmbeddingComp> &wgrad_tensors,
                                       const ResourceManager &resource_manager) {
  size_t local_gpu_count = resource_manager.get_local_gpu_count();
  // NOTE(review): CudaDeviceContext presumably restores the prior device on
  // destruction -- confirm against its definition.
  CudaDeviceContext context;
  for (size_t id = 0; id < local_gpu_count; id++) {
    const auto &local_gpu = resource_manager.get_local_gpu(id);
    context.set_device(local_gpu->get_device_id());
    // Per-GPU raw pointers: incoming gradient, all-reduced row offsets, and
    // the wgrad output buffer.
    const TypeEmbeddingComp *top_grad = embedding_feature_tensors[id].get_ptr();
    const TypeHashKey *row_offset = row_offset_allreduce_tensors[id].get_ptr();
    TypeEmbeddingComp *wgrad = wgrad_tensors[id].get_ptr();
    if (combiner == 0)  // sum
    {
      backward_sum(batch_size, slot_num, embedding_vec_size, top_grad, wgrad,
                   local_gpu->get_stream());
    } else if (combiner == 1)  // mean
    {
      backward_mean(batch_size, slot_num, embedding_vec_size, row_offset, top_grad, wgrad,
                    local_gpu->get_stream());
    } else {
      CK_THROW_(Error_t::WrongInput, "Invalid combiner type ");
    }
  }
  return;
}
template <typename TypeHashKey, typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::backward(size_t batch_size,
const std::vector<size_t> &slot_num_per_gpu,
size_t embedding_vec_size, int combiner,
const Tensors2<TypeHashKey> &row_offset_allreduce_tensors,
const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
Tensors2<TypeEmbeddingComp> &wgrad_tensors,
const ResourceManager &resource_manager) {
size_t local_gpu_count = resource_manager.get_local_gpu_count();
CudaDeviceContext context;
for (size_t id = 0; id < local_gpu_count; id++) {
if (slot_num_per_gpu[id] == 0) {
continue;
}
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
const TypeEmbeddingComp *top_grad = embedding_feature_tensors[id].get_ptr();
const TypeHashKey *row_offset = row_offset_allreduce_tensors[id].get_ptr();
TypeEmbeddingComp *wgrad = wgrad_tensors[id].get_ptr();
if (combiner == 0) // sum
{
backward_sum(batch_size, slot_num_per_gpu[id], embedding_vec_size, top_grad, wgrad,
local_gpu->get_stream());
} else if (combiner == 1) // mean
{
backward_mean(batch_size, slot_num_per_gpu[id], embedding_vec_size, row_offset, top_grad,
wgrad, local_gpu->get_stream());
} else {
CK_THROW_(Error_t::WrongInput, "Invalid combiner type ");
}
}
return;
}
template void SparseEmbeddingFunctors::backward<unsigned int, float>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, float>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<unsigned int, __half>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, __half>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<unsigned int, float>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, float>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<unsigned int, __half>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, __half>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR |
bb5ef0d54fb8f4482837404b8530d09d73308504.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sga_right_weight_backward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const float *bottom_data = NULL;
hipMalloc(&bottom_data, XSIZE*YSIZE);
const float *top_data = NULL;
hipMalloc(&top_data, XSIZE*YSIZE);
const float *temp_diff = NULL;
hipMalloc(&temp_diff, XSIZE*YSIZE);
const int height = 1;
const int width = 1;
const int depth = 1;
const int wsize = 1;
float *filters_diff = NULL;
hipMalloc(&filters_diff, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
sga_right_weight_backward), dim3(gridBlock),dim3(threadBlock), 0, 0, n,bottom_data,top_data,temp_diff,height,width,depth,wsize,filters_diff);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sga_right_weight_backward), dim3(gridBlock),dim3(threadBlock), 0, 0, n,bottom_data,top_data,temp_diff,height,width,depth,wsize,filters_diff);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sga_right_weight_backward), dim3(gridBlock),dim3(threadBlock), 0, 0, n,bottom_data,top_data,temp_diff,height,width,depth,wsize,filters_diff);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bb5ef0d54fb8f4482837404b8530d09d73308504.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sga_right_weight_backward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const float *bottom_data = NULL;
cudaMalloc(&bottom_data, XSIZE*YSIZE);
const float *top_data = NULL;
cudaMalloc(&top_data, XSIZE*YSIZE);
const float *temp_diff = NULL;
cudaMalloc(&temp_diff, XSIZE*YSIZE);
const int height = 1;
const int width = 1;
const int depth = 1;
const int wsize = 1;
float *filters_diff = NULL;
cudaMalloc(&filters_diff, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sga_right_weight_backward<<<gridBlock,threadBlock>>>(n,bottom_data,top_data,temp_diff,height,width,depth,wsize,filters_diff);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sga_right_weight_backward<<<gridBlock,threadBlock>>>(n,bottom_data,top_data,temp_diff,height,width,depth,wsize,filters_diff);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sga_right_weight_backward<<<gridBlock,threadBlock>>>(n,bottom_data,top_data,temp_diff,height,width,depth,wsize,filters_diff);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ed360180e35ffa85676bf039b383981442418a0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaSortTOP.h"
#include "thrust/device_ptr.h"
//#include "thrust/for_each.h"
//#include "thrust/iterator/zip_iterator.h"
#include "thrust/sort.h"
//no performance difference if using float Mono input instead of float4 RGBA
//texture<float, hipTextureType2D, hipReadModeElementType> inTex;
//g_odata[offset] = tex2D(inTex, xc, yc);
texture<float4, hipTextureType2D, hipReadModeElementType> inTex;
surface<void, cudaSurfaceType2D> outputSurface;
__device__ float4 operator+(const float4 & a, const float4 & b) {
return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
__global__ void
arrayToData(float *g_odata, uint* keys, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * imgw;
if (x < imgw && y < imgh) {
float xc = x + 0.5;
float yc = y + 0.5;
g_odata[offset] = tex2D(inTex, xc, yc).x;
keys[offset] = offset;
}
}
__global__ void
dataToTex(uint* indices, float4 *g_odata, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * imgw;
if (x < imgw && y < imgh) {
float res = indices[offset];
g_odata[offset] = make_float4(res, 0, 0, 1);
}
}
//https://stackoverflow.com/questions/27741888/writing-to-a-floating-point-opengl-texture-in-cuda-via-a-surface
//hipBoundaryModeClamp
__global__ void
dataToArray(uint* indices, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * imgw;
if (x < imgw && y < imgh) {
float res = indices[offset];
//surf2Dwrite(make_float4(res, 0, 0, 1), outputSurface, (int)sizeof(float4)*x, y);
surf2Dwrite(res, outputSurface, (int)sizeof(float)*x, y);
}
}
extern "C" void
launch_arrayToData(dim3 grid, dim3 block, hipArray *g_data_array, float *g_odata, uint* keys, int imgw, int imgh) {
cudaCheck(hipBindTextureToArray(inTex, g_data_array));
struct hipChannelFormatDesc desc;
cudaCheck(hipGetChannelDesc(&desc, g_data_array));
arrayToData << < grid, block >> >(g_odata, keys, imgw, imgh);
cudaCheck(hipUnbindTexture(inTex));
}
extern "C" void
launch_dataToTex(dim3 grid, dim3 block, uint *mIndices, float4 *g_odata, int imgw, int imgh) {
dataToTex << < grid, block >> >(mIndices, g_odata, imgw, imgh);
}
extern "C" void
launch_dataToArray(dim3 grid, dim3 block, uint *mIndices, hipArray *output, int imgw, int imgh) {
cudaCheck(hipBindSurfaceToArray(outputSurface, output));
dataToArray << < grid, block >> >(mIndices, imgw, imgh);
}
extern "C" void
sortParticles(float *sortKeys, uint *indices, uint numParticles)
{
thrust::sort_by_key(thrust::device_ptr<float>(sortKeys),
thrust::device_ptr<float>(sortKeys + numParticles),
thrust::device_ptr<uint>(indices));
} | ed360180e35ffa85676bf039b383981442418a0b.cu |
#include "CudaSortTOP.h"
#include "thrust/device_ptr.h"
//#include "thrust/for_each.h"
//#include "thrust/iterator/zip_iterator.h"
#include "thrust/sort.h"
//no performance difference if using float Mono input instead of float4 RGBA
//texture<float, cudaTextureType2D, cudaReadModeElementType> inTex;
//g_odata[offset] = tex2D(inTex, xc, yc);
texture<float4, cudaTextureType2D, cudaReadModeElementType> inTex;
surface<void, cudaSurfaceType2D> outputSurface;
__device__ float4 operator+(const float4 & a, const float4 & b) {
return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
__global__ void
arrayToData(float *g_odata, uint* keys, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * imgw;
if (x < imgw && y < imgh) {
float xc = x + 0.5;
float yc = y + 0.5;
g_odata[offset] = tex2D(inTex, xc, yc).x;
keys[offset] = offset;
}
}
__global__ void
dataToTex(uint* indices, float4 *g_odata, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * imgw;
if (x < imgw && y < imgh) {
float res = indices[offset];
g_odata[offset] = make_float4(res, 0, 0, 1);
}
}
//https://stackoverflow.com/questions/27741888/writing-to-a-floating-point-opengl-texture-in-cuda-via-a-surface
//cudaBoundaryModeClamp
__global__ void
dataToArray(uint* indices, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * imgw;
if (x < imgw && y < imgh) {
float res = indices[offset];
//surf2Dwrite(make_float4(res, 0, 0, 1), outputSurface, (int)sizeof(float4)*x, y);
surf2Dwrite(res, outputSurface, (int)sizeof(float)*x, y);
}
}
extern "C" void
launch_arrayToData(dim3 grid, dim3 block, cudaArray *g_data_array, float *g_odata, uint* keys, int imgw, int imgh) {
cudaCheck(cudaBindTextureToArray(inTex, g_data_array));
struct cudaChannelFormatDesc desc;
cudaCheck(cudaGetChannelDesc(&desc, g_data_array));
arrayToData << < grid, block >> >(g_odata, keys, imgw, imgh);
cudaCheck(cudaUnbindTexture(inTex));
}
extern "C" void
launch_dataToTex(dim3 grid, dim3 block, uint *mIndices, float4 *g_odata, int imgw, int imgh) {
dataToTex << < grid, block >> >(mIndices, g_odata, imgw, imgh);
}
extern "C" void
launch_dataToArray(dim3 grid, dim3 block, uint *mIndices, cudaArray *output, int imgw, int imgh) {
cudaCheck(cudaBindSurfaceToArray(outputSurface, output));
dataToArray << < grid, block >> >(mIndices, imgw, imgh);
}
extern "C" void
sortParticles(float *sortKeys, uint *indices, uint numParticles)
{
thrust::sort_by_key(thrust::device_ptr<float>(sortKeys),
thrust::device_ptr<float>(sortKeys + numParticles),
thrust::device_ptr<uint>(indices));
} |
03cb4a7e9d91ef9d37dc233834a20b3efb87518e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2019-2020 XGBoost contributors
*/
#include <xgboost/data.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include "../common/categorical.h"
#include "../common/hist_util.cuh"
#include "../common/random.h"
#include "./ellpack_page.cuh"
#include "device_adapter_hip.cuh"
namespace xgboost {
EllpackPage::EllpackPage() : impl_{new EllpackPageImpl()} {}
EllpackPage::EllpackPage(DMatrix* dmat, const BatchParam& param)
: impl_{new EllpackPageImpl(dmat, param)} {}
EllpackPage::~EllpackPage() = default;
EllpackPage::EllpackPage(EllpackPage&& that) { std::swap(impl_, that.impl_); }
size_t EllpackPage::Size() const { return impl_->Size(); }
void EllpackPage::SetBaseRowId(size_t row_id) { impl_->SetBaseRowId(row_id); }
// Bin each input data entry, store the bin indices in compressed form.
__global__ void CompressBinEllpackKernel(
common::CompressedBufferWriter wr,
common::CompressedByteT* __restrict__ buffer, // gidx_buffer
const size_t* __restrict__ row_ptrs, // row offset of input data
const Entry* __restrict__ entries, // One batch of input data
const float* __restrict__ cuts, // HistogramCuts::cut_values_
const uint32_t* __restrict__ cut_rows, // HistogramCuts::cut_ptrs_
common::Span<FeatureType const> feature_types,
size_t base_row, // batch_row_begin
size_t n_rows,
size_t row_stride,
unsigned int null_gidx_value) {
size_t irow = threadIdx.x + blockIdx.x * blockDim.x;
int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
if (irow >= n_rows || ifeature >= row_stride) {
return;
}
int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
unsigned int bin = null_gidx_value;
if (ifeature < row_length) {
Entry entry = entries[row_ptrs[irow] - row_ptrs[0] + ifeature];
int feature = entry.index;
float fvalue = entry.fvalue;
// {feature_cuts, ncuts} forms the array of cuts of `feature'.
const float* feature_cuts = &cuts[cut_rows[feature]];
int ncuts = cut_rows[feature + 1] - cut_rows[feature];
bool is_cat = common::IsCat(feature_types, ifeature);
// Assigning the bin in current entry.
// S.t.: fvalue < feature_cuts[bin]
if (is_cat) {
auto it = dh::MakeTransformIterator<int>(
feature_cuts, [](float v) { return common::AsCat(v); });
bin = thrust::lower_bound(thrust::seq, it, it + ncuts, common::AsCat(fvalue)) - it;
} else {
bin = thrust::upper_bound(thrust::seq, feature_cuts, feature_cuts + ncuts,
fvalue) -
feature_cuts;
}
if (bin >= ncuts) {
bin = ncuts - 1;
}
// Add the number of bins in previous features.
bin += cut_rows[feature];
}
// Write to gidx buffer.
wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
// Construct an ELLPACK matrix with the given number of empty rows.
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
bool is_dense, size_t row_stride,
size_t n_rows)
: is_dense(is_dense),
cuts_(std::move(cuts)),
row_stride(row_stride),
n_rows(n_rows) {
monitor_.Init("ellpack_page");
dh::safe_cuda(hipSetDevice(device));
monitor_.Start("InitCompressedData");
InitCompressedData(device);
monitor_.Stop("InitCompressedData");
}
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
const SparsePage &page, bool is_dense,
size_t row_stride,
common::Span<FeatureType const> feature_types)
: cuts_(std::move(cuts)), is_dense(is_dense), n_rows(page.Size()),
row_stride(row_stride) {
this->InitCompressedData(device);
this->CreateHistIndices(device, page, feature_types);
}
// Construct an ELLPACK matrix in memory.
EllpackPageImpl::EllpackPageImpl(DMatrix* dmat, const BatchParam& param)
: is_dense(dmat->IsDense()) {
monitor_.Init("ellpack_page");
dh::safe_cuda(hipSetDevice(param.gpu_id));
n_rows = dmat->Info().num_row_;
monitor_.Start("Quantiles");
// Create the quantile sketches for the dmatrix and initialize HistogramCuts.
row_stride = GetRowStride(dmat);
cuts_ = common::DeviceSketch(param.gpu_id, dmat, param.max_bin);
monitor_.Stop("Quantiles");
monitor_.Start("InitCompressedData");
this->InitCompressedData(param.gpu_id);
monitor_.Stop("InitCompressedData");
dmat->Info().feature_types.SetDevice(param.gpu_id);
auto ft = dmat->Info().feature_types.ConstDeviceSpan();
monitor_.Start("BinningCompression");
CHECK(dmat->SingleColBlock());
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
CreateHistIndices(param.gpu_id, batch, ft);
}
monitor_.Stop("BinningCompression");
}
template <typename AdapterBatchT>
struct WriteCompressedEllpackFunctor {
WriteCompressedEllpackFunctor(common::CompressedByteT* buffer,
const common::CompressedBufferWriter& writer,
AdapterBatchT batch,
EllpackDeviceAccessor accessor,
common::Span<FeatureType const> feature_types,
const data::IsValidFunctor& is_valid)
: d_buffer(buffer),
writer(writer),
batch(std::move(batch)),
accessor(std::move(accessor)),
feature_types(std::move(feature_types)),
is_valid(is_valid) {}
common::CompressedByteT* d_buffer;
common::CompressedBufferWriter writer;
AdapterBatchT batch;
EllpackDeviceAccessor accessor;
common::Span<FeatureType const> feature_types;
data::IsValidFunctor is_valid;
using Tuple = thrust::tuple<size_t, size_t, size_t>;
__device__ size_t operator()(Tuple out) {
auto e = batch.GetElement(out.get<2>());
if (is_valid(e)) {
// -1 because the scan is inclusive
size_t output_position =
accessor.row_stride * e.row_idx + out.get<1>() - 1;
uint32_t bin_idx = 0;
if (common::IsCat(feature_types, e.column_idx)) {
bin_idx = accessor.SearchBin<true>(e.value, e.column_idx);
} else {
bin_idx = accessor.SearchBin<false>(e.value, e.column_idx);
}
writer.AtomicWriteSymbol(d_buffer, bin_idx, output_position);
}
return 0;
}
};
template <typename Tuple>
struct TupleScanOp {
__device__ Tuple operator()(Tuple a, Tuple b) {
// Key equal
if (a.template get<0>() == b.template get<0>()) {
b.template get<1>() += a.template get<1>();
return b;
}
// Not equal
return b;
}
};
// Here the data is already correctly ordered and simply needs to be compacted
// to remove missing data
template <typename AdapterBatchT>
void CopyDataToEllpack(const AdapterBatchT &batch,
common::Span<FeatureType const> feature_types,
EllpackPageImpl *dst, int device_idx, float missing) {
// Some witchcraft happens here
// The goal is to copy valid elements out of the input to an ELLPACK matrix
// with a given row stride, using no extra working memory Standard stream
// compaction needs to be modified to do this, so we manually define a
// segmented stream compaction via operators on an inclusive scan. The output
// of this inclusive scan is fed to a custom function which works out the
// correct output position
auto counting = thrust::make_counting_iterator(0llu);
data::IsValidFunctor is_valid(missing);
auto key_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) {
return batch.GetElement(idx).row_idx;
});
auto value_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) -> size_t {
return is_valid(batch.GetElement(idx));
});
auto key_value_index_iter = thrust::make_zip_iterator(
thrust::make_tuple(key_iter, value_iter, counting));
// Tuple[0] = The row index of the input, used as a key to define segments
// Tuple[1] = Scanned flags of valid elements for each row
// Tuple[2] = The index in the input data
using Tuple = thrust::tuple<size_t, size_t, size_t>;
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
// We redirect the scan output into this functor to do the actual writing
WriteCompressedEllpackFunctor<AdapterBatchT> functor(
d_compressed_buffer, writer, batch, device_accessor, feature_types,
is_valid);
dh::TypedDiscard<Tuple> discard;
thrust::transform_output_iterator<
WriteCompressedEllpackFunctor<AdapterBatchT>, decltype(discard)>
out(discard, functor);
// Go one level down into hipcub::DeviceScan API to set OffsetT as 64 bit
// So we don't crash on n > 2^31
size_t temp_storage_bytes = 0;
using DispatchScan =
cub::DispatchScan<decltype(key_value_index_iter), decltype(out),
TupleScanOp<Tuple>, cub::NullType, int64_t>;
DispatchScan::Dispatch(nullptr, temp_storage_bytes, key_value_index_iter, out,
TupleScanOp<Tuple>(), cub::NullType(), batch.Size(),
nullptr, false);
dh::TemporaryArray<char> temp_storage(temp_storage_bytes);
DispatchScan::Dispatch(temp_storage.data().get(), temp_storage_bytes,
key_value_index_iter, out, TupleScanOp<Tuple>(),
cub::NullType(), batch.Size(), nullptr, false);
}
void WriteNullValues(EllpackPageImpl* dst, int device_idx,
common::Span<size_t> row_counts) {
// Write the null values
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
auto row_stride = dst->row_stride;
dh::LaunchN(row_stride * dst->n_rows, [=] __device__(size_t idx) {
// For some reason this variable got captured as const
auto writer_non_const = writer;
size_t row_idx = idx / row_stride;
size_t row_offset = idx % row_stride;
if (row_offset >= row_counts[row_idx]) {
writer_non_const.AtomicWriteSymbol(d_compressed_buffer,
device_accessor.NullValue(), idx);
}
});
}
template <typename AdapterBatch>
EllpackPageImpl::EllpackPageImpl(AdapterBatch batch, float missing, int device, bool is_dense,
common::Span<size_t> row_counts_span,
common::Span<FeatureType const> feature_types, size_t row_stride,
size_t n_rows, common::HistogramCuts const& cuts) {
dh::safe_cuda(hipSetDevice(device));
*this = EllpackPageImpl(device, cuts, is_dense, row_stride, n_rows);
CopyDataToEllpack(batch, feature_types, this, device, missing);
WriteNullValues(this, device, row_counts_span);
}
#define ELLPACK_BATCH_SPECIALIZE(__BATCH_T) \
template EllpackPageImpl::EllpackPageImpl( \
__BATCH_T batch, float missing, int device, bool is_dense, \
common::Span<size_t> row_counts_span, common::Span<FeatureType const> feature_types, \
size_t row_stride, size_t n_rows, common::HistogramCuts const& cuts);
ELLPACK_BATCH_SPECIALIZE(data::CudfAdapterBatch)
ELLPACK_BATCH_SPECIALIZE(data::CupyAdapterBatch)
// A functor that copies the data from one EllpackPage to another.
struct CopyPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
// The number of elements to skip.
size_t offset;
CopyPage(EllpackPageImpl *dst, EllpackPageImpl const *src, size_t offset)
: cbw{dst->NumSymbols()}, dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
offset(offset) {}
__device__ void operator()(size_t element_id) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[element_id],
element_id + offset);
}
};
// Copy the data from the given EllpackPage to the current page.
size_t EllpackPageImpl::Copy(int device, EllpackPageImpl const *page,
size_t offset) {
monitor_.Start("Copy");
size_t num_elements = page->n_rows * page->row_stride;
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_GE(n_rows * row_stride, offset + num_elements);
if (page == this) {
LOG(FATAL) << "Concatenating the same Ellpack.";
return this->n_rows * this->row_stride;
}
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
dh::LaunchN(num_elements, CopyPage(this, page, offset));
monitor_.Stop("Copy");
return num_elements;
}
// A functor that compacts the rows from one EllpackPage into another.
struct CompactPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
/*! \brief An array that maps the rows from the full DMatrix to the compacted
* page.
*
* The total size is the number of rows in the original, uncompacted DMatrix.
* Elements are the row ids in the compacted page. Rows not needed are set to
* SIZE_MAX.
*
* An example compacting 16 rows to 8 rows:
* [SIZE_MAX, 0, 1, SIZE_MAX, SIZE_MAX, 2, SIZE_MAX, 3, 4, 5, SIZE_MAX, 6,
* SIZE_MAX, 7, SIZE_MAX, SIZE_MAX]
*/
common::Span<size_t> row_indexes;
size_t base_rowid;
size_t row_stride;
CompactPage(EllpackPageImpl* dst, EllpackPageImpl const* src,
common::Span<size_t> row_indexes)
: cbw{dst->NumSymbols()},
dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
row_indexes(row_indexes),
base_rowid{src->base_rowid},
row_stride{src->row_stride} {}
__device__ void operator()(size_t row_id) {
size_t src_row = base_rowid + row_id;
size_t dst_row = row_indexes[src_row];
if (dst_row == SIZE_MAX) return;
size_t dst_offset = dst_row * row_stride;
size_t src_offset = row_id * row_stride;
for (size_t j = 0; j < row_stride; j++) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[src_offset + j],
dst_offset + j);
}
}
};
// Compacts the data from the given EllpackPage into the current page.
void EllpackPageImpl::Compact(int device, EllpackPageImpl const* page,
common::Span<size_t> row_indexes) {
monitor_.Start("Compact");
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_LE(page->base_rowid + page->n_rows, row_indexes.size());
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
dh::LaunchN(page->n_rows, CompactPage(this, page, row_indexes));
monitor_.Stop("Compact");
}
// Initialize the buffer to stored compressed features.
void EllpackPageImpl::InitCompressedData(int device) {
size_t num_symbols = NumSymbols();
// Required buffer size for storing data matrix in ELLPack format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
num_symbols);
gidx_buffer.SetDevice(device);
// Don't call fill unnecessarily
if (gidx_buffer.Size() == 0) {
gidx_buffer.Resize(compressed_size_bytes, 0);
} else {
gidx_buffer.Resize(compressed_size_bytes, 0);
thrust::fill(dh::tbegin(gidx_buffer), dh::tend(gidx_buffer), 0);
}
}
// Compress a CSR page into ELLPACK.
void EllpackPageImpl::CreateHistIndices(int device,
const SparsePage& row_batch,
common::Span<FeatureType const> feature_types) {
if (row_batch.Size() == 0) return;
unsigned int null_gidx_value = NumSymbols() - 1;
const auto& offset_vec = row_batch.offset.ConstHostVector();
// bin and compress entries in batches of rows
size_t gpu_batch_nrows =
::min(dh::TotalMemory(device) / (16 * row_stride * sizeof(Entry)),
static_cast<size_t>(row_batch.Size()));
size_t gpu_nbatches = common::DivRoundUp(row_batch.Size(), gpu_batch_nrows);
for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
size_t batch_row_end =
::min((gpu_batch + 1) * gpu_batch_nrows, row_batch.Size());
size_t batch_nrows = batch_row_end - batch_row_begin;
const auto ent_cnt_begin = offset_vec[batch_row_begin];
const auto ent_cnt_end = offset_vec[batch_row_end];
/*! \brief row offset in SparsePage (the input data). */
dh::device_vector<size_t> row_ptrs(batch_nrows + 1);
thrust::copy(offset_vec.data() + batch_row_begin,
offset_vec.data() + batch_row_end + 1, row_ptrs.begin());
// number of entries in this batch.
size_t n_entries = ent_cnt_end - ent_cnt_begin;
dh::device_vector<Entry> entries_d(n_entries);
// copy data entries to device.
if (row_batch.data.DeviceCanRead()) {
auto const& d_data = row_batch.data.ConstDeviceSpan();
dh::safe_cuda(hipMemcpyAsync(
entries_d.data().get(), d_data.data() + ent_cnt_begin,
n_entries * sizeof(Entry), hipMemcpyDefault));
} else {
const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector();
dh::safe_cuda(hipMemcpyAsync(
entries_d.data().get(), data_vec.data() + ent_cnt_begin,
n_entries * sizeof(Entry), hipMemcpyDefault));
}
const dim3 block3(32, 8, 1); // 256 threads
const dim3 grid3(common::DivRoundUp(batch_nrows, block3.x),
common::DivRoundUp(row_stride, block3.y), 1);
auto device_accessor = GetDeviceAccessor(device);
dh::LaunchKernel {grid3, block3}(
CompressBinEllpackKernel, common::CompressedBufferWriter(NumSymbols()),
gidx_buffer.DevicePointer(), row_ptrs.data().get(),
entries_d.data().get(), device_accessor.gidx_fvalue_map.data(),
device_accessor.feature_segments.data(), feature_types,
batch_row_begin, batch_nrows, row_stride,
null_gidx_value);
}
}
// Return the number of rows contained in this page.
size_t EllpackPageImpl::Size() const { return n_rows; }
// Return the memory cost for storing the compressed features.
size_t EllpackPageImpl::MemCostBytes(size_t num_rows, size_t row_stride,
const common::HistogramCuts& cuts) {
// Required buffer size for storing data matrix in EtoLLPack format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * num_rows,
cuts.TotalBins() + 1);
return compressed_size_bytes;
}
EllpackDeviceAccessor EllpackPageImpl::GetDeviceAccessor(
int device, common::Span<FeatureType const> feature_types) const {
gidx_buffer.SetDevice(device);
return {device,
cuts_,
is_dense,
row_stride,
base_rowid,
n_rows,
common::CompressedIterator<uint32_t>(gidx_buffer.ConstDevicePointer(),
NumSymbols()),
feature_types};
}
} // namespace xgboost
| 03cb4a7e9d91ef9d37dc233834a20b3efb87518e.cu | /*!
* Copyright 2019-2020 XGBoost contributors
*/
#include <xgboost/data.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include "../common/categorical.h"
#include "../common/hist_util.cuh"
#include "../common/random.h"
#include "./ellpack_page.cuh"
#include "device_adapter.cuh"
namespace xgboost {
// Default-construct with an empty implementation object.
EllpackPage::EllpackPage() : impl_{new EllpackPageImpl()} {}
// Build a page directly from an in-memory DMatrix (sketches cuts, compresses).
EllpackPage::EllpackPage(DMatrix* dmat, const BatchParam& param)
: impl_{new EllpackPageImpl(dmat, param)} {}
EllpackPage::~EllpackPage() = default;
// Move: exchange implementation pointers with the source page.
EllpackPage::EllpackPage(EllpackPage&& that) { std::swap(impl_, that.impl_); }
// Number of rows; delegates to the implementation.
size_t EllpackPage::Size() const { return impl_->Size(); }
void EllpackPage::SetBaseRowId(size_t row_id) { impl_->SetBaseRowId(row_id); }
// Bin each input data entry, store the bin indices in compressed form.
// Bin one batch of CSR entries and write the compressed symbols.
// Launch layout: blockIdx/threadIdx x indexes rows of this batch,
// y indexes slots [0, row_stride) within a row.  Each thread writes exactly
// one symbol; slots past a row's length get null_gidx_value (missing marker).
__global__ void CompressBinEllpackKernel(
    common::CompressedBufferWriter wr,
    common::CompressedByteT* __restrict__ buffer,  // gidx_buffer
    const size_t* __restrict__ row_ptrs,    // row offset of input data
    const Entry* __restrict__ entries,      // One batch of input data
    const float* __restrict__ cuts,         // HistogramCuts::cut_values_
    const uint32_t* __restrict__ cut_rows,  // HistogramCuts::cut_ptrs_
    common::Span<FeatureType const> feature_types,
    size_t base_row,  // batch_row_begin
    size_t n_rows,
    size_t row_stride,
    unsigned int null_gidx_value) {
  size_t irow = threadIdx.x + blockIdx.x * blockDim.x;
  int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
  // Bounds guard: the grid rarely divides (n_rows, row_stride) evenly.
  if (irow >= n_rows || ifeature >= row_stride) {
    return;
  }
  int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
  unsigned int bin = null_gidx_value;
  if (ifeature < row_length) {
    // row_ptrs holds SparsePage-wide offsets; subtract row_ptrs[0] to index
    // into this batch's entries array.
    Entry entry = entries[row_ptrs[irow] - row_ptrs[0] + ifeature];
    int feature = entry.index;
    float fvalue = entry.fvalue;
    // {feature_cuts, ncuts} forms the array of cuts of `feature'.
    const float* feature_cuts = &cuts[cut_rows[feature]];
    int ncuts = cut_rows[feature + 1] - cut_rows[feature];
    // Bug fix: the categorical check must use the entry's column index, not
    // the slot position within the row (`ifeature`) — these differ for sparse
    // input.  This matches WriteCompressedEllpackFunctor, which uses
    // e.column_idx for the same lookup.
    bool is_cat = common::IsCat(feature_types, feature);
    // Assigning the bin in current entry.
    // S.t.: fvalue < feature_cuts[bin]
    if (is_cat) {
      // Categorical: exact match of the integer category code.
      auto it = dh::MakeTransformIterator<int>(
          feature_cuts, [](float v) { return common::AsCat(v); });
      bin = thrust::lower_bound(thrust::seq, it, it + ncuts, common::AsCat(fvalue)) - it;
    } else {
      // Numerical: first cut strictly greater than the value.
      bin = thrust::upper_bound(thrust::seq, feature_cuts, feature_cuts + ncuts,
                                fvalue) -
            feature_cuts;
    }
    // Clamp values beyond the last cut into the final bin.
    if (bin >= ncuts) {
      bin = ncuts - 1;
    }
    // Add the number of bins in previous features.
    bin += cut_rows[feature];
  }
  // Write to gidx buffer.
  wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
// Construct an ELLPACK matrix with the given number of empty rows.
// Allocate a page of n_rows empty rows on `device`; the compressed buffer is
// created (zero-filled) by InitCompressedData and populated later.
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
bool is_dense, size_t row_stride,
size_t n_rows)
: is_dense(is_dense),
cuts_(std::move(cuts)),
row_stride(row_stride),
n_rows(n_rows) {
monitor_.Init("ellpack_page");
// Bind the CUDA context before any device allocation below.
dh::safe_cuda(cudaSetDevice(device));
monitor_.Start("InitCompressedData");
InitCompressedData(device);
monitor_.Stop("InitCompressedData");
}
// Build a page from one CSR SparsePage using precomputed cuts: allocate the
// compressed buffer, then bin and write every entry.
// NOTE(review): unlike the sibling constructors this one does not call
// cudaSetDevice — presumably callers set the device first; confirm.
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
const SparsePage &page, bool is_dense,
size_t row_stride,
common::Span<FeatureType const> feature_types)
: cuts_(std::move(cuts)), is_dense(is_dense), n_rows(page.Size()),
row_stride(row_stride) {
this->InitCompressedData(device);
this->CreateHistIndices(device, page, feature_types);
}
// Construct an ELLPACK matrix in memory.
// End-to-end construction: sketch quantile cuts on the GPU, allocate the
// compressed buffer, then bin every SparsePage batch into it.
EllpackPageImpl::EllpackPageImpl(DMatrix* dmat, const BatchParam& param)
: is_dense(dmat->IsDense()) {
monitor_.Init("ellpack_page")
;
dh::safe_cuda(cudaSetDevice(param.gpu_id));
n_rows = dmat->Info().num_row_;
monitor_.Start("Quantiles");
// Create the quantile sketches for the dmatrix and initialize HistogramCuts.
row_stride = GetRowStride(dmat);
cuts_ = common::DeviceSketch(param.gpu_id, dmat, param.max_bin);
monitor_.Stop("Quantiles");
monitor_.Start("InitCompressedData");
this->InitCompressedData(param.gpu_id);
monitor_.Stop("InitCompressedData");
dmat->Info().feature_types.SetDevice(param.gpu_id);
auto ft = dmat->Info().feature_types.ConstDeviceSpan();
monitor_.Start("BinningCompression");
// Only single-block (fully in-memory) DMatrix is supported here.
CHECK(dmat->SingleColBlock());
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
CreateHistIndices(param.gpu_id, batch, ft);
}
monitor_.Stop("BinningCompression");
}
// Output functor for the segmented inclusive scan in CopyDataToEllpack:
// receives (row key, running count of valid elements in the row, input index)
// and writes the element's bin symbol at its compacted position in the row.
template <typename AdapterBatchT>
struct WriteCompressedEllpackFunctor {
WriteCompressedEllpackFunctor(common::CompressedByteT* buffer,
const common::CompressedBufferWriter& writer,
AdapterBatchT batch,
EllpackDeviceAccessor accessor,
common::Span<FeatureType const> feature_types,
const data::IsValidFunctor& is_valid)
: d_buffer(buffer),
writer(writer),
batch(std::move(batch)),
accessor(std::move(accessor)),
feature_types(std::move(feature_types)),
is_valid(is_valid) {}
common::CompressedByteT* d_buffer;
common::CompressedBufferWriter writer;
AdapterBatchT batch;
EllpackDeviceAccessor accessor;
common::Span<FeatureType const> feature_types;
data::IsValidFunctor is_valid;
using Tuple = thrust::tuple<size_t, size_t, size_t>;
// Invoked once per scanned element; the size_t return is discarded.
__device__ size_t operator()(Tuple out) {
auto e = batch.GetElement(out.get<2>());
if (is_valid(e)) {
// -1 because the scan is inclusive
size_t output_position =
accessor.row_stride * e.row_idx + out.get<1>() - 1;
uint32_t bin_idx = 0;
// Categorical features use exact-match search; numerical use upper bound.
if (common::IsCat(feature_types, e.column_idx)) {
bin_idx = accessor.SearchBin<true>(e.value, e.column_idx);
} else {
bin_idx = accessor.SearchBin<false>(e.value, e.column_idx);
}
writer.AtomicWriteSymbol(d_buffer, bin_idx, output_position);
}
return 0;
}
};
// Segmented-scan combiner: while the row key (tuple slot 0) is unchanged the
// per-row valid-element count (tuple slot 1) accumulates; a key change starts
// a fresh segment by returning `b` untouched.
template <typename Tuple>
struct TupleScanOp {
__device__ Tuple operator()(Tuple a, Tuple b) {
bool const same_segment = a.template get<0>() == b.template get<0>();
if (same_segment) {
b.template get<1>() += a.template get<1>();
}
return b;
}
};
// Here the data is already correctly ordered and simply needs to be compacted
// to remove missing data
// Compact valid (non-missing) adapter elements into `dst` in-place, using a
// segmented inclusive scan whose output is consumed by a writer functor
// instead of being stored.  Requires the batch to be ordered by row (see the
// comment above this function).
template <typename AdapterBatchT>
void CopyDataToEllpack(const AdapterBatchT &batch,
common::Span<FeatureType const> feature_types,
EllpackPageImpl *dst, int device_idx, float missing) {
// Some witchcraft happens here
// The goal is to copy valid elements out of the input to an ELLPACK matrix
// with a given row stride, using no extra working memory Standard stream
// compaction needs to be modified to do this, so we manually define a
// segmented stream compaction via operators on an inclusive scan. The output
// of this inclusive scan is fed to a custom function which works out the
// correct output position
auto counting = thrust::make_counting_iterator(0llu);
data::IsValidFunctor is_valid(missing);
// Segment key: the row index of each input element.
auto key_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) {
return batch.GetElement(idx).row_idx;
});
// Scan value: 1 for a valid element, 0 for a missing one.
auto value_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) -> size_t {
return is_valid(batch.GetElement(idx));
});
auto key_value_index_iter = thrust::make_zip_iterator(
thrust::make_tuple(key_iter, value_iter, counting));
// Tuple[0] = The row index of the input, used as a key to define segments
// Tuple[1] = Scanned flags of valid elements for each row
// Tuple[2] = The index in the input data
using Tuple = thrust::tuple<size_t, size_t, size_t>;
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
// We redirect the scan output into this functor to do the actual writing
WriteCompressedEllpackFunctor<AdapterBatchT> functor(
d_compressed_buffer, writer, batch, device_accessor, feature_types,
is_valid);
dh::TypedDiscard<Tuple> discard;
thrust::transform_output_iterator<
WriteCompressedEllpackFunctor<AdapterBatchT>, decltype(discard)>
out(discard, functor);
// Go one level down into cub::DeviceScan API to set OffsetT as 64 bit
// So we don't crash on n > 2^31
size_t temp_storage_bytes = 0;
using DispatchScan =
cub::DispatchScan<decltype(key_value_index_iter), decltype(out),
TupleScanOp<Tuple>, cub::NullType, int64_t>;
// First call with a null pointer only queries the temp-storage size.
DispatchScan::Dispatch(nullptr, temp_storage_bytes, key_value_index_iter, out,
TupleScanOp<Tuple>(), cub::NullType(), batch.Size(),
nullptr, false);
dh::TemporaryArray<char> temp_storage(temp_storage_bytes);
DispatchScan::Dispatch(temp_storage.data().get(), temp_storage_bytes,
key_value_index_iter, out, TupleScanOp<Tuple>(),
cub::NullType(), batch.Size(), nullptr, false);
}
// Pad every row's unused trailing slots (beyond its valid-element count) with
// the null symbol so the page is fully initialized after compaction.
void WriteNullValues(EllpackPageImpl* dst, int device_idx,
common::Span<size_t> row_counts) {
// Write the null values
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
auto row_stride = dst->row_stride;
// One thread per (row, slot) pair across the whole page.
dh::LaunchN(row_stride * dst->n_rows, [=] __device__(size_t idx) {
// For some reason this variable got captured as const
auto writer_non_const = writer;
size_t row_idx = idx / row_stride;
size_t row_offset = idx % row_stride;
if (row_offset >= row_counts[row_idx]) {
writer_non_const.AtomicWriteSymbol(d_compressed_buffer,
device_accessor.NullValue(), idx);
}
});
}
// Build a page from a device adapter batch (cuDF/CuPy): allocate via the
// empty-page constructor, compact valid elements in, then pad each row's
// remainder with the null symbol.
template <typename AdapterBatch>
EllpackPageImpl::EllpackPageImpl(AdapterBatch batch, float missing, int device, bool is_dense,
common::Span<size_t> row_counts_span,
common::Span<FeatureType const> feature_types, size_t row_stride,
size_t n_rows, common::HistogramCuts const& cuts) {
dh::safe_cuda(cudaSetDevice(device));
*this = EllpackPageImpl(device, cuts, is_dense, row_stride, n_rows);
CopyDataToEllpack(batch, feature_types, this, device, missing);
WriteNullValues(this, device, row_counts_span);
}
// Explicit instantiations for the two supported adapter batch types.
#define ELLPACK_BATCH_SPECIALIZE(__BATCH_T) \
template EllpackPageImpl::EllpackPageImpl( \
__BATCH_T batch, float missing, int device, bool is_dense, \
common::Span<size_t> row_counts_span, common::Span<FeatureType const> feature_types, \
size_t row_stride, size_t n_rows, common::HistogramCuts const& cuts);
ELLPACK_BATCH_SPECIALIZE(data::CudfAdapterBatch)
ELLPACK_BATCH_SPECIALIZE(data::CupyAdapterBatch)
// A functor that copies the data from one EllpackPage to another.
// Per-element functor: re-encode symbol element_id of src into dst at
// element_id + offset (symbol widths may require atomic byte updates).
struct CopyPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
// The number of elements to skip.
size_t offset;
CopyPage(EllpackPageImpl *dst, EllpackPageImpl const *src, size_t offset)
: cbw{dst->NumSymbols()}, dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
offset(offset) {}
__device__ void operator()(size_t element_id) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[element_id],
element_id + offset);
}
};
// Copy the data from the given EllpackPage to the current page.
// Append the contents of `page` into this page starting at symbol `offset`.
// Both pages must share row_stride and symbol alphabet; returns the number of
// symbols copied.
size_t EllpackPageImpl::Copy(int device, EllpackPageImpl const *page,
size_t offset) {
monitor_.Start("Copy");
size_t num_elements = page->n_rows * page->row_stride;
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_GE(n_rows * row_stride, offset + num_elements);
// Self-concatenation would read and write the same buffer concurrently.
if (page == this) {
LOG(FATAL) << "Concatenating the same Ellpack.";
return this->n_rows * this->row_stride;
}
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
dh::LaunchN(num_elements, CopyPage(this, page, offset));
monitor_.Stop("Copy");
return num_elements;
}
// A functor that compacts the rows from one EllpackPage into another.
// Per-row functor: copy row `base_rowid + row_id` of src into the destination
// row given by row_indexes, skipping rows marked SIZE_MAX.
struct CompactPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
/*! \brief An array that maps the rows from the full DMatrix to the compacted
* page.
*
* The total size is the number of rows in the original, uncompacted DMatrix.
* Elements are the row ids in the compacted page. Rows not needed are set to
* SIZE_MAX.
*
* An example compacting 16 rows to 8 rows:
* [SIZE_MAX, 0, 1, SIZE_MAX, SIZE_MAX, 2, SIZE_MAX, 3, 4, 5, SIZE_MAX, 6,
* SIZE_MAX, 7, SIZE_MAX, SIZE_MAX]
*/
common::Span<size_t> row_indexes;
size_t base_rowid;
size_t row_stride;
CompactPage(EllpackPageImpl* dst, EllpackPageImpl const* src,
common::Span<size_t> row_indexes)
: cbw{dst->NumSymbols()},
dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
row_indexes(row_indexes),
base_rowid{src->base_rowid},
row_stride{src->row_stride} {}
__device__ void operator()(size_t row_id) {
size_t src_row = base_rowid + row_id;
size_t dst_row = row_indexes[src_row];
// SIZE_MAX marks a row that is dropped from the compacted page.
if (dst_row == SIZE_MAX) return;
size_t dst_offset = dst_row * row_stride;
size_t src_offset = row_id * row_stride;
for (size_t j = 0; j < row_stride; j++) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[src_offset + j],
dst_offset + j);
}
}
};
// Compacts the data from the given EllpackPage into the current page.
// Copy the selected rows of `page` into this page according to the
// full-DMatrix -> compacted-page row mapping in row_indexes.
void EllpackPageImpl::Compact(int device, EllpackPageImpl const* page,
common::Span<size_t> row_indexes) {
monitor_.Start("Compact");
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_LE(page->base_rowid + page->n_rows, row_indexes.size());
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
// One thread per source row; each thread copies a whole row (see CompactPage).
dh::LaunchN(page->n_rows, CompactPage(this, page, row_indexes));
monitor_.Stop("Compact");
}
// Initialize the buffer to stored compressed features.
// Allocate (or re-size) the compressed gidx buffer on `device` and ensure it
// is zero-filled before binning writes into it.
void EllpackPageImpl::InitCompressedData(int device) {
size_t num_symbols = NumSymbols();
// Required buffer size for storing data matrix in ELLPack format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
num_symbols);
gidx_buffer.SetDevice(device);
// Don't call fill unnecessarily
if (gidx_buffer.Size() == 0) {
gidx_buffer.Resize(compressed_size_bytes, 0);
} else {
// NOTE(review): Resize presumably zero-initializes only newly grown
// storage, hence the explicit fill when the buffer already existed —
// confirm HostDeviceVector::Resize semantics.
gidx_buffer.Resize(compressed_size_bytes, 0);
thrust::fill(dh::tbegin(gidx_buffer), dh::tend(gidx_buffer), 0);
}
}
// Compress a CSR page into ELLPACK.
// Bin a CSR SparsePage into the compressed buffer, processing rows in batches
// sized so their Entry arrays fit in a fraction of GPU memory.
void EllpackPageImpl::CreateHistIndices(int device,
const SparsePage& row_batch,
common::Span<FeatureType const> feature_types) {
if (row_batch.Size() == 0) return;
// The last symbol encodes "missing".
unsigned int null_gidx_value = NumSymbols() - 1;
const auto& offset_vec = row_batch.offset.ConstHostVector();
// bin and compress entries in batches of rows
// Heuristic: cap a batch's entry storage at 1/16th of total device memory.
// NOTE(review): if row_stride is very large this quotient can be 0, which
// would divide by zero in DivRoundUp below — confirm upstream guarantees.
size_t gpu_batch_nrows =
std::min(dh::TotalMemory(device) / (16 * row_stride * sizeof(Entry)),
static_cast<size_t>(row_batch.Size()));
size_t gpu_nbatches = common::DivRoundUp(row_batch.Size(), gpu_batch_nrows)
;
for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
size_t batch_row_end =
std::min((gpu_batch + 1) * gpu_batch_nrows, row_batch.Size());
size_t batch_nrows = batch_row_end - batch_row_begin;
const auto ent_cnt_begin = offset_vec[batch_row_begin];
const auto ent_cnt_end = offset_vec[batch_row_end];
/*! \brief row offset in SparsePage (the input data). */
dh::device_vector<size_t> row_ptrs(batch_nrows + 1);
thrust::copy(offset_vec.data() + batch_row_begin,
offset_vec.data() + batch_row_end + 1, row_ptrs.begin());
// number of entries in this batch.
size_t n_entries = ent_cnt_end - ent_cnt_begin;
dh::device_vector<Entry> entries_d(n_entries);
// copy data entries to device.
if (row_batch.data.DeviceCanRead()) {
// Data already resident on a device: device-to-device copy.
auto const& d_data = row_batch.data.ConstDeviceSpan();
dh::safe_cuda(cudaMemcpyAsync(
entries_d.data().get(), d_data.data() + ent_cnt_begin,
n_entries * sizeof(Entry), cudaMemcpyDefault));
} else {
const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector();
dh::safe_cuda(cudaMemcpyAsync(
entries_d.data().get(), data_vec.data() + ent_cnt_begin,
n_entries * sizeof(Entry), cudaMemcpyDefault));
}
const dim3 block3(32, 8, 1); // 256 threads
// x covers rows of the batch, y covers slots within a row.
const dim3 grid3(common::DivRoundUp(batch_nrows, block3.x),
common::DivRoundUp(row_stride, block3.y), 1);
auto device_accessor = GetDeviceAccessor(device);
dh::LaunchKernel {grid3, block3}(
CompressBinEllpackKernel, common::CompressedBufferWriter(NumSymbols()),
gidx_buffer.DevicePointer(), row_ptrs.data().get(),
entries_d.data().get(), device_accessor.gidx_fvalue_map.data(),
device_accessor.feature_segments.data(), feature_types,
batch_row_begin, batch_nrows, row_stride,
null_gidx_value);
}
}
// Return the number of rows contained in this page.
// Number of rows stored in this ELLPACK page.
size_t EllpackPageImpl::Size() const {
return n_rows;
}
// Return the memory cost for storing the compressed features.
// Bytes needed for the compressed gidx buffer holding num_rows rows of
// row_stride symbols each; the symbol alphabet is TotalBins() + 1 (the extra
// symbol is the null/"missing" marker, cf. null_gidx_value in CreateHistIndices).
size_t EllpackPageImpl::MemCostBytes(size_t num_rows, size_t row_stride,
const common::HistogramCuts& cuts) {
// Required buffer size for storing data matrix in ELLPACK format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * num_rows,
cuts.TotalBins() + 1);
return compressed_size_bytes;
}
// Build a non-owning device-side view of this page for use in kernels.
// SetDevice presumably makes gidx_buffer resident on `device` before the
// pointer is taken — confirm HostDeviceVector semantics.
EllpackDeviceAccessor EllpackPageImpl::GetDeviceAccessor(
int device, common::Span<FeatureType const> feature_types) const {
gidx_buffer.SetDevice(device);
return {device,
cuts_,
is_dense,
row_stride,
base_rowid,
n_rows,
// Iterator that decodes the bit-packed symbols back into bin indices.
common::CompressedIterator<uint32_t>(gidx_buffer.ConstDevicePointer(),
NumSymbols()),
feature_types};
}
} // namespace xgboost
|
e011005d432bb180e941e9fe17fbb5199f3b61a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RiemannFitOnGPU.h"
#include "CUDACore/device_unique_ptr.h"
// Launch the Riemann fit pipeline (fast fit -> circle fit -> line fit) on
// `stream` for triplets (3 hits), quadruplets (4) and pentuplets (5, or
// refitted as 4 hits when fit5as4_ is set), processing tuples in windows of
// maxNumberOfConcurrentFits_.
// NOTE(review): `nhits` is unused here; also `numberOfBlocks / 4` is 0 when
// numberOfBlocks < 4, which would launch a zero-block grid — confirm
// maxNumberOfConcurrentFits_ is large enough to rule this out.
void HelixFitOnGPU::launchRiemannKernels(HitsView const *hv,
uint32_t nhits,
uint32_t maxNumberOfTuples,
hipStream_t stream) {
assert(tuples_);
auto blockSize = 64;
auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize;
// Fit internals
// Stream-ordered scratch buffers shared by all fits in one window.
auto hitsGPU = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix3xNd<4>) / sizeof(double), stream);
auto hits_geGPU = cms::cuda::make_device_unique<float[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix6x4f) / sizeof(float), stream);
auto fast_fit_resultsGPU = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Vector4d) / sizeof(double), stream);
auto circle_fit_resultsGPU_holder =
cms::cuda::make_device_unique<char[]>(maxNumberOfConcurrentFits_ * sizeof(riemannFit::CircleFit), stream);
riemannFit::CircleFit *circle_fit_resultsGPU_ = (riemannFit::CircleFit *)(circle_fit_resultsGPU_holder.get());
for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) {
// triplets
hipLaunchKernelGGL(( kernel_FastFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream,
tuples_, tupleMultiplicity_, 3, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernel_CircleFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tupleMultiplicity_,
3,
bField_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernel_LineFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tupleMultiplicity_,
3,
bField_,
outputSoa_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(hipGetLastError());
// quads
hipLaunchKernelGGL(( kernel_FastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
tuples_, tupleMultiplicity_, 4, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernel_CircleFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_,
4,
bField_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernel_LineFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_,
4,
bField_,
outputSoa_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(hipGetLastError());
if (fit5as4_) {
// penta (only first 4 hits fitted)
hipLaunchKernelGGL(( kernel_FastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
tuples_, tupleMultiplicity_, 5, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernel_CircleFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_,
5,
bField_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernel_LineFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_,
5,
bField_,
outputSoa_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(hipGetLastError());
} else {
// penta all 5
hipLaunchKernelGGL(( kernel_FastFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
tuples_, tupleMultiplicity_, 5, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernel_CircleFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_,
5,
bField_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernel_LineFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_,
5,
bField_,
outputSoa_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(hipGetLastError());
}
}
}
| e011005d432bb180e941e9fe17fbb5199f3b61a0.cu | #include "RiemannFitOnGPU.h"
#include "CUDACore/device_unique_ptr.h"
// Launch the Riemann fit pipeline (fast fit -> circle fit -> line fit) on
// `stream` for triplets (3 hits), quadruplets (4) and pentuplets (5, or
// refitted as 4 hits when fit5as4_ is set), processing tuples in windows of
// maxNumberOfConcurrentFits_.
// NOTE(review): `nhits` is unused here; also `numberOfBlocks / 4` is 0 when
// numberOfBlocks < 4, which would launch an invalid zero-block grid — confirm
// maxNumberOfConcurrentFits_ is large enough to rule this out.
void HelixFitOnGPU::launchRiemannKernels(HitsView const *hv,
uint32_t nhits,
uint32_t maxNumberOfTuples,
cudaStream_t stream) {
assert(tuples_);
auto blockSize = 64;
auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize;
// Fit internals
// Stream-ordered scratch buffers shared by all fits in one window.
auto hitsGPU = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix3xNd<4>) / sizeof(double), stream);
auto hits_geGPU = cms::cuda::make_device_unique<float[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix6x4f) / sizeof(float), stream);
auto fast_fit_resultsGPU = cms::cuda::make_device_unique<double[]>(
maxNumberOfConcurrentFits_ * sizeof(riemannFit::Vector4d) / sizeof(double), stream);
auto circle_fit_resultsGPU_holder =
cms::cuda::make_device_unique<char[]>(maxNumberOfConcurrentFits_ * sizeof(riemannFit::CircleFit), stream);
riemannFit::CircleFit *circle_fit_resultsGPU_ = (riemannFit::CircleFit *)(circle_fit_resultsGPU_holder.get());
for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) {
// triplets
kernel_FastFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(
tuples_, tupleMultiplicity_, 3, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset);
cudaCheck(cudaGetLastError());
kernel_CircleFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(tupleMultiplicity_,
3,
bField_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(cudaGetLastError());
kernel_LineFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(tupleMultiplicity_,
3,
bField_,
outputSoa_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(cudaGetLastError());
// quads
kernel_FastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
tuples_, tupleMultiplicity_, 4, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset);
cudaCheck(cudaGetLastError());
kernel_CircleFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_,
4,
bField_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(cudaGetLastError());
kernel_LineFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_,
4,
bField_,
outputSoa_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(cudaGetLastError());
if (fit5as4_) {
// penta (only first 4 hits fitted)
kernel_FastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
tuples_, tupleMultiplicity_, 5, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset);
cudaCheck(cudaGetLastError());
kernel_CircleFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_,
5,
bField_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(cudaGetLastError());
kernel_LineFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_,
5,
bField_,
outputSoa_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(cudaGetLastError());
} else {
// penta all 5
kernel_FastFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
tuples_, tupleMultiplicity_, 5, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset);
cudaCheck(cudaGetLastError());
kernel_CircleFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_,
5,
bField_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(cudaGetLastError());
kernel_LineFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_,
5,
bField_,
outputSoa_,
hitsGPU.get(),
hits_geGPU.get(),
fast_fit_resultsGPU.get(),
circle_fit_resultsGPU_,
offset);
cudaCheck(cudaGetLastError());
}
}
}
|
f4fd6a318d1e0fd37c26dadb2c6d54ce4c511ad6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
Copyright 2016 Joachim Wolff
Master Thesis
Tutors: Fabrizio Costa, Milad Miladi
Winter semester 2015/2016
Chair of Bioinformatics
Department of Computer Science
Faculty of Engineering
Albert-Ludwigs-University Freiburg im Breisgau
**/
#include "inverseIndexCuda.h"
#include "kernel.h"
// Store the minhash configuration; no device resources are acquired here.
InverseIndexCuda::InverseIndexCuda(size_t pNumberOfHashFunctions,
size_t pShingle, size_t pShingleSize,
size_t pBlockSize, size_t pHashAlgorithm) {
mNumberOfHashFunctions = pNumberOfHashFunctions;
mShingle = pShingle;
mShingleSize = pShingleSize;
mBlockSize = pBlockSize;
// 0 selects min-hash; other values select the (disabled) WTA-hash path.
mHashAlgorithm = pHashAlgorithm;
}
// No owned device memory to release; buffers are freed per call.
InverseIndexCuda::~InverseIndexCuda() {
}
// Allocate device buffers and copy the sparse matrix (feature ids, values,
// per-instance sizes) to the GPU.  Ownership of the three device pointers
// passes to the caller, which must hipFree them.
// NOTE(review): none of the hipMalloc/hipMemcpy return codes are checked;
// an allocation failure here silently propagates as garbage reads later.
void InverseIndexCuda::copyDataToGpu(SparseMatrixFloat* pRawData, int** pDevFeatureList,
float** pDevValueList, size_t** pSizeList) {
// memory for the number of features per instance
hipMalloc((void **) &(*pSizeList),
sizeof(size_t) * pRawData->size());
// copy the size of all instances to the gpu
hipMemcpy((*pSizeList), pRawData->getSparseMatrixSizeOfInstances(),
sizeof(size_t) * pRawData->size(),
hipMemcpyHostToDevice);
// memory for instances and their featureIds
hipMalloc((void **) &(*pDevFeatureList),
pRawData->size() * pRawData->getMaxNnz() * sizeof(int));
// memory for the values of the features of the instances
hipMalloc((void **) &(*pDevValueList),
pRawData->size() * pRawData->getMaxNnz() * sizeof(float));
// copy instances and their feature ids to the gpu
hipMemcpy((*pDevFeatureList), pRawData->getSparseMatrixIndex(),
pRawData->size() * pRawData->getMaxNnz() * sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy((*pDevValueList), pRawData->getSparseMatrixValues(),
pRawData->size() * pRawData->getMaxNnz() * sizeof(float),
hipMemcpyHostToDevice);
}
// Compute min-hash signatures for all instances of pRawData on the GPU and
// return them as one heap-allocated vsize_t per instance in pSignatures.
// NOTE(review): pStartIndex/pEndIndex/pNumberOfInstances/pNumberOfBlocks/
// pNumberOfThreads/pRangeK are unused — the launch is hard-coded to
// <<<128, 128>>> and always covers the whole dataset; confirm intent.
// NOTE(review): malloc's result and all hip* return codes are unchecked.
void InverseIndexCuda::computeSignaturesFittingOnGpu(SparseMatrixFloat* pRawData,
size_t pStartIndex, size_t pEndIndex,
size_t pNumberOfInstances, size_t pNumberOfBlocks,
size_t pNumberOfThreads, size_t pShingleFactor,
size_t pBlockSizeShingle,
vvsize_t_p* pSignatures, size_t pRangeK) {
// copy data to gpu
int* mDev_FeatureList;
size_t* mDev_SizeOfInstanceList;
int* mDev_ComputedSignaturesPerInstance;
float* mDev_ValuesList;
// size_t* mDev_JumpLength;
// float* mDev_DotProduct;
copyDataToGpu(pRawData, &mDev_FeatureList, &mDev_ValuesList, &mDev_SizeOfInstanceList);
// Signature length after shingling.
size_t signaturesSize = ceil(mNumberOfHashFunctions * pBlockSizeShingle / (float) pShingleFactor);
int* instancesHashValues = (int*) malloc(pRawData->size() * signaturesSize * sizeof(int));
// memory for the inverse index on the gpu.
// for each instance the number of hash functions
hipMalloc((void **) &mDev_ComputedSignaturesPerInstance,
pRawData->size() * signaturesSize * sizeof(int));
// Per-block scratch for the kernel (128 matches the launched block count).
int* dev_SignaturesBlockSize;
hipMalloc((void **) &dev_SignaturesBlockSize,
128 * mNumberOfHashFunctions * pBlockSizeShingle * sizeof(int));
// execute kernel on gpu
if (mHashAlgorithm == 0) {
hipLaunchKernelGGL(( fitCudaMinHash), dim3(128), dim3(128), 0, 0,
mDev_FeatureList,
mDev_SizeOfInstanceList,
mNumberOfHashFunctions,
pRawData->getMaxNnz(),
mDev_ComputedSignaturesPerInstance,
pRawData->size(), 0, mBlockSize, mShingleSize, dev_SignaturesBlockSize);
hipDeviceSynchronize();
} else {
// WTA-hash path is currently disabled.
// fitCudaWtaHash<<<128, 128>>>
// (mDev_FeatureList,
// mDev_SizeOfInstanceList,
// mNumberOfHashFunctions,
// mDev_JumpLength,
// mDev_ComputedSignaturesPerInstance,
// end, start, mBlockSize, mShingleSize, dev_SignaturesBlockSize);
}
// copy results back to host
hipMemcpy(instancesHashValues, mDev_ComputedSignaturesPerInstance,
pRawData->size() * signaturesSize * sizeof(int),
hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// copy values into one vector per instance
for(size_t i = 0; i < pRawData->size(); ++i) {
vsize_t* instance = new vsize_t(signaturesSize);
for (size_t j = 0; j < signaturesSize; ++j) {
(*instance)[j] = static_cast<size_t> (instancesHashValues[i*signaturesSize + j]);
}
(*pSignatures)[i] = instance;
}
hipDeviceSynchronize();
free(instancesHashValues);
hipFree(mDev_ComputedSignaturesPerInstance);
hipFree(dev_SignaturesBlockSize);
hipFree(mDev_FeatureList);
hipFree(mDev_ValuesList);
hipFree(mDev_SizeOfInstanceList);
hipDeviceSynchronize();
}
// Compute min-hash signatures for query instances.
// NOTE(review): apart from local variable names this duplicates
// computeSignaturesFittingOnGpu — the same fitCudaMinHash kernel with the
// same arguments; consider extracting a shared helper.  The unused
// parameters (pStartIndex..pNumberOfThreads, pRangeK) and the unchecked
// malloc/hip* calls noted there apply here as well.
void InverseIndexCuda::computeSignaturesQueryOnGpu(SparseMatrixFloat* pRawData,
size_t pStartIndex, size_t pEndIndex,
size_t pNumberOfInstances, size_t pNumberOfBlocks,
size_t pNumberOfThreads, size_t pShingleFactor,
size_t pBlockSizeShingle,
vvsize_t_p* pSignatures, size_t pRangeK) {
// copy data to gpu
int* featureList;
float* valueList;
size_t* sizeOfInstances;
copyDataToGpu(pRawData, &featureList, &valueList, &sizeOfInstances);
// Signature length after shingling.
size_t signaturesSize = ceil(mNumberOfHashFunctions * pBlockSizeShingle / (float) pShingleFactor);
int* instancesHashValues = (int*) malloc(pRawData->size() * signaturesSize * sizeof(int));
int* computedSignaturesPerInstance;
// memory for the inverse index on the gpu.
// for each instance the number of hash functions
hipMalloc((void **) &computedSignaturesPerInstance,
pRawData->size() * signaturesSize * sizeof(int));
int* dev_SignaturesBlockSize;
hipMalloc((void **) &dev_SignaturesBlockSize,
128 * mNumberOfHashFunctions * pBlockSizeShingle * sizeof(int));
// execute kernel on gpu
if (mHashAlgorithm == 0) {
hipLaunchKernelGGL(( fitCudaMinHash), dim3(128), dim3(128), 0, 0,
featureList,
sizeOfInstances,
mNumberOfHashFunctions,
pRawData->getMaxNnz(),
computedSignaturesPerInstance,
pRawData->size(), 0, mBlockSize, mShingleSize, dev_SignaturesBlockSize);
hipDeviceSynchronize();
} else {
// WTA-hash path is currently disabled.
// fitCudaWtaHash<<<128, 128>>>
// (featureList,
// valueList,
// sizeOfInstances,
// mNumberOfHashFunctions,
// pRawData->getMaxNnz(),
// computedSignaturesPerInstance,
// pRawData->size(), 0, mBlockSize, mShingleSize, dev_SignaturesBlockSize, (int) pRangeK);
// hipDeviceSynchronize();
}
// copy results back to host
hipMemcpy(instancesHashValues, computedSignaturesPerInstance,
pRawData->size() * signaturesSize * sizeof(int),
hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// copy values into one vector per instance
for(size_t i = 0; i < pRawData->size(); ++i) {
vsize_t* instance = new vsize_t(signaturesSize);
for (size_t j = 0; j < signaturesSize; ++j) {
(*instance)[j] = static_cast<size_t> (instancesHashValues[i*signaturesSize + j]);
}
(*pSignatures)[i] = instance;
}
hipDeviceSynchronize();
free(instancesHashValues);
hipFree(computedSignaturesPerInstance);
hipFree(dev_SignaturesBlockSize);
hipFree(featureList);
hipFree(valueList);
hipFree(sizeOfInstances);
hipDeviceSynchronize();
} | f4fd6a318d1e0fd37c26dadb2c6d54ce4c511ad6.cu | /**
Copyright 2016 Joachim Wolff
Master Thesis
Tutors: Fabrizio Costa, Milad Miladi
Winter semester 2015/2016
Chair of Bioinformatics
Department of Computer Science
Faculty of Engineering
Albert-Ludwigs-University Freiburg im Breisgau
**/
#include "inverseIndexCuda.h"
#include "kernel.h"
InverseIndexCuda::InverseIndexCuda(size_t pNumberOfHashFunctions,
size_t pShingle, size_t pShingleSize,
size_t pBlockSize, size_t pHashAlgorithm) {
mNumberOfHashFunctions = pNumberOfHashFunctions;
mShingle = pShingle;
mShingleSize = pShingleSize;
mBlockSize = pBlockSize;
mHashAlgorithm = pHashAlgorithm;
}
InverseIndexCuda::~InverseIndexCuda() {
}
void InverseIndexCuda::copyDataToGpu(SparseMatrixFloat* pRawData, int** pDevFeatureList,
float** pDevValueList, size_t** pSizeList) {
// memory for the number of features per instance
cudaMalloc((void **) &(*pSizeList),
sizeof(size_t) * pRawData->size());
// copy the size of all instances to the gpu
cudaMemcpy((*pSizeList), pRawData->getSparseMatrixSizeOfInstances(),
sizeof(size_t) * pRawData->size(),
cudaMemcpyHostToDevice);
// memory for instances and their featureIds
cudaMalloc((void **) &(*pDevFeatureList),
pRawData->size() * pRawData->getMaxNnz() * sizeof(int));
// memory for the values of the features of the instances
cudaMalloc((void **) &(*pDevValueList),
pRawData->size() * pRawData->getMaxNnz() * sizeof(float));
// copy instances and their feature ids to the gpu
cudaMemcpy((*pDevFeatureList), pRawData->getSparseMatrixIndex(),
pRawData->size() * pRawData->getMaxNnz() * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy((*pDevValueList), pRawData->getSparseMatrixValues(),
pRawData->size() * pRawData->getMaxNnz() * sizeof(float),
cudaMemcpyHostToDevice);
}
void InverseIndexCuda::computeSignaturesFittingOnGpu(SparseMatrixFloat* pRawData,
size_t pStartIndex, size_t pEndIndex,
size_t pNumberOfInstances, size_t pNumberOfBlocks,
size_t pNumberOfThreads, size_t pShingleFactor,
size_t pBlockSizeShingle,
vvsize_t_p* pSignatures, size_t pRangeK) {
// copy data to gpu
int* mDev_FeatureList;
size_t* mDev_SizeOfInstanceList;
int* mDev_ComputedSignaturesPerInstance;
float* mDev_ValuesList;
// size_t* mDev_JumpLength;
// float* mDev_DotProduct;
copyDataToGpu(pRawData, &mDev_FeatureList, &mDev_ValuesList, &mDev_SizeOfInstanceList);
size_t signaturesSize = ceil(mNumberOfHashFunctions * pBlockSizeShingle / (float) pShingleFactor);
int* instancesHashValues = (int*) malloc(pRawData->size() * signaturesSize * sizeof(int));
// memory for the inverse index on the gpu.
// for each instance the number of hash functions
cudaMalloc((void **) &mDev_ComputedSignaturesPerInstance,
pRawData->size() * signaturesSize * sizeof(int));
int* dev_SignaturesBlockSize;
cudaMalloc((void **) &dev_SignaturesBlockSize,
128 * mNumberOfHashFunctions * pBlockSizeShingle * sizeof(int));
// execute kernel on gpu
if (mHashAlgorithm == 0) {
fitCudaMinHash<<<128, 128>>>
(mDev_FeatureList,
mDev_SizeOfInstanceList,
mNumberOfHashFunctions,
pRawData->getMaxNnz(),
mDev_ComputedSignaturesPerInstance,
pRawData->size(), 0, mBlockSize, mShingleSize, dev_SignaturesBlockSize);
cudaDeviceSynchronize();
} else {
// fitCudaWtaHash<<<128, 128>>>
// (mDev_FeatureList,
// mDev_SizeOfInstanceList,
// mNumberOfHashFunctions,
// mDev_JumpLength,
// mDev_ComputedSignaturesPerInstance,
// end, start, mBlockSize, mShingleSize, dev_SignaturesBlockSize);
}
// copy results back to host
cudaMemcpy(instancesHashValues, mDev_ComputedSignaturesPerInstance,
pRawData->size() * signaturesSize * sizeof(int),
cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// copy values into one vector per instance
for(size_t i = 0; i < pRawData->size(); ++i) {
vsize_t* instance = new vsize_t(signaturesSize);
for (size_t j = 0; j < signaturesSize; ++j) {
(*instance)[j] = static_cast<size_t> (instancesHashValues[i*signaturesSize + j]);
}
(*pSignatures)[i] = instance;
}
cudaDeviceSynchronize();
free(instancesHashValues);
cudaFree(mDev_ComputedSignaturesPerInstance);
cudaFree(dev_SignaturesBlockSize);
cudaFree(mDev_FeatureList);
cudaFree(mDev_ValuesList);
cudaFree(mDev_SizeOfInstanceList);
cudaDeviceSynchronize();
}
void InverseIndexCuda::computeSignaturesQueryOnGpu(SparseMatrixFloat* pRawData,
size_t pStartIndex, size_t pEndIndex,
size_t pNumberOfInstances, size_t pNumberOfBlocks,
size_t pNumberOfThreads, size_t pShingleFactor,
size_t pBlockSizeShingle,
vvsize_t_p* pSignatures, size_t pRangeK) {
// copy data to gpu
int* featureList;
float* valueList;
size_t* sizeOfInstances;
copyDataToGpu(pRawData, &featureList, &valueList, &sizeOfInstances);
size_t signaturesSize = ceil(mNumberOfHashFunctions * pBlockSizeShingle / (float) pShingleFactor);
int* instancesHashValues = (int*) malloc(pRawData->size() * signaturesSize * sizeof(int));
int* computedSignaturesPerInstance;
// memory for the inverse index on the gpu.
// for each instance the number of hash functions
cudaMalloc((void **) &computedSignaturesPerInstance,
pRawData->size() * signaturesSize * sizeof(int));
int* dev_SignaturesBlockSize;
cudaMalloc((void **) &dev_SignaturesBlockSize,
128 * mNumberOfHashFunctions * pBlockSizeShingle * sizeof(int));
// execute kernel on gpu
if (mHashAlgorithm == 0) {
fitCudaMinHash<<<128, 128>>>
(featureList,
sizeOfInstances,
mNumberOfHashFunctions,
pRawData->getMaxNnz(),
computedSignaturesPerInstance,
pRawData->size(), 0, mBlockSize, mShingleSize, dev_SignaturesBlockSize);
cudaDeviceSynchronize();
} else {
// fitCudaWtaHash<<<128, 128>>>
// (featureList,
// valueList,
// sizeOfInstances,
// mNumberOfHashFunctions,
// pRawData->getMaxNnz(),
// computedSignaturesPerInstance,
// pRawData->size(), 0, mBlockSize, mShingleSize, dev_SignaturesBlockSize, (int) pRangeK);
// cudaDeviceSynchronize();
}
// copy results back to host
cudaMemcpy(instancesHashValues, computedSignaturesPerInstance,
pRawData->size() * signaturesSize * sizeof(int),
cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// copy values into one vector per instance
for(size_t i = 0; i < pRawData->size(); ++i) {
vsize_t* instance = new vsize_t(signaturesSize);
for (size_t j = 0; j < signaturesSize; ++j) {
(*instance)[j] = static_cast<size_t> (instancesHashValues[i*signaturesSize + j]);
}
(*pSignatures)[i] = instance;
}
cudaDeviceSynchronize();
free(instancesHashValues);
cudaFree(computedSignaturesPerInstance);
cudaFree(dev_SignaturesBlockSize);
cudaFree(featureList);
cudaFree(valueList);
cudaFree(sizeOfInstances);
cudaDeviceSynchronize();
} |
7941db24267e96e1a8fe8ece8af8aa3306891c5d.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/ReduceOps.h>
#include <ATen/native/Resize.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Resize.h>
#include <ATen/native/hip/Normalization.cuh>
#include <c10/hip/HIPMathCompat.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/batch_norm_backward_elemt_native.h>
#include <ATen/ops/batch_norm_backward_reduce_native.h>
#include <ATen/ops/batch_norm_elemt_native.h>
#include <ATen/ops/batch_norm_gather_stats_native.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts_native.h>
#include <ATen/ops/batch_norm_stats_native.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/from_blob.h>
#include <ATen/ops/native_batch_norm_backward_native.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/scalar_tensor.h>
#endif
namespace at { namespace native {
namespace {
ScalarType first_type() {
return ScalarType::Undefined;
}
template <typename... Args>
ScalarType first_type(const Tensor& arg, const Args&... parameters) {
return arg.defined() ? arg.scalar_type() : first_type(parameters...);
}
// A transform is mixed type if the parameters are higher precision than the input
template <typename... Args>
bool is_mixed_type(const Tensor& input, const Args&... parameters) {
const auto parameter_type = first_type(parameters...);
return ((parameter_type != ScalarType::Undefined) &&
(parameter_type != input.scalar_type()));
}
inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) {
return (
self.is_contiguous(at::MemoryFormat::ChannelsLast) ||
self.is_contiguous(at::MemoryFormat::ChannelsLast3d) ||
(self.is_contiguous() && self.strides()[1] == 1)
);
}
enum class Impl {
Contiguous,
ChannelsLast,
General,
};
inline Impl batch_norm_choose_impl(const Tensor& self) {
if (!at::cuda::detail::canUse32BitIndexMath(self)) {
return Impl::General;
}
if (self.is_contiguous()) {
return self.strides()[1] == 1 ? Impl::ChannelsLast : Impl::Contiguous;
}
if (self.is_contiguous(at::MemoryFormat::ChannelsLast)) {
return Impl::ChannelsLast;
}
return Impl::General;
}
inline Impl batch_norm_choose_impl(const Tensor& in1, const Tensor& in2) {
auto imp1 = batch_norm_choose_impl(in1);
if (imp1 == Impl::General) {
return imp1;
}
auto imp2 = batch_norm_choose_impl(in2);
return imp1 == imp2 ? imp1 : Impl::General;
}
void batch_norm_elementwise(
const Tensor& out, const Tensor& self, const c10::optional<Tensor>& weight_opt,
const c10::optional<Tensor>& bias_opt, const Tensor& mean_, const Tensor& invstd_) {
switch (batch_norm_choose_impl(self)) {
case Impl::Contiguous: {
c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt);
c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(bias_opt);
resize_output(out, self.sizes());
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(),
"batch_norm_elementwise_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(self, *weight, *bias);
if (mixed_type) {
batch_norm_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(
out, self, *weight, *bias, mean_, invstd_);
} else {
batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>(
out, self, *weight, *bias, mean_, invstd_);
}
});
return;
}
case Impl::ChannelsLast: {
auto weight = at::borrow_from_optional_tensor(weight_opt);
auto bias = at::borrow_from_optional_tensor(bias_opt);
if (resize_output_check(out, self.sizes())) {
resize_impl_cuda_(out.unsafeGetTensorImpl(), self.sizes(), self.strides());
}
if ((out.strides() == self.strides()) &&
(!weight->defined() || weight->is_contiguous()) &&
(!bias->defined() || bias->is_contiguous()) &&
(!mean_.defined() || mean_.is_contiguous()) &&
(!invstd_.defined() || invstd_.is_contiguous())) {
batch_norm_elemt_channels_last_cuda_template(
out, self, *weight, *bias, mean_, invstd_);
return;
}
C10_FALLTHROUGH;
}
case Impl::General: {
const int64_t ndim = self.dim();
DimVector sizes(ndim, 1), strides(ndim, 0);
// Helper to convert 1d tensors to an nd tensor that broadcasts with input
// All elements go into the channel dimension
auto as_nd = [&](const Tensor& t) {
TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1);
sizes[1] = t.sizes()[0];
strides[1] = t.strides()[0];
return t.as_strided(sizes, strides);
};
auto weight = weight_opt.has_value() && weight_opt->defined() ?
as_nd(*weight_opt) : at::scalar_tensor(1, mean_.options());
auto bias = bias_opt.has_value() && bias_opt->defined() ?
as_nd(*bias_opt) : at::scalar_tensor(0, mean_.options());
auto mean = as_nd(mean_);
auto invstd = as_nd(invstd_);
auto iter = TensorIteratorConfig()
.add_output(out)
.add_input(self)
.add_input(weight)
.add_input(bias)
.add_input(mean)
.add_input(invstd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(),
"batch_norm_elementwise_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input, acc_t weight, acc_t bias,
acc_t mean, acc_t invstd) -> scalar_t {
return ((input - mean) * invstd) * weight + bias;
});
});
return;
}
}
}
Tensor batch_norm_elementwise_backward_train(
const Tensor& grad_out, const Tensor& input, const Tensor& mean, const Tensor& invstd,
const Tensor& weight, const Tensor& sum_dy, const Tensor& sum_dy_xmu) {
switch (batch_norm_choose_impl(input, grad_out)) {
case Impl::Contiguous: {
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"batch_norm_backward_elemt", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(input, weight);
if (mixed_type) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
}
});
}
case Impl::ChannelsLast: {
if ((!weight.defined() || weight.is_contiguous()) &&
mean.is_contiguous() && invstd.is_contiguous()) {
return batch_norm_backward_elemt_channels_last_cuda_template(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
}
C10_FALLTHROUGH;
}
case Impl::General: {
const auto ndim = input.dim();
DimVector sizes(ndim, 1), strides(ndim, 0);
auto as_nd = [&](const Tensor& t) {
TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1);
sizes[1] = t.sizes()[0];
strides[1] = t.strides()[0];
return t.as_strided(sizes, strides);
};
auto invstd_nd = as_nd(invstd);
auto mean_nd = as_nd(mean);
auto sum_dy_nd = as_nd(sum_dy);
auto sum_dy_xmu_nd = as_nd(sum_dy_xmu);
auto weight_nd = weight.defined() ? as_nd(weight) :
at::scalar_tensor(1.0, input.options().dtype(mean.scalar_type()));
Tensor grad_input = at::empty(input.sizes(), grad_out.options());
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(input)
.add_input(weight_nd)
.add_input(mean_nd)
.add_input(invstd_nd)
.add_input(sum_dy_xmu_nd)
.add_input(sum_dy_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
auto norm_fct = static_cast<accscalar_t>(1.0 / (input.numel() /input.size(1)) );
gpu_kernel(iter, [norm_fct] GPU_LAMBDA (scalar_t gO, scalar_t input, accscalar_t weight,
accscalar_t mean, accscalar_t invstd,
accscalar_t xmu, accscalar_t dy) -> scalar_t {
auto factor_1_c = invstd * invstd * xmu * norm_fct;
auto factor_2_c = weight * invstd;
auto m_dy_c = dy * norm_fct;
return (gO - m_dy_c - (input - mean) * factor_1_c) * factor_2_c;
});
});
return grad_input;
}
}
TORCH_INTERNAL_ASSERT(false);
}
Tensor batch_norm_elementwise_backward_eval(
const Tensor& grad_out, const Tensor& input,
const Tensor& invstd, const Tensor& weight) {
const auto ndim = input.dim();
DimVector shape(ndim, 1), strides(ndim, 0);
shape[1] = invstd.sizes()[0];
strides[1] = invstd.strides()[0];
auto invstd_nd = invstd.as_strided(shape, strides);
Tensor grad_input = at::empty(input.sizes(), grad_out.options());
if (weight.defined()) {
strides[1] = weight.strides()[0];
auto weight_nd = weight.as_strided(shape, strides);
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(invstd_nd)
.add_input(weight_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd, accscalar_t weight)
-> scalar_t {
return gO * weight * invstd;
});
});
} else {
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(invstd_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd) -> scalar_t {
return gO * invstd;
});
});
}
return grad_input;
}
void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) {
// NOTE: Epsilon is only used for InvStd, not Var. The value here is ignored.
const double dummy_epsilon = 1e-5;
switch (batch_norm_choose_impl(self)) {
case Impl::Contiguous: {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] {
batch_norm_stats_cuda_template<scalar_t, int32_t, Var>(
save_mean, save_var, self, dummy_epsilon);
});
return;
}
case Impl::ChannelsLast: {
if ((!save_mean.defined() || save_mean.is_contiguous()) &&
(!save_var.defined() || save_var.is_contiguous())) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] {
batch_norm_stats_channels_last_cuda_template<scalar_t, Var>(
save_mean, save_var, self, dummy_epsilon);
});
return;
}
C10_FALLTHROUGH;
}
case Impl::General: {
const int64_t ndim = self.dim();
DimVector reduce_dims(ndim - 1);
reduce_dims[0] = 0;
for (int64_t i = 2; i < ndim; ++i) {
reduce_dims[i - 1] = i;
}
// For some reason this isn't an actual operator but it exists anyway...
at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims,
/*unbiased=*/false, /*keepdim=*/false);
return;
}
}
}
void batch_norm_update_stats(
const Tensor& save_mean, const Tensor& save_var,
const Tensor& running_mean, const Tensor& running_var,
double momentum_, int64_t N) {
auto iter = TensorIteratorConfig()
.add_output(running_mean)
.add_output(running_var)
.add_input(save_mean)
.add_input(save_var)
.add_input(running_mean)
.add_input(running_var)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(),
"batch_norm_update_stats_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
const auto bessel_correction_factor = static_cast<acc_t>(
static_cast<double>(N) / static_cast<double>(N - 1));
const auto momentum = static_cast<acc_t>(momentum_);
gpu_kernel_multiple_outputs(
iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var)
-> thrust::tuple<scalar_t, scalar_t> {
const auto unbiased_var = var * bessel_correction_factor;
return thrust::tuple<scalar_t, scalar_t>{
mean * momentum + (1 - momentum) * running_mean,
unbiased_var * momentum + (1 - momentum) * running_var,
};
});
});
}
void batch_norm_update_stats_and_invert(
const Tensor& save_mean, const Tensor& save_var,
const Tensor& running_mean, const Tensor& running_var,
double momentum_, double epsilon, int64_t N) {
auto iter = TensorIteratorConfig()
.add_output(running_mean)
.add_output(running_var)
.add_output(save_var)
.add_input(save_mean)
.add_input(save_var)
.add_input(running_mean)
.add_input(running_var)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(),
"batch_norm_update_stats_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
const auto bessel_correction_factor = static_cast<acc_t>(
static_cast<double>(N) / static_cast<double>(N - 1));
const auto eps = static_cast<acc_t>(epsilon);
const auto momentum = static_cast<acc_t>(momentum_);
gpu_kernel_multiple_outputs(
iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var)
-> thrust::tuple<scalar_t, scalar_t, acc_t> {
const auto unbiased_var = var * bessel_correction_factor;
return thrust::tuple<scalar_t, scalar_t, acc_t>{
mean * momentum + (1 - momentum) * running_mean,
unbiased_var * momentum + (1 - momentum) * running_var,
c10::hip::compat::rsqrt(var + eps)
};
});
});
}
void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, double epsilon) {
auto iter = TensorIteratorConfig()
.add_output(out_invstd)
.add_input(running_var)
.check_all_same_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_var.scalar_type(),
"batch_norm_invert_std_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
auto eps = static_cast<acc_t>(epsilon);
gpu_kernel(iter, [eps] GPU_LAMBDA (scalar_t var) -> acc_t {
return c10::hip::compat::rsqrt(var + eps);
});
});
}
}
std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined());
const bool has_running_var = (running_var_opt.has_value() && running_var_opt->defined());
TORCH_CHECK(has_running_mean == has_running_var);
if (train) {
batch_norm_mean_var(self, save_mean, save_invstd);
if (has_running_mean) {
const int64_t N = self.numel() / save_mean.numel();
batch_norm_update_stats_and_invert(
save_mean, save_invstd, *running_mean_opt, *running_var_opt,
momentum, epsilon, N);
} else {
batch_norm_calc_invstd(save_invstd, save_invstd, epsilon);
}
} else {
TORCH_CHECK(has_running_mean);
at::native::resize_output(save_mean, running_mean_opt->sizes());
save_mean.copy_(*running_mean_opt, /*non_blocking=*/true);
batch_norm_calc_invstd(save_invstd, running_var_opt.value(), epsilon);
}
batch_norm_elementwise(output, self, weight_opt, bias_opt, save_mean, save_invstd);
return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) {
auto output = at::empty_like(self);
int64_t n_input = self.size(1);
auto options = self.options().dtype(
at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
auto save_mean = at::empty({n_input}, options);
auto save_invstd = at::empty({n_input}, options);
at::native::batch_norm_cuda_out(
self,
weight_opt,
bias_opt,
running_mean_opt,
running_var_opt,
train,
momentum,
epsilon,
output,
save_mean,
save_invstd);
return std::make_tuple(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> _batch_norm_legit_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double epsilon) {
return batch_norm_cuda(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, epsilon);
}
std::tuple<Tensor, Tensor, Tensor> _batch_norm_legit_no_stats_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, bool train, double momentum, double epsilon) {
return batch_norm_cuda(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, epsilon);
}
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
return batch_norm_cuda_out(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, epsilon, output, save_mean, save_invstd);
}
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_no_stats_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
return batch_norm_cuda_out(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, epsilon, output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt);
c10::MaybeOwned<Tensor> save_mean = at::borrow_from_optional_tensor(save_mean_opt);
c10::MaybeOwned<Tensor> save_invstd = at::borrow_from_optional_tensor(save_invstd_opt);
c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt);
c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt);
const bool needs_reduction = train || grad_input_mask[1] || grad_input_mask[2];
// Fused reducion & elementwise kernel
if (needs_reduction && grad_input_mask[0] &&
!batch_norm_use_channels_last_kernels(input) &&
cuda::detail::canUse32BitIndexMath(input) &&
cuda::detail::canUse32BitIndexMath(grad_out)) {
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"batch_norm_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(input, *weight, *running_mean, *running_var);
if (mixed_type) {
return batch_norm_backward_cuda_template<scalar_t, accscalar_t, int32_t>(
grad_out, input, *weight, *running_mean, *running_var,
*save_mean, *save_invstd, train, epsilon, grad_input_mask);
} else {
return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(
grad_out, input, *weight, *running_mean, *running_var,
*save_mean, *save_invstd, train, epsilon, grad_input_mask);
}
});
}
// NOTE: native_batch_norm always returns save_mean and save_invstd to be reused in backward.
// However, this is also called from cudnn_batch_norm in eval mode which doesn't give
// save_mean and save_invstd, so it needs recalculated.
const auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true);
Tensor mean;
TORCH_INTERNAL_ASSERT(save_mean->defined(), "save_mean should always be defined\n");
if (save_mean->numel() != 0) {
mean = *save_mean;
} else if (needs_reduction) {
TORCH_CHECK(!train && running_mean->defined());
mean = (running_mean->scalar_type() == acc_type) ?
*running_mean : running_mean->to(acc_type);
}
Tensor invstd;
TORCH_INTERNAL_ASSERT(save_invstd->defined(), "save_invstd should always be defined\n");
if (save_invstd->numel() != 0) {
invstd = *save_invstd;
} else {
TORCH_CHECK(!train && running_var->defined());
auto n_channels = input.sizes()[1];
invstd = at::empty({n_channels}, input.options().dtype(acc_type));
batch_norm_calc_invstd(invstd, *running_var, epsilon);
}
Tensor sum_dy, sum_dy_xmu, grad_weight, grad_bias;
if (needs_reduction) {
std::tie(sum_dy, sum_dy_xmu, grad_weight, grad_bias) =
batch_norm_backward_reduce_cuda(
grad_out, input, mean, invstd, *weight,
grad_input_mask[0], grad_input_mask[1], grad_input_mask[2]);
}
Tensor grad_input;
if (grad_input_mask[0]) {
if (train) {
// NOTE: sum_dy and sum_dy_xmy are defined, as train implies needs_reduction
grad_input = batch_norm_elementwise_backward_train(
grad_out, input, mean, invstd, *weight, sum_dy, sum_dy_xmu);
} else {
grad_input = batch_norm_elementwise_backward_eval(
grad_out, input, invstd, *weight);
}
}
return std::make_tuple(grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) {
auto options = self.options().dtype(
at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
auto n_channels = self.size(1);
auto save_mean = at::empty({n_channels}, options);
auto save_invstd = at::empty({n_channels}, options);
bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "batch_norm_stats_cuda", [&] {
if (cuda::detail::canUse32BitIndexMath(self)) {
if (use_channels_last_kernel) {
batch_norm_stats_channels_last_cuda_template<scalar_t, InvStd>(
save_mean, save_invstd, self, epsilon);
} else {
batch_norm_stats_cuda_template<scalar_t, int32_t, InvStd>(
save_mean, save_invstd, self, epsilon);
}
} else {
batch_norm_stats_cuda_template<scalar_t, int64_t, InvStd>(
save_mean, save_invstd, self, epsilon);
}
});
return std::tuple<Tensor, Tensor>(save_mean, save_invstd);
}
Tensor batch_norm_elemt_cuda(
const Tensor& self, const c10::optional<Tensor>& weight_opt,
const c10::optional<Tensor>& bias_opt, const Tensor& mean,
const Tensor& invstd, double epsilon) {
auto output = at::empty_like(self);
// FIXME: Epsilon parameter isn't required, we don't take the reciprocal
batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd);
return output;
}
Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) {
// FIXME: Epsilon parameter isn't required, we don't take the reciprocal
batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd);
return output;
}
// accepting input(self) here to determine template data types, since running_mean/running_var are optional
std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt);
const Tensor& running_mean = *running_mean_maybe_owned;
const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();});
std::vector<int64_t> counts(mean.size(0), count);
Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU));
counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype());
return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_);
}
std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda(
const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt);
const Tensor& running_mean = *running_mean_maybe_owned;
const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();});
auto scalar_type = running_mean.defined() ? running_mean.scalar_type() : self.scalar_type();
return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(self)) {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts);
} else {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts);
}
});
}
std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
if (at::cuda::detail::canUse32BitIndexMath(grad_output) &&
batch_norm_use_channels_last_kernels(grad_output) &&
batch_norm_use_channels_last_kernels(input) &&
(!weight.defined() || weight.is_contiguous()) &&
mean.is_contiguous() && invstd.is_contiguous()){
return batch_norm_backward_reduce_cuda_channels_last_template(
grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "batch_norm_backward_reduce", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
const bool mixed_type = is_mixed_type(input, weight);
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(grad_output)) {
if (mixed_type) {
return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
} else {
if (mixed_type) {
return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
}
});
}
Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
if (at::cuda::detail::canUse32BitIndexMath(self) &&
batch_norm_use_channels_last_kernels(self) &&
batch_norm_use_channels_last_kernels(input)) {
return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat;
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float || is_bfloat16_float) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
} else {
if (is_half_float || is_bfloat16_float) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
}
});
}
std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda(
const Tensor& self, const c10::optional<Tensor>& running_mean_opt,
const c10::optional<Tensor>& running_var_opt, double momentum) {
c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt);
c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt);
const int64_t n_input = self.size(1);
auto options = self.options().dtype(
at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
auto save_mean = at::empty({n_input}, options);
auto save_var = at::empty({n_input}, options);
batch_norm_mean_var(self, save_mean, save_var);
TORCH_CHECK(running_mean->defined() == running_var->defined());
if (running_mean->defined()) {
const int64_t N = self.numel() / save_mean.numel();
batch_norm_update_stats(save_mean, save_var, *running_mean, *running_var, momentum, N);
}
return std::tuple<Tensor, Tensor>(save_mean, save_var);
}
} } // namespace at::native
| 7941db24267e96e1a8fe8ece8af8aa3306891c5d.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/ReduceOps.h>
#include <ATen/native/Resize.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Resize.h>
#include <ATen/native/cuda/Normalization.cuh>
#include <c10/cuda/CUDAMathCompat.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/batch_norm_backward_elemt_native.h>
#include <ATen/ops/batch_norm_backward_reduce_native.h>
#include <ATen/ops/batch_norm_elemt_native.h>
#include <ATen/ops/batch_norm_gather_stats_native.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts_native.h>
#include <ATen/ops/batch_norm_stats_native.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/from_blob.h>
#include <ATen/ops/native_batch_norm_backward_native.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/scalar_tensor.h>
#endif
namespace at { namespace native {
namespace {
ScalarType first_type() {
return ScalarType::Undefined;
}
template <typename... Args>
ScalarType first_type(const Tensor& arg, const Args&... parameters) {
return arg.defined() ? arg.scalar_type() : first_type(parameters...);
}
// A transform is mixed type if the parameters are higher precision than the input
template <typename... Args>
bool is_mixed_type(const Tensor& input, const Args&... parameters) {
const auto parameter_type = first_type(parameters...);
return ((parameter_type != ScalarType::Undefined) &&
(parameter_type != input.scalar_type()));
}
inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) {
return (
self.is_contiguous(at::MemoryFormat::ChannelsLast) ||
self.is_contiguous(at::MemoryFormat::ChannelsLast3d) ||
(self.is_contiguous() && self.strides()[1] == 1)
);
}
enum class Impl {
Contiguous,
ChannelsLast,
General,
};
inline Impl batch_norm_choose_impl(const Tensor& self) {
if (!at::cuda::detail::canUse32BitIndexMath(self)) {
return Impl::General;
}
if (self.is_contiguous()) {
return self.strides()[1] == 1 ? Impl::ChannelsLast : Impl::Contiguous;
}
if (self.is_contiguous(at::MemoryFormat::ChannelsLast)) {
return Impl::ChannelsLast;
}
return Impl::General;
}
inline Impl batch_norm_choose_impl(const Tensor& in1, const Tensor& in2) {
auto imp1 = batch_norm_choose_impl(in1);
if (imp1 == Impl::General) {
return imp1;
}
auto imp2 = batch_norm_choose_impl(in2);
return imp1 == imp2 ? imp1 : Impl::General;
}
void batch_norm_elementwise(
const Tensor& out, const Tensor& self, const c10::optional<Tensor>& weight_opt,
const c10::optional<Tensor>& bias_opt, const Tensor& mean_, const Tensor& invstd_) {
switch (batch_norm_choose_impl(self)) {
case Impl::Contiguous: {
c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt);
c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(bias_opt);
resize_output(out, self.sizes());
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(),
"batch_norm_elementwise_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(self, *weight, *bias);
if (mixed_type) {
batch_norm_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(
out, self, *weight, *bias, mean_, invstd_);
} else {
batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>(
out, self, *weight, *bias, mean_, invstd_);
}
});
return;
}
case Impl::ChannelsLast: {
auto weight = at::borrow_from_optional_tensor(weight_opt);
auto bias = at::borrow_from_optional_tensor(bias_opt);
if (resize_output_check(out, self.sizes())) {
resize_impl_cuda_(out.unsafeGetTensorImpl(), self.sizes(), self.strides());
}
if ((out.strides() == self.strides()) &&
(!weight->defined() || weight->is_contiguous()) &&
(!bias->defined() || bias->is_contiguous()) &&
(!mean_.defined() || mean_.is_contiguous()) &&
(!invstd_.defined() || invstd_.is_contiguous())) {
batch_norm_elemt_channels_last_cuda_template(
out, self, *weight, *bias, mean_, invstd_);
return;
}
C10_FALLTHROUGH;
}
case Impl::General: {
const int64_t ndim = self.dim();
DimVector sizes(ndim, 1), strides(ndim, 0);
// Helper to convert 1d tensors to an nd tensor that broadcasts with input
// All elements go into the channel dimension
auto as_nd = [&](const Tensor& t) {
TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1);
sizes[1] = t.sizes()[0];
strides[1] = t.strides()[0];
return t.as_strided(sizes, strides);
};
auto weight = weight_opt.has_value() && weight_opt->defined() ?
as_nd(*weight_opt) : at::scalar_tensor(1, mean_.options());
auto bias = bias_opt.has_value() && bias_opt->defined() ?
as_nd(*bias_opt) : at::scalar_tensor(0, mean_.options());
auto mean = as_nd(mean_);
auto invstd = as_nd(invstd_);
auto iter = TensorIteratorConfig()
.add_output(out)
.add_input(self)
.add_input(weight)
.add_input(bias)
.add_input(mean)
.add_input(invstd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(),
"batch_norm_elementwise_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input, acc_t weight, acc_t bias,
acc_t mean, acc_t invstd) -> scalar_t {
return ((input - mean) * invstd) * weight + bias;
});
});
return;
}
}
}
Tensor batch_norm_elementwise_backward_train(
const Tensor& grad_out, const Tensor& input, const Tensor& mean, const Tensor& invstd,
const Tensor& weight, const Tensor& sum_dy, const Tensor& sum_dy_xmu) {
switch (batch_norm_choose_impl(input, grad_out)) {
case Impl::Contiguous: {
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"batch_norm_backward_elemt", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(input, weight);
if (mixed_type) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
}
});
}
case Impl::ChannelsLast: {
if ((!weight.defined() || weight.is_contiguous()) &&
mean.is_contiguous() && invstd.is_contiguous()) {
return batch_norm_backward_elemt_channels_last_cuda_template(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
}
C10_FALLTHROUGH;
}
case Impl::General: {
const auto ndim = input.dim();
DimVector sizes(ndim, 1), strides(ndim, 0);
auto as_nd = [&](const Tensor& t) {
TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1);
sizes[1] = t.sizes()[0];
strides[1] = t.strides()[0];
return t.as_strided(sizes, strides);
};
auto invstd_nd = as_nd(invstd);
auto mean_nd = as_nd(mean);
auto sum_dy_nd = as_nd(sum_dy);
auto sum_dy_xmu_nd = as_nd(sum_dy_xmu);
auto weight_nd = weight.defined() ? as_nd(weight) :
at::scalar_tensor(1.0, input.options().dtype(mean.scalar_type()));
Tensor grad_input = at::empty(input.sizes(), grad_out.options());
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(input)
.add_input(weight_nd)
.add_input(mean_nd)
.add_input(invstd_nd)
.add_input(sum_dy_xmu_nd)
.add_input(sum_dy_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
auto norm_fct = static_cast<accscalar_t>(1.0 / (input.numel() /input.size(1)) );
gpu_kernel(iter, [norm_fct] GPU_LAMBDA (scalar_t gO, scalar_t input, accscalar_t weight,
accscalar_t mean, accscalar_t invstd,
accscalar_t xmu, accscalar_t dy) -> scalar_t {
auto factor_1_c = invstd * invstd * xmu * norm_fct;
auto factor_2_c = weight * invstd;
auto m_dy_c = dy * norm_fct;
return (gO - m_dy_c - (input - mean) * factor_1_c) * factor_2_c;
});
});
return grad_input;
}
}
TORCH_INTERNAL_ASSERT(false);
}
Tensor batch_norm_elementwise_backward_eval(
const Tensor& grad_out, const Tensor& input,
const Tensor& invstd, const Tensor& weight) {
const auto ndim = input.dim();
DimVector shape(ndim, 1), strides(ndim, 0);
shape[1] = invstd.sizes()[0];
strides[1] = invstd.strides()[0];
auto invstd_nd = invstd.as_strided(shape, strides);
Tensor grad_input = at::empty(input.sizes(), grad_out.options());
if (weight.defined()) {
strides[1] = weight.strides()[0];
auto weight_nd = weight.as_strided(shape, strides);
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(invstd_nd)
.add_input(weight_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd, accscalar_t weight)
-> scalar_t {
return gO * weight * invstd;
});
});
} else {
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(invstd_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd) -> scalar_t {
return gO * invstd;
});
});
}
return grad_input;
}
void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) {
// NOTE: Epsilon is only used for InvStd, not Var. The value here is ignored.
const double dummy_epsilon = 1e-5;
switch (batch_norm_choose_impl(self)) {
case Impl::Contiguous: {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] {
batch_norm_stats_cuda_template<scalar_t, int32_t, Var>(
save_mean, save_var, self, dummy_epsilon);
});
return;
}
case Impl::ChannelsLast: {
if ((!save_mean.defined() || save_mean.is_contiguous()) &&
(!save_var.defined() || save_var.is_contiguous())) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] {
batch_norm_stats_channels_last_cuda_template<scalar_t, Var>(
save_mean, save_var, self, dummy_epsilon);
});
return;
}
C10_FALLTHROUGH;
}
case Impl::General: {
const int64_t ndim = self.dim();
DimVector reduce_dims(ndim - 1);
reduce_dims[0] = 0;
for (int64_t i = 2; i < ndim; ++i) {
reduce_dims[i - 1] = i;
}
// For some reason this isn't an actual operator but it exists anyway...
at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims,
/*unbiased=*/false, /*keepdim=*/false);
return;
}
}
}
void batch_norm_update_stats(
const Tensor& save_mean, const Tensor& save_var,
const Tensor& running_mean, const Tensor& running_var,
double momentum_, int64_t N) {
auto iter = TensorIteratorConfig()
.add_output(running_mean)
.add_output(running_var)
.add_input(save_mean)
.add_input(save_var)
.add_input(running_mean)
.add_input(running_var)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(),
"batch_norm_update_stats_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
const auto bessel_correction_factor = static_cast<acc_t>(
static_cast<double>(N) / static_cast<double>(N - 1));
const auto momentum = static_cast<acc_t>(momentum_);
gpu_kernel_multiple_outputs(
iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var)
-> thrust::tuple<scalar_t, scalar_t> {
const auto unbiased_var = var * bessel_correction_factor;
return thrust::tuple<scalar_t, scalar_t>{
mean * momentum + (1 - momentum) * running_mean,
unbiased_var * momentum + (1 - momentum) * running_var,
};
});
});
}
void batch_norm_update_stats_and_invert(
const Tensor& save_mean, const Tensor& save_var,
const Tensor& running_mean, const Tensor& running_var,
double momentum_, double epsilon, int64_t N) {
auto iter = TensorIteratorConfig()
.add_output(running_mean)
.add_output(running_var)
.add_output(save_var)
.add_input(save_mean)
.add_input(save_var)
.add_input(running_mean)
.add_input(running_var)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(),
"batch_norm_update_stats_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
const auto bessel_correction_factor = static_cast<acc_t>(
static_cast<double>(N) / static_cast<double>(N - 1));
const auto eps = static_cast<acc_t>(epsilon);
const auto momentum = static_cast<acc_t>(momentum_);
gpu_kernel_multiple_outputs(
iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var)
-> thrust::tuple<scalar_t, scalar_t, acc_t> {
const auto unbiased_var = var * bessel_correction_factor;
return thrust::tuple<scalar_t, scalar_t, acc_t>{
mean * momentum + (1 - momentum) * running_mean,
unbiased_var * momentum + (1 - momentum) * running_var,
c10::cuda::compat::rsqrt(var + eps)
};
});
});
}
void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, double epsilon) {
auto iter = TensorIteratorConfig()
.add_output(out_invstd)
.add_input(running_var)
.check_all_same_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_var.scalar_type(),
"batch_norm_invert_std_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
auto eps = static_cast<acc_t>(epsilon);
gpu_kernel(iter, [eps] GPU_LAMBDA (scalar_t var) -> acc_t {
return c10::cuda::compat::rsqrt(var + eps);
});
});
}
}
std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined());
const bool has_running_var = (running_var_opt.has_value() && running_var_opt->defined());
TORCH_CHECK(has_running_mean == has_running_var);
if (train) {
batch_norm_mean_var(self, save_mean, save_invstd);
if (has_running_mean) {
const int64_t N = self.numel() / save_mean.numel();
batch_norm_update_stats_and_invert(
save_mean, save_invstd, *running_mean_opt, *running_var_opt,
momentum, epsilon, N);
} else {
batch_norm_calc_invstd(save_invstd, save_invstd, epsilon);
}
} else {
TORCH_CHECK(has_running_mean);
at::native::resize_output(save_mean, running_mean_opt->sizes());
save_mean.copy_(*running_mean_opt, /*non_blocking=*/true);
batch_norm_calc_invstd(save_invstd, running_var_opt.value(), epsilon);
}
batch_norm_elementwise(output, self, weight_opt, bias_opt, save_mean, save_invstd);
return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) {
auto output = at::empty_like(self);
int64_t n_input = self.size(1);
auto options = self.options().dtype(
at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
auto save_mean = at::empty({n_input}, options);
auto save_invstd = at::empty({n_input}, options);
at::native::batch_norm_cuda_out(
self,
weight_opt,
bias_opt,
running_mean_opt,
running_var_opt,
train,
momentum,
epsilon,
output,
save_mean,
save_invstd);
return std::make_tuple(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> _batch_norm_legit_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double epsilon) {
return batch_norm_cuda(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, epsilon);
}
std::tuple<Tensor, Tensor, Tensor> _batch_norm_legit_no_stats_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, bool train, double momentum, double epsilon) {
return batch_norm_cuda(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, epsilon);
}
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
return batch_norm_cuda_out(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, epsilon, output, save_mean, save_invstd);
}
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_no_stats_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
return batch_norm_cuda_out(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, epsilon, output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt);
c10::MaybeOwned<Tensor> save_mean = at::borrow_from_optional_tensor(save_mean_opt);
c10::MaybeOwned<Tensor> save_invstd = at::borrow_from_optional_tensor(save_invstd_opt);
c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt);
c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt);
const bool needs_reduction = train || grad_input_mask[1] || grad_input_mask[2];
// Fused reducion & elementwise kernel
if (needs_reduction && grad_input_mask[0] &&
!batch_norm_use_channels_last_kernels(input) &&
cuda::detail::canUse32BitIndexMath(input) &&
cuda::detail::canUse32BitIndexMath(grad_out)) {
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"batch_norm_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(input, *weight, *running_mean, *running_var);
if (mixed_type) {
return batch_norm_backward_cuda_template<scalar_t, accscalar_t, int32_t>(
grad_out, input, *weight, *running_mean, *running_var,
*save_mean, *save_invstd, train, epsilon, grad_input_mask);
} else {
return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(
grad_out, input, *weight, *running_mean, *running_var,
*save_mean, *save_invstd, train, epsilon, grad_input_mask);
}
});
}
// NOTE: native_batch_norm always returns save_mean and save_invstd to be reused in backward.
// However, this is also called from cudnn_batch_norm in eval mode which doesn't give
// save_mean and save_invstd, so it needs recalculated.
const auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true);
Tensor mean;
TORCH_INTERNAL_ASSERT(save_mean->defined(), "save_mean should always be defined\n");
if (save_mean->numel() != 0) {
mean = *save_mean;
} else if (needs_reduction) {
TORCH_CHECK(!train && running_mean->defined());
mean = (running_mean->scalar_type() == acc_type) ?
*running_mean : running_mean->to(acc_type);
}
Tensor invstd;
TORCH_INTERNAL_ASSERT(save_invstd->defined(), "save_invstd should always be defined\n");
if (save_invstd->numel() != 0) {
invstd = *save_invstd;
} else {
TORCH_CHECK(!train && running_var->defined());
auto n_channels = input.sizes()[1];
invstd = at::empty({n_channels}, input.options().dtype(acc_type));
batch_norm_calc_invstd(invstd, *running_var, epsilon);
}
Tensor sum_dy, sum_dy_xmu, grad_weight, grad_bias;
if (needs_reduction) {
std::tie(sum_dy, sum_dy_xmu, grad_weight, grad_bias) =
batch_norm_backward_reduce_cuda(
grad_out, input, mean, invstd, *weight,
grad_input_mask[0], grad_input_mask[1], grad_input_mask[2]);
}
Tensor grad_input;
if (grad_input_mask[0]) {
if (train) {
// NOTE: sum_dy and sum_dy_xmy are defined, as train implies needs_reduction
grad_input = batch_norm_elementwise_backward_train(
grad_out, input, mean, invstd, *weight, sum_dy, sum_dy_xmu);
} else {
grad_input = batch_norm_elementwise_backward_eval(
grad_out, input, invstd, *weight);
}
}
return std::make_tuple(grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) {
auto options = self.options().dtype(
at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
auto n_channels = self.size(1);
auto save_mean = at::empty({n_channels}, options);
auto save_invstd = at::empty({n_channels}, options);
bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "batch_norm_stats_cuda", [&] {
if (cuda::detail::canUse32BitIndexMath(self)) {
if (use_channels_last_kernel) {
batch_norm_stats_channels_last_cuda_template<scalar_t, InvStd>(
save_mean, save_invstd, self, epsilon);
} else {
batch_norm_stats_cuda_template<scalar_t, int32_t, InvStd>(
save_mean, save_invstd, self, epsilon);
}
} else {
batch_norm_stats_cuda_template<scalar_t, int64_t, InvStd>(
save_mean, save_invstd, self, epsilon);
}
});
return std::tuple<Tensor, Tensor>(save_mean, save_invstd);
}
Tensor batch_norm_elemt_cuda(
const Tensor& self, const c10::optional<Tensor>& weight_opt,
const c10::optional<Tensor>& bias_opt, const Tensor& mean,
const Tensor& invstd, double epsilon) {
auto output = at::empty_like(self);
// FIXME: Epsilon parameter isn't required, we don't take the reciprocal
batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd);
return output;
}
Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) {
// FIXME: Epsilon parameter isn't required, we don't take the reciprocal
batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd);
return output;
}
// accepting input(self) here to determine template data types, since running_mean/running_var are optional
std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt);
const Tensor& running_mean = *running_mean_maybe_owned;
const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();});
std::vector<int64_t> counts(mean.size(0), count);
Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU));
counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype());
return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_);
}
std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda(
const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt);
const Tensor& running_mean = *running_mean_maybe_owned;
const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();});
auto scalar_type = running_mean.defined() ? running_mean.scalar_type() : self.scalar_type();
return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(self)) {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts);
} else {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts);
}
});
}
std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
if (at::cuda::detail::canUse32BitIndexMath(grad_output) &&
batch_norm_use_channels_last_kernels(grad_output) &&
batch_norm_use_channels_last_kernels(input) &&
(!weight.defined() || weight.is_contiguous()) &&
mean.is_contiguous() && invstd.is_contiguous()){
return batch_norm_backward_reduce_cuda_channels_last_template(
grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "batch_norm_backward_reduce", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
const bool mixed_type = is_mixed_type(input, weight);
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(grad_output)) {
if (mixed_type) {
return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
} else {
if (mixed_type) {
return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
}
});
}
// Element-wise stage of the batch-norm backward pass: forwards grad (self),
// input, per-channel statistics (mean, invstd), optional weight, the reduced
// gradients (sum_dy, sum_dy_xmu) and per-replica element counts (count) to a
// template kernel. Routing: a channels-last fast path when 32-bit indexing is
// possible, otherwise dtype dispatch plus 32- vs 64-bit index selection.
Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
// Fast path: channels-last kernels require both tensors to support the
// channels-last layout and `self` to be 32-bit indexable.
if (at::cuda::detail::canUse32BitIndexMath(self) &&
batch_norm_use_channels_last_kernels(self) &&
batch_norm_use_channels_last_kernels(input)) {
return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
// Mixed-precision case: half/bfloat16 data with float statistics selects
// the accscalar_t (float) stats template parameter below.
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat;
using accscalar_t = at::acc_type<scalar_t, true>;
// Prefer 32-bit indexing when the tensor sizes allow it (faster kernels).
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float || is_bfloat16_float) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
} else {
if (is_half_float || is_bfloat16_float) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
}
});
}
// Computes per-channel batch mean/variance of `self` (in the accumulate dtype
// for `self`'s scalar type) and, when running statistics are supplied, folds
// the new values into them in place using `momentum`.
// Returns (save_mean, save_var). Requires running_mean and running_var to be
// either both defined or both undefined.
std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda(
    const Tensor& self, const c10::optional<Tensor>& running_mean_opt,
    const c10::optional<Tensor>& running_var_opt, double momentum) {
  // See [Note: hacky wrapper removal for optional tensor]
  c10::MaybeOwned<Tensor> maybe_running_mean = at::borrow_from_optional_tensor(running_mean_opt);
  c10::MaybeOwned<Tensor> maybe_running_var = at::borrow_from_optional_tensor(running_var_opt);

  // Statistics are accumulated at higher precision (e.g. float for half input).
  const auto stat_options = self.options().dtype(
      at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
  const int64_t channels = self.size(1);
  auto save_mean = at::empty({channels}, stat_options);
  auto save_var = at::empty({channels}, stat_options);
  batch_norm_mean_var(self, save_mean, save_var);

  TORCH_CHECK(maybe_running_mean->defined() == maybe_running_var->defined());
  if (maybe_running_mean->defined()) {
    // Number of elements reduced per channel (for the unbiased-variance factor).
    const int64_t N = self.numel() / save_mean.numel();
    batch_norm_update_stats(save_mean, save_var, *maybe_running_mean, *maybe_running_var, momentum, N);
  }
  return std::tuple<Tensor, Tensor>(save_mean, save_var);
}
} } // namespace at::native
|
7c952c20d0ed4fdaef9b3126159e98b64ca60446.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/type_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
// Portable atomic helpers for the reduction kernels below.
// Where the hardware has a native atomic for the type, forward to it;
// otherwise emulate with an atomicCAS retry loop over the value's bit
// pattern. On architectures with no 64-bit CAS (pre-SM13) the double
// variants are stubs that return 0.0 — callers must not rely on the
// return value there.
namespace detail
{
__device__ __forceinline__ int cvAtomicAdd(int* address, int val)
{
return ::atomicAdd(address, val);
}
__device__ __forceinline__ unsigned int cvAtomicAdd(unsigned int* address, unsigned int val)
{
return ::atomicAdd(address, val);
}
__device__ __forceinline__ float cvAtomicAdd(float* address, float val)
{
#if __CUDA_ARCH__ >= 200
// Native float atomicAdd exists from Fermi (SM20) onward.
return ::atomicAdd(address, val);
#else
// Emulation: reinterpret the float as int and CAS until no other thread
// intervened between our read and our write.
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(val + __int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
#endif
}
__device__ __forceinline__ double cvAtomicAdd(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
// CAS loop over the 64-bit bit pattern (no native double atomicAdd here).
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
#else
// Pre-SM13: doubles unsupported; no-op stub.
(void) address;
(void) val;
return 0.0;
#endif
}
__device__ __forceinline__ int cvAtomicMin(int* address, int val)
{
return ::atomicMin(address, val);
}
__device__ __forceinline__ float cvAtomicMin(float* address, float val)
{
#if __CUDA_ARCH__ >= 120
// CAS-based float min (no native float atomicMin on any architecture).
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
#else
(void) address;
(void) val;
return 0.0f;
#endif
}
__device__ __forceinline__ double cvAtomicMin(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(::fmin(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
#else
(void) address;
(void) val;
return 0.0;
#endif
}
__device__ __forceinline__ int cvAtomicMax(int* address, int val)
{
return ::atomicMax(address, val);
}
__device__ __forceinline__ float cvAtomicMax(float* address, float val)
{
#if __CUDA_ARCH__ >= 120
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
#else
(void) address;
(void) val;
return 0.0f;
#endif
}
__device__ __forceinline__ double cvAtomicMax(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(::fmax(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
#else
(void) address;
(void) val;
return 0.0;
#endif
}
}
// Unroll<cn> adapts a cn-channel vector value to the scalar reduce()
// machinery: smem_tuple() slices one shared buffer into cn per-channel
// segments of BLOCK_SIZE elements, tie() exposes the vector's components as
// a tuple of references, and op() replicates a binary operator per channel.
// cn == 1 degenerates to the plain scalar forms.
namespace detail
{
template <int cn> struct Unroll;
template <> struct Unroll<1>
{
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ volatile R* smem_tuple(R* smem)
{
return smem;
}
template <typename R>
static __device__ __forceinline__ R& tie(R& val)
{
return val;
}
template <class Op>
static __device__ __forceinline__ const Op& op(const Op& op)
{
return op;
}
};
template <> struct Unroll<2>
{
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*> smem_tuple(R* smem)
{
return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE);
}
template <typename R>
static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
{
return thrust::tie(val.x, val.y);
}
template <class Op>
static __device__ __forceinline__ const thrust::tuple<Op, Op> op(const Op& op)
{
return thrust::make_tuple(op, op);
}
};
template <> struct Unroll<3>
{
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*, volatile R*> smem_tuple(R* smem)
{
return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE);
}
template <typename R>
static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
{
return thrust::tie(val.x, val.y, val.z);
}
template <class Op>
static __device__ __forceinline__ const thrust::tuple<Op, Op, Op> op(const Op& op)
{
return thrust::make_tuple(op, op, op);
}
};
template <> struct Unroll<4>
{
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*, volatile R*, volatile R*> smem_tuple(R* smem)
{
return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE);
}
template <typename R>
static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
{
return thrust::tie(val.x, val.y, val.z, val.w);
}
template <class Op>
static __device__ __forceinline__ const thrust::tuple<Op, Op, Op, Op> op(const Op& op)
{
return thrust::make_tuple(op, op, op, op);
}
};
}
/////////////////////////////////////////////////////////////
// sum
namespace sum
{
// Device-global ticket counter used by the pre-Fermi last-block reduction
// path in GlobalReduce (reset to 0 by the last block that finishes).
__device__ unsigned int blocks_finished = 0;
// AtomicAdd<R, cn>: atomically accumulates a cn-channel vector into cn
// consecutive scalars at `ptr` (one cvAtomicAdd per component).
template <typename R, int cn> struct AtomicAdd;
template <typename R> struct AtomicAdd<R, 1>
{
static __device__ void run(R* ptr, R val)
{
detail::cvAtomicAdd(ptr, val);
}
};
template <typename R> struct AtomicAdd<R, 2>
{
typedef typename TypeVec<R, 2>::vec_type val_type;
static __device__ void run(R* ptr, val_type val)
{
detail::cvAtomicAdd(ptr, val.x);
detail::cvAtomicAdd(ptr + 1, val.y);
}
};
template <typename R> struct AtomicAdd<R, 3>
{
typedef typename TypeVec<R, 3>::vec_type val_type;
static __device__ void run(R* ptr, val_type val)
{
detail::cvAtomicAdd(ptr, val.x);
detail::cvAtomicAdd(ptr + 1, val.y);
detail::cvAtomicAdd(ptr + 2, val.z);
}
};
template <typename R> struct AtomicAdd<R, 4>
{
typedef typename TypeVec<R, 4>::vec_type val_type;
static __device__ void run(R* ptr, val_type val)
{
detail::cvAtomicAdd(ptr, val.x);
detail::cvAtomicAdd(ptr + 1, val.y);
detail::cvAtomicAdd(ptr + 2, val.z);
detail::cvAtomicAdd(ptr + 3, val.w);
}
};
// Combines per-block partial sums into result[0].
// SM20+: thread 0 of each block atomically adds its partial into result.
// Older archs (no reliable float atomics): each block writes its partial to
// result[bid], then the last block to finish (decided via the
// blocks_finished ticket) re-reduces all partials in shared memory.
// NOTE(review): the pre-SM20 path assumes gridDim.x * gridDim.y <= BLOCK_SIZE
// so one block can load every partial — guaranteed by getLaunchCfg's caps.
template <int BLOCK_SIZE, typename R, int cn>
struct GlobalReduce
{
typedef typename TypeVec<R, cn>::vec_type result_type;
static __device__ void run(result_type& sum, result_type* result, int tid, int bid, R* smem)
{
#if __CUDA_ARCH__ >= 200
if (tid == 0)
AtomicAdd<R, cn>::run((R*) result, sum);
#else
__shared__ bool is_last;
if (tid == 0)
{
result[bid] = sum;
// Fence so the partial is visible before the ticket is taken.
__threadfence();
unsigned int ticket = ::atomicAdd(&blocks_finished, 1);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
// Threads beyond the block count contribute the identity (zero).
sum = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<result_type>::all(0);
device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(sum), tid, detail::Unroll<cn>::op(plus<R>()));
if (tid == 0)
{
result[0] = sum;
// Reset the ticket for the next kernel launch.
blocks_finished = 0;
}
}
#endif
}
};
// Sum-reduction kernel: each thread accumulates op(src) over a
// twidth x theight tile of masked pixels, the block reduces the per-thread
// sums in shared memory, and GlobalReduce merges the per-block partials.
// `result` must be zero-initialized by the host before launch.
template <int BLOCK_SIZE, typename src_type, typename result_type, class Mask, class Op>
__global__ void kernel(const PtrStepSz<src_type> src, result_type* result, const Mask mask, const Op op, const int twidth, const int theight)
{
typedef typename VecTraits<src_type>::elem_type T;
typedef typename VecTraits<result_type>::elem_type R;
const int cn = VecTraits<src_type>::cn;
// One BLOCK_SIZE segment per channel for the block-level reduction.
__shared__ R smem[BLOCK_SIZE * cn];
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
result_type sum = VecTraits<result_type>::all(0);
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const src_type* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
if (mask(y, x))
{
const src_type srcVal = ptr[x];
// op is identity / abs / square depending on the caller.
sum = sum + op(saturate_cast<result_type>(srcVal));
}
}
}
device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(sum), tid, detail::Unroll<cn>::op(plus<R>()));
GlobalReduce<BLOCK_SIZE, R, cn>::run(sum, result, tid, bid, smem);
}
// Fixed 32x8 block shape (256 threads, one warp per row).
const int threads_x = 32;
const int threads_y = 8;
// Picks grid/block dims; grid is capped at block.x x block.y (32x8) blocks
// so the per-block partial buffer stays small enough for the final pass.
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
// Reports the intermediate-buffer size (one double per channel per block).
void getBufSize(int cols, int rows, int cn, int& bufcols, int& bufrows)
{
dim3 block, grid;
getLaunchCfg(cols, rows, block, grid);
bufcols = grid.x * grid.y * sizeof(double) * cn;
bufrows = 1;
}
// Host driver: launches the sum kernel (masked or unmasked variant),
// synchronizes, and copies the final cn-channel result from buf[0] into
// out[0..3] (unused channels stay 0).
// NOTE(review): buf is not cleared here — callers are expected to pass a
// zeroed buffer, since the SM20+ path atomically adds into buf[0].
template <typename T, typename R, int cn, template <typename> class Op>
void caller(PtrStepSzb src_, void* buf_, double* out, PtrStepSzb mask)
{
typedef typename TypeVec<T, cn>::vec_type src_type;
typedef typename TypeVec<R, cn>::vec_type result_type;
PtrStepSz<src_type> src(src_);
result_type* buf = (result_type*) buf_;
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
// Tile extents so the capped grid still covers the whole image.
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
Op<result_type> op;
if (mask.data)
hipLaunchKernelGGL(( kernel<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, src, buf, SingleMask(mask), op, twidth, theight);
else
hipLaunchKernelGGL(( kernel<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, src, buf, WithOutMask(), op, twidth, theight);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
// Only the first result_type (cn channels) is meaningful.
cudaSafeCall( hipMemcpy(&result, buf, sizeof(result_type), hipMemcpyDeviceToHost) );
out[0] = result[0];
out[1] = result[1];
out[2] = result[2];
out[3] = result[3];
}
// SumType<T>: accumulator type wide enough to sum many T values
// (integral types widen to 32-bit int/uint; float/double keep their type).
template <typename T> struct SumType;
template <> struct SumType<uchar> { typedef unsigned int R; };
template <> struct SumType<schar> { typedef int R; };
template <> struct SumType<ushort> { typedef unsigned int R; };
template <> struct SumType<short> { typedef int R; };
template <> struct SumType<int> { typedef int R; };
template <> struct SumType<float> { typedef float R; };
template <> struct SumType<double> { typedef double R; };
// Plain sum entry point: sum of src values (identity transform).
template <typename T, int cn>
void run(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
{
typedef typename SumType<T>::R R;
caller<T, R, cn, identity>(src, buf, out, mask);
}
template void run<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
template void run<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
// Absolute-value sum entry point: sum of |src| values.
template <typename T, int cn>
void runAbs(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
{
typedef typename SumType<T>::R R;
caller<T, R, cn, abs_func>(src, buf, out, mask);
}
template void runAbs<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void runAbs<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
template void runAbs<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void runAbs<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
// Element-wise square functor used by the squared-sum reduction.
template <typename T> struct Sqr : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(T x) const
{
return x * x;
}
};
// Squared sum entry point: sum of src^2, always accumulated in double
// (squares of 16/32-bit values overflow integer accumulators quickly).
template <typename T, int cn>
void runSqr(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
{
caller<T, double, cn, Sqr>(src, buf, out, mask);
}
template void runSqr<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void runSqr<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
template void runSqr<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void runSqr<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
}
/////////////////////////////////////////////////////////////
// minMax
namespace minMax
{
__device__ unsigned int blocks_finished = 0;
// To avoid shared bank conflicts we convert each value into value of
// appropriate type (32 bits minimum)
template <typename T> struct MinMaxTypeTraits;
template <> struct MinMaxTypeTraits<uchar> { typedef int best_type; };
template <> struct MinMaxTypeTraits<schar> { typedef int best_type; };
template <> struct MinMaxTypeTraits<ushort> { typedef int best_type; };
template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
// Merges per-block (min, max) partials into minval[0] / maxval[0].
// SM20+: thread 0 of each block applies atomic min/max directly (the host
// must pre-initialize minval/maxval via setDefault).
// Older archs: blocks write partials; the last block to finish (via the
// blocks_finished ticket) re-reduces them in shared memory.
template <int BLOCK_SIZE, typename R>
struct GlobalReduce
{
static __device__ void run(R& mymin, R& mymax, R* minval, R* maxval, int tid, int bid, R* sminval, R* smaxval)
{
#if __CUDA_ARCH__ >= 200
if (tid == 0)
{
detail::cvAtomicMin(minval, mymin);
detail::cvAtomicMax(maxval, mymax);
}
#else
__shared__ bool is_last;
if (tid == 0)
{
minval[bid] = mymin;
maxval[bid] = mymax;
// Publish the partials before taking the ticket.
__threadfence();
unsigned int ticket = ::atomicAdd(&blocks_finished, 1);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
// Extra threads clamp to the last partial (harmless for min/max).
int idx = ::min(tid, gridDim.x * gridDim.y - 1);
mymin = minval[idx];
mymax = maxval[idx];
const minimum<R> minOp;
const maximum<R> maxOp;
device::reduce<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax), tid, thrust::make_tuple(minOp, maxOp));
if (tid == 0)
{
minval[0] = mymin;
maxval[0] = mymax;
// Reset the ticket for the next launch.
blocks_finished = 0;
}
}
#endif
}
};
// Min/max kernel: each thread scans a twidth x theight tile of masked
// pixels, the block reduces (min, max) pairs in shared memory, then
// GlobalReduce merges the per-block results.
template <int BLOCK_SIZE, typename T, typename R, class Mask>
__global__ void kernel(const PtrStepSz<T> src, const Mask mask, R* minval, R* maxval, const int twidth, const int theight)
{
__shared__ R sminval[BLOCK_SIZE];
__shared__ R smaxval[BLOCK_SIZE];
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
// Identities: +max for min, -max for max (covers float's full range too).
R mymin = numeric_limits<R>::max();
R mymax = -numeric_limits<R>::max();
const minimum<R> minOp;
const maximum<R> maxOp;
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const T* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
if (mask(y, x))
{
const R srcVal = ptr[x];
mymin = minOp(mymin, srcVal);
mymax = maxOp(mymax, srcVal);
}
}
}
device::reduce<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax), tid, thrust::make_tuple(minOp, maxOp));
GlobalReduce<BLOCK_SIZE, R>::run(mymin, mymax, minval, maxval, tid, bid, sminval, smaxval);
}
// Fixed 32x8 block shape (256 threads), same scheme as the sum namespace.
const int threads_x = 32;
const int threads_y = 8;
// Grid capped at 32x8 blocks so all partials fit one block's final pass.
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
// Buffer layout: row 0 holds per-block mins, row 1 per-block maxes
// (sized for the widest element type, double).
void getBufSize(int cols, int rows, int& bufcols, int& bufrows)
{
dim3 block, grid;
getLaunchCfg(cols, rows, block, grid);
bufcols = grid.x * grid.y * sizeof(double);
bufrows = 2;
}
// Single-thread kernels that seed buf[0] with the reduction identities
// (needed because the SM20+ path atomically min/maxes into buf[0]).
__global__ void setDefaultKernel(int* minval_buf, int* maxval_buf)
{
*minval_buf = numeric_limits<int>::max();
*maxval_buf = numeric_limits<int>::min();
}
__global__ void setDefaultKernel(float* minval_buf, float* maxval_buf)
{
// -max, not min(): float min() is the smallest positive value.
*minval_buf = numeric_limits<float>::max();
*maxval_buf = -numeric_limits<float>::max();
}
__global__ void setDefaultKernel(double* minval_buf, double* maxval_buf)
{
*minval_buf = numeric_limits<double>::max();
*maxval_buf = -numeric_limits<double>::max();
}
// Host helper: launches the <<<1,1>>> seeding kernel for type R.
template <typename R>
void setDefault(R* minval_buf, R* maxval_buf)
{
hipLaunchKernelGGL(( setDefaultKernel), dim3(1), dim3(1), 0, 0, minval_buf, maxval_buf);
}
// Host driver for min/max: seeds buf rows with identities, launches the
// kernel (masked or unmasked), synchronizes, and copies buf[0] of each row
// back into *minval / *maxval as doubles.
template <typename T>
void run(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf)
{
// R is a >=32-bit working type to avoid shared-memory bank conflicts.
typedef typename MinMaxTypeTraits<T>::best_type R;
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
// buf row 0 = mins, row 1 = maxes (see getBufSize).
R* minval_buf = (R*) buf.ptr(0);
R* maxval_buf = (R*) buf.ptr(1);
setDefault(minval_buf, maxval_buf);
if (mask.data)
hipLaunchKernelGGL(( kernel<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>) src, SingleMask(mask), minval_buf, maxval_buf, twidth, theight);
else
hipLaunchKernelGGL(( kernel<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>) src, WithOutMask(), minval_buf, maxval_buf, twidth, theight);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
R minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(R), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(R), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void run<uchar >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<schar >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<ushort>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<short >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<int >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
#endif
template void run<float >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<double>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
#endif
}
/////////////////////////////////////////////////////////////
// minMaxLoc
namespace minMaxLoc
{
// To avoid shared bank conflicts we convert each value into value of
// appropriate type (32 bits minimum)
template <typename T> struct MinMaxTypeTraits;
template <> struct MinMaxTypeTraits<unsigned char> { typedef int best_type; };
template <> struct MinMaxTypeTraits<signed char> { typedef int best_type; };
template <> struct MinMaxTypeTraits<unsigned short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
// Pass 1 of minMaxLoc: each block computes its local (min, max) together
// with the flattened pixel index (y * cols + x) of each extremum, and writes
// one partial per block into minval/maxval/minloc/maxloc at index bid.
// Pass 2 re-reduces those partials in a single block.
template <int BLOCK_SIZE, typename T, class Mask>
__global__ void kernel_pass_1(const PtrStepSz<T> src, const Mask mask, T* minval, T* maxval, unsigned int* minloc, unsigned int* maxloc, const int twidth, const int theight)
{
// work_type is >=32 bits to avoid shared-memory bank conflicts.
typedef typename MinMaxTypeTraits<T>::best_type work_type;
__shared__ work_type sminval[BLOCK_SIZE];
__shared__ work_type smaxval[BLOCK_SIZE];
__shared__ unsigned int sminloc[BLOCK_SIZE];
__shared__ unsigned int smaxloc[BLOCK_SIZE];
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
work_type mymin = numeric_limits<work_type>::max();
work_type mymax = -numeric_limits<work_type>::max();
unsigned int myminloc = 0;
unsigned int mymaxloc = 0;
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const T* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
if (mask(y, x))
{
const work_type srcVal = ptr[x];
// Strict comparisons: first occurrence (in this thread's scan
// order) wins on ties.
if (srcVal < mymin)
{
mymin = srcVal;
myminloc = y * src.cols + x;
}
if (srcVal > mymax)
{
mymax = srcVal;
mymaxloc = y * src.cols + x;
}
}
}
}
// Key/value reduction: locations travel with their min/max values.
reduceKeyVal<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax),
smem_tuple(sminloc, smaxloc), thrust::tie(myminloc, mymaxloc),
tid,
thrust::make_tuple(less<work_type>(), greater<work_type>()));
if (tid == 0)
{
minval[bid] = (T) mymin;
maxval[bid] = (T) mymax;
minloc[bid] = myminloc;
maxloc[bid] = mymaxloc;
}
}
// Pass 2 of minMaxLoc: a single block re-reduces the `count` per-block
// partials from pass 1 and writes the final result to index 0 of each
// array. Launched with one BLOCK_SIZE-thread block.
template <int BLOCK_SIZE, typename T>
__global__ void kernel_pass_2(T* minval, T* maxval, unsigned int* minloc, unsigned int* maxloc, int count)
{
typedef typename MinMaxTypeTraits<T>::best_type work_type;
__shared__ work_type sminval[BLOCK_SIZE];
__shared__ work_type smaxval[BLOCK_SIZE];
__shared__ unsigned int sminloc[BLOCK_SIZE];
__shared__ unsigned int smaxloc[BLOCK_SIZE];
// Threads beyond `count` re-read the last partial (harmless for min/max).
unsigned int idx = ::min(threadIdx.x, count - 1);
work_type mymin = minval[idx];
work_type mymax = maxval[idx];
unsigned int myminloc = minloc[idx];
unsigned int mymaxloc = maxloc[idx];
reduceKeyVal<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax),
smem_tuple(sminloc, smaxloc), thrust::tie(myminloc, mymaxloc),
threadIdx.x,
thrust::make_tuple(less<work_type>(), greater<work_type>()));
if (threadIdx.x == 0)
{
minval[0] = (T) mymin;
maxval[0] = (T) mymax;
minloc[0] = myminloc;
maxloc[0] = mymaxloc;
}
}
// Launch shape for minMaxLoc: 32x8 = 256 threads per block.
const int threads_x = 32;
const int threads_y = 8;
// Chooses block/grid dims for a cols x rows image. Each grid cell initially
// covers 256 elements per axis; the grid is then capped at 32x8 blocks, and
// run() compensates by computing per-thread tile counts (twidth/theight).
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
// Reports the shapes of the two scratch buffers run() needs: a 2-row buffer
// of per-block extreme values (row 0 = minima, row 1 = maxima, element-typed)
// and a 2-row buffer of per-block locations (one 32-bit index per block).
void getBufSize(int cols, int rows, size_t elem_size, int& b1cols, int& b1rows, int& b2cols, int& b2rows)
{
    dim3 threads, blocks;
    getLaunchCfg(cols, rows, threads, blocks);

    const unsigned int num_blocks = blocks.x * blocks.y;

    // Value buffer: one element of the source type per launched block.
    b1cols = (int)(num_blocks * elem_size);
    b1rows = 2;

    // Location buffer: one flattened index per launched block.
    b2cols = (int)(num_blocks * sizeof(int));
    b2rows = 2;
}
// Host entry point for minMaxLoc on a single-channel image.
// valbuf rows 0/1 hold per-block min/max values, locbuf rows 0/1 the
// matching flattened locations. Outputs: *minval/*maxval, and minloc/maxloc
// as (x, y) pairs - index 0 is the column, index 1 the row.
template <typename T>
void run(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf)
{
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
// Per-thread tile counts so the capped grid still covers the whole image.
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
T* minval_buf = (T*) valbuf.ptr(0);
T* maxval_buf = (T*) valbuf.ptr(1);
unsigned int* minloc_buf = locbuf.ptr(0);
unsigned int* maxloc_buf = locbuf.ptr(1);
// Masked and unmasked variants differ only in the mask functor type.
if (mask.data)
hipLaunchKernelGGL(( kernel_pass_1<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>) src, SingleMask(mask), minval_buf, maxval_buf, minloc_buf, maxloc_buf, twidth, theight);
else
hipLaunchKernelGGL(( kernel_pass_1<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>) src, WithOutMask(), minval_buf, maxval_buf, minloc_buf, maxloc_buf, twidth, theight);
cudaSafeCall( hipGetLastError() );
// Single-block pass 2 finalizes element 0 of every buffer.
hipLaunchKernelGGL(( kernel_pass_2<threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0, minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
unsigned int minloc_, maxloc_;
cudaSafeCall( hipMemcpy(&minloc_, minloc_buf, sizeof(unsigned int), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxloc_, maxloc_buf, sizeof(unsigned int), hipMemcpyDeviceToHost) );
// Unflatten the row-major indices back to (x, y).
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
// Explicit instantiations; the tiny-GPU build keeps only the common types.
template void run<unsigned char >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<signed char >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<unsigned short>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<short >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
#endif
template void run<int >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<float >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<double>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
#endif
}
/////////////////////////////////////////////////////////////
// countNonZero
namespace countNonZero
{
// Ticket counter for the pre-Fermi path below: the last block to finish
// performs the final fold of the per-block partial counts.
__device__ unsigned int blocks_finished = 0;
// Counts non-zero elements of `src`. Each block accumulates over its tile
// range and reduces in shared memory; cross-block combination uses a single
// global atomicAdd on sm_20+, or a last-block-finalizes scheme otherwise.
template <int BLOCK_SIZE, typename T>
__global__ void kernel(const PtrStepSz<T> src, unsigned int* count, const int twidth, const int theight)
{
__shared__ unsigned int scount[BLOCK_SIZE];
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int mycount = 0;
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const T* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
const T srcVal = ptr[x];
mycount += (srcVal != 0);
}
}
device::reduce<BLOCK_SIZE>(scount, mycount, tid, plus<unsigned int>());
#if __CUDA_ARCH__ >= 200
if (tid == 0)
::atomicAdd(count, mycount);
#else
__shared__ bool is_last;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
if (tid == 0)
{
// Publish this block's partial, then take a ticket; the fence makes
// the partial visible before any block can observe the ticket value.
count[bid] = mycount;
__threadfence();
unsigned int ticket = ::atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
// getLaunchCfg caps the grid at 32x8 = 256 = BLOCK_SIZE blocks, so
// one block can load every partial with a single read per thread.
mycount = tid < gridDim.x * gridDim.y ? count[tid] : 0;
device::reduce<BLOCK_SIZE>(scount, mycount, tid, plus<unsigned int>());
if (tid == 0)
{
count[0] = mycount;
// Reset the ticket for the next launch.
blocks_finished = 0;
}
}
#endif
}
// Launch shape for countNonZero: 32x8 = 256 threads per block.
const int threads_x = 32;
const int threads_y = 8;
// Same scheme as minMaxLoc: grid capped at 32x8 blocks, with run()
// computing per-thread tile counts to cover the remainder.
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
// Scratch-buffer shape for countNonZero: a single row holding one 32-bit
// counter per launched block (the pre-Fermi kernel path stores one partial
// per block; on sm_20+ only element 0 is used).
void getBufSize(int cols, int rows, int& bufcols, int& bufrows)
{
    dim3 threads, blocks;
    getLaunchCfg(cols, rows, threads, blocks);

    bufrows = 1;
    bufcols = (int)(blocks.x * blocks.y * sizeof(int));
}
// Host wrapper: zeroes the accumulator, launches the counting kernel over
// the whole image, and reads back the total.
template <typename T>
int run(const PtrStepSzb src, PtrStep<unsigned int> buf)
{
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
unsigned int* count_buf = buf.ptr(0);
// Only element 0 needs clearing: the sm_20+ path atomically accumulates
// into it, while the fallback path overwrites its per-block slots first.
cudaSafeCall( hipMemset(count_buf, 0, sizeof(unsigned int)) );
hipLaunchKernelGGL(( kernel<threads_x * threads_y>), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>) src, count_buf, twidth, theight);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
unsigned int count;
cudaSafeCall(hipMemcpy(&count, count_buf, sizeof(unsigned int), hipMemcpyDeviceToHost));
return count;
}
// Explicit instantiations; the tiny-GPU build keeps uchar and float only.
template int run<uchar >(const PtrStepSzb src, PtrStep<unsigned int> buf);
#ifndef OPENCV_TINY_GPU_MODULE
template int run<schar >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<ushort>(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<short >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<int >(const PtrStepSzb src, PtrStep<unsigned int> buf);
#endif
template int run<float >(const PtrStepSzb src, PtrStep<unsigned int> buf);
#ifndef OPENCV_TINY_GPU_MODULE
template int run<double>(const PtrStepSzb src, PtrStep<unsigned int> buf);
#endif
}
//////////////////////////////////////////////////////////////////////////////
// reduce
namespace reduce
{
// Reduction functors for the row/column reductions below. Each supplies the
// fold identity (startValue), the binary combine (operator()), and a
// finalizer (result) that receives the reduced extent (row/column length).
struct Sum
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
// Sum ignores the extent: the accumulated value is the answer.
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Sum() {}
__device__ __forceinline__ Sum(const Sum&) {}
};
struct Avg
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
// Average divides the accumulated sum by the extent, in double precision.
template <typename T>
__device__ __forceinline__ typename TypeVec<double, VecTraits<T>::cn>::vec_type result(T r, double sz) const
{
return r / sz;
}
__device__ __forceinline__ Avg() {}
__device__ __forceinline__ Avg(const Avg&) {}
};
struct Min
{
// Identity for min: the largest representable element value.
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
minimum<T> minOp;
return minOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Min() {}
__device__ __forceinline__ Min(const Min&) {}
};
struct Max
{
// Identity for max: the negated largest representable element value.
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(-numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
maximum<T> maxOp;
return maxOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Max() {}
__device__ __forceinline__ Max(const Max&) {}
};
///////////////////////////////////////////////////////////
// Collapses the rows of `src`, producing one output element per column.
// A 16x16 block handles 16 columns: the 16 threads sharing a threadIdx.x
// stride down one column 16 rows apart; their partials are then combined
// through a transposed shared-memory tile so each block row reduces one
// column with the generic reduce<16> primitive.
template <typename T, typename S, typename D, class Op>
__global__ void rowsKernel(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[16 * 16];
const int x = blockIdx.x * 16 + threadIdx.x;
S myVal = op.template startValue<S>();
if (x < src.cols)
{
for (int y = threadIdx.y; y < src.rows; y += 16)
{
S srcVal = src(y, x);
myVal = op(myVal, srcVal);
}
}
// Transposed store: column-x partials land in shared-memory row x.
smem[threadIdx.x * 16 + threadIdx.y] = myVal;
__syncthreads();
volatile S* srow = smem + threadIdx.y * 16;
myVal = srow[threadIdx.x];
device::reduce<16>(srow, myVal, threadIdx.x, op);
if (threadIdx.x == 0)
srow[0] = myVal;
__syncthreads();
// Block row 0 finalizes (e.g. divides by row count for Avg) and writes.
if (threadIdx.y == 0 && x < src.cols)
dst[x] = (D) op.result(smem[threadIdx.x * 16], src.rows);
}
// Launches rowsKernel with one 16x16 block per 16 columns; synchronizes only
// when running on the default (null) stream.
template <typename T, typename S, typename D, class Op>
void rowsCaller(PtrStepSz<T> src, D* dst, hipStream_t stream)
{
const dim3 block(16, 16);
const dim3 grid(divUp(src.cols, block.x));
Op op;
hipLaunchKernelGGL(( rowsKernel<T, S, D, Op>), dim3(grid), dim3(block), 0, stream, src, dst, op);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// Type-erased dispatcher over the reduce operation. The table order is
// Sum, Avg, Max, Min - `op` must follow the caller's enum in that order.
// NOTE(review): no bounds check on `op`; out-of-range values index past the
// table, as in the surrounding code - confirm callers validate it.
template <typename T, typename S, typename D>
void rows(PtrStepSzb src, void* dst, int op, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSz<T> src, D* dst, hipStream_t stream);
static const func_t funcs[] =
{
rowsCaller<T, S, D, Sum>,
rowsCaller<T, S, D, Avg>,
rowsCaller<T, S, D, Max>,
rowsCaller<T, S, D, Min>
};
funcs[op]((PtrStepSz<T>) src, (D*) dst, stream);
}
// Explicit instantiations as (source, accumulator, destination) triples;
// the tiny-GPU build keeps only the uchar/float combinations.
#ifdef OPENCV_TINY_GPU_MODULE
template void rows<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<float, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
#else
template void rows<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, int, short>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<float, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<float, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<double, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
#endif
///////////////////////////////////////////////////////////
// Collapses the columns of `src` (cn interleaved channels), producing one
// output element per row. One block per row: BLOCK_SIZE threads stride
// across the row and a channel-wise tuple reduction folds the partials.
template <int BLOCK_SIZE, typename T, typename S, typename D, int cn, class Op>
__global__ void colsKernel(const PtrStepSz<typename TypeVec<T, cn>::vec_type> src, typename TypeVec<D, cn>::vec_type* dst, const Op op)
{
typedef typename TypeVec<T, cn>::vec_type src_type;
typedef typename TypeVec<S, cn>::vec_type work_type;
typedef typename TypeVec<D, cn>::vec_type dst_type;
// One BLOCK_SIZE-sized shared-memory lane per channel (see detail::Unroll).
__shared__ S smem[BLOCK_SIZE * cn];
const int y = blockIdx.x;
const src_type* srcRow = src.ptr(y);
work_type myVal = op.template startValue<work_type>();
for (int x = threadIdx.x; x < src.cols; x += BLOCK_SIZE)
myVal = op(myVal, saturate_cast<work_type>(srcRow[x]));
device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(myVal), threadIdx.x, detail::Unroll<cn>::op(op));
// Thread 0 finalizes with the row length (used by Avg) and writes.
if (threadIdx.x == 0)
dst[y] = saturate_cast<dst_type>(op.result(myVal, src.cols));
}
// Launches colsKernel with one 256-thread block per image row; synchronizes
// only on the default (null) stream.
template <typename T, typename S, typename D, int cn, class Op> void colsCaller(PtrStepSzb src, void* dst, hipStream_t stream)
{
const int BLOCK_SIZE = 256;
const dim3 block(BLOCK_SIZE);
const dim3 grid(src.rows);
Op op;
hipLaunchKernelGGL(( colsKernel<BLOCK_SIZE, T, S, D, cn, Op>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<typename TypeVec<T, cn>::vec_type>) src, (typename TypeVec<D, cn>::vec_type*) dst, op);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
// Type-erased dispatcher over channel count (1..4) and reduce op (same
// Sum, Avg, Max, Min ordering as rows()). Row 0 of the table is a null-filled
// placeholder so `cn` can be used directly as an index.
template <typename T, typename S, typename D> void cols(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, void* dst, hipStream_t stream);
static const func_t funcs[5][4] =
{
{0,0,0,0},
{colsCaller<T, S, D, 1, Sum>, colsCaller<T, S, D, 1, Avg>, colsCaller<T, S, D, 1, Max>, colsCaller<T, S, D, 1, Min>},
{colsCaller<T, S, D, 2, Sum>, colsCaller<T, S, D, 2, Avg>, colsCaller<T, S, D, 2, Max>, colsCaller<T, S, D, 2, Min>},
{colsCaller<T, S, D, 3, Sum>, colsCaller<T, S, D, 3, Avg>, colsCaller<T, S, D, 3, Max>, colsCaller<T, S, D, 3, Min>},
{colsCaller<T, S, D, 4, Sum>, colsCaller<T, S, D, 4, Avg>, colsCaller<T, S, D, 4, Max>, colsCaller<T, S, D, 4, Min>},
};
funcs[cn][op](src, dst, stream);
}
// Explicit instantiations mirroring rows(); tiny-GPU build keeps uchar/float.
#ifdef OPENCV_TINY_GPU_MODULE
template void cols<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<float, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
#else
template void cols<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, int, short>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<float, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<float, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<double, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
#endif
}
#endif /* CUDA_DISABLER */
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/reduce.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/type_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace detail
{
// Atomic-add overloads that paper over compute-capability gaps: native
// atomics where the hardware supports them, atomicCAS emulation otherwise.
__device__ __forceinline__ int cvAtomicAdd(int* address, int val)
{
return ::atomicAdd(address, val);
}
__device__ __forceinline__ unsigned int cvAtomicAdd(unsigned int* address, unsigned int val)
{
return ::atomicAdd(address, val);
}
__device__ __forceinline__ float cvAtomicAdd(float* address, float val)
{
#if __CUDA_ARCH__ >= 200
return ::atomicAdd(address, val);
#else
// CAS loop: reinterpret the float bits as int and retry until no other
// thread intervened; returns the previous value like a native atomic.
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(val + __int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
#endif
}
__device__ __forceinline__ double cvAtomicAdd(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
// 64-bit CAS loop over the double's bit pattern.
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
#else
// No 64-bit atomicCAS before sm_13: deliberately a no-op returning 0.
(void) address;
(void) val;
return 0.0;
#endif
}
// Atomic-min overloads; float/double use atomicCAS emulation since CUDA has
// no native floating-point atomicMin.
__device__ __forceinline__ int cvAtomicMin(int* address, int val)
{
return ::atomicMin(address, val);
}
__device__ __forceinline__ float cvAtomicMin(float* address, float val)
{
#if __CUDA_ARCH__ >= 120
// CAS loop over the float's bit pattern; fminf picks the new candidate.
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
#else
// Pre-sm_12: deliberately a no-op returning 0.
(void) address;
(void) val;
return 0.0f;
#endif
}
__device__ __forceinline__ double cvAtomicMin(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(::fmin(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
#else
(void) address;
(void) val;
return 0.0;
#endif
}
// Atomic-max overloads; mirrors cvAtomicMin with fmaxf/fmax.
__device__ __forceinline__ int cvAtomicMax(int* address, int val)
{
return ::atomicMax(address, val);
}
__device__ __forceinline__ float cvAtomicMax(float* address, float val)
{
#if __CUDA_ARCH__ >= 120
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
#else
// Pre-sm_12: deliberately a no-op returning 0.
(void) address;
(void) val;
return 0.0f;
#endif
}
__device__ __forceinline__ double cvAtomicMax(double* address, double val)
{
#if __CUDA_ARCH__ >= 130
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_ull, assumed,
__double_as_longlong(::fmax(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
#else
(void) address;
(void) val;
return 0.0;
#endif
}
}
namespace detail
{
// Unroll<cn> adapts the scalar block-reduce primitive to cn-channel vector
// types: smem_tuple lays out one BLOCK_SIZE-sized shared-memory lane per
// channel, tie() exposes the vector's components as a reference tuple, and
// op() replicates the scalar operator once per channel.
template <int cn> struct Unroll;
template <> struct Unroll<1>
{
// Single channel degenerates to the plain scalar forms.
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ volatile R* smem_tuple(R* smem)
{
return smem;
}
template <typename R>
static __device__ __forceinline__ R& tie(R& val)
{
return val;
}
template <class Op>
static __device__ __forceinline__ const Op& op(const Op& op)
{
return op;
}
};
template <> struct Unroll<2>
{
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*> smem_tuple(R* smem)
{
return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE);
}
template <typename R>
static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
{
return thrust::tie(val.x, val.y);
}
template <class Op>
static __device__ __forceinline__ const thrust::tuple<Op, Op> op(const Op& op)
{
return thrust::make_tuple(op, op);
}
};
template <> struct Unroll<3>
{
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*, volatile R*> smem_tuple(R* smem)
{
return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE);
}
template <typename R>
static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
{
return thrust::tie(val.x, val.y, val.z);
}
template <class Op>
static __device__ __forceinline__ const thrust::tuple<Op, Op, Op> op(const Op& op)
{
return thrust::make_tuple(op, op, op);
}
};
template <> struct Unroll<4>
{
template <int BLOCK_SIZE, typename R>
static __device__ __forceinline__ thrust::tuple<volatile R*, volatile R*, volatile R*, volatile R*> smem_tuple(R* smem)
{
return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE);
}
template <typename R>
static __device__ __forceinline__ thrust::tuple<typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&, typename VecTraits<R>::elem_type&> tie(R& val)
{
return thrust::tie(val.x, val.y, val.z, val.w);
}
template <class Op>
static __device__ __forceinline__ const thrust::tuple<Op, Op, Op, Op> op(const Op& op)
{
return thrust::make_tuple(op, op, op, op);
}
};
}
/////////////////////////////////////////////////////////////
// sum
namespace sum
{
// Ticket counter for the pre-Fermi path in GlobalReduce below.
__device__ unsigned int blocks_finished = 0;
// AtomicAdd<R, cn>: atomically adds a cn-channel vector into cn consecutive
// scalars, one cvAtomicAdd per channel.
template <typename R, int cn> struct AtomicAdd;
template <typename R> struct AtomicAdd<R, 1>
{
static __device__ void run(R* ptr, R val)
{
detail::cvAtomicAdd(ptr, val);
}
};
template <typename R> struct AtomicAdd<R, 2>
{
typedef typename TypeVec<R, 2>::vec_type val_type;
static __device__ void run(R* ptr, val_type val)
{
detail::cvAtomicAdd(ptr, val.x);
detail::cvAtomicAdd(ptr + 1, val.y);
}
};
template <typename R> struct AtomicAdd<R, 3>
{
typedef typename TypeVec<R, 3>::vec_type val_type;
static __device__ void run(R* ptr, val_type val)
{
detail::cvAtomicAdd(ptr, val.x);
detail::cvAtomicAdd(ptr + 1, val.y);
detail::cvAtomicAdd(ptr + 2, val.z);
}
};
template <typename R> struct AtomicAdd<R, 4>
{
typedef typename TypeVec<R, 4>::vec_type val_type;
static __device__ void run(R* ptr, val_type val)
{
detail::cvAtomicAdd(ptr, val.x);
detail::cvAtomicAdd(ptr + 1, val.y);
detail::cvAtomicAdd(ptr + 2, val.z);
detail::cvAtomicAdd(ptr + 3, val.w);
}
};
// Folds per-block partial sums into result[0]. On sm_20+ each block's leader
// atomically adds its partial into the result; older devices publish one
// partial per block and the last block to finish (tracked via the
// blocks_finished ticket) performs the final reduction.
template <int BLOCK_SIZE, typename R, int cn>
struct GlobalReduce
{
typedef typename TypeVec<R, cn>::vec_type result_type;
static __device__ void run(result_type& sum, result_type* result, int tid, int bid, R* smem)
{
#if __CUDA_ARCH__ >= 200
// Assumes result[0] was zeroed before launch - the atomic path only
// accumulates. NOTE(review): confirm the host wrapper clears the buffer.
if (tid == 0)
AtomicAdd<R, cn>::run((R*) result, sum);
#else
__shared__ bool is_last;
if (tid == 0)
{
// Publish this block's partial, fence, then take a ticket so the
// last block is guaranteed to see every published partial.
result[bid] = sum;
__threadfence();
unsigned int ticket = ::atomicAdd(&blocks_finished, 1);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
// Grid is capped at BLOCK_SIZE blocks (see getLaunchCfg), so one
// read per thread covers all partials.
sum = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<result_type>::all(0);
device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(sum), tid, detail::Unroll<cn>::op(plus<R>()));
if (tid == 0)
{
result[0] = sum;
// Reset the ticket for the next launch.
blocks_finished = 0;
}
}
#endif
}
};
// Accumulation kernel shared by sum/absSum/sqrSum: each block sums op(x)
// over its twidth x theight tile range (respecting `mask`), reduces in
// shared memory channel-wise, then combines across blocks via GlobalReduce.
template <int BLOCK_SIZE, typename src_type, typename result_type, class Mask, class Op>
__global__ void kernel(const PtrStepSz<src_type> src, result_type* result, const Mask mask, const Op op, const int twidth, const int theight)
{
typedef typename VecTraits<src_type>::elem_type T;
typedef typename VecTraits<result_type>::elem_type R;
const int cn = VecTraits<src_type>::cn;
// One BLOCK_SIZE-sized shared lane per channel (see detail::Unroll).
__shared__ R smem[BLOCK_SIZE * cn];
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
result_type sum = VecTraits<result_type>::all(0);
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const src_type* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
if (mask(y, x))
{
const src_type srcVal = ptr[x];
// op is identity for sum, abs for absSum, square for sqrSum.
sum = sum + op(saturate_cast<result_type>(srcVal));
}
}
}
device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(sum), tid, detail::Unroll<cn>::op(plus<R>()));
GlobalReduce<BLOCK_SIZE, R, cn>::run(sum, result, tid, bid, smem);
}
// Launch shape for sum: 32x8 = 256 threads per block.
const int threads_x = 32;
const int threads_y = 8;
// Same scheme as the other reductions: grid capped at 32x8 blocks, with the
// caller computing per-thread tile counts to cover the remainder.
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
// Scratch-buffer shape for sum: a single row with one double-precision
// accumulator per channel per launched block (double covers the widest
// result_type any instantiation uses).
void getBufSize(int cols, int rows, int cn, int& bufcols, int& bufrows)
{
    dim3 threads, blocks;
    getLaunchCfg(cols, rows, threads, blocks);

    bufrows = 1;
    bufcols = (int)(blocks.x * blocks.y * sizeof(double) * cn);
}
// Shared host driver for sum/absSum/sqrSum: launches the accumulation kernel
// (masked or unmasked), then copies the finalized result_type from buf[0]
// into up to four doubles in `out` (unused channels stay 0).
template <typename T, typename R, int cn, template <typename> class Op>
void caller(PtrStepSzb src_, void* buf_, double* out, PtrStepSzb mask)
{
typedef typename TypeVec<T, cn>::vec_type src_type;
typedef typename TypeVec<R, cn>::vec_type result_type;
PtrStepSz<src_type> src(src_);
result_type* buf = (result_type*) buf_;
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
Op<result_type> op;
// NOTE(review): buf is not cleared here; the sm_20+ atomic path in
// GlobalReduce accumulates into buf[0], so the caller must zero the
// buffer before invoking this - confirm against the host wrapper.
if (mask.data)
kernel<threads_x * threads_y><<<grid, block>>>(src, buf, SingleMask(mask), op, twidth, theight);
else
kernel<threads_x * threads_y><<<grid, block>>>(src, buf, WithOutMask(), op, twidth, theight);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
// result_type has at most 4 channels, so an R[4] staging array suffices;
// only sizeof(result_type) bytes are actually copied back.
R result[4] = {0, 0, 0, 0};
cudaSafeCall( cudaMemcpy(&result, buf, sizeof(result_type), cudaMemcpyDeviceToHost) );
out[0] = result[0];
out[1] = result[1];
out[2] = result[2];
out[3] = result[3];
}
// SumType<T>: accumulator type wide enough for summing T values - integer
// sources accumulate in 32-bit, floating types in their own precision.
template <typename T> struct SumType;
template <> struct SumType<uchar> { typedef unsigned int R; };
template <> struct SumType<schar> { typedef int R; };
template <> struct SumType<ushort> { typedef unsigned int R; };
template <> struct SumType<short> { typedef int R; };
template <> struct SumType<int> { typedef int R; };
template <> struct SumType<float> { typedef float R; };
template <> struct SumType<double> { typedef double R; };
// Plain sum entry point: accumulate the source values unchanged.
template <typename T, int cn>
void run(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
{
typedef typename SumType<T>::R R;
caller<T, R, cn, identity>(src, buf, out, mask);
}
// Explicit instantiations per (type, channel count); tiny-GPU build keeps
// only single-channel uchar and float.
template void run<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
template void run<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void run<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
// Sum of absolute values: same accumulator selection as run(), but every
// element is passed through abs_func before being accumulated.
template <typename T, int cn>
void runAbs(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
{
    caller<T, typename SumType<T>::R, cn, abs_func>(src, buf, out, mask);
}
// Explicit instantiations; OPENCV_TINY_GPU_MODULE builds keep only the
// uchar and float single-channel variants.
template void runAbs<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void runAbs<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
template void runAbs<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void runAbs<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runAbs<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
// Unary device functor that squares its argument; element transform for the
// sum-of-squares reduction.
template <typename T> struct Sqr : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(T x) const
{
return x * x;
}
};
// Sum of squares: always accumulates in double, regardless of the source
// element type T (squares of wide integer values would overflow narrower
// accumulator types).
template <typename T, int cn>
void runSqr(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
{
caller<T, double, cn, Sqr>(src, buf, out, mask);
}
// Explicit instantiations; OPENCV_TINY_GPU_MODULE builds keep only the
// uchar and float single-channel variants.
template void runSqr<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void runSqr<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
template void runSqr<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#ifndef OPENCV_TINY_GPU_MODULE
template void runSqr<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
template void runSqr<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
#endif
}
/////////////////////////////////////////////////////////////
// minMax
// Computes the global minimum and maximum of an image in one pass.
namespace minMax
{
// Completion counter for the pre-CC-2.0 two-phase reduction path: the last
// block to finish performs the final reduction and resets this to 0.
__device__ unsigned int blocks_finished = 0;
// To avoid shared bank conflicts we convert each value into value of
// appropriate type (32 bits minimum)
template <typename T> struct MinMaxTypeTraits;
template <> struct MinMaxTypeTraits<uchar> { typedef int best_type; };
template <> struct MinMaxTypeTraits<schar> { typedef int best_type; };
template <> struct MinMaxTypeTraits<ushort> { typedef int best_type; };
template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
// Folds each block's partial min/max into the global result: a single
// atomic per block on CC >= 2.0, otherwise a ticket-based scheme where the
// last block alive re-reduces all per-block partials.
template <int BLOCK_SIZE, typename R>
struct GlobalReduce
{
static __device__ void run(R& mymin, R& mymax, R* minval, R* maxval, int tid, int bid, R* sminval, R* smaxval)
{
#if __CUDA_ARCH__ >= 200
if (tid == 0)
{
detail::cvAtomicMin(minval, mymin);
detail::cvAtomicMax(maxval, mymax);
}
#else
__shared__ bool is_last;
if (tid == 0)
{
// Publish this block's partials, then make them visible to other
// blocks before taking a completion ticket.
minval[bid] = mymin;
maxval[bid] = mymax;
__threadfence();
unsigned int ticket = ::atomicAdd(&blocks_finished, 1);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
// Threads beyond the block count re-read the last partial, which is
// harmless for a min/max reduction.
int idx = ::min(tid, gridDim.x * gridDim.y - 1);
mymin = minval[idx];
mymax = maxval[idx];
const minimum<R> minOp;
const maximum<R> maxOp;
device::reduce<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax), tid, thrust::make_tuple(minOp, maxOp));
if (tid == 0)
{
minval[0] = mymin;
maxval[0] = mymax;
// Reset for the next kernel launch.
blocks_finished = 0;
}
}
#endif
}
};
// Each thread scans a twidth x theight tile of the image, the block reduces
// the per-thread candidates in shared memory, and GlobalReduce combines the
// per-block results.
template <int BLOCK_SIZE, typename T, typename R, class Mask>
__global__ void kernel(const PtrStepSz<T> src, const Mask mask, R* minval, R* maxval, const int twidth, const int theight)
{
__shared__ R sminval[BLOCK_SIZE];
__shared__ R smaxval[BLOCK_SIZE];
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
// Identity values: masked-out or empty tiles cannot affect the result.
R mymin = numeric_limits<R>::max();
R mymax = -numeric_limits<R>::max();
const minimum<R> minOp;
const maximum<R> maxOp;
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const T* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
if (mask(y, x))
{
const R srcVal = ptr[x];
mymin = minOp(mymin, srcVal);
mymax = maxOp(mymax, srcVal);
}
}
}
// Simultaneous block-wide min and max reduction in shared memory.
device::reduce<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax), tid, thrust::make_tuple(minOp, maxOp));
GlobalReduce<BLOCK_SIZE, R>::run(mymin, mymax, minval, maxval, tid, bid, sminval, smaxval);
}
const int threads_x = 32;
const int threads_y = 8;
// Launch configuration: the grid is capped at block.x x block.y blocks, so
// each thread may have to process several pixels (hence twidth/theight in run()).
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
// Scratch buffer layout: one row of per-block partials for min and one for
// max, sized for the widest accumulator type (double).
void getBufSize(int cols, int rows, int& bufcols, int& bufrows)
{
dim3 block, grid;
getLaunchCfg(cols, rows, block, grid);
bufcols = grid.x * grid.y * sizeof(double);
bufrows = 2;
}
// Seed the result slots with the identity values for the min/max reduction.
__global__ void setDefaultKernel(int* minval_buf, int* maxval_buf)
{
*minval_buf = numeric_limits<int>::max();
*maxval_buf = numeric_limits<int>::min();
}
__global__ void setDefaultKernel(float* minval_buf, float* maxval_buf)
{
*minval_buf = numeric_limits<float>::max();
*maxval_buf = -numeric_limits<float>::max();
}
__global__ void setDefaultKernel(double* minval_buf, double* maxval_buf)
{
*minval_buf = numeric_limits<double>::max();
*maxval_buf = -numeric_limits<double>::max();
}
template <typename R>
void setDefault(R* minval_buf, R* maxval_buf)
{
setDefaultKernel<<<1, 1>>>(minval_buf, maxval_buf);
}
// Host entry point: computes the global min/max of src (optionally masked)
// and returns them through minval/maxval as doubles. buf is scratch space
// sized according to getBufSize().
template <typename T>
void run(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf)
{
typedef typename MinMaxTypeTraits<T>::best_type R;
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
// Per-thread tile extents so the capped grid still covers the whole image.
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
R* minval_buf = (R*) buf.ptr(0);
R* maxval_buf = (R*) buf.ptr(1);
setDefault(minval_buf, maxval_buf);
if (mask.data)
kernel<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, SingleMask(mask), minval_buf, maxval_buf, twidth, theight);
else
kernel<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, WithOutMask(), minval_buf, maxval_buf, twidth, theight);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
R minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(R), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(R), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
// Explicit instantiations; the TINY module keeps only uchar and float.
template void run<uchar >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<schar >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<ushort>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<short >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
template void run<int >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
#endif
template void run<float >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<double>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf);
#endif
}
/////////////////////////////////////////////////////////////
// minMaxLoc
// Computes the global min/max of an image together with their positions.
namespace minMaxLoc
{
// To avoid shared bank conflicts we convert each value into value of
// appropriate type (32 bits minimum)
template <typename T> struct MinMaxTypeTraits;
template <> struct MinMaxTypeTraits<unsigned char> { typedef int best_type; };
template <> struct MinMaxTypeTraits<signed char> { typedef int best_type; };
template <> struct MinMaxTypeTraits<unsigned short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
// Pass 1: each block finds its local min/max and their flattened positions
// (y * cols + x) and writes them to slot 'bid' of the output buffers.
template <int BLOCK_SIZE, typename T, class Mask>
__global__ void kernel_pass_1(const PtrStepSz<T> src, const Mask mask, T* minval, T* maxval, unsigned int* minloc, unsigned int* maxloc, const int twidth, const int theight)
{
typedef typename MinMaxTypeTraits<T>::best_type work_type;
__shared__ work_type sminval[BLOCK_SIZE];
__shared__ work_type smaxval[BLOCK_SIZE];
__shared__ unsigned int sminloc[BLOCK_SIZE];
__shared__ unsigned int smaxloc[BLOCK_SIZE];
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
work_type mymin = numeric_limits<work_type>::max();
work_type mymax = -numeric_limits<work_type>::max();
unsigned int myminloc = 0;
unsigned int mymaxloc = 0;
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const T* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
if (mask(y, x))
{
const work_type srcVal = ptr[x];
if (srcVal < mymin)
{
mymin = srcVal;
myminloc = y * src.cols + x;
}
if (srcVal > mymax)
{
mymax = srcVal;
mymaxloc = y * src.cols + x;
}
}
}
}
// Key/value reduction: values are the extrema candidates, keys their
// flattened locations, compared with less<> for min and greater<> for max.
reduceKeyVal<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax),
smem_tuple(sminloc, smaxloc), thrust::tie(myminloc, mymaxloc),
tid,
thrust::make_tuple(less<work_type>(), greater<work_type>()));
if (tid == 0)
{
minval[bid] = (T) mymin;
maxval[bid] = (T) mymax;
minloc[bid] = myminloc;
maxloc[bid] = mymaxloc;
}
}
// Pass 2: a single block reduces the per-block partials down to element 0.
template <int BLOCK_SIZE, typename T>
__global__ void kernel_pass_2(T* minval, T* maxval, unsigned int* minloc, unsigned int* maxloc, int count)
{
typedef typename MinMaxTypeTraits<T>::best_type work_type;
__shared__ work_type sminval[BLOCK_SIZE];
__shared__ work_type smaxval[BLOCK_SIZE];
__shared__ unsigned int sminloc[BLOCK_SIZE];
__shared__ unsigned int smaxloc[BLOCK_SIZE];
// Threads beyond 'count' re-read the last partial; a duplicate candidate
// cannot change the reduction outcome.
unsigned int idx = ::min(threadIdx.x, count - 1);
work_type mymin = minval[idx];
work_type mymax = maxval[idx];
unsigned int myminloc = minloc[idx];
unsigned int mymaxloc = maxloc[idx];
reduceKeyVal<BLOCK_SIZE>(smem_tuple(sminval, smaxval), thrust::tie(mymin, mymax),
smem_tuple(sminloc, smaxloc), thrust::tie(myminloc, mymaxloc),
threadIdx.x,
thrust::make_tuple(less<work_type>(), greater<work_type>()));
if (threadIdx.x == 0)
{
minval[0] = (T) mymin;
maxval[0] = (T) mymax;
minloc[0] = myminloc;
maxloc[0] = mymaxloc;
}
}
const int threads_x = 32;
const int threads_y = 8;
// Launch configuration: grid capped at block.x x block.y blocks, so each
// thread may process a twidth x theight tile.
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
// Scratch sizes: valbuf holds per-block min/max values (2 rows), locbuf the
// corresponding flattened locations (2 rows).
void getBufSize(int cols, int rows, size_t elem_size, int& b1cols, int& b1rows, int& b2cols, int& b2rows)
{
dim3 block, grid;
getLaunchCfg(cols, rows, block, grid);
// For values
b1cols = (int)(grid.x * grid.y * elem_size);
b1rows = 2;
// For locations
b2cols = grid.x * grid.y * sizeof(int);
b2rows = 2;
}
// Host entry point: returns the global extrema and their positions;
// minloc/maxloc receive x at index 0 and y at index 1.
template <typename T>
void run(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf)
{
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
T* minval_buf = (T*) valbuf.ptr(0);
T* maxval_buf = (T*) valbuf.ptr(1);
unsigned int* minloc_buf = locbuf.ptr(0);
unsigned int* maxloc_buf = locbuf.ptr(1);
if (mask.data)
kernel_pass_1<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, SingleMask(mask), minval_buf, maxval_buf, minloc_buf, maxloc_buf, twidth, theight);
else
kernel_pass_1<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, WithOutMask(), minval_buf, maxval_buf, minloc_buf, maxloc_buf, twidth, theight);
cudaSafeCall( cudaGetLastError() );
kernel_pass_2<threads_x * threads_y><<<1, threads_x * threads_y>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
unsigned int minloc_, maxloc_;
cudaSafeCall( cudaMemcpy(&minloc_, minloc_buf, sizeof(unsigned int), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxloc_, maxloc_buf, sizeof(unsigned int), cudaMemcpyDeviceToHost) );
// Unflatten the linear indices back into coordinates: [1] = row, [0] = column.
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
// Explicit instantiations; the TINY module keeps only a subset.
template void run<unsigned char >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<signed char >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<unsigned short>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<short >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
#endif
template void run<int >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
template void run<float >(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
#ifndef OPENCV_TINY_GPU_MODULE
template void run<double>(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
#endif
}
/////////////////////////////////////////////////////////////
// countNonZero
// Counts the number of non-zero elements in an image.
namespace countNonZero
{
// Completion counter for the pre-CC-2.0 two-phase reduction path.
__device__ unsigned int blocks_finished = 0;
// Each thread counts non-zeros in its twidth x theight tile; the block sums
// the counts, and blocks are combined either with a single atomicAdd
// (CC >= 2.0) or by the last block to finish.
template <int BLOCK_SIZE, typename T>
__global__ void kernel(const PtrStepSz<T> src, unsigned int* count, const int twidth, const int theight)
{
__shared__ unsigned int scount[BLOCK_SIZE];
const int x0 = blockIdx.x * blockDim.x * twidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * theight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int mycount = 0;
for (int i = 0, y = y0; i < theight && y < src.rows; ++i, y += blockDim.y)
{
const T* ptr = src.ptr(y);
for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
{
const T srcVal = ptr[x];
mycount += (srcVal != 0);
}
}
device::reduce<BLOCK_SIZE>(scount, mycount, tid, plus<unsigned int>());
#if __CUDA_ARCH__ >= 200
if (tid == 0)
::atomicAdd(count, mycount);
#else
__shared__ bool is_last;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
if (tid == 0)
{
// Publish this block's partial count, make it visible, then take a
// ticket; the holder of the last ticket runs the final reduction.
count[bid] = mycount;
__threadfence();
unsigned int ticket = ::atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
mycount = tid < gridDim.x * gridDim.y ? count[tid] : 0;
device::reduce<BLOCK_SIZE>(scount, mycount, tid, plus<unsigned int>());
if (tid == 0)
{
count[0] = mycount;
// Reset for the next launch.
blocks_finished = 0;
}
}
#endif
}
const int threads_x = 32;
const int threads_y = 8;
// Launch configuration: grid capped at block.x x block.y blocks.
void getLaunchCfg(int cols, int rows, dim3& block, dim3& grid)
{
block = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, block.x * block.y),
divUp(rows, block.y * block.x));
grid.x = ::min(grid.x, block.x);
grid.y = ::min(grid.y, block.y);
}
// One counter slot per block (only slot 0 is used on the CC >= 2.0 path).
void getBufSize(int cols, int rows, int& bufcols, int& bufrows)
{
dim3 block, grid;
getLaunchCfg(cols, rows, block, grid);
bufcols = grid.x * grid.y * sizeof(int);
bufrows = 1;
}
// Host entry point: returns the number of non-zero elements in src.
template <typename T>
int run(const PtrStepSzb src, PtrStep<unsigned int> buf)
{
dim3 block, grid;
getLaunchCfg(src.cols, src.rows, block, grid);
const int twidth = divUp(divUp(src.cols, grid.x), block.x);
const int theight = divUp(divUp(src.rows, grid.y), block.y);
unsigned int* count_buf = buf.ptr(0);
// The kernel accumulates into count_buf[0], so it must start at zero.
cudaSafeCall( cudaMemset(count_buf, 0, sizeof(unsigned int)) );
kernel<threads_x * threads_y><<<grid, block>>>((PtrStepSz<T>) src, count_buf, twidth, theight);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
unsigned int count;
cudaSafeCall(cudaMemcpy(&count, count_buf, sizeof(unsigned int), cudaMemcpyDeviceToHost));
return count;
}
// Explicit instantiations; the TINY module keeps only uchar and float.
template int run<uchar >(const PtrStepSzb src, PtrStep<unsigned int> buf);
#ifndef OPENCV_TINY_GPU_MODULE
template int run<schar >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<ushort>(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<short >(const PtrStepSzb src, PtrStep<unsigned int> buf);
template int run<int >(const PtrStepSzb src, PtrStep<unsigned int> buf);
#endif
template int run<float >(const PtrStepSzb src, PtrStep<unsigned int> buf);
#ifndef OPENCV_TINY_GPU_MODULE
template int run<double>(const PtrStepSzb src, PtrStep<unsigned int> buf);
#endif
}
//////////////////////////////////////////////////////////////////////////////
// reduce
// Matrix reduction along one dimension (cv::gpu::reduce back-end).
namespace reduce
{
// Reduction operators. Each supplies the identity start value, the binary
// combination step, and a final transform; only Avg's transform rescales
// by the number of reduced elements.
struct Sum
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Sum() {}
__device__ __forceinline__ Sum(const Sum&) {}
};
struct Avg
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
// Average = accumulated sum divided by the element count (sz).
template <typename T>
__device__ __forceinline__ typename TypeVec<double, VecTraits<T>::cn>::vec_type result(T r, double sz) const
{
return r / sz;
}
__device__ __forceinline__ Avg() {}
__device__ __forceinline__ Avg(const Avg&) {}
};
struct Min
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
minimum<T> minOp;
return minOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Min() {}
__device__ __forceinline__ Min(const Min&) {}
};
struct Max
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(-numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
maximum<T> maxOp;
return maxOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Max() {}
__device__ __forceinline__ Max(const Max&) {}
};
///////////////////////////////////////////////////////////
// Collapses every column to a single value (dst has src.cols elements).
// A 16x16 block handles 16 columns: each thread strides down one column,
// then the 16 per-thread partials of each column are reduced via shared
// memory, stored transposed so one smem row holds one column's partials.
template <typename T, typename S, typename D, class Op>
__global__ void rowsKernel(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[16 * 16];
const int x = blockIdx.x * 16 + threadIdx.x;
S myVal = op.template startValue<S>();
if (x < src.cols)
{
for (int y = threadIdx.y; y < src.rows; y += 16)
{
S srcVal = src(y, x);
myVal = op(myVal, srcVal);
}
}
// Transposed store: the partials of column threadIdx.x land in smem row threadIdx.x.
smem[threadIdx.x * 16 + threadIdx.y] = myVal;
__syncthreads();
volatile S* srow = smem + threadIdx.y * 16;
myVal = srow[threadIdx.x];
device::reduce<16>(srow, myVal, threadIdx.x, op);
if (threadIdx.x == 0)
srow[0] = myVal;
__syncthreads();
if (threadIdx.y == 0 && x < src.cols)
dst[x] = (D) op.result(smem[threadIdx.x * 16], src.rows);
}
// Host-side launcher for rowsKernel; synchronizes only on the default stream.
template <typename T, typename S, typename D, class Op>
void rowsCaller(PtrStepSz<T> src, D* dst, cudaStream_t stream)
{
const dim3 block(16, 16);
const dim3 grid(divUp(src.cols, block.x));
Op op;
rowsKernel<T, S, D, Op><<<grid, block, 0, stream>>>(src, dst, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Dispatch on the op code: 0 = Sum, 1 = Avg, 2 = Max, 3 = Min.
template <typename T, typename S, typename D>
void rows(PtrStepSzb src, void* dst, int op, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSz<T> src, D* dst, cudaStream_t stream);
static const func_t funcs[] =
{
rowsCaller<T, S, D, Sum>,
rowsCaller<T, S, D, Avg>,
rowsCaller<T, S, D, Max>,
rowsCaller<T, S, D, Min>
};
funcs[op]((PtrStepSz<T>) src, (D*) dst, stream);
}
// Explicit instantiations as <source, accumulator, destination>.
#ifdef OPENCV_TINY_GPU_MODULE
template void rows<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<float, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
#else
template void rows<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, int, short>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<float, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<float, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<double, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
#endif
///////////////////////////////////////////////////////////
// Collapses every row to a single value (one block per row, dst has
// src.rows elements); operates element-wise on cn-channel vectors.
template <int BLOCK_SIZE, typename T, typename S, typename D, int cn, class Op>
__global__ void colsKernel(const PtrStepSz<typename TypeVec<T, cn>::vec_type> src, typename TypeVec<D, cn>::vec_type* dst, const Op op)
{
typedef typename TypeVec<T, cn>::vec_type src_type;
typedef typename TypeVec<S, cn>::vec_type work_type;
typedef typename TypeVec<D, cn>::vec_type dst_type;
__shared__ S smem[BLOCK_SIZE * cn];
const int y = blockIdx.x;
const src_type* srcRow = src.ptr(y);
work_type myVal = op.template startValue<work_type>();
for (int x = threadIdx.x; x < src.cols; x += BLOCK_SIZE)
myVal = op(myVal, saturate_cast<work_type>(srcRow[x]));
device::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(myVal), threadIdx.x, detail::Unroll<cn>::op(op));
if (threadIdx.x == 0)
dst[y] = saturate_cast<dst_type>(op.result(myVal, src.cols));
}
// Host-side launcher for colsKernel; synchronizes only on the default stream.
template <typename T, typename S, typename D, int cn, class Op> void colsCaller(PtrStepSzb src, void* dst, cudaStream_t stream)
{
const int BLOCK_SIZE = 256;
const dim3 block(BLOCK_SIZE);
const dim3 grid(src.rows);
Op op;
colsKernel<BLOCK_SIZE, T, S, D, cn, Op><<<grid, block, 0, stream>>>((PtrStepSz<typename TypeVec<T, cn>::vec_type>) src, (typename TypeVec<D, cn>::vec_type*) dst, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Dispatch on channel count (1..4) and op code: 0 = Sum, 1 = Avg, 2 = Max, 3 = Min.
template <typename T, typename S, typename D> void cols(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, void* dst, cudaStream_t stream);
static const func_t funcs[5][4] =
{
{0,0,0,0},
{colsCaller<T, S, D, 1, Sum>, colsCaller<T, S, D, 1, Avg>, colsCaller<T, S, D, 1, Max>, colsCaller<T, S, D, 1, Min>},
{colsCaller<T, S, D, 2, Sum>, colsCaller<T, S, D, 2, Avg>, colsCaller<T, S, D, 2, Max>, colsCaller<T, S, D, 2, Min>},
{colsCaller<T, S, D, 3, Sum>, colsCaller<T, S, D, 3, Avg>, colsCaller<T, S, D, 3, Max>, colsCaller<T, S, D, 3, Min>},
{colsCaller<T, S, D, 4, Sum>, colsCaller<T, S, D, 4, Avg>, colsCaller<T, S, D, 4, Max>, colsCaller<T, S, D, 4, Min>},
};
funcs[cn][op](src, dst, stream);
}
// Explicit instantiations as <source, accumulator, destination>.
#ifdef OPENCV_TINY_GPU_MODULE
template void cols<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<float, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
#else
template void cols<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, int, short>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<float, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<float, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<double, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
#endif
}
#endif /* CUDA_DISABLER */
|
e46c8817c588d73daad40a7f19554b45fca54383.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "common.h"
const double one = 1.0;
const double minus_one = -1.0;
///
/// @brief Forms a LU decomposition in a scalar manner.
///
/// @param[in] matrix size
/// @param[in] ldA leading dimension
/// @param[inout] A in: matrix, out: LU decomposition
///
void simple_lu(int n, int ldA, double *A)
{
for (int i = 0; i < n; i++) {
for (int j = i+1; j < n; j++) {
A[i*ldA+j] /= A[i*ldA+i];
for (int k = i+1; k < n; k++)
A[k*ldA+j] -= A[i*ldA+j] * A[k*ldA+i];
}
}
}
///
/// @brief Forms a LU decomposition in a blocked manner.
///
/// @param[in] block_size block size
/// @param[in] n matrix dimension
/// @param[in] ldA leading dimension
/// @param[inout] A in: matrix, out: LU decomposition
///
void blocked_lu(int block_size, int n, int ldA, double *A)
{
int block_count = DIVCEIL(n, block_size);
// allocate and fill an array that stores the block pointers
double ***blocks = (double ***) malloc(block_count*sizeof(double**));
for (int i = 0; i < block_count; i++) {
blocks[i] = (double **) malloc(block_count*sizeof(double*));
for (int j = 0; j < block_count; j++)
blocks[i][j] = A+(j*ldA+i)*block_size;
}
//
// iterate through the diagonal blocks
//
// +--+--+--+--+
// | 0| | | |
// +--+--+--+--+
// | | 1| | |
// +--+--+--+--+
// | | | 2| |
// +--+--+--+--+
// | | | | 3|
// +--+--+--+--+
//
for (int i = 0; i < block_count; i++) {
// calculate diagonal block size
int dsize = min(block_size, n-i*block_size);
// calculate trailing matrix size
int tsize = n-(i+1)*block_size;
//
// compute the LU decomposition of the diagonal block
//
// +--+--+--+--+
// | | | | |
// +--+--+--+--+ ## - process (read-write)
// | |##| | |
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
//
simple_lu(dsize, ldA, blocks[i][i]);
if (0 < tsize) {
//
// blocks[i][i+1:] <- L1(blocks[i][i]) \ blocks[i][i+1:]
//
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
// | |rr|##|##| ## - process (read-write)
// +--+--+--+--+ rr - read
// | | | | |
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
//
// hipblasDtrsm(handle, HIPBLAS_SIDE_LEFT,
// HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, HIPBLAS_DIAG_UNIT,
// ....
dtrsm_("Left", "Lower", "No transpose", "Unit triangular",
&dsize, &tsize, &one, blocks[i][i], &ldA, blocks[i][i+1], &ldA);
//
// blocks[i+1:][i] <- U(blocks[i][i]) / blocks[i+1:][i]
//
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
// | |rr| | | ## - process (read-write)
// +--+--+--+--+ rr - read
// | |##| | |
// +--+--+--+--+
// | |##| | |
// +--+--+--+--+
//
// hipblasDtrsm(handle, HIPBLAS_SIDE_RIGHT,
// HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT,
// ....
dtrsm_("Right", "Upper", "No Transpose", "Not unit triangular",
&tsize, &dsize, &one, blocks[i][i], &ldA, blocks[i+1][i], &ldA);
//
// blocks[i+1:][i+1:] <- blocks[i+1:][i+1:] -
// blocks[i+1:][i] * blocks[i][i+1:]
//
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
// | | |rr|rr| ## - process (read-write)
// +--+--+--+--+ rr - read
// | |rr|##|##|
// +--+--+--+--+
// | |rr|##|##|
// +--+--+--+--+
//
// hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
// ....
dgemm_("No Transpose", "No Transpose",
&tsize, &tsize, &dsize, &minus_one, blocks[i+1][i],
&ldA, blocks[i][i+1], &ldA, &one, blocks[i+1][i+1], &ldA);
}
}
// free allocated resources
for (int i = 0; i < block_count; i++)
free(blocks[i]);
free(blocks);
}
int main(int argc, char **argv)
{
//
// check arguments
//
if (argc != 3) {
fprintf(stderr,
"[error] Incorrect arguments. Use %s (n) (block size)\n", argv[0]);
return EXIT_FAILURE;
}
int n = atoi(argv[1]);
if (n < 1) {
fprintf(stderr, "[error] Invalid matrix dimension.\n");
return EXIT_FAILURE;
}
int block_size = atoi(argv[2]);
if (block_size < 2) {
fprintf(stderr, "[error] Invalid block size.\n");
return EXIT_FAILURE;
}
//
// Initialize matrix A and store a duplicate to matrix B. Matrix C is for
// validation.
//
srand(time(NULL));
int ldA, ldB, ldC;
ldA = ldB = ldC = DIVCEIL(n, 8)*8; // align to 64 bytes
double *A = (double *) aligned_alloc(8, n*ldA*sizeof(double));
double *B = (double *) aligned_alloc(8, n*ldB*sizeof(double));
double *C = (double *) aligned_alloc(8, n*ldC*sizeof(double));
if (A == NULL || B == NULL || C == NULL) {
fprintf(stderr, "[error] Failed to allocate memory.\n");
return EXIT_FAILURE;
}
// A <- random diagonally dominant matrix
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++)
A[i*ldA+j] = B[i*ldB+j] = 2.0*rand()/RAND_MAX - 1.0;
A[i*ldA+i] = B[i*ldB+i] = 1.0*rand()/RAND_MAX + n;
}
//
// compute
//
struct timespec ts_start;
clock_gettime(CLOCK_MONOTONIC, &ts_start);
// A <- (L,U)
blocked_lu(block_size, n, ldA, A);
struct timespec ts_stop;
clock_gettime(CLOCK_MONOTONIC, &ts_stop);
printf("Time = %f s\n",
ts_stop.tv_sec - ts_start.tv_sec +
1.0E-9*(ts_stop.tv_nsec - ts_start.tv_nsec));
// C <- L * U
mul_lu(n, ldA, ldC, A, C);
//
// validate
//
// C <- L * U - B
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
C[i*ldC+j] -= B[i*ldB+j];
// compute || C ||_F / || B ||_F = || L * U - B ||_F / || B ||_F
double residual = dlange_("Frobenius", &n, &n, C, &ldC, NULL) /
dlange_("Frobenius", &n, &n, B, &ldB, NULL);
printf("Residual = %E\n", residual);
int ret = EXIT_SUCCESS;
if (1.0E-12 < residual) {
fprintf(stderr, "The residual is too large.\n");
ret = EXIT_FAILURE;
}
//
// cleanup
//
free(A);
free(B);
free(C);
return ret;
}
| e46c8817c588d73daad40a7f19554b45fca54383.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "common.h"
const double one = 1.0;
const double minus_one = -1.0;
///
/// @brief Forms a LU decomposition in a scalar manner.
///
/// @param[in] matrix size
/// @param[in] ldA leading dimension
/// @param[inout] A in: matrix, out: LU decomposition
///
void simple_lu(int n, int ldA, double *A)
{
for (int i = 0; i < n; i++) {
for (int j = i+1; j < n; j++) {
A[i*ldA+j] /= A[i*ldA+i];
for (int k = i+1; k < n; k++)
A[k*ldA+j] -= A[i*ldA+j] * A[k*ldA+i];
}
}
}
///
/// @brief Forms a LU decomposition in a blocked manner.
///
/// @param[in] block_size block size
/// @param[in] n matrix dimension
/// @param[in] ldA leading dimension
/// @param[inout] A in: matrix, out: LU decomposition
///
void blocked_lu(int block_size, int n, int ldA, double *A)
{
int block_count = DIVCEIL(n, block_size);
// allocate and fill an array that stores the block pointers
double ***blocks = (double ***) malloc(block_count*sizeof(double**));
for (int i = 0; i < block_count; i++) {
blocks[i] = (double **) malloc(block_count*sizeof(double*));
for (int j = 0; j < block_count; j++)
blocks[i][j] = A+(j*ldA+i)*block_size;
}
//
// iterate through the diagonal blocks
//
// +--+--+--+--+
// | 0| | | |
// +--+--+--+--+
// | | 1| | |
// +--+--+--+--+
// | | | 2| |
// +--+--+--+--+
// | | | | 3|
// +--+--+--+--+
//
for (int i = 0; i < block_count; i++) {
// calculate diagonal block size
int dsize = min(block_size, n-i*block_size);
// calculate trailing matrix size
int tsize = n-(i+1)*block_size;
//
// compute the LU decomposition of the diagonal block
//
// +--+--+--+--+
// | | | | |
// +--+--+--+--+ ## - process (read-write)
// | |##| | |
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
//
simple_lu(dsize, ldA, blocks[i][i]);
if (0 < tsize) {
//
// blocks[i][i+1:] <- L1(blocks[i][i]) \ blocks[i][i+1:]
//
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
// | |rr|##|##| ## - process (read-write)
// +--+--+--+--+ rr - read
// | | | | |
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
//
// cublasDtrsm(handle, CUBLAS_SIDE_LEFT,
// CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
// ....
dtrsm_("Left", "Lower", "No transpose", "Unit triangular",
&dsize, &tsize, &one, blocks[i][i], &ldA, blocks[i][i+1], &ldA);
//
// blocks[i+1:][i] <- U(blocks[i][i]) / blocks[i+1:][i]
//
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
// | |rr| | | ## - process (read-write)
// +--+--+--+--+ rr - read
// | |##| | |
// +--+--+--+--+
// | |##| | |
// +--+--+--+--+
//
// cublasDtrsm(handle, CUBLAS_SIDE_RIGHT,
// CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
// ....
dtrsm_("Right", "Upper", "No Transpose", "Not unit triangular",
&tsize, &dsize, &one, blocks[i][i], &ldA, blocks[i+1][i], &ldA);
//
// blocks[i+1:][i+1:] <- blocks[i+1:][i+1:] -
// blocks[i+1:][i] * blocks[i][i+1:]
//
// +--+--+--+--+
// | | | | |
// +--+--+--+--+
// | | |rr|rr| ## - process (read-write)
// +--+--+--+--+ rr - read
// | |rr|##|##|
// +--+--+--+--+
// | |rr|##|##|
// +--+--+--+--+
//
// cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
// ....
dgemm_("No Transpose", "No Transpose",
&tsize, &tsize, &dsize, &minus_one, blocks[i+1][i],
&ldA, blocks[i][i+1], &ldA, &one, blocks[i+1][i+1], &ldA);
}
}
// free allocated resources
for (int i = 0; i < block_count; i++)
free(blocks[i]);
free(blocks);
}
int main(int argc, char **argv)
{
//
// check arguments
//
if (argc != 3) {
fprintf(stderr,
"[error] Incorrect arguments. Use %s (n) (block size)\n", argv[0]);
return EXIT_FAILURE;
}
int n = atoi(argv[1]);
if (n < 1) {
fprintf(stderr, "[error] Invalid matrix dimension.\n");
return EXIT_FAILURE;
}
int block_size = atoi(argv[2]);
if (block_size < 2) {
fprintf(stderr, "[error] Invalid block size.\n");
return EXIT_FAILURE;
}
//
// Initialize matrix A and store a duplicate to matrix B. Matrix C is for
// validation.
//
srand(time(NULL));
int ldA, ldB, ldC;
ldA = ldB = ldC = DIVCEIL(n, 8)*8; // align to 64 bytes
double *A = (double *) aligned_alloc(8, n*ldA*sizeof(double));
double *B = (double *) aligned_alloc(8, n*ldB*sizeof(double));
double *C = (double *) aligned_alloc(8, n*ldC*sizeof(double));
if (A == NULL || B == NULL || C == NULL) {
fprintf(stderr, "[error] Failed to allocate memory.\n");
return EXIT_FAILURE;
}
// A <- random diagonally dominant matrix
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++)
A[i*ldA+j] = B[i*ldB+j] = 2.0*rand()/RAND_MAX - 1.0;
A[i*ldA+i] = B[i*ldB+i] = 1.0*rand()/RAND_MAX + n;
}
//
// compute
//
struct timespec ts_start;
clock_gettime(CLOCK_MONOTONIC, &ts_start);
// A <- (L,U)
blocked_lu(block_size, n, ldA, A);
struct timespec ts_stop;
clock_gettime(CLOCK_MONOTONIC, &ts_stop);
printf("Time = %f s\n",
ts_stop.tv_sec - ts_start.tv_sec +
1.0E-9*(ts_stop.tv_nsec - ts_start.tv_nsec));
// C <- L * U
mul_lu(n, ldA, ldC, A, C);
//
// validate
//
// C <- L * U - B
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
C[i*ldC+j] -= B[i*ldB+j];
// compute || C ||_F / || B ||_F = || L * U - B ||_F / || B ||_F
double residual = dlange_("Frobenius", &n, &n, C, &ldC, NULL) /
dlange_("Frobenius", &n, &n, B, &ldB, NULL);
printf("Residual = %E\n", residual);
int ret = EXIT_SUCCESS;
if (1.0E-12 < residual) {
fprintf(stderr, "The residual is too large.\n");
ret = EXIT_FAILURE;
}
//
// cleanup
//
free(A);
free(B);
free(C);
return ret;
}
|
7fd2e5220e58dc6c4e32dc415ec0cf869f54f816.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "gather_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T, typename Tin>
__global__ void _GatherKernel(
const int64_t input_block_size,
const int64_t indices_max,
const fast_divmod output_block_size,
const fast_divmod block_size,
const Tin* indices_data,
const T* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
CUDA_LONG input_index = 0;
int input_block_index, block_offset;
output_block_size.divmod(id, input_block_index, block_offset);
int indices_index, offset;
block_size.divmod(block_offset, indices_index, offset);
int64_t idx = indices_data[indices_index];
idx = idx < 0 ? idx + indices_max : idx;
if (idx < 0 || idx >= indices_max) {
output_data[id] = 0;
return;
}
input_index = input_block_index * input_block_size + idx * block_size.d_ + offset;
output_data[id] = input_data[input_index];
}
template <typename T, typename Tin>
void GatherImpl(
const int64_t input_block_size,
const int64_t indices_max,
const fast_divmod& output_block_size,
const fast_divmod& block_size,
const Tin* indices_data,
const T* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( _GatherKernel<T, Tin>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
input_block_size, indices_max, output_block_size, block_size, indices_data, input_data, output_data, (CUDA_LONG)N);
}
#define SPECIALIZED_IMPL(T) \
template void GatherImpl<T, int32_t>(const int64_t input_block_size, const int64_t indices_max, \
const fast_divmod& output_block_size, const fast_divmod& block_size, \
const int32_t* indices_data, const T* input_data, T* output_data, const size_t N); \
template void GatherImpl<T, int64_t>(const int64_t input_block_size, const int64_t indices_max, \
const fast_divmod& output_block_size, const fast_divmod& block_size, \
const int64_t* indices_data, const T* input_data, T* output_data, const size_t N);
SPECIALIZED_IMPL(int8_t)
SPECIALIZED_IMPL(int16_t)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(int64_t)
SPECIALIZED_IMPL(uint8_t)
SPECIALIZED_IMPL(uint16_t)
SPECIALIZED_IMPL(uint32_t)
SPECIALIZED_IMPL(uint64_t)
SPECIALIZED_IMPL(half)
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
SPECIALIZED_IMPL(bool)
} // namespace cuda
} // namespace onnxruntime
| 7fd2e5220e58dc6c4e32dc415ec0cf869f54f816.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "gather_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T, typename Tin>
__global__ void _GatherKernel(
const int64_t input_block_size,
const int64_t indices_max,
const fast_divmod output_block_size,
const fast_divmod block_size,
const Tin* indices_data,
const T* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
CUDA_LONG input_index = 0;
int input_block_index, block_offset;
output_block_size.divmod(id, input_block_index, block_offset);
int indices_index, offset;
block_size.divmod(block_offset, indices_index, offset);
int64_t idx = indices_data[indices_index];
idx = idx < 0 ? idx + indices_max : idx;
if (idx < 0 || idx >= indices_max) {
output_data[id] = 0;
return;
}
input_index = input_block_index * input_block_size + idx * block_size.d_ + offset;
output_data[id] = input_data[input_index];
}
template <typename T, typename Tin>
void GatherImpl(
const int64_t input_block_size,
const int64_t indices_max,
const fast_divmod& output_block_size,
const fast_divmod& block_size,
const Tin* indices_data,
const T* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GatherKernel<T, Tin><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
input_block_size, indices_max, output_block_size, block_size, indices_data, input_data, output_data, (CUDA_LONG)N);
}
#define SPECIALIZED_IMPL(T) \
template void GatherImpl<T, int32_t>(const int64_t input_block_size, const int64_t indices_max, \
const fast_divmod& output_block_size, const fast_divmod& block_size, \
const int32_t* indices_data, const T* input_data, T* output_data, const size_t N); \
template void GatherImpl<T, int64_t>(const int64_t input_block_size, const int64_t indices_max, \
const fast_divmod& output_block_size, const fast_divmod& block_size, \
const int64_t* indices_data, const T* input_data, T* output_data, const size_t N);
SPECIALIZED_IMPL(int8_t)
SPECIALIZED_IMPL(int16_t)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(int64_t)
SPECIALIZED_IMPL(uint8_t)
SPECIALIZED_IMPL(uint16_t)
SPECIALIZED_IMPL(uint32_t)
SPECIALIZED_IMPL(uint64_t)
SPECIALIZED_IMPL(half)
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
SPECIALIZED_IMPL(bool)
} // namespace cuda
} // namespace onnxruntime
|
dc01c54c848ede77af8e35576b1fd7cd38eb99bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_rand_init(hiprandState_t *__restrict__ pState, int seed) {
const int gtid_x = blockIdx.x * blockDim.x + threadIdx.x;
const int gtid_y = blockIdx.y * blockDim.y + threadIdx.y;
const int gtid = gtid_y * gridDim.x * blockDim.x + gtid_x;
hiprandState_t state;
hiprand_init(seed, gtid, 0, &state);
pState[gtid] = state;
} | dc01c54c848ede77af8e35576b1fd7cd38eb99bb.cu | #include "includes.h"
__global__ void kernel_rand_init(curandState *__restrict__ pState, int seed) {
const int gtid_x = blockIdx.x * blockDim.x + threadIdx.x;
const int gtid_y = blockIdx.y * blockDim.y + threadIdx.y;
const int gtid = gtid_y * gridDim.x * blockDim.x + gtid_x;
curandState state;
curand_init(seed, gtid, 0, &state);
pState[gtid] = state;
} |
d9a22db96686fbb9decd38af9611a9f98333f0b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void __l2dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, L2dist not supported on arch <= 200\n");
} | d9a22db96686fbb9decd38af9611a9f98333f0b5.cu | #include "includes.h"
__global__ void __l2dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, L2dist not supported on arch <= 200\n");
} |
4b5b68db13b1c99034837f568e164ae1ac3c646b.hip | // !!! This is a file automatically generated by hipify!!!
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _CUBIC3D_KERNEL_H_
#define _CUBIC3D_KERNEL_H_
#include "internal/bspline_kernel.cu"
//! Trilinearly interpolated texture lookup, using unnormalized coordinates.
//! This function merely serves as a reference for the tricubic versions.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum hipTextureReadMode mode>
__device__ float linearTex3D(texture<T, 3, mode> tex, float3 coord)
{
return tex3D(tex, coord.x, coord.y, coord.z);
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Straight forward implementation, using 64 nearest neighbour lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum hipTextureReadMode mode>
__device__ float cubicTex3DSimple(texture<T, 3, mode> tex, float3 coord)
{
// transform the coordinate from [0,extent] to [-0.5, extent-0.5]
const float3 coord_grid = coord - 0.5f;
float3 index = floor(coord_grid);
const float3 fraction = coord_grid - index;
index = index + 0.5f; //move from [-0.5, extent-0.5] to [0, extent]
float result = 0.0f;
for (float z=-1; z < 2.5f; z++) //range [-1, 2]
{
float bsplineZ = bspline(z-fraction.z);
float w = index.z + z;
for (float y=-1; y < 2.5f; y++)
{
float bsplineYZ = bspline(y-fraction.y) * bsplineZ;
float v = index.y + y;
for (float x=-1; x < 2.5f; x++)
{
float bsplineXYZ = bspline(x-fraction.x) * bsplineYZ;
float u = index.x + x;
result += bsplineXYZ * tex3D(tex, u, v, w);
}
}
}
return result;
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Fast implementation, using 8 trilinear lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
#define WEIGHTS bspline_weights
#define CUBICTEX3D cubicTex3D
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
// Fast tricubic interpolated 1st order derivative texture lookup in x-, y-
// and z-direction, using unnormalized coordinates.
__device__ void bspline_weights_1st_derivative_x(float3 fraction, float3& w0, float3& w1, float3& w2, float3& w3)
{
float t0, t1, t2, t3;
bspline_weights_1st_derivative(fraction.x, t0, t1, t2, t3);
w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
bspline_weights(fraction.y, t0, t1, t2, t3);
w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
bspline_weights(fraction.z, t0, t1, t2, t3);
w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
__device__ void bspline_weights_1st_derivative_y(float3 fraction, float3& w0, float3& w1, float3& w2, float3& w3)
{
float t0, t1, t2, t3;
bspline_weights(fraction.x, t0, t1, t2, t3);
w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
bspline_weights_1st_derivative(fraction.y, t0, t1, t2, t3);
w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
bspline_weights(fraction.z, t0, t1, t2, t3);
w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
__device__ void bspline_weights_1st_derivative_z(float3 fraction, float3& w0, float3& w1, float3& w2, float3& w3)
{
float t0, t1, t2, t3;
bspline_weights(fraction.x, t0, t1, t2, t3);
w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
bspline_weights(fraction.y, t0, t1, t2, t3);
w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
bspline_weights_1st_derivative(fraction.z, t0, t1, t2, t3);
w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
#define WEIGHTS bspline_weights_1st_derivative_x
#define CUBICTEX3D cubicTex3D_1st_derivative_x
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_y
#define CUBICTEX3D cubicTex3D_1st_derivative_y
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_z
#define CUBICTEX3D cubicTex3D_1st_derivative_z
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#endif // _CUBIC3D_KERNEL_H_
| 4b5b68db13b1c99034837f568e164ae1ac3c646b.cu | /*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _CUBIC3D_KERNEL_H_
#define _CUBIC3D_KERNEL_H_
#include "internal/bspline_kernel.cu"
//! Trilinearly interpolated texture lookup, using unnormalized coordinates.
//! This function merely serves as a reference for the tricubic versions.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum cudaTextureReadMode mode>
__device__ float linearTex3D(texture<T, 3, mode> tex, float3 coord)
{
return tex3D(tex, coord.x, coord.y, coord.z);
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Straight forward implementation, using 64 nearest neighbour lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum cudaTextureReadMode mode>
__device__ float cubicTex3DSimple(texture<T, 3, mode> tex, float3 coord)
{
// transform the coordinate from [0,extent] to [-0.5, extent-0.5]
const float3 coord_grid = coord - 0.5f;
float3 index = floor(coord_grid);
const float3 fraction = coord_grid - index;
index = index + 0.5f; //move from [-0.5, extent-0.5] to [0, extent]
float result = 0.0f;
for (float z=-1; z < 2.5f; z++) //range [-1, 2]
{
float bsplineZ = bspline(z-fraction.z);
float w = index.z + z;
for (float y=-1; y < 2.5f; y++)
{
float bsplineYZ = bspline(y-fraction.y) * bsplineZ;
float v = index.y + y;
for (float x=-1; x < 2.5f; x++)
{
float bsplineXYZ = bspline(x-fraction.x) * bsplineYZ;
float u = index.x + x;
result += bsplineXYZ * tex3D(tex, u, v, w);
}
}
}
return result;
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Fast implementation, using 8 trilinear lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
#define WEIGHTS bspline_weights
#define CUBICTEX3D cubicTex3D
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
// Fast tricubic interpolated 1st order derivative texture lookup in x-, y-
// and z-direction, using unnormalized coordinates.
__device__ void bspline_weights_1st_derivative_x(float3 fraction, float3& w0, float3& w1, float3& w2, float3& w3)
{
float t0, t1, t2, t3;
bspline_weights_1st_derivative(fraction.x, t0, t1, t2, t3);
w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
bspline_weights(fraction.y, t0, t1, t2, t3);
w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
bspline_weights(fraction.z, t0, t1, t2, t3);
w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
__device__ void bspline_weights_1st_derivative_y(float3 fraction, float3& w0, float3& w1, float3& w2, float3& w3)
{
float t0, t1, t2, t3;
bspline_weights(fraction.x, t0, t1, t2, t3);
w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
bspline_weights_1st_derivative(fraction.y, t0, t1, t2, t3);
w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
bspline_weights(fraction.z, t0, t1, t2, t3);
w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
__device__ void bspline_weights_1st_derivative_z(float3 fraction, float3& w0, float3& w1, float3& w2, float3& w3)
{
float t0, t1, t2, t3;
bspline_weights(fraction.x, t0, t1, t2, t3);
w0.x = t0; w1.x = t1; w2.x = t2; w3.x = t3;
bspline_weights(fraction.y, t0, t1, t2, t3);
w0.y = t0; w1.y = t1; w2.y = t2; w3.y = t3;
bspline_weights_1st_derivative(fraction.z, t0, t1, t2, t3);
w0.z = t0; w1.z = t1; w2.z = t2; w3.z = t3;
}
#define WEIGHTS bspline_weights_1st_derivative_x
#define CUBICTEX3D cubicTex3D_1st_derivative_x
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_y
#define CUBICTEX3D cubicTex3D_1st_derivative_y
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#define WEIGHTS bspline_weights_1st_derivative_z
#define CUBICTEX3D cubicTex3D_1st_derivative_z
#include "internal/cubicTex3D_kernel.cu"
#undef CUBICTEX3D
#undef WEIGHTS
#endif // _CUBIC3D_KERNEL_H_
|
59d560d83445ffc3e6a10f0b97f78749efb76d13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/prelu_layer.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_ROCM
// CUDA kernele for forward
template<typename Dtype>
__global__ void PReLUForward(const int_tp n, const int_tp channels,
const int_tp dim, const Dtype* in, Dtype* out,
const Dtype* slope_data, const int_tp div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int_tp c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template<typename Dtype>
__global__ void PReLUBackward(const int_tp n, const int_tp channels,
const int_tp dim, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data,
const int_tp div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int_tp c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index]
* ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template<typename Dtype>
__global__ void PReLUParamBackward(const int_tp n, const int_tp rows,
const int_tp rowPitch, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
for (int k = 1; k < rows; k++) {
out_diff[index] += in_diff[index + k * rowPitch]
* in_data[index + k * rowPitch]
* (in_data[index + k * rowPitch] <= 0);
}
}
}
#endif // USE_ROCM
template<typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
const int_tp dim = bottom[0]->count(2);
const int_tp channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int_tp div_factor = channel_shared_ ? channels : 1;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
if (top[0] == bottom[0]) {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0,
(cl_mem) (bottom_memory_.mutable_gpu_data()), 0,
&ctx);
}
viennacl::ocl::kernel &oclk_prelu = program.get_kernel(
CL_KERNEL_SELECT("prelu_forward"));
viennacl::ocl::enqueue(
oclk_prelu(count, channels, dim, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) slope_data, &ctx), div_factor),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int_tp count = bottom[0]->count();
const int_tp dim = bottom[0]->count(2);
const int_tp channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// Propagate to param
// Since to write bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computaion), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int_tp cdim = channels * dim;
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUParamBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(cdim),
CAFFE_CUDA_NUM_THREADS)(
cdim, bottom[0]->num(), top[0]->offset(1), top_diff ,
bottom_data ,
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype dsum;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &dsum);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int_tp div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
// Propagate to param
// Since to write bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computaion), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int_tp cdim = channels * dim;
// compute element-wise diff
viennacl::ocl::kernel &oclk_prelu = program.get_kernel(
CL_KERNEL_SELECT("prelu_param_backward"));
viennacl::ocl::enqueue(
oclk_prelu(cdim, bottom[0]->num(), top[0]->offset(1),
WrapHandle((cl_mem)top_diff, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) (backward_buff_.mutable_gpu_diff()), &ctx)),
ctx.get_queue());
if (channel_shared_) {
Dtype dsum;
greentea_gpu_dot<Dtype>(this->device_->id(), channels * dim,
(cl_mem) (backward_buff_.gpu_diff()), 0,
(cl_mem) (multiplier_.gpu_data()), 0, &dsum);
greentea_gpu_add_scalar<Dtype>(this->device_->id(),
this->blobs_[0]->count(), Dtype(dsum),
(cl_mem) slope_diff, 0);
} else {
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, channels,
dim, 1., (cl_mem) (backward_buff_.gpu_diff()),
0, (cl_mem) (multiplier_.gpu_data()), 0, 1.,
(cl_mem) slope_diff, 0);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int_tp div_factor = channel_shared_ ? channels : 1;
viennacl::ocl::kernel &oclk_prelu = program.get_kernel(
CL_KERNEL_SELECT("prelu_backward"));
viennacl::ocl::enqueue(
oclk_prelu(count, channels, dim, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx),
WrapHandle((cl_mem) slope_data, &ctx), div_factor),
ctx.get_queue());
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
| 59d560d83445ffc3e6a10f0b97f78749efb76d13.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/prelu_layer.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_CUDA
// CUDA kernele for forward
template<typename Dtype>
__global__ void PReLUForward(const int_tp n, const int_tp channels,
const int_tp dim, const Dtype* in, Dtype* out,
const Dtype* slope_data, const int_tp div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int_tp c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template<typename Dtype>
__global__ void PReLUBackward(const int_tp n, const int_tp channels,
const int_tp dim, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data,
const int_tp div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int_tp c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index]
* ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template<typename Dtype>
__global__ void PReLUParamBackward(const int_tp n, const int_tp rows,
const int_tp rowPitch, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
for (int k = 1; k < rows; k++) {
out_diff[index] += in_diff[index + k * rowPitch]
* in_data[index + k * rowPitch]
* (in_data[index + k * rowPitch] <= 0);
}
}
}
#endif // USE_CUDA
template<typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
const int_tp dim = bottom[0]->count(2);
const int_tp channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int_tp div_factor = channel_shared_ ? channels : 1;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
if (top[0] == bottom[0]) {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0,
(cl_mem) (bottom_memory_.mutable_gpu_data()), 0,
&ctx);
}
viennacl::ocl::kernel &oclk_prelu = program.get_kernel(
CL_KERNEL_SELECT("prelu_forward"));
viennacl::ocl::enqueue(
oclk_prelu(count, channels, dim, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) slope_data, &ctx), div_factor),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int_tp count = bottom[0]->count();
const int_tp dim = bottom[0]->count(2);
const int_tp channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// Propagate to param
// Since to write bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computaion), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int_tp cdim = channels * dim;
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUParamBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(cdim),
CAFFE_CUDA_NUM_THREADS)(
cdim, bottom[0]->num(), top[0]->offset(1), top_diff ,
bottom_data ,
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype dsum;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &dsum);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int_tp div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
// Propagate to param
// Since to write bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computaion), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int_tp cdim = channels * dim;
// compute element-wise diff
viennacl::ocl::kernel &oclk_prelu = program.get_kernel(
CL_KERNEL_SELECT("prelu_param_backward"));
viennacl::ocl::enqueue(
oclk_prelu(cdim, bottom[0]->num(), top[0]->offset(1),
WrapHandle((cl_mem)top_diff, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) (backward_buff_.mutable_gpu_diff()), &ctx)),
ctx.get_queue());
if (channel_shared_) {
Dtype dsum;
greentea_gpu_dot<Dtype>(this->device_->id(), channels * dim,
(cl_mem) (backward_buff_.gpu_diff()), 0,
(cl_mem) (multiplier_.gpu_data()), 0, &dsum);
greentea_gpu_add_scalar<Dtype>(this->device_->id(),
this->blobs_[0]->count(), Dtype(dsum),
(cl_mem) slope_diff, 0);
} else {
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, channels,
dim, 1., (cl_mem) (backward_buff_.gpu_diff()),
0, (cl_mem) (multiplier_.gpu_data()), 0, 1.,
(cl_mem) slope_diff, 0);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int_tp div_factor = channel_shared_ ? channels : 1;
viennacl::ocl::kernel &oclk_prelu = program.get_kernel(
CL_KERNEL_SELECT("prelu_backward"));
viennacl::ocl::enqueue(
oclk_prelu(count, channels, dim, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx),
WrapHandle((cl_mem) slope_data, &ctx), div_factor),
ctx.get_queue());
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
|
8833465509a20f7eac549676b10b9de9534070b7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <assert.h>
#include <sys/time.h>
#include <malloc.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define BLOCKWIDTH 1024
int *allocateMemoryInt(int length) {
int *vec;
if ((vec = (int *)malloc(length * sizeof(int))) == NULL) {
fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
exit(1);
}
memset(vec, 0, length * sizeof(int));
return vec;
}
double *allocateMemoryDouble(int length) {
double *vec;
if ((vec = (double *)malloc(length * sizeof(double))) == NULL) {
fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
exit(1);
}
return vec;
}
__global__ void CalcPageRank(int nodes, int edges, int *in_d, int *out_d,
int *run_d, int *edges_d, double *pagerank_old_d, double *pagerank_new_d) {
int node_index = blockIdx.x * BLOCKWIDTH + threadIdx.x;
if(node_index<nodes) {
double sum = 0;
double d = 0.85;
double jumpChance = (1 - d) * (1.0 / nodes);
int stopIdx = run_d[node_index] + in_d[node_index];
int k;
for (k = run_d[node_index]; k < stopIdx; k++) {
int jk = edges_d[k];
sum += pagerank_old_d[jk] / out_d[jk];
}
pagerank_new_d[node_index] = sum * d + jumpChance;
}
__syncthreads();
pagerank_old_d[node_index] = pagerank_new_d[node_index];
}
int main () {
int i = 0, j = 0, k = 0, run = 0, idx = 0;
int nodes = 38;
int edges = 156;
int iter = 38;
struct timeval stop, start;
int* indegree_count=allocateMemoryInt(nodes);
int* outdegree_count=allocateMemoryInt(nodes);
int* running_edge_indices=allocateMemoryInt(nodes);
int* edges_1D = allocateMemoryInt(edges);//node1:node2|node2->node1
double* pagerank_new;
double* pagerank_old;
if ((pagerank_new = (double *)malloc(nodes * sizeof(double))) == NULL) {
fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
exit(1);
}
if ((pagerank_old = (double *)malloc(nodes * sizeof(double))) == NULL) {
fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
exit(1);
}
for (i = 0; i < nodes; i++) {
pagerank_old[i] = 1.0 / (double)nodes;
}
memset(pagerank_new, 0, nodes * sizeof(double));
// set starting values for pagerank values to 1/n
for (i = 0; i < nodes; i++)
pagerank_old[i] = 1.0 / (double)nodes;
gettimeofday(&start, NULL);
setvbuf(stdin, NULL, _IOFBF, edges);
//reads in edges
for (i = 0; i < edges; i++) {
scanf("%d\n", &j);
edges_1D[i] = j;
}
//reads in in-degrees, out-degrees, and computes running idx
for (i = 0; i < nodes; i++) {
scanf("%d %d %d\n", &idx, &j, &k);
indegree_count[idx] = j;
outdegree_count[idx] = k;
running_edge_indices[idx] = run;
run += j;
}
gettimeofday(&stop, NULL);
fprintf(stderr, "Read took %lf seconds\n", (stop.tv_sec - start.tv_sec) +
((stop.tv_usec - start.tv_usec) / 1000000.0));
// Begin Cuda Setup
int *in_d, *out_d, *run_d, *edges_d;
double *pagerank_new_d, *pagerank_old_d;
int node_size = nodes * sizeof(int);
int pr_size = nodes * sizeof(double);
int edges_size = edges * sizeof(int);
hipMalloc(&in_d, node_size);
hipMemcpy(in_d, indegree_count, node_size, hipMemcpyHostToDevice);
hipMalloc(&out_d, node_size);
hipMemcpy(out_d, outdegree_count, node_size, hipMemcpyHostToDevice);
hipMalloc(&run_d, node_size);
hipMemcpy(run_d, running_edge_indices, node_size, hipMemcpyHostToDevice);
hipMalloc(&edges_d, edges_size);
hipMemcpy(edges_d, edges_1D, edges_size, hipMemcpyHostToDevice);
hipMalloc(&pagerank_old_d,pr_size);
hipMemcpy(pagerank_old_d, pagerank_old, pr_size, hipMemcpyHostToDevice);
hipMalloc(&pagerank_new_d,pr_size);
//hipMemcpy(pagerank_new_d, pagerank_new, pr_size, hipMemcpyHostToDevice);
int blocks = ceil((double)nodes/(double)BLOCKWIDTH);
dim3 dimGrid(blocks, 1, 1);
dim3 dimBlock(BLOCKWIDTH, 1, 1);
for(i=0; i < iter; i++) {
hipLaunchKernelGGL(( CalcPageRank), dim3(dimGrid), dim3(dimBlock), 0, 0, nodes, edges, in_d, out_d, run_d, edges_d, pagerank_old_d, pagerank_new_d);
}
hipMemcpy(pagerank_old, pagerank_old_d, nodes * sizeof(double), hipMemcpyDeviceToHost);
gettimeofday(&stop, NULL);
fprintf(stderr, "Compute took %lf seconds\n", (stop.tv_sec - start.tv_sec) +
((stop.tv_usec - start.tv_usec) / 1000000.0));
hipFree(in_d);
hipFree(out_d);
hipFree(run_d);
hipFree(edges_d);
hipFree(pagerank_old_d);
hipFree(pagerank_new_d);
for (i = 0; i < nodes; i++)
printf("%.15lf:%d,", pagerank_old[i], i);
free(indegree_count);
free(outdegree_count);
free(pagerank_new);
free(pagerank_old);
free(running_edge_indices);
free(edges_1D);
return 0;
}
| 8833465509a20f7eac549676b10b9de9534070b7.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <assert.h>
#include <sys/time.h>
#include <malloc.h>
#include <math.h>
#include <cuda_runtime.h>
#define BLOCKWIDTH 1024
int *allocateMemoryInt(int length) {
int *vec;
if ((vec = (int *)malloc(length * sizeof(int))) == NULL) {
fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
exit(1);
}
memset(vec, 0, length * sizeof(int));
return vec;
}
double *allocateMemoryDouble(int length) {
double *vec;
if ((vec = (double *)malloc(length * sizeof(double))) == NULL) {
fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
exit(1);
}
return vec;
}
__global__ void CalcPageRank(int nodes, int edges, int *in_d, int *out_d,
int *run_d, int *edges_d, double *pagerank_old_d, double *pagerank_new_d) {
int node_index = blockIdx.x * BLOCKWIDTH + threadIdx.x;
if(node_index<nodes) {
double sum = 0;
double d = 0.85;
double jumpChance = (1 - d) * (1.0 / nodes);
int stopIdx = run_d[node_index] + in_d[node_index];
int k;
for (k = run_d[node_index]; k < stopIdx; k++) {
int jk = edges_d[k];
sum += pagerank_old_d[jk] / out_d[jk];
}
pagerank_new_d[node_index] = sum * d + jumpChance;
}
__syncthreads();
pagerank_old_d[node_index] = pagerank_new_d[node_index];
}
int main () {
int i = 0, j = 0, k = 0, run = 0, idx = 0;
int nodes = 38;
int edges = 156;
int iter = 38;
struct timeval stop, start;
int* indegree_count=allocateMemoryInt(nodes);
int* outdegree_count=allocateMemoryInt(nodes);
int* running_edge_indices=allocateMemoryInt(nodes);
int* edges_1D = allocateMemoryInt(edges);//node1:node2|node2->node1
double* pagerank_new;
double* pagerank_old;
if ((pagerank_new = (double *)malloc(nodes * sizeof(double))) == NULL) {
fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
exit(1);
}
if ((pagerank_old = (double *)malloc(nodes * sizeof(double))) == NULL) {
fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
exit(1);
}
for (i = 0; i < nodes; i++) {
pagerank_old[i] = 1.0 / (double)nodes;
}
memset(pagerank_new, 0, nodes * sizeof(double));
// set starting values for pagerank values to 1/n
for (i = 0; i < nodes; i++)
pagerank_old[i] = 1.0 / (double)nodes;
gettimeofday(&start, NULL);
setvbuf(stdin, NULL, _IOFBF, edges);
//reads in edges
for (i = 0; i < edges; i++) {
scanf("%d\n", &j);
edges_1D[i] = j;
}
//reads in in-degrees, out-degrees, and computes running idx
for (i = 0; i < nodes; i++) {
scanf("%d %d %d\n", &idx, &j, &k);
indegree_count[idx] = j;
outdegree_count[idx] = k;
running_edge_indices[idx] = run;
run += j;
}
gettimeofday(&stop, NULL);
fprintf(stderr, "Read took %lf seconds\n", (stop.tv_sec - start.tv_sec) +
((stop.tv_usec - start.tv_usec) / 1000000.0));
// Begin Cuda Setup
int *in_d, *out_d, *run_d, *edges_d;
double *pagerank_new_d, *pagerank_old_d;
int node_size = nodes * sizeof(int);
int pr_size = nodes * sizeof(double);
int edges_size = edges * sizeof(int);
cudaMalloc(&in_d, node_size);
cudaMemcpy(in_d, indegree_count, node_size, cudaMemcpyHostToDevice);
cudaMalloc(&out_d, node_size);
cudaMemcpy(out_d, outdegree_count, node_size, cudaMemcpyHostToDevice);
cudaMalloc(&run_d, node_size);
cudaMemcpy(run_d, running_edge_indices, node_size, cudaMemcpyHostToDevice);
cudaMalloc(&edges_d, edges_size);
cudaMemcpy(edges_d, edges_1D, edges_size, cudaMemcpyHostToDevice);
cudaMalloc(&pagerank_old_d,pr_size);
cudaMemcpy(pagerank_old_d, pagerank_old, pr_size, cudaMemcpyHostToDevice);
cudaMalloc(&pagerank_new_d,pr_size);
//cudaMemcpy(pagerank_new_d, pagerank_new, pr_size, cudaMemcpyHostToDevice);
int blocks = ceil((double)nodes/(double)BLOCKWIDTH);
dim3 dimGrid(blocks, 1, 1);
dim3 dimBlock(BLOCKWIDTH, 1, 1);
for(i=0; i < iter; i++) {
CalcPageRank<<<dimGrid, dimBlock>>>(nodes, edges, in_d, out_d, run_d, edges_d, pagerank_old_d, pagerank_new_d);
}
cudaMemcpy(pagerank_old, pagerank_old_d, nodes * sizeof(double), cudaMemcpyDeviceToHost);
gettimeofday(&stop, NULL);
fprintf(stderr, "Compute took %lf seconds\n", (stop.tv_sec - start.tv_sec) +
((stop.tv_usec - start.tv_usec) / 1000000.0));
cudaFree(in_d);
cudaFree(out_d);
cudaFree(run_d);
cudaFree(edges_d);
cudaFree(pagerank_old_d);
cudaFree(pagerank_new_d);
for (i = 0; i < nodes; i++)
printf("%.15lf:%d,", pagerank_old[i], i);
free(indegree_count);
free(outdegree_count);
free(pagerank_new);
free(pagerank_old);
free(running_edge_indices);
free(edges_1D);
return 0;
}
|
bcd69781f7535bc2575b9f51d8c1d0b2cc9305eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "../include/cudaconv2.cuh"
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread.
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise
* targets: (numColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* Number of filters must be divisible by 16.
* Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*/
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color(const float* hidActs, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX,
const int paddingStart, const int moduleStride,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[numColors*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
const int numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeX * imgSizeY;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + loadY * numImages * numModules + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[numColors][imgsPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < numColors; c++) {
shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numImageColors/numGroups must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
const int moduleStride, const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
const uint numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX;
filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multipply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: B_YxB_X.
 * blockIdx.x is a flattened 2D index that determines both the case (in batches of B_X*imgsPerThread)
 * and the color (in batches of B_Y*colorsPerThread). In essence, its image-batch component ranges over
 * 1..numImages/(B_X*imgsPerThread) and its color component over 1..numImgColors/(B_Y*colorsPerThread).
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by filterCache.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
* filterCache must be divisible by B_X*B_Y/32
* B_X*B_Y must be divisible by filterCache
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads filterCache weights at a time, so those aren't fully coalesced (depending on size of filterCache).
*
* To be used when there are >= 16 color channels.
*/
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCache, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets,
                                   const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                   const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                   const int numImgColors, const int numGroups,
                                   const float scaleTargets, const float scaleOutputs) {
    // Staged weights for the current chunk of filterCache filters; the inner dimension is
    // padded by 1 (presumably to stagger shared-memory bank accesses -- see strided
    // shFilterLoad writes below).
    __shared__ float shFilters[colorsPerThread*B_Y][filterCache + 1];
    // Staged hidden activations: filterCache filters x (B_X*imgsPerThread) cases.
    __shared__ float shHidActs[filterCache][B_X*imgsPerThread];

    // blockIdx.x packs two indices: image-batch (fast) and color-block (slow).
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // blockIdx.y selects the single target-image pixel this block reconstructs.
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // Cooperative-load coordinates: hidActs are loaded 32 cases wide per row,
    // filters are loaded filterCache filters wide per row.
    const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
    const int filtersLoadY = tidx / filterCache, filtersLoadX = tidx % filterCache;
    const int numModules = numModulesY * numModulesX;

    // Advance base pointers to this block's (and this thread's) slice of each array.
    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: colorsPerThread colors x imgsPerThread cases.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter support covers this output pixel (accounting for padding).
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            // This pixel's flat index within the filter window of module (my, mx).
            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += filterCache) { // multiply with filterCache filters at a time
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                // Stage hidden activations into shared memory (zero-fill past numImages
                // when checkCaseBounds so the compute loop needs no bounds checks).
                #pragma unroll
                for (int i = 0; i < imgsPerThread * B_X; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, (B_X*B_Y/32) * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        #pragma unroll
                        for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, (B_X*B_Y/32) * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                        }
                    }
                }
                // Stage the weights for this filter pixel. When !conv, each module has its
                // own weights, hence the extra moduleIdx offset.
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCache) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCache) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * (filterCache + 1)] = fLoad[i * filterPixels * numFilters];
                    }
                }

                __syncthreads();
                // Do some actual computation: accumulate weights x hidden acts for every
                // (color, image) pair owned by this thread.
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    #pragma unroll
                    for (int w = 0; w < filterCache; w++) {
                        #pragma unroll
                        for (int c = 0; c < colorsPerThread; c++) {
                            prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
                        }
                    }
                }
                __syncthreads();
            }
        }
    }
    // Write accumulated image activations; when scale, blend with existing targets.
    if (scale) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
* Block size: B_YxB_X.
 * blockIdx.x is a flattened 2D index that determines both the case (in batches of B_X*imgsPerThread)
 * and the color (in batches of B_Y*colorsPerThread). In essence, its image-batch component ranges over
 * 1..numImages/(B_X*imgsPerThread) and its color component over 1..numImgColors/(B_Y*colorsPerThread).
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by filterCacheF.
*
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by filterCacheF
* filterCacheF must be divisible by filterCacheH
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads filterCacheF weights at a time, so those aren't fully coalesced (depending on size of filterCacheF).
*
* To be used when there are >= 16 color channels.
*/
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor_kepler(const float* hidActs, const float* filters, float* targets,
                                   const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                   const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                   const int numImgColors, const int numGroups,
                                   const float scaleTargets, const float scaleOutputs) {
    // Weights are staged filterCacheF filters at a time; hidden activations are staged in
    // smaller filterCacheH-filter sub-chunks (filterCacheF must be divisible by filterCacheH).
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];

    // blockIdx.x packs two indices: image-batch (fast) and color-block (slow).
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // blockIdx.y selects the single target-image pixel this block reconstructs.
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // Hidden acts are loaded with the natural (ty, tx) thread layout here.
    const int hidActLoadY = threadIdx.y, hidActLoadX = threadIdx.x;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;

    // Advance base pointers to this block's (and this thread's) slice of each array.
    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: colorsPerThread colors x imgsPerThread cases.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter support covers this output pixel (accounting for padding).
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
    //const bool noFLoop = filterCacheF == filterCacheH;

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            // This pixel's flat index within the filter window of module (my, mx).
            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                // Stage filterCacheF weights for this filter pixel. When !conv, each
                // module has its own weights, hence the extra moduleIdx offset.
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = fLoad[i * filterPixels * numFilters];
                    }
                }
                //#pragma unroll
                // Inner pipeline: consume the staged weights in filterCacheH-filter chunks,
                // staging the matching hidden activations per chunk.
                for (int fh = f; fh < f + filterCacheF; fh += filterCacheH) {
                    //conv_img_acts_manycolor_dummy_fhLoop<B_Y, B_X, imgsPerThread, colorsPerThread, filterCacheF, filterCacheH, checkCaseBounds>(hidActs, shHidActLoad, shHidActs, shFilters, moduleIdx, numImages, hidActLoadY, hidActLoadX, blockCaseIdx, numModules, f, fh, prod);
                    const float* hLoad = &hidActs[(moduleIdx + fh * numModules) * numImages];
                    // Stage hidden acts (zero-fill past numImages when checkCaseBounds).
                    #pragma unroll
                    for (int j = 0; j < filterCacheH; j += B_Y) {
                        if (filterCacheH % B_Y == 0 || hidActLoadY + j < filterCacheH) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread*B_X; i += B_X) {
                                if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                                    shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                                } else {
                                    shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                                }
                            }
                        }
                    }
                    __syncthreads();
                    // Do some actual computation
                    // Using these variables causes register usage to go from 161 --> 123.
                    // But nonetheless, the high-register version is faster.
                    //const float* shF = &shFilters[threadIdx.y][fh-f];
                    //const float* const shF2 = &shFilters[threadIdx.y][fh];
                    //const float* shH = &shHidActs[0][threadIdx.x];
                    #pragma unroll
                    for (int w = 0; w < filterCacheH; w++) {
                        #pragma unroll
                        for (int c = 0; c < colorsPerThread; c++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                // fh-f offsets into the filterCacheF-wide weight tile.
                                prod[c][i] += shFilters[c * B_Y + threadIdx.y][fh-f + w] * shHidActs[w][threadIdx.x + i * B_X];
                            }
                        }
                    }
                    __syncthreads();
                }
            }
        }
    }
    // Write accumulated image activations; when scale, blend with existing targets.
    if (scale) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
* New Titan-optimized stuff.
*/
/*
 * Given a module position (my, mx), computes the module's flat index and the flat
 * index of the block's target pixel inside that module's filter window.
 * Results are returned through the two reference out-parameters.
 */
__device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(const int my, const int mx, const int numModulesX,
        const int paddingStart, const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize, int &moduleIdx, int &pxIdxInFilter) {
    moduleIdx = my * numModulesX + mx; // out: flat module index
    // Pixel coordinates relative to the filter window's top-left corner.
    const int relY = blockPixelIdxY - (paddingStart + my * moduleStride);
    const int relX = blockPixelIdxX - (paddingStart + mx * moduleStride);
    pxIdxInFilter = relY * filterSize + relX; // out: flat pixel index within filter
}
// Accumulate one staged-weight column (w + offset) against the staged hidden acts,
// images in the outer loop, colors inner.
#define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
    _Pragma("unroll") \
    for (int c = 0; c < colorsPerThread; c++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \

/*
 * Same loop as above but inverted.
 */
#define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
    _Pragma("unroll") \
    for (int i = 0; i < imgsPerThread; i++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \

// Like IA_PRELOAD_LOOP but for a fixed image index i, sweeping all filterCacheH columns.
#define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \
for (int w = 0; w < filterCacheH; w++) { \
    _Pragma("unroll") \
    for (int c = 0; c < colorsPerThread; c++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \

// Preload the z-th weight register for the next filter chunk (pointer variant).
#define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters];
// Same, reading through the filters texture object.
#define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters);
// Preload hidden-act register (y, x) for the next chunk, guarded against the case bound.
#define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
    hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \
}
// Same, reading through the hidActs texture object.
#define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
    hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \
}
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
__launch_bounds__(256, 2) // 256 threads per block, 2 blocks per multiprocessor
                          // These launch bounds ensure 25% occupancy (128 registers used)
                          // as opposed to 13% (130 registers) achieved by defaults.
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets,
                                   const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                   const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                   const int numImgColors, const int numGroups,
                                   const float scaleTargets, const float scaleOutputs) {
    // Software-pipelined variant: weights and hidden acts for the NEXT chunk are
    // prefetched into registers (wPreload/hPreload) while the current chunk computes.
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];

    // blockIdx.x packs two indices: image-batch (fast) and color-block (slow).
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int myCaseIdx = blockCaseIdx + threadIdx.x;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // blockIdx.y selects the single target-image pixel this block reconstructs.
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
//    const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;

    // Texture-fetch base offsets (element indices, not byte offsets).
    const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
//    hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
//    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;

    // Per-thread accumulators: colorsPerThread colors x imgsPerThread cases.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter support covers this output pixel (accounting for padding).
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
    //const bool noFLoop = filterCacheF == filterCacheH;

    /*
     * Initial preload: fill the weight and hidden-act registers for the first module/chunk.
     */
    float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4]
    float wPreload[filterCacheF*colorsPerThread/B_X]; // [8]

    int moduleIdx, pxIdxInFilter;
    conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                                                                         blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
//    const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
//                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
    int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0
                                                  : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
    #pragma unroll
    for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
        if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
            wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
        }
    }

//    const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages];
    int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages;
    #pragma unroll
    for (int j = 0; j < filterCacheH; j += B_Y) {
        if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
                }
            }
        }
    }

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            // Compute the coordinates of the NEXT module so its data can be prefetched
            // while the current module is being processed.
            int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
            const bool lastModule = my == endY - 1 && mx == endX - 1;
            if (!lastModule) {
                mxNext = mx + 1 == endX ? startX : mx + 1;
                myNext = my + (mx + 1 == endX);
            }
            conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                                                                                 blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);

            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                // Commit the preloaded weights to shared memory.
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
                    }
                }

                // Point the weight prefetch at the next filter chunk (or the next module
                // when this is the last chunk of the current module).
                filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
                                                          : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
                if (f == numFiltersPerGroup - filterCacheF) {
                    filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
                                                              : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }

                // Commit the preloaded hidden acts (first filterCacheH sub-chunk) to shared memory.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }

                __syncthreads();

                // Prefetch target for the second filterCacheH sub-chunk of this filter group.
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages;

                // Compute on the first sub-chunk while interleaving the next weight and
                // hidden-act prefetches (z sweeps the 16 shared-memory columns).
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_W_TX(z);
                }

                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_H_TX((z-4)/4,z%4);
                }

                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                }

                __syncthreads();

                // Commit the second preloaded hidden-act sub-chunk to shared memory.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }

                __syncthreads();

                // Prefetch target for the next filter group (or the next module).
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }

                // Compute on the second sub-chunk (weight columns offset by filterCacheH),
                // again interleaving the prefetches.
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_W_TX(z+4);
                }

                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_H_TX((z-4)/4, z%4);
                }

                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                }

                __syncthreads();
            }
        }
    }
    // Write accumulated image activations; when scale, blend with existing targets.
    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
//__launch_bounds__(128, 3) // 128 threads per block, 3 blocks per multiprocessor
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
__shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int myCaseIdx = blockCaseIdx + threadIdx.x;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
// const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
//const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
// nvcc is behaving idiotically again, these useless declarations save registers
//const int outputY = threadIdx.y, outputX = threadIdx.x;
//const int ty = threadIdx.y, tx = threadIdx.x;
const int numModules = numModulesY * numModulesX;
const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
// hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
// filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
//const bool noFLoop = filterCacheF == filterCacheH;
/*
* Initial preload
*/
float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4]
float wPreload[filterCacheF*colorsPerThread/B_X]; // [6]
int moduleIdx, pxIdxInFilter;
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
// const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
// : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters
: moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
}
}
// const float* hLoad = &hidActs[moduleIdx * numImages];
int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages;
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
}
}
}
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
const bool lastModule = my == endY - 1 && mx == endX - 1;
if (!lastModule) {
mxNext = mx + 1 == endX ? startX : mx + 1;
myNext = my + (mx + 1 == endX);
}
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);
for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
}
}
filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
: moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
if (f == numFiltersPerGroup - filterCacheF) {
filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
: moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
}
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
}
}
}
}
hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
if (f == numFiltersPerGroup - filterCacheF) {
hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
}
__syncthreads();
// It seems that there is no point explicitly interleaving loads
// and computations because the scheduler does that anyway.
IA_PRELOAD_LOOP2(0,0);
IA_PRELOAD_LOOP2(1,0);
IA_PRELOAD_LOOP2(2,0);
IA_PRELOAD_LOOP2(3,0);
IA_PRELOAD_LOOP2(4,0);
IA_PRELOAD_LOOP2(5,0);
IA_PRELOAD_LOOP2(6,0);
IA_PRELOAD_LOOP2(7,0);
IA_PRELOAD_LOOP2(8,0);
IA_PRELOAD_LOOP2(9,0);
IA_PRELOAD_LOOP2(10,0);
IA_PRELOAD_LOOP2(11,0);
IA_PRELOAD_LOOP2(12,0);
IA_PRELOAD_LOOP2(13,0);
IA_PRELOAD_LOOP2(14,0);
IA_PRELOAD_LOOP2(15,0);
IA_PRELOAD_W_TX(0);
IA_PRELOAD_W_TX(1);
IA_PRELOAD_W_TX(2);
IA_PRELOAD_W_TX(3);
IA_PRELOAD_W_TX(4);
IA_PRELOAD_W_TX(5);
IA_PRELOAD_H_TX(0,0);
IA_PRELOAD_H_TX(0,1);
IA_PRELOAD_H_TX(0,2);
IA_PRELOAD_H_TX(0,3);
IA_PRELOAD_H_TX(1,0);
IA_PRELOAD_H_TX(1,1);
IA_PRELOAD_H_TX(1,2);
IA_PRELOAD_H_TX(1,3);
IA_PRELOAD_H_TX(2,0);
IA_PRELOAD_H_TX(2,1);
IA_PRELOAD_H_TX(2,2);
IA_PRELOAD_H_TX(2,3);
IA_PRELOAD_H_TX(3,0);
IA_PRELOAD_H_TX(3,1);
IA_PRELOAD_H_TX(3,2);
IA_PRELOAD_H_TX(3,3);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
void _imgActs(caffe2::CUDAContext* context, caffe2::TensorCUDA* hidActs, caffe2::TensorCUDA* filters, caffe2::TensorCUDA* targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
CAFFE_ENFORCE(hidActs->ndim() == 2);
CAFFE_ENFORCE(filters->ndim() == 2);
CAFFE_ENFORCE(targets->ndim() == 2);
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs->dim32(1);
int numFilters = filters->dim32(1);
int numModules = hidActs->dim32(0) / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters->dim32(0) / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
CAFFE_ENFORCE(numImgColors % numGroups == 0);
CAFFE_ENFORCE(numFilters % (16*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that.
CAFFE_ENFORCE(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
CAFFE_ENFORCE(numGroups == 1 || numFilterColors % 4 == 0);
CAFFE_ENFORCE(filterPixels == filterSize * filterSize);
CAFFE_ENFORCE(hidActs->dim32(0) == numModules * numFilters);
CAFFE_ENFORCE(filters->dim32(0) == filterModuleMult * numFilterColors * filterPixels);
CAFFE_ENFORCE(numModules == numModulesY * numModulesX);
// These routines don't handle the case when only part of the image is visited in the convolution
CAFFE_ENFORCE(paddingStart <= 0);
CAFFE_ENFORCE(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
CAFFE_ENFORCE(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
CAFFE_ENFORCE(moduleStride <= filterSize);
dim3 blocks;
dim3 threads;
int colorsPerThread, imgsPerThread;
if (numFilterColors % 8 == 0) {
threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4);
colorsPerThread = numFilterColors % 64 == 0 ? 8
: numFilterColors % 48 == 0 ? 12
: numFilterColors % 32 == 0 ? 8
: numFilterColors % 16 == 0 ? 4
: 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
CAFFE_ENFORCE(numFilterColors % (threads.y * colorsPerThread) == 0);
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
// NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!!
} else if (numFilterColors > 3) {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
} else {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
if (scaleTargets == 0) { // do not scale or use targets matrix
targets->Resize(std::vector<int>{numImgColors*imgPixels, numImages});
} else {
CAFFE_ENFORCE(targets->dim32(0) == numImgColors * imgPixels);
CAFFE_ENFORCE(targets->dim32(1) == numImages);
}
const bool scale = scaleTargets != 0;
hipTextureObject_t tex_hidacts = GetTensorTextureObject(hidActs);
hipTextureObject_t tex_filters = GetTensorTextureObject(filters);
float* hidacts_data = hidActs->mutable_data<float>();
float* filters_data = filters->mutable_data<float>();
float* targets_data = targets->mutable_data<float>();
hipStream_t stream = context->cuda_stream();
// hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, hipFuncCachePreferShared);
// hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream,
// tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize,
// imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
//return;
// printf("conv: %d\n", conv);
// printf("scale: %d\n", scale);
// printf("checkCaseBounds: %d\n", checkCaseBounds);
// printf("numFilterColors: %d\n", numFilterColors);
// printf("numImages: %d\n", numImages);
// hipStream_t stream = NVMatrix::getDefaultStream();
if (conv == true) {
if (scale == false) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 3, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 3, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 3, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 3, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 1, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 1, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
else if (scale == true) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 3, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 3, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 1, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 1, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
// NOTE(review): dead branch — this `else if` is nested inside the
// `numFilterColors > 3` chain opened above, so numFilterColors == 2 can
// never hold here. The reachable == 2 case lives in the separate
// `numFilterColors <= 3` chain below. Left in place (generator artifact);
// removing it would be behavior-neutral.
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, true, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
}
// Non-convolutional path: every instantiation below ends its template
// argument list with `false` where the conv==true section above used
// `true` — presumably the "filters shared across modules" flag for
// locally-connected layers; confirm against the kernel declarations.
else if (conv == false) {
// scale == false: instantiations carry the scale template flag = false.
// Note scaleTargets/scaleOutput are still forwarded as runtime arguments.
if (scale == false) {
// numImages divides evenly into the image tile: bounds-check-free kernels,
// chosen by the largest divisor of numImages (128 / 64 / 32 / 16).
if (checkCaseBounds == false) {
// >= 8 filter colors: manycolor Kepler kernels (texture-object variants
// used for the widest 128-image cases, e.g. the preloadfh kernels below).
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
// NOTE(review): dead branch — nested inside `numFilterColors > 3`, so
// numFilterColors == 2 is unreachable here; the live == 2 dispatch is in
// the `numFilterColors <= 3` chain below. Generator artifact, harmless.
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
// conv == false, scale == false, arbitrary numImages: single
// bounds-checked instantiation per color/filter configuration (the
// `numImages % 1 == 0` guards below are tautological generator artifacts).
else if (checkCaseBounds == true) {
// >= 8 filter colors: manycolor Kepler kernels with bounds checking.
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
else if (scale == true) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 3, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 3, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 1, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 1, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, true, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
}
checkCudaErrors(hipDestroyTextureObject(tex_hidacts));
checkCudaErrors(hipDestroyTextureObject(tex_filters));
getLastCudaError("imgActs: kernel execution failed");
}
void convImgActs(caffe2::CUDAContext* context, caffe2::TensorCUDA* hidActs, caffe2::TensorCUDA* filters, caffe2::TensorCUDA* targets,
                 int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    // Convolutional image-acts with default scaling: the result overwrites
    // `targets` (scaleTargets = 0, scaleOutput = 1). The trailing `true`
    // selects the convolutional (shared-filter) code path in _imgActs.
    const float kScaleTargets = 0;
    const float kScaleOutput = 1;
    _imgActs(context, hidActs, filters, targets,
             imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride,
             numImgColors, numGroups, kScaleTargets, kScaleOutput, true);
}
void convImgActs(caffe2::CUDAContext* context, caffe2::TensorCUDA* hidActs, caffe2::TensorCUDA* filters, caffe2::TensorCUDA* targets,
                 int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                 float scaleTargets, float scaleOutput) {
    // Convolutional image-acts with caller-supplied blending coefficients:
    // targets = scaleTargets * targets + scaleOutput * result.
    // The trailing `true` selects the convolutional (shared-filter) path.
    _imgActs(context, hidActs, filters, targets,
             imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride,
             numImgColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActs(caffe2::CUDAContext* context, caffe2::TensorCUDA* hidActs, caffe2::TensorCUDA* filters, caffe2::TensorCUDA* targets,
                  int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    // Locally-connected (unshared-filter) image-acts with default scaling:
    // the result overwrites `targets` (scaleTargets = 0, scaleOutput = 1).
    // The trailing `false` selects the non-convolutional path in _imgActs.
    const float kScaleTargets = 0;
    const float kScaleOutput = 1;
    _imgActs(context, hidActs, filters, targets,
             imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride,
             numImgColors, numGroups, kScaleTargets, kScaleOutput, false);
}
void localImgActs(caffe2::CUDAContext* context, caffe2::TensorCUDA* hidActs, caffe2::TensorCUDA* filters, caffe2::TensorCUDA* targets,
                  int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                  float scaleTargets, float scaleOutput) {
    // Locally-connected (unshared-filter) image-acts with caller-supplied
    // blending: targets = scaleTargets * targets + scaleOutput * result.
    // The trailing `false` selects the non-convolutional path in _imgActs.
    _imgActs(context, hidActs, filters, targets,
             imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride,
             numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
| bcd69781f7535bc2575b9f51d8c1d0b2cc9305eb.cu | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "../include/cudaconv2.cuh"
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread.
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise
* targets: (numColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* Number of filters must be divisible by 16.
* Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*/
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color(const float* hidActs, const float* filters, float* targets,
                               const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                               const int filterSize, const int imgSizeY, const int imgSizeX,
                               const int paddingStart, const int moduleStride,
                               const float scaleTargets, const float scaleOutputs) {
    // Shared staging tiles: 16 filters' weights for each color (inner dim
    // padded by 1 to avoid shared-memory bank conflicts on column access)
    // and 16 filters x 16*imgsPerThread hidden activations.
    __shared__ float shFilters[numColors*16][16 + 1];
    __shared__ float shHidActs[16][16*imgsPerThread];

    // blockIdx.x picks a batch of 16*imgsPerThread cases; blockIdx.y picks a
    // 4x4 pixel region of the target image.
    const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
    const int numRegionsX = DIVUP(imgSizeX, 4);
    const int blockRegionIdx = blockIdx.y;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    // threadIdx.y selects the pixel inside the 4x4 region; threadIdx.x the case.
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSizeX + pxX;
    // Regions near the image edge may overhang; out-of-image threads still
    // participate in the cooperative shared-memory loads below.
    const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
    const int numModules = numModulesY * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeX * imgSizeY;
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    // Hidacts are loaded 32 cases at a time for full coalescing.
    const int loadY = tidx / 32, loadX = tidx % 32;

    hidActs += blockCaseIdx + loadY * numImages * numModules + loadX;
    filters += threadIdx.x;
    targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[numColors][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < numColors; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter window overlaps this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                       : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                       : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zeros so the compute
                        // loop needs no bounds checks.
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                // All threads (including uninterested ones) reach this barrier.
                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                // Barrier before the next iteration overwrites the shared tiles.
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            // Blend into existing targets: t = scaleTargets*t + scaleOutputs*prod.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numImageColors/numGroups must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets,
                                     const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                     const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
                                     const int moduleStride, const int numImgColors, const int numGroups,
                                     const float scaleTargets, const float scaleOutputs) {
    // Shared staging tiles: 16 filters' weights for colorsPerThread colors
    // (inner dim padded by 1 to avoid shared-memory bank conflicts) and
    // 16 filters x 16*imgsPerThread hidden activations.
    __shared__ float shFilters[colorsPerThread*16][16 + 1];
    __shared__ float shHidActs[16][16*imgsPerThread];

    // blockIdx.x encodes both the image batch and the color batch.
    const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // blockIdx.y picks a 4x4 pixel region; threadIdx.y a pixel within it.
    const int numRegionsX = DIVUP(imgSizeX, 4);
    const int blockRegionIdx = blockIdx.y;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSizeX + pxX;
    // Edge regions may overhang; out-of-image threads still help with loads.
    const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
    const uint numModules = numModulesY * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    // Hidacts are loaded 32 cases at a time for full coalescing.
    const int loadY = tidx / 32, loadX = tidx % 32;

    hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX;
    filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
    targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter window overlaps this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                       : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                       : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multipply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zeros so the compute
                        // loop needs no bounds checks.
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                // All threads (including uninterested ones) reach this barrier.
                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                // Barrier before the next iteration overwrites the shared tiles.
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            // Blend into existing targets: t = scaleTargets*t + scaleOutputs*prod.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by filterCache.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
* filterCache must be divisible by B_X*B_Y/32
* B_X*B_Y must be divisible by filterCache
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads filterCache weights at a time, so those aren't fully coalesced (depending on size of filterCache).
*
* To be used when there are >= 16 color channels.
*/
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCache, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets,
                                        const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                        const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                        const int numImgColors, const int numGroups,
                                        const float scaleTargets, const float scaleOutputs) {
    // Weight tile (inner dim padded by 1 to avoid shared-memory bank
    // conflicts) and hidden-activation tile.
    __shared__ float shFilters[colorsPerThread*B_Y][filterCache + 1];
    __shared__ float shHidActs[filterCache][B_X*imgsPerThread];

    // blockIdx.x encodes the image batch and the color batch;
    // blockIdx.y selects a single target pixel.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // Hidacts are loaded 32 cases at a time (coalesced); filter weights
    // filterCache at a time.
    const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
    const int filtersLoadY = tidx / filterCache, filtersLoadX = tidx % filterCache;
    const int numModules = numModulesY * numModulesX;

    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter window covers this output pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                       : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                       : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += filterCache) { // multiply with filterCache filters at a time
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * B_X; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zeros so the compute
                        // loop needs no bounds checks.
                        #pragma unroll
                        for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                        }
                    }
                }

                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCache) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCache) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * (filterCache + 1)] = fLoad[i * filterPixels * numFilters];
                    }
                }

                __syncthreads();
                // Do some actual computation
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    #pragma unroll
                    for (int w = 0; w < filterCache; w++) {
                        #pragma unroll
                        for (int c = 0; c < colorsPerThread; c++) {
                            prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
                        }
                    }
                }
                // Barrier before the next iteration overwrites the shared tiles.
                __syncthreads();
            }
        }
    }
    if (scale) {
        // Blend into existing targets: t = scaleTargets*t + scaleOutputs*prod.
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by filterCacheF.
*
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by filterCacheF
* filterCacheF must be divisible by filterCacheH
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads filterCacheF weights at a time, so those aren't fully coalesced (depending on size of filterCacheF).
*
* To be used when there are >= 16 color channels.
*/
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor_kepler(const float* hidActs, const float* filters, float* targets,
                                               const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                               const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                               const int numImgColors, const int numGroups,
                                               const float scaleTargets, const float scaleOutputs) {
    // Weight tile (filterCacheF filters at a time) and hidden-activation tile
    // (filterCacheH filters at a time; filterCacheF is a multiple of filterCacheH).
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];

    // blockIdx.x encodes the image batch and the color batch;
    // blockIdx.y selects a single target pixel.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    const int hidActLoadY = threadIdx.y, hidActLoadX = threadIdx.x;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;

    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter window covers this output pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                       : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                       : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
    //const bool noFLoop = filterCacheF == filterCacheH;

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                // Stage filterCacheF filters' weights for this pixel.
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = fLoad[i * filterPixels * numFilters];
                    }
                }
                //#pragma unroll
                // Inner loop streams filterCacheH-sized chunks of hidacts
                // against the already-staged filterCacheF weights.
                for (int fh = f; fh < f + filterCacheF; fh += filterCacheH) {
                    //conv_img_acts_manycolor_dummy_fhLoop<B_Y, B_X, imgsPerThread, colorsPerThread, filterCacheF, filterCacheH, checkCaseBounds>(hidActs, shHidActLoad, shHidActs, shFilters, moduleIdx, numImages, hidActLoadY, hidActLoadX, blockCaseIdx, numModules, f, fh, prod);
                    const float* hLoad = &hidActs[(moduleIdx + fh * numModules) * numImages];
                    #pragma unroll
                    for (int j = 0; j < filterCacheH; j += B_Y) {
                        if (filterCacheH % B_Y == 0 || hidActLoadY + j < filterCacheH) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread*B_X; i += B_X) {
                                if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                                    shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                                } else {
                                    // Out-of-range cases contribute zeros.
                                    shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                                }
                            }
                        }
                    }

                    __syncthreads();
                    // Do some actual computation
                    // Using these variables causes register usage to go from 161 --> 123.
                    // But nonetheless, the high-register version is faster.
                    //const float* shF = &shFilters[threadIdx.y][fh-f];
                    //const float* const shF2 = &shFilters[threadIdx.y][fh];
                    //const float* shH = &shHidActs[0][threadIdx.x];
                    #pragma unroll
                    for (int w = 0; w < filterCacheH; w++) {
                        #pragma unroll
                        for (int c = 0; c < colorsPerThread; c++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[c * B_Y + threadIdx.y][fh-f + w] * shHidActs[w][threadIdx.x + i * B_X];
                            }
                        }
                    }
                    // Barrier before the next chunk overwrites shHidActs.
                    __syncthreads();
                }
            }
        }
    }
    if (scale) {
        // Blend into existing targets: t = scaleTargets*t + scaleOutputs*prod.
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
* New Titan-optimized stuff.
*/
__device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(const int my, const int mx, const int numModulesX,
        const int paddingStart, const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize, int &moduleIdx, int &pxIdxInFilter) {
    // Flattened index of module (my, mx) within the module grid.
    moduleIdx = my * numModulesX + mx; // out
    // Image-space coordinates of the module's top-left corner.
    const int top  = paddingStart + my * moduleStride;
    const int left = paddingStart + mx * moduleStride;
    // Position of this block's pixel relative to the filter window,
    // flattened into a single filter-pixel index.
    const int fy = blockPixelIdxY - top;
    const int fx = blockPixelIdxX - left;
    pxIdxInFilter = fy * filterSize + fx; // out
}
/* Inner-product helpers for the preloading kernels below: accumulate
 * shFilters x shHidActs products into prod[][], sweeping images in the
 * outer loop and colors in the inner one. Block comments (not //) are
 * used around these macros because of the backslash continuations. */
#define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
_Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
} \
/*
 * Same loop as above but inverted.
 */
#define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
_Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
} \
/* Variant that fixes one image index i and sweeps all filterCacheH w's. */
#define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \
for (int w = 0; w < filterCacheH; w++) { \
_Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
} \
/* Preload the z-th filter weight through the plain pointer path. */
#define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters];
/* Preload the z-th filter weight through the texture-object path. */
#define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters);
/* Preload hidacts element (y,x), guarded by the image-count bounds check. */
#define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \
}
/* Texture-object variant of IA_PRELOAD_H. */
#define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \
}
/*
 * Backward pass w.r.t. the images ("img acts"): for one image pixel (blockIdx.y)
 * and one 64-color slab (B_Y*colorsPerThread), accumulates contributions from
 * every module whose filter window covers that pixel, reading hidActs and
 * filters through texture objects (tex1Dfetch).
 *
 * The name encodes the expected instantiation: B_Y=8, B_X=32, colorsPerThread=8,
 * filterCacheF=32, filterCacheH=16. NOTE(review): the hand-unrolled z-loops
 * below (0..4, 4..12, 12..16) hard-code those values; other template arguments
 * would read out of step with the preload registers -- confirm callers only
 * instantiate <8, 32, 4, 8, 32, 16, ...>.
 *
 * Layouts (inferred from the offset arithmetic; confirm against callers):
 *   hidActs: (numFilters, numModules, numImages)
 *   filters: (numFilterColors, filterPixels, numFilters) if conv,
 *            (numModules, numFilterColors, filterPixels, numFilters) otherwise
 *   targets: (numImgColors, imgPixels, numImages)
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
__launch_bounds__(256, 2) // 256 threads per block, 2 blocks per multiprocessor
// These launch bounds ensure 25% occupancy (128 registers used)
// as opposed to 13% (130 registers) achieved by defaults.
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets,
                                   const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                   const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                   const int numImgColors, const int numGroups,
                                   const float scaleTargets, const float scaleOutputs) {
    // Staged tiles: filters indexed [color][filter], hidacts [filter][image].
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];
    // blockIdx.x jointly enumerates image blocks and color slabs.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int myCaseIdx = blockCaseIdx + threadIdx.x;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
    // blockIdx.y enumerates image pixels; decompose into (y, x).
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    // Each thread loads one filter column: row = color, column = filter.
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;
    // Base texture offsets for this thread; per-module offsets are added later.
    const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;
    // Per-thread accumulators: one partial sum per (color, image) pair.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter window covers this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
    //const bool noFLoop = filterCacheF == filterCacheH;
    /*
     * Initial preload: fetch the first filter/hidact tiles into registers so
     * the main loop can overlap texture loads with the FMA work.
     */
    float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4]
    float wPreload[filterCacheF*colorsPerThread/B_X]; // [8]
    int moduleIdx, pxIdxInFilter;
    conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                                                                         blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
    // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
    //                           : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
    int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0
                                                  : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
    #pragma unroll
    for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
        if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
            wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
        }
    }
    // const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages];
    int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages;
    #pragma unroll
    for (int j = 0; j < filterCacheH; j += B_Y) {
        if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
                }
            }
        }
    }
    // Main loop over the covering modules. Each iteration also computes the
    // coordinates of the NEXT module so the last filter chunk can prefetch it.
    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;
        for (int mx = startX; mx < endX; mx++) {
            moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;
            pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
            int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
            const bool lastModule = my == endY - 1 && mx == endX - 1;
            if (!lastModule) {
                mxNext = mx + 1 == endX ? startX : mx + 1;
                myNext = my + (mx + 1 == endX);
            }
            conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                                                                                 blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);
            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                // Publish the preloaded filter registers into shared memory.
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
                    }
                }
                // Point the filter prefetch at the next chunk; on the last
                // chunk, at the next module instead.
                filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
                                                          : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
                if (f == numFiltersPerGroup - filterCacheF) {
                    filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
                                                              : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }
                // Publish the preloaded hidact registers (first filterCacheH rows).
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }
                __syncthreads();
                // First half of the chunk (filter rows 0..filterCacheH-1):
                // interleave FMAs with texture prefetches of the SECOND half.
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages;
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_W_TX(z);
                }
                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_H_TX((z-4)/4,z%4);
                }
                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                }
                __syncthreads();
                // Publish the second-half hidact registers just prefetched.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }
                __syncthreads();
                // Second half: FMAs on rows filterCacheH..filterCacheF-1 while
                // prefetching the next chunk's (or next module's) hidacts.
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_W_TX(z+4);
                }
                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_H_TX((z-4)/4, z%4);
                }
                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                }
                __syncthreads();
            }
        }
    }
    // Write back: either blend into existing targets (scale) or overwrite.
    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
 * Backward pass w.r.t. the images ("img acts"), sibling of the
 * ..._ty_8_tx_32_c_8_ff_32_fh_16_tex kernel above, specialized (per the name)
 * for B_Y=4, B_X=32, colorsPerThread=12, filterCacheF=16, filterCacheH=16.
 * Because filterCacheF == filterCacheH there is no half-chunk double buffering:
 * each iteration consumes one full 16-row tile, then prefetches the next.
 * NOTE(review): the hand-unrolled IA_PRELOAD_LOOP2(0..15) / IA_PRELOAD_W_TX(0..5)
 * / IA_PRELOAD_H_TX(0..3, 0..3) sequences hard-code those template values --
 * confirm callers only instantiate <4, 32, 4, 12, 16, 16, ...>.
 *
 * Layouts (inferred from the offset arithmetic; confirm against callers):
 *   hidActs: (numFilters, numModules, numImages)
 *   filters: (numFilterColors, filterPixels, numFilters) if conv,
 *            (numModules, numFilterColors, filterPixels, numFilters) otherwise
 *   targets: (numImgColors, imgPixels, numImages)
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
//__launch_bounds__(128, 3)   // 128 threads per block, 3 blocks per multiprocessor
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets,
                                   const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                   const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                   const int numImgColors, const int numGroups,
                                   const float scaleTargets, const float scaleOutputs) {
    // Staged tiles: filters indexed [color][filter], hidacts [filter][image].
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];
    // blockIdx.x jointly enumerates image blocks and color slabs.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int myCaseIdx = blockCaseIdx + threadIdx.x;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
    // blockIdx.y enumerates image pixels; decompose into (y, x).
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    // Each thread loads one filter column: row = color, column = filter.
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;
    // Base texture offsets for this thread; per-module offsets are added later.
    const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;
    // Per-thread accumulators: one partial sum per (color, image) pair.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter window covers this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
    //const bool noFLoop = filterCacheF == filterCacheH;
    /*
     * Initial preload: fetch the first filter/hidact tiles into registers so
     * the main loop can overlap texture loads with the FMA work.
     */
    float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4]
    float wPreload[filterCacheF*colorsPerThread/B_X]; // [6]
    int moduleIdx, pxIdxInFilter;
    conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                                                                         blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
    // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
    //                           : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
    int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters
                                                  : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
    #pragma unroll
    for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
        if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
            wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
        }
    }
    // const float* hLoad = &hidActs[moduleIdx * numImages];
    int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages;
    #pragma unroll
    for (int j = 0; j < filterCacheH; j += B_Y) {
        if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
                }
            }
        }
    }
    // Main loop over the covering modules. Each iteration also computes the
    // coordinates of the NEXT module so the last filter chunk can prefetch it.
    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;
        for (int mx = startX; mx < endX; mx++) {
            moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;
            pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
            int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
            const bool lastModule = my == endY - 1 && mx == endX - 1;
            if (!lastModule) {
                mxNext = mx + 1 == endX ? startX : mx + 1;
                myNext = my + (mx + 1 == endX);
            }
            conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                                                                                 blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);
            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                // Publish the preloaded filter registers into shared memory.
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
                    }
                }
                // Point the filter prefetch at the next chunk; on the last
                // chunk, at the next module instead.
                filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
                                                          : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
                if (f == numFiltersPerGroup - filterCacheF) {
                    filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
                                                              : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }
                // Publish the preloaded hidact registers into shared memory.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }
                // Hidact prefetch target: next chunk, or next module on the last chunk.
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }
                __syncthreads();
                // It seems that there is no point explicitly interleaving loads
                // and computations because the scheduler does that anyway.
                IA_PRELOAD_LOOP2(0,0);
                IA_PRELOAD_LOOP2(1,0);
                IA_PRELOAD_LOOP2(2,0);
                IA_PRELOAD_LOOP2(3,0);
                IA_PRELOAD_LOOP2(4,0);
                IA_PRELOAD_LOOP2(5,0);
                IA_PRELOAD_LOOP2(6,0);
                IA_PRELOAD_LOOP2(7,0);
                IA_PRELOAD_LOOP2(8,0);
                IA_PRELOAD_LOOP2(9,0);
                IA_PRELOAD_LOOP2(10,0);
                IA_PRELOAD_LOOP2(11,0);
                IA_PRELOAD_LOOP2(12,0);
                IA_PRELOAD_LOOP2(13,0);
                IA_PRELOAD_LOOP2(14,0);
                IA_PRELOAD_LOOP2(15,0);
                // Refill all preload registers for the next iteration.
                IA_PRELOAD_W_TX(0);
                IA_PRELOAD_W_TX(1);
                IA_PRELOAD_W_TX(2);
                IA_PRELOAD_W_TX(3);
                IA_PRELOAD_W_TX(4);
                IA_PRELOAD_W_TX(5);
                IA_PRELOAD_H_TX(0,0);
                IA_PRELOAD_H_TX(0,1);
                IA_PRELOAD_H_TX(0,2);
                IA_PRELOAD_H_TX(0,3);
                IA_PRELOAD_H_TX(1,0);
                IA_PRELOAD_H_TX(1,1);
                IA_PRELOAD_H_TX(1,2);
                IA_PRELOAD_H_TX(1,3);
                IA_PRELOAD_H_TX(2,0);
                IA_PRELOAD_H_TX(2,1);
                IA_PRELOAD_H_TX(2,2);
                IA_PRELOAD_H_TX(2,3);
                IA_PRELOAD_H_TX(3,0);
                IA_PRELOAD_H_TX(3,1);
                IA_PRELOAD_H_TX(3,2);
                IA_PRELOAD_H_TX(3,3);
                __syncthreads();
            }
        }
    }
    // Write back: either blend into existing targets (scale) or overwrite.
    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
void _imgActs(caffe2::CUDAContext* context, caffe2::TensorCUDA* hidActs, caffe2::TensorCUDA* filters, caffe2::TensorCUDA* targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
CAFFE_ENFORCE(hidActs->ndim() == 2);
CAFFE_ENFORCE(filters->ndim() == 2);
CAFFE_ENFORCE(targets->ndim() == 2);
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs->dim32(1);
int numFilters = filters->dim32(1);
int numModules = hidActs->dim32(0) / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters->dim32(0) / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
CAFFE_ENFORCE(numImgColors % numGroups == 0);
CAFFE_ENFORCE(numFilters % (16*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that.
CAFFE_ENFORCE(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
CAFFE_ENFORCE(numGroups == 1 || numFilterColors % 4 == 0);
CAFFE_ENFORCE(filterPixels == filterSize * filterSize);
CAFFE_ENFORCE(hidActs->dim32(0) == numModules * numFilters);
CAFFE_ENFORCE(filters->dim32(0) == filterModuleMult * numFilterColors * filterPixels);
CAFFE_ENFORCE(numModules == numModulesY * numModulesX);
// These routines don't handle the case when only part of the image is visited in the convolution
CAFFE_ENFORCE(paddingStart <= 0);
CAFFE_ENFORCE(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
CAFFE_ENFORCE(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
CAFFE_ENFORCE(moduleStride <= filterSize);
dim3 blocks;
dim3 threads;
int colorsPerThread, imgsPerThread;
if (numFilterColors % 8 == 0) {
threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4);
colorsPerThread = numFilterColors % 64 == 0 ? 8
: numFilterColors % 48 == 0 ? 12
: numFilterColors % 32 == 0 ? 8
: numFilterColors % 16 == 0 ? 4
: 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
CAFFE_ENFORCE(numFilterColors % (threads.y * colorsPerThread) == 0);
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
// NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!!
} else if (numFilterColors > 3) {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
} else {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
if (scaleTargets == 0) { // do not scale or use targets matrix
targets->Resize(std::vector<int>{numImgColors*imgPixels, numImages});
} else {
CAFFE_ENFORCE(targets->dim32(0) == numImgColors * imgPixels);
CAFFE_ENFORCE(targets->dim32(1) == numImages);
}
const bool scale = scaleTargets != 0;
cudaTextureObject_t tex_hidacts = GetTensorTextureObject(hidActs);
cudaTextureObject_t tex_filters = GetTensorTextureObject(filters);
float* hidacts_data = hidActs->mutable_data<float>();
float* filters_data = filters->mutable_data<float>();
float* targets_data = targets->mutable_data<float>();
cudaStream_t stream = context->cuda_stream();
// cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared);
// conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(
// tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize,
// imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
//return;
// printf("conv: %d\n", conv);
// printf("scale: %d\n", scale);
// printf("checkCaseBounds: %d\n", checkCaseBounds);
// printf("numFilterColors: %d\n", numFilterColors);
// printf("numImages: %d\n", numImages);
// cudaStream_t stream = NVMatrix::getDefaultStream();
if (conv == true) {
if (scale == false) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, true >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 8, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, true >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 4, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 3, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 8, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 3, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 4, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 1, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 8, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 1, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 4, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, true >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, true, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, true, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
else if (scale == true) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, true >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 8, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, true >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 4, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 3, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 8, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 3, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 4, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 1, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 8, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 1, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 4, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, true >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, true, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, true, true >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, true, true, true ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
}
else if (conv == false) {
if (scale == false) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
// numFilterColors divisible by 32 (but not 64/48): many-color Kepler kernel.
// The template arguments select a compile-time specialization keyed on the
// divisibility of numFilters (32 vs 16) and numImages (128/64/32/16); they
// appear to encode the thread-tile / colors-per-block / filter-cache shape —
// confirm exact parameter meaning against the kernel template definition.
// The trailing <..., false, false, false> flags match this dispatch context
// (no scaling, no per-case bounds check).
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
// NOTE: the % 16 case deliberately reuses the same specialization as % 32
// (third template argument 1) — this fallback pattern repeats throughout
// the dispatch table.
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
// numFilterColors divisible by 16 (but not 64/48/32): many-color Kepler
// kernel with the 4-colors-per-iteration specialization (4th template arg).
// Only the numFilters % 16 case is handled here; other filter counts fall
// through to whatever follows this dispatch chain.
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
// % 16 reuses the % 32 specialization (same pattern as the other groups).
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
// numFilterColors divisible by 8 only: smallest many-color specialization
// (4th template arg = 2). Same numImages divisibility ladder as above.
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
// numFilterColors > 3 but not a multiple of 8: only 4 and 2 are handled
// here (4 via img_acts_mediumcolor, 2 via img_acts_color). Note
// img_acts_color takes a shorter argument list — no numImgColors/numGroups
// (color count is baked into the template; presumably single-group only —
// confirm against the kernel definition).
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 8, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 4, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
// % 16 reuses the % 32 specialization.
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
// numFilterColors == 2 appears both here and in the <= 3 chain below; this
// arm is unreachable in practice (2 is not > 3) — kept verbatim, as the
// table is auto-generated and the duplicate is harmless.
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
// numFilterColors in {1, 2, 3}: few-color img_acts_color kernels with the
// color count as the second template argument and the images-per-thread
// factor (presumably — confirm in the kernel template) as the first.
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
// % 16 reuses the % 32 specialization, as elsewhere in the table.
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
// checkCaseBounds == true path (unscaled): specializations with the
// bounds-check template flag set (second boolean = true). Every image
// guard here is `numImages % 1 == 0`, which is always true — the
// bounds-checking kernels handle arbitrary numImages, so only the
// 1-per-thread specialization is instantiated per branch.
else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
// numFilterColors == 2 duplicated from the <= 3 chain below; unreachable
// in this > 3 arm — preserved verbatim (auto-generated table).
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
else if (scale == true) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(tex_hidacts, tex_filters, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 8, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 4, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 3, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 3, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 1, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 1, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if (numFilters % 16 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, true, true, false ><<<blocks, threads, 0, stream>>>(hidacts_data, filters_data, targets_data, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
}
checkCudaErrors(cudaDestroyTextureObject(tex_hidacts));
checkCudaErrors(cudaDestroyTextureObject(tex_filters));
getLastCudaError("imgActs: kernel execution failed");
}
/**
 * Convolutional imgActs with default scaling: the result overwrites
 * `targets` (scaleTargets = 0, scaleOutput = 1). Thin forwarder to _imgActs.
 */
void convImgActs(caffe2::CUDAContext* context, caffe2::TensorCUDA* hidActs, caffe2::TensorCUDA* filters, caffe2::TensorCUDA* targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    const float kScaleTargets = 0; // discard any existing contents of targets
    const float kScaleOutput  = 1; // write the new result unscaled
    _imgActs(context, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
             paddingStart, moduleStride, numImgColors, numGroups,
             kScaleTargets, kScaleOutput, /*conv=*/true);
}
/**
 * Convolutional imgActs with caller-controlled blending:
 * targets = scaleTargets*targets + scaleOutput*result. Thin forwarder to _imgActs.
 */
void convImgActs(caffe2::CUDAContext* context, caffe2::TensorCUDA* hidActs, caffe2::TensorCUDA* filters, caffe2::TensorCUDA* targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
    const bool kIsConv = true; // convolutional (shared) filters
    _imgActs(context, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
             paddingStart, moduleStride, numImgColors, numGroups,
             scaleTargets, scaleOutput, kIsConv);
}
/**
 * Locally-connected imgActs with default scaling: the result overwrites
 * `targets` (scaleTargets = 0, scaleOutput = 1). Thin forwarder to _imgActs.
 */
void localImgActs(caffe2::CUDAContext* context, caffe2::TensorCUDA* hidActs, caffe2::TensorCUDA* filters, caffe2::TensorCUDA* targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    const float kScaleTargets = 0; // discard any existing contents of targets
    const float kScaleOutput  = 1; // write the new result unscaled
    _imgActs(context, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
             paddingStart, moduleStride, numImgColors, numGroups,
             kScaleTargets, kScaleOutput, /*conv=*/false);
}
/**
 * Locally-connected imgActs with caller-controlled blending:
 * targets = scaleTargets*targets + scaleOutput*result. Thin forwarder to _imgActs.
 */
void localImgActs(caffe2::CUDAContext* context, caffe2::TensorCUDA* hidActs, caffe2::TensorCUDA* filters, caffe2::TensorCUDA* targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
    const bool kIsConv = false; // locally connected (unshared) filters
    _imgActs(context, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
             paddingStart, moduleStride, numImgColors, numGroups,
             scaleTargets, scaleOutput, kIsConv);
}
|
c4d7fde272aad8603de16b2ae45bdc5a8ac7a770.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file
* nd affine transform on the GPU with CUDA.
*
* \todo Specialized 2d and 3d (maybe 4d) implementations should be faster. Should do these since they are much more common.
* \todo See if using a thread for more than 1 pixel at a time helps
* \todo reduce register usage (50/thread at the moment!)
*/
#include "../core.h"
#include "../ops.h"
#include "stdio.h"
#include <stdint.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math_functions.h"
#include "generic/macros.h"
TYPEDEFS;
/// @cond DEFINES
#define MAXDIMS 8 // should be sizeof uchar
#define WARPS_PER_BLOCK 4
#define BLOCKSIZE (32*WARPS_PER_BLOCK) // threads per block
//#define DEBUG_OUTPUT
#ifdef DEBUG_OUTPUT
#define DBG(...) printf(__VA_ARGS__)
#else
#define DBG(...)
#endif
#define ENDL "\n"
#define LOG(...) ndLogError(dst_,__VA_ARGS__)
#define TRY(e) do{if(!(e)) {LOG("%s(%d): %s()"ENDL "\tExpression evaluated as failure."ENDL "\t%s"ENDL,__FILE__,__LINE__,__FUNCTION__,#e); goto Error; }}while(0)
#define CUTRY(e) do{hipError_t ecode=(e); if(ecode!=hipSuccess) {LOG("%s(%d): %s()"ENDL "\tExpression evaluated as failure."ENDL "\t%s"ENDL "\t%s"ENDL,__FILE__,__LINE__,__FUNCTION__,#e,hipGetErrorString(ecode)); goto Error; }}while(0)
#define FAIL LOG("%s(%d) %s()"ENDL "\tExecution should not have reached here."ENDL,__FILE__,__LINE__,__FUNCTION__); goto Error
#ifndef restrict
#define restrict __restrict__
#endif
// printf() is only supported
// for devices of compute capability 2.0 and higher
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#define printf(f, ...) ((void)(f, __VA_ARGS__),0)
#endif
/// @cond DEFINES
typedef uint8_t u8;
typedef uint32_t u32;
typedef unsigned int uint;
/// Plain-old-data view of an nd array, passed by value to the kernel.
/// Filled by make_arg() from an nd_t handle.
typedef struct arg_t_
{ u8 ndim; ///< The number of dimensions
u32 nelem; ///< The total number of elements
size_t *restrict shape; ///< Buffer of length ndim, ordered [w,h,d,...]. Always agrees with stride. Maintained for convenience.
size_t *restrict strides; ///< Buffer of length ndim+1, strides[i] is the number of bytes laid out between unit steps along dimension i
void *restrict data; ///< A pointer to the data.
} arg_t;
/// Clamp f to [a,b]. Note fminf/fmaxf return the non-NaN operand, so a NaN
/// input comes out as b here.
inline __device__ float clamp(float f, float a, float b) {return fmaxf(a, fminf(f, b));}
/// Convert a float to T, clamping to T's representable range.
template<class T> inline __device__ T saturate(float f);
template<> inline __device__ uint8_t saturate<uint8_t>(float f) {return clamp(f,0,UCHAR_MAX);}
template<> inline __device__ uint16_t saturate<uint16_t>(float f) {return clamp(f,0,USHRT_MAX);}
template<> inline __device__ uint32_t saturate<uint32_t>(float f) {return clamp(f,0,UINT_MAX);} // was ULONG_MAX, which is 64-bit on LP64 and does not clamp to the uint32 range; UINT_MAX rounds up in float but is the correct bound
template<> inline __device__ uint64_t saturate<uint64_t>(float f) {return clamp(f,0,ULLONG_MAX);} // FIXME - will overflow float type
template<> inline __device__ int8_t saturate< int8_t> (float f) {return clamp(f,CHAR_MIN,CHAR_MAX);}
template<> inline __device__ int16_t saturate< int16_t>(float f) {return clamp(f,SHRT_MIN,SHRT_MAX);}
template<> inline __device__ int32_t saturate< int32_t>(float f) {return clamp(f,INT_MIN,INT_MAX);} // was LONG_MIN/LONG_MAX, which are 64-bit on LP64 and did not clamp to the int32 range
template<> inline __device__ int64_t saturate< int64_t>(float f) {return clamp(f,LLONG_MIN,LLONG_MAX);} // FIXME - will overflow float type
template<> inline __device__ float saturate<float>(float f) {return f;}
template<> inline __device__ double saturate<double>(float f) {return f;}
#if 0
/**
* nD linear interpolation for maximum intensity composting.
*
* The boundary handling used here is designed for maximum intensity composting.
* A constant (determined by \a param->boundary_value) is returned for
* out-of-bounds samples.
* Samples stradling the border are handled as a special case.
*
* OUT OF USE
* Keeping it here because it was interesting. Might need it in the future? Slow at the moment.
*/
// NOTE: dead code — this definition sits inside an #if 0/#endif block (see
// the comment above: kept for reference only).
// NOTE(review): the corner loop bound ((1<<ndim)-1) skips the all-ones
// corner of the hypercube, so one lerp term appears to be missing — confirm
// before ever re-enabling this path.
template<class Tsrc,class Tdst>
inline __device__ Tdst sample(arg_t &src,const float *restrict const r,const nd_affine_params_t*const param)
{ uchar2 bounds=inbounds(src.ndim,src.shape,r); // bit i set if inbounds on dim i
// clamp to boundary value for out-of-bounds
if(!bounds.x && !bounds.y)
return param->boundary_value;
// compute offset to top left ish corner of lattice unit
u32 idx=0;
for(u8 i=0;i<src.ndim;++i)
idx+=src.strides[i]*floorf(r[i]);
// iterate over each corner of hypercube
float v(0.0f);
for(u8 i=0;i<((1<<src.ndim)-1);++i) // bits of i select left or right sample on each dimension
{ uchar2 o=make_uchar2(~i&~bounds.x,i&~bounds.y);// don't need to mask high bits of i
float w=1.0f;
int offset=0; // offset so corner clamps to edge
for(u8 idim=0;idim<src.ndim;++idim) // loop for dot-products w bit vector
{ const size_t s=src.strides[idim];
const float a=fpartf(r[idim]),
b=1.0f-a;
#define BIT(bs_,i_) (((bs_)&(1<<(i_)))!=0)
offset+=BIT(o.x,idim)*s // clamp corner (top left ish)
-BIT(o.y,idim)*s // clamp corner (bot right ish)
+BIT(i,idim) *s; // normal corner offset
w*=BIT(i,idim)*a+BIT(~i,idim)*b; // weight for corner is a product of lerp weights for each dimension
#undef BIT
}
v+=w*((Tsrc*)src.data)[idx+offset]; // weighted value for corner
}
return saturate<Tdst>(v);
}
#endif
// Parenthesized so the expansion binds as one expression inside larger
// contexts (the old form ((a)>(b))?(a):(b) mis-parses in e.g. max(a,b)*2).
// Beware: arguments are still evaluated twice.
#define max(a,b) (((a)>(b))?(a):(b))
// Helpers for flattening CUDA's 3-d launch indices into a single linear index.
inline __device__ unsigned prod(dim3 a) {return a.x*a.y*a.z;} ///< number of elements in extent a
inline __device__ unsigned stride(uint3 a, dim3 b) {return a.x+b.x*(a.y+b.y*a.z);} ///< row-major linearization of index a within extent b
inline __device__ unsigned sum(uint3 a) {return a.x+a.y+a.z;} ///< equals the linear thread id only for 1-d blocks
/** \todo Respect strides. Currently assumes strides reflect shape. */
/**
 * Max-intensity composite of an affinely transformed source into dst:
 * each output element becomes max(dst[i], sampled src value), where the
 * source is point-sampled at transform applied to the output coordinate,
 * or param.boundary_value when the mapped coordinate is out of bounds
 * (with a one-pixel nearest clamp at the border).
 *
 * transform layout: src.ndim rows of (dst.ndim+1) columns, row-major; the
 * last column of each row is the translation term.
 *
 * NOTE(review): idst = sum(threadIdx)+... is only a valid linear thread id
 * when blockDim is 1-d; the host launcher uses threads=(BLOCKSIZE,1,1), so
 * this holds today — confirm if the launch configuration ever changes.
 */
template<typename Tsrc,typename Tdst>
__global__ void
__launch_bounds__(BLOCKSIZE,1) /*max threads,min blocks*/
affine_kernel(arg_t dst, arg_t src, const float *transform, const nd_affine_params_t param)
{
Tdst o,v;
unsigned idst = sum(threadIdx)+stride(blockIdx,gridDim)*prod(blockDim);
#if 0
if(blockIdx.x==0 && threadIdx.x==2)
printf("ksize src:%d dst:%d\n",(int)sizeof(*ibuf),(int)sizeof(*obuf));
#endif
if(idst<dst.nelem)
{
/////
unsigned isrc=0; // byte offset of the source sample
u8 oob=0; // set when the mapped coordinate falls outside src
#if 1 // 30 ms without this block, 200 ms with (64x64x64x64)
// Unravel idst into an nd output coordinate and map it through row r of
// the transform to get the source coordinate on dimension r.
for(u8 r=0;r<src.ndim;++r)
{ int coord=0;
unsigned i=idst,o=(dst.ndim+1)*r;
for(u8 c=0;c<dst.ndim;++c)
{ coord+=(int)((i%dst.shape[c])*transform[o+c]); // NOTE(review): each term is truncated toward zero before summing; rounding the float sum once would differ — confirm intended
i/=dst.shape[c];
}
coord+=transform[o+dst.ndim]; // translation column (truncated by the int +=)
// bc: nearest for 1 px
if(coord==-1) coord=0;
if(coord==src.shape[r]) coord=src.shape[r]-1;
// bc: clamp to boundary_value elsewhere
if(coord<0 || src.shape[r]<=coord)
{ oob=1;
break;
}
isrc+=src.strides[r]*coord;
}
#endif
v=(oob)?param.boundary_value:saturate<Tdst>(*(Tsrc*)((u8*)src.data+isrc));
/////
o=((Tdst*)dst.data)[idst];
((Tdst*)dst.data)[idst]=max(o,v); // max-intensity composite
}
}
/** Snapshot the fields of \a a that the kernel needs into a POD arg_t. */
static arg_t make_arg(const nd_t a)
{ arg_t out;
  out.ndim   =(u8) ndndim(a);
  out.nelem  =(u32)ndnelem(a);
  out.shape  =(size_t*)ndCudaShape(a);
  out.strides=(size_t*)ndCudaStrides(a);
  out.data   =nddata(a);
  return out;
}
//
// === Interface ===
//
#include <math.h>
/**
 * Pick a size for one grid axis when packing \a n blocks under cap \a limit.
 * Returns n unchanged when it already fits (with *rem=0). Otherwise scans
 * candidates c in (n/limit, limit) and returns the one wasting the fewest
 * padded blocks; *rem is set to 1 when the winner does not divide n exactly.
 * Returns 0 (a failure for the caller's TRY) when no candidate was examined.
 */
static unsigned nextdim(unsigned n, unsigned limit, unsigned *rem)
{ unsigned best=0, bestWaste=limit, waste=limit;
  *rem=0;
  if(n<limit)
    return n;
  for(unsigned c=n/limit+1; c<limit && waste>0; ++c)
  { waste=(unsigned)(c*ceil(n/(float)c)-n);
    if(waste<bestWaste)
    { bestWaste=waste;
      best=c;
    }
  }
  *rem=(bestWaste!=0);
  return best;
}
/**
 * Assume the ndkind() of \a src_ and \a dst_ have already been checked.
 */
/**
 * Launches affine_kernel over every element of \a dst_, dispatching on the
 * src/dst element types via the file's TYPECASE macros.
 * \returns 1 on success, 0 on failure (details are logged through LOG()).
 */
extern "C" unsigned ndaffine_cuda(nd_t dst_, const nd_t src_, const float *transform, const nd_affine_params_t *param)
{ arg_t dst=make_arg(dst_),
src=make_arg(src_);
unsigned r,blocks=(unsigned)ceil(dst.nelem/float(BLOCKSIZE)),
tpb =BLOCKSIZE;
#ifdef DEBUG_OUTPUT
unsigned b=blocks;
#endif
struct hipDeviceProp_t prop;
dim3 grid,threads=make_uint3(tpb,1,1);
CUTRY(hipGetDeviceProperties(&prop,0));
DBG("MAX GRID: %7d %7d %7d"ENDL,prop.maxGridSize[0],prop.maxGridSize[1],prop.maxGridSize[2]);
// Pack our 1d indexes into cuda's 3d indexes
// NOTE(review): `blocks/=grid.x; blocks+=r` adds the nonzero-remainder *flag*
// (0 or 1) rather than doing a ceiling division; nextdim() is built to keep
// the waste small, but confirm the resulting grid always covers dst.nelem.
TRY(grid.x=nextdim(blocks,prop.maxGridSize[0],&r));
blocks/=grid.x;
blocks+=r;
TRY(grid.y=nextdim(blocks,prop.maxGridSize[1],&r));
blocks/=grid.y;
blocks+=r;
TRY(grid.z=blocks);
DBG(" GRID: %7d %7d %7d"ENDL,grid.x,grid.y,grid.z);
/// @cond DEFINES
#define CASE2(TSRC,TDST) DBG("blocks:%u threads/block:%u\n",b,tpb);\
hipLaunchKernelGGL(( affine_kernel<TSRC,TDST>), dim3(grid),dim3(threads),0,(hipStream_t)ndCudaStream(dst_), dst,src,transform,*param);\
break
#define CASE(T) TYPECASE2(ndtype(dst_),T); break
/// @endcond
TYPECASE(ndtype(src_));
#undef CASE
#undef CASE2
CUTRY(hipGetLastError());
return 1;
Error:
return 0;
}
| c4d7fde272aad8603de16b2ae45bdc5a8ac7a770.cu | /**
* \file
* nd affine transform on the GPU with CUDA.
*
* \todo Specialized 2d and 3d (maybe 4d) implementations should be faster. Should do these since they are much more common.
* \todo See if using a thread for more than 1 pixel at a time helps
* \todo reduce register usage (50/thread at the moment!)
*/
#include "../core.h"
#include "../ops.h"
#include "stdio.h"
#include <stdint.h>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math_functions.h"
#include "generic/macros.h"
TYPEDEFS;
/// @cond DEFINES
#define MAXDIMS 8 // should be sizeof uchar
#define WARPS_PER_BLOCK 4
#define BLOCKSIZE (32*WARPS_PER_BLOCK) // threads per block
//#define DEBUG_OUTPUT
#ifdef DEBUG_OUTPUT
#define DBG(...) printf(__VA_ARGS__)
#else
#define DBG(...)
#endif
#define ENDL "\n"
#define LOG(...) ndLogError(dst_,__VA_ARGS__)
#define TRY(e) do{if(!(e)) {LOG("%s(%d): %s()"ENDL "\tExpression evaluated as failure."ENDL "\t%s"ENDL,__FILE__,__LINE__,__FUNCTION__,#e); goto Error; }}while(0)
#define CUTRY(e) do{cudaError_t ecode=(e); if(ecode!=cudaSuccess) {LOG("%s(%d): %s()"ENDL "\tExpression evaluated as failure."ENDL "\t%s"ENDL "\t%s"ENDL,__FILE__,__LINE__,__FUNCTION__,#e,cudaGetErrorString(ecode)); goto Error; }}while(0)
#define FAIL LOG("%s(%d) %s()"ENDL "\tExecution should not have reached here."ENDL,__FILE__,__LINE__,__FUNCTION__); goto Error
#ifndef restrict
#define restrict __restrict__
#endif
// printf() is only supported
// for devices of compute capability 2.0 and higher
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#define printf(f, ...) ((void)(f, __VA_ARGS__),0)
#endif
/// @cond DEFINES
typedef uint8_t u8;
typedef uint32_t u32;
typedef unsigned int uint;
/// Plain-old-data view of an nd array, passed by value to the kernel.
/// Filled by make_arg() from an nd_t handle.
typedef struct arg_t_
{ u8 ndim; ///< The number of dimensions
u32 nelem; ///< The total number of elements
size_t *restrict shape; ///< Buffer of length ndim, ordered [w,h,d,...]. Always agrees with stride. Maintained for convenience.
size_t *restrict strides; ///< Buffer of length ndim+1, strides[i] is the number of bytes laid out between unit steps along dimension i
void *restrict data; ///< A pointer to the data.
} arg_t;
/// Clamp f to [a,b]. Note fminf/fmaxf return the non-NaN operand, so a NaN
/// input comes out as b here.
inline __device__ float clamp(float f, float a, float b) {return fmaxf(a, fminf(f, b));}
/// Convert a float to T, clamping to T's representable range.
template<class T> inline __device__ T saturate(float f);
template<> inline __device__ uint8_t saturate<uint8_t>(float f) {return clamp(f,0,UCHAR_MAX);}
template<> inline __device__ uint16_t saturate<uint16_t>(float f) {return clamp(f,0,USHRT_MAX);}
template<> inline __device__ uint32_t saturate<uint32_t>(float f) {return clamp(f,0,UINT_MAX);} // was ULONG_MAX, which is 64-bit on LP64 and does not clamp to the uint32 range; UINT_MAX rounds up in float but is the correct bound
template<> inline __device__ uint64_t saturate<uint64_t>(float f) {return clamp(f,0,ULLONG_MAX);} // FIXME - will overflow float type
template<> inline __device__ int8_t saturate< int8_t> (float f) {return clamp(f,CHAR_MIN,CHAR_MAX);}
template<> inline __device__ int16_t saturate< int16_t>(float f) {return clamp(f,SHRT_MIN,SHRT_MAX);}
template<> inline __device__ int32_t saturate< int32_t>(float f) {return clamp(f,INT_MIN,INT_MAX);} // was LONG_MIN/LONG_MAX, which are 64-bit on LP64 and did not clamp to the int32 range
template<> inline __device__ int64_t saturate< int64_t>(float f) {return clamp(f,LLONG_MIN,LLONG_MAX);} // FIXME - will overflow float type
template<> inline __device__ float saturate<float>(float f) {return f;}
template<> inline __device__ double saturate<double>(float f) {return f;}
#if 0
/**
* nD linear interpolation for maximum intensity composting.
*
* The boundary handling used here is designed for maximum intensity composting.
* A constant (determined by \a param->boundary_value) is returned for
* out-of-bounds samples.
* Samples stradling the border are handled as a special case.
*
* OUT OF USE
* Keeping it here because it was interesting. Might need it in the future? Slow at the moment.
*/
// NOTE: dead code — this definition sits inside an #if 0/#endif block (see
// the comment above: kept for reference only).
// NOTE(review): the corner loop bound ((1<<ndim)-1) skips the all-ones
// corner of the hypercube, so one lerp term appears to be missing — confirm
// before ever re-enabling this path.
template<class Tsrc,class Tdst>
inline __device__ Tdst sample(arg_t &src,const float *restrict const r,const nd_affine_params_t*const param)
{ uchar2 bounds=inbounds(src.ndim,src.shape,r); // bit i set if inbounds on dim i
// clamp to boundary value for out-of-bounds
if(!bounds.x && !bounds.y)
return param->boundary_value;
// compute offset to top left ish corner of lattice unit
u32 idx=0;
for(u8 i=0;i<src.ndim;++i)
idx+=src.strides[i]*floorf(r[i]);
// iterate over each corner of hypercube
float v(0.0f);
for(u8 i=0;i<((1<<src.ndim)-1);++i) // bits of i select left or right sample on each dimension
{ uchar2 o=make_uchar2(~i&~bounds.x,i&~bounds.y);// don't need to mask high bits of i
float w=1.0f;
int offset=0; // offset so corner clamps to edge
for(u8 idim=0;idim<src.ndim;++idim) // loop for dot-products w bit vector
{ const size_t s=src.strides[idim];
const float a=fpartf(r[idim]),
b=1.0f-a;
#define BIT(bs_,i_) (((bs_)&(1<<(i_)))!=0)
offset+=BIT(o.x,idim)*s // clamp corner (top left ish)
-BIT(o.y,idim)*s // clamp corner (bot right ish)
+BIT(i,idim) *s; // normal corner offset
w*=BIT(i,idim)*a+BIT(~i,idim)*b; // weight for corner is a product of lerp weights for each dimension
#undef BIT
}
v+=w*((Tsrc*)src.data)[idx+offset]; // weighted value for corner
}
return saturate<Tdst>(v);
}
#endif
// Parenthesized so the expansion binds as one expression inside larger
// contexts (the old form ((a)>(b))?(a):(b) mis-parses in e.g. max(a,b)*2).
// Beware: arguments are still evaluated twice.
#define max(a,b) (((a)>(b))?(a):(b))
// Helpers for flattening CUDA's 3-d launch indices into a single linear index.
inline __device__ unsigned prod(dim3 a) {return a.x*a.y*a.z;} ///< number of elements in extent a
inline __device__ unsigned stride(uint3 a, dim3 b) {return a.x+b.x*(a.y+b.y*a.z);} ///< row-major linearization of index a within extent b
inline __device__ unsigned sum(uint3 a) {return a.x+a.y+a.z;} ///< equals the linear thread id only for 1-d blocks
/** \todo Respect strides. Currently assumes strides reflect shape. */
/**
 * Max-intensity composite of an affinely transformed source into dst:
 * each output element becomes max(dst[i], sampled src value), where the
 * source is point-sampled at transform applied to the output coordinate,
 * or param.boundary_value when the mapped coordinate is out of bounds
 * (with a one-pixel nearest clamp at the border).
 *
 * transform layout: src.ndim rows of (dst.ndim+1) columns, row-major; the
 * last column of each row is the translation term.
 *
 * NOTE(review): idst = sum(threadIdx)+... is only a valid linear thread id
 * when blockDim is 1-d; the host launcher uses threads=(BLOCKSIZE,1,1), so
 * this holds today — confirm if the launch configuration ever changes.
 */
template<typename Tsrc,typename Tdst>
__global__ void
__launch_bounds__(BLOCKSIZE,1) /*max threads,min blocks*/
affine_kernel(arg_t dst, arg_t src, const float *transform, const nd_affine_params_t param)
{
Tdst o,v;
unsigned idst = sum(threadIdx)+stride(blockIdx,gridDim)*prod(blockDim);
#if 0
if(blockIdx.x==0 && threadIdx.x==2)
printf("ksize src:%d dst:%d\n",(int)sizeof(*ibuf),(int)sizeof(*obuf));
#endif
if(idst<dst.nelem)
{
/////
unsigned isrc=0; // byte offset of the source sample
u8 oob=0; // set when the mapped coordinate falls outside src
#if 1 // 30 ms without this block, 200 ms with (64x64x64x64)
// Unravel idst into an nd output coordinate and map it through row r of
// the transform to get the source coordinate on dimension r.
for(u8 r=0;r<src.ndim;++r)
{ int coord=0;
unsigned i=idst,o=(dst.ndim+1)*r;
for(u8 c=0;c<dst.ndim;++c)
{ coord+=(int)((i%dst.shape[c])*transform[o+c]); // NOTE(review): each term is truncated toward zero before summing; rounding the float sum once would differ — confirm intended
i/=dst.shape[c];
}
coord+=transform[o+dst.ndim]; // translation column (truncated by the int +=)
// bc: nearest for 1 px
if(coord==-1) coord=0;
if(coord==src.shape[r]) coord=src.shape[r]-1;
// bc: clamp to boundary_value elsewhere
if(coord<0 || src.shape[r]<=coord)
{ oob=1;
break;
}
isrc+=src.strides[r]*coord;
}
#endif
v=(oob)?param.boundary_value:saturate<Tdst>(*(Tsrc*)((u8*)src.data+isrc));
/////
o=((Tdst*)dst.data)[idst];
((Tdst*)dst.data)[idst]=max(o,v); // max-intensity composite
}
}
/* Package nd-array `a` into the plain-struct view the kernels take:
 * dimensionality, element count, device-resident shape/stride tables,
 * and the raw data pointer. */
static arg_t make_arg(const nd_t a)
{ arg_t view;
  view.ndim   =(u8)  ndndim(a);
  view.nelem  =(u32) ndnelem(a);
  view.shape  =(size_t*)ndCudaShape(a);
  view.strides=(size_t*)ndCudaStrides(a);
  view.data   =nddata(a);
  return view;
}
//
// === Interface ===
//
#include <math.h>
/* Choose one grid dimension for packing `n` blocks under a hardware
 * `limit`.  Returns n unchanged (with *rem=0) when it already fits.
 * Otherwise scans candidate sizes c in (n/limit, limit) and returns the
 * c whose rounded-up coverage c*ceil(n/c) wastes the fewest slots;
 * *rem is set to 1 when the best candidate still over-covers n (the
 * caller adds a remainder block), else 0.  Returns 0 when no candidate
 * beats `limit` wasted slots -- callers treat 0 as failure. */
static unsigned nextdim(unsigned n, unsigned limit, unsigned *rem)
{ unsigned best=0, best_waste=limit, waste=limit;
  *rem=0;
  if(n<limit) return n;
  for(unsigned c=n/limit+1; c<limit && waste>0; ++c)
  { waste=(unsigned)(c*ceil(n/(float)c)-n);
    if(waste<best_waste)
    { best_waste=waste;
      best=c;
    }
  }
  *rem=(best_waste!=0);
  return best;
}
/**
* Assume the ndkind() of \a src_ and \a dst_ have already been checked.
*/
/* Launch the affine resampling kernel for dst_ <- transform(src_).
 * Computes a 1-d block count and repacks it into cuda's 3-d grid limits
 * via nextdim().  Dispatches on the (src,dst) element-type pair through
 * the TYPECASE macros.  Returns 1 on success, 0 on any TRY/CUTRY
 * failure (which jumps to Error).
 * NOTE(review): after dividing blocks by a grid dimension, `blocks+=r`
 * adds the 0/1 remainder flag from nextdim -- presumably rounding the
 * remaining block count up; confirm it cannot under-count. */
extern "C" unsigned ndaffine_cuda(nd_t dst_, const nd_t src_, const float *transform, const nd_affine_params_t *param)
{ arg_t dst=make_arg(dst_),
        src=make_arg(src_);
  unsigned r,blocks=(unsigned)ceil(dst.nelem/float(BLOCKSIZE)),
  tpb =BLOCKSIZE;
#ifdef DEBUG_OUTPUT
  unsigned b=blocks;
#endif
  struct cudaDeviceProp prop;
  dim3 grid,threads=make_uint3(tpb,1,1); // 1-d thread blocks
  CUTRY(cudaGetDeviceProperties(&prop,0));
  DBG("MAX GRID: %7d %7d %7d"ENDL,prop.maxGridSize[0],prop.maxGridSize[1],prop.maxGridSize[2]);
  // Pack our 1d indexes into cuda's 3d indexes
  TRY(grid.x=nextdim(blocks,prop.maxGridSize[0],&r)); // TRY: nextdim returns 0 on failure
  blocks/=grid.x;
  blocks+=r;
  TRY(grid.y=nextdim(blocks,prop.maxGridSize[1],&r));
  blocks/=grid.y;
  blocks+=r;
  TRY(grid.z=blocks);
  DBG(" GRID: %7d %7d %7d"ENDL,grid.x,grid.y,grid.z);
  /// @cond DEFINES
  #define CASE2(TSRC,TDST) DBG("blocks:%u threads/block:%u\n",b,tpb);\
  affine_kernel<TSRC,TDST><<<grid,threads,0,(cudaStream_t)ndCudaStream(dst_)>>>(dst,src,transform,*param);\
  break
  #define CASE(T) TYPECASE2(ndtype(dst_),T); break
  /// @endcond
  TYPECASE(ndtype(src_)); // expands CASE/CASE2 over all supported element types
  #undef CASE
  #undef CASE2
  CUTRY(cudaGetLastError()); // catch launch-configuration errors
  return 1;
Error:
  return 0;
}
|
231df6dc5b5975fa48079d5916c8277e0a81c08e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "concat_and_split.h"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
// General (unequal-width) column-wise concatenation.
// `input_cols` holds col_size prefix offsets: input i owns output
// columns [input_cols[i], input_cols[i+1]).  Each thread walks output
// columns with a grid-stride loop, advancing `curr_segment` as it
// crosses segment boundaries, then copies that column over all rows.
template <typename T>
__global__ void ConcatKernel(const T** inputs,
                             const int* input_cols,
                             int col_size,
                             const int output_rows,
                             const int output_cols,
                             T* output) {
  int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
  int curr_segment = 0;
  int curr_offset = input_cols[0];
  for (; tid_x < output_cols; tid_x += blockDim.x * gridDim.x) {
    // Advance to the segment containing column tid_x.
    int curr_col_offset = input_cols[curr_segment + 1];
    while (curr_col_offset <= tid_x) {
      curr_offset = curr_col_offset;
      ++curr_segment;
      curr_col_offset = input_cols[curr_segment + 1];
    }
    int local_col = tid_x - curr_offset;          // column within the source input
    int segment_width = curr_col_offset - curr_offset;
    const T* input_ptr = inputs[curr_segment];
    int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
    for (; tid_y < output_rows; tid_y += blockDim.y * gridDim.y)
      output[tid_y * output_cols + tid_x] =
          input_ptr[tid_y * segment_width + local_col];
  }
}
// Concatenation for the equal-width case: every input occupies exactly
// `fixed_in_col` columns of the output, so the source tensor index and
// the column within it follow directly from the output column.
//
// inputs_data   device-visible array of input pointers
// fixed_in_col  column count of every input (> 0)
// out_rows      rows of the output matrix
// out_cols      columns of the output matrix
// output_data   row-major out_rows x out_cols output buffer
template <typename T>
__device__ void ConcatKernelDetail(const T** inputs_data,
                                   const int fixed_in_col,
                                   const int out_rows,
                                   const int out_cols,
                                   T* output_data) {
  int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
  for (; tid_x < out_cols; tid_x += blockDim.x * gridDim.x) {
    // Plain integer division selects the source tensor.  The original
    // computed this as `tid_x * 1.0 / fixed_in_col`, routing an integer
    // split through double arithmetic and an implicit truncation -- both
    // slower and a rounding hazard.  This now matches SplitKernelDetail.
    int split = tid_x / fixed_in_col;
    int in_offset = tid_x - split * fixed_in_col;   // column within that tensor
    const T* input_ptr = inputs_data[split];
    int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
    for (; tid_y < out_rows; tid_y += blockDim.y * gridDim.y) {
      output_data[tid_y * out_cols + tid_x] =
          input_ptr[tid_y * fixed_in_col + in_offset];
    }
  }
}
// Fast path: equal-width concat of exactly two inputs.  Pointers are
// passed by value, so no device-side pointer table is required.
template <typename T>
__global__ void ConcatKernel(const T* input_addr0,
                             const T* input_addr1,
                             const int fixed_in_col,
                             const int out_rows,
                             const int out_cols,
                             T* output_data) {
  const T* inputs_data[2];  // local (per-thread) pointer table
  inputs_data[0] = input_addr0;
  inputs_data[1] = input_addr1;
  ConcatKernelDetail<T>(
      inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
// Fast path: equal-width concat of exactly three inputs (by-value pointers).
template <typename T>
__global__ void ConcatKernel(const T* input_addr0,
                             const T* input_addr1,
                             const T* input_addr2,
                             const int fixed_in_col,
                             const int out_rows,
                             const int out_cols,
                             T* output_data) {
  const T* inputs_data[3];
  inputs_data[0] = input_addr0;
  inputs_data[1] = input_addr1;
  inputs_data[2] = input_addr2;
  ConcatKernelDetail<T>(
      inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
// Fast path: equal-width concat of exactly four inputs (by-value pointers).
template <typename T>
__global__ void ConcatKernel(const T* input_addr0,
                             const T* input_addr1,
                             const T* input_addr2,
                             const T* input_addr3,
                             const int fixed_in_col,
                             const int out_rows,
                             const int out_cols,
                             T* output_data) {
  const T* inputs_data[4];
  inputs_data[0] = input_addr0;
  inputs_data[1] = input_addr1;
  inputs_data[2] = input_addr2;
  inputs_data[3] = input_addr3;
  ConcatKernelDetail<T>(
      inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
// Generic equal-width concat: input pointers come from a device-side
// table (used when the input count is not 2..4).  `in_num` is accepted
// for signature parity but unused -- the column loop bound suffices.
template <typename T>
__global__ void ConcatKernel(const T** inputs_data,
                             const int in_num,
                             const int fixed_in_col,
                             const int out_rows,
                             const int out_cols,
                             T* output_data) {
  ConcatKernelDetail<T>(
      inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
// General (unequal-width) column-wise split: the inverse of the
// segmented ConcatKernel.  `out_cols` holds prefix offsets; output i
// receives input columns [out_cols[i], out_cols[i+1]).  A null entry in
// outputs_data means that output is not wanted and is skipped.
template <typename T>
__global__ void SplitKernel(const T* input_data,
                            const int in_row,
                            const int in_col,
                            const int* out_cols,
                            int out_cols_size,
                            T** outputs_data) {
  int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
  int curr_segment = 0;
  int curr_offset = out_cols[0];
  for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) {
    // Advance to the segment containing column tid_x.
    int curr_col_offset = out_cols[curr_segment + 1];
    while (curr_col_offset <= tid_x) {
      curr_offset = curr_col_offset;
      ++curr_segment;
      curr_col_offset = out_cols[curr_segment + 1];
    }
    int local_col = tid_x - curr_offset;          // column within the target output
    int segment_width = curr_col_offset - curr_offset;
    T* output_ptr = outputs_data[curr_segment];
    if (output_ptr != nullptr) {
      int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
      for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y)
        output_ptr[tid_y * segment_width + local_col] =
            input_data[tid_y * in_col + tid_x];
    }
  }
}
// Split for the equal-width case: each output owns exactly
// `fixed_out_col` consecutive input columns, so the target tensor and
// local column follow from integer division.  A null entry in
// outputs_data skips that output.
template <typename T>
__device__ void SplitKernelDetail(const T* input_data,
                                  const int in_row,
                                  const int in_col,
                                  const int fixed_out_col,
                                  T** outputs_data) {
  int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
  for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) {
    int split = tid_x / fixed_out_col;             // which output tensor
    int in_offset = tid_x - split * fixed_out_col; // column within it
    T* output_ptr = outputs_data[split];
    if (output_ptr != nullptr) {
      int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
      for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y)
        output_ptr[tid_y * fixed_out_col + in_offset] =
            input_data[tid_y * in_col + tid_x];
    }
  }
}
// Generic equal-width split: output pointers come from a device-side table.
template <typename T>
__global__ void SplitKernel(const T* input_data,
                            const int in_row,
                            const int in_col,
                            const int fixed_out_col,
                            T** outputs_data) {
  SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
// Fast path: equal-width split into exactly two outputs (by-value pointers).
template <typename T>
__global__ void SplitKernel(const T* input_data,
                            const int in_row,
                            const int in_col,
                            const int fixed_out_col,
                            T* outputs_addr0,
                            T* outputs_addr1) {
  T* outputs_data[2];  // local (per-thread) pointer table
  outputs_data[0] = outputs_addr0;
  outputs_data[1] = outputs_addr1;
  SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
// Fast path: equal-width split into exactly three outputs (by-value pointers).
template <typename T>
__global__ void SplitKernel(const T* input_data,
                            const int in_row,
                            const int in_col,
                            const int fixed_out_col,
                            T* outputs_addr0,
                            T* outputs_addr1,
                            T* outputs_addr2) {
  T* outputs_data[3];
  outputs_data[0] = outputs_addr0;
  outputs_data[1] = outputs_addr1;
  outputs_data[2] = outputs_addr2;
  SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
// Fast path: equal-width split into exactly four outputs (by-value pointers).
template <typename T>
__global__ void SplitKernel(const T* input_data,
                            const int in_row,
                            const int in_col,
                            const int fixed_out_col,
                            T* outputs_addr0,
                            T* outputs_addr1,
                            T* outputs_addr2,
                            T* outputs_addr3) {
  T* outputs_data[4];
  outputs_data[0] = outputs_addr0;
  outputs_data[1] = outputs_addr1;
  outputs_data[2] = outputs_addr2;
  outputs_data[3] = outputs_addr3;
  SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
// Choose a 2-d launch configuration for a num_rows x num_cols copy.
// Blocks hold 1024 threads: the x (column) extent is num_cols rounded up
// to a warp multiple (capped at 1024), with remaining threads given to
// rows.  The grid is capped by the device's physical thread capacity --
// the kernels above iterate with grid-stride loops, so under-coverage
// is fine.
static inline void GetBlockDims(const platform::CUDADeviceContext& context,
                                int num_rows,
                                int num_cols,
                                dim3* block_dims,
                                dim3* grid_dims) {
  // Set the thread block and grid according to CurrentDeviceId
  const int kThreadsPerBlock = 1024;
  int block_cols = kThreadsPerBlock;
  if (num_cols < kThreadsPerBlock) { // block_cols is aligned by 32.
    block_cols = ((num_cols + 31) >> 5) << 5;
  }
  int block_rows = kThreadsPerBlock / block_cols;
  *block_dims = dim3(block_cols, block_rows, 1);
  int max_threads = context.GetMaxPhysicalThreadCount();
  int max_blocks = ::max(max_threads / kThreadsPerBlock, 1);
  int grid_cols =
      ::min((num_cols + block_cols - 1) / block_cols, max_blocks);
  int grid_rows =
      ::min(max_blocks / grid_cols, ::max(num_rows / block_rows, 1));
  *grid_dims = dim3(grid_cols, grid_rows, 1);
}
/*
* All tensors' dimension should be the same and the values of
* each dimension must be the same, except the axis dimension.
*/
// GPU concatenation of `input` tensors along `axis` into `output`.
// Every tensor is viewed as 2-D: rows = product of dims before `axis`
// (shared by all inputs), cols = remaining elements per row.  When all
// inputs share the same width and there are 2..4 of them, pointers are
// passed to the kernel by value; otherwise a device-side pointer table
// (and, for unequal widths, a column-offset table) is staged first.
template <typename T>
class ConcatFunctor<platform::CUDADeviceContext, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const std::vector<framework::Tensor>& input,
                  int axis,
                  framework::Tensor* output) {
    // TODO(zcd): Add input data validity checking
    int in_num = input.size();
    int in_row = 1;  // rows = product of dims before the concat axis
    auto dim_0 = input[0].dims();
    for (int i = 0; i < axis; ++i) {
      in_row *= dim_0[i];
    }
    int in_col = input[0].numel() / in_row;  // reference width (input 0)
    int out_row = in_row, out_col = 0;
    std::vector<const T*> inputs_data(in_num);
    std::vector<int> inputs_col(in_num + 1);  // prefix column offsets
    inputs_col[0] = 0;
    bool has_same_shape = true;  // true iff every input matches input 0's width
    for (int i = 0; i < in_num; ++i) {
      int t_cols = input[i].numel() / in_row;
      if (has_same_shape) {
        if (t_cols != in_col) has_same_shape = false;
      }
      out_col += t_cols;
      inputs_col[i + 1] = out_col;
      inputs_data[i] = input[i].data<T>();
    }
    dim3 block_dims;
    dim3 grid_dims;
    GetBlockDims(context, out_row, out_col, &block_dims, &grid_dims);
    // Device pointer table: only needed when the by-value fast paths
    // (equal widths, 2..4 inputs) cannot be used.
    memory::allocation::AllocationPtr tmp_dev_ins_data;
    const T** dev_ins_data = nullptr;
    if (!has_same_shape || in_num < 2 || in_num > 4) {
      tmp_dev_ins_data =
          memory::Alloc(context, inputs_data.size() * sizeof(T*));
      // NOTE(review): this copy is issued on context.stream() while
      // inputs_data is a stack-local vector -- presumably memory::Copy
      // completes (or snapshots) before scope exit; confirm.
      memory::Copy(boost::get<platform::CUDAPlace>(context.GetPlace()),
                   tmp_dev_ins_data->ptr(),
                   platform::CPUPlace(),
                   static_cast<void*>(inputs_data.data()),
                   inputs_data.size() * sizeof(T*),
                   context.stream());
      dev_ins_data = reinterpret_cast<const T**>(tmp_dev_ins_data->ptr());
    }
    if (has_same_shape) {
      if (in_num == 2) {
        hipLaunchKernelGGL(( ConcatKernel), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
            inputs_data[0],
            inputs_data[1],
            in_col,
            out_row,
            out_col,
            output->data<T>());
      } else if (in_num == 3) {
        hipLaunchKernelGGL(( ConcatKernel), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
            inputs_data[0],
            inputs_data[1],
            inputs_data[2],
            in_col,
            out_row,
            out_col,
            output->data<T>());
      } else if (in_num == 4) {
        hipLaunchKernelGGL(( ConcatKernel), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
            inputs_data[0],
            inputs_data[1],
            inputs_data[2],
            inputs_data[3],
            in_col,
            out_row,
            out_col,
            output->data<T>());
      } else {
        // Equal widths but not 2..4 inputs: use the pointer-table kernel.
        hipLaunchKernelGGL(( ConcatKernel), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
            dev_ins_data, in_num, in_col, out_row, out_col, output->data<T>());
      }
    } else {
      // Unequal widths: also stage the prefix-offset table on device.
      auto tmp_dev_ins_col_data =
          memory::Alloc(context, inputs_col.size() * sizeof(int));
      memory::Copy(boost::get<platform::CUDAPlace>(context.GetPlace()),
                   tmp_dev_ins_col_data->ptr(),
                   platform::CPUPlace(),
                   static_cast<void*>(inputs_col.data()),
                   inputs_col.size() * sizeof(int),
                   context.stream());
      int* dev_ins_col_data = static_cast<int*>(tmp_dev_ins_col_data->ptr());
      hipLaunchKernelGGL(( ConcatKernel), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
          dev_ins_data,
          dev_ins_col_data,
          static_cast<int>(inputs_col.size()),
          out_row,
          out_col,
          output->data<T>());
    }
  }
};
/*
* All tensors' dimension should be the same and the values of
* each dimension must be the same, except the axis dimension.
*/
// GPU split of `input` along `axis` into `outputs`: the inverse of
// ConcatFunctor.  `ref_inputs` supplies the target shapes (a null entry
// in `outputs` means that slice is not materialized and is skipped by
// the kernels).  Same fast-path strategy as ConcatFunctor: by-value
// pointers for 2..4 equal-width outputs, device-side tables otherwise.
template <typename T>
class SplitFunctor<platform::CUDADeviceContext, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const std::vector<const framework::Tensor*>& ref_inputs,
                  int axis,
                  std::vector<framework::Tensor*>* outputs) {
    // TODO(zcd): Add input data validity checking
    int o_num = outputs->size();
    int out_row = 1;  // rows = product of dims before the split axis
    auto dim_0 = ref_inputs[0]->dims();
    for (int i = 0; i < axis; ++i) {
      out_row *= dim_0[i];
    }
    int out0_col = ref_inputs[0]->numel() / out_row;  // reference width
    int in_col = 0, in_row = out_row;
    bool has_same_shape = true;  // true iff every output matches out0_col
    std::vector<T*> outputs_data(o_num);
    std::vector<int> outputs_cols(o_num + 1);  // prefix column offsets
    outputs_cols[0] = 0;
    for (int i = 0; i < o_num; ++i) {
      int t_col = ref_inputs.at(i)->numel() / out_row;
      if (has_same_shape) {
        if (t_col != out0_col) has_same_shape = false;
      }
      in_col += t_col;
      outputs_cols[i + 1] = in_col;
      if (outputs->at(i) != nullptr) {
        outputs_data[i] = outputs->at(i)->data<T>();
      } else {
        outputs_data[i] = nullptr;  // kernel skips null outputs
      }
    }
    dim3 block_dims;
    dim3 grid_dims;
    GetBlockDims(context, out_row, in_col, &block_dims, &grid_dims);
    // Device pointer table: only needed outside the by-value fast paths.
    memory::allocation::AllocationPtr tmp_dev_outs_data;
    T** dev_out_gpu_data = nullptr;
    if (!has_same_shape || o_num < 2 || o_num > 4) {
      tmp_dev_outs_data =
          memory::Alloc(context, outputs_data.size() * sizeof(T*));
      memory::Copy(boost::get<platform::CUDAPlace>(context.GetPlace()),
                   tmp_dev_outs_data->ptr(),
                   platform::CPUPlace(),
                   reinterpret_cast<void*>(outputs_data.data()),
                   outputs_data.size() * sizeof(T*),
                   context.stream());
      dev_out_gpu_data = reinterpret_cast<T**>(tmp_dev_outs_data->ptr());
    }
    if (has_same_shape) {
      if (o_num == 2) {
        hipLaunchKernelGGL(( SplitKernel), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
            input.data<T>(),
            in_row,
            in_col,
            out0_col,
            outputs_data[0],
            outputs_data[1]);
      } else if (o_num == 3) {
        hipLaunchKernelGGL(( SplitKernel), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
            input.data<T>(),
            in_row,
            in_col,
            out0_col,
            outputs_data[0],
            outputs_data[1],
            outputs_data[2]);
      } else if (o_num == 4) {
        hipLaunchKernelGGL(( SplitKernel), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
            input.data<T>(),
            in_row,
            in_col,
            out0_col,
            outputs_data[0],
            outputs_data[1],
            outputs_data[2],
            outputs_data[3]);
      } else {
        // Equal widths but not 2..4 outputs: use the pointer-table kernel.
        hipLaunchKernelGGL(( SplitKernel), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
            input.data<T>(), in_row, in_col, out0_col, dev_out_gpu_data);
      }
    } else {
      // Unequal widths: also stage the prefix-offset table on device.
      auto tmp_dev_ins_col_data =
          memory::Alloc(context,
                        outputs_cols.size() * sizeof(int));
      memory::Copy(boost::get<platform::CUDAPlace>(context.GetPlace()),
                   tmp_dev_ins_col_data->ptr(),
                   platform::CPUPlace(),
                   reinterpret_cast<void*>(outputs_cols.data()),
                   outputs_cols.size() * sizeof(int),
                   context.stream());
      int* dev_outs_col_data =
          reinterpret_cast<int*>(tmp_dev_ins_col_data->ptr());
      hipLaunchKernelGGL(( SplitKernel), dim3(grid_dims), dim3(block_dims), 0, context.stream(),
          input.data<T>(),
          in_row,
          in_col,
          dev_outs_col_data,
          static_cast<int>(outputs_cols.size()),
          dev_out_gpu_data);
    }
  }
};
// Explicitly instantiate both functors for every element type the
// framework supports (FOR_ALL_TYPES expands DEFINE_FUNCTOR per type).
#define DEFINE_FUNCTOR(type) \
template class ConcatFunctor<platform::CUDADeviceContext, type>; \
template class SplitFunctor<platform::CUDADeviceContext, type>
FOR_ALL_TYPES(DEFINE_FUNCTOR);
} // namespace math
} // namespace operators
} // namespace paddle
| 231df6dc5b5975fa48079d5916c8277e0a81c08e.cu | /* Copyright (c) 2019 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "concat_and_split.h"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
__global__ void ConcatKernel(const T** inputs,
const int* input_cols,
int col_size,
const int output_rows,
const int output_cols,
T* output) {
int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
int curr_segment = 0;
int curr_offset = input_cols[0];
for (; tid_x < output_cols; tid_x += blockDim.x * gridDim.x) {
int curr_col_offset = input_cols[curr_segment + 1];
while (curr_col_offset <= tid_x) {
curr_offset = curr_col_offset;
++curr_segment;
curr_col_offset = input_cols[curr_segment + 1];
}
int local_col = tid_x - curr_offset;
int segment_width = curr_col_offset - curr_offset;
const T* input_ptr = inputs[curr_segment];
int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
for (; tid_y < output_rows; tid_y += blockDim.y * gridDim.y)
output[tid_y * output_cols + tid_x] =
input_ptr[tid_y * segment_width + local_col];
}
}
// Concatenation for the equal-width case: every input occupies exactly
// `fixed_in_col` columns of the output, so the source tensor index and
// the column within it follow directly from the output column.
//
// inputs_data   device-visible array of input pointers
// fixed_in_col  column count of every input (> 0)
// out_rows      rows of the output matrix
// out_cols      columns of the output matrix
// output_data   row-major out_rows x out_cols output buffer
template <typename T>
__device__ void ConcatKernelDetail(const T** inputs_data,
                                   const int fixed_in_col,
                                   const int out_rows,
                                   const int out_cols,
                                   T* output_data) {
  int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
  for (; tid_x < out_cols; tid_x += blockDim.x * gridDim.x) {
    // Plain integer division selects the source tensor.  The original
    // computed this as `tid_x * 1.0 / fixed_in_col`, routing an integer
    // split through double arithmetic and an implicit truncation -- both
    // slower and a rounding hazard.  This now matches SplitKernelDetail.
    int split = tid_x / fixed_in_col;
    int in_offset = tid_x - split * fixed_in_col;   // column within that tensor
    const T* input_ptr = inputs_data[split];
    int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
    for (; tid_y < out_rows; tid_y += blockDim.y * gridDim.y) {
      output_data[tid_y * out_cols + tid_x] =
          input_ptr[tid_y * fixed_in_col + in_offset];
    }
  }
}
template <typename T>
__global__ void ConcatKernel(const T* input_addr0,
const T* input_addr1,
const int fixed_in_col,
const int out_rows,
const int out_cols,
T* output_data) {
const T* inputs_data[2];
inputs_data[0] = input_addr0;
inputs_data[1] = input_addr1;
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void ConcatKernel(const T* input_addr0,
const T* input_addr1,
const T* input_addr2,
const int fixed_in_col,
const int out_rows,
const int out_cols,
T* output_data) {
const T* inputs_data[3];
inputs_data[0] = input_addr0;
inputs_data[1] = input_addr1;
inputs_data[2] = input_addr2;
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void ConcatKernel(const T* input_addr0,
const T* input_addr1,
const T* input_addr2,
const T* input_addr3,
const int fixed_in_col,
const int out_rows,
const int out_cols,
T* output_data) {
const T* inputs_data[4];
inputs_data[0] = input_addr0;
inputs_data[1] = input_addr1;
inputs_data[2] = input_addr2;
inputs_data[3] = input_addr3;
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void ConcatKernel(const T** inputs_data,
const int in_num,
const int fixed_in_col,
const int out_rows,
const int out_cols,
T* output_data) {
ConcatKernelDetail<T>(
inputs_data, fixed_in_col, out_rows, out_cols, output_data);
}
template <typename T>
__global__ void SplitKernel(const T* input_data,
const int in_row,
const int in_col,
const int* out_cols,
int out_cols_size,
T** outputs_data) {
int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
int curr_segment = 0;
int curr_offset = out_cols[0];
for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) {
int curr_col_offset = out_cols[curr_segment + 1];
while (curr_col_offset <= tid_x) {
curr_offset = curr_col_offset;
++curr_segment;
curr_col_offset = out_cols[curr_segment + 1];
}
int local_col = tid_x - curr_offset;
int segment_width = curr_col_offset - curr_offset;
T* output_ptr = outputs_data[curr_segment];
if (output_ptr != nullptr) {
int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y)
output_ptr[tid_y * segment_width + local_col] =
input_data[tid_y * in_col + tid_x];
}
}
}
template <typename T>
__device__ void SplitKernelDetail(const T* input_data,
const int in_row,
const int in_col,
const int fixed_out_col,
T** outputs_data) {
int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) {
int split = tid_x / fixed_out_col;
int in_offset = tid_x - split * fixed_out_col;
T* output_ptr = outputs_data[split];
if (output_ptr != nullptr) {
int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y)
output_ptr[tid_y * fixed_out_col + in_offset] =
input_data[tid_y * in_col + tid_x];
}
}
}
template <typename T>
__global__ void SplitKernel(const T* input_data,
const int in_row,
const int in_col,
const int fixed_out_col,
T** outputs_data) {
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
template <typename T>
__global__ void SplitKernel(const T* input_data,
const int in_row,
const int in_col,
const int fixed_out_col,
T* outputs_addr0,
T* outputs_addr1) {
T* outputs_data[2];
outputs_data[0] = outputs_addr0;
outputs_data[1] = outputs_addr1;
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
template <typename T>
__global__ void SplitKernel(const T* input_data,
const int in_row,
const int in_col,
const int fixed_out_col,
T* outputs_addr0,
T* outputs_addr1,
T* outputs_addr2) {
T* outputs_data[3];
outputs_data[0] = outputs_addr0;
outputs_data[1] = outputs_addr1;
outputs_data[2] = outputs_addr2;
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
template <typename T>
__global__ void SplitKernel(const T* input_data,
const int in_row,
const int in_col,
const int fixed_out_col,
T* outputs_addr0,
T* outputs_addr1,
T* outputs_addr2,
T* outputs_addr3) {
T* outputs_data[4];
outputs_data[0] = outputs_addr0;
outputs_data[1] = outputs_addr1;
outputs_data[2] = outputs_addr2;
outputs_data[3] = outputs_addr3;
SplitKernelDetail<T>(input_data, in_row, in_col, fixed_out_col, outputs_data);
}
static inline void GetBlockDims(const platform::CUDADeviceContext& context,
int num_rows,
int num_cols,
dim3* block_dims,
dim3* grid_dims) {
// Set the thread block and grid according to CurrentDeviceId
const int kThreadsPerBlock = 1024;
int block_cols = kThreadsPerBlock;
if (num_cols < kThreadsPerBlock) { // block_cols is aligned by 32.
block_cols = ((num_cols + 31) >> 5) << 5;
}
int block_rows = kThreadsPerBlock / block_cols;
*block_dims = dim3(block_cols, block_rows, 1);
int max_threads = context.GetMaxPhysicalThreadCount();
int max_blocks = std::max(max_threads / kThreadsPerBlock, 1);
int grid_cols =
std::min((num_cols + block_cols - 1) / block_cols, max_blocks);
int grid_rows =
std::min(max_blocks / grid_cols, std::max(num_rows / block_rows, 1));
*grid_dims = dim3(grid_cols, grid_rows, 1);
}
/*
* All tensors' dimension should be the same and the values of
* each dimension must be the same, except the axis dimension.
*/
template <typename T>
class ConcatFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const std::vector<framework::Tensor>& input,
int axis,
framework::Tensor* output) {
// TODO(zcd): Add input data validity checking
int in_num = input.size();
int in_row = 1;
auto dim_0 = input[0].dims();
for (int i = 0; i < axis; ++i) {
in_row *= dim_0[i];
}
int in_col = input[0].numel() / in_row;
int out_row = in_row, out_col = 0;
std::vector<const T*> inputs_data(in_num);
std::vector<int> inputs_col(in_num + 1);
inputs_col[0] = 0;
bool has_same_shape = true;
for (int i = 0; i < in_num; ++i) {
int t_cols = input[i].numel() / in_row;
if (has_same_shape) {
if (t_cols != in_col) has_same_shape = false;
}
out_col += t_cols;
inputs_col[i + 1] = out_col;
inputs_data[i] = input[i].data<T>();
}
dim3 block_dims;
dim3 grid_dims;
GetBlockDims(context, out_row, out_col, &block_dims, &grid_dims);
memory::allocation::AllocationPtr tmp_dev_ins_data;
const T** dev_ins_data = nullptr;
if (!has_same_shape || in_num < 2 || in_num > 4) {
tmp_dev_ins_data =
memory::Alloc(context, inputs_data.size() * sizeof(T*));
memory::Copy(boost::get<platform::CUDAPlace>(context.GetPlace()),
tmp_dev_ins_data->ptr(),
platform::CPUPlace(),
static_cast<void*>(inputs_data.data()),
inputs_data.size() * sizeof(T*),
context.stream());
dev_ins_data = reinterpret_cast<const T**>(tmp_dev_ins_data->ptr());
}
if (has_same_shape) {
if (in_num == 2) {
ConcatKernel<<<grid_dims, block_dims, 0, context.stream()>>>(
inputs_data[0],
inputs_data[1],
in_col,
out_row,
out_col,
output->data<T>());
} else if (in_num == 3) {
ConcatKernel<<<grid_dims, block_dims, 0, context.stream()>>>(
inputs_data[0],
inputs_data[1],
inputs_data[2],
in_col,
out_row,
out_col,
output->data<T>());
} else if (in_num == 4) {
ConcatKernel<<<grid_dims, block_dims, 0, context.stream()>>>(
inputs_data[0],
inputs_data[1],
inputs_data[2],
inputs_data[3],
in_col,
out_row,
out_col,
output->data<T>());
} else {
ConcatKernel<<<grid_dims, block_dims, 0, context.stream()>>>(
dev_ins_data, in_num, in_col, out_row, out_col, output->data<T>());
}
} else {
auto tmp_dev_ins_col_data =
memory::Alloc(context, inputs_col.size() * sizeof(int));
memory::Copy(boost::get<platform::CUDAPlace>(context.GetPlace()),
tmp_dev_ins_col_data->ptr(),
platform::CPUPlace(),
static_cast<void*>(inputs_col.data()),
inputs_col.size() * sizeof(int),
context.stream());
int* dev_ins_col_data = static_cast<int*>(tmp_dev_ins_col_data->ptr());
ConcatKernel<<<grid_dims, block_dims, 0, context.stream()>>>(
dev_ins_data,
dev_ins_col_data,
static_cast<int>(inputs_col.size()),
out_row,
out_col,
output->data<T>());
}
}
};
/*
* All tensors' dimension should be the same and the values of
* each dimension must be the same, except the axis dimension.
*/
template <typename T>
class SplitFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const std::vector<const framework::Tensor*>& ref_inputs,
int axis,
std::vector<framework::Tensor*>* outputs) {
// TODO(zcd): Add input data validity checking
int o_num = outputs->size();
int out_row = 1;
auto dim_0 = ref_inputs[0]->dims();
for (int i = 0; i < axis; ++i) {
out_row *= dim_0[i];
}
int out0_col = ref_inputs[0]->numel() / out_row;
int in_col = 0, in_row = out_row;
bool has_same_shape = true;
std::vector<T*> outputs_data(o_num);
std::vector<int> outputs_cols(o_num + 1);
outputs_cols[0] = 0;
for (int i = 0; i < o_num; ++i) {
int t_col = ref_inputs.at(i)->numel() / out_row;
if (has_same_shape) {
if (t_col != out0_col) has_same_shape = false;
}
in_col += t_col;
outputs_cols[i + 1] = in_col;
if (outputs->at(i) != nullptr) {
outputs_data[i] = outputs->at(i)->data<T>();
} else {
outputs_data[i] = nullptr;
}
}
dim3 block_dims;
dim3 grid_dims;
GetBlockDims(context, out_row, in_col, &block_dims, &grid_dims);
memory::allocation::AllocationPtr tmp_dev_outs_data;
T** dev_out_gpu_data = nullptr;
if (!has_same_shape || o_num < 2 || o_num > 4) {
tmp_dev_outs_data =
memory::Alloc(context, outputs_data.size() * sizeof(T*));
memory::Copy(boost::get<platform::CUDAPlace>(context.GetPlace()),
tmp_dev_outs_data->ptr(),
platform::CPUPlace(),
reinterpret_cast<void*>(outputs_data.data()),
outputs_data.size() * sizeof(T*),
context.stream());
dev_out_gpu_data = reinterpret_cast<T**>(tmp_dev_outs_data->ptr());
}
if (has_same_shape) {
if (o_num == 2) {
SplitKernel<<<grid_dims, block_dims, 0, context.stream()>>>(
input.data<T>(),
in_row,
in_col,
out0_col,
outputs_data[0],
outputs_data[1]);
} else if (o_num == 3) {
SplitKernel<<<grid_dims, block_dims, 0, context.stream()>>>(
input.data<T>(),
in_row,
in_col,
out0_col,
outputs_data[0],
outputs_data[1],
outputs_data[2]);
} else if (o_num == 4) {
SplitKernel<<<grid_dims, block_dims, 0, context.stream()>>>(
input.data<T>(),
in_row,
in_col,
out0_col,
outputs_data[0],
outputs_data[1],
outputs_data[2],
outputs_data[3]);
} else {
SplitKernel<<<grid_dims, block_dims, 0, context.stream()>>>(
input.data<T>(), in_row, in_col, out0_col, dev_out_gpu_data);
}
} else {
auto tmp_dev_ins_col_data =
memory::Alloc(context,
outputs_cols.size() * sizeof(int));
memory::Copy(boost::get<platform::CUDAPlace>(context.GetPlace()),
tmp_dev_ins_col_data->ptr(),
platform::CPUPlace(),
reinterpret_cast<void*>(outputs_cols.data()),
outputs_cols.size() * sizeof(int),
context.stream());
int* dev_outs_col_data =
reinterpret_cast<int*>(tmp_dev_ins_col_data->ptr());
SplitKernel<<<grid_dims, block_dims, 0, context.stream()>>>(
input.data<T>(),
in_row,
in_col,
dev_outs_col_data,
static_cast<int>(outputs_cols.size()),
dev_out_gpu_data);
}
}
};
#define DEFINE_FUNCTOR(type) \
template class ConcatFunctor<platform::CUDADeviceContext, type>; \
template class SplitFunctor<platform::CUDADeviceContext, type>
FOR_ALL_TYPES(DEFINE_FUNCTOR);
} // namespace math
} // namespace operators
} // namespace paddle
|
8ddd4f8994b63163e298e1bf4909e502f9b9d111.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zsymmetrize.cu, normal z -> d, Tue Aug 30 09:38:34 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
/* Mirror the lower triangle of column-major dA onto the upper triangle.
   Launch: 1-D grid of NB-thread blocks, one thread per row i.  Each
   thread walks across row i (dA) and down column i (dAT) up to (but
   not including) the diagonal.  MAGMA_D_CONJ is the real-double
   instantiation of conj() -- this file is generated from the complex
   zsymmetrize template, per the header. */
__global__ void
dsymmetrize_lower( int m, double *dA, int ldda )
{
    // dA iterates across row i and dAT iterates down column i.
    int i = blockIdx.x*NB + threadIdx.x;
    double *dAT = dA;
    if ( i < m ) {  // bottom block may extend past the matrix
        dA += i;
        dAT += i*ldda;
        double *dAend = dA + i*ldda;  // stop at the diagonal element
        while( dA < dAend ) {
            *dAT = MAGMA_D_CONJ(*dA); // upper := lower
            dA += ldda;
            dAT += 1;
        }
    }
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
/* Mirror the upper triangle of column-major dA onto the lower triangle.
   Identical traversal to dsymmetrize_lower; only the assignment
   direction differs (dA = conj(dAT) instead of dAT = conj(dA)). */
__global__ void
dsymmetrize_upper( int m, double *dA, int ldda )
{
    // dA iterates across row i and dAT iterates down column i.
    int i = blockIdx.x*NB + threadIdx.x;
    double *dAT = dA;
    if ( i < m ) {  // bottom block may extend past the matrix
        dA += i;
        dAT += i*ldda;
        double *dAend = dA + i*ldda;  // stop at the diagonal element
        while( dA < dAend ) {
            *dA = MAGMA_D_CONJ(*dAT); // lower := upper
            dA += ldda;
            dAT += 1;
        }
    }
}
/***************************************************************************//**
Purpose
-------
DSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_symmetrize
*******************************************************************************/
extern "C" void
magmablas_dsymmetrize_q(
magma_uplo_t uplo, magma_int_t m,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( dsymmetrize_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda );
}
else {
hipLaunchKernelGGL(( dsymmetrize_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda );
}
}
| 8ddd4f8994b63163e298e1bf4909e502f9b9d111.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zsymmetrize.cu, normal z -> d, Tue Aug 30 09:38:34 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
dsymmetrize_lower( int m, double *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
double *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
double *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = MAGMA_D_CONJ(*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
dsymmetrize_upper( int m, double *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
double *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
double *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = MAGMA_D_CONJ(*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/***************************************************************************//**
Purpose
-------
DSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_symmetrize
*******************************************************************************/
extern "C" void
magmablas_dsymmetrize_q(
magma_uplo_t uplo, magma_int_t m,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if ( uplo == MagmaUpper ) {
dsymmetrize_upper<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda );
}
else {
dsymmetrize_lower<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda );
}
}
|
332a18cb6eafd5c31c0760dcce96946b99c191b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hittable.cuh"
#include "hittable_list.cuh"
#include "bvh.cuh"
#include "material.cuh"
#include "ray.cuh"
#define MAX_LEAF_OBJ 5
#define MAX_DEPTH 15
#define MAX_STACK 100
bvh_node::bvh_node() {
type = type_bvh_node;
mat_ptr = NULL;
obj_list = NULL;
left = NULL;
right = NULL;
}
__device__ bool bvh_node::test(aabb &rec) {
rec = box;
return true;
}
bool bvh_node::bounding_box(float t0, float t1, aabb& b) {
b = box;
return true;
}
__device__ bool bvh_node::hit(const ray& r, float t_min, float t_max, hit_record& rec) {
bool is_hit = false;
hittable **node_stack=new hittable*[MAX_STACK];
int top = 0;
node_stack[++top] = (hittable*)this;
rec.t = 1e18;
while (top) {
bvh_node *p = (bvh_node*)node_stack[top--];
if ((p->box).hit(r, t_min, rec.t)) { //p->box.hit(r, t_min, rec.t)
//Leaf node
if (p->left == NULL && p->right == NULL) {
is_hit |= p->obj_list->hit(r, t_min, rec.t, rec);
}
else {
node_stack[++top] = p->left;
node_stack[++top] = p->right;
}
}
}
delete[] node_stack;
return is_hit;
//return false;
}
__global__ void visit(bvh_node **root, int *result) {
//bvh_node *tmp = *root;
//while(tmp!=NULL)
// tmp = (bvh_node*)tmp->left;
//tmp = (bvh_node*)tmp->right;
hittable *node_stack[200];
int top = 0;
node_stack[++top] = (hittable*)(*root);
int tot = 0;
while (top) {
bvh_node *p = (bvh_node*)node_stack[top--];
if (p->left == NULL && p->right == NULL) {
// is_hit |= p->obj_list->hit(r, t_min, rec.t, rec);
aabb box = p->box;
material *mp = p->mat_ptr;
hittable_list* lst = p->obj_list;
int lst_size = lst->list_size;
hittable** obj_lsts = lst->list;
tot+=lst_size;
}
else {
node_stack[++top] = p->left;
node_stack[++top] = p->right;
}
}
*result = tot;
}
hittable *tmp[10000];
void split_objs(hittable **l, int &pl, int L, int R, float split, int axis) {
pl = L;
int pr = R - 1;
//if (L == 15 && R == 18) {
// pl--;
// pl++;
//}
for (int i = L; i < R; i++) {
if (l[i]->center[axis] < split) {
tmp[pl++] = l[i];
}
else {
tmp[pr--] = l[i];
}
}
for (int i = L; i < R; i++)
l[i] = tmp[i];
}
float calc_sah(hittable **l, int mid, int L, int R) {
aabb box_l, box_r, tmp;
l[L]->bounding_box(0, 1, box_l);
l[R - 1]->bounding_box(0, 1, box_r);
for (int i = L; i < mid; i++) {
l[i]->bounding_box(0, 1, tmp);
box_l = surrounding_box(box_l, tmp);
}
for (int i = mid; i < R - 1; i++) {
l[i]->bounding_box(0, 1, tmp);
box_r = surrounding_box(box_r, tmp);
}
float area_l = (box_l.max()[0] - box_l.min()[0])*(box_l.max()[1] - box_l.min()[1]) +
(box_l.max()[0] - box_l.min()[0])*(box_l.max()[2] - box_l.min()[2]) +
(box_l.max()[1] - box_l.min()[1])*(box_l.max()[2] - box_l.min()[2]);
float area_r = (box_r.max()[0] - box_r.min()[0])*(box_r.max()[1] - box_r.min()[1]) +
(box_r.max()[0] - box_r.min()[0])*(box_r.max()[2] - box_r.min()[2]) +
(box_r.max()[1] - box_r.min()[1])*(box_r.max()[2] - box_r.min()[2]);
return (mid - L) * area_l / (area_l + area_r) + (R - mid) * area_r / (area_l + area_r);
}
bvh_node::bvh_node(hittable **l, int L, int R, float time0, float time1, int depth) {
int n = R - L;
type = type_bvh_node;
/*if (n == 1) {
left = right = l[L];
}
else if (n == 2) {
left = l[L];
right = l[L+1];
}
else */
if (depth >= MAX_DEPTH || n <= MAX_LEAF_OBJ) {
mat_ptr = NULL;
/*int mid = (L + R) / 2;
left= new bvh_node(l, L, mid, time0, time1, depth + 1);
right = new bvh_node(l, mid, R, time0, time1, depth + 1);*/
obj_list = new hittable_list(l + L, R - L);
obj_list->bounding_box(0, 1, box);
left = right = NULL;
}
else {
obj_list = NULL;
mat_ptr = NULL;
//hittable **tmp = (hittable**)malloc(n * sizeof(hittable*));
l[L]->bounding_box(0, 1, box);
for (int i = L + 1; i < R; i++) {
aabb box2;
l[i]->bounding_box(0, 1, box2);
box = surrounding_box(box, box2);
}
float min_sah = 1e9;
float best_split;
int best_axis = -1;
//if (R == 18 && L == 15)
// min_sah = 1e10;
for (int axis = 0; axis < 3; axis++) {
float lowerbound = box.min()[axis], upperbound = box.max()[axis];
float step = (upperbound - lowerbound) / (32.0f / (depth + 1.0f));
for (float split = lowerbound + step; split < upperbound - 1e-5f; split += step) {
int mid;
split_objs(l, mid, L, R, split, axis);
if (mid == L || mid == R)
continue;
float now_sah = calc_sah(tmp, mid, L, R);
if (now_sah < min_sah) {
min_sah = now_sah;
best_axis = axis;
best_split = split;
}
}
}
if (best_axis == -1) {
std::cerr << "Can't find the best split!\n";
exit(999);
}
else {
int mid;
split_objs(l, mid, L, R, best_split, best_axis);
//free(tmp);
left = new bvh_node(l, L, mid, time0, time1, depth + 1);
right = new bvh_node(l, mid, R, time0, time1, depth + 1);
}
aabb box_l, box_r;
left->bounding_box(0, 1, box_l);
right->bounding_box(0, 1, box_r);
box = surrounding_box(box_l, box_r);
}
} | 332a18cb6eafd5c31c0760dcce96946b99c191b2.cu | #include "hittable.cuh"
#include "hittable_list.cuh"
#include "bvh.cuh"
#include "material.cuh"
#include "ray.cuh"
#define MAX_LEAF_OBJ 5
#define MAX_DEPTH 15
#define MAX_STACK 100
bvh_node::bvh_node() {
type = type_bvh_node;
mat_ptr = NULL;
obj_list = NULL;
left = NULL;
right = NULL;
}
__device__ bool bvh_node::test(aabb &rec) {
rec = box;
return true;
}
bool bvh_node::bounding_box(float t0, float t1, aabb& b) {
b = box;
return true;
}
__device__ bool bvh_node::hit(const ray& r, float t_min, float t_max, hit_record& rec) {
bool is_hit = false;
hittable **node_stack=new hittable*[MAX_STACK];
int top = 0;
node_stack[++top] = (hittable*)this;
rec.t = 1e18;
while (top) {
bvh_node *p = (bvh_node*)node_stack[top--];
if ((p->box).hit(r, t_min, rec.t)) { //p->box.hit(r, t_min, rec.t)
//Leaf node
if (p->left == NULL && p->right == NULL) {
is_hit |= p->obj_list->hit(r, t_min, rec.t, rec);
}
else {
node_stack[++top] = p->left;
node_stack[++top] = p->right;
}
}
}
delete[] node_stack;
return is_hit;
//return false;
}
__global__ void visit(bvh_node **root, int *result) {
//bvh_node *tmp = *root;
//while(tmp!=NULL)
// tmp = (bvh_node*)tmp->left;
//tmp = (bvh_node*)tmp->right;
hittable *node_stack[200];
int top = 0;
node_stack[++top] = (hittable*)(*root);
int tot = 0;
while (top) {
bvh_node *p = (bvh_node*)node_stack[top--];
if (p->left == NULL && p->right == NULL) {
// is_hit |= p->obj_list->hit(r, t_min, rec.t, rec);
aabb box = p->box;
material *mp = p->mat_ptr;
hittable_list* lst = p->obj_list;
int lst_size = lst->list_size;
hittable** obj_lsts = lst->list;
tot+=lst_size;
}
else {
node_stack[++top] = p->left;
node_stack[++top] = p->right;
}
}
*result = tot;
}
hittable *tmp[10000];
void split_objs(hittable **l, int &pl, int L, int R, float split, int axis) {
pl = L;
int pr = R - 1;
//if (L == 15 && R == 18) {
// pl--;
// pl++;
//}
for (int i = L; i < R; i++) {
if (l[i]->center[axis] < split) {
tmp[pl++] = l[i];
}
else {
tmp[pr--] = l[i];
}
}
for (int i = L; i < R; i++)
l[i] = tmp[i];
}
float calc_sah(hittable **l, int mid, int L, int R) {
aabb box_l, box_r, tmp;
l[L]->bounding_box(0, 1, box_l);
l[R - 1]->bounding_box(0, 1, box_r);
for (int i = L; i < mid; i++) {
l[i]->bounding_box(0, 1, tmp);
box_l = surrounding_box(box_l, tmp);
}
for (int i = mid; i < R - 1; i++) {
l[i]->bounding_box(0, 1, tmp);
box_r = surrounding_box(box_r, tmp);
}
float area_l = (box_l.max()[0] - box_l.min()[0])*(box_l.max()[1] - box_l.min()[1]) +
(box_l.max()[0] - box_l.min()[0])*(box_l.max()[2] - box_l.min()[2]) +
(box_l.max()[1] - box_l.min()[1])*(box_l.max()[2] - box_l.min()[2]);
float area_r = (box_r.max()[0] - box_r.min()[0])*(box_r.max()[1] - box_r.min()[1]) +
(box_r.max()[0] - box_r.min()[0])*(box_r.max()[2] - box_r.min()[2]) +
(box_r.max()[1] - box_r.min()[1])*(box_r.max()[2] - box_r.min()[2]);
return (mid - L) * area_l / (area_l + area_r) + (R - mid) * area_r / (area_l + area_r);
}
bvh_node::bvh_node(hittable **l, int L, int R, float time0, float time1, int depth) {
int n = R - L;
type = type_bvh_node;
/*if (n == 1) {
left = right = l[L];
}
else if (n == 2) {
left = l[L];
right = l[L+1];
}
else */
if (depth >= MAX_DEPTH || n <= MAX_LEAF_OBJ) {
mat_ptr = NULL;
/*int mid = (L + R) / 2;
left= new bvh_node(l, L, mid, time0, time1, depth + 1);
right = new bvh_node(l, mid, R, time0, time1, depth + 1);*/
obj_list = new hittable_list(l + L, R - L);
obj_list->bounding_box(0, 1, box);
left = right = NULL;
}
else {
obj_list = NULL;
mat_ptr = NULL;
//hittable **tmp = (hittable**)malloc(n * sizeof(hittable*));
l[L]->bounding_box(0, 1, box);
for (int i = L + 1; i < R; i++) {
aabb box2;
l[i]->bounding_box(0, 1, box2);
box = surrounding_box(box, box2);
}
float min_sah = 1e9;
float best_split;
int best_axis = -1;
//if (R == 18 && L == 15)
// min_sah = 1e10;
for (int axis = 0; axis < 3; axis++) {
float lowerbound = box.min()[axis], upperbound = box.max()[axis];
float step = (upperbound - lowerbound) / (32.0f / (depth + 1.0f));
for (float split = lowerbound + step; split < upperbound - 1e-5f; split += step) {
int mid;
split_objs(l, mid, L, R, split, axis);
if (mid == L || mid == R)
continue;
float now_sah = calc_sah(tmp, mid, L, R);
if (now_sah < min_sah) {
min_sah = now_sah;
best_axis = axis;
best_split = split;
}
}
}
if (best_axis == -1) {
std::cerr << "Can't find the best split!\n";
exit(999);
}
else {
int mid;
split_objs(l, mid, L, R, best_split, best_axis);
//free(tmp);
left = new bvh_node(l, L, mid, time0, time1, depth + 1);
right = new bvh_node(l, mid, R, time0, time1, depth + 1);
}
aabb box_l, box_r;
left->bounding_box(0, 1, box_l);
right->bounding_box(0, 1, box_r);
box = surrounding_box(box_l, box_r);
}
} |
6b21a8a9e5c27ec1ccf2393975a645cc3107448c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <algorithm>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
__global__ void sum(int n, int startIndex, int *odata, const int *idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n) return;
if (index >= startIndex) {
odata[index] = idata[index - startIndex] + idata[index];
}
else {
odata[index] = idata[index];
}
}
__global__ void inclusiveToExclusiveScan(int n, int *odata, const int *idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n) {
odata[index] = index == 0 ? 0 : idata[index - 1];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
float scan(int n, int *odata, const int *idata) {
int blockSize = 128;
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
int* dev_idata;
int* dev_odata;
hipMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAError("hipMalloc dev_idata failed!");
hipMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAError("hipMalloc dev_odata failed!");
hipMemcpy(dev_idata, idata, sizeof(int) * n, hipMemcpyHostToDevice);
hipMemcpy(dev_odata, idata, sizeof(int) * n, hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
int numLevels = ilog2ceil(n);
for (int startIndex = 1; startIndex <= (1 << (numLevels - 1)); startIndex *= 2) {
sum << <fullBlocksPerGrid, blockSize >> >(n, startIndex, dev_odata, dev_idata);
std::swap(dev_idata, dev_odata);
}
inclusiveToExclusiveScan << <fullBlocksPerGrid, blockSize >> >(n, dev_odata, dev_idata);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipMemcpy(odata, dev_odata, sizeof(int) * n, hipMemcpyDeviceToHost);
hipFree(dev_idata);
hipFree(dev_odata);
return milliseconds;
}
}
}
| 6b21a8a9e5c27ec1ccf2393975a645cc3107448c.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <algorithm>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
__global__ void sum(int n, int startIndex, int *odata, const int *idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n) return;
if (index >= startIndex) {
odata[index] = idata[index - startIndex] + idata[index];
}
else {
odata[index] = idata[index];
}
}
__global__ void inclusiveToExclusiveScan(int n, int *odata, const int *idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n) {
odata[index] = index == 0 ? 0 : idata[index - 1];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
float scan(int n, int *odata, const int *idata) {
int blockSize = 128;
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
int* dev_idata;
int* dev_odata;
cudaMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAError("cudaMalloc dev_idata failed!");
cudaMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAError("cudaMalloc dev_odata failed!");
cudaMemcpy(dev_idata, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
cudaMemcpy(dev_odata, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
int numLevels = ilog2ceil(n);
for (int startIndex = 1; startIndex <= (1 << (numLevels - 1)); startIndex *= 2) {
sum << <fullBlocksPerGrid, blockSize >> >(n, startIndex, dev_odata, dev_idata);
std::swap(dev_idata, dev_odata);
}
inclusiveToExclusiveScan << <fullBlocksPerGrid, blockSize >> >(n, dev_odata, dev_idata);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaFree(dev_idata);
cudaFree(dev_odata);
return milliseconds;
}
}
}
|
54c2b37e0e68f51e1a8fda7cab1e9b2c9359defc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/embed_layer.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void EmbedForward(const int nthreads, const Dtype *bottom_data,
const Dtype *weight, const int M, const int N,
const int K, Dtype *top_data) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int n = top_index / N;
const int d = top_index % N;
const int index = static_cast<int>(bottom_data[n]);
#ifdef DEBUG
assert(index >= 0);
assert(index < K);
assert(static_cast<Dtype>(index) == bottom_data[n]);
#endif
const int weight_index = index * N + d;
top_data[top_index] = weight[weight_index];
}
}
template <typename Dtype>
__global__ void EmbedBackward(const int nthreads, const Dtype *bottom_data,
const Dtype *top_diff, const int M, const int N,
const int K, Dtype *weight_diff);
template <typename Dtype>
__global__ void EmbedBackward(const int nthreads, const Dtype *bottom_data,
const Dtype *top_diff, const int M, const int N,
const int K, Dtype *weight_diff) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int n = top_index / N;
const int d = top_index % N;
const int index = static_cast<int>(bottom_data[n]);
const int weight_index = index * N + d;
caffe_gpu_atomic_add(top_diff[top_index], weight_diff + weight_index);
}
}
template <typename Dtype>
void EmbedLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
const Dtype *bottom_data = bottom[0]->gpu_data();
Dtype *top_data = top[0]->mutable_gpu_data();
const Dtype *weight = this->blobs_[0]->gpu_data();
const int count = top[0]->count();
EmbedForward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, weight, M_, N_, K_, top_data);
if (bias_term_) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, Dtype(1),
bias_multiplier_.gpu_data(),
this->blobs_[1]->gpu_data(), Dtype(1), top_data);
}
}
template <typename Dtype>
void EmbedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
CHECK(!propagate_down[0]) << "Can't backpropagate to EmbedLayer input.";
if (this->param_propagate_down_[0]) {
const int top_count = top[0]->count();
const Dtype *top_diff = top[0]->gpu_diff();
const Dtype *bottom_data = bottom[0]->gpu_data();
Dtype *weight_diff = this->blobs_[0]->mutable_gpu_diff();
EmbedBackward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(top_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top_count, bottom_data, top_diff, M_, N_, K_, weight_diff);
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Dtype *top_diff = top[0]->gpu_diff();
Dtype *bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, Dtype(1), top_diff,
bias_multiplier_.gpu_data(), Dtype(1), bias_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EmbedLayer);
} // namespace caffe
| 54c2b37e0e68f51e1a8fda7cab1e9b2c9359defc.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/embed_layer.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void EmbedForward(const int nthreads, const Dtype *bottom_data,
const Dtype *weight, const int M, const int N,
const int K, Dtype *top_data) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int n = top_index / N;
const int d = top_index % N;
const int index = static_cast<int>(bottom_data[n]);
#ifdef DEBUG
assert(index >= 0);
assert(index < K);
assert(static_cast<Dtype>(index) == bottom_data[n]);
#endif
const int weight_index = index * N + d;
top_data[top_index] = weight[weight_index];
}
}
template <typename Dtype>
__global__ void EmbedBackward(const int nthreads, const Dtype *bottom_data,
const Dtype *top_diff, const int M, const int N,
const int K, Dtype *weight_diff);
template <typename Dtype>
__global__ void EmbedBackward(const int nthreads, const Dtype *bottom_data,
const Dtype *top_diff, const int M, const int N,
const int K, Dtype *weight_diff) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int n = top_index / N;
const int d = top_index % N;
const int index = static_cast<int>(bottom_data[n]);
const int weight_index = index * N + d;
caffe_gpu_atomic_add(top_diff[top_index], weight_diff + weight_index);
}
}
template <typename Dtype>
void EmbedLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
const Dtype *bottom_data = bottom[0]->gpu_data();
Dtype *top_data = top[0]->mutable_gpu_data();
const Dtype *weight = this->blobs_[0]->gpu_data();
const int count = top[0]->count();
EmbedForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, weight, M_, N_, K_, top_data);
if (bias_term_) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, Dtype(1),
bias_multiplier_.gpu_data(),
this->blobs_[1]->gpu_data(), Dtype(1), top_data);
}
}
template <typename Dtype>
void EmbedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
CHECK(!propagate_down[0]) << "Can't backpropagate to EmbedLayer input.";
if (this->param_propagate_down_[0]) {
const int top_count = top[0]->count();
const Dtype *top_diff = top[0]->gpu_diff();
const Dtype *bottom_data = bottom[0]->gpu_data();
Dtype *weight_diff = this->blobs_[0]->mutable_gpu_diff();
EmbedBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS>>>(
top_count, bottom_data, top_diff, M_, N_, K_, weight_diff);
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Dtype *top_diff = top[0]->gpu_diff();
Dtype *bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, Dtype(1), top_diff,
bias_multiplier_.gpu_data(), Dtype(1), bias_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EmbedLayer);
} // namespace caffe
|
extract_kernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip_runtime.h"
// statistical kernel
__global__ void extract(hipLaunchParm lp, long d_Ne,
fp *d_I){ // pointer to input image (DEVICE GLOBAL MEMORY)
// indexes
int bx = hipBlockIdx_x; // get current horizontal block index
int tx = hipThreadIdx_x; // get current horizontal thread index
int ei = (bx*NUMBER_THREADS)+tx; // unique thread id, more threads than actual elements !!!
// copy input to output & log uncompress
if(ei<d_Ne){ // do only for the number of elements, omit extra threads
d_I[ei] = exp(d_I[ei]/255); // exponentiate input IMAGE and copy to output image
}
}
| extract_kernel.cu | #include "hip_runtime.h"
// statistical kernel
__global__ void extract(hipLaunchParm lp, long d_Ne,
fp *d_I){ // pointer to input image (DEVICE GLOBAL MEMORY)
// indexes
int bx = hipBlockIdx_x; // get current horizontal block index
int tx = hipThreadIdx_x; // get current horizontal thread index
int ei = (bx*NUMBER_THREADS)+tx; // unique thread id, more threads than actual elements !!!
// copy input to output & log uncompress
if(ei<d_Ne){ // do only for the number of elements, omit extra threads
d_I[ei] = exp(d_I[ei]/255); // exponentiate input IMAGE and copy to output image
}
}
|
799ba4495ceb68ffd993c17a2bb35bd361a0938c.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _SOMKERNELS_
#define _SOMKERNELS_
#include "include/math/Functions.h"
#include "include/math/Random.h"
#include "include/gpgpu/Kernels.h"
#include <cfloat>
#include <cassert>
#include <cmath>
struct saxmy_functor {
const float a;
saxmy_functor(float _a) : a(_a) {}
__host__ __device__
float operator()(const float& x, const float& y) const {
return a * (x - y);
}
};
// return the biggest of two tuples
struct bigger_tuple_functor {
__device__ __host__
thrust::tuple<float, unsigned int> operator() (
const thrust::tuple<float, unsigned int> &a,
const thrust::tuple<float, unsigned int> &b )
{
return (a >= b) ? a : b;
}
};
// return the biggest of two tuples
struct smaller_tuple_functor {
__device__ __host__
thrust::tuple<float, unsigned int> operator() (
const thrust::tuple<float, unsigned int> &a,
const thrust::tuple<float, unsigned int> &b )
{
return (a <= b) ? a : b;
}
};
float hostGetMax(const thrust::device_vector<float>& vec, unsigned int &ID) {
// create implicit index sequence [0, 1, 2, ... ]
thrust::counting_iterator<unsigned int> begin(0);
thrust::counting_iterator<unsigned int> end(vec.size() );
thrust::tuple<float, unsigned int> init(vec[0], 0);
thrust::tuple<float, unsigned int> smallest;
smallest = reduce( thrust::make_zip_iterator(make_tuple(vec.begin(), begin) ),
thrust::make_zip_iterator(make_tuple(vec.end(), end) ),
init,
bigger_tuple_functor() );
ID = thrust::get<1>(smallest);
return vec[ID];
}
float hostGetMin(const thrust::device_vector<float>& vec, unsigned int &ID) {
// create implicit index sequence [0, 1, 2, ... ]
thrust::counting_iterator<unsigned int> begin(0);
thrust::counting_iterator<unsigned int> end(vec.size() );
thrust::tuple<float, unsigned int> init(vec[0], 0);
thrust::tuple<float, unsigned int> smallest;
smallest = reduce( thrust::make_zip_iterator(make_tuple(vec.begin(), begin) ),
thrust::make_zip_iterator(make_tuple(vec.end(), end) ),
init,
smaller_tuple_functor() );
ID = thrust::get<1>(smallest);
return vec[ID];
}
//////////////////////////////////////////////////////////////////////////////////////////////
struct minus_pow_functor {
const float fVal;
minus_pow_functor(float val) : fVal(val) {}
__host__ __device__
float operator()(const float& val) const {
return pow(fVal-val, 2);
}
};
struct sqrt_functor {
__host__ __device__
float operator()(const float& val) const {
return sqrt(val);
}
};
//////////////////////////////////////////////////////////////////////////////////////////////
struct gaussian_bell_functor {
float fSigmaT;
gaussian_bell_functor(const float &sigmaT) : fSigmaT(sigmaT) {}
__host__ __device__
float operator()(const float& dist) const {
return ANN::fcn_gaussian_bell(dist, fSigmaT);
}
};
struct hebbian_functor {
float fLearningRate;
float fInput;
hebbian_functor(const float &learning_rate, const float &input) :
fLearningRate(learning_rate), fInput(input) {}
__host__ __device__
float operator()(const float& fWeight, const float& fInfluence) const {
return fWeight + (fInfluence*fLearningRate*(fInput-fWeight) );
}
};
/*
* Layout of SOMEdgeMatrix:
* COL1 COL2 COL3 COL(n+1)
* ROW1 toNeur1 toNeur1 toNeur1 ..
* ROW2 toNeur2 toNeur2 toNeur2 ..
* ROW3 toNeur3 toNeur3 toNeur3 ..
* ROW(n+1) .. .. ..
*/
BMUExport
hostSOMFindBMNeuronID(std::vector<SplittedNetExport> &SExp,
const thrust::device_vector<float> &InputVector,
const float &fConscienceRate)
{
BMUExport retBMU;
float fLastBMU = FLT_MAX;
#pragma omp parallel for
for(int iDev = 0; iDev < static_cast<int>(SExp.size() ); iDev++) {
if(hipSetDevice(iDev) != hipSuccess) {
std::cout<<"hostSOMTraining(): Setting new cuda-capable device failed."<<std::endl;
continue;
} else {
unsigned int BMUID = 0;
unsigned int iWidth = SExp.at(iDev).f2dEdges.getW();
unsigned int iHeight = SExp.at(iDev).f2dEdges.getH();
assert(iWidth > 0);
assert(iHeight > 0);
thrust::device_vector<float> dvRes(iWidth, 0.f);
thrust::device_vector<float> dvTmp(iWidth, 0.f);// temporary
for(unsigned int y = 0; y < iHeight; y++) {
thrust::transform(
SExp.at(iDev).f2dEdges.getRowBegin(y), // input
SExp.at(iDev).f2dEdges.getRowEnd(y), // input
dvTmp.begin(), // result
minus_pow_functor(InputVector[y]) ); // functor
thrust::transform(
dvRes.begin(), // input
dvRes.end(), // input
dvTmp.begin(), // input
dvRes.begin(), // result
thrust::plus<float>() ); // functor
}
dvTmp = dvRes;
// implementation of conscience mechanism
if(fConscienceRate > 0.f) {
thrust::device_vector<float> dvConscience(iWidth, 1.f / (float)iWidth);
thrust::transform(
dvConscience.begin(),
dvConscience.end(),
SExp.at(iDev).dvConscience.begin(),
dvConscience.begin(),
thrust::minus<float>() );
thrust::transform(
dvRes.begin(),
dvRes.end(),
dvConscience.begin(),
dvRes.begin(),
thrust::minus<float>() );
}
thrust::transform(
dvTmp.begin(),
dvTmp.end(),
SExp.at(iDev).dvConscience.begin(),
SExp.at(iDev).dvConscience.begin(),
saxmy_functor(fConscienceRate) );
hostGetMin(dvRes, BMUID);
// Check partial results for global BMU in all devices
if(fLastBMU > dvRes[BMUID]) {
fLastBMU = dvRes[BMUID];
thrust::host_vector<float> vPos = SExp.at(iDev).f2dPositions.getCol(BMUID);
retBMU = BMUExport(BMUID, iDev, vPos);
}
}
}
return retBMU;
}
/*
* Layout of SOMPositionMatrix:
* COL1 COL2 COL3 COL(n+1)
* ROW1 Xpos Xpos Xpos ..
* ROW2 Ypos Ypos Ypos ..
* ROW3 Zpos Zpos Zpos ..
* ROW(n+1) .. .. .. ..
*/
/*
 * Backward-propagation (weight update) step of one SOM training cycle.
 * For every device slice: computes each neuron's Euclidean distance to the
 * BMU, turns it into a Gaussian neighborhood influence, builds a stencil of
 * neurons inside the radius fSigmaT, and applies a Hebbian-style update to
 * the edge weights of those neurons only.
 */
void hostSOMPropagateBW( std::vector<SplittedNetExport> &SExp,
    const thrust::device_vector<float> &dvInputVector, // current input pattern (on device)
    const BMUExport &BMU,       // best matching unit from hostSOMFindBMNeuronID()
    const float &fSigmaT,       // current neighborhood radius
    const float &fLearningRate  // current learning rate
    )
{
    // One OpenMP thread per CUDA-capable device; each updates its own slice.
    #pragma omp parallel for
    for(int iDev = 0; iDev < static_cast<int>(SExp.size() ); iDev++) {
        if(hipSetDevice(iDev) != hipSuccess) {
            // NOTE(review): message names hostSOMTraining() although we are in
            // hostSOMPropagateBW(); runtime string kept as-is.
            std::cout<<"hostSOMTraining(): Setting new cuda-capable device failed."<<std::endl;
            continue;
        } else {
            unsigned int iWidth = SExp.at(iDev).f2dPositions.getW();  // presumably one neuron per column — confirm
            unsigned int iHeight = SExp.at(iDev).f2dPositions.getH(); // coordinate dimensions per neuron
            thrust::device_vector<float> dvBMUPos = BMU.dvBMUPos;     // BMU coordinates
            thrust::device_vector<float> dvTmp(iWidth, 0.f); // temporary; later reused as stencil
            thrust::device_vector<float> dvInfluence(iWidth, 0.f);
            thrust::device_vector<float> dvDist(iWidth, 0.f);
            // 1. Calc distances for all neurons to BMNeuron
            // Distance = sqrt(pow(x,2)+pow(y,2)+pow(z,2)+pow(n+1,2) );
            for(unsigned int y = 0; y < iHeight; y++) { // for each coordinate position of the neuron
                thrust::transform(
                    SExp.at(iDev).f2dPositions.getRowBegin(y),  // input
                    SExp.at(iDev).f2dPositions.getRowEnd(y),    // input
                    dvTmp.begin(),                              // result
                    minus_pow_functor(dvBMUPos[y]) );           // (pos - bmuPos)^2
                thrust::transform(
                    dvDist.begin(),          // input
                    dvDist.end(),            // input
                    dvTmp.begin(),           // input
                    dvDist.begin(),          // result
                    thrust::plus<float>() ); // accumulate squared deltas
            }
            thrust::transform(
                dvDist.begin(),     // input
                dvDist.end(),       // input
                dvDist.begin(),     // result
                sqrt_functor() );   // dvDist now holds Euclidean distances
            // 2. Calculate the influence for each neuron
            thrust::transform(
                dvDist.begin(),                    // input
                dvDist.end(),                      // input
                dvInfluence.begin(),               // result
                gaussian_bell_functor(fSigmaT) );  // influence = gauss(dist, sigmaT)
            // 3. Only handle neurons in radius:
            // 3a. Make stencil: dvTmp[i] = (dist[i] <= sigmaT) as 1.f / 0.f
            dvTmp.assign(iWidth, fSigmaT);
            thrust::transform(
                dvDist.begin(),               // input 1
                dvDist.end(),                 // input 1
                dvTmp.begin(),                // input 2 (radius)
                dvTmp.begin(),                // result (stencil)
                thrust::less_equal<float>()   // functor
            );
            // 3b. Use stencil to modify only neurons inside the radius
            // Save result in the ANN::Matrix
            iWidth = SExp.at(iDev).f2dEdges.getW();
            iHeight = SExp.at(iDev).f2dEdges.getH();
            for(unsigned int y = 0; y < iHeight; y++) { // for each edge of the neuron
                thrust::transform_if(
                    SExp.at(iDev).f2dEdges.getRowBegin(y), // input 1: current weights
                    SExp.at(iDev).f2dEdges.getRowEnd(y),   // input 1
                    dvInfluence.begin(),                   // input 2: influence
                    dvTmp.begin(),                         // stencil (nonzero = inside radius)
                    SExp.at(iDev).f2dEdges.getRowBegin(y), // result: update in place
                    hebbian_functor(fLearningRate, dvInputVector[y]), // w += infl*lr*(in-w)
                    thrust::identity<int>() );             // predicate on stencil value
            }
        }
    }
}
/**
 * Runs the full SOM training loop.
 * Each cycle: picks a random pattern from InputSet, uploads it, finds the
 * global BMU across all device slices, decays radius/learning rate via
 * pfnDecay, and applies the weight update with hostSOMPropagateBW().
 */
void hostSOMTraining( std::vector<SplittedNetExport> &SExp,
    const ANN::TrainingSet &InputSet,
    const unsigned int &iCycles,        // total number of training cycles
    const float &fSigma0,               // initial neighborhood radius
    const float &fLearningRate0,        // initial learning rate
    const float &fConscienceRate,       // > 0 enables the conscience mechanism
    float (*pfnDecay)(const float &, const float &, const float &) ) // decay(start, t, lambda)
{
    // Time constant of the radius decay.
    // NOTE(review): assumes log(fSigma0) != 0, i.e. fSigma0 != 1 — confirm callers.
    float fLambda = iCycles / log(fSigma0);
    int iMin = 0;
    int iMax = InputSet.GetNrElements()-1;  // index range for random pattern picks
    unsigned int iProgCount = 1;
    // use 8 proximal neurons as standard
    float fSigmaT = sqrt(2.f);
    for(unsigned int i = 0; i < iCycles; i++) {
        // Progress output in 10% steps; per-step output for fewer than 10 cycles.
        if(iCycles >= 10) {
            if(((i+1) / (iCycles/10)) == iProgCount && (i+1) % (iCycles/10) == 0) {
                std::cout<<"Current training progress calculated by the GPU is: "<<iProgCount*10.f<<"%/Step="<<i+1<<std::endl;
                iProgCount++;
            }
        }
        else {
            std::cout<<"Current training progress calculated by the CPU is: "<<(float)(i+1.f)/(float)iCycles*100.f<<"%/Step="<<i+1<<std::endl;
        }
        // Set input: random training pattern, uploaded to the device.
        std::vector<float> vCurInput = InputSet.GetInput(ANN::RandInt(iMin, iMax) );
        thrust::device_vector<float> dvInputVector(vCurInput.size() );
        thrust::copy(vCurInput.begin(), vCurInput.end(), dvInputVector.begin() );
        // Find BMNeuron
        BMUExport BMUExp;
        BMUExp = hostSOMFindBMNeuronID(SExp, dvInputVector, fConscienceRate);
        // Calc m_fSigmaT if conscience is _not_ used; with conscience the
        // radius stays at its sqrt(2) default.
        if(fConscienceRate <= 0.f)
            fSigmaT = pfnDecay(fSigma0, i, fLambda);
        float fLearningRate = pfnDecay(fLearningRate0, i, iCycles);
        // Propagate BW
        hostSOMPropagateBW( SExp,
            dvInputVector,    // const
            BMUExp,           // const
            fSigmaT,          // const
            fLearningRate );  // const
    }
}
#endif
| 799ba4495ceb68ffd993c17a2bb35bd361a0938c.cu | #ifndef _SOMKERNELS_
#define _SOMKERNELS_
#include "include/math/Functions.h"
#include "include/math/Random.h"
#include "include/gpgpu/Kernels.h"
#include <cfloat>
#include <cassert>
#include <cmath>
// y' = a * (x - y): scaled difference, used for the conscience update.
struct saxmy_functor {
    const float a;  // scale factor
    saxmy_functor(float _a) : a(_a) {}
    __host__ __device__
    float operator()(const float& x, const float& y) const {
        return a * (x - y);
    }
};
// return the bigger of two (value, index) tuples (lexicographic compare)
struct bigger_tuple_functor {
    __device__ __host__
    thrust::tuple<float, unsigned int> operator() (
        const thrust::tuple<float, unsigned int> &a,
        const thrust::tuple<float, unsigned int> &b )
    {
        return (a >= b) ? a : b;
    }
};
// return the smaller of two (value, index) tuples (lexicographic compare)
struct smaller_tuple_functor {
    __device__ __host__
    thrust::tuple<float, unsigned int> operator() (
        const thrust::tuple<float, unsigned int> &a,
        const thrust::tuple<float, unsigned int> &b )
    {
        return (a <= b) ? a : b;
    }
};
/// Returns the largest element of a device vector and writes its index to ID.
float hostGetMax(const thrust::device_vector<float>& vec, unsigned int &ID) {
    // Zip each value with its position via an implicit counting sequence [0, 1, 2, ...]
    thrust::counting_iterator<unsigned int> first(0);
    thrust::counting_iterator<unsigned int> last(vec.size() );
    thrust::tuple<float, unsigned int> init(vec[0], 0);
    thrust::tuple<float, unsigned int> largest =
        reduce( thrust::make_zip_iterator(make_tuple(vec.begin(), first) ),
                thrust::make_zip_iterator(make_tuple(vec.end(), last) ),
                init,
                bigger_tuple_functor() );
    ID = thrust::get<1>(largest);
    return vec[ID];
}
/// Returns the smallest element of a device vector and writes its index to ID.
float hostGetMin(const thrust::device_vector<float>& vec, unsigned int &ID) {
    // Zip each value with its position via an implicit counting sequence [0, 1, 2, ...]
    thrust::counting_iterator<unsigned int> first(0);
    thrust::counting_iterator<unsigned int> last(vec.size() );
    thrust::tuple<float, unsigned int> init(vec[0], 0);
    thrust::tuple<float, unsigned int> smallest =
        reduce( thrust::make_zip_iterator(make_tuple(vec.begin(), first) ),
                thrust::make_zip_iterator(make_tuple(vec.end(), last) ),
                init,
                smaller_tuple_functor() );
    ID = thrust::get<1>(smallest);
    return vec[ID];
}
//////////////////////////////////////////////////////////////////////////////////////////////
// (fVal - val)^2: squared difference against a fixed reference value.
struct minus_pow_functor {
    const float fVal;
    minus_pow_functor(float val) : fVal(val) {}
    __host__ __device__
    float operator()(const float& val) const {
        return pow(fVal-val, 2);
    }
};
// Element-wise square root.
struct sqrt_functor {
    __host__ __device__
    float operator()(const float& val) const {
        return sqrt(val);
    }
};
//////////////////////////////////////////////////////////////////////////////////////////////
// Neighborhood influence: Gaussian bell of the distance, width fSigmaT.
struct gaussian_bell_functor {
    float fSigmaT;
    gaussian_bell_functor(const float &sigmaT) : fSigmaT(sigmaT) {}
    __host__ __device__
    float operator()(const float& dist) const {
        return ANN::fcn_gaussian_bell(dist, fSigmaT);
    }
};
// Hebbian-style weight update: w' = w + influence * rate * (input - w).
struct hebbian_functor {
    float fLearningRate;
    float fInput;
    hebbian_functor(const float &learning_rate, const float &input) :
        fLearningRate(learning_rate), fInput(input) {}
    __host__ __device__
    float operator()(const float& fWeight, const float& fInfluence) const {
        return fWeight + (fInfluence*fLearningRate*(fInput-fWeight) );
    }
};
/*
* Layout of SOMEdgeMatrix:
* COL1 COL2 COL3 COL(n+1)
* ROW1 toNeur1 toNeur1 toNeur1 ..
* ROW2 toNeur2 toNeur2 toNeur2 ..
* ROW3 toNeur3 toNeur3 toNeur3 ..
* ROW(n+1) .. .. ..
*/
/*
 * Finds the global best-matching unit (BMU) over all device slices.
 * Per device: accumulates the squared distance of the input pattern to every
 * neuron's edge weights, optionally applies the conscience bias, updates the
 * running conscience vector, and takes the per-device minimum. The smallest
 * per-device minimum wins globally.
 *
 * FIX: fLastBMU/retBMU are shared across the OpenMP threads; the original
 * compare-and-update was a data race. The update is now guarded by an
 * omp critical section (the candidate value is read from the device once,
 * outside the critical section, to keep serialization short).
 */
BMUExport
hostSOMFindBMNeuronID(std::vector<SplittedNetExport> &SExp,
    const thrust::device_vector<float> &InputVector,
    const float &fConscienceRate)
{
    BMUExport retBMU;
    float fLastBMU = FLT_MAX;
    #pragma omp parallel for
    for(int iDev = 0; iDev < static_cast<int>(SExp.size() ); iDev++) {
        if(cudaSetDevice(iDev) != cudaSuccess) {
            std::cout<<"hostSOMTraining(): Setting new cuda-capable device failed."<<std::endl;
            continue;
        } else {
            unsigned int BMUID = 0;
            unsigned int iWidth = SExp.at(iDev).f2dEdges.getW();
            unsigned int iHeight = SExp.at(iDev).f2dEdges.getH();
            assert(iWidth > 0);
            assert(iHeight > 0);
            thrust::device_vector<float> dvRes(iWidth, 0.f);
            thrust::device_vector<float> dvTmp(iWidth, 0.f); // temporary
            // Accumulate squared distance of the pattern to each neuron.
            for(unsigned int y = 0; y < iHeight; y++) {
                thrust::transform(
                    SExp.at(iDev).f2dEdges.getRowBegin(y),  // input
                    SExp.at(iDev).f2dEdges.getRowEnd(y),    // input
                    dvTmp.begin(),                          // result
                    minus_pow_functor(InputVector[y]) );    // (w - in)^2
                thrust::transform(
                    dvRes.begin(),           // input
                    dvRes.end(),             // input
                    dvTmp.begin(),           // input
                    dvRes.begin(),           // result
                    thrust::plus<float>() ); // accumulate
            }
            dvTmp = dvRes;  // keep unbiased distances for the conscience update
            // implementation of conscience mechanism: bias distances by how
            // often each neuron won so far.
            if(fConscienceRate > 0.f) {
                thrust::device_vector<float> dvConscience(iWidth, 1.f / (float)iWidth);
                thrust::transform(
                    dvConscience.begin(),
                    dvConscience.end(),
                    SExp.at(iDev).dvConscience.begin(),
                    dvConscience.begin(),
                    thrust::minus<float>() );
                thrust::transform(
                    dvRes.begin(),
                    dvRes.end(),
                    dvConscience.begin(),
                    dvRes.begin(),
                    thrust::minus<float>() );
            }
            // Update the running conscience: c += rate * (dist - c).
            thrust::transform(
                dvTmp.begin(),
                dvTmp.end(),
                SExp.at(iDev).dvConscience.begin(),
                SExp.at(iDev).dvConscience.begin(),
                saxmy_functor(fConscienceRate) );
            hostGetMin(dvRes, BMUID);
            // Read the candidate once (device -> host) before serializing.
            float fBMUVal = dvRes[BMUID];
            // Check partial results for global BMU in all devices.
            #pragma omp critical
            {
                if(fLastBMU > fBMUVal) {
                    fLastBMU = fBMUVal;
                    thrust::host_vector<float> vPos = SExp.at(iDev).f2dPositions.getCol(BMUID);
                    retBMU = BMUExport(BMUID, iDev, vPos);
                }
            }
        }
    }
    return retBMU;
}
/*
* Layout of SOMPositionMatrix:
* COL1 COL2 COL3 COL(n+1)
* ROW1 Xpos Xpos Xpos ..
* ROW2 Ypos Ypos Ypos ..
* ROW3 Zpos Zpos Zpos ..
* ROW(n+1) .. .. .. ..
*/
/*
 * Weight-update (backward) step of one SOM training cycle, CUDA variant.
 * Per device slice: distance of every neuron to the BMU -> Gaussian
 * influence -> stencil of neurons within radius fSigmaT -> Hebbian update
 * of the stenciled neurons' edge weights.
 */
void hostSOMPropagateBW( std::vector<SplittedNetExport> &SExp,
    const thrust::device_vector<float> &dvInputVector, // current input pattern (device)
    const BMUExport &BMU,       // winning neuron from hostSOMFindBMNeuronID()
    const float &fSigmaT,       // current neighborhood radius
    const float &fLearningRate  // current learning rate
    )
{
    // One OpenMP thread drives each CUDA device.
    #pragma omp parallel for
    for(int iDev = 0; iDev < static_cast<int>(SExp.size() ); iDev++) {
        if(cudaSetDevice(iDev) != cudaSuccess) {
            // NOTE(review): message names hostSOMTraining(); kept (runtime string).
            std::cout<<"hostSOMTraining(): Setting new cuda-capable device failed."<<std::endl;
            continue;
        } else {
            unsigned int iWidth = SExp.at(iDev).f2dPositions.getW();
            unsigned int iHeight = SExp.at(iDev).f2dPositions.getH();
            thrust::device_vector<float> dvBMUPos = BMU.dvBMUPos;
            thrust::device_vector<float> dvTmp(iWidth, 0.f); // temporary; reused as stencil in 3a
            thrust::device_vector<float> dvInfluence(iWidth, 0.f);
            thrust::device_vector<float> dvDist(iWidth, 0.f);
            // 1. Calc distances for all neurons to BMNeuron
            // Distance = sqrt(pow(x,2)+pow(y,2)+pow(z,2)+pow(n+1,2) );
            for(unsigned int y = 0; y < iHeight; y++) { // for each coordinate position of the neuron
                thrust::transform(
                    SExp.at(iDev).f2dPositions.getRowBegin(y),  // input
                    SExp.at(iDev).f2dPositions.getRowEnd(y),    // input
                    dvTmp.begin(),                              // result
                    minus_pow_functor(dvBMUPos[y]) );           // (pos - bmuPos)^2
                thrust::transform(
                    dvDist.begin(),          // input
                    dvDist.end(),            // input
                    dvTmp.begin(),           // input
                    dvDist.begin(),          // result
                    thrust::plus<float>() ); // accumulate squared deltas
            }
            thrust::transform(
                dvDist.begin(),    // input
                dvDist.end(),      // input
                dvDist.begin(),    // result
                sqrt_functor() );  // dvDist: Euclidean distances to the BMU
            // 2. Calculate the influence for each neuron
            thrust::transform(
                dvDist.begin(),                    // input
                dvDist.end(),                      // input
                dvInfluence.begin(),               // result
                gaussian_bell_functor(fSigmaT) );  // functor
            // 3. Only handle neurons in radius:
            // 3a. Make stencil: dvTmp[i] = (dist[i] <= sigmaT)
            dvTmp.assign(iWidth, fSigmaT);
            thrust::transform(
                dvDist.begin(),               // input 1
                dvDist.end(),                 // input 1
                dvTmp.begin(),                // input 2 (radius)
                dvTmp.begin(),                // result (stencil)
                thrust::less_equal<float>()   // functor
            );
            // 3b. Use stencil to modify only neurons inside the radius
            // Save result in the ANN::Matrix
            iWidth = SExp.at(iDev).f2dEdges.getW();
            iHeight = SExp.at(iDev).f2dEdges.getH();
            for(unsigned int y = 0; y < iHeight; y++) { // for each edge of the neuron
                thrust::transform_if(
                    SExp.at(iDev).f2dEdges.getRowBegin(y), // input 1: weights
                    SExp.at(iDev).f2dEdges.getRowEnd(y),   // input 1
                    dvInfluence.begin(),                   // input 2: influence
                    dvTmp.begin(),                         // stencil
                    SExp.at(iDev).f2dEdges.getRowBegin(y), // result (in place)
                    hebbian_functor(fLearningRate, dvInputVector[y]), // w += infl*lr*(in-w)
                    thrust::identity<int>() );             // predicate on stencil
            }
        }
    }
}
/**
 * Full SOM training loop (CUDA variant).
 * Each cycle draws a random pattern, uploads it, locates the global BMU,
 * decays radius/learning rate through pfnDecay, then updates the weights.
 */
void hostSOMTraining( std::vector<SplittedNetExport> &SExp,
    const ANN::TrainingSet &InputSet,
    const unsigned int &iCycles,        // total training cycles
    const float &fSigma0,               // initial neighborhood radius
    const float &fLearningRate0,        // initial learning rate
    const float &fConscienceRate,       // > 0 enables the conscience mechanism
    float (*pfnDecay)(const float &, const float &, const float &) ) // decay(start, t, lambda)
{
    // Time constant of the radius decay.
    // NOTE(review): assumes log(fSigma0) != 0, i.e. fSigma0 != 1 — confirm callers.
    float fLambda = iCycles / log(fSigma0);
    int iMin = 0;
    int iMax = InputSet.GetNrElements()-1;  // random pattern index range
    unsigned int iProgCount = 1;
    // use 8 proximal neurons as standard
    float fSigmaT = sqrt(2.f);
    for(unsigned int i = 0; i < iCycles; i++) {
        // Progress output in 10% steps; per-step output for fewer than 10 cycles.
        if(iCycles >= 10) {
            if(((i+1) / (iCycles/10)) == iProgCount && (i+1) % (iCycles/10) == 0) {
                std::cout<<"Current training progress calculated by the GPU is: "<<iProgCount*10.f<<"%/Step="<<i+1<<std::endl;
                iProgCount++;
            }
        }
        else {
            std::cout<<"Current training progress calculated by the CPU is: "<<(float)(i+1.f)/(float)iCycles*100.f<<"%/Step="<<i+1<<std::endl;
        }
        // Set input: random training pattern, copied to the device.
        std::vector<float> vCurInput = InputSet.GetInput(ANN::RandInt(iMin, iMax) );
        thrust::device_vector<float> dvInputVector(vCurInput.size() );
        thrust::copy(vCurInput.begin(), vCurInput.end(), dvInputVector.begin() );
        // Find BMNeuron
        BMUExport BMUExp;
        BMUExp = hostSOMFindBMNeuronID(SExp, dvInputVector, fConscienceRate);
        // Calc m_fSigmaT if conscience is _not_ used; otherwise the radius
        // stays at its sqrt(2) default.
        if(fConscienceRate <= 0.f)
            fSigmaT = pfnDecay(fSigma0, i, fLambda);
        float fLearningRate = pfnDecay(fLearningRate0, i, iCycles);
        // Propagate BW
        hostSOMPropagateBW( SExp,
            dvInputVector,    // const
            BMUExp,           // const
            fSigmaT,          // const
            fLearningRate );  // const
    }
}
#endif
|
0975bef51774d8cc63b5fac95fa4231150600b6b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <vector>
#include <iostream>
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, num_elements).
// One thread per element; the bounds check guards the partial last block.
__global__ void vector_add(const float *a, const float *b, float *c, int num_elements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < num_elements)
    {
        c[i] = a[i] + b[i];
    }
}
// Host driver: fills two random host vectors, adds them on the GPU via
// vector_add, copies the result back and verifies it against the CPU sum.
// Exits with status 1 on any detected failure, 0 on success.
int main(void)
{
    size_t const num_elements = 5000000;
    size_t const elements_size = num_elements * sizeof(float);
    // allocate cpu data
    std::vector<float> host_a(num_elements);
    std::vector<float> host_b(num_elements);
    std::vector<float> host_c(num_elements);
    for (size_t i = 0; i < num_elements; ++i)
    {
        host_a[i] = rand() / (float)RAND_MAX;   // uniform-ish values in [0, 1]
        host_b[i] = rand() / (float)RAND_MAX;
    }
    // allocate device data
    // NOTE(review): hipMalloc/hipMemcpy return codes are not checked here;
    // a failure would only surface at the post-launch hipGetLastError().
    float *dev_a = nullptr;
    float *dev_b = nullptr;
    float *dev_c = nullptr;
    hipMalloc((void **)&dev_a, elements_size);
    hipMalloc((void **)&dev_b, elements_size);
    hipMalloc((void **)&dev_c, elements_size);
    // Copy "A" and "B" from host to device
    hipMemcpy(dev_a, &host_a[0], elements_size, hipMemcpyHostToDevice);
    hipMemcpy(dev_b, &host_b[0], elements_size, hipMemcpyHostToDevice);
    // launch_kernel (ceil-div so the last partial block is covered)
    int block_size = 256;
    int num_blocks = (num_elements + block_size - 1) / block_size;
    hipLaunchKernelGGL(( vector_add) , dim3(num_blocks), dim3(block_size) , 0, 0, dev_a, dev_b, dev_c, num_elements);
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
    {
        std::cerr << "Failed to launch vectorAdd kernel" << hipGetErrorString(err);
        exit(1);
    }
    // Copy "C" from device to host (blocking copy, also syncs with the kernel)
    hipMemcpy(&host_c[0], dev_c, elements_size, hipMemcpyDeviceToHost);
    // verify result against the CPU reference with a float tolerance
    for (size_t i = 0; i < num_elements; ++i)
    {
        if (fabs(host_a[i] + host_b[i] - host_c[i]) > 1e-5)
        {
            std::cerr << "Failure at " << i << std::endl;
            exit(1);
        }
    }
    // free device memory
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    std::cout << "Done" << std::endl;
    return 0;
}
| 0975bef51774d8cc63b5fac95fa4231150600b6b.cu | #include <cuda_runtime.h>
#include <vector>
#include <iostream>
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, num_elements).
// Written as a grid-stride loop so any launch configuration covers the full
// range; with a grid sized to the data each element is written exactly once.
__global__ void vector_add(const float *a, const float *b, float *c, int num_elements)
{
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x;
         idx < num_elements;
         idx += blockDim.x * gridDim.x)
    {
        c[idx] = a[idx] + b[idx];
    }
}
// Abort with a readable diagnostic if a CUDA runtime call failed.
// Without this, an early failure (e.g. out-of-memory in cudaMalloc) would
// only surface as a confusing error much later.
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess)
    {
        std::cerr << what << " failed: " << cudaGetErrorString(err) << std::endl;
        exit(1);
    }
}

// Host driver: fills two random host vectors, adds them on the GPU via
// vector_add, copies the result back and verifies it against the CPU sum.
// Exits with status 1 on any detected failure, 0 on success.
int main(void)
{
    size_t const num_elements = 5000000;
    size_t const elements_size = num_elements * sizeof(float);
    // allocate cpu data
    std::vector<float> host_a(num_elements);
    std::vector<float> host_b(num_elements);
    std::vector<float> host_c(num_elements);
    for (size_t i = 0; i < num_elements; ++i)
    {
        host_a[i] = rand() / (float)RAND_MAX;   // uniform-ish values in [0, 1]
        host_b[i] = rand() / (float)RAND_MAX;
    }
    // allocate device data (every runtime call is checked)
    float *dev_a = nullptr;
    float *dev_b = nullptr;
    float *dev_c = nullptr;
    checkCuda(cudaMalloc((void **)&dev_a, elements_size), "cudaMalloc dev_a");
    checkCuda(cudaMalloc((void **)&dev_b, elements_size), "cudaMalloc dev_b");
    checkCuda(cudaMalloc((void **)&dev_c, elements_size), "cudaMalloc dev_c");
    // Copy "A" and "B" from host to device
    checkCuda(cudaMemcpy(dev_a, &host_a[0], elements_size, cudaMemcpyHostToDevice), "cudaMemcpy A");
    checkCuda(cudaMemcpy(dev_b, &host_b[0], elements_size, cudaMemcpyHostToDevice), "cudaMemcpy B");
    // launch_kernel (ceil-div so the last partial block is covered)
    int block_size = 256;
    int num_blocks = (num_elements + block_size - 1) / block_size;
    vector_add <<< num_blocks, block_size >>>(dev_a, dev_b, dev_c, num_elements);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        std::cerr << "Failed to launch vectorAdd kernel" << cudaGetErrorString(err);
        exit(1);
    }
    // Copy "C" from device to host (blocking copy, also syncs with the kernel)
    checkCuda(cudaMemcpy(&host_c[0], dev_c, elements_size, cudaMemcpyDeviceToHost), "cudaMemcpy C");
    // verify result against the CPU reference with a float tolerance
    for (size_t i = 0; i < num_elements; ++i)
    {
        if (fabs(host_a[i] + host_b[i] - host_c[i]) > 1e-5)
        {
            std::cerr << "Failure at " << i << std::endl;
            exit(1);
        }
    }
    // free device memory
    checkCuda(cudaFree(dev_a), "cudaFree dev_a");
    checkCuda(cudaFree(dev_b), "cudaFree dev_b");
    checkCuda(cudaFree(dev_c), "cudaFree dev_c");
    std::cout << "Done" << std::endl;
    return 0;
}
|
2a1cc84acf38b98ecbcb51bd444db6beeaffccc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
// ----------------------------------------------------------------------------
// kernel helpers
// ----------------------------------------------------------------------------
// Set every element of ptr[0..N) to value; one thread per element with a
// bounds check for the partial last block.
template <typename T>
__global__ void k_set(T* ptr, size_t N, T value) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < N) {
        ptr[i] = value;
    }
}
// Set the single element ptr[i] to value (callers launch one thread).
template <typename T>
__global__ void k_single_set(T* ptr, int i, T value) {
    ptr[i] = value;
}
// Add value to every element of ptr[0..N); bounds-checked like k_set.
template <typename T>
__global__ void k_add(T* ptr, size_t N, T value) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < N) {
        ptr[i] += value;
    }
}
// Add value to the single element ptr[i] (callers launch one thread).
template <typename T>
__global__ void k_single_add(T* ptr, int i, T value) {
    ptr[i] += value;
}
// --------------------------------------------------------
// Testcase: Empty
// --------------------------------------------------------
// Runs a taskflow of three empty GPU tasks 100 times and checks each task
// body executed exactly 100 times. T is the GPU task handle type
// (tf::cudaFlow or tf::cudaFlowCapturer); the tasks never touch it.
template <typename T>
void empty() {
    std::atomic<int> counter{0};  // atomic: tasks may run on different workers
    tf::Taskflow taskflow;
    tf::Executor executor;
    taskflow.emplace([&](T&){
        ++counter;
    });
    taskflow.emplace([&](T&){
        ++counter;
    });
    taskflow.emplace([&](T&){
        ++counter;
    });
    executor.run_n(taskflow, 100).wait();
    REQUIRE(counter == 300);  // 3 tasks x 100 runs
}
TEST_CASE("Empty" * doctest::timeout(300)) {
empty<tf::cudaFlow>();
}
TEST_CASE("EmptyCapture" * doctest::timeout(300)) {
empty<tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Set
// --------------------------------------------------------
// For growing problem sizes: copy a zeroed host buffer to the device,
// overwrite every element with 17 via k_set, copy back, and verify.
template <typename T>
void set() {
    for(unsigned n=1; n<=123456; n = n*2 + 1) { // odd sizes exercise the grid tail
        tf::Taskflow taskflow;
        tf::Executor executor;
        T* cpu = nullptr;
        T* gpu = nullptr;
        auto cputask = taskflow.emplace([&](){
            cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
            REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
        });
        auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
            dim3 g = {(n+255)/256, 1, 1};  // ceil-div grid, 256 threads/block
            dim3 b = {256, 1, 1};
            auto h2d = cf.copy(gpu, cpu, n);
            auto kernel = cf.kernel(g, b, 0, k_set<T>, gpu, n, (T)17);
            auto d2h = cf.copy(cpu, gpu, n);
            h2d.precede(kernel);   // H2D -> kernel -> D2H
            kernel.precede(d2h);
        });
        cputask.precede(gputask);  // buffers must exist before the cudaFlow runs
        executor.run(taskflow).wait();
        for(unsigned i=0; i<n; ++i) {
            REQUIRE(cpu[i] == (T)17);
        }
        std::free(cpu);
        REQUIRE(cudaFree(gpu) == cudaSuccess);
    }
}
TEST_CASE("Set.i8" * doctest::timeout(300)) {
set<int8_t>();
}
TEST_CASE("Set.i16" * doctest::timeout(300)) {
set<int16_t>();
}
TEST_CASE("Set.i32" * doctest::timeout(300)) {
set<int32_t>();
}
// --------------------------------------------------------
// Testcase: Add
// --------------------------------------------------------
template <typename T>
void add() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto ad1 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 1);
auto ad2 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 2);
auto ad3 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 3);
auto ad4 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 4);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(ad1);
ad1.precede(ad2);
ad2.precede(ad3);
ad3.precede(ad4);
ad4.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 10);
}
std::free(cpu);
REQUIRE(hipFree(gpu) == hipSuccess);
}
}
TEST_CASE("Add.i8" * doctest::timeout(300)) {
add<int8_t>();
}
TEST_CASE("Add.i16" * doctest::timeout(300)) {
add<int16_t>();
}
TEST_CASE("Add.i32" * doctest::timeout(300)) {
add<int32_t>();
}
// TODO: 64-bit fail?
//TEST_CASE("Add.i64" * doctest::timeout(300)) {
// add<int64_t>();
//}
// --------------------------------------------------------
// Testcase: Binary Set
// --------------------------------------------------------
template <typename T, typename F>
void bset() {
const unsigned n = 10000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](F& cf) {
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
std::vector<tf::cudaTask> tasks(n+1);
for(unsigned i=1; i<=n; ++i) {
tasks[i] = cf.kernel(g, b, 0, k_single_set<T>, gpu, i-1, (T)17);
auto p = i/2;
if(p != 0) {
tasks[p].precede(tasks[i]);
}
tasks[i].precede(d2h);
h2d.precede(tasks[i]);
}
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("BSet.i8" * doctest::timeout(300)) {
bset<int8_t, tf::cudaFlow>();
}
TEST_CASE("BSet.i16" * doctest::timeout(300)) {
bset<int16_t, tf::cudaFlow>();
}
TEST_CASE("BSet.i32" * doctest::timeout(300)) {
bset<int32_t, tf::cudaFlow>();
}
TEST_CASE("CapturedBSet.i8" * doctest::timeout(300)) {
bset<int8_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedBSet.i16" * doctest::timeout(300)) {
bset<int16_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedBSet.i32" * doctest::timeout(300)) {
bset<int32_t, tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Memset
// --------------------------------------------------------
template <typename F>
void memset() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 100;
int* cpu = new int [N];
int* gpu = nullptr;
REQUIRE(hipMalloc(&gpu, N*sizeof(int)) == hipSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = 999;
}
taskflow.emplace([&](F& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<int>, gpu, N, 123);
auto copy = cf.copy(cpu, gpu, N);
auto zero = cf.memset(gpu+start, 0x3f, (N-start)*sizeof(int));
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(cpu[i] == 123);
}
for(int i=start; i<N; ++i) {
REQUIRE(cpu[i] == 0x3f3f3f3f);
}
}
delete [] cpu;
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Memset" * doctest::timeout(300)) {
memset<tf::cudaFlow>();
}
TEST_CASE("CapturedMemset" * doctest::timeout(300)) {
memset<tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Memset0
// --------------------------------------------------------
template <typename T, typename F>
void memset0() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 97;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(hipMalloc(&gpu, N*sizeof(T)) == hipSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](F& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.memset(gpu+start, (T)0, (N-start)*sizeof(T));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Memset0.i8") {
memset0<int8_t, tf::cudaFlow>();
}
TEST_CASE("Memset0.i16") {
memset0<int16_t, tf::cudaFlow>();
}
TEST_CASE("Memset0.i32") {
memset0<int32_t, tf::cudaFlow>();
}
TEST_CASE("Memset0.f32") {
memset0<float, tf::cudaFlow>();
}
TEST_CASE("Memset0.f64") {
memset0<double, tf::cudaFlow>();
}
TEST_CASE("CapturedMemset0.i8") {
memset0<int8_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.i16") {
memset0<int16_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.i32") {
memset0<int32_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.f32") {
memset0<float, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.f64") {
memset0<double, tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Memcpy
// --------------------------------------------------------
template <typename T, typename F>
void memcpy() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 97;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(hipMalloc(&gpu, N*sizeof(T)) == hipSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](F& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.memset(gpu+start, (T)0, (N-start)*sizeof(T));
auto copy = cf.memcpy(cpu, gpu, N*sizeof(T));
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Memcpy.i8") {
memcpy<int8_t, tf::cudaFlow>();
}
TEST_CASE("Memcpy.i16") {
memcpy<int16_t, tf::cudaFlow>();
}
TEST_CASE("Memcpy.i32") {
memcpy<int32_t, tf::cudaFlow>();
}
TEST_CASE("Memcpy.f32") {
memcpy<float, tf::cudaFlow>();
}
TEST_CASE("Memcpy.f64") {
memcpy<double, tf::cudaFlow>();
}
TEST_CASE("CapturedMemcpy.i8") {
memcpy<int8_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.i16") {
memcpy<int16_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.i32") {
memcpy<int32_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.f32") {
memcpy<float, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.f64") {
memcpy<double, tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: fill
// --------------------------------------------------------
template <typename T>
void fill(T value) {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 107;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(hipMalloc(&gpu, N*sizeof(T)) == hipSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto fill = cf.fill(gpu+start, value, (N-start));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(fill);
fill.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(::fabs(cpu[i] - value) < 1e-4);
}
}
delete [] cpu;
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Fill.i8") {
fill<int8_t>(+123);
fill<int8_t>(-123);
}
TEST_CASE("Fill.i16") {
fill<int16_t>(+12345);
fill<int16_t>(-12345);
}
TEST_CASE("Fill.i32") {
fill<int32_t>(+123456789);
fill<int32_t>(-123456789);
}
TEST_CASE("Fill.f32") {
fill<float>(+123456789.0f);
fill<float>(-123456789.0f);
}
// --------------------------------------------------------
// Testcase: Zero
// --------------------------------------------------------
template <typename T>
void zero() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 100;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(hipMalloc(&gpu, N*sizeof(T)) == hipSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.zero(gpu+start, (N-start));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Zero.i8") {
zero<int8_t>();
}
TEST_CASE("Zero.i16") {
zero<int16_t>();
}
TEST_CASE("Zero.i32") {
zero<int32_t>();
}
TEST_CASE("Zero.f32") {
zero<float>();
}
// --------------------------------------------------------
// Testcase: Barrier
// --------------------------------------------------------
template <typename T>
void barrier() {
const unsigned n = 1000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto br1 = cf.noop();
auto br2 = cf.noop();
auto br3 = cf.noop();
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(br1);
for(unsigned i=0; i<n; ++i) {
auto k1 = cf.kernel(g, b, 0, k_single_set<T>, gpu, i, (T)17);
k1.succeed(br1)
.precede(br2);
auto k2 = cf.kernel(g, b, 0, k_single_add<T>, gpu, i, (T)3);
k2.succeed(br2)
.precede(br3);
}
br3.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)20);
}
std::free(cpu);
REQUIRE(hipFree(gpu) == hipSuccess);
}
TEST_CASE("Barrier.i8" * doctest::timeout(300)) {
barrier<int8_t>();
}
TEST_CASE("Barrier.i16" * doctest::timeout(300)) {
barrier<int16_t>();
}
TEST_CASE("Barrier.i32" * doctest::timeout(300)) {
barrier<int32_t>();
}
// ----------------------------------------------------------------------------
// NestedRuns
// ----------------------------------------------------------------------------
template <typename F>
void nested_runs() {
int* cpu = nullptr;
int* gpu = nullptr;
constexpr unsigned n = 1000;
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
struct A {
tf::Executor executor;
tf::Taskflow taskflow;
void run(int* cpu, int* gpu, unsigned n) {
taskflow.clear();
auto A1 = taskflow.emplace([&](F& cf) {
cf.copy(gpu, cpu, n);
});
auto A2 = taskflow.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
});
auto A3 = taskflow.emplace([&] (F& cf) {
cf.copy(cpu, gpu, n);
});
A1.precede(A2);
A2.precede(A3);
executor.run_n(taskflow, 10).wait();
}
};
struct B {
tf::Taskflow taskflow;
tf::Executor executor;
A a;
void run(int* cpu, int* gpu, unsigned n) {
taskflow.clear();
auto B0 = taskflow.emplace([] () {});
auto B1 = taskflow.emplace([&] (F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto B2 = taskflow.emplace([&] () { a.run(cpu, gpu, n); });
auto B3 = taskflow.emplace([&] (F&) {
for(unsigned i=0; i<n; ++i) {
cpu[i]++;
}
});
B0.precede(B1);
B1.precede(B2);
B2.precede(B3);
executor.run_n(taskflow, 100).wait();
}
};
B b;
b.run(cpu, gpu, n);
for(unsigned i=0; i<n; i++) {
REQUIRE(cpu[i] == 1200);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
}
TEST_CASE("NestedRuns" * doctest::timeout(300)) {
nested_runs<tf::cudaFlow>();
}
TEST_CASE("CapturedNestedRuns" * doctest::timeout(300)) {
nested_runs<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// WorkerID
// ----------------------------------------------------------------------------
void worker_id(unsigned N, unsigned M) {
tf::Taskflow taskflow;
tf::Executor executor(N, M);
REQUIRE(executor.num_workers() == (N + M));
REQUIRE(executor.num_domains() == 2);
const unsigned s = 100;
for(unsigned k=0; k<s; ++k) {
auto cputask = taskflow.emplace([&](){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow&) {
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
auto chktask = taskflow.emplace([&] () {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
taskflow.emplace([&](tf::cudaFlow&) {
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
taskflow.emplace([&]() {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto subflow = taskflow.emplace([&](tf::Subflow& sf){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
auto t1 = sf.emplace([&](){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto t2 = sf.emplace([&](tf::cudaFlow&){
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
t1.precede(t2);
});
cputask.precede(gputask);
gputask.precede(chktask);
chktask.precede(subflow);
}
executor.run_n(taskflow, 10).wait();
}
TEST_CASE("WorkerID.1C1G") {
worker_id(1, 1);
}
TEST_CASE("WorkerID.1C2G") {
worker_id(1, 2);
}
TEST_CASE("WorkerID.1C3G") {
worker_id(1, 3);
}
TEST_CASE("WorkerID.1C4G") {
worker_id(1, 4);
}
TEST_CASE("WorkerID.2C1G") {
worker_id(2, 1);
}
TEST_CASE("WorkerID.2C2G") {
worker_id(2, 2);
}
TEST_CASE("WorkerID.2C3G") {
worker_id(2, 3);
}
TEST_CASE("WorkerID.2C4G") {
worker_id(2, 4);
}
TEST_CASE("WorkerID.3C1G") {
worker_id(3, 1);
}
TEST_CASE("WorkerID.3C2G") {
worker_id(3, 2);
}
TEST_CASE("WorkerID.3C3G") {
worker_id(3, 3);
}
TEST_CASE("WorkerID.3C4G") {
worker_id(3, 4);
}
TEST_CASE("WorkerID.4C1G") {
worker_id(4, 1);
}
TEST_CASE("WorkerID.4C2G") {
worker_id(4, 2);
}
TEST_CASE("WorkerID.4C3G") {
worker_id(4, 3);
}
TEST_CASE("WorkerID.4C4G") {
worker_id(4, 4);
}
// ----------------------------------------------------------------------------
// Multiruns
// ----------------------------------------------------------------------------
void multiruns(unsigned N, unsigned M) {
tf::Taskflow taskflow;
tf::Executor executor(N, M);
const unsigned n = 1000;
const unsigned s = 100;
int *cpu[s] = {0};
int *gpu[s] = {0};
for(unsigned k=0; k<s; ++k) {
int number = ::rand()%100;
auto cputask = taskflow.emplace([&, k](){
cpu[k] = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu[k], n*sizeof(int)) == hipSuccess);
});
auto gputask = taskflow.emplace([&, k, number](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu[k], cpu[k], n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu[k], n, number);
auto d2h = cf.copy(cpu[k], gpu[k], n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto chktask = taskflow.emplace([&, k, number] () {
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[k][i] == number);
}
});
cputask.precede(gputask);
gputask.precede(chktask);
}
executor.run(taskflow).wait();
}
TEST_CASE("Multiruns.1C1G") {
multiruns(1, 1);
}
TEST_CASE("Multiruns.1C2G") {
multiruns(1, 2);
}
TEST_CASE("Multiruns.1C3G") {
multiruns(1, 3);
}
TEST_CASE("Multiruns.1C4G") {
multiruns(1, 4);
}
TEST_CASE("Multiruns.2C1G") {
multiruns(2, 1);
}
TEST_CASE("Multiruns.2C2G") {
multiruns(2, 2);
}
TEST_CASE("Multiruns.2C3G") {
multiruns(2, 3);
}
TEST_CASE("Multiruns.2C4G") {
multiruns(2, 4);
}
TEST_CASE("Multiruns.3C1G") {
multiruns(3, 1);
}
TEST_CASE("Multiruns.3C2G") {
multiruns(3, 2);
}
TEST_CASE("Multiruns.3C3G") {
multiruns(3, 3);
}
TEST_CASE("Multiruns.3C4G") {
multiruns(3, 4);
}
TEST_CASE("Multiruns.4C1G") {
multiruns(4, 1);
}
TEST_CASE("Multiruns.4C2G") {
multiruns(4, 2);
}
TEST_CASE("Multiruns.4C3G") {
multiruns(4, 3);
}
TEST_CASE("Multiruns.4C4G") {
multiruns(4, 4);
}
// ----------------------------------------------------------------------------
// Subflow
// ----------------------------------------------------------------------------
template <typename F>
void subflow() {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto partask = taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto gputask = sf.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
});
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
partask.precede(chktask);
executor.run(taskflow).wait();
}
TEST_CASE("Subflow" * doctest::timeout(300)) {
subflow<tf::cudaFlow>();
}
TEST_CASE("CapturedSubflow" * doctest::timeout(300)) {
subflow<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// NestedSubflow
// ----------------------------------------------------------------------------
template <typename F>
void nested_subflow() {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto partask = taskflow.emplace([&](tf::Subflow& sf){
auto gputask1 = sf.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask1 = sf.emplace([&](tf::Subflow& sf) {
auto gputask2 = sf.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask2 = sf.emplace([&](tf::Subflow& sf){
sf.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
});
gputask2.precede(subtask2);
});
gputask1.precede(subtask1);
});
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 3);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
partask.precede(chktask)
.succeed(cputask);
executor.run(taskflow).wait();
}
TEST_CASE("NestedSubflow" * doctest::timeout(300) ) {
nested_subflow<tf::cudaFlow>();
}
TEST_CASE("CapturedNestedSubflow" * doctest::timeout(300) ) {
nested_subflow<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// DetachedSubflow
// ----------------------------------------------------------------------------
template <typename F>
void detached_subflow() {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto gputask = sf.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
sf.detach();
});
executor.run(taskflow).wait();
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
}
TEST_CASE("DetachedSubflow" * doctest::timeout(300)) {
detached_subflow<tf::cudaFlow>();
}
TEST_CASE("CapturedDetachedSubflow" * doctest::timeout(300)) {
detached_subflow<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// Conditional GPU tasking
// ----------------------------------------------------------------------------
template <typename F>
void loop() {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
});
auto gputask = taskflow.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto condition = taskflow.emplace([&cpu, round=0] () mutable {
++round;
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == round);
}
return round >= 100;
});
auto freetask = taskflow.emplace([&](){
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(condition);
condition.precede(gputask, freetask);
executor.run(taskflow).wait();
}
TEST_CASE("Loop" * doctest::timeout(300)) {
loop<tf::cudaFlow>();
}
TEST_CASE("CapturedLoop" * doctest::timeout(300)) {
loop<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// Predicate
// ----------------------------------------------------------------------------
TEST_CASE("Predicate") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
REQUIRE(hipMemcpy(gpu, cpu, n*sizeof(int), hipMemcpyHostToDevice) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto copy = cf.copy(cpu, gpu, n);
kernel.precede(copy);
cf.join_until([i=100]() mutable { return i-- == 0; });
});
auto freetask = taskflow.emplace([&](){
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 100);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(freetask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// Repeat
// ----------------------------------------------------------------------------
TEST_CASE("Repeat") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(hipMalloc(&gpu, n*sizeof(int)) == hipSuccess);
REQUIRE(hipMemcpy(gpu, cpu, n*sizeof(int), hipMemcpyHostToDevice) == hipSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto copy = cf.copy(cpu, gpu, n);
kernel.precede(copy);
cf.join_n(100);
});
auto freetask = taskflow.emplace([&](){
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 100);
}
REQUIRE(hipFree(gpu) == hipSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(freetask);
executor.run(taskflow).wait();
}
| 2a1cc84acf38b98ecbcb51bd444db6beeaffccc1.cu | #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
// ----------------------------------------------------------------------------
// kernel helper
// ----------------------------------------------------------------------------
template <typename T>
__global__ void k_set(T* ptr, size_t N, T value) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] = value;
}
}
template <typename T>
__global__ void k_single_set(T* ptr, int i, T value) {
ptr[i] = value;
}
template <typename T>
__global__ void k_add(T* ptr, size_t N, T value) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] += value;
}
}
template <typename T>
__global__ void k_single_add(T* ptr, int i, T value) {
ptr[i] += value;
}
// --------------------------------------------------------
// Testcase: Empty
// --------------------------------------------------------
template <typename T>
void empty() {
std::atomic<int> counter{0};
tf::Taskflow taskflow;
tf::Executor executor;
taskflow.emplace([&](T&){
++counter;
});
taskflow.emplace([&](T&){
++counter;
});
taskflow.emplace([&](T&){
++counter;
});
executor.run_n(taskflow, 100).wait();
REQUIRE(counter == 300);
}
TEST_CASE("Empty" * doctest::timeout(300)) {
empty<tf::cudaFlow>();
}
TEST_CASE("EmptyCapture" * doctest::timeout(300)) {
empty<tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Set
// --------------------------------------------------------
template <typename T>
void set() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_set<T>, gpu, n, (T)17);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
}
TEST_CASE("Set.i8" * doctest::timeout(300)) {
set<int8_t>();
}
TEST_CASE("Set.i16" * doctest::timeout(300)) {
set<int16_t>();
}
TEST_CASE("Set.i32" * doctest::timeout(300)) {
set<int32_t>();
}
// --------------------------------------------------------
// Testcase: Add
// --------------------------------------------------------
template <typename T>
void add() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto ad1 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 1);
auto ad2 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 2);
auto ad3 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 3);
auto ad4 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 4);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(ad1);
ad1.precede(ad2);
ad2.precede(ad3);
ad3.precede(ad4);
ad4.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 10);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
}
TEST_CASE("Add.i8" * doctest::timeout(300)) {
add<int8_t>();
}
TEST_CASE("Add.i16" * doctest::timeout(300)) {
add<int16_t>();
}
TEST_CASE("Add.i32" * doctest::timeout(300)) {
add<int32_t>();
}
// TODO: 64-bit fail?
//TEST_CASE("Add.i64" * doctest::timeout(300)) {
// add<int64_t>();
//}
// --------------------------------------------------------
// Testcase: Binary Set
// --------------------------------------------------------
template <typename T, typename F>
void bset() {
const unsigned n = 10000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](F& cf) {
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
std::vector<tf::cudaTask> tasks(n+1);
for(unsigned i=1; i<=n; ++i) {
tasks[i] = cf.kernel(g, b, 0, k_single_set<T>, gpu, i-1, (T)17);
auto p = i/2;
if(p != 0) {
tasks[p].precede(tasks[i]);
}
tasks[i].precede(d2h);
h2d.precede(tasks[i]);
}
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("BSet.i8" * doctest::timeout(300)) {
bset<int8_t, tf::cudaFlow>();
}
TEST_CASE("BSet.i16" * doctest::timeout(300)) {
bset<int16_t, tf::cudaFlow>();
}
TEST_CASE("BSet.i32" * doctest::timeout(300)) {
bset<int32_t, tf::cudaFlow>();
}
TEST_CASE("CapturedBSet.i8" * doctest::timeout(300)) {
bset<int8_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedBSet.i16" * doctest::timeout(300)) {
bset<int16_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedBSet.i32" * doctest::timeout(300)) {
bset<int32_t, tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Memset
// --------------------------------------------------------
template <typename F>
void memset() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 100;
int* cpu = new int [N];
int* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(int)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = 999;
}
taskflow.emplace([&](F& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<int>, gpu, N, 123);
auto copy = cf.copy(cpu, gpu, N);
auto zero = cf.memset(gpu+start, 0x3f, (N-start)*sizeof(int));
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(cpu[i] == 123);
}
for(int i=start; i<N; ++i) {
REQUIRE(cpu[i] == 0x3f3f3f3f);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Memset" * doctest::timeout(300)) {
memset<tf::cudaFlow>();
}
TEST_CASE("CapturedMemset" * doctest::timeout(300)) {
memset<tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Memset0
// --------------------------------------------------------
template <typename T, typename F>
void memset0() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 97;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](F& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.memset(gpu+start, (T)0, (N-start)*sizeof(T));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Memset0.i8") {
memset0<int8_t, tf::cudaFlow>();
}
TEST_CASE("Memset0.i16") {
memset0<int16_t, tf::cudaFlow>();
}
TEST_CASE("Memset0.i32") {
memset0<int32_t, tf::cudaFlow>();
}
TEST_CASE("Memset0.f32") {
memset0<float, tf::cudaFlow>();
}
TEST_CASE("Memset0.f64") {
memset0<double, tf::cudaFlow>();
}
TEST_CASE("CapturedMemset0.i8") {
memset0<int8_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.i16") {
memset0<int16_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.i32") {
memset0<int32_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.f32") {
memset0<float, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.f64") {
memset0<double, tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Memcpy
// --------------------------------------------------------
template <typename T, typename F>
void memcpy() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 97;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](F& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.memset(gpu+start, (T)0, (N-start)*sizeof(T));
auto copy = cf.memcpy(cpu, gpu, N*sizeof(T));
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Memcpy.i8") {
memcpy<int8_t, tf::cudaFlow>();
}
TEST_CASE("Memcpy.i16") {
memcpy<int16_t, tf::cudaFlow>();
}
TEST_CASE("Memcpy.i32") {
memcpy<int32_t, tf::cudaFlow>();
}
TEST_CASE("Memcpy.f32") {
memcpy<float, tf::cudaFlow>();
}
TEST_CASE("Memcpy.f64") {
memcpy<double, tf::cudaFlow>();
}
TEST_CASE("CapturedMemcpy.i8") {
memcpy<int8_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.i16") {
memcpy<int16_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.i32") {
memcpy<int32_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.f32") {
memcpy<float, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.f64") {
memcpy<double, tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: fill
// --------------------------------------------------------
template <typename T>
void fill(T value) {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 107;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto fill = cf.fill(gpu+start, value, (N-start));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(fill);
fill.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(std::fabs(cpu[i] - value) < 1e-4);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Fill.i8") {
fill<int8_t>(+123);
fill<int8_t>(-123);
}
TEST_CASE("Fill.i16") {
fill<int16_t>(+12345);
fill<int16_t>(-12345);
}
TEST_CASE("Fill.i32") {
fill<int32_t>(+123456789);
fill<int32_t>(-123456789);
}
TEST_CASE("Fill.f32") {
fill<float>(+123456789.0f);
fill<float>(-123456789.0f);
}
// --------------------------------------------------------
// Testcase: Zero
// --------------------------------------------------------
template <typename T>
void zero() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 100;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.zero(gpu+start, (N-start));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Zero.i8") {
zero<int8_t>();
}
TEST_CASE("Zero.i16") {
zero<int16_t>();
}
TEST_CASE("Zero.i32") {
zero<int32_t>();
}
TEST_CASE("Zero.f32") {
zero<float>();
}
// --------------------------------------------------------
// Testcase: Barrier
// --------------------------------------------------------
template <typename T>
void barrier() {
const unsigned n = 1000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto br1 = cf.noop();
auto br2 = cf.noop();
auto br3 = cf.noop();
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(br1);
for(unsigned i=0; i<n; ++i) {
auto k1 = cf.kernel(g, b, 0, k_single_set<T>, gpu, i, (T)17);
k1.succeed(br1)
.precede(br2);
auto k2 = cf.kernel(g, b, 0, k_single_add<T>, gpu, i, (T)3);
k2.succeed(br2)
.precede(br3);
}
br3.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)20);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Barrier.i8" * doctest::timeout(300)) {
barrier<int8_t>();
}
TEST_CASE("Barrier.i16" * doctest::timeout(300)) {
barrier<int16_t>();
}
TEST_CASE("Barrier.i32" * doctest::timeout(300)) {
barrier<int32_t>();
}
// ----------------------------------------------------------------------------
// NestedRuns
// ----------------------------------------------------------------------------
template <typename F>
void nested_runs() {
int* cpu = nullptr;
int* gpu = nullptr;
constexpr unsigned n = 1000;
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
struct A {
tf::Executor executor;
tf::Taskflow taskflow;
void run(int* cpu, int* gpu, unsigned n) {
taskflow.clear();
auto A1 = taskflow.emplace([&](F& cf) {
cf.copy(gpu, cpu, n);
});
auto A2 = taskflow.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
});
auto A3 = taskflow.emplace([&] (F& cf) {
cf.copy(cpu, gpu, n);
});
A1.precede(A2);
A2.precede(A3);
executor.run_n(taskflow, 10).wait();
}
};
struct B {
tf::Taskflow taskflow;
tf::Executor executor;
A a;
void run(int* cpu, int* gpu, unsigned n) {
taskflow.clear();
auto B0 = taskflow.emplace([] () {});
auto B1 = taskflow.emplace([&] (F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto B2 = taskflow.emplace([&] () { a.run(cpu, gpu, n); });
auto B3 = taskflow.emplace([&] (F&) {
for(unsigned i=0; i<n; ++i) {
cpu[i]++;
}
});
B0.precede(B1);
B1.precede(B2);
B2.precede(B3);
executor.run_n(taskflow, 100).wait();
}
};
B b;
b.run(cpu, gpu, n);
for(unsigned i=0; i<n; i++) {
REQUIRE(cpu[i] == 1200);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
}
TEST_CASE("NestedRuns" * doctest::timeout(300)) {
nested_runs<tf::cudaFlow>();
}
TEST_CASE("CapturedNestedRuns" * doctest::timeout(300)) {
nested_runs<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// WorkerID
// ----------------------------------------------------------------------------
void worker_id(unsigned N, unsigned M) {
tf::Taskflow taskflow;
tf::Executor executor(N, M);
REQUIRE(executor.num_workers() == (N + M));
REQUIRE(executor.num_domains() == 2);
const unsigned s = 100;
for(unsigned k=0; k<s; ++k) {
auto cputask = taskflow.emplace([&](){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow&) {
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
auto chktask = taskflow.emplace([&] () {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
taskflow.emplace([&](tf::cudaFlow&) {
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
taskflow.emplace([&]() {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto subflow = taskflow.emplace([&](tf::Subflow& sf){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
auto t1 = sf.emplace([&](){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N);
});
auto t2 = sf.emplace([&](tf::cudaFlow&){
auto id = executor.this_worker_id();
REQUIRE(id >= N);
REQUIRE(id < N+M);
});
t1.precede(t2);
});
cputask.precede(gputask);
gputask.precede(chktask);
chktask.precede(subflow);
}
executor.run_n(taskflow, 10).wait();
}
TEST_CASE("WorkerID.1C1G") {
worker_id(1, 1);
}
TEST_CASE("WorkerID.1C2G") {
worker_id(1, 2);
}
TEST_CASE("WorkerID.1C3G") {
worker_id(1, 3);
}
TEST_CASE("WorkerID.1C4G") {
worker_id(1, 4);
}
TEST_CASE("WorkerID.2C1G") {
worker_id(2, 1);
}
TEST_CASE("WorkerID.2C2G") {
worker_id(2, 2);
}
TEST_CASE("WorkerID.2C3G") {
worker_id(2, 3);
}
TEST_CASE("WorkerID.2C4G") {
worker_id(2, 4);
}
TEST_CASE("WorkerID.3C1G") {
worker_id(3, 1);
}
TEST_CASE("WorkerID.3C2G") {
worker_id(3, 2);
}
TEST_CASE("WorkerID.3C3G") {
worker_id(3, 3);
}
TEST_CASE("WorkerID.3C4G") {
worker_id(3, 4);
}
TEST_CASE("WorkerID.4C1G") {
worker_id(4, 1);
}
TEST_CASE("WorkerID.4C2G") {
worker_id(4, 2);
}
TEST_CASE("WorkerID.4C3G") {
worker_id(4, 3);
}
TEST_CASE("WorkerID.4C4G") {
worker_id(4, 4);
}
// ----------------------------------------------------------------------------
// Multiruns
// ----------------------------------------------------------------------------
void multiruns(unsigned N, unsigned M) {
tf::Taskflow taskflow;
tf::Executor executor(N, M);
const unsigned n = 1000;
const unsigned s = 100;
int *cpu[s] = {0};
int *gpu[s] = {0};
for(unsigned k=0; k<s; ++k) {
int number = ::rand()%100;
auto cputask = taskflow.emplace([&, k](){
cpu[k] = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu[k], n*sizeof(int)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&, k, number](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu[k], cpu[k], n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu[k], n, number);
auto d2h = cf.copy(cpu[k], gpu[k], n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto chktask = taskflow.emplace([&, k, number] () {
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[k][i] == number);
}
});
cputask.precede(gputask);
gputask.precede(chktask);
}
executor.run(taskflow).wait();
}
TEST_CASE("Multiruns.1C1G") {
multiruns(1, 1);
}
TEST_CASE("Multiruns.1C2G") {
multiruns(1, 2);
}
TEST_CASE("Multiruns.1C3G") {
multiruns(1, 3);
}
TEST_CASE("Multiruns.1C4G") {
multiruns(1, 4);
}
TEST_CASE("Multiruns.2C1G") {
multiruns(2, 1);
}
TEST_CASE("Multiruns.2C2G") {
multiruns(2, 2);
}
TEST_CASE("Multiruns.2C3G") {
multiruns(2, 3);
}
TEST_CASE("Multiruns.2C4G") {
multiruns(2, 4);
}
TEST_CASE("Multiruns.3C1G") {
multiruns(3, 1);
}
TEST_CASE("Multiruns.3C2G") {
multiruns(3, 2);
}
TEST_CASE("Multiruns.3C3G") {
multiruns(3, 3);
}
TEST_CASE("Multiruns.3C4G") {
multiruns(3, 4);
}
TEST_CASE("Multiruns.4C1G") {
multiruns(4, 1);
}
TEST_CASE("Multiruns.4C2G") {
multiruns(4, 2);
}
TEST_CASE("Multiruns.4C3G") {
multiruns(4, 3);
}
TEST_CASE("Multiruns.4C4G") {
multiruns(4, 4);
}
// ----------------------------------------------------------------------------
// Subflow
// ----------------------------------------------------------------------------
template <typename F>
void subflow() {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto partask = taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto gputask = sf.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
});
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
partask.precede(chktask);
executor.run(taskflow).wait();
}
TEST_CASE("Subflow" * doctest::timeout(300)) {
subflow<tf::cudaFlow>();
}
TEST_CASE("CapturedSubflow" * doctest::timeout(300)) {
subflow<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// NestedSubflow
// ----------------------------------------------------------------------------
template <typename F>
void nested_subflow() {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto partask = taskflow.emplace([&](tf::Subflow& sf){
auto gputask1 = sf.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask1 = sf.emplace([&](tf::Subflow& sf) {
auto gputask2 = sf.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto subtask2 = sf.emplace([&](tf::Subflow& sf){
sf.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
});
gputask2.precede(subtask2);
});
gputask1.precede(subtask1);
});
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 3);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
partask.precede(chktask)
.succeed(cputask);
executor.run(taskflow).wait();
}
TEST_CASE("NestedSubflow" * doctest::timeout(300) ) {
nested_subflow<tf::cudaFlow>();
}
TEST_CASE("CapturedNestedSubflow" * doctest::timeout(300) ) {
nested_subflow<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// DetachedSubflow
// ----------------------------------------------------------------------------
template <typename F>
void detached_subflow() {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto gputask = sf.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
sf.detach();
});
executor.run(taskflow).wait();
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
}
TEST_CASE("DetachedSubflow" * doctest::timeout(300)) {
detached_subflow<tf::cudaFlow>();
}
TEST_CASE("CapturedDetachedSubflow" * doctest::timeout(300)) {
detached_subflow<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// Conditional GPU tasking
// ----------------------------------------------------------------------------
template <typename F>
void loop() {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto condition = taskflow.emplace([&cpu, round=0] () mutable {
++round;
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == round);
}
return round >= 100;
});
auto freetask = taskflow.emplace([&](){
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(condition);
condition.precede(gputask, freetask);
executor.run(taskflow).wait();
}
TEST_CASE("Loop" * doctest::timeout(300)) {
loop<tf::cudaFlow>();
}
TEST_CASE("CapturedLoop" * doctest::timeout(300)) {
loop<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// Predicate
// ----------------------------------------------------------------------------
TEST_CASE("Predicate") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
REQUIRE(cudaMemcpy(gpu, cpu, n*sizeof(int), cudaMemcpyHostToDevice) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto copy = cf.copy(cpu, gpu, n);
kernel.precede(copy);
cf.join_until([i=100]() mutable { return i-- == 0; });
});
auto freetask = taskflow.emplace([&](){
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 100);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(freetask);
executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// Repeat
// ----------------------------------------------------------------------------
TEST_CASE("Repeat") {
tf::Taskflow taskflow;
tf::Executor executor;
const unsigned n = 1000;
int* cpu = nullptr;
int* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
REQUIRE(cudaMemcpy(gpu, cpu, n*sizeof(int), cudaMemcpyHostToDevice) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto copy = cf.copy(cpu, gpu, n);
kernel.precede(copy);
cf.join_n(100);
});
auto freetask = taskflow.emplace([&](){
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 100);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
cputask.precede(gputask);
gputask.precede(freetask);
executor.run(taskflow).wait();
}
|
2052c344c2e6995034f7d6f2073829a59d14767d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* compile as: nvcc bindlessTexture.cu */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N 1024
// texture object is a kernel argument
__global__ void kernel(hipTextureObject_t tex) {
int i = blockIdx.x *blockDim.x + threadIdx.x;
float x = tex1Dfetch<float>(tex, i);
// do some work using x ...
}
void call_kernel(hipTextureObject_t tex) {
dim3 block(128,1,1);
dim3 grid(N/block.x,1,1);
hipLaunchKernelGGL(( kernel) , dim3(grid), dim3(block), 0, 0, tex);
}
int main() {
// declare and allocate memory
float *buffer;
hipMalloc(&buffer, N*sizeof(float));
// create texture object
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = buffer;
resDesc.res.linear.desc.f = hipChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = N*sizeof(float);
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = hipReadModeElementType;
// create texture object: we only have to do this once!
hipTextureObject_t tex=0;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
call_kernel(tex); // pass texture as argument
// destroy texture object
hipDestroyTextureObject(tex);
hipFree(buffer);
}
| 2052c344c2e6995034f7d6f2073829a59d14767d.cu | /* compile as: nvcc bindlessTexture.cu */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N 1024
// texture object is a kernel argument
__global__ void kernel(cudaTextureObject_t tex) {
int i = blockIdx.x *blockDim.x + threadIdx.x;
float x = tex1Dfetch<float>(tex, i);
// do some work using x ...
}
void call_kernel(cudaTextureObject_t tex) {
dim3 block(128,1,1);
dim3 grid(N/block.x,1,1);
kernel <<<grid, block>>>(tex);
}
int main() {
// declare and allocate memory
float *buffer;
cudaMalloc(&buffer, N*sizeof(float));
// create texture object
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = buffer;
resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = N*sizeof(float);
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
// create texture object: we only have to do this once!
cudaTextureObject_t tex=0;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
call_kernel(tex); // pass texture as argument
// destroy texture object
cudaDestroyTextureObject(tex);
cudaFree(buffer);
}
|
08a4dc8d02e76718848e7630b84784081c4abcc4.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************* ternarytest2.cu ***************************************/
/*mostra 0 no ndice 0, "c" no ndice 1 e nos ndices pares, mostra valor lixo nos demais ndices */
#include <stdio.h>
#include "hip/hip_runtime.h"
#include <assert.h>
#define N 2 //64
__global__ void foo(float* A, float c) {
A[threadIdx.x ? 2*threadIdx.x : 1] = c ;
}
| 08a4dc8d02e76718848e7630b84784081c4abcc4.cu | /******************************************* ternarytest2.cu ***************************************/
/*mostra 0 no índice 0, "c" no índice 1 e nos índices pares, mostra valor lixo nos demais índices */
#include <stdio.h>
#include "cuda.h"
#include <assert.h>
#define N 2 //64
__global__ void foo(float* A, float c) {
A[threadIdx.x ? 2*threadIdx.x : 1] = c ;
}
|
80248e06e14a83e27ea49f08d008ed15361e1110.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void vector_add(const int *a, const int *b, int *c)
{
*c = *a + *b;
}
int main(void)
{
const int a = 2, b = 5;
int c = 0;
int *dev_a, *dev_b, *dev_c;
hipMalloc((void **)&dev_a, sizeof(int));
hipMalloc((void **)&dev_b, sizeof(int));
hipMalloc((void **)&dev_c, sizeof(int));
hipMemcpy(dev_a, &a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, &b, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vector_add), dim3(1), dim3(1), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
printf("%d + %d = %d, Is that right?\n", a, b, c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 80248e06e14a83e27ea49f08d008ed15361e1110.cu | #include <stdio.h>
__global__ void vector_add(const int *a, const int *b, int *c)
{
*c = *a + *b;
}
int main(void)
{
const int a = 2, b = 5;
int c = 0;
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void **)&dev_a, sizeof(int));
cudaMalloc((void **)&dev_b, sizeof(int));
cudaMalloc((void **)&dev_c, sizeof(int));
cudaMemcpy(dev_a, &a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, &b, sizeof(int), cudaMemcpyHostToDevice);
vector_add<<<1, 1>>>(dev_a, dev_b, dev_c);
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d + %d = %d, Is that right?\n", a, b, c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
76ac8a9e5b6dd0898bfca2e29f55623d9f74266a.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef HAMC_SCRATCH_H
#define HAMC_SCRATCH_H
#include <bits/getopt_core.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime_api.h>
#include <hip/driver_types.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <stdint.h>
#include <sys/time.h>
#include "../../src/hamc/hamc_cpu_code.c"
#include "../../src/hamc/LU_inverse_block.cu"
int main(int argc, char *argv[]){
bool verbose = false;
int flag=0;
int n = 2;
int p = 6;
int N = p;
int t = 10;
int w = 30;
int seed = 10;
int opt;
while ((opt = getopt(argc, argv, "n:")) != -1){
switch(opt){
case 'n':
p = atoi(optarg);
break;
}
}
N = p;
printf("Size of input matrix: %s%d%s\n", YELLOW, p, NC);
printf("Generating QC_MDPC code...\n");
mdpc code = qc_mdpc_init_cpu(n, p, t, w, seed);
printf("Generating Binary Circulant Matrix...\n");
bin_matrix invertible_matrix = make_matrix_cpu(
code->p, code->p,
splice_cpu(code->row, (code->n0 - 1) * code->p, code->n),
1);
printf("Generated test matrix\n");
// Copy matrix in two test matrices
bin_matrix extra_matrix = mat_init_cpu(p, p);
bin_matrix extra_matrix2 = mat_init_cpu(p, p);
for (int i =0; i < p*p; i++) {
HAMC_DATA_TYPE_t temp = invertible_matrix->data[i];
extra_matrix->data[i] = temp;
extra_matrix2->data[i] = temp;
}
clock_t hamc_cpu_start = clock();
bin_matrix cpu_sol = circ_matrix_inverse_cpu(extra_matrix);
clock_t hamc_cpu_end = clock();
double hamc_cpu_time_used =
((double) (hamc_cpu_end - hamc_cpu_start))/ CLOCKS_PER_SEC;
// Print input and expected result
if (true) {
printf("\nInput matrix A:\n");
print_bin_matrix(invertible_matrix);
printf("\nExpected solution is:\n");
print_bin_matrix(cpu_sol);
}
clock_t lu_gpu_start = clock();
bin_matrix new_gpu_sol = inverse_GF2_LU_block_gpu(extra_matrix2);
clock_t lu_gpu_end = clock();
double lu_gpu_time_used = ((double) (lu_gpu_end - lu_gpu_start))/
CLOCKS_PER_SEC;
if (verbose) print_bin_matrix(new_gpu_sol);
// check results
for (int i = 0; i < N*N; i++) {
if (new_gpu_sol->data[i] != cpu_sol->data[i]) {
flag = 1;
break;
}
}
if(flag==0)
printf("correctq: true");
else
printf("correctq: Failure\n");
printf("\n");
//printf("Time for LU Decomposition CPU code: %lf\n", lu_cpu_time_used);
printf("Time for HAMC CPU code: %lfs\n", hamc_cpu_time_used);
printf("Time for LU Decomposition GPU code: %lfs\n", lu_gpu_time_used);
printf("Speed difference: %.2lfX ", lu_gpu_time_used/hamc_cpu_time_used);
if (lu_gpu_time_used > hamc_cpu_time_used)
printf("slower\n");
else
printf("faster\n");
free(invertible_matrix);
free(extra_matrix);
free(cpu_sol);
free(new_gpu_sol);
return 0;
}
#endif /* HAMC_SCRATCH_H */ | 76ac8a9e5b6dd0898bfca2e29f55623d9f74266a.cu | #ifndef HAMC_SCRATCH_H
#define HAMC_SCRATCH_H
#include <bits/getopt_core.h>
#include <cuda_device_runtime_api.h>
#include <cuda_runtime_api.h>
#include <driver_types.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <stdint.h>
#include <sys/time.h>
#include "../../src/hamc/hamc_cpu_code.c"
#include "../../src/hamc/LU_inverse_block.cu"
int main(int argc, char *argv[]){
bool verbose = false;
int flag=0;
int n = 2;
int p = 6;
int N = p;
int t = 10;
int w = 30;
int seed = 10;
int opt;
while ((opt = getopt(argc, argv, "n:")) != -1){
switch(opt){
case 'n':
p = atoi(optarg);
break;
}
}
N = p;
printf("Size of input matrix: %s%d%s\n", YELLOW, p, NC);
printf("Generating QC_MDPC code...\n");
mdpc code = qc_mdpc_init_cpu(n, p, t, w, seed);
printf("Generating Binary Circulant Matrix...\n");
bin_matrix invertible_matrix = make_matrix_cpu(
code->p, code->p,
splice_cpu(code->row, (code->n0 - 1) * code->p, code->n),
1);
printf("Generated test matrix\n");
// Copy matrix in two test matrices
bin_matrix extra_matrix = mat_init_cpu(p, p);
bin_matrix extra_matrix2 = mat_init_cpu(p, p);
for (int i =0; i < p*p; i++) {
HAMC_DATA_TYPE_t temp = invertible_matrix->data[i];
extra_matrix->data[i] = temp;
extra_matrix2->data[i] = temp;
}
clock_t hamc_cpu_start = clock();
bin_matrix cpu_sol = circ_matrix_inverse_cpu(extra_matrix);
clock_t hamc_cpu_end = clock();
double hamc_cpu_time_used =
((double) (hamc_cpu_end - hamc_cpu_start))/ CLOCKS_PER_SEC;
// Print input and expected result
if (true) {
printf("\nInput matrix A:\n");
print_bin_matrix(invertible_matrix);
printf("\nExpected solution is:\n");
print_bin_matrix(cpu_sol);
}
clock_t lu_gpu_start = clock();
bin_matrix new_gpu_sol = inverse_GF2_LU_block_gpu(extra_matrix2);
clock_t lu_gpu_end = clock();
double lu_gpu_time_used = ((double) (lu_gpu_end - lu_gpu_start))/
CLOCKS_PER_SEC;
if (verbose) print_bin_matrix(new_gpu_sol);
// check results
for (int i = 0; i < N*N; i++) {
if (new_gpu_sol->data[i] != cpu_sol->data[i]) {
flag = 1;
break;
}
}
if(flag==0)
printf("correctq: true");
else
printf("correctq: Failure\n");
printf("\n");
//printf("Time for LU Decomposition CPU code: %lf\n", lu_cpu_time_used);
printf("Time for HAMC CPU code: %lfs\n", hamc_cpu_time_used);
printf("Time for LU Decomposition GPU code: %lfs\n", lu_gpu_time_used);
printf("Speed difference: %.2lfX ", lu_gpu_time_used/hamc_cpu_time_used);
if (lu_gpu_time_used > hamc_cpu_time_used)
printf("slower\n");
else
printf("faster\n");
free(invertible_matrix);
free(extra_matrix);
free(cpu_sol);
free(new_gpu_sol);
return 0;
}
#endif /* HAMC_SCRATCH_H */ |
7683f02f42ad8db96b6308a96b97db1946368b79.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
///////////////////////////////////////////////////////////////////////////////
// This sample implements Mersenne Twister random number generator
// and Cartesian Box-Muller transformation on the GPU
///////////////////////////////////////////////////////////////////////////////
// standard utilities and systems includes
#include <stdio.h>
#include "MT.h"
#include <hip/hip_runtime.h>
// comment the below line if not doing Box-Muller transformation
#define DO_BOXMULLER
// Reference CPU MT and Box-Muller transformation
extern "C" void initMTRef(const char *fname);
extern "C" void RandomRef(float *h_Rand, int nPerRng, unsigned int seed);
#ifdef DO_BOXMULLER
extern "C" void BoxMullerRef(float *h_Rand, int nPerRng);
#endif
#include <chrono>
using namespace std::chrono;
///////////////////////////////////////////////////////////////////////////////
//Load twister configurations
///////////////////////////////////////////////////////////////////////////////
void loadMTGPU(const char *fname,
const unsigned int seed,
mt_struct_stripped *h_MT,
const size_t size)
{
// open the file for binary read
FILE* fd = fopen(fname, "rb");
if (fd == NULL)
{
printf("Failed to open file %s\n", fname);
exit(-1);
}
for (unsigned int i = 0; i < size; i++)
fread(&h_MT[i], sizeof(mt_struct_stripped), 1, fd);
fclose(fd);
for(unsigned int i = 0; i < size; i++)
h_MT[i].seed = seed;
}
__device__
void BoxMullerTrans(float *u1, float *u2)
{
const float r = sqrtf(-2.0f * logf(*u1));
const float phi = 2 * PI * (*u2);
*u1 = r * cosf(phi);
*u2 = r * sinf(phi);
}
__global__ void boxmuller (float* Rand, const int nPerRng)
{
int globalID = blockDim.x * blockIdx.x + threadIdx.x;
for (int iOut = 0; iOut < nPerRng; iOut += 2) {
BoxMullerTrans(&Rand[globalID + (iOut + 0) * MT_RNG_COUNT],
&Rand[globalID + (iOut + 1) * MT_RNG_COUNT]);
}
}
__global__ void mt (const mt_struct_stripped* MT, float* Rand, const int nPerRng)
{
int globalID = blockDim.x * blockIdx.x + threadIdx.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN], matrix_a, mask_b, mask_c;
//Load bit-vector Mersenne Twister parameters
matrix_a = MT[globalID].matrix_a;
mask_b = MT[globalID].mask_b;
mask_c = MT[globalID].mask_c;
//Initialize current state
mt[0] = MT[globalID].seed;
for (iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for (iOut = 0; iOut < nPerRng; iOut++) {
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
// MT recurrence
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & mask_b;
x ^= (x << MT_SHIFTC) & mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
Rand[globalID + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
///////////////////////////////////////////////////////////////////////////////
// Main function
///////////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv)
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
int numIterations = atoi(argv[1]);
size_t globalWorkSize = {MT_RNG_COUNT}; // 1D var for Total # of work items
size_t localWorkSize = {128}; // 1D var for # of work items in the work group
dim3 gridBlocks (globalWorkSize/localWorkSize);
dim3 threadBlocks (localWorkSize);
const int seed = 777;
const int nPerRng = 5860; // # of recurrence steps, must be even if do Box-Muller transformation
const int nRand = MT_RNG_COUNT * nPerRng;// Output size
printf("Initialization: load MT parameters and init host buffers...\n");
mt_struct_stripped *h_MT = (mt_struct_stripped*) malloc (
sizeof(mt_struct_stripped) * MT_RNG_COUNT); // MT para
const char *cDatPath = "./data/MersenneTwister.dat";
loadMTGPU(cDatPath, seed, h_MT, MT_RNG_COUNT);
const char *cRawPath = "./data/MersenneTwister.raw";
initMTRef(cRawPath);
float *h_RandGPU = (float*)malloc(sizeof(float)*nRand); // Host buffers for GPU output
float *h_RandCPU = (float*)malloc(sizeof(float)*nRand); // Host buffers for CPU test
printf("Allocate memory...\n");
mt_struct_stripped* d_MT;
hipMalloc((void**)&d_MT, sizeof(mt_struct_stripped)*MT_RNG_COUNT);
hipMemcpy(d_MT, h_MT, sizeof(mt_struct_stripped)*MT_RNG_COUNT, hipMemcpyHostToDevice);
float* d_Rand;
hipMalloc((void**)&d_Rand, sizeof(float)*nRand);
printf("Call Mersenne Twister kernel... (%d iterations)\n\n", numIterations);
high_resolution_clock::time_point t1 = high_resolution_clock::now();
for (int i = 0; i < numIterations; i++)
{
hipLaunchKernelGGL(mt, gridBlocks, threadBlocks, 0, 0, d_MT, d_Rand, nPerRng);
#ifdef DO_BOXMULLER
hipLaunchKernelGGL(boxmuller, gridBlocks, threadBlocks, 0, 0, d_Rand, nPerRng);
#endif
}
hipDeviceSynchronize();
high_resolution_clock::time_point t2 = high_resolution_clock::now();
duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
double gpuTime = time_span.count() / (double)numIterations;
printf("MersenneTwister, Throughput = %.4f GNumbers/s, "
"Time = %.5f s, Size = %u Numbers, Workgroup = %lu\n",
((double)nRand * 1.0E-9 / gpuTime), gpuTime, nRand, localWorkSize);
printf("\nRead back results...\n");
hipMemcpy(h_RandGPU, d_Rand, sizeof(float)*nRand, hipMemcpyDeviceToHost);
printf("Compute CPU reference solution...\n");
RandomRef(h_RandCPU, nPerRng, seed);
#ifdef DO_BOXMULLER
BoxMullerRef(h_RandCPU, nPerRng);
#endif
printf("Compare CPU and GPU results...\n");
double sum_delta = 0;
double sum_ref = 0;
for(int i = 0; i < MT_RNG_COUNT; i++) {
for(int j = 0; j < nPerRng; j++) {
double rCPU = h_RandCPU[i * nPerRng + j];
double rGPU = h_RandGPU[i + j * MT_RNG_COUNT];
double delta = ::fabs(rCPU - rGPU);
sum_delta += delta;
sum_ref += ::fabs(rCPU);
}
}
double L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n\n", L1norm);
free(h_MT);
free(h_RandGPU);
free(h_RandCPU);
hipFree(d_MT);
hipFree(d_Rand);
// finish
printf("%s\n", (L1norm < 1e-6) ? "PASS" : "FAIL");
return 0;
}
| 7683f02f42ad8db96b6308a96b97db1946368b79.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
///////////////////////////////////////////////////////////////////////////////
// This sample implements Mersenne Twister random number generator
// and Cartesian Box-Muller transformation on the GPU
///////////////////////////////////////////////////////////////////////////////
// standard utilities and systems includes
#include <stdio.h>
#include "MT.h"
#include <hip/hip_runtime.h>
// comment the below line if not doing Box-Muller transformation
#define DO_BOXMULLER
// Reference CPU MT and Box-Muller transformation
extern "C" void initMTRef(const char *fname);
extern "C" void RandomRef(float *h_Rand, int nPerRng, unsigned int seed);
#ifdef DO_BOXMULLER
extern "C" void BoxMullerRef(float *h_Rand, int nPerRng);
#endif
#include <chrono>
using namespace std::chrono;
///////////////////////////////////////////////////////////////////////////////
//Load twister configurations
///////////////////////////////////////////////////////////////////////////////
void loadMTGPU(const char *fname,
const unsigned int seed,
mt_struct_stripped *h_MT,
const size_t size)
{
// open the file for binary read
FILE* fd = fopen(fname, "rb");
if (fd == NULL)
{
printf("Failed to open file %s\n", fname);
exit(-1);
}
for (unsigned int i = 0; i < size; i++)
fread(&h_MT[i], sizeof(mt_struct_stripped), 1, fd);
fclose(fd);
for(unsigned int i = 0; i < size; i++)
h_MT[i].seed = seed;
}
__device__
void BoxMullerTrans(float *u1, float *u2)
{
const float r = sqrtf(-2.0f * logf(*u1));
const float phi = 2 * PI * (*u2);
*u1 = r * cosf(phi);
*u2 = r * sinf(phi);
}
__global__ void boxmuller (float* Rand, const int nPerRng)
{
int globalID = blockDim.x * blockIdx.x + threadIdx.x;
for (int iOut = 0; iOut < nPerRng; iOut += 2) {
BoxMullerTrans(&Rand[globalID + (iOut + 0) * MT_RNG_COUNT],
&Rand[globalID + (iOut + 1) * MT_RNG_COUNT]);
}
}
__global__ void mt (const mt_struct_stripped* MT, float* Rand, const int nPerRng)
{
int globalID = blockDim.x * blockIdx.x + threadIdx.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN], matrix_a, mask_b, mask_c;
//Load bit-vector Mersenne Twister parameters
matrix_a = MT[globalID].matrix_a;
mask_b = MT[globalID].mask_b;
mask_c = MT[globalID].mask_c;
//Initialize current state
mt[0] = MT[globalID].seed;
for (iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for (iOut = 0; iOut < nPerRng; iOut++) {
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
// MT recurrence
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & mask_b;
x ^= (x << MT_SHIFTC) & mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
Rand[globalID + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
///////////////////////////////////////////////////////////////////////////////
// Main function
///////////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv)
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
int numIterations = atoi(argv[1]);
size_t globalWorkSize = {MT_RNG_COUNT}; // 1D var for Total # of work items
size_t localWorkSize = {128}; // 1D var for # of work items in the work group
dim3 gridBlocks (globalWorkSize/localWorkSize);
dim3 threadBlocks (localWorkSize);
const int seed = 777;
const int nPerRng = 5860; // # of recurrence steps, must be even if do Box-Muller transformation
const int nRand = MT_RNG_COUNT * nPerRng;// Output size
printf("Initialization: load MT parameters and init host buffers...\n");
mt_struct_stripped *h_MT = (mt_struct_stripped*) malloc (
sizeof(mt_struct_stripped) * MT_RNG_COUNT); // MT para
const char *cDatPath = "./data/MersenneTwister.dat";
loadMTGPU(cDatPath, seed, h_MT, MT_RNG_COUNT);
const char *cRawPath = "./data/MersenneTwister.raw";
initMTRef(cRawPath);
float *h_RandGPU = (float*)malloc(sizeof(float)*nRand); // Host buffers for GPU output
float *h_RandCPU = (float*)malloc(sizeof(float)*nRand); // Host buffers for CPU test
printf("Allocate memory...\n");
mt_struct_stripped* d_MT;
hipMalloc((void**)&d_MT, sizeof(mt_struct_stripped)*MT_RNG_COUNT);
hipMemcpy(d_MT, h_MT, sizeof(mt_struct_stripped)*MT_RNG_COUNT, hipMemcpyHostToDevice);
float* d_Rand;
hipMalloc((void**)&d_Rand, sizeof(float)*nRand);
printf("Call Mersenne Twister kernel... (%d iterations)\n\n", numIterations);
high_resolution_clock::time_point t1 = high_resolution_clock::now();
for (int i = 0; i < numIterations; i++)
{
hipLaunchKernelGGL(mt, gridBlocks, threadBlocks, 0, 0, d_MT, d_Rand, nPerRng);
#ifdef DO_BOXMULLER
hipLaunchKernelGGL(boxmuller, gridBlocks, threadBlocks, 0, 0, d_Rand, nPerRng);
#endif
}
hipDeviceSynchronize();
high_resolution_clock::time_point t2 = high_resolution_clock::now();
duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
double gpuTime = time_span.count() / (double)numIterations;
printf("MersenneTwister, Throughput = %.4f GNumbers/s, "
"Time = %.5f s, Size = %u Numbers, Workgroup = %lu\n",
((double)nRand * 1.0E-9 / gpuTime), gpuTime, nRand, localWorkSize);
printf("\nRead back results...\n");
hipMemcpy(h_RandGPU, d_Rand, sizeof(float)*nRand, hipMemcpyDeviceToHost);
printf("Compute CPU reference solution...\n");
RandomRef(h_RandCPU, nPerRng, seed);
#ifdef DO_BOXMULLER
BoxMullerRef(h_RandCPU, nPerRng);
#endif
printf("Compare CPU and GPU results...\n");
double sum_delta = 0;
double sum_ref = 0;
for(int i = 0; i < MT_RNG_COUNT; i++) {
for(int j = 0; j < nPerRng; j++) {
double rCPU = h_RandCPU[i * nPerRng + j];
double rGPU = h_RandGPU[i + j * MT_RNG_COUNT];
double delta = std::fabs(rCPU - rGPU);
sum_delta += delta;
sum_ref += std::fabs(rCPU);
}
}
double L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n\n", L1norm);
free(h_MT);
free(h_RandGPU);
free(h_RandCPU);
hipFree(d_MT);
hipFree(d_Rand);
// finish
printf("%s\n", (L1norm < 1e-6) ? "PASS" : "FAIL");
return 0;
}
|
004cedaeb181f769e5551b3c50743a3c30d7f4f5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include "ptx.cuh"
#include "secp256k1.cuh"
#include "sha256.cuh"
#include "ripemd160.cuh"
#include "secp256k1.h"
#include "DeviceContextShared.h"
#define MAX_TARGETS_CONSTANT_MEM 16
#define BLOOM_FILTER_SIZE_WORDS 2048
__constant__ unsigned int _TARGET_HASH[MAX_TARGETS_CONSTANT_MEM][5];
__constant__ unsigned int _NUM_TARGET_HASHES[1];
__constant__ unsigned int *_BLOOM_FILTER[1];
__constant__ unsigned int _USE_BLOOM_FILTER[1];
__constant__ unsigned int _INC_X[8];
__constant__ unsigned int _INC_Y[8];
__constant__ unsigned int *_CHAIN[1];
static bool _useBloomFilter = false;
static unsigned int *_bloomFilterPtr = NULL;
static unsigned int *_chainBufferPtr = NULL;
static const unsigned int _RIPEMD160_IV_HOST[5] = {
0x67452301,
0xefcdab89,
0x98badcfe,
0x10325476,
0xc3d2e1f0
};
static unsigned int swp(unsigned int x)
{
return (x << 24) | ((x << 8) & 0x00ff0000) | ((x >> 8) & 0x0000ff00) | (x >> 24);
}
static void undoRMD160FinalRound(const unsigned int hIn[5], unsigned int hOut[5])
{
for(int i = 0; i < 5; i++) {
hOut[i] = swp(hIn[i]) - _RIPEMD160_IV_HOST[(i + 1) % 5];
}
}
/**
Copies the target hashes to constant memory
*/
static hipError_t setTargetConstantMemory(const std::vector<struct hash160> &targets)
{
unsigned int count = targets.size();
for(unsigned int i = 0; i < count; i++) {
unsigned int h[5];
undoRMD160FinalRound(targets[i].h, h);
hipError_t err = hipMemcpyToSymbol(_TARGET_HASH, h, sizeof(unsigned int) * 5, i * sizeof(unsigned int) * 5);
if(err) {
return err;
}
}
hipError_t err = hipMemcpyToSymbol(_NUM_TARGET_HASHES, &count, sizeof(unsigned int));
if(err) {
return err;
}
unsigned int useBloomFilter = 0;
err = hipMemcpyToSymbol(_USE_BLOOM_FILTER, &useBloomFilter, sizeof(bool));
if(err) {
return err;
}
return hipSuccess;
}
/**
 * Populates a device-side bloom filter with the target hashes and enables the
 * bloom-filter lookup path. Used when there are too many targets for constant
 * memory.
 *
 * Returns hipSuccess on success; on failure the partially set up filter is
 * released and the error is returned.
 */
static hipError_t setTargetBloomFilter(const std::vector<struct hash160> &targets)
{
    unsigned int filter[BLOOM_FILTER_SIZE_WORDS];
    hipError_t err = hipMalloc(&_bloomFilterPtr, sizeof(unsigned int) * BLOOM_FILTER_SIZE_WORDS);
    if(err) {
        return err;
    }
    memset(filter, 0, sizeof(unsigned int) * BLOOM_FILTER_SIZE_WORDS);
    // Use the low 16 bits of each word in the hash as the index into the bloom filter
    for(unsigned int i = 0; i < targets.size(); i++) {
        unsigned int h[5];
        undoRMD160FinalRound(targets[i].h, h);
        for(int j = 0; j < 5; j++) {
            // BUGFIX: was h[i] (the target index), which set only one bit per
            // target and read past h[5] once there were more than 5 targets.
            unsigned int idx = h[j] & 0xffff;
            filter[idx / 32] |= (0x01 << (idx % 32));
        }
    }
    // Copy to device
    err = hipMemcpy(_bloomFilterPtr, filter, sizeof(unsigned int) * BLOOM_FILTER_SIZE_WORDS, hipMemcpyHostToDevice);
    if(err) {
        hipFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
        return err;
    }
    // Copy device memory pointer to constant memory
    err = hipMemcpyToSymbol(_BLOOM_FILTER, &_bloomFilterPtr, sizeof(unsigned int *));
    if(err) {
        hipFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
        return err;
    }
    unsigned int useBloomFilter = 1;
    err = hipMemcpyToSymbol(_USE_BLOOM_FILTER, &useBloomFilter, sizeof(unsigned int));
    if(err) {
        hipFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
        return err;
    }
    // BUGFIX: the flag was never set, so cleanupTargets() (which consulted it)
    // leaked the device buffer.
    _useBloomFilter = true;
    return hipSuccess;
}
/**
 * Releases the device-side bloom filter, if one was allocated.
 */
void cleanupTargets()
{
    // BUGFIX: free based on the pointer alone. The old code additionally
    // required _useBloomFilter, which was never set to true anywhere,
    // so the device buffer was leaked.
    if(_bloomFilterPtr != NULL) {
        hipFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
    }
    _useBloomFilter = false;
}
/**
 * Uploads the target hashes: into constant memory when they fit, otherwise
 * into a device-side bloom filter. Any previously uploaded targets are
 * released first.
 */
hipError_t setTargetHash(const std::vector<struct hash160> &targets)
{
    cleanupTargets();
    const bool fitsInConstantMem = targets.size() <= MAX_TARGETS_CONSTANT_MEM;
    return fitsInConstantMem ? setTargetConstantMemory(targets)
                             : setTargetBloomFilter(targets);
}
/**
 * Allocates device memory for the multiplication chain used by the batch
 * inversion and publishes the pointer through the _CHAIN constant symbol.
 *
 * count: number of 8-word (256-bit) chain entries to allocate.
 * Returns hipSuccess, or the HIP error if allocation/publication failed.
 */
hipError_t allocateChainBuf(unsigned int count)
{
    hipError_t err = hipMalloc(&_chainBufferPtr, count * sizeof(unsigned int) * 8);
    if(err) {
        return err;
    }
    err = hipMemcpyToSymbol(_CHAIN, &_chainBufferPtr, sizeof(unsigned int *));
    if(err) {
        hipFree(_chainBufferPtr);
        // BUGFIX: reset the pointer so a later cleanupChainBuf() cannot
        // double-free the already-released buffer.
        _chainBufferPtr = NULL;
    }
    return err;
}
// Releases the chain buffer allocated by allocateChainBuf(), if any.
void cleanupChainBuf()
{
    if(_chainBufferPtr == NULL) {
        return;
    }
    hipFree(_chainBufferPtr);
    _chainBufferPtr = NULL;
}
/**
 * Sets the EC point by which all points are incremented each iteration.
 * The coordinates are exported big-endian into the _INC_X / _INC_Y symbols.
 */
hipError_t setIncrementorPoint(const secp256k1::uint256 &x, const secp256k1::uint256 &y)
{
    unsigned int xWords[8];
    unsigned int yWords[8];
    x.exportWords(xWords, 8, secp256k1::uint256::BigEndian);
    y.exportWords(yWords, 8, secp256k1::uint256::BigEndian);
    hipError_t err = hipMemcpyToSymbol(_INC_X, xWords, sizeof(xWords));
    if(err != hipSuccess) {
        return err;
    }
    return hipMemcpyToSymbol(_INC_Y, yWords, sizeof(yWords));
}
// Hashes an uncompressed public key (x, y): SHA-256 followed by RIPEMD-160
// with the final round left undone (matching undoRMD160FinalRound host-side).
__device__ void hashPublicKey(const unsigned int *x, const unsigned int *y, unsigned int *digestOut)
{
    unsigned int sha[8];
    sha256PublicKey(x, y, sha);
    // RIPEMD-160 consumes the SHA-256 words byte-swapped (little-endian)
    for(int word = 0; word < 8; word++) {
        sha[word] = endian(sha[word]);
    }
    ripemd160sha256NoFinal(sha, digestOut);
}
// Hashes a compressed public key (x plus the parity of y): SHA-256 followed by
// RIPEMD-160 with the final round left undone.
__device__ void hashPublicKeyCompressed(const unsigned int *x, unsigned int yParity, unsigned int *digestOut)
{
    unsigned int sha[8];
    sha256PublicKeyCompressed(x, yParity, sha);
    // RIPEMD-160 consumes the SHA-256 words byte-swapped (little-endian)
    for(int word = 0; word < 8; word++) {
        sha[word] = endian(sha[word]);
    }
    ripemd160sha256NoFinal(sha, digestOut);
}
// Appends a result record of `size` bytes to the results buffer. The slot is
// reserved with an atomic counter increment so concurrent threads never
// overwrite each other's records.
__device__ void addResult(unsigned int *numResultsPtr, void *results, void *info, unsigned int size)
{
    unsigned int slot = atomicAdd(numResultsPtr, 1);
    memcpy((unsigned char *)results + slot * size, info, size);
}
// Records a matching key: captures this thread's position, the point (x, y),
// and the finished digest. The RIPEMD-160 round that was undone during target
// setup is re-applied here so the stored digest is the true hash160.
__device__ void setResultFound(unsigned int *numResultsPtr, void *results, int idx, bool compressed, unsigned int x[8], unsigned int y[8], unsigned int digest[5])
{
    struct KeyFinderDeviceResult record;
    record.block = blockIdx.x;
    record.thread = threadIdx.x;
    record.idx = idx;
    record.compressed = compressed;
    for(int w = 0; w < 8; w++) {
        record.x[w] = x[w];
        record.y[w] = y[w];
    }
    // Redo the final round (add rotated IV) and byte-swap back to big-endian
    for(int w = 0; w < 5; w++) {
        record.digest[w] = endian(digest[w] + _RIPEMD160_IV[(w + 1) % 5]);
    }
    addResult(numResultsPtr, results, &record, sizeof(record));
}
// Tests a (final-round-undone) RIPEMD-160 hash against the targets: either a
// membership probe of the device bloom filter (may report false positives,
// resolved later on the host) or a linear scan of the constant-memory list.
__device__ bool checkHash(unsigned int hash[5])
{
    if(*_USE_BLOOM_FILTER) {
        bool maybePresent = true;
        for(int i = 0; i < 5; i++) {
            // Low 16 bits of each word select a bit in the filter
            unsigned int idx = hash[i] & 0xffff;
            unsigned int word = ((unsigned int *)(_BLOOM_FILTER[0]))[idx / 32];
            if((word & (0x01 << (idx % 32))) == 0) {
                maybePresent = false;
            }
        }
        return maybePresent;
    }
    bool foundMatch = false;
    for(int j = 0; j < *_NUM_TARGET_HASHES; j++) {
        bool equal = true;
        for(int i = 0; i < 5; i++) {
            equal &= (hash[i] == _TARGET_HASH[j][i]);
        }
        foundMatch |= equal;
    }
    return foundMatch;
}
/**
 * Performs one iteration of the key search for this thread's points.
 *
 * For each of the thread's `pointsPerThread` points it hashes the public key
 * (uncompressed and/or compressed form, selected by `compression`) and records
 * any target matches, then advances every point by the incrementor
 * (_INC_X, _INC_Y) using a single batched modular inversion.
 *
 * `chain` (published via the _CHAIN symbol by allocateChainBuf) holds the
 * partial products needed to unwind the batch inversion in the reverse loop.
 */
__device__ void doIteration(unsigned int *xPtr, unsigned int *yPtr, int pointsPerThread, unsigned int *numResults, void *results, int compression)
{
    unsigned int *chain = _CHAIN[0];
    // Multiply together all (_Gx - x) and then invert
    unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 };
    for(int i = 0; i < pointsPerThread; i++) {
        unsigned int x[8];
        unsigned int digest[5];
        readInt(xPtr, i, x);
        if(compression == PointCompressionType::UNCOMPRESSED || compression == PointCompressionType::BOTH) {
            unsigned int y[8];
            readInt(yPtr, i, y);
            hashPublicKey(x, y, digest);
            if(checkHash(digest)) {
                setResultFound(numResults, results, i, false, x, y, digest);
            }
        }
        if(compression == PointCompressionType::COMPRESSED || compression == PointCompressionType::BOTH) {
            // Only the least significant word of y (its parity) is needed here;
            // the full y is read only when a match must be reported.
            hashPublicKeyCompressed(x, readIntLSW(yPtr, i), digest);
            if(checkHash(digest)) {
                unsigned int y[8];
                readInt(yPtr, i, y);
                setResultFound(numResults, results, i, true, x, y, digest);
            }
        }
        // Accumulate this point's difference term into `inverse`, saving the
        // running partial product in `chain` for the unwind pass below.
        beginBatchAdd(_INC_X, x, chain, i, inverse);
    }
    // One modular inversion shared by all points (batch inversion)
    doBatchInverse(inverse);
    // Unwind in reverse order, completing the point addition for each entry
    // and writing the advanced coordinates back.
    for(int i = pointsPerThread - 1; i >= 0; i--) {
        unsigned int newX[8];
        unsigned int newY[8];
        completeBatchAdd(_INC_X, _INC_Y, xPtr, yPtr, i, chain, inverse, newX, newY);
        writeInt(xPtr, i, newX);
        writeInt(yPtr, i, newY);
    }
}
/**
 * Same as doIteration, but uses the batch-add variant that also handles the
 * point-doubling case (presumably when a point coincides with the incrementor
 * — confirm against beginBatchAddWithDouble).
 *
 * Hashes each point (uncompressed and/or compressed, per `compression`),
 * records matches, then advances all points with one batched inversion.
 */
__device__ void doIterationWithDouble(unsigned int *xPtr, unsigned int *yPtr, int pointsPerThread, unsigned int *numResults, void *results, int compression)
{
    unsigned int *chain = _CHAIN[0];
    // Multiply together all (_Gx - x) and then invert
    unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 };
    for(int i = 0; i < pointsPerThread; i++) {
        unsigned int x[8];
        unsigned int digest[5];
        readInt(xPtr, i, x);
        // CONSISTENCY FIX: use the PointCompressionType constants instead of
        // the magic numbers 0/1/2, matching doIteration (the original in-line
        // comments confirm 1=UNCOMPRESSED, 0=COMPRESSED, 2=BOTH).
        // uncompressed
        if(compression == PointCompressionType::UNCOMPRESSED || compression == PointCompressionType::BOTH) {
            unsigned int y[8];
            readInt(yPtr, i, y);
            hashPublicKey(x, y, digest);
            if(checkHash(digest)) {
                setResultFound(numResults, results, i, false, x, y, digest);
            }
        }
        // compressed
        if(compression == PointCompressionType::COMPRESSED || compression == PointCompressionType::BOTH) {
            hashPublicKeyCompressed(x, readIntLSW(yPtr, i), digest);
            if(checkHash(digest)) {
                unsigned int y[8];
                readInt(yPtr, i, y);
                setResultFound(numResults, results, i, true, x, y, digest);
            }
        }
        beginBatchAddWithDouble(_INC_X, _INC_Y, xPtr, chain, i, inverse);
    }
    // One modular inversion shared by all points (batch inversion)
    doBatchInverse(inverse);
    // Unwind in reverse order, completing each point addition
    for(int i = pointsPerThread - 1; i >= 0; i--) {
        unsigned int newX[8];
        unsigned int newY[8];
        completeBatchAddWithDouble(_INC_X, _INC_Y, xPtr, yPtr, i, chain, inverse, newX, newY);
        writeInt(xPtr, i, newX);
        writeInt(yPtr, i, newY);
    }
}
/**
 * Performs a single search iteration.
 * Each thread processes `points` entries of the x/y coordinate buffers,
 * checking hashes against the targets and then advancing its points.
 */
__global__ void keyFinderKernel(int points, unsigned int *x, unsigned int *y, unsigned int *numResults, void *results, int compression)
{
    doIteration(x, y, points, numResults, results, compression);
}
// Same as keyFinderKernel, but uses the batch-add variant that also handles
// the point-doubling case (see doIterationWithDouble).
__global__ void keyFinderKernelWithDouble(int points, unsigned int *x, unsigned int *y, unsigned int *numResults, void *results, int compression)
{
    doIterationWithDouble(x, y, points, numResults, results, compression);
} | 004cedaeb181f769e5551b3c50743a3c30d7f4f5.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include "ptx.cuh"
#include "secp256k1.cuh"
#include "sha256.cuh"
#include "ripemd160.cuh"
#include "secp256k1.h"
#include "DeviceContextShared.h"
#define MAX_TARGETS_CONSTANT_MEM 16
#define BLOOM_FILTER_SIZE_WORDS 2048
__constant__ unsigned int _TARGET_HASH[MAX_TARGETS_CONSTANT_MEM][5];
__constant__ unsigned int _NUM_TARGET_HASHES[1];
__constant__ unsigned int *_BLOOM_FILTER[1];
__constant__ unsigned int _USE_BLOOM_FILTER[1];
__constant__ unsigned int _INC_X[8];
__constant__ unsigned int _INC_Y[8];
__constant__ unsigned int *_CHAIN[1];
static bool _useBloomFilter = false;
static unsigned int *_bloomFilterPtr = NULL;
static unsigned int *_chainBufferPtr = NULL;
static const unsigned int _RIPEMD160_IV_HOST[5] = {
0x67452301,
0xefcdab89,
0x98badcfe,
0x10325476,
0xc3d2e1f0
};
static unsigned int swp(unsigned int x)
{
return (x << 24) | ((x << 8) & 0x00ff0000) | ((x >> 8) & 0x0000ff00) | (x >> 24);
}
static void undoRMD160FinalRound(const unsigned int hIn[5], unsigned int hOut[5])
{
for(int i = 0; i < 5; i++) {
hOut[i] = swp(hIn[i]) - _RIPEMD160_IV_HOST[(i + 1) % 5];
}
}
/**
 * Copies the target hashes to constant memory.
 *
 * Each hash has its final RIPEMD-160 round undone before upload so device
 * code can compare without performing that round. Also publishes the target
 * count and disables the bloom-filter lookup path.
 *
 * Returns cudaSuccess, or the first CUDA error encountered.
 */
static cudaError_t setTargetConstantMemory(const std::vector<struct hash160> &targets)
{
    unsigned int count = targets.size();
    for(unsigned int i = 0; i < count; i++) {
        unsigned int h[5];
        undoRMD160FinalRound(targets[i].h, h);
        // Each target occupies one 5-word slot in the _TARGET_HASH array
        cudaError_t err = cudaMemcpyToSymbol(_TARGET_HASH, h, sizeof(unsigned int) * 5, i * sizeof(unsigned int) * 5);
        if(err) {
            return err;
        }
    }
    cudaError_t err = cudaMemcpyToSymbol(_NUM_TARGET_HASHES, &count, sizeof(unsigned int));
    if(err) {
        return err;
    }
    unsigned int useBloomFilter = 0;
    // BUGFIX: _USE_BLOOM_FILTER is an unsigned int symbol; the old code copied
    // only sizeof(bool) bytes, leaving the upper bytes of the flag unwritten.
    err = cudaMemcpyToSymbol(_USE_BLOOM_FILTER, &useBloomFilter, sizeof(unsigned int));
    if(err) {
        return err;
    }
    return cudaSuccess;
}
/**
 * Populates a device-side bloom filter with the target hashes and enables the
 * bloom-filter lookup path. Used when there are too many targets for constant
 * memory.
 *
 * Returns cudaSuccess on success; on failure the partially set up filter is
 * released and the error is returned.
 */
static cudaError_t setTargetBloomFilter(const std::vector<struct hash160> &targets)
{
    unsigned int filter[BLOOM_FILTER_SIZE_WORDS];
    cudaError_t err = cudaMalloc(&_bloomFilterPtr, sizeof(unsigned int) * BLOOM_FILTER_SIZE_WORDS);
    if(err) {
        return err;
    }
    memset(filter, 0, sizeof(unsigned int) * BLOOM_FILTER_SIZE_WORDS);
    // Use the low 16 bits of each word in the hash as the index into the bloom filter
    for(unsigned int i = 0; i < targets.size(); i++) {
        unsigned int h[5];
        undoRMD160FinalRound(targets[i].h, h);
        for(int j = 0; j < 5; j++) {
            // BUGFIX: was h[i] (the target index), which set only one bit per
            // target and read past h[5] once there were more than 5 targets.
            unsigned int idx = h[j] & 0xffff;
            filter[idx / 32] |= (0x01 << (idx % 32));
        }
    }
    // Copy to device
    err = cudaMemcpy(_bloomFilterPtr, filter, sizeof(unsigned int) * BLOOM_FILTER_SIZE_WORDS, cudaMemcpyHostToDevice);
    if(err) {
        cudaFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
        return err;
    }
    // Copy device memory pointer to constant memory
    err = cudaMemcpyToSymbol(_BLOOM_FILTER, &_bloomFilterPtr, sizeof(unsigned int *));
    if(err) {
        cudaFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
        return err;
    }
    unsigned int useBloomFilter = 1;
    err = cudaMemcpyToSymbol(_USE_BLOOM_FILTER, &useBloomFilter, sizeof(unsigned int));
    if(err) {
        cudaFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
        return err;
    }
    // BUGFIX: the flag was never set, so cleanupTargets() (which consulted it)
    // leaked the device buffer.
    _useBloomFilter = true;
    return cudaSuccess;
}
/**
 * Releases the device-side bloom filter, if one was allocated.
 */
void cleanupTargets()
{
    // BUGFIX: free based on the pointer alone. The old code additionally
    // required _useBloomFilter, which was never set to true anywhere,
    // so the device buffer was leaked.
    if(_bloomFilterPtr != NULL) {
        cudaFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
    }
    _useBloomFilter = false;
}
/**
*Copies the target hashes to either constant memory, or the bloom filter depending
on how many targets there are
*/
cudaError_t setTargetHash(const std::vector<struct hash160> &targets)
{
cleanupTargets();
if(targets.size() <= MAX_TARGETS_CONSTANT_MEM) {
return setTargetConstantMemory(targets);
} else {
return setTargetBloomFilter(targets);
}
}
/**
 * Allocates device memory for the multiplication chain used by the batch
 * inversion and publishes the pointer through the _CHAIN constant symbol.
 *
 * count: number of 8-word (256-bit) chain entries to allocate.
 * Returns cudaSuccess, or the CUDA error if allocation/publication failed.
 */
cudaError_t allocateChainBuf(unsigned int count)
{
    cudaError_t err = cudaMalloc(&_chainBufferPtr, count * sizeof(unsigned int) * 8);
    if(err) {
        return err;
    }
    err = cudaMemcpyToSymbol(_CHAIN, &_chainBufferPtr, sizeof(unsigned int *));
    if(err) {
        cudaFree(_chainBufferPtr);
        // BUGFIX: reset the pointer so a later cleanupChainBuf() cannot
        // double-free the already-released buffer.
        _chainBufferPtr = NULL;
    }
    return err;
}
void cleanupChainBuf()
{
if(_chainBufferPtr != NULL) {
cudaFree(_chainBufferPtr);
_chainBufferPtr = NULL;
}
}
/**
*Sets the EC point which all points will be incremented by
*/
cudaError_t setIncrementorPoint(const secp256k1::uint256 &x, const secp256k1::uint256 &y)
{
unsigned int xWords[8];
unsigned int yWords[8];
x.exportWords(xWords, 8, secp256k1::uint256::BigEndian);
y.exportWords(yWords, 8, secp256k1::uint256::BigEndian);
cudaError_t err = cudaMemcpyToSymbol(_INC_X, xWords, sizeof(unsigned int) * 8);
if(err) {
return err;
}
return cudaMemcpyToSymbol(_INC_Y, yWords, sizeof(unsigned int) * 8);
}
__device__ void hashPublicKey(const unsigned int *x, const unsigned int *y, unsigned int *digestOut)
{
unsigned int hash[8];
sha256PublicKey(x, y, hash);
// Swap to little-endian
for(int i = 0; i < 8; i++) {
hash[i] = endian(hash[i]);
}
ripemd160sha256NoFinal(hash, digestOut);
}
__device__ void hashPublicKeyCompressed(const unsigned int *x, unsigned int yParity, unsigned int *digestOut)
{
unsigned int hash[8];
sha256PublicKeyCompressed(x, yParity, hash);
// Swap to little-endian
for(int i = 0; i < 8; i++) {
hash[i] = endian(hash[i]);
}
ripemd160sha256NoFinal(hash, digestOut);
}
__device__ void addResult(unsigned int *numResultsPtr, void *results, void *info, unsigned int size)
{
unsigned int count = atomicAdd(numResultsPtr, 1);
unsigned char *ptr = (unsigned char *)results + count * size;
memcpy(ptr, info, size);
}
__device__ void setResultFound(unsigned int *numResultsPtr, void *results, int idx, bool compressed, unsigned int x[8], unsigned int y[8], unsigned int digest[5])
{
struct KeyFinderDeviceResult r;
r.block = blockIdx.x;
r.thread = threadIdx.x;
r.idx = idx;
r.compressed = compressed;
for(int i = 0; i < 8; i++) {
r.x[i] = x[i];
r.y[i] = y[i];
}
for(int i = 0; i < 5; i++) {
r.digest[i] = endian(digest[i] + _RIPEMD160_IV[(i + 1) % 5]);
}
addResult(numResultsPtr, results, &r, sizeof(r));
}
__device__ bool checkHash(unsigned int hash[5])
{
bool foundMatch = false;
if(*_USE_BLOOM_FILTER) {
foundMatch = true;
for(int i = 0; i < 5; i++) {
unsigned int idx = hash[i] & 0xffff;
unsigned int f = ((unsigned int *)(_BLOOM_FILTER[0]))[idx / 32];
if((f & (0x01 << (idx % 32))) == 0) {
foundMatch = false;
}
}
} else {
for(int j = 0; j < *_NUM_TARGET_HASHES; j++) {
bool equal = true;
for(int i = 0; i < 5; i++) {
equal &= (hash[i] == _TARGET_HASH[j][i]);
}
foundMatch |= equal;
}
}
return foundMatch;
}
__device__ void doIteration(unsigned int *xPtr, unsigned int *yPtr, int pointsPerThread, unsigned int *numResults, void *results, int compression)
{
unsigned int *chain = _CHAIN[0];
// Multiply together all (_Gx - x) and then invert
unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 };
for(int i = 0; i < pointsPerThread; i++) {
unsigned int x[8];
unsigned int digest[5];
readInt(xPtr, i, x);
if(compression == PointCompressionType::UNCOMPRESSED || compression == PointCompressionType::BOTH) {
unsigned int y[8];
readInt(yPtr, i, y);
hashPublicKey(x, y, digest);
if(checkHash(digest)) {
setResultFound(numResults, results, i, false, x, y, digest);
}
}
if(compression == PointCompressionType::COMPRESSED || compression == PointCompressionType::BOTH) {
hashPublicKeyCompressed(x, readIntLSW(yPtr, i), digest);
if(checkHash(digest)) {
unsigned int y[8];
readInt(yPtr, i, y);
setResultFound(numResults, results, i, true, x, y, digest);
}
}
beginBatchAdd(_INC_X, x, chain, i, inverse);
}
doBatchInverse(inverse);
for(int i = pointsPerThread - 1; i >= 0; i--) {
unsigned int newX[8];
unsigned int newY[8];
completeBatchAdd(_INC_X, _INC_Y, xPtr, yPtr, i, chain, inverse, newX, newY);
writeInt(xPtr, i, newX);
writeInt(yPtr, i, newY);
}
}
/**
 * Same as doIteration, but uses the batch-add variant that also handles the
 * point-doubling case (presumably when a point coincides with the incrementor
 * — confirm against beginBatchAddWithDouble).
 *
 * Hashes each point (uncompressed and/or compressed, per `compression`),
 * records matches, then advances all points with one batched inversion.
 */
__device__ void doIterationWithDouble(unsigned int *xPtr, unsigned int *yPtr, int pointsPerThread, unsigned int *numResults, void *results, int compression)
{
    unsigned int *chain = _CHAIN[0];
    // Multiply together all (_Gx - x) and then invert
    unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 };
    for(int i = 0; i < pointsPerThread; i++) {
        unsigned int x[8];
        unsigned int digest[5];
        readInt(xPtr, i, x);
        // CONSISTENCY FIX: use the PointCompressionType constants instead of
        // the magic numbers 0/1/2, matching doIteration (the original in-line
        // comments confirm 1=UNCOMPRESSED, 0=COMPRESSED, 2=BOTH).
        // uncompressed
        if(compression == PointCompressionType::UNCOMPRESSED || compression == PointCompressionType::BOTH) {
            unsigned int y[8];
            readInt(yPtr, i, y);
            hashPublicKey(x, y, digest);
            if(checkHash(digest)) {
                setResultFound(numResults, results, i, false, x, y, digest);
            }
        }
        // compressed
        if(compression == PointCompressionType::COMPRESSED || compression == PointCompressionType::BOTH) {
            hashPublicKeyCompressed(x, readIntLSW(yPtr, i), digest);
            if(checkHash(digest)) {
                unsigned int y[8];
                readInt(yPtr, i, y);
                setResultFound(numResults, results, i, true, x, y, digest);
            }
        }
        beginBatchAddWithDouble(_INC_X, _INC_Y, xPtr, chain, i, inverse);
    }
    // One modular inversion shared by all points (batch inversion)
    doBatchInverse(inverse);
    // Unwind in reverse order, completing each point addition
    for(int i = pointsPerThread - 1; i >= 0; i--) {
        unsigned int newX[8];
        unsigned int newY[8];
        completeBatchAddWithDouble(_INC_X, _INC_Y, xPtr, yPtr, i, chain, inverse, newX, newY);
        writeInt(xPtr, i, newX);
        writeInt(yPtr, i, newY);
    }
}
/**
* Performs a single iteration
*/
__global__ void keyFinderKernel(int points, unsigned int *x, unsigned int *y, unsigned int *numResults, void *results, int compression)
{
doIteration(x, y, points, numResults, results, compression);
}
__global__ void keyFinderKernelWithDouble(int points, unsigned int *x, unsigned int *y, unsigned int *numResults, void *results, int compression)
{
doIterationWithDouble(x, y, points, numResults, results, compression);
} |
f367e630c309db98c3075da5cc665dcb11674425.hip | // !!! This is a file automatically generated by hipify!!!
#include <unittest/unittest.h>
#include <thrust/detail/seq.h>
#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_HIP
#include <thrust/system/hip/detail/par.h>
#elif THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
#include <thrust/system/hip/detail/par.h>
#endif
#include <thrust/system/cpp/detail/par.h>
#include <thrust/system/omp/detail/par.h>
#include <thrust/system/tbb/detail/par.h>
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
#include <thrust/system/hip/detail/par.h>
#endif
template<typename T>
struct test_allocator_t
{
};
test_allocator_t<int> test_allocator = test_allocator_t<int>();
const test_allocator_t<int> const_test_allocator = test_allocator_t<int>();
struct test_memory_resource_t THRUST_FINAL : thrust::mr::memory_resource<>
{
void * do_allocate(std::size_t size, std::size_t) THRUST_OVERRIDE
{
return reinterpret_cast<void *>(size);
}
void do_deallocate(void * ptr, std::size_t size, std::size_t) THRUST_OVERRIDE
{
ASSERT_EQUAL(ptr, reinterpret_cast<void *>(size));
}
} test_memory_resource;
template<typename Policy, template <typename> class CRTPBase>
struct policy_info
{
typedef Policy policy;
template<template <typename, template <typename> class> class Template, typename Argument>
struct apply_base_second
{
typedef Template<Argument, CRTPBase> type;
};
};
template<typename PolicyInfo>
struct TestAllocatorAttachment
{
template<typename Expected, typename T>
static void assert_correct(T)
{
ASSERT_EQUAL(
(thrust::detail::is_same<
T,
typename PolicyInfo::template apply_base_second<
thrust::detail::execute_with_allocator,
Expected
>::type
>::value), true);
}
template<typename ExpectedResource, typename T>
static void assert_npa_correct(T)
{
ASSERT_EQUAL(
(thrust::detail::is_same<
T,
typename PolicyInfo::template apply_base_second<
thrust::detail::execute_with_allocator,
thrust::mr::allocator<
thrust::detail::max_align_t,
ExpectedResource
>
>::type
>::value), true);
}
template<typename Policy>
void test_temporary_allocation_valid(Policy policy)
{
using thrust::detail::get_temporary_buffer;
return_temporary_buffer(
policy,
get_temporary_buffer<int>(
policy,
123
).first,
123
);
}
void operator()()
{
typename PolicyInfo::policy policy;
// test correctness of attachment
assert_correct<test_allocator_t<int> >(policy(test_allocator_t<int>()));
assert_correct<test_allocator_t<int>&>(policy(test_allocator));
assert_correct<test_allocator_t<int> >(policy(const_test_allocator));
assert_npa_correct<test_memory_resource_t>(policy(&test_memory_resource));
// test whether the resulting policy is actually usable
// a real allocator is necessary here, unlike above
std::allocator<int> alloc;
const std::allocator<int> const_alloc;
test_temporary_allocation_valid(policy(std::allocator<int>()));
test_temporary_allocation_valid(policy(alloc));
test_temporary_allocation_valid(policy(const_alloc));
test_temporary_allocation_valid(policy(&test_memory_resource));
#if THRUST_CPP_DIALECT >= 2011
test_temporary_allocation_valid(policy(std::allocator<int>()).after(1));
test_temporary_allocation_valid(policy(alloc).after(1));
test_temporary_allocation_valid(policy(const_alloc).after(1));
#endif
}
};
typedef policy_info<
thrust::detail::seq_t,
thrust::system::detail::sequential::execution_policy
> sequential_info;
typedef policy_info<
thrust::system::cpp::detail::par_t,
thrust::system::cpp::detail::execution_policy
> cpp_par_info;
typedef policy_info<
thrust::system::THRUST_DEVICE_BACKEND::detail::par_t,
thrust::THRUST_DEVICE_BACKEND_DETAIL::execute_on_stream_base
> THRUST_DEVICE_BACKEND_par_info;
typedef policy_info<
thrust::system::omp::detail::par_t,
thrust::system::omp::detail::execution_policy
> omp_par_info;
typedef policy_info<
thrust::system::tbb::detail::par_t,
thrust::system::tbb::detail::execution_policy
> tbb_par_info;
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
typedef policy_info<
thrust::system::cuda::detail::par_t,
thrust::cuda_cub::execute_on_stream_base
> cuda_par_info;
#endif
SimpleUnitTest<
TestAllocatorAttachment,
unittest::type_list<
sequential_info,
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
cuda_par_info,
#endif
cpp_par_info,
THRUST_DEVICE_BACKEND_par_info,
omp_par_info,
tbb_par_info
>
> TestAllocatorAttachmentInstance;
| f367e630c309db98c3075da5cc665dcb11674425.cu | #include <unittest/unittest.h>
#include <thrust/detail/seq.h>
#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_HIP
#include <thrust/system/hip/detail/par.h>
#elif THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
#include <thrust/system/cuda/detail/par.h>
#endif
#include <thrust/system/cpp/detail/par.h>
#include <thrust/system/omp/detail/par.h>
#include <thrust/system/tbb/detail/par.h>
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
#include <thrust/system/cuda/detail/par.h>
#endif
template<typename T>
struct test_allocator_t
{
};
test_allocator_t<int> test_allocator = test_allocator_t<int>();
const test_allocator_t<int> const_test_allocator = test_allocator_t<int>();
struct test_memory_resource_t THRUST_FINAL : thrust::mr::memory_resource<>
{
void * do_allocate(std::size_t size, std::size_t) THRUST_OVERRIDE
{
return reinterpret_cast<void *>(size);
}
void do_deallocate(void * ptr, std::size_t size, std::size_t) THRUST_OVERRIDE
{
ASSERT_EQUAL(ptr, reinterpret_cast<void *>(size));
}
} test_memory_resource;
template<typename Policy, template <typename> class CRTPBase>
struct policy_info
{
typedef Policy policy;
template<template <typename, template <typename> class> class Template, typename Argument>
struct apply_base_second
{
typedef Template<Argument, CRTPBase> type;
};
};
template<typename PolicyInfo>
struct TestAllocatorAttachment
{
template<typename Expected, typename T>
static void assert_correct(T)
{
ASSERT_EQUAL(
(thrust::detail::is_same<
T,
typename PolicyInfo::template apply_base_second<
thrust::detail::execute_with_allocator,
Expected
>::type
>::value), true);
}
template<typename ExpectedResource, typename T>
static void assert_npa_correct(T)
{
ASSERT_EQUAL(
(thrust::detail::is_same<
T,
typename PolicyInfo::template apply_base_second<
thrust::detail::execute_with_allocator,
thrust::mr::allocator<
thrust::detail::max_align_t,
ExpectedResource
>
>::type
>::value), true);
}
template<typename Policy>
void test_temporary_allocation_valid(Policy policy)
{
using thrust::detail::get_temporary_buffer;
return_temporary_buffer(
policy,
get_temporary_buffer<int>(
policy,
123
).first,
123
);
}
void operator()()
{
typename PolicyInfo::policy policy;
// test correctness of attachment
assert_correct<test_allocator_t<int> >(policy(test_allocator_t<int>()));
assert_correct<test_allocator_t<int>&>(policy(test_allocator));
assert_correct<test_allocator_t<int> >(policy(const_test_allocator));
assert_npa_correct<test_memory_resource_t>(policy(&test_memory_resource));
// test whether the resulting policy is actually usable
// a real allocator is necessary here, unlike above
std::allocator<int> alloc;
const std::allocator<int> const_alloc;
test_temporary_allocation_valid(policy(std::allocator<int>()));
test_temporary_allocation_valid(policy(alloc));
test_temporary_allocation_valid(policy(const_alloc));
test_temporary_allocation_valid(policy(&test_memory_resource));
#if THRUST_CPP_DIALECT >= 2011
test_temporary_allocation_valid(policy(std::allocator<int>()).after(1));
test_temporary_allocation_valid(policy(alloc).after(1));
test_temporary_allocation_valid(policy(const_alloc).after(1));
#endif
}
};
typedef policy_info<
thrust::detail::seq_t,
thrust::system::detail::sequential::execution_policy
> sequential_info;
typedef policy_info<
thrust::system::cpp::detail::par_t,
thrust::system::cpp::detail::execution_policy
> cpp_par_info;
typedef policy_info<
thrust::system::THRUST_DEVICE_BACKEND::detail::par_t,
thrust::THRUST_DEVICE_BACKEND_DETAIL::execute_on_stream_base
> THRUST_DEVICE_BACKEND_par_info;
typedef policy_info<
thrust::system::omp::detail::par_t,
thrust::system::omp::detail::execution_policy
> omp_par_info;
typedef policy_info<
thrust::system::tbb::detail::par_t,
thrust::system::tbb::detail::execution_policy
> tbb_par_info;
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
typedef policy_info<
thrust::system::cuda::detail::par_t,
thrust::cuda_cub::execute_on_stream_base
> cuda_par_info;
#endif
SimpleUnitTest<
TestAllocatorAttachment,
unittest::type_list<
sequential_info,
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
cuda_par_info,
#endif
cpp_par_info,
THRUST_DEVICE_BACKEND_par_info,
omp_par_info,
tbb_par_info
>
> TestAllocatorAttachmentInstance;
|
3f85e11dac34d9800b74fe098cde865eab9e4bf9.hip | // !!! This is a file automatically generated by hipify!!!
#include "util.cu.h"
#include "log.h"
#include <linux/ip.h>
#include <linux/udp.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
//#include <sys/types.h>
//#include <sys/socket.h>
#include <arpa/inet.h> // IPPROTO_TCP, IPPROTO_ICMP
// Returns a monotonic timestamp in nanoseconds.
// Based on CLOCK_MONOTONIC, which on reasonably configured systems is backed
// by rdtsc (via the vDSO) and is hence fast.
uint64_t monotonic_time() {
    struct timespec ts;
    // BUGFIX: the address-of operator had been mangled into "×pec"
    // (an HTML "&times;" encoding artifact) — this did not compile.
    clock_gettime(CLOCK_MONOTONIC, &ts);
    // Widen before multiplying so the seconds-to-nanoseconds conversion
    // cannot overflow on platforms with a 32-bit time_t.
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}
void monitoring_loop(uint32_t** pkt_cnt, uint32_t** pkt_size)
{
START_GRN
printf("[Monitoring] Control is returned to CPU!\n");
END
uint32_t prev_pkt[2] = {0,}, cur_pkt[2] = {0,};
double pkts[2] = {0};
char units[] = {' ', 'K', 'M', 'G', 'T'};
char pps[2][40] = {0};
char bps[2][40] = {0};
uint32_t p_size = 0;
int i = 0, j = 0;
int elapsed_time = 0;
uint64_t last_stats_printed = monotonic_time();
uint64_t time = 0;
while(1)
{
time = monotonic_time();
if(time - last_stats_printed > 1000 * 1000 * 1000){
elapsed_time++; // 1 sec +
last_stats_printed = time;
#if 1
ASSERTRT(hipMemcpy(&cur_pkt[0], &(*pkt_cnt)[0], sizeof(uint32_t), hipMemcpyDeviceToHost));
ASSERTRT(hipMemcpy(&cur_pkt[1], &(*pkt_cnt)[1], sizeof(uint32_t), hipMemcpyDeviceToHost));
#else
ASSERTRT(hipMemcpy(&cur_pkt[0], pkt_cnt[0], sizeof(int), hipMemcpyDeviceToHost));
ASSERTRT(hipMemcpy(&cur_pkt[1], pkt_cnt[1], sizeof(int), hipMemcpyDeviceToHost));
#endif
ASSERTRT(hipMemcpy(&p_size, *pkt_size, sizeof(uint32_t), hipMemcpyDeviceToHost));
p_size += 4;
system("clear");
#if 0
printf("[CKJUNG] buf #0\n");
for(i = 0; i < 1024; i++){
printf("%d ", data[i]);
}
printf("\n\n");
#endif
for(i = 0; i < 2; i++){
double tmp_pps;
double tmp;
//double batch;
if (prev_pkt[i] != cur_pkt[i]){ // If we got a traffic flow
//printf("prev != cur________________prev_pkt[%d]: %d, cur_pkt[%d]: %d\n", i, prev_pkt[i], i, cur_pkt[i]);
pkts[i] = (double)(cur_pkt[i] - prev_pkt[i]);
#if 0
if(i == 0)
printf("RX_pkts: %d\n", (int)pkts[i]);
else
printf("TX_pkts: %d\n", (int)pkts[i]);
#endif
tmp = tmp_pps = pkts[i];
//batch = tmp/BATCH;
for(j = 0; tmp >= 1000 && j < sizeof(units)/sizeof(char) -1; j++)
tmp /= 1000;
sprintf(pps[i],"%.3lf %c" ,tmp, units[j]);
#if 0
p_size = PKT_SIZE;
#endif
//tmp = pkts[i] * p_size * 8; // Bytes -> Bits
tmp = pkts[i] * p_size * 8 + tmp_pps * 20 * 8; // Add IFG also, 20.01.15, CKJUNG
for(j = 0; tmp >= 1000 && j < sizeof(units)/sizeof(char) -1; j++)
tmp /= 1000;
double percent = 10.0;
percent = tmp/percent*100;
sprintf(bps[i],"%.3lf %c" ,tmp, units[j]);
if(i == 0){
//printf("[RX] pps: %spps %sbps(%.2lf %), pkt_size: %d \n", pps[i], bps[i], percent, p_size);
printf("[RX] pps: %spps %sbps(", pps[i], bps[i]);
if(percent >= 99){
START_GRN
printf("%.2lf %%",percent);
END
}else{
START_YLW
printf("%.2lf %%",percent);
END
}
printf("), pkt_size: ");
START_RED
printf("%d \n", p_size);
END
}else{
/*
printf("[TX] pps: %spps %sbps(%.2lf %%), pkt_size: ", pps[i], bps[i], percent);
*/
printf("[TX] pps: %spps %sbps(", pps[i], bps[i]);
if(percent >= 99){
START_GRN
printf("%.2lf %%",percent);
END
}else{
START_YLW
printf("%.2lf %%",percent);
END
}
printf("), pkt_size: ");
START_RED
printf("%d \n", p_size);
END
}
}else{
if(i == 0)
printf("[RX] pps: None\n");
else
printf("[TX] pps: None\n");
}
}
int second = elapsed_time%60;
int minute = elapsed_time/60;
printf("\nElapsed:%3d m %3d s\n(ctrl + c) to stop.\n", minute, second);
#if 0
for(i = 0; i<STATUS_SIZE; i++)
{
if(i % 512 ==0)
printf("\n\n");
if(buf_idx[i] == 1){
START_GRN
printf("%d ", buf_idx[i]);
END
}else if(buf_idx[i] == 2){
START_RED
printf("%d ", buf_idx[i]);
END
}else if(buf_idx[i] == 3){
START_BLU
printf("%d ", buf_idx[i]);
END
}else{
printf("%d ", buf_idx[i]);
}
}
printf("\n");
#endif
prev_pkt[0] = cur_pkt[0];
prev_pkt[1] = cur_pkt[1];
}
//sleep(1);
}
}
| 3f85e11dac34d9800b74fe098cde865eab9e4bf9.cu | #include "util.cu.h"
#include "log.h"
#include <linux/ip.h>
#include <linux/udp.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
//#include <sys/types.h>
//#include <sys/socket.h>
#include <arpa/inet.h> // IPPROTO_TCP, IPPROTO_ICMP
// Returns a monotonic timestamp in nanoseconds.
// Based on CLOCK_MONOTONIC, which on reasonably configured systems is backed
// by rdtsc (via the vDSO) and is hence fast.
uint64_t monotonic_time() {
    struct timespec ts;
    // BUGFIX: the address-of operator had been mangled into "×pec"
    // (an HTML "&times;" encoding artifact) — this did not compile.
    clock_gettime(CLOCK_MONOTONIC, &ts);
    // Widen before multiplying so the seconds-to-nanoseconds conversion
    // cannot overflow on platforms with a 32-bit time_t.
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}
void monitoring_loop(uint32_t** pkt_cnt, uint32_t** pkt_size)
{
START_GRN
printf("[Monitoring] Control is returned to CPU!\n");
END
uint32_t prev_pkt[2] = {0,}, cur_pkt[2] = {0,};
double pkts[2] = {0};
char units[] = {' ', 'K', 'M', 'G', 'T'};
char pps[2][40] = {0};
char bps[2][40] = {0};
uint32_t p_size = 0;
int i = 0, j = 0;
int elapsed_time = 0;
uint64_t last_stats_printed = monotonic_time();
uint64_t time = 0;
while(1)
{
time = monotonic_time();
if(time - last_stats_printed > 1000 * 1000 * 1000){
elapsed_time++; // 1 sec +
last_stats_printed = time;
#if 1
ASSERTRT(cudaMemcpy(&cur_pkt[0], &(*pkt_cnt)[0], sizeof(uint32_t), cudaMemcpyDeviceToHost));
ASSERTRT(cudaMemcpy(&cur_pkt[1], &(*pkt_cnt)[1], sizeof(uint32_t), cudaMemcpyDeviceToHost));
#else
ASSERTRT(cudaMemcpy(&cur_pkt[0], pkt_cnt[0], sizeof(int), cudaMemcpyDeviceToHost));
ASSERTRT(cudaMemcpy(&cur_pkt[1], pkt_cnt[1], sizeof(int), cudaMemcpyDeviceToHost));
#endif
ASSERTRT(cudaMemcpy(&p_size, *pkt_size, sizeof(uint32_t), cudaMemcpyDeviceToHost));
p_size += 4;
system("clear");
#if 0
printf("[CKJUNG] buf #0\n");
for(i = 0; i < 1024; i++){
printf("%d ", data[i]);
}
printf("\n\n");
#endif
for(i = 0; i < 2; i++){
double tmp_pps;
double tmp;
//double batch;
if (prev_pkt[i] != cur_pkt[i]){ // If we got a traffic flow
//printf("prev != cur________________prev_pkt[%d]: %d, cur_pkt[%d]: %d\n", i, prev_pkt[i], i, cur_pkt[i]);
pkts[i] = (double)(cur_pkt[i] - prev_pkt[i]);
#if 0
if(i == 0)
printf("RX_pkts: %d\n", (int)pkts[i]);
else
printf("TX_pkts: %d\n", (int)pkts[i]);
#endif
tmp = tmp_pps = pkts[i];
//batch = tmp/BATCH;
for(j = 0; tmp >= 1000 && j < sizeof(units)/sizeof(char) -1; j++)
tmp /= 1000;
sprintf(pps[i],"%.3lf %c" ,tmp, units[j]);
#if 0
p_size = PKT_SIZE;
#endif
//tmp = pkts[i] * p_size * 8; // Bytes -> Bits
tmp = pkts[i] * p_size * 8 + tmp_pps * 20 * 8; // Add IFG also, 20.01.15, CKJUNG
for(j = 0; tmp >= 1000 && j < sizeof(units)/sizeof(char) -1; j++)
tmp /= 1000;
double percent = 10.0;
percent = tmp/percent*100;
sprintf(bps[i],"%.3lf %c" ,tmp, units[j]);
if(i == 0){
//printf("[RX] pps: %spps %sbps(%.2lf %), pkt_size: %d \n", pps[i], bps[i], percent, p_size);
printf("[RX] pps: %spps %sbps(", pps[i], bps[i]);
if(percent >= 99){
START_GRN
printf("%.2lf %%",percent);
END
}else{
START_YLW
printf("%.2lf %%",percent);
END
}
printf("), pkt_size: ");
START_RED
printf("%d \n", p_size);
END
}else{
/*
printf("[TX] pps: %spps %sbps(%.2lf %%), pkt_size: ", pps[i], bps[i], percent);
*/
printf("[TX] pps: %spps %sbps(", pps[i], bps[i]);
if(percent >= 99){
START_GRN
printf("%.2lf %%",percent);
END
}else{
START_YLW
printf("%.2lf %%",percent);
END
}
printf("), pkt_size: ");
START_RED
printf("%d \n", p_size);
END
}
}else{
if(i == 0)
printf("[RX] pps: None\n");
else
printf("[TX] pps: None\n");
}
}
int second = elapsed_time%60;
int minute = elapsed_time/60;
printf("\nElapsed:%3d m %3d s\n(ctrl + c) to stop.\n", minute, second);
#if 0
for(i = 0; i<STATUS_SIZE; i++)
{
if(i % 512 ==0)
printf("\n\n");
if(buf_idx[i] == 1){
START_GRN
printf("%d ", buf_idx[i]);
END
}else if(buf_idx[i] == 2){
START_RED
printf("%d ", buf_idx[i]);
END
}else if(buf_idx[i] == 3){
START_BLU
printf("%d ", buf_idx[i]);
END
}else{
printf("%d ", buf_idx[i]);
}
}
printf("\n");
#endif
prev_pkt[0] = cur_pkt[0];
prev_pkt[1] = cur_pkt[1];
}
//sleep(1);
}
}
|
7bf242465649832fd142bfa9dd757c07f4235cba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================================================
// Copyright 2015 Asgeir Bjorgan, Lise Lyngsnes Randeberg, Norwegian University of Science and Technology
// Distributed under the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================================================
#include "melanin.h"
#define div13 1.0f/3.0f
#include "inv.h"
#define A 0.14386f
//#define A 0.17f
#define d1 0.0001f
#define de 0.0001f
#define NUM_ITERATIONS 15
// Per-pixel Newton inversion of the epidermal absorption coefficient (muae) in a
// two-layer isotropic diffusion reflectance model, so that the modelled reflectance
// matches the measured value in lineData. Layer 1 = epidermis, layer 2 = dermis.
// Layout: blockIdx.y (+ startblockInd) selects the wavelength band, blockIdx.x the
// image block, threadIdx.x the sample within the pitch; no bounds check — assumes
// blockDim.x <= pitch (TODO confirm at launch site).
// On exit muae[ind] holds the converged (or NUM_ITERATIONS-step) estimate.
__global__ void ReflIsoL2InvertMuae(float *muae, float *muse, float *muad, float *musd, float *gcol, float *lineData, size_t pitch, int startblockInd){
#ifdef INV_MUAE_BLOCKDIM_64_QUICKFIX
//NB FIXME: this index extraction was built for a smaller block dimension than 160
int egBlockInd = (blockIdx.x*64 + threadIdx.x)/160; //index of the blocks as they were originally meant to be in the original grid
int ind = ((gridDim.x*(blockIdx.y+startblockInd))*pitch*2)/5 + egBlockInd*pitch + blockIdx.x*64-egBlockInd*32*5 + threadIdx.x; //the pitch is made for the largest possible block size, but this uses too many registers for that block size to be valid and needs something less. Therefore some index hackin'
#else
int ind = (gridDim.x*(blockIdx.y+startblockInd) + blockIdx.x)*pitch+ threadIdx.x;
#endif
float musr2 = gcol[ind]; //NB: despite the array name this is not musr, it is the anisotropy factor g. FIXME
//reduced scattering coefficients: musr = mus*(1-g)
float musr1 = muse[ind]*(1.0f-musr2);
musr2 = musd[ind]*(1.0f-musr2);
//move mua into thread-local registers (NOTE(review): original comment said "shared memory", but these are plain locals)
float mua1 = muae[ind];
float mua2 = muad[ind];
//diffusion constant and optical penetration depth of the dermal layer — constant over the Newton iterations
float D2 = fdividef(1.0f,3.0f*(musr2 + mua2));
float del2 = sqrtf(fdividef(D2,mua2));
float res; //modelled reflectance at the current mua1 iterate
float derivmuae; //d(res)/d(mua1), the Newton derivative
float currLineData = lineData[ind]; //measured reflectance to match
//dermal-side factors, also loop-invariant
float f2 = 1.0f + fdividef(del2 ,(D2 * 3.0f));
float f6 = D2-del2*fdividef(del2, (D2*9.0f));
for (int i=0; i < NUM_ITERATIONS; ++i){
//epidermal diffusion constant and penetration depth depend on the current mua1 iterate
float D1 = fdividef(1.0f,3.0f*(musr1 + mua1));
float del1 = sqrtf(fdividef(D1,mua1));
float div1D1D1 = fdividef(1.0f, D1*D1);
float coshval = coshf(fdividef(d1, del1));
float sinhval = sinhf(fdividef(d1, del1));
//result: assemble the reflectance from the model factors f1..f7
float f1 = (del1*del1 * fdividef(del2 , 3.0f) - del1*del1 * D2) * coshval + (powf(del1, 3.0f) * fdividef(D2 , D1*3.0f) - del1 * del2 * D1) * sinhval;
float f3 = fdividef(musr2, musr1) * del2 * del2 * D1 * (del1*del1 * fdividef(div1D1D1,9.0f) - 1.0f) + del1*del1 * f6;
float f4 = expf(-fdividef(d1 , (D1 *3.0f)));
float f5 = del1*del1 * fdividef(div1D1D1,9.0f) - 1.0f;
float f7 = D1 * del1 * (D2 + del2 * A) * coshval + (del2 * D1*D1 + D2 * del1*del1 * A) * sinhval;
float fact = fdividef((f1 * f2 + f3 * f4) , (f5 * f2 * f7));
res = del1 * musr1 * A * fact;
//analytic derivatives of each factor with respect to mua1 (chain rule through D1 and del1)
float dD1d1 = -3.0f*D1*D1;
float ddel1d1 = (dD1d1*mua1-D1)*fdividef(1.0f, mua1*mua1)*fdividef(1.0f, 2.0f*del1);
float df1d1 = (fdividef(2.0f , 3.0f) * del1 * del2 * ddel1d1 - 2.0f * del1 * D2 * ddel1d1) * coshval - (del1*del1 * fdividef(del2 , 3.0f) - del1*del1 * D2) * sinhval * d1 * fdividef(1.0f, del1*del1) * ddel1d1 + (del1*del1 * fdividef(D2 , D1) * ddel1d1 - powf(del1, 3.0f) * D2 * (div1D1D1) * fdividef(dD1d1 , 3.0f) - ddel1d1 * del2 * D1 - del1 * del2 * dD1d1) * sinhval - (powf(del1, 3.0f) * fdividef(D2 , (D1 * 3.0f)) - del1 * del2 * D1) * coshval * d1 * fdividef(1.0f, del1*del1) * ddel1d1;
float df3d1 = fdividef(musr2 , musr1) * del2 * del2 * dD1d1 * (del1*del1 * fdividef(div1D1D1,9.0f) - 1.0f) + fdividef(musr2 , musr1) * del2 * del2 * D1 * (fdividef(2.0f , 9.0f) * del1 * (div1D1D1) * ddel1d1 - fdividef(2.0f , 9.0f) * del1*del1 * powf(D1, -3.0f) * dD1d1) + 2.0f * del1 * (f6) * ddel1d1;
float df4d1 = d1 * (div1D1D1) * dD1d1 * f4 *fdividef(1.0f, 3.0f);
float df5d1 = fdividef(2.0f , 9.0f) * del1 * (div1D1D1) * ddel1d1 - fdividef(2.0f , 9.0f) * del1*del1 * powf(D1, -3.0f) * dD1d1;
float df7d1 = dD1d1 * del1 * (D2 + del2 * A) * coshval + D1 * ddel1d1 * (D2 + del2 * A) * coshval - fdividef(D1 , del1) * (D2 + del2 * A) * sinhval * d1 * ddel1d1 + (2.0f * del2 * D1 * dD1d1 + 2.0f * D2 * del1 * A * ddel1d1) * sinhval - (del2 * D1*D1 + D2 * del1*del1 * A) * coshval * d1 * fdividef(1.0f, del1*del1) * ddel1d1;
derivmuae = ddel1d1 * musr1 * A * fact + del1 * musr1 * A * (df1d1 * f2 + df3d1 * f4 + f3 * df4d1) *fdividef(1.0f, (f5 * f2 * f7)) - del1 * musr1 * A * fact * fdividef(1.0f, f5) * df5d1 - del1 * musr1 * A * fact * fdividef(1.0f, f7) * df7d1;
//newton's method step towards the measured reflectance
mua1 = mua1 - fdividef(res-currLineData, derivmuae);
//correction in case mua wants to be negative, which we seriously don't want:
//branchless reset of negative iterates to 1.0f (signbit is 1 for negatives, 0 otherwise)
mua1 = mua1*(1-signbit(mua1)) + signbit(mua1);
}
muae[ind] = mua1;
}
//takes in pre-allocated arrays and the arrays containing the bases of the different absorption coefficients, fills the skin data arrays with the optical properties
//takes in pre-allocated arrays and the arrays containing the bases of the different absorption coefficients, fills the skin data arrays with the optical properties
// For each (band, pixel) sample this computes absorption (muae, muad) from melanin
// type/amount and blood parameters, and scattering (muse, musd) from a Mie+Rayleigh
// power-law model. One thread per pixel, blockIdx.y (+ startblockind) per band.
// Outputs written: muae, muad, muse, musd. NOTE(review): melanin_base and musb are
// computed/passed but unused in the active code paths (only in commented-out variants).
__global__ void calcSkinData(float *wlens, float *oxy_arr, float *Bd_arr, float *muam694_arr, float *melanintype_arr, float *muae, float *muse, float *muad, float *musd,
float *muh_oxy, float *muh_deoxy, float *melanin_base, float *musm, float *musr, float *musb_base, size_t pitch, int startblockind){
//walk down the lines, walk along the blocks, walk along the threads inside the block
int index = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch + threadIdx.x;
//absorption properties: hematocrit (H), reference hematocrit (H0), epidermal blood fraction (Be)
float H = 0.41;
float H0 = 0.45;
float Be = 0.002;
//chromophore maps are per-pixel only (no band dimension)
int chromInd = blockIdx.x*pitch + threadIdx.x;
float oxy = oxy_arr[chromInd];
float Bd = Bd_arr[chromInd];
float muam694 = muam694_arr[chromInd];
int melanintype = melanintype_arr[chromInd]; //NOTE(review): float->int truncation of the type code — intended?
float mua_other = 25; //FIXME
float muab_blood = (muh_oxy[index]*oxy + muh_deoxy[index]*(1-oxy))*fdividef(H,H0);
//broadcast the band's wavelength to the whole block via shared memory
__shared__ float wlen;
if (threadIdx.x == 0){
wlen = wlens[blockIdx.y+startblockind];
}
__syncthreads();
//select one melanin absorption model via boolean masks (exactly one factor is nonzero)
float mua_melanin = muam694*((melanintype == SVAASAND_MELANIN_GPU)*powf(fdividef(694.0f,wlen), 3.46) + (melanintype == EUMELANIN_GPU)*expf(-kEu*fdividef(wlen-694.0f,694.0f)) + (melanintype == PHEOMELANIN_GPU)*expf(-kPheo*fdividef(wlen-694.0f,694.0f)));
muae[index] = mua_melanin + muab_blood*Be + mua_other*(1-Be);
muad[index] = muab_blood*Bd + mua_other*(1-Bd);
//scattering properties
float c_ray = 1.05e12;
float c_mie = 105;
float must = musm[index]*c_mie*100 + musr[index]*c_ray*100; //NOTE(review): dead store — overwritten two lines below
//float f = 0.64;
float aMie = 18.780, bMie = 0.22, aRay = 17.6;
//NOTE(review): pow() here is the double-precision overload (and 500 vs 500.0f is inconsistent); powf would keep this all-float
must = 100*(aMie*pow(wlen/500.0f, -bMie) + aRay*pow(wlen/500, -4));
float gcol = 0.62 + wlen*29e-5; //wavelength-dependent anisotropy factor
must /= (1-gcol); //convert reduced scattering back to mus
//float b = 0.91;
//must = 3000*(f*powf(wlen/500.0f,-4) + (1-f)*powf(wlen/500.0f, -b))/(1-(0.62 + wlen*29e-5));
float musb685 = 55.09e-12;
float ve = 1.25e-16;
float musb = musb685*H*(1-H)*(1.4-H)*fdividef(1.0f,ve)*musb_base[index]; //blood scattering — currently unused (see commented tails below)
muse[index] = must;//*(1-Be);//+musb*Be;
musd[index] = must;//*(1-Bd);//+musb*Bd;
}
// Debug/benchmark kernel: performs a SINGLE Newton step of the muad inversion
// (same math as one iteration of ReflIsoL2InvertMuad) and then discards the result —
// nothing is written back to global memory, so this kernel has no observable effect
// (and the compiler may eliminate most of its body). Kept for testing only.
__global__ void test(float *muae, float *muse, float *muad, float *musd, float *gcol, float *lineData, size_t pitch, int startblockind){
int ind = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch+ threadIdx.x;
float musr2 = gcol[ind]; //NB: despite the array name this is not musr, it is the anisotropy factor g. FIXME
//reduced scattering coefficients
float musr1 = muse[ind]*(1.0f-musr2);
musr2 = musd[ind]*(1.0f-musr2);
//move mua into local memory
float mua1 = muae[ind];
float mua2 = muad[ind];
float currLineData = lineData[ind];
float res; //modelled reflectance
float derivmuad; //d(res)/d(mua2)
//diffusion constant of the epidermal layer (independent of mua2)
float D1 = fdividef(1.0f,3.0f*(musr1 + mua1));
float musr2dmusr1 = fdividef(musr2,musr1); //musr2 divided by musr1, keep for derivative calc
//optical penetration depth
float del1 = sqrtf(fdividef(D1,mua1));
float sinhval = sinhf(fdividef(de, del1));
float coshval = coshf(fdividef(de, del1));
float expval = expf(-fdividef(de,D1)*div13);
//dermal layer quantities depend on the mua2 iterate
float D2 = fdividef(1.0f,3.0f*(musr2 + mua2));
float del2 = sqrtf(fdividef(D2,mua2));
//from Svaasand 1995
//calculate the reflectance value
float f1 = (del1*del1*del2*div13-del1*del1*D2)*coshval+(del1*del1*del1*fdividef(D2,D1)*div13 - del1*del2*D1)*sinhval; //keeping for derivative calc
float f2 = 1.0f+fdividef(del2, D2)*div13; //keeping for derivative calc
float f3 = (musr2dmusr1*del2*del2*D1*(del1*fdividef(del1,D1*D1)*div13*div13-1.0f)+del1*del1*(D2-del2*fdividef(del2,D2)*div13*div13))*expval; //keeping for derivative calc
float f4 = del1*fdividef(del1,D1*D1)*div13*div13 - 1.0f; //keep for derivative calc, f4
float f5 = fdividef(del2,D2)*div13+1.0f; //keep for derivative calc, f5
float f6 = D1*del1*(D2+del2*A)*coshval+(D1*D1*del2 + D2*del1*del1*A)*sinhval; //keep for derivative calc, f6
float num = del1*musr1*A*(f1*f2+f3);
float denom = f4*f5*f6;
res = fdividef(num, denom);
//calculate the derivative with respect to muad (chain rule through D2 and del2)
float dD2dmuad = -3.0f*D2*D2;
float ddel2dmuad = (dD2dmuad*mua2-D2)*fdividef(1.0f, mua2*mua2)*fdividef(1.0f, 2.0f*del2);
float df2dmuad = div13*(ddel2dmuad*D2 - del2*dD2dmuad)*fdividef(1.0f, D2*D2);
derivmuad = (del1*musr1*A*((coshval*(del1*del1*div13*ddel2dmuad - del1*del1*dD2dmuad) + sinhval*(del1*del1*del1*fdividef(1.0f, D1)*div13*dD2dmuad - del1*D1*ddel2dmuad))*f2 + f1*df2dmuad + expval*(musr2dmusr1*2.0f*del2*ddel2dmuad*D1*(fdividef(del1*del1,9.0f*D1*D1)-1.0f) + del1*del1*(dD2dmuad + fdividef(1.0f, 9.0f*mua2*mua2))))*denom - (f4*(df2dmuad*f6 + D1*del1*(dD2dmuad + ddel2dmuad*A)*coshval + (D1*D1*ddel2dmuad + dD2dmuad*del1*del1*A)*sinhval*f5)*num))*fdividef(1.0f, denom*denom);
//calculate next mua2
//newton's method
mua2 = mua2 - fdividef(res-currLineData, derivmuad);
//correction in case muad wants to be negative, which we seriously don't want
mua2 = mua2*(1-signbit(mua2)) + signbit(mua2); //NOTE(review): result is never stored — intentionally a no-op kernel?
}
#define BLOCK_DIM 160
// Per-pixel Newton inversion of the dermal absorption coefficient (muad) in the
// two-layer isotropic diffusion reflectance model (Svaasand 1995), matching the
// measured reflectance in lineData. Epidermal quantities (D1, del1, cosh/sinh/exp
// factors) are loop-invariant and hoisted; dermal quantities are recomputed each step.
// Layout: blockIdx.y (+ startblockind) per band, blockIdx.x per image block,
// threadIdx.x per sample within the pitch. On exit muad[ind] holds the estimate.
__global__ void ReflIsoL2InvertMuad(float *muae, float *muse, float *muad, float *musd, float *gcol, float *lineData, size_t pitch, int startblockind){
int ind = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch+ threadIdx.x;
float musr2 = gcol[ind]; //NB: despite the array name this is not musr, it is the anisotropy factor g. FIXME
//reduced scattering coefficients
float musr1 = muse[ind]*(1.0f-musr2);
musr2 = musd[ind]*(1.0f-musr2);
//move mua into local memory
float mua1 = muae[ind];
float mua2 = muad[ind];
float currLineData = lineData[ind];
float res; //modelled reflectance at the current mua2 iterate
float derivmuad; //d(res)/d(mua2), the Newton derivative
//diffusion constant of the epidermal layer (independent of mua2, so loop-invariant)
float D1 = fdividef(1.0f,3.0f*(musr1 + mua1));
float musr2dmusr1 = fdividef(musr2,musr1); //musr2 divided by musr1, keep for derivative calc
//optical penetration depth
float del1 = sqrtf(fdividef(D1,mua1));
float sinhval = sinhf(fdividef(de, del1));
float coshval = coshf(fdividef(de, del1));
float expval = expf(-fdividef(de,D1)*div13);
for (int i=0; i < NUM_ITERATIONS; ++i){
//result (closed-form variants kept below for reference)
//res = (musr1 * sqrtf(1.0f / (musr1 + mua1) / mua1) * sqrtf(3.0f) * ((-musr1 / 2.0f + mua1) * (3 * musr2 + mua2) * mua2 * sqrtf((1 / (3 * musr2 + mua2) / mua2)) + (3.0f / 2.0f * musr2 - mua2) * mua1 - 3.0f / 2.0f * musr1 * mua2) * sinh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f / 2.0f)) + 4.0f * (-3.0f / 8.0f * musr2 + mua2) * musr1 * cosh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f / 2.0f)) + expf(-de * (musr1 + mua1)) * (3.0f * musr2 * mua1 - 4.0f * musr1 * mua2)) * sqrtf(1.0f / (musr1 + mua1) / mua1) * mua1 * sqrtf(3.0f) * (musr1 + mua1) * A / (-musr1 / 2.0f + mua1) / (3.0f + sqrtf((1 / (3 * musr2 + mua2) / mua2)) * (3 * musr2 + mua2)) / ((mua1 * (3 * musr2 + mua2) * sqrtf((1 / (3 * musr2 + mua2) / mua2)) + 3.0f * A * (musr1 + mua1)) * sinh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f / 2.0f)) + sqrtf(1.0f / (musr1 + mua1) / mua1) * mua1 * sqrtf(3.0f) * (musr1 + mua1) * cosh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f / 2.0f)) * (1.0f + A * (3 * musr2 + mua2) * sqrtf((1 / (3 * musr2 + mua2) / mua2)))) / mua2;
//derivative
//derivmuad = -powf((3 * musr2 + mua2), -1.0f / 2.0f) * A * musr2 * (expf(-de * (musr1 + mua1)) * (3.0f * sqrtf(mua2) * (A * (musr1 + mua1) + mua1 / 9.0f + 4.0f / 9.0f * musr1) * mua1 * sqrtf((3 * musr2 + mua2)) + (A * (musr1 + mua1) + mua1) * (2.0f * musr1 * mua2 + (3.0f / 2.0f * musr2 + mua2) * mua1)) * sinh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) + 2.0f * (expf(-de * (musr1 + mua1)) * (2.0f / 3.0f * ((A / 4.0f + 3.0f / 4.0f) * powf(mua1, 3.0f / 2.0f) + musr1 * sqrtf(mua1) * A) * sqrtf(mua2) * sqrtf((3 * musr2 + mua2)) + (A + 1.0f / 3.0f) * ((3.0f / 4.0f * musr2 + mua2 / 2.0f) * powf(mua1, 3.0f / 2.0f) + mua2 * musr1 * sqrtf(mua1))) * cosh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) - 5.0f / 4.0f * (mua2 + 3.0f / 5.0f * sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) + 3.0f / 0.10e2 * musr2) * (A + 1.0f / 3.0f) * musr1 * sqrtf(mua1)) * sqrtf(3.0f) * sqrtf(musr1 + mua1)) * sqrtf(3.0f) * sqrtf(musr1 + mua1) * powf(mua2, 3.0f / 2.0f) * sqrtf(mua1) * powf(mua2 + sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) / 3.0f, -2.0f) / (-musr1 / 2.0f + mua1) * powf((mua1 * sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) / 3.0f + A * mua2 * (musr1 + mua1)) * sinh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) + sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1) * cosh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) * (mua2 + sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) * A) / 3.0f, -2.0f) / 9.0f;
//res += currLineData*(i+1);
//derivmuad += currLineData*2*(i+1);
//res += gcol[ind];
//dermal layer quantities depend on the current mua2 iterate
float D2 = fdividef(1.0f,3.0f*(musr2 + mua2));
float del2 = sqrtf(fdividef(D2,mua2));
//from Svaasand 1995
//calculate the reflectance value
float f1 = (del1*del1*del2*div13-del1*del1*D2)*coshval+(del1*del1*del1*fdividef(D2,D1)*div13 - del1*del2*D1)*sinhval; //keeping for derivative calc
float f2 = 1.0f+fdividef(del2, D2)*div13; //keeping for derivative calc
float f3 = (musr2dmusr1*del2*del2*D1*(del1*fdividef(del1,D1*D1)*div13*div13-1.0f)+del1*del1*(D2-del2*fdividef(del2,D2)*div13*div13))*expval; //keeping for derivative calc
float f4 = del1*fdividef(del1,D1*D1)*div13*div13 - 1.0f; //keep for derivative calc, f4
float f5 = fdividef(del2,D2)*div13+1.0f; //keep for derivative calc, f5
float f6 = D1*del1*(D2+del2*A)*coshval+(D1*D1*del2 + D2*del1*del1*A)*sinhval; //keep for derivative calc, f6
float num = del1*musr1*A*(f1*f2+f3);
float denom = f4*f5*f6;
res = fdividef(num, denom);
//calculate the derivative with respect to muad (chain rule through D2 and del2)
float dD2dmuad = -3.0f*D2*D2;
float ddel2dmuad = (dD2dmuad*mua2-D2)*fdividef(1.0f, mua2*mua2)*fdividef(1.0f, 2.0f*del2);
float df2dmuad = div13*(ddel2dmuad*D2 - del2*dD2dmuad)*fdividef(1.0f, D2*D2);
derivmuad = (del1*musr1*A*((coshval*(del1*del1*div13*ddel2dmuad - del1*del1*dD2dmuad) + sinhval*(del1*del1*del1*fdividef(1.0f, D1)*div13*dD2dmuad - del1*D1*ddel2dmuad))*f2 + f1*df2dmuad + expval*(musr2dmusr1*2.0f*del2*ddel2dmuad*D1*(fdividef(del1*del1,9.0f*D1*D1)-1.0f) + del1*del1*(dD2dmuad + fdividef(1.0f, 9.0f*mua2*mua2))))*denom - (f4*(df2dmuad*f6 + D1*del1*(dD2dmuad + ddel2dmuad*A)*coshval + (D1*D1*ddel2dmuad + dD2dmuad*del1*del1*A)*sinhval*f5)*num))*fdividef(1.0f, denom*denom);
//calculate next mua2
//newton's method
mua2 = mua2 - fdividef(res-currLineData, derivmuad);
//correction in case muad wants to be negative, which we seriously don't want:
//branchless reset of negative iterates to 1.0f
mua2 = mua2*(1-signbit(mua2)) + signbit(mua2);
}
muad[ind] = mua2;
}
//find straight line through input mua, return as the value at wavelength w
//find straight line through input mua, return as the value at wavelength w
// Ordinary least-squares fit of mua(wavelength) over bands [startwlenind, endwlenind),
// evaluated at `wlen` and written to the per-pixel output res. One thread per sample
// along the pitch; blockIdx.x selects the image block. Assumes endwlenind > startwlenind
// (num == 0 would divide by zero) — TODO confirm at call site.
__global__ void StraightLine(float *wavelengths, float *mua, float wlen, float *res, int startwlenind, int endwlenind, size_t pitch){
float xbar = 0;
float ybar = 0;
float xybar = 0;
float x2bar = 0;
int num = 0;
__shared__ float w; //current band's wavelength, broadcast to the whole block by thread 0
for (int i=startwlenind; i < endwlenind; i++){
num++;
int ind = (gridDim.x*i + blockIdx.x)*pitch + threadIdx.x;
if (threadIdx.x == 0){
w = wavelengths[i];
}
__syncthreads();
float muaval = mua[ind];
xbar += w;
ybar += muaval;
xybar += w*muaval;
x2bar += w*w;
//BUGFIX: second barrier is required — without it thread 0 may overwrite w with the
//next band's wavelength while other threads are still reading the current value
//(write-after-read race across loop iterations).
__syncthreads();
}
xbar /= num;
ybar /= num;
xybar /= num;
x2bar /= num;
//least-squares slope a and intercept b
float a = (xybar - xbar*ybar)/(x2bar - xbar*xbar);
float b = ybar - a*xbar;
int ind = blockIdx.x*pitch + threadIdx.x;
res[ind] = a*wlen + b;
}
// Per-pixel weighted sum: accumulates mua[.]*multVec[.] across the band range
// [startwlenind, endwlenind), scales the sum by `factor`, and writes it to res.
// One thread per sample along the pitch; blockIdx.x selects the image block.
__global__ void MultVector(float *multVec, float *mua, float *res, float factor, int startwlenind, int endwlenind, size_t pitch){
float acc = 0;
for (int band = startwlenind; band < endwlenind; ++band){
int sampleInd = (gridDim.x*band + blockIdx.x)*pitch + threadIdx.x;
acc += mua[sampleInd]*multVec[sampleInd];
}
int outInd = blockIdx.x*pitch + threadIdx.x;
res[outInd] = factor*acc;
}
// Evaluates the two-layer isotropic diffusion reflectance model at every sample and
// writes the modelled reflectance to res. Thin per-pixel wrapper around ReflIsoL2Calc:
// converts raw scattering to reduced scattering (mus*(1-g)) before the call.
__global__ void ReflIsoL2(float *muae, float *muse, float *muad, float *musd, float *gcol, float *res, size_t pitch, int startblockind){
int ind = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch+ threadIdx.x;
float g = gcol[ind]; //NB: despite the array name this holds the anisotropy factor g, not musr. FIXME
float oneMinusG = 1.0f-g;
//reduced scattering coefficients of the epidermal (1) and dermal (2) layers
float musr1 = muse[ind]*oneMinusG;
float musr2 = musd[ind]*oneMinusG;
res[ind] = ReflIsoL2Calc(muae[ind], musr1, muad[ind], musr2);
}
// Closed-form diffuse reflectance of a two-layer medium (Svaasand 1995 model).
// Inputs: absorption (mua1, mua2) and reduced scattering (musr1, musr2) of the
// epidermal (1) and dermal (2) layers; the epidermal thickness `de` and boundary
// constant `A` are file-level #defines. Returns the modelled reflectance.
__device__ float ReflIsoL2Calc(float mua1, float musr1, float mua2, float musr2){
//diffusion constant of layer 1
float D1 = fdividef(1.0f,3.0f*(musr1 + mua1));
float musr2dmusr1 = fdividef(musr2,musr1); //musr2 divided by musr1, keep for derivative calc
//optical penetration depth of layer 1
float del1 = sqrtf(fdividef(D1,mua1));
float sinhval = sinhf(fdividef(de, del1));
float coshval = coshf(fdividef(de, del1));
float fact1 = del1*musr1*A;
float expval = expf(-fdividef(de,D1)*div13);
//diffusion constant and penetration depth of layer 2
float D2 = fdividef(1.0f,3.0f*(musr2 + mua2));
float del2 = sqrtf(fdividef(D2,mua2));
//from Svaasand 1995
//calculate the reflectance value from the factors f1..f6
float f1 = (del1*del1*del2*div13-del1*del1*D2)*coshval+(del1*del1*del1*fdividef(D2,D1)*div13 - del1*del2*D1)*sinhval; //keeping for derivative calc
float f2 = 1.0f+fdividef(del2, D2)*div13; //keeping for derivative calc
float f3 = (musr2dmusr1*del2*del2*D1*(del1*fdividef(del1,D1*D1)*div13*div13-1.0f)+del1*del1*(D2-del2*fdividef(del2,D2)*div13*div13))*expval; //keeping for derivative calc
float f4 = del1*fdividef(del1,D1*D1)*div13*div13 - 1.0f; //keep for derivative calc, f4
float f5 = fdividef(del2,D2)*div13+1.0f; //keep for derivative calc, f5
float f6 = D1*del1*(D2+del2*A)*coshval+(D1*D1*del2 + D2*del1*del1*A)*sinhval; //keep for derivative calc, f6
float num = fact1*(f1*f2+f3);
float denom = f4*f5*f6;
return fdividef(num,denom);
}
// Per-pixel residual of the spectral unmixing: reconstructs the dermal absorption
// mua2 for each band from the endmember matrix AT and abundances x, evaluates the
// forward reflectance model, and writes the root-sum-square error against the
// measured reflectance (inputres) to outputres. blockIdx.x selects the image block,
// threadIdx.x the sample within the pitch; bands are walked in-thread.
__global__ void ReflIsoL2ErrorCheck(float *muae, float *muse, float *musd, float *gcol, float *AT, float *x, int endmembers, int numbands, float *inputres, float *outputres, size_t pitch, int startblockind, int diff){
float error = 0;
__shared__ float Aval; //one AT entry at a time, broadcast to the block by thread 0
for (int i=0; i < numbands; i++){
int ind = (gridDim.x*(i+startblockind) + blockIdx.x)*pitch+ threadIdx.x;
float musr2 = gcol[ind]; //NB: despite the array name this is not musr, it is the anisotropy factor g. FIXME
//reduced scattering coefficients
float musr1 = muse[ind]*(1.0f-musr2);
musr2 = musd[ind]*(1.0f-musr2);
//calculate mua: epidermal from the input map, dermal from the endmember expansion
float mua1 = muae[ind];
float mua2 = 0;
for (int j=0; j < endmembers; j++){
if (threadIdx.x == 0){
Aval = AT[j*numbands + i + diff];
}
__syncthreads();
mua2 += Aval*x[(gridDim.x*j + blockIdx.x)*pitch + threadIdx.x];
//BUGFIX: second barrier is required — without it thread 0 may overwrite Aval with
//the next endmember's coefficient while other threads are still reading the current
//value (write-after-read race across loop iterations).
__syncthreads();
}
float res = ReflIsoL2Calc(mua1, musr1, mua2, musr2);
float measval = inputres[ind];
error += (res-measval)*(res-measval);
}
outputres[blockIdx.x*pitch + threadIdx.x] = sqrtf(error);
}
| 7bf242465649832fd142bfa9dd757c07f4235cba.cu | //=======================================================================================================
// Copyright 2015 Asgeir Bjorgan, Lise Lyngsnes Randeberg, Norwegian University of Science and Technology
// Distributed under the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================================================
#include "melanin.h"
#define div13 1.0f/3.0f
#include "inv.h"
#define A 0.14386f
//#define A 0.17f
#define d1 0.0001f
#define de 0.0001f
#define NUM_ITERATIONS 15
// Per-pixel Newton inversion of the epidermal absorption coefficient (muae) in a
// two-layer isotropic diffusion reflectance model, so that the modelled reflectance
// matches the measured value in lineData. Layer 1 = epidermis, layer 2 = dermis.
// Layout: blockIdx.y (+ startblockInd) selects the wavelength band, blockIdx.x the
// image block, threadIdx.x the sample within the pitch; no bounds check — assumes
// blockDim.x <= pitch (TODO confirm at launch site).
// On exit muae[ind] holds the converged (or NUM_ITERATIONS-step) estimate.
__global__ void ReflIsoL2InvertMuae(float *muae, float *muse, float *muad, float *musd, float *gcol, float *lineData, size_t pitch, int startblockInd){
#ifdef INV_MUAE_BLOCKDIM_64_QUICKFIX
//NB FIXME: this index extraction was built for a smaller block dimension than 160
int egBlockInd = (blockIdx.x*64 + threadIdx.x)/160; //index of the blocks as they were originally meant to be in the original grid
int ind = ((gridDim.x*(blockIdx.y+startblockInd))*pitch*2)/5 + egBlockInd*pitch + blockIdx.x*64-egBlockInd*32*5 + threadIdx.x; //the pitch is made for the largest possible block size, but this uses too many registers for that block size to be valid and needs something less. Therefore some index hackin'
#else
int ind = (gridDim.x*(blockIdx.y+startblockInd) + blockIdx.x)*pitch+ threadIdx.x;
#endif
float musr2 = gcol[ind]; //NB: despite the array name this is not musr, it is the anisotropy factor g. FIXME
//reduced scattering coefficients: musr = mus*(1-g)
float musr1 = muse[ind]*(1.0f-musr2);
musr2 = musd[ind]*(1.0f-musr2);
//move mua into thread-local registers (NOTE(review): original comment said "shared memory", but these are plain locals)
float mua1 = muae[ind];
float mua2 = muad[ind];
//diffusion constant and optical penetration depth of the dermal layer — constant over the Newton iterations
float D2 = fdividef(1.0f,3.0f*(musr2 + mua2));
float del2 = sqrtf(fdividef(D2,mua2));
float res; //modelled reflectance at the current mua1 iterate
float derivmuae; //d(res)/d(mua1), the Newton derivative
float currLineData = lineData[ind]; //measured reflectance to match
//dermal-side factors, also loop-invariant
float f2 = 1.0f + fdividef(del2 ,(D2 * 3.0f));
float f6 = D2-del2*fdividef(del2, (D2*9.0f));
for (int i=0; i < NUM_ITERATIONS; ++i){
//epidermal diffusion constant and penetration depth depend on the current mua1 iterate
float D1 = fdividef(1.0f,3.0f*(musr1 + mua1));
float del1 = sqrtf(fdividef(D1,mua1));
float div1D1D1 = fdividef(1.0f, D1*D1);
float coshval = coshf(fdividef(d1, del1));
float sinhval = sinhf(fdividef(d1, del1));
//result: assemble the reflectance from the model factors f1..f7
float f1 = (del1*del1 * fdividef(del2 , 3.0f) - del1*del1 * D2) * coshval + (powf(del1, 3.0f) * fdividef(D2 , D1*3.0f) - del1 * del2 * D1) * sinhval;
float f3 = fdividef(musr2, musr1) * del2 * del2 * D1 * (del1*del1 * fdividef(div1D1D1,9.0f) - 1.0f) + del1*del1 * f6;
float f4 = expf(-fdividef(d1 , (D1 *3.0f)));
float f5 = del1*del1 * fdividef(div1D1D1,9.0f) - 1.0f;
float f7 = D1 * del1 * (D2 + del2 * A) * coshval + (del2 * D1*D1 + D2 * del1*del1 * A) * sinhval;
float fact = fdividef((f1 * f2 + f3 * f4) , (f5 * f2 * f7));
res = del1 * musr1 * A * fact;
//analytic derivatives of each factor with respect to mua1 (chain rule through D1 and del1)
float dD1d1 = -3.0f*D1*D1;
float ddel1d1 = (dD1d1*mua1-D1)*fdividef(1.0f, mua1*mua1)*fdividef(1.0f, 2.0f*del1);
float df1d1 = (fdividef(2.0f , 3.0f) * del1 * del2 * ddel1d1 - 2.0f * del1 * D2 * ddel1d1) * coshval - (del1*del1 * fdividef(del2 , 3.0f) - del1*del1 * D2) * sinhval * d1 * fdividef(1.0f, del1*del1) * ddel1d1 + (del1*del1 * fdividef(D2 , D1) * ddel1d1 - powf(del1, 3.0f) * D2 * (div1D1D1) * fdividef(dD1d1 , 3.0f) - ddel1d1 * del2 * D1 - del1 * del2 * dD1d1) * sinhval - (powf(del1, 3.0f) * fdividef(D2 , (D1 * 3.0f)) - del1 * del2 * D1) * coshval * d1 * fdividef(1.0f, del1*del1) * ddel1d1;
float df3d1 = fdividef(musr2 , musr1) * del2 * del2 * dD1d1 * (del1*del1 * fdividef(div1D1D1,9.0f) - 1.0f) + fdividef(musr2 , musr1) * del2 * del2 * D1 * (fdividef(2.0f , 9.0f) * del1 * (div1D1D1) * ddel1d1 - fdividef(2.0f , 9.0f) * del1*del1 * powf(D1, -3.0f) * dD1d1) + 2.0f * del1 * (f6) * ddel1d1;
float df4d1 = d1 * (div1D1D1) * dD1d1 * f4 *fdividef(1.0f, 3.0f);
float df5d1 = fdividef(2.0f , 9.0f) * del1 * (div1D1D1) * ddel1d1 - fdividef(2.0f , 9.0f) * del1*del1 * powf(D1, -3.0f) * dD1d1;
float df7d1 = dD1d1 * del1 * (D2 + del2 * A) * coshval + D1 * ddel1d1 * (D2 + del2 * A) * coshval - fdividef(D1 , del1) * (D2 + del2 * A) * sinhval * d1 * ddel1d1 + (2.0f * del2 * D1 * dD1d1 + 2.0f * D2 * del1 * A * ddel1d1) * sinhval - (del2 * D1*D1 + D2 * del1*del1 * A) * coshval * d1 * fdividef(1.0f, del1*del1) * ddel1d1;
derivmuae = ddel1d1 * musr1 * A * fact + del1 * musr1 * A * (df1d1 * f2 + df3d1 * f4 + f3 * df4d1) *fdividef(1.0f, (f5 * f2 * f7)) - del1 * musr1 * A * fact * fdividef(1.0f, f5) * df5d1 - del1 * musr1 * A * fact * fdividef(1.0f, f7) * df7d1;
//newton's method step towards the measured reflectance
mua1 = mua1 - fdividef(res-currLineData, derivmuae);
//correction in case mua wants to be negative, which we seriously don't want:
//branchless reset of negative iterates to 1.0f (signbit is 1 for negatives, 0 otherwise)
mua1 = mua1*(1-signbit(mua1)) + signbit(mua1);
}
muae[ind] = mua1;
}
//takes in pre-allocated arrays and the arrays containing the bases of the different absorption coefficients, fills the skin data arrays with the optical properties
//takes in pre-allocated arrays and the arrays containing the bases of the different absorption coefficients, fills the skin data arrays with the optical properties
// For each (band, pixel) sample this computes absorption (muae, muad) from melanin
// type/amount and blood parameters, and scattering (muse, musd) from a Mie+Rayleigh
// power-law model. One thread per pixel, blockIdx.y (+ startblockind) per band.
// Outputs written: muae, muad, muse, musd. NOTE(review): melanin_base and musb are
// computed/passed but unused in the active code paths (only in commented-out variants).
__global__ void calcSkinData(float *wlens, float *oxy_arr, float *Bd_arr, float *muam694_arr, float *melanintype_arr, float *muae, float *muse, float *muad, float *musd,
float *muh_oxy, float *muh_deoxy, float *melanin_base, float *musm, float *musr, float *musb_base, size_t pitch, int startblockind){
//walk down the lines, walk along the blocks, walk along the threads inside the block
int index = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch + threadIdx.x;
//absorption properties: hematocrit (H), reference hematocrit (H0), epidermal blood fraction (Be)
float H = 0.41;
float H0 = 0.45;
float Be = 0.002;
//chromophore maps are per-pixel only (no band dimension)
int chromInd = blockIdx.x*pitch + threadIdx.x;
float oxy = oxy_arr[chromInd];
float Bd = Bd_arr[chromInd];
float muam694 = muam694_arr[chromInd];
int melanintype = melanintype_arr[chromInd]; //NOTE(review): float->int truncation of the type code — intended?
float mua_other = 25; //FIXME
float muab_blood = (muh_oxy[index]*oxy + muh_deoxy[index]*(1-oxy))*fdividef(H,H0);
//broadcast the band's wavelength to the whole block via shared memory
__shared__ float wlen;
if (threadIdx.x == 0){
wlen = wlens[blockIdx.y+startblockind];
}
__syncthreads();
//select one melanin absorption model via boolean masks (exactly one factor is nonzero)
float mua_melanin = muam694*((melanintype == SVAASAND_MELANIN_GPU)*powf(fdividef(694.0f,wlen), 3.46) + (melanintype == EUMELANIN_GPU)*expf(-kEu*fdividef(wlen-694.0f,694.0f)) + (melanintype == PHEOMELANIN_GPU)*expf(-kPheo*fdividef(wlen-694.0f,694.0f)));
muae[index] = mua_melanin + muab_blood*Be + mua_other*(1-Be);
muad[index] = muab_blood*Bd + mua_other*(1-Bd);
//scattering properties
float c_ray = 1.05e12;
float c_mie = 105;
float must = musm[index]*c_mie*100 + musr[index]*c_ray*100; //NOTE(review): dead store — overwritten two lines below
//float f = 0.64;
float aMie = 18.780, bMie = 0.22, aRay = 17.6;
//NOTE(review): pow() here is the double-precision overload (and 500 vs 500.0f is inconsistent); powf would keep this all-float
must = 100*(aMie*pow(wlen/500.0f, -bMie) + aRay*pow(wlen/500, -4));
float gcol = 0.62 + wlen*29e-5; //wavelength-dependent anisotropy factor
must /= (1-gcol); //convert reduced scattering back to mus
//float b = 0.91;
//must = 3000*(f*powf(wlen/500.0f,-4) + (1-f)*powf(wlen/500.0f, -b))/(1-(0.62 + wlen*29e-5));
float musb685 = 55.09e-12;
float ve = 1.25e-16;
float musb = musb685*H*(1-H)*(1.4-H)*fdividef(1.0f,ve)*musb_base[index]; //blood scattering — currently unused (see commented tails below)
muse[index] = must;//*(1-Be);//+musb*Be;
musd[index] = must;//*(1-Bd);//+musb*Bd;
}
// Debug/benchmark kernel: performs a SINGLE Newton step of the muad inversion
// (same math as one iteration of ReflIsoL2InvertMuad) and then discards the result —
// nothing is written back to global memory, so this kernel has no observable effect
// (and the compiler may eliminate most of its body). Kept for testing only.
__global__ void test(float *muae, float *muse, float *muad, float *musd, float *gcol, float *lineData, size_t pitch, int startblockind){
int ind = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch+ threadIdx.x;
float musr2 = gcol[ind]; //NB: despite the array name this is not musr, it is the anisotropy factor g. FIXME
//reduced scattering coefficients
float musr1 = muse[ind]*(1.0f-musr2);
musr2 = musd[ind]*(1.0f-musr2);
//move mua into local memory
float mua1 = muae[ind];
float mua2 = muad[ind];
float currLineData = lineData[ind];
float res; //modelled reflectance
float derivmuad; //d(res)/d(mua2)
//diffusion constant of the epidermal layer (independent of mua2)
float D1 = fdividef(1.0f,3.0f*(musr1 + mua1));
float musr2dmusr1 = fdividef(musr2,musr1); //musr2 divided by musr1, keep for derivative calc
//optical penetration depth
float del1 = sqrtf(fdividef(D1,mua1));
float sinhval = sinhf(fdividef(de, del1));
float coshval = coshf(fdividef(de, del1));
float expval = expf(-fdividef(de,D1)*div13);
//dermal layer quantities depend on the mua2 iterate
float D2 = fdividef(1.0f,3.0f*(musr2 + mua2));
float del2 = sqrtf(fdividef(D2,mua2));
//from Svaasand 1995
//calculate the reflectance value
float f1 = (del1*del1*del2*div13-del1*del1*D2)*coshval+(del1*del1*del1*fdividef(D2,D1)*div13 - del1*del2*D1)*sinhval; //keeping for derivative calc
float f2 = 1.0f+fdividef(del2, D2)*div13; //keeping for derivative calc
float f3 = (musr2dmusr1*del2*del2*D1*(del1*fdividef(del1,D1*D1)*div13*div13-1.0f)+del1*del1*(D2-del2*fdividef(del2,D2)*div13*div13))*expval; //keeping for derivative calc
float f4 = del1*fdividef(del1,D1*D1)*div13*div13 - 1.0f; //keep for derivative calc, f4
float f5 = fdividef(del2,D2)*div13+1.0f; //keep for derivative calc, f5
float f6 = D1*del1*(D2+del2*A)*coshval+(D1*D1*del2 + D2*del1*del1*A)*sinhval; //keep for derivative calc, f6
float num = del1*musr1*A*(f1*f2+f3);
float denom = f4*f5*f6;
res = fdividef(num, denom);
//calculate the derivative with respect to muad (chain rule through D2 and del2)
float dD2dmuad = -3.0f*D2*D2;
float ddel2dmuad = (dD2dmuad*mua2-D2)*fdividef(1.0f, mua2*mua2)*fdividef(1.0f, 2.0f*del2);
float df2dmuad = div13*(ddel2dmuad*D2 - del2*dD2dmuad)*fdividef(1.0f, D2*D2);
derivmuad = (del1*musr1*A*((coshval*(del1*del1*div13*ddel2dmuad - del1*del1*dD2dmuad) + sinhval*(del1*del1*del1*fdividef(1.0f, D1)*div13*dD2dmuad - del1*D1*ddel2dmuad))*f2 + f1*df2dmuad + expval*(musr2dmusr1*2.0f*del2*ddel2dmuad*D1*(fdividef(del1*del1,9.0f*D1*D1)-1.0f) + del1*del1*(dD2dmuad + fdividef(1.0f, 9.0f*mua2*mua2))))*denom - (f4*(df2dmuad*f6 + D1*del1*(dD2dmuad + ddel2dmuad*A)*coshval + (D1*D1*ddel2dmuad + dD2dmuad*del1*del1*A)*sinhval*f5)*num))*fdividef(1.0f, denom*denom);
//calculate next mua2
//newton's method
mua2 = mua2 - fdividef(res-currLineData, derivmuad);
//correction in case muad wants to be negative, which we seriously don't want
mua2 = mua2*(1-signbit(mua2)) + signbit(mua2); //NOTE(review): result is never stored — intentionally a no-op kernel?
}
#define BLOCK_DIM 160
// Per-pixel Newton inversion of the dermal absorption coefficient (muad) in the
// two-layer isotropic diffusion reflectance model (Svaasand 1995), matching the
// measured reflectance in lineData. Epidermal quantities (D1, del1, cosh/sinh/exp
// factors) are loop-invariant and hoisted; dermal quantities are recomputed each step.
// Layout: blockIdx.y (+ startblockind) per band, blockIdx.x per image block,
// threadIdx.x per sample within the pitch. On exit muad[ind] holds the estimate.
__global__ void ReflIsoL2InvertMuad(float *muae, float *muse, float *muad, float *musd, float *gcol, float *lineData, size_t pitch, int startblockind){
int ind = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch+ threadIdx.x;
float musr2 = gcol[ind]; //NB: despite the array name this is not musr, it is the anisotropy factor g. FIXME
//reduced scattering coefficients
float musr1 = muse[ind]*(1.0f-musr2);
musr2 = musd[ind]*(1.0f-musr2);
//move mua into local memory
float mua1 = muae[ind];
float mua2 = muad[ind];
float currLineData = lineData[ind];
float res; //modelled reflectance at the current mua2 iterate
float derivmuad; //d(res)/d(mua2), the Newton derivative
//diffusion constant of the epidermal layer (independent of mua2, so loop-invariant)
float D1 = fdividef(1.0f,3.0f*(musr1 + mua1));
float musr2dmusr1 = fdividef(musr2,musr1); //musr2 divided by musr1, keep for derivative calc
//optical penetration depth
float del1 = sqrtf(fdividef(D1,mua1));
float sinhval = sinhf(fdividef(de, del1));
float coshval = coshf(fdividef(de, del1));
float expval = expf(-fdividef(de,D1)*div13);
for (int i=0; i < NUM_ITERATIONS; ++i){
//result (closed-form variants kept below for reference)
//res = (musr1 * sqrtf(1.0f / (musr1 + mua1) / mua1) * sqrtf(3.0f) * ((-musr1 / 2.0f + mua1) * (3 * musr2 + mua2) * mua2 * sqrtf((1 / (3 * musr2 + mua2) / mua2)) + (3.0f / 2.0f * musr2 - mua2) * mua1 - 3.0f / 2.0f * musr1 * mua2) * sinh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f / 2.0f)) + 4.0f * (-3.0f / 8.0f * musr2 + mua2) * musr1 * cosh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f / 2.0f)) + expf(-de * (musr1 + mua1)) * (3.0f * musr2 * mua1 - 4.0f * musr1 * mua2)) * sqrtf(1.0f / (musr1 + mua1) / mua1) * mua1 * sqrtf(3.0f) * (musr1 + mua1) * A / (-musr1 / 2.0f + mua1) / (3.0f + sqrtf((1 / (3 * musr2 + mua2) / mua2)) * (3 * musr2 + mua2)) / ((mua1 * (3 * musr2 + mua2) * sqrtf((1 / (3 * musr2 + mua2) / mua2)) + 3.0f * A * (musr1 + mua1)) * sinh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f / 2.0f)) + sqrtf(1.0f / (musr1 + mua1) / mua1) * mua1 * sqrtf(3.0f) * (musr1 + mua1) * cosh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f / 2.0f)) * (1.0f + A * (3 * musr2 + mua2) * sqrtf((1 / (3 * musr2 + mua2) / mua2)))) / mua2;
//derivative
//derivmuad = -powf((3 * musr2 + mua2), -1.0f / 2.0f) * A * musr2 * (expf(-de * (musr1 + mua1)) * (3.0f * sqrtf(mua2) * (A * (musr1 + mua1) + mua1 / 9.0f + 4.0f / 9.0f * musr1) * mua1 * sqrtf((3 * musr2 + mua2)) + (A * (musr1 + mua1) + mua1) * (2.0f * musr1 * mua2 + (3.0f / 2.0f * musr2 + mua2) * mua1)) * sinh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) + 2.0f * (expf(-de * (musr1 + mua1)) * (2.0f / 3.0f * ((A / 4.0f + 3.0f / 4.0f) * powf(mua1, 3.0f / 2.0f) + musr1 * sqrtf(mua1) * A) * sqrtf(mua2) * sqrtf((3 * musr2 + mua2)) + (A + 1.0f / 3.0f) * ((3.0f / 4.0f * musr2 + mua2 / 2.0f) * powf(mua1, 3.0f / 2.0f) + mua2 * musr1 * sqrtf(mua1))) * cosh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) - 5.0f / 4.0f * (mua2 + 3.0f / 5.0f * sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) + 3.0f / 0.10e2 * musr2) * (A + 1.0f / 3.0f) * musr1 * sqrtf(mua1)) * sqrtf(3.0f) * sqrtf(musr1 + mua1)) * sqrtf(3.0f) * sqrtf(musr1 + mua1) * powf(mua2, 3.0f / 2.0f) * sqrtf(mua1) * powf(mua2 + sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) / 3.0f, -2.0f) / (-musr1 / 2.0f + mua1) * powf((mua1 * sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) / 3.0f + A * mua2 * (musr1 + mua1)) * sinh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) + sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1) * cosh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) * (mua2 + sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) * A) / 3.0f, -2.0f) / 9.0f;
//res += currLineData*(i+1);
//derivmuad += currLineData*2*(i+1);
//res += gcol[ind];
//dermal layer quantities depend on the current mua2 iterate
float D2 = fdividef(1.0f,3.0f*(musr2 + mua2));
float del2 = sqrtf(fdividef(D2,mua2));
//from Svaasand 1995
//calculate the reflectance value
float f1 = (del1*del1*del2*div13-del1*del1*D2)*coshval+(del1*del1*del1*fdividef(D2,D1)*div13 - del1*del2*D1)*sinhval; //keeping for derivative calc
float f2 = 1.0f+fdividef(del2, D2)*div13; //keeping for derivative calc
float f3 = (musr2dmusr1*del2*del2*D1*(del1*fdividef(del1,D1*D1)*div13*div13-1.0f)+del1*del1*(D2-del2*fdividef(del2,D2)*div13*div13))*expval; //keeping for derivative calc
float f4 = del1*fdividef(del1,D1*D1)*div13*div13 - 1.0f; //keep for derivative calc, f4
float f5 = fdividef(del2,D2)*div13+1.0f; //keep for derivative calc, f5
float f6 = D1*del1*(D2+del2*A)*coshval+(D1*D1*del2 + D2*del1*del1*A)*sinhval; //keep for derivative calc, f6
float num = del1*musr1*A*(f1*f2+f3);
float denom = f4*f5*f6;
res = fdividef(num, denom);
//calculate the derivative with respect to muad (chain rule through D2 and del2)
float dD2dmuad = -3.0f*D2*D2;
float ddel2dmuad = (dD2dmuad*mua2-D2)*fdividef(1.0f, mua2*mua2)*fdividef(1.0f, 2.0f*del2);
float df2dmuad = div13*(ddel2dmuad*D2 - del2*dD2dmuad)*fdividef(1.0f, D2*D2);
derivmuad = (del1*musr1*A*((coshval*(del1*del1*div13*ddel2dmuad - del1*del1*dD2dmuad) + sinhval*(del1*del1*del1*fdividef(1.0f, D1)*div13*dD2dmuad - del1*D1*ddel2dmuad))*f2 + f1*df2dmuad + expval*(musr2dmusr1*2.0f*del2*ddel2dmuad*D1*(fdividef(del1*del1,9.0f*D1*D1)-1.0f) + del1*del1*(dD2dmuad + fdividef(1.0f, 9.0f*mua2*mua2))))*denom - (f4*(df2dmuad*f6 + D1*del1*(dD2dmuad + ddel2dmuad*A)*coshval + (D1*D1*ddel2dmuad + dD2dmuad*del1*del1*A)*sinhval*f5)*num))*fdividef(1.0f, denom*denom);
//calculate next mua2
//newton's method
mua2 = mua2 - fdividef(res-currLineData, derivmuad);
//correction in case muad wants to be negative, which we seriously don't want:
//branchless reset of negative iterates to 1.0f
mua2 = mua2*(1-signbit(mua2)) + signbit(mua2);
}
muad[ind] = mua2;
}
//find straight line through input mua, return as the value at wavelength w
// Least-squares line fit of mua over wavelengths[startwlenind, endwlenind),
// evaluated at wavelength `wlen` and written to res at each pixel.
// Launch layout: blockIdx.x/threadIdx.x address the image; `pitch` is used as
// an element stride in the index math — assumed element count, TODO confirm.
__global__ void StraightLine(float *wavelengths, float *mua, float wlen, float *res, int startwlenind, int endwlenind, size_t pitch){
    float xbar = 0;
    float ybar = 0;
    float xybar = 0;
    float x2bar = 0;
    int num = endwlenind - startwlenind;  // number of samples in the fit
    __shared__ float w;                   // wavelength broadcast block-wide
    for (int i = startwlenind; i < endwlenind; i++){
        int ind = (gridDim.x*i + blockIdx.x)*pitch + threadIdx.x;
        if (threadIdx.x == 0){
            w = wavelengths[i];
        }
        __syncthreads();  // make w visible to all threads before use
        float absval = mua[ind];
        xbar += w;
        ybar += absval;
        xybar += w*absval;
        x2bar += w*w;
        // BUGFIX: barrier after the reads of w; without it thread 0 can loop
        // around and overwrite w while other threads are still reading it.
        __syncthreads();
    }
    // Guard the degenerate empty range to avoid 0/0 NaNs below.
    if (num <= 0) {
        res[blockIdx.x*pitch + threadIdx.x] = 0.0f;
        return;
    }
    xbar /= num;
    ybar /= num;
    xybar /= num;
    x2bar /= num;
    float a = (xybar - xbar*ybar)/(x2bar - xbar*xbar);  // slope
    float b = ybar - a*xbar;                            // intercept
    int ind = blockIdx.x*pitch + threadIdx.x;
    res[ind] = a*wlen + b;
}
// Per-pixel weighted sum along the band axis: accumulates
// mua[band]*multVec[band] over bands [startwlenind, endwlenind), scales the
// sum by `factor`, and stores the scalar result at the pixel's position.
__global__ void MultVector(float *multVec, float *mua, float *res, float factor, int startwlenind, int endwlenind, size_t pitch){
    float acc = 0;
    for (int band = startwlenind; band < endwlenind; ++band){
        int src = (gridDim.x*band + blockIdx.x)*pitch + threadIdx.x;
        acc += mua[src]*multVec[src];
    }
    acc *= factor;
    int dst = blockIdx.x*pitch + threadIdx.x;
    res[dst] = acc;
}
// Two-layer diffuse reflectance forward model, one thread per pixel.
// Builds reduced scattering coefficients for the two layers from the
// anisotropy factor stored in gcol and evaluates ReflIsoL2Calc.
// blockIdx.y selects the wavelength band (offset by startblockind).
__global__ void ReflIsoL2(float *muae, float *muse, float *muad, float *musd, float *gcol, float *res, size_t pitch, int startblockind){
    int ind = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch+ threadIdx.x;
    float musr2 = gcol[ind]; // NB: this is not musr, it is g FIXME
    // reduced scattering coefficients: musr = mus*(1-g)
    float musr1 = muse[ind]*(1.0f-musr2);
    musr2 = musd[ind]*(1.0f-musr2);
    // move mua into local memory
    float mua1 = muae[ind];
    float mua2 = muad[ind];
    res[ind] = ReflIsoL2Calc(mua1, musr1, mua2, musr2);
}
// Diffuse reflectance of a two-layer medium (layer 1 on top of layer 2),
// following Svaasand 1995. mua* are absorption and musr* reduced scattering
// coefficients of the two layers. Relies on file-scope constants `de`
// (presumably the layer-1 thickness), `A` (boundary factor) and `div13`
// (presumably 1/3) — defined outside this view, confirm before editing.
__device__ float ReflIsoL2Calc(float mua1, float musr1, float mua2, float musr2){
    // diffusion constant (layer 1)
    float D1 = fdividef(1.0f,3.0f*(musr1 + mua1));
    float musr2dmusr1 = fdividef(musr2,musr1); // musr2 divided by musr1, keep for derivative calc
    // optical penetration depth (layer 1)
    float del1 = sqrtf(fdividef(D1,mua1));
    float sinhval = sinhf(fdividef(de, del1));
    float coshval = coshf(fdividef(de, del1));
    float fact1 = del1*musr1*A;
    float expval = expf(-fdividef(de,D1)*div13);
    // diffusion constant and penetration depth (layer 2)
    float D2 = fdividef(1.0f,3.0f*(musr2 + mua2));
    float del2 = sqrtf(fdividef(D2,mua2));
    // from Svaasand 1995
    // calculate the reflectance value; f1..f6 are factored sub-expressions
    // kept separate so derivative code elsewhere can reuse them
    float f1 = (del1*del1*del2*div13-del1*del1*D2)*coshval+(del1*del1*del1*fdividef(D2,D1)*div13 - del1*del2*D1)*sinhval; // keeping for derivative calc
    float f2 = 1.0f+fdividef(del2, D2)*div13; // keeping for derivative calc
    float f3 = (musr2dmusr1*del2*del2*D1*(del1*fdividef(del1,D1*D1)*div13*div13-1.0f)+del1*del1*(D2-del2*fdividef(del2,D2)*div13*div13))*expval; // keeping for derivative calc
    float f4 = del1*fdividef(del1,D1*D1)*div13*div13 - 1.0f; // keep for derivative calc, f4
    float f5 = fdividef(del2,D2)*div13+1.0f; // keep for derivative calc, f5
    float f6 = D1*del1*(D2+del2*A)*coshval+(D1*D1*del2 + D2*del1*del1*A)*sinhval; // keep for derivative calc, f6
    float num = fact1*(f1*f2+f3);
    float denom = f4*f5*f6;
    return fdividef(num,denom);
}
// Per-pixel L2 error between the measured reflectance (inputres) and the
// two-layer model evaluated with mua2 reconstructed as a linear mix of
// endmember spectra AT with abundances x. Writes sqrt(sum of squared
// residuals over all bands) to outputres.
__global__ void ReflIsoL2ErrorCheck(float *muae, float *muse, float *musd, float *gcol, float *AT, float *x, int endmembers, int numbands, float *inputres, float *outputres, size_t pitch, int startblockind, int diff){
    float error = 0;
    __shared__ float Aval;  // endmember spectrum value broadcast block-wide
    for (int i=0; i < numbands; i++){
        int ind = (gridDim.x*(i+startblockind) + blockIdx.x)*pitch+ threadIdx.x;
        float musr2 = gcol[ind]; // NB: this is not musr, it is g FIXME
        // reduced scattering coefficients
        float musr1 = muse[ind]*(1.0f-musr2);
        musr2 = musd[ind]*(1.0f-musr2);
        // calculate mua2 as a linear combination of endmember spectra
        float mua1 = muae[ind];
        float mua2 = 0;
        for (int j=0; j < endmembers; j++){
            if (threadIdx.x == 0){
                Aval = AT[j*numbands + i + diff];
            }
            __syncthreads();  // publish Aval to the whole block
            mua2 += Aval*x[(gridDim.x*j + blockIdx.x)*pitch + threadIdx.x];
            // BUGFIX: barrier after the read so thread 0 cannot overwrite
            // Aval for iteration j+1 while other threads still read it.
            __syncthreads();
        }
        float res = ReflIsoL2Calc(mua1, musr1, mua2, musr2);
        float measval = inputres[ind];
        error += (res-measval)*(res-measval);
    }
    outputres[blockIdx.x*pitch + threadIdx.x] = sqrtf(error);
}
|
826f8d1a05b47f82baa9a1802dab965afea5cdb6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "concat_impl.h"
namespace onnxruntime {
namespace cuda {
// Copies one element of a concat-along-axis result per thread.
// The flat output index `id` decomposes as:
//   id     = outter_block_index * block_size_including_axis_dim + offset
//   offset = block_index * block_size_inside_axis_dim + offset'
// block_index (position along the concat axis) is mapped to its source input
// via axis_dimension_input_output_mapping; concat_sizes_range holds the
// inclusive prefix sums of the per-input axis sizes.
template <typename T>
__global__ void _ConcatKernel(const fast_divmod block_size_including_axis_dim_div,
                              const fast_divmod block_size_inside_axis_dim_div,
                              const int64_t* concat_sizes,
                              const int64_t* concat_sizes_range,
                              const int64_t* axis_dimension_input_output_mapping,
                              T* output_data,
                              const void** input_ptr,
                              const CUDA_LONG N) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);  // bounds-checked flat index
  CUDA_LONG input_pos = 0;
  int outter_block_index = 0;
  int block_index = 0;
  int offset = 0;
  block_size_including_axis_dim_div.divmod(id, outter_block_index, offset);
  block_size_inside_axis_dim_div.divmod(offset, block_index, offset);
  int input_index = axis_dimension_input_output_mapping[block_index];
  // start of this input's slot along the concat axis
  int64_t range_left = (input_index == 0) ? 0 : concat_sizes_range[input_index - 1];
  int block_offset = block_index - range_left;
  input_pos = (outter_block_index * concat_sizes[input_index] + block_offset) *
                  block_size_inside_axis_dim_div.d_ +
              offset;
  output_data[id] = reinterpret_cast<const T*>(input_ptr[input_index])[input_pos];
}
// Launches _ConcatKernel on the default stream, selecting the template type
// purely by element byte width (1/2/4/8) — concat only moves bytes, so e.g.
// float shares the int32 instantiation. Returns FAIL for unsupported widths.
// One thread per output element, default-stream launch.
Status ConcatImpl(const size_t element_bytes,
                  const int block_size_including_axis_dim,
                  const int block_size_inside_axis_dim,
                  const int64_t* concat_sizes,
                  const int64_t* concat_sizes_range,
                  const int64_t* axis_dimension_input_output_mapping,
                  void* output_data,
                  const void** input_ptr,
                  const size_t N) {
  // ceil-divide the element count into full blocks
  int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
  fast_divmod block_size_including_axis_dim_div = fast_divmod(block_size_including_axis_dim);
  fast_divmod block_size_inside_axis_dim_div = fast_divmod(block_size_inside_axis_dim);
  switch (element_bytes) {
    case sizeof(int8_t):
      hipLaunchKernelGGL(( _ConcatKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
          block_size_including_axis_dim_div, block_size_inside_axis_dim_div,
          concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping,
          reinterpret_cast<int8_t*>(output_data),
          input_ptr,
          (CUDA_LONG)N);
      break;
    case sizeof(int16_t):
      hipLaunchKernelGGL(( _ConcatKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
          block_size_including_axis_dim_div, block_size_inside_axis_dim_div,
          concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping,
          reinterpret_cast<int16_t*>(output_data),
          input_ptr,
          (CUDA_LONG)N);
      break;
    case sizeof(int32_t):
      hipLaunchKernelGGL(( _ConcatKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
          block_size_including_axis_dim_div, block_size_inside_axis_dim_div,
          concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping,
          reinterpret_cast<int32_t*>(output_data),
          input_ptr,
          (CUDA_LONG)N);
      break;
    case sizeof(int64_t):
      hipLaunchKernelGGL(( _ConcatKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
          block_size_including_axis_dim_div, block_size_inside_axis_dim_div,
          concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping,
          reinterpret_cast<int64_t*>(output_data),
          input_ptr,
          (CUDA_LONG)N);
      break;
    default:
      return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Concat operator");
  }
  return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
| 826f8d1a05b47f82baa9a1802dab965afea5cdb6.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "concat_impl.h"
namespace onnxruntime {
namespace cuda {
// Copies one element of a concat-along-axis result per thread.
// The flat output index `id` decomposes as:
//   id     = outter_block_index * block_size_including_axis_dim + offset
//   offset = block_index * block_size_inside_axis_dim + offset'
// block_index (position along the concat axis) is mapped to its source input
// via axis_dimension_input_output_mapping; concat_sizes_range holds the
// inclusive prefix sums of the per-input axis sizes.
template <typename T>
__global__ void _ConcatKernel(const fast_divmod block_size_including_axis_dim_div,
                              const fast_divmod block_size_inside_axis_dim_div,
                              const int64_t* concat_sizes,
                              const int64_t* concat_sizes_range,
                              const int64_t* axis_dimension_input_output_mapping,
                              T* output_data,
                              const void** input_ptr,
                              const CUDA_LONG N) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);  // bounds-checked flat index
  CUDA_LONG input_pos = 0;
  int outter_block_index = 0;
  int block_index = 0;
  int offset = 0;
  block_size_including_axis_dim_div.divmod(id, outter_block_index, offset);
  block_size_inside_axis_dim_div.divmod(offset, block_index, offset);
  int input_index = axis_dimension_input_output_mapping[block_index];
  // start of this input's slot along the concat axis
  int64_t range_left = (input_index == 0) ? 0 : concat_sizes_range[input_index - 1];
  int block_offset = block_index - range_left;
  input_pos = (outter_block_index * concat_sizes[input_index] + block_offset) *
                  block_size_inside_axis_dim_div.d_ +
              offset;
  output_data[id] = reinterpret_cast<const T*>(input_ptr[input_index])[input_pos];
}
// Launches _ConcatKernel on the default stream, selecting the template type
// purely by element byte width (1/2/4/8) — concat only moves bytes, so e.g.
// float shares the int32 instantiation. Returns FAIL for unsupported widths.
// One thread per output element, default-stream launch.
Status ConcatImpl(const size_t element_bytes,
                  const int block_size_including_axis_dim,
                  const int block_size_inside_axis_dim,
                  const int64_t* concat_sizes,
                  const int64_t* concat_sizes_range,
                  const int64_t* axis_dimension_input_output_mapping,
                  void* output_data,
                  const void** input_ptr,
                  const size_t N) {
  // ceil-divide the element count into full blocks
  int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
  fast_divmod block_size_including_axis_dim_div = fast_divmod(block_size_including_axis_dim);
  fast_divmod block_size_inside_axis_dim_div = fast_divmod(block_size_inside_axis_dim);
  switch (element_bytes) {
    case sizeof(int8_t):
      _ConcatKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
          block_size_including_axis_dim_div, block_size_inside_axis_dim_div,
          concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping,
          reinterpret_cast<int8_t*>(output_data),
          input_ptr,
          (CUDA_LONG)N);
      break;
    case sizeof(int16_t):
      _ConcatKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
          block_size_including_axis_dim_div, block_size_inside_axis_dim_div,
          concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping,
          reinterpret_cast<int16_t*>(output_data),
          input_ptr,
          (CUDA_LONG)N);
      break;
    case sizeof(int32_t):
      _ConcatKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
          block_size_including_axis_dim_div, block_size_inside_axis_dim_div,
          concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping,
          reinterpret_cast<int32_t*>(output_data),
          input_ptr,
          (CUDA_LONG)N);
      break;
    case sizeof(int64_t):
      _ConcatKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
          block_size_including_axis_dim_div, block_size_inside_axis_dim_div,
          concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping,
          reinterpret_cast<int64_t*>(output_data),
          input_ptr,
          (CUDA_LONG)N);
      break;
    default:
      return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Concat operator");
  }
  return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
|
b5a2cab1f6f9cc217e1545e8f12bc91d160549db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2012 by Jrn Dinkla, www.dinkla.com, All rights reserved.
*/
#include "CExtent.h"
#include "CAccum.h"
#include "CExecConfig.h"
#include "CPinnedHostBuffer.h"
#include "CDeviceBuffer.h"
// Fills the host image with a deterministic test pattern:
// pixel (x, y) = (x, y, 0, x + y), addressed through extent.index().
void initialize(uchar4* h_input, const CExtent& extent) {
    const int rows = extent.height;
    const int cols = extent.width;
    for (int row = 0; row < rows; ++row) {
        for (int col = 0; col < cols; ++col) {
            h_input[extent.index(col, row)] = make_uchar4(col, row, 0, col + row);
        }
    }
}
// Overload taking explicit dimensions; row-major layout,
// pixel (x, y) = (x, y, 0, x + y).
void initialize(uchar4* h_input, const int width, const int height) {
    const int total = width * height;
    for (int idx = 0; idx < total; ++idx) {
        const int x = idx % width;
        const int y = idx / width;
        h_input[idx] = make_uchar4(x, y, 0, x + y);
    }
}
/*
__device__ inline void add(int4& a, const uchar4& b) {
a.x += b.x; a.y += b.y; a.z += b.z;
}
__global__ void smooth(const uchar4* d_input, uchar4* d_output,
const int width, const int height, const int windowSize) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
...
d_output[y * width + x] = make_uchar4(a.x/c, a.y/c, a.z/c, 255);
}
}
*/
// 3D box filter: averages the (2*windowSize+1)^3 neighbourhood of each voxel
// of d_input into d_output, skipping neighbours outside the volume (via
// inBoundsStrict), so border voxels average fewer samples.
// One thread per voxel; launch with a 3D grid covering `extent`.
__global__
void smooth(const CExtent extent,
            const uchar4* d_input, uchar4* d_output,
            const int windowSize) {
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (extent.inBounds(x, y, z)) {
        CAccum acc;  // running sum + count, averaged by acc.avg()
        for (int dz = -windowSize; dz <= windowSize; dz ++) {
            for (int dy = -windowSize; dy <= windowSize; dy ++) {
                for (int dx = -windowSize; dx <= windowSize; dx ++) {
                    int nx=x+dx; int ny=y+dy; int nz=z+dz;
                    if (extent.inBoundsStrict(nx, ny, nz)) {
                        acc.add(d_input[extent.index(nx, ny, nz)]);
                    }
                }
            }
        }
        d_output[extent.index(x, y, z)] = acc.avg();
    }
}
// Demo driver: builds a 1024x1024 test image, runs the GPU box filter with a
// 3x3 window, and tears everything down. (Removed an unused `size` local.)
int main2(int argc, char** argv) {
    CExtent extent(1024, 1024, 1); int windowSize = 1;
    CPinnedHostBuffer<uchar4> h_input(extent);
    CPinnedHostBuffer<uchar4> h_output(extent);
    h_input.malloc(); h_output.malloc();
    initialize(h_input.getPtr(), extent);
    CDeviceBuffer<uchar4> d_input(extent);
    CDeviceBuffer<uchar4> d_output(extent);
    d_input.malloc(); d_output.malloc();
    d_input.copyTo(h_input.getPtr());
    CExecConfig config(extent);
    hipLaunchKernelGGL(( smooth), dim3(config.grid),dim3(config.threads), 0, 0, extent,
        d_input.getPtr(), d_output.getPtr(), windowSize);
    // NOTE(review): this copies the unmodified *input* back to the host; the
    // smoothed result in d_output is never read and h_output stays empty —
    // presumably this should involve d_output/h_output. Confirm the
    // CDeviceBuffer copyTo/copyFrom direction before changing behavior.
    d_input.copyFrom(h_input.getPtr());
    d_input.free(); d_output.free(); h_input.free(); h_output.free();
    return 0;
}
| b5a2cab1f6f9cc217e1545e8f12bc91d160549db.cu | /*
* Copyright (c) 2012 by Jörn Dinkla, www.dinkla.com, All rights reserved.
*/
#include "CExtent.h"
#include "CAccum.h"
#include "CExecConfig.h"
#include "CPinnedHostBuffer.h"
#include "CDeviceBuffer.h"
// Fills the host image with a deterministic test pattern:
// pixel (x, y) = (x, y, 0, x + y), addressed through extent.index().
void initialize(uchar4* h_input, const CExtent& extent) {
    for (int y=0; y<extent.height; y++) {
        for (int x=0; x<extent.width; x++) {
            h_input[extent.index(x, y)] = make_uchar4(x, y, 0, x+y);
        }
    }
}
// Overload taking explicit dimensions; row-major layout,
// pixel (x, y) = (x, y, 0, x + y).
void initialize(uchar4* h_input, const int width, const int height) {
    for (int y=0; y<height; y++) {
        for (int x=0; x<width; x++) {
            h_input[y * width + x] = make_uchar4(x, y, 0, x+y);
        }
    }
}
/*
__device__ inline void add(int4& a, const uchar4& b) {
a.x += b.x; a.y += b.y; a.z += b.z;
}
__global__ void smooth(const uchar4* d_input, uchar4* d_output,
const int width, const int height, const int windowSize) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
...
d_output[y * width + x] = make_uchar4(a.x/c, a.y/c, a.z/c, 255);
}
}
*/
// 3D box filter: averages the (2*windowSize+1)^3 neighbourhood of each voxel
// of d_input into d_output, skipping neighbours outside the volume (via
// inBoundsStrict), so border voxels average fewer samples.
// One thread per voxel; launch with a 3D grid covering `extent`.
__global__
void smooth(const CExtent extent,
            const uchar4* d_input, uchar4* d_output,
            const int windowSize) {
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (extent.inBounds(x, y, z)) {
        CAccum acc;  // running sum + count, averaged by acc.avg()
        for (int dz = -windowSize; dz <= windowSize; dz ++) {
            for (int dy = -windowSize; dy <= windowSize; dy ++) {
                for (int dx = -windowSize; dx <= windowSize; dx ++) {
                    int nx=x+dx; int ny=y+dy; int nz=z+dz;
                    if (extent.inBoundsStrict(nx, ny, nz)) {
                        acc.add(d_input[extent.index(nx, ny, nz)]);
                    }
                }
            }
        }
        d_output[extent.index(x, y, z)] = acc.avg();
    }
}
// Demo driver: builds a 1024x1024 test image, runs the GPU box filter with a
// 3x3 window, and tears everything down. (Removed an unused `size` local.)
int main2(int argc, char** argv) {
    CExtent extent(1024, 1024, 1); int windowSize = 1;
    CPinnedHostBuffer<uchar4> h_input(extent);
    CPinnedHostBuffer<uchar4> h_output(extent);
    h_input.malloc(); h_output.malloc();
    initialize(h_input.getPtr(), extent);
    CDeviceBuffer<uchar4> d_input(extent);
    CDeviceBuffer<uchar4> d_output(extent);
    d_input.malloc(); d_output.malloc();
    d_input.copyTo(h_input.getPtr());
    CExecConfig config(extent);
    smooth<<<config.grid,config.threads>>>(extent,
        d_input.getPtr(), d_output.getPtr(), windowSize);
    // NOTE(review): this copies the unmodified *input* back to the host; the
    // smoothed result in d_output is never read and h_output stays empty —
    // presumably this should involve d_output/h_output. Confirm the
    // CDeviceBuffer copyTo/copyFrom direction before changing behavior.
    d_input.copyFrom(h_input.getPtr());
    d_input.free(); d_output.free(); h_input.free(); h_output.free();
    return 0;
}
|
39bb5922dd974ecd8217115a1a0a935ea1143386.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/fc_functor.h"
namespace phi {
namespace funcs {
template <typename T>
struct FcTypeTraits;
template <>
struct FcTypeTraits<float> {
typedef float4 Type;
};
template <>
struct FcTypeTraits<double> {
typedef double4 Type;
};
// Adds a broadcast bias to GEMM output (and optionally applies ReLU) using a
// packed vector type T (float4 or double4, per FcTypeTraits). `num` is the
// total packed-element count (M*N/4) and K the packed row width (N/4); one
// thread per packed element.
template <typename T, bool DoRelu>
__global__ void bias_relu_v4(const int num, const T* bias, T* data, int K) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < num) {
    int bias_idx = tid % K;  // column position -> bias element
    const T bias_ptr = bias[bias_idx];
    const T in_ptr = data[tid];
    T packed_val;
    packed_val.x = in_ptr.x + bias_ptr.x;
    packed_val.y = in_ptr.y + bias_ptr.y;
    packed_val.z = in_ptr.z + bias_ptr.z;
    packed_val.w = in_ptr.w + bias_ptr.w;
    if (DoRelu) {
      // BUGFIX: the previous fmaxf() calls truncated double4 lanes to float;
      // a type-generic clamp keeps full precision for both float4 and double4.
      packed_val.x = packed_val.x > 0 ? packed_val.x : 0;
      packed_val.y = packed_val.y > 0 ? packed_val.y : 0;
      packed_val.z = packed_val.z > 0 ? packed_val.z : 0;
      packed_val.w = packed_val.w > 0 ? packed_val.w : 0;
    }
    data[tid] = packed_val;
  }
}
// Adds bias[i] to every element of row blockIdx.x of `data` (row length N),
// optionally applying ReLU, all in place. One block per row; BlockDim threads
// stride across the row.
template <typename T, bool DoRelu, int BlockDim>
__global__ void InplaceAddReluKernel(const int N, const T* bias, T* data) {
  int offset = blockIdx.x * N;  // start of this block's row
  for (int i = threadIdx.x; i < N; i += BlockDim) {
    T temp;
#if defined(__HIPCC__) || __CUDA_ARCH__ >= 350
    // __ldg routes both reads through the read-only data cache
    temp = __ldg(data + offset + i) + __ldg(bias + i);
#else
    temp = data[offset + i] + bias[i];
#endif
    if (DoRelu) {
      // branchless ReLU: multiply by 1 when temp > 0, else by 0
      data[offset + i] = static_cast<int>(temp > 0) * temp;
    } else {
      data[offset + i] = temp;
    }
  }
}
// Fully-connected layer: Y = X * W, then (optionally) broadcast-add bias B
// and apply ReLU. Shapes: X is M x K, W is K x N, Y is M x N, B length N.
// Bias addition uses the vectorized float4/double4 kernel when N % 4 == 0,
// otherwise a one-block-per-row scalar kernel. All launches go to the
// context's stream.
template <typename DeviceContext, typename T>
void FCFunctor<DeviceContext, T>::operator()(const DeviceContext& context,
                                             const int M,
                                             const int N,
                                             const int K,
                                             const T* X,
                                             const T* W,
                                             T* Y,
                                             const T* B,
                                             bool relu,
                                             bool padding_weights) {
  // Weight padding is not supported on GPU; fail loudly rather than compute
  // with a mismatched layout.
  PADDLE_ENFORCE_EQ(padding_weights,
                    false,
                    errors::PermissionDenied(
                        "Weight padding in fc can not be used in GPU scope."));
  auto blas = phi::funcs::GetBlas<DeviceContext, T>(context);
  blas.GEMM(false,
            false,
            M,
            N,
            K,
            static_cast<T>(1.0),
            X,
            K,
            W,
            N,
            static_cast<T>(0.0),
            Y,
            N);
  if (B == NULL) {
    return;  // no bias: GEMM result is the final output
  }
  // M * N
  if (N % 4 == 0) {
    // vectorized path: process 4 elements per thread via float4/double4
    const int threads = 256;
    const int num = M * N / 4;
    const int blocks = (num + threads - 1) / threads;
    typedef typename FcTypeTraits<T>::Type trans_type;
    auto* bias_ptr_v4 = reinterpret_cast<const trans_type*>(B);
    auto* data_ptr_v4 = reinterpret_cast<trans_type*>(Y);
    if (relu) {
      hipLaunchKernelGGL(( bias_relu_v4<trans_type, true>), dim3(blocks), dim3(threads), 0, context.stream(),
          num, bias_ptr_v4, data_ptr_v4, N / 4);
    } else {
      hipLaunchKernelGGL(( bias_relu_v4<trans_type, false>), dim3(blocks), dim3(threads), 0, context.stream(),
          num, bias_ptr_v4, data_ptr_v4, N / 4);
    }
  } else {
    // scalar fallback: one block per output row
    const int threads = 256;
    const int blocks = M;
    if (relu) {
      hipLaunchKernelGGL(( InplaceAddReluKernel<T,
                            true,
                            threads>), dim3(blocks), dim3(threads), 0, context.stream(),
          N, B, Y);
    } else {
      hipLaunchKernelGGL(( InplaceAddReluKernel<T,
                            false,
                            threads>), dim3(blocks), dim3(threads), 0, context.stream(),
          N, B, Y);
    }
  }
}
template class FCFunctor<paddle::platform::CUDADeviceContext, float>;
template class FCFunctor<paddle::platform::CUDADeviceContext, double>;
template class FCFunctor<GPUContext, float>;
template class FCFunctor<GPUContext, double>;
} // namespace funcs
} // namespace phi
| 39bb5922dd974ecd8217115a1a0a935ea1143386.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/fc_functor.h"
namespace phi {
namespace funcs {
template <typename T>
struct FcTypeTraits;
template <>
struct FcTypeTraits<float> {
typedef float4 Type;
};
template <>
struct FcTypeTraits<double> {
typedef double4 Type;
};
// Adds a broadcast bias to GEMM output (and optionally applies ReLU) using a
// packed vector type T (float4 or double4, per FcTypeTraits). `num` is the
// total packed-element count (M*N/4) and K the packed row width (N/4); one
// thread per packed element.
template <typename T, bool DoRelu>
__global__ void bias_relu_v4(const int num, const T* bias, T* data, int K) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < num) {
    int bias_idx = tid % K;  // column position -> bias element
    const T bias_ptr = bias[bias_idx];
    const T in_ptr = data[tid];
    T packed_val;
    packed_val.x = in_ptr.x + bias_ptr.x;
    packed_val.y = in_ptr.y + bias_ptr.y;
    packed_val.z = in_ptr.z + bias_ptr.z;
    packed_val.w = in_ptr.w + bias_ptr.w;
    if (DoRelu) {
      // BUGFIX: the previous fmaxf() calls truncated double4 lanes to float;
      // a type-generic clamp keeps full precision for both float4 and double4.
      packed_val.x = packed_val.x > 0 ? packed_val.x : 0;
      packed_val.y = packed_val.y > 0 ? packed_val.y : 0;
      packed_val.z = packed_val.z > 0 ? packed_val.z : 0;
      packed_val.w = packed_val.w > 0 ? packed_val.w : 0;
    }
    data[tid] = packed_val;
  }
}
// Adds bias[i] to every element of row blockIdx.x of `data` (row length N),
// optionally applying ReLU, all in place. One block per row; BlockDim threads
// stride across the row.
template <typename T, bool DoRelu, int BlockDim>
__global__ void InplaceAddReluKernel(const int N, const T* bias, T* data) {
  int offset = blockIdx.x * N;  // start of this block's row
  for (int i = threadIdx.x; i < N; i += BlockDim) {
    T temp;
#if defined(__HIPCC__) || __CUDA_ARCH__ >= 350
    // __ldg routes both reads through the read-only data cache
    temp = __ldg(data + offset + i) + __ldg(bias + i);
#else
    temp = data[offset + i] + bias[i];
#endif
    if (DoRelu) {
      // branchless ReLU: multiply by 1 when temp > 0, else by 0
      data[offset + i] = static_cast<int>(temp > 0) * temp;
    } else {
      data[offset + i] = temp;
    }
  }
}
// Fully-connected layer: Y = X * W, then (optionally) broadcast-add bias B
// and apply ReLU. Shapes: X is M x K, W is K x N, Y is M x N, B length N.
// Bias addition uses the vectorized float4/double4 kernel when N % 4 == 0,
// otherwise a one-block-per-row scalar kernel. All launches go to the
// context's stream.
template <typename DeviceContext, typename T>
void FCFunctor<DeviceContext, T>::operator()(const DeviceContext& context,
                                             const int M,
                                             const int N,
                                             const int K,
                                             const T* X,
                                             const T* W,
                                             T* Y,
                                             const T* B,
                                             bool relu,
                                             bool padding_weights) {
  // Weight padding is not supported on GPU; fail loudly rather than compute
  // with a mismatched layout.
  PADDLE_ENFORCE_EQ(padding_weights,
                    false,
                    errors::PermissionDenied(
                        "Weight padding in fc can not be used in GPU scope."));
  auto blas = phi::funcs::GetBlas<DeviceContext, T>(context);
  blas.GEMM(false,
            false,
            M,
            N,
            K,
            static_cast<T>(1.0),
            X,
            K,
            W,
            N,
            static_cast<T>(0.0),
            Y,
            N);
  if (B == NULL) {
    return;  // no bias: GEMM result is the final output
  }
  // M * N
  if (N % 4 == 0) {
    // vectorized path: process 4 elements per thread via float4/double4
    const int threads = 256;
    const int num = M * N / 4;
    const int blocks = (num + threads - 1) / threads;
    typedef typename FcTypeTraits<T>::Type trans_type;
    auto* bias_ptr_v4 = reinterpret_cast<const trans_type*>(B);
    auto* data_ptr_v4 = reinterpret_cast<trans_type*>(Y);
    if (relu) {
      bias_relu_v4<trans_type, true><<<blocks, threads, 0, context.stream()>>>(
          num, bias_ptr_v4, data_ptr_v4, N / 4);
    } else {
      bias_relu_v4<trans_type, false><<<blocks, threads, 0, context.stream()>>>(
          num, bias_ptr_v4, data_ptr_v4, N / 4);
    }
  } else {
    // scalar fallback: one block per output row
    const int threads = 256;
    const int blocks = M;
    if (relu) {
      InplaceAddReluKernel<T,
                           true,
                           threads><<<blocks, threads, 0, context.stream()>>>(
          N, B, Y);
    } else {
      InplaceAddReluKernel<T,
                           false,
                           threads><<<blocks, threads, 0, context.stream()>>>(
          N, B, Y);
    }
  }
}
template class FCFunctor<paddle::platform::CUDADeviceContext, float>;
template class FCFunctor<paddle::platform::CUDADeviceContext, double>;
template class FCFunctor<GPUContext, float>;
template class FCFunctor<GPUContext, double>;
} // namespace funcs
} // namespace phi
|
0531452757c254ec859a5bebb0160e392d78b924.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Segmented sieve of Eratosthenes: thread `index` marks composites in
// primes[] (0 = prime, 1 = composite) over [maxRoot*index, maxRoot*(index+1)),
// assuming entries below sqrt(max) were sieved by an earlier pass.
// index 0 is skipped — that is the base segment.
__global__ static void segmentSieve(char *primes, uint64_t max) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if(index>0){
    const uint64_t maxRoot = sqrt((double)max);
    // BUGFIX: 64-bit segment bounds and loop indices; the previous `int`
    // locals overflowed for max beyond ~2^31 and sieved the wrong range.
    uint64_t low = maxRoot * (uint64_t)index;
    uint64_t high = low + maxRoot;
    if(high > max) high = max;
    for (uint64_t i = 2; i < maxRoot; i++){ // ~sqrt(n)lglg(sqrt(n))
      if(primes[i]==0){
        // first multiple of i at or above low
        uint64_t loLim = (low / i) * i;
        if (loLim < low)
          loLim += i;
        for (uint64_t j=loLim; j<high; j+=i)
          primes[j] = 1;
      }
    }
  }
} | 0531452757c254ec859a5bebb0160e392d78b924.cu | #include "includes.h"
// Segmented sieve of Eratosthenes: thread `index` marks composites in
// primes[] (0 = prime, 1 = composite) over [maxRoot*index, maxRoot*(index+1)),
// assuming entries below sqrt(max) were sieved by an earlier pass.
// index 0 is skipped — that is the base segment.
__global__ static void segmentSieve(char *primes, uint64_t max) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if(index>0){
    const uint64_t maxRoot = sqrt((double)max);
    // BUGFIX: 64-bit segment bounds and loop indices; the previous `int`
    // locals overflowed for max beyond ~2^31 and sieved the wrong range.
    uint64_t low = maxRoot * (uint64_t)index;
    uint64_t high = low + maxRoot;
    if(high > max) high = max;
    for (uint64_t i = 2; i < maxRoot; i++){ // ~sqrt(n)lglg(sqrt(n))
      if(primes[i]==0){
        // first multiple of i at or above low
        uint64_t loLim = (low / i) * i;
        if (loLim < low)
          loLim += i;
        for (uint64_t j=loLim; j<high; j+=i)
          primes[j] = 1;
      }
    }
  }
} |
df6fba966dc89a463fdad2d5c01fc475db2bb414.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// Launched with a single block, so only valid for lengths up to the
// per-block thread limit.
__global__ void addArrays(int* a, int* b, int* c)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Host driver: adds two 5-element vectors on the GPU and prints the result.
// Fixed: `void main()` is non-standard C++ (main must return int), and the
// three device allocations were never freed.
int main()
{
    const int count = 5;
    int size = count * sizeof(int);
    int ha[] = { 1,2,3,4,5};
    int hb[] = { 10,20,30,40,5};
    int hc[count];
    int *da, *db, *dc;
    // NOTE: return codes are unchecked; wrap with an error-check macro in
    // production code.
    hipMalloc(&da, size);
    hipMalloc(&db, size);
    hipMalloc(&dc, size);
    hipMemcpy(da, ha, size,hipMemcpyHostToDevice);
    hipMemcpy(db, hb, size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( addArrays) , dim3(1),dim3(count), 0, 0, da, db, dc);
    // blocking device-to-host copy also synchronizes with the kernel
    hipMemcpy(hc, dc, size, hipMemcpyDeviceToHost);
    for (int i = 0; i < count; i++)
        printf("%d\t", hc[i]);
    // BUGFIX: release device memory (was leaked)
    hipFree(da);
    hipFree(db);
    hipFree(dc);
    getchar();
    return 0;
}
| df6fba966dc89a463fdad2d5c01fc475db2bb414.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// Launched with a single block, so only valid for lengths up to the
// per-block thread limit.
__global__ void addArrays(int* a, int* b, int* c)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Host driver: adds two 5-element vectors on the GPU and prints the result.
// Fixed: `void main()` is non-standard C++ (main must return int), and the
// three device allocations were never freed.
int main()
{
    const int count = 5;
    int size = count * sizeof(int);
    int ha[] = { 1,2,3,4,5};
    int hb[] = { 10,20,30,40,5};
    int hc[count];
    int *da, *db, *dc;
    // NOTE: return codes are unchecked; wrap with an error-check macro in
    // production code.
    cudaMalloc(&da, size);
    cudaMalloc(&db, size);
    cudaMalloc(&dc, size);
    cudaMemcpy(da, ha, size,cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, size, cudaMemcpyHostToDevice);
    addArrays <<<1,count>>> (da, db, dc);
    // blocking device-to-host copy also synchronizes with the kernel
    cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < count; i++)
        printf("%d\t", hc[i]);
    // BUGFIX: release device memory (was leaked)
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    getchar();
    return 0;
}
|
27636c186cd1fd1ccbe23d97295be876b01e9152.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*---------------------------------------------------.
| STACK and Memory Management Functions |
`---------------------------------------------------*/
/*-------------------------------------.
| How to Test Stack Implementaion |
`-------------------------------------*/
/*
__global__ void myStackTesting( byte* buffer )
{
int i;
int stackID;
stackID = allocateStack();
PUSH( stackID , (byte)'3' );
PUSH( stackID , (byte)'o' );
PUSH( stackID , (byte)'m' );
PUSH( stackID , (byte)'a' );
PUSH( stackID , (byte)'r' );
PUSH( stackID , (byte)'z' );
//PUSH( stack , sp , 0 , '\0' );
buffer[0] = 0;
for(i = 0; i < 6; i++)
{
buffer[i] = POP( stackID );
}
buffer[6] = ' ';
buffer[7] = 'I';
buffer[8] = 'D';
buffer[9] = ' ';
buffer[10] = '=';
buffer[11] = ' ';
buffer[12] = stackID + '0';
buffer[13] = 0;
deallocateStack( stackID );
}
*/
__device__ int stackMainLock = 0;
// Byte-wise device-side memcpy (forward copy, no overlap handling) — device
// code cannot call the host memcpy, hence this helper.
__device__ void superCudaMemcpy( byte *destination , byte *source , unsigned int len)
{
    while (len-- > 0) {
        *destination++ = *source++;
    }
}
// Pushes a 16-bit value onto the given stack segment. The stack grows
// downward: sp is decremented first, then the value is stored at sp.
// Returns PUSH_FAIL for an out-of-range segment id or a full segment.
__device__ byte PUSH( int segment , uint16 DATA )
{
    // if segment is not in range
    if( segment < 0 || segment >= mainStack.segments ) return PUSH_FAIL;
    // precaution for overflow (the write would cross into the next segment)
    if(mainStack.sp[segment] - mainStack.lowerLimit[segment] < sizeof(DATA))
        return PUSH_FAIL;
    //Push a value on the stack for the respectful element.
    mainStack.sp[segment] = mainStack.sp[segment] - sizeof(DATA);
    *((uint16*)mainStack.sp[segment]) = DATA;
    return PUSH_SUCCESS;
}
// Pushes a 32-bit value onto the given stack segment (stack grows downward).
// Returns PUSH_FAIL for an out-of-range segment id or a full segment.
__device__ byte PUSH( int segment , unsigned int DATA )
{
    // BUGFIX: validate the segment range and check for overflow, consistent
    // with the uint16 overload; the old code only compared against
    // STACK_ID_INVALID and could write below the segment's lower limit.
    if( segment < 0 || segment >= mainStack.segments ) return PUSH_FAIL;
    if(mainStack.sp[segment] - mainStack.lowerLimit[segment] < sizeof(DATA))
        return PUSH_FAIL;
    //Push a value on the stack for the respectful element.
    mainStack.sp[segment] = mainStack.sp[segment] - sizeof(DATA);
    *((unsigned int*)mainStack.sp[segment]) = DATA;
    return PUSH_SUCCESS;
}
// Pushes one byte onto the given stack segment (stack grows downward).
// Returns PUSH_FAIL for an out-of-range segment id or a full segment.
__device__ byte PUSH( int segment , byte DATA )
{
    // if segment is not in range
    if( segment < 0 || segment >= mainStack.segments ) return PUSH_FAIL;
    // BUGFIX: reject the push when the segment is full, consistent with the
    // uint16 overload; previously the write could fall below lowerLimit.
    if( mainStack.sp[segment] <= mainStack.lowerLimit[segment] )
        return PUSH_FAIL;
    //Push a value on the stack for the respectful element.
    mainStack.sp[segment]--;
    *(mainStack.sp[segment]) = DATA;
    return PUSH_SUCCESS;
}
// Pops and returns the top byte of the given stack segment (the stack grows
// downward, so sp moves up on pop). Returns 0 for an out-of-range segment id
// — indistinguishable from a genuine 0 byte.
// NOTE(review): no underflow check — popping an empty segment walks sp past
// upperLimit into the next segment; confirm callers guarantee non-empty.
__device__ byte POP( int segment )
{
    //Pop a value from the stack and return it.
    byte DATA;
    // if segment is not in range
    if( segment < 0 || segment >= mainStack.segments ) return 0;
    DATA = *(mainStack.sp[segment]);
    mainStack.sp[segment]++;
    return DATA;
}
// Pops and returns a 16-bit value from the given stack segment (sp advances
// by sizeof(unsigned short)). Returns 0 for an out-of-range segment id.
// NOTE(review): like POP, there is no underflow check; confirm callers only
// pop values they previously pushed.
__device__ unsigned short POPshort( int segment )
{
    //Pop a value from the stack and return it.
    unsigned short DATA;
    // if segment is not in range
    if( segment < 0 || segment >= mainStack.segments ) return 0;
    DATA = *((unsigned short*)(mainStack.sp[segment]));
    mainStack.sp[segment] = mainStack.sp[segment] + sizeof(DATA);
    return DATA;
}
// Pops and returns a 32-bit value from the given stack segment (sp advances
// by sizeof(unsigned int)). Returns 0 for an out-of-range segment id.
// NOTE(review): like POP, there is no underflow check; confirm callers only
// pop values they previously pushed.
__device__ unsigned int POPint( int segment )
{
    // Pop a value from the stack and return it.
    unsigned int DATA;
    // if segment is not in range
    if( segment < 0 || segment >= mainStack.segments ) return 0;
    DATA = *((unsigned int*)mainStack.sp[segment]);
    mainStack.sp[segment] = mainStack.sp[segment] + sizeof(DATA);
    return DATA;
}
// Returns the byte at the top of the segment's stack without removing it.
// Returns 0 for an out-of-range segment id (indistinguishable from a genuine
// 0 byte on the stack).
__device__ byte PEEK( int segment )
{
    if( segment < 0 || segment >= mainStack.segments ) return 0;
    return *(mainStack.sp[segment]);
}
// Setup kernel (run with a single thread): copies the host-built descriptor
// into the device-global `mainStack` and carves its buffer into `segments`
// equal chunks. Each segment's sp starts at its upper limit (the stack grows
// downward) and the segment is marked unallocated.
__global__ void initializeDeviceStack( stack mStack )
{
    int i;
    superCudaMemcpy( (byte*)&mainStack , (byte*)&mStack , sizeof(stack) );
    for(i = 0; i < mainStack.segments ; i++)
    {
        mainStack.sp[i] = (byte*)(mainStack.buffer + (mainStack.chunk * (i + 1)));
        mainStack.lowerLimit[i] = (byte*)(mainStack.buffer + (mainStack.chunk * i));
        mainStack.upperLimit[i] = (byte*)(mainStack.buffer + (mainStack.chunk * (i + 1)));
        mainStack.allocated[i] = STACK_SEGMENT_UNALLCOATED;
    }
}
// Teardown kernel (run with a single thread): clears all device-side stack
// bookkeeping. It only nulls pointers; freeing the underlying buffer is the
// host's responsibility (see deinitializeStack).
__global__ void deinitializeDeviceStack()
{
    int i;
    mainStack.buffer = 0;
    for(i = 0; i < mainStack.segments ; i++)
    {
        mainStack.sp[i] = 0;
        mainStack.lowerLimit[i] = 0;
        mainStack.upperLimit[i] = 0;
        mainStack.allocated[i] = STACK_SEGMENT_UNALLCOATED;
    }
}
// Claims the first free stack segment and returns its id, or STACK_ID_INVALID
// when every segment is taken. A global spinlock (stackMainLock via
// atomicCAS) serializes concurrent allocations device-wide.
__device__ int allocateStack()
{
    int i = 0;
    // spin until this thread owns the allocator lock
    while( atomicCAS(&stackMainLock, 0, 1) != 0 );
    for(i = 0; i < mainStack.segments; i++)
    {
        if(mainStack.allocated[i] == STACK_SEGMENT_UNALLCOATED)
        {
            mainStack.allocated[i] = STACK_SEGMENT_ALLCOATED;
            mainStack.sp[i] = mainStack.upperLimit[i]; // reset to empty
            while( atomicCAS(&stackMainLock, 1, 0) != 1 ); // release lock
            return i;
        }
    }
    while( atomicCAS(&stackMainLock, 1, 0) != 1 ); // release lock
#if DEV_DEBUG_STACK
    cuPrintf("NO STACK SPACE AVAILABLE FOR ME... I WILL DIE !!\n");
#endif
    return STACK_ID_INVALID;
}
// Variant that takes a requested id and a keep-data flag.
// NOTE(review): despite the name, 'neededID' is only range-checked — the loop
// below still claims the FIRST free segment, not the requested one; confirm
// whether it should allocate exactly 'neededID'.
// NOTE(review): 'keepData != 0' RESETS sp (discarding contents), which looks
// inverted relative to the flag's name — verify against callers.
// NOTE(review): unlike allocateStack(), this variant does not take
// stackMainLock, so concurrent callers can race on 'allocated'.
__device__ int allocateStack( int neededID , byte keepData )
{
int i = 0;
// if segment is not in range
if( neededID < 0 || neededID >= mainStack.segments ) return STACK_ID_INVALID;
for(i = 0; i < mainStack.segments; i++)
{
if(mainStack.allocated[i] == STACK_SEGMENT_UNALLCOATED)
{
mainStack.allocated[i] = STACK_SEGMENT_ALLCOATED;
if(keepData != 0)
mainStack.sp[i] = mainStack.upperLimit[i];
return i;
}
}
return STACK_ID_INVALID;
}
// Clone one segment's entire chunk into another and place the destination sp
// at the same depth as the source sp. Does not check the segments' allocated
// state and takes no lock — callers must serialize externally.
__device__ byte copyStack( int destination , int source )
{
// if segment is not in range
if( destination < 0 || destination >= mainStack.segments\
|| source < 0 || source >= mainStack.segments )\
return COPYSTACK_FAIL;
superCudaMemcpy( (byte*)mainStack.lowerLimit[destination] , (byte*)mainStack.lowerLimit[source] , mainStack.chunk );
// Same fill depth: distance of sp from upperLimit is preserved.
mainStack.sp[destination] = mainStack.upperLimit[destination] - (mainStack.upperLimit[source] - mainStack.sp[source]);
return COPYSTACK_SUCCESS;
}
// Return a segment to the free pool; out-of-range ids are silently ignored.
// The segment's contents are left in place until it is re-allocated.
__device__ void deallocateStack( int stackID )
{
// if segment is not in range
if( stackID < 0 || stackID >= mainStack.segments ) return;
mainStack.allocated[stackID] = STACK_SEGMENT_UNALLCOATED;
#if DEV_DEBUG_STACK
//cuPrintf("DEALLOCATED ID %d\n", stackID);
#endif
}
// Host-side teardown of the global 'mStack': frees the device allocations and
// runs a kernel to null the device-side copies of the pointers.
// NOTE(review): mStack.buffer is not reset to 0 on the host afterwards, so a
// second call would double-free — confirm the intended call pattern.
void deinitializeStack()
{
if( mStack.buffer != 0 )
{
HANDLE_FREE( hipFree, mStack.buffer );
HANDLE_FREE( hipFree, (byte*)mStack.sp );
HANDLE_FREE( hipFree, (byte*)mStack.lowerLimit );
HANDLE_FREE( hipFree, (byte*)mStack.upperLimit );
HANDLE_FREE( hipFree, mStack.allocated );
}
hipLaunchKernelGGL(( deinitializeDeviceStack), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
}
// Host-side setup of the global 'mStack': allocates the backing buffer plus
// per-segment bookkeeping arrays on the device, then launches a single-thread
// kernel to publish the descriptor device-side.
// NOTE(review): hipMalloc return codes are not checked — an allocation
// failure here would surface later as an illegal address.
void initializeStack( int chunk , int segments )
{
// it is necessary to make sure these are freed if we're going to \
call initializeStack more than once in the code
if( mStack.buffer != 0 )
{
HANDLE_FREE( hipFree, mStack.buffer );
HANDLE_FREE( hipFree, (byte*)mStack.sp );
HANDLE_FREE( hipFree, (byte*)mStack.lowerLimit );
HANDLE_FREE( hipFree, (byte*)mStack.upperLimit );
HANDLE_FREE( hipFree, mStack.allocated );
}
//Allocate Stack Space
hipMalloc( (void **)&(mStack.buffer) , chunk * segments );
hipMalloc( (void **)&(mStack.sp) , segments * sizeof(byte*) );
hipMalloc( (void **)&(mStack.lowerLimit) , segments * sizeof(byte*) );
hipMalloc( (void **)&(mStack.upperLimit) , segments * sizeof(byte*) );
hipMalloc( (void **)&(mStack.allocated) , segments * sizeof(byte) );
mStack.chunk = chunk;
mStack.segments = segments;
hipLaunchKernelGGL(( initializeDeviceStack), dim3(1),dim3(1), 0, 0, mStack );
hipDeviceSynchronize();
}
// Overload taking an explicit descriptor.
// NOTE(review): 'defaultStack' is passed BY VALUE, so the caller never sees
// the device pointers written here (they leak on return) — confirm whether
// this should take 'stack &'.
// NOTE(review): unlike the other overload, 'allocated' is neither freed nor
// allocated here, yet initializeDeviceStack writes allocated[i] — verify.
void initializeStack( stack defaultStack , int chunk , int segments )
{
if( defaultStack.buffer != 0 )
{
HANDLE_FREE( hipFree, defaultStack.buffer );
HANDLE_FREE( hipFree, (byte*)defaultStack.sp );
HANDLE_FREE( hipFree, (byte*)defaultStack.lowerLimit );
HANDLE_FREE( hipFree, (byte*)defaultStack.upperLimit );
}
//Allocate Stack Space
hipMalloc( (void **)&(defaultStack.buffer) , chunk * segments );
hipMalloc( (void **)&(defaultStack.sp) , segments * sizeof(byte*) );
hipMalloc( (void **)&(defaultStack.lowerLimit) , segments * sizeof(byte*) );
hipMalloc( (void **)&(defaultStack.upperLimit) , segments * sizeof(byte*) );
defaultStack.chunk = chunk;
defaultStack.segments = segments;
hipLaunchKernelGGL(( initializeDeviceStack), dim3(1),dim3(1), 0, 0, defaultStack );
hipDeviceSynchronize();
}
| 27636c186cd1fd1ccbe23d97295be876b01e9152.cu |
/*---------------------------------------------------.
| STACK and Memory Management Functions |
`---------------------------------------------------*/
/*-------------------------------------.
| How to Test Stack Implementation |
`-------------------------------------*/
/*
__global__ void myStackTesting( byte* buffer )
{
int i;
int stackID;
stackID = allocateStack();
PUSH( stackID , (byte)'3' );
PUSH( stackID , (byte)'o' );
PUSH( stackID , (byte)'m' );
PUSH( stackID , (byte)'a' );
PUSH( stackID , (byte)'r' );
PUSH( stackID , (byte)'z' );
//PUSH( stack , sp , 0 , '\0' );
buffer[0] = 0;
for(i = 0; i < 6; i++)
{
buffer[i] = POP( stackID );
}
buffer[6] = ' ';
buffer[7] = 'I';
buffer[8] = 'D';
buffer[9] = ' ';
buffer[10] = '=';
buffer[11] = ' ';
buffer[12] = stackID + '0';
buffer[13] = 0;
deallocateStack( stackID );
}
*/
__device__ int stackMainLock = 0;
// Byte-by-byte device-side copy of 'len' bytes from source to destination
// (forward order; single-thread use).
__device__ void superCudaMemcpy( byte *destination , byte *source , unsigned int len)
{
    unsigned int i = 0;
    while (i < len)
    {
        destination[i] = source[i];
        ++i;
    }
}
// Push a 16-bit value onto the given stack segment.
// Returns PUSH_SUCCESS, or PUSH_FAIL on a bad segment id or a full segment.
__device__ byte PUSH( int segment , uint16 DATA )
{
// if segment is not in range
if( segment < 0 || segment >= mainStack.segments ) return PUSH_FAIL;
// precaution for overflow
if(mainStack.sp[segment] - mainStack.lowerLimit[segment] < sizeof(DATA))
return PUSH_FAIL;
//Push a value on the stack for the respectful element.
mainStack.sp[segment] = mainStack.sp[segment] - sizeof(DATA);
*((uint16*)mainStack.sp[segment]) = DATA;
return PUSH_SUCCESS;
}
/* Push a 32-bit value onto the given stack segment.
 * Returns PUSH_SUCCESS, or PUSH_FAIL on a bad segment id or a full segment.
 * Fix: previously only 'segment == STACK_ID_INVALID' was rejected and there
 * was no overflow guard — now consistent with the byte/uint16 overloads
 * (the range check also subsumes STACK_ID_INVALID, assuming it is not a
 * valid index — confirm its definition).
 */
__device__ byte PUSH( int segment , unsigned int DATA )
{
// if segment is not in range
if( segment < 0 || segment >= mainStack.segments ) return PUSH_FAIL;
// precaution for overflow: need sizeof(DATA) free bytes below sp
if(mainStack.sp[segment] - mainStack.lowerLimit[segment] < sizeof(DATA))
return PUSH_FAIL;
//Push a value on the stack for the respectful element.
mainStack.sp[segment] = mainStack.sp[segment] - sizeof(DATA);
*((unsigned int*)mainStack.sp[segment]) = DATA;
return PUSH_SUCCESS;
}
/* Push one byte onto the given stack segment.
 * Returns PUSH_SUCCESS, or PUSH_FAIL on a bad segment id or a full segment.
 * Fix: added the overflow guard that the uint16 overload already has —
 * previously sp could be decremented past lowerLimit into the neighboring
 * segment.
 */
__device__ byte PUSH( int segment , byte DATA )
{
// if segment is not in range
if( segment < 0 || segment >= mainStack.segments ) return PUSH_FAIL;
// precaution for overflow: need one free byte below sp
if(mainStack.sp[segment] - mainStack.lowerLimit[segment] < sizeof(DATA))
return PUSH_FAIL;
//Push a value on the stack for the respectful element.
mainStack.sp[segment]--;
*(mainStack.sp[segment]) = DATA;
return PUSH_SUCCESS;
}
// Pop one byte off the given stack segment; returns 0 for an out-of-range id.
// Stacks grow downward, so a pop reads at sp and moves it up.
__device__ byte POP( int segment )
{
//Pop a value from the stack and return it.
byte DATA;
// if segment is not in range
if( segment < 0 || segment >= mainStack.segments ) return 0;
DATA = *(mainStack.sp[segment]);
mainStack.sp[segment]++;
return DATA;
}
// Pop an unsigned short (2 bytes) off the given stack segment; 0 on bad id.
__device__ unsigned short POPshort( int segment )
{
//Pop a value from the stack and return it.
unsigned short DATA;
// if segment is not in range
if( segment < 0 || segment >= mainStack.segments ) return 0;
DATA = *((unsigned short*)(mainStack.sp[segment]));
// Advance sp toward the upper limit by the size consumed.
mainStack.sp[segment] = mainStack.sp[segment] + sizeof(DATA);
return DATA;
}
// Pop a 32-bit unsigned value off the given stack segment; 0 on bad id.
__device__ unsigned int POPint( int segment )
{
// Pop a value from the stack and return it.
unsigned int DATA;
// if segment is not in range
if( segment < 0 || segment >= mainStack.segments ) return 0;
DATA = *((unsigned int*)mainStack.sp[segment]);
// Advance sp toward the upper limit by the size consumed.
mainStack.sp[segment] = mainStack.sp[segment] + sizeof(DATA);
return DATA;
}
// Read the top byte of the given stack segment without moving sp; 0 on bad id.
__device__ byte PEEK( int segment )
{
// Peek at the top byte of the stack.
byte DATA;
// if segment is not in range
if( segment < 0 || segment >= mainStack.segments ) return 0;
DATA = *(mainStack.sp[segment]);
return DATA;
}
// Kernel (launched <<<1,1>>> by the host wrappers): copies the host-filled
// descriptor into the device global 'mainStack' and carves the buffer into
// per-segment regions (empty segment: sp == upperLimit, stacks grow downward).
__global__ void initializeDeviceStack( stack mStack )
{
int i;
// Publish buffer/chunk/segments and the bookkeeping-array pointers device-side.
superCudaMemcpy( (byte*)&mainStack , (byte*)&mStack , sizeof(stack) );
for(i = 0; i < mainStack.segments ; i++)
{
mainStack.sp[i] = (byte*)(mainStack.buffer + (mainStack.chunk * (i + 1)));
mainStack.lowerLimit[i] = (byte*)(mainStack.buffer + (mainStack.chunk * i));
mainStack.upperLimit[i] = (byte*)(mainStack.buffer + (mainStack.chunk * (i + 1)));
mainStack.allocated[i] = STACK_SEGMENT_UNALLCOATED;
}
}
// Kernel (launched <<<1,1>>>): nulls out all device-side stack bookkeeping so
// stale pointers are not used after the host frees the underlying memory.
__global__ void deinitializeDeviceStack()
{
int i;
mainStack.buffer = 0;
for(i = 0; i < mainStack.segments ; i++)
{
mainStack.sp[i] = 0;
mainStack.lowerLimit[i] = 0;
mainStack.upperLimit[i] = 0;
mainStack.allocated[i] = STACK_SEGMENT_UNALLCOATED;
}
}
// Claim the first free stack segment and return its id, or STACK_ID_INVALID
// when every segment is in use. Serialized by the global spinlock.
// NOTE(review): spinning on a global lock from threads of the same warp can
// livelock under SIMT scheduling on older architectures — confirm usage.
__device__ int allocateStack()
{
int i = 0;
// Acquire the spinlock (0 -> 1).
while( atomicCAS(&stackMainLock, 0, 1) != 0 );
for(i = 0; i < mainStack.segments; i++)
{
if(mainStack.allocated[i] == STACK_SEGMENT_UNALLCOATED)
{
mainStack.allocated[i] = STACK_SEGMENT_ALLCOATED;
// Reset the segment to empty (sp at upperLimit; stack grows downward).
mainStack.sp[i] = mainStack.upperLimit[i];
// Release the spinlock before returning the claimed id.
while( atomicCAS(&stackMainLock, 1, 0) != 1 );
return i;
}
}
// No free segment: release the lock and report failure.
while( atomicCAS(&stackMainLock, 1, 0) != 1 );
#if DEV_DEBUG_STACK
cuPrintf("NO STACK SPACE AVAILABLE FOR ME... I WILL DIE !!\n");
#endif
return STACK_ID_INVALID;
}
// Variant that takes a requested id and a keep-data flag.
// NOTE(review): 'neededID' is only range-checked — the loop still claims the
// FIRST free segment, not the requested one; confirm intent.
// NOTE(review): 'keepData != 0' RESETS sp (discarding contents), which looks
// inverted relative to the flag's name — verify against callers.
// NOTE(review): no stackMainLock taken here, unlike allocateStack().
__device__ int allocateStack( int neededID , byte keepData )
{
int i = 0;
// if segment is not in range
if( neededID < 0 || neededID >= mainStack.segments ) return STACK_ID_INVALID;
for(i = 0; i < mainStack.segments; i++)
{
if(mainStack.allocated[i] == STACK_SEGMENT_UNALLCOATED)
{
mainStack.allocated[i] = STACK_SEGMENT_ALLCOATED;
if(keepData != 0)
mainStack.sp[i] = mainStack.upperLimit[i];
return i;
}
}
return STACK_ID_INVALID;
}
// Clone one segment's entire chunk into another and place the destination sp
// at the same depth as the source sp. No lock is taken and the segments'
// allocated state is not checked — callers must serialize externally.
__device__ byte copyStack( int destination , int source )
{
// if segment is not in range
if( destination < 0 || destination >= mainStack.segments\
|| source < 0 || source >= mainStack.segments )\
return COPYSTACK_FAIL;
superCudaMemcpy( (byte*)mainStack.lowerLimit[destination] , (byte*)mainStack.lowerLimit[source] , mainStack.chunk );
// Same fill depth: distance of sp from upperLimit is preserved.
mainStack.sp[destination] = mainStack.upperLimit[destination] - (mainStack.upperLimit[source] - mainStack.sp[source]);
return COPYSTACK_SUCCESS;
}
// Return a segment to the free pool; out-of-range ids are silently ignored.
// The segment's contents are left in place until it is re-allocated.
__device__ void deallocateStack( int stackID )
{
// if segment is not in range
if( stackID < 0 || stackID >= mainStack.segments ) return;
mainStack.allocated[stackID] = STACK_SEGMENT_UNALLCOATED;
#if DEV_DEBUG_STACK
//cuPrintf("DEALLOCATED ID %d\n", stackID);
#endif
}
// Host-side teardown of the global 'mStack': frees the device allocations and
// runs a kernel to null the device-side pointer copies.
// NOTE(review): mStack.buffer is not reset to 0 on the host afterwards, so a
// second call would double-free. cudaThreadSynchronize is deprecated in favor
// of cudaDeviceSynchronize.
void deinitializeStack()
{
if( mStack.buffer != 0 )
{
HANDLE_FREE( cudaFree, mStack.buffer );
HANDLE_FREE( cudaFree, (byte*)mStack.sp );
HANDLE_FREE( cudaFree, (byte*)mStack.lowerLimit );
HANDLE_FREE( cudaFree, (byte*)mStack.upperLimit );
HANDLE_FREE( cudaFree, mStack.allocated );
}
deinitializeDeviceStack<<<1,1>>>();
cudaThreadSynchronize();
}
// Host-side setup of the global 'mStack': allocates the backing buffer plus
// per-segment bookkeeping arrays on the device, then launches a single-thread
// kernel to publish the descriptor device-side.
// NOTE(review): cudaMalloc return codes are unchecked; cudaThreadSynchronize
// is deprecated in favor of cudaDeviceSynchronize.
void initializeStack( int chunk , int segments )
{
// it is necessary to make sure these are freed if we're going to \
call initializeStack more than once in the code
if( mStack.buffer != 0 )
{
HANDLE_FREE( cudaFree, mStack.buffer );
HANDLE_FREE( cudaFree, (byte*)mStack.sp );
HANDLE_FREE( cudaFree, (byte*)mStack.lowerLimit );
HANDLE_FREE( cudaFree, (byte*)mStack.upperLimit );
HANDLE_FREE( cudaFree, mStack.allocated );
}
//Allocate Stack Space
cudaMalloc( (void **)&(mStack.buffer) , chunk * segments );
cudaMalloc( (void **)&(mStack.sp) , segments * sizeof(byte*) );
cudaMalloc( (void **)&(mStack.lowerLimit) , segments * sizeof(byte*) );
cudaMalloc( (void **)&(mStack.upperLimit) , segments * sizeof(byte*) );
cudaMalloc( (void **)&(mStack.allocated) , segments * sizeof(byte) );
mStack.chunk = chunk;
mStack.segments = segments;
initializeDeviceStack<<<1,1>>>( mStack );
cudaThreadSynchronize();
}
// Overload taking an explicit descriptor.
// NOTE(review): 'defaultStack' is passed BY VALUE, so the caller never sees
// the device pointers written here (they leak on return) — confirm whether
// this should take 'stack &'.
// NOTE(review): unlike the other overload, 'allocated' is neither freed nor
// allocated here, yet initializeDeviceStack writes allocated[i] — verify.
void initializeStack( stack defaultStack , int chunk , int segments )
{
if( defaultStack.buffer != 0 )
{
HANDLE_FREE( cudaFree, defaultStack.buffer );
HANDLE_FREE( cudaFree, (byte*)defaultStack.sp );
HANDLE_FREE( cudaFree, (byte*)defaultStack.lowerLimit );
HANDLE_FREE( cudaFree, (byte*)defaultStack.upperLimit );
}
//Allocate Stack Space
cudaMalloc( (void **)&(defaultStack.buffer) , chunk * segments );
cudaMalloc( (void **)&(defaultStack.sp) , segments * sizeof(byte*) );
cudaMalloc( (void **)&(defaultStack.lowerLimit) , segments * sizeof(byte*) );
cudaMalloc( (void **)&(defaultStack.upperLimit) , segments * sizeof(byte*) );
defaultStack.chunk = chunk;
defaultStack.segments = segments;
initializeDeviceStack<<<1,1>>>( defaultStack );
cudaThreadSynchronize();
}
|
73277d74bcb9ad0277f71530d82e5320acfdfe96.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
#define BLOCK_SIZE 10
void CheckCudaError(const char *e);
/* Tiled product kernel: each block loads one BLOCK_SIZE x BLOCK_SIZE tile of
 * A and B into shared memory and accumulates a partial dot product.
 * Fix: 'suma' was read uninitialized (undefined result); now zero-initialized.
 * NOTE(review): the flat index 'blockidx*B + threadx + blockidy*B + thready'
 * adds row and column offsets without a row stride — this does not address a
 * 2D matrix laid out row-major; confirm the intended indexing with the caller.
 */
__global__ void productMatrix(int *matrix_a, int *matrix_b, int *matrix_c)
{
int blockidx = blockIdx.x;
int blockidy = blockIdx.y;
int threadx = threadIdx.x;
int thready = threadIdx.y;
__shared__ int Asub[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int Bsub[BLOCK_SIZE][BLOCK_SIZE];
Asub[threadx][thready] = matrix_a[blockidx * BLOCK_SIZE + threadx + blockidy * BLOCK_SIZE + thready];
Bsub[threadx][thready] = matrix_b[blockidx * BLOCK_SIZE + threadx + blockidy * BLOCK_SIZE + thready];
__syncthreads();
int suma = 0;   // bug fix: accumulator was previously uninitialized
for (int i = 0; i < BLOCK_SIZE; ++i)
{
suma += Asub[i][thready]* Bsub[threadx][i];
}
__syncthreads();
matrix_c[blockidx * BLOCK_SIZE + threadx + blockidy * BLOCK_SIZE + thready] = suma;
}
// Driver: fills two host arrays, multiplies them on the device with
// productMatrix, copies the result back into h_a and prints it.
// NOTE(review): gridDim is (NumBlocks, NumBlocks) = (1000, 1000) with 10x10
// threads per block, but only num_elements = 10000 ints are allocated — the
// kernel's indexing will run far out of bounds; confirm the intended launch.
int main(){
// Pointers for host (h_) and device (d_) buffers.
int *h_a, *h_b;
int *d_a, *d_b, *d_c;
int NumBlocks = 100 * 100 / BLOCK_SIZE;
int num_elements = NumBlocks * BLOCK_SIZE;
// Allocate the host input buffers.
h_a = (int *) malloc(num_elements * sizeof(int));
h_b = (int *) malloc(num_elements * sizeof(int));
CheckCudaError("malloc_host_error");
// Fill the host buffers with test data.
for (int i = 0; i < num_elements; ++i)
{
h_a[i] = i;
h_b[i] = num_elements - 1 - i;
}
// Allocate the device buffers.
hipMalloc(&d_a, num_elements * sizeof(int));
hipMalloc(&d_b, num_elements * sizeof(int));
hipMalloc(&d_c, num_elements * sizeof(int));
CheckCudaError("malloc_device_error");
/* Copy the prepared host matrices to the device. */
hipMemcpy(d_a, h_a, num_elements * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, num_elements * sizeof(int), hipMemcpyHostToDevice);
CheckCudaError("memcpy_error");
free(h_b);
CheckCudaError("Free_host_error");
// Configure the launch: one 2D grid of 2D blocks.
dim3 gridDim (NumBlocks, NumBlocks);
dim3 blockDim (BLOCK_SIZE, BLOCK_SIZE);
// Launch the kernel.
hipLaunchKernelGGL(( productMatrix) , dim3(gridDim), dim3(blockDim) , 0, 0, d_a, d_b, d_c);
CheckCudaError("Calling_device_function_error");
/* Wait for all threads to finish before copying back. */
hipDeviceSynchronize();
CheckCudaError("Syncronize_threads_error");
// Copy the result back to the host (h_a is reused as the output buffer).
hipMemcpy(h_a, d_c, num_elements * sizeof(int), hipMemcpyDeviceToHost);
CheckCudaError("mempcy_host_error");
// Print the result.
for (int i = 0; i < num_elements; ++i) cout << h_a[i];
// Release device memory.
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Release host memory.
free(h_a);
CheckCudaError("free_device_error");
}
/* Print the caller-supplied label when the most recent runtime call failed.
 * Improvement: also prints the runtime's error string so the actual cause is
 * visible (previously only the label was shown). Note hipGetLastError also
 * clears the sticky error.
 */
void CheckCudaError(const char *e)
{
// Fetch (and clear) the last error recorded by the runtime.
hipError_t err = hipGetLastError();
// On failure, print the label plus the human-readable error description.
if(hipSuccess != err){
cout << e << ": " << hipGetErrorString(err) << endl;
}
}
| 73277d74bcb9ad0277f71530d82e5320acfdfe96.cu | #include <stdio.h>
#include <iostream>
#include <cuda.h>
using namespace std;
#define BLOCK_SIZE 10
void CheckCudaError(const char *e);
/* Tiled product kernel: each block loads one BLOCK_SIZE x BLOCK_SIZE tile of
 * A and B into shared memory and accumulates a partial dot product.
 * Fix: 'suma' was read uninitialized (undefined result); now zero-initialized.
 * NOTE(review): the flat index 'blockidx*B + threadx + blockidy*B + thready'
 * adds row and column offsets without a row stride — this does not address a
 * 2D matrix laid out row-major; confirm the intended indexing with the caller.
 */
__global__ void productMatrix(int *matrix_a, int *matrix_b, int *matrix_c)
{
int blockidx = blockIdx.x;
int blockidy = blockIdx.y;
int threadx = threadIdx.x;
int thready = threadIdx.y;
__shared__ int Asub[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int Bsub[BLOCK_SIZE][BLOCK_SIZE];
Asub[threadx][thready] = matrix_a[blockidx * BLOCK_SIZE + threadx + blockidy * BLOCK_SIZE + thready];
Bsub[threadx][thready] = matrix_b[blockidx * BLOCK_SIZE + threadx + blockidy * BLOCK_SIZE + thready];
__syncthreads();
int suma = 0;   // bug fix: accumulator was previously uninitialized
for (int i = 0; i < BLOCK_SIZE; ++i)
{
suma += Asub[i][thready]* Bsub[threadx][i];
}
__syncthreads();
matrix_c[blockidx * BLOCK_SIZE + threadx + blockidy * BLOCK_SIZE + thready] = suma;
}
// Driver: fills two host arrays, multiplies them on the device with
// productMatrix, copies the result back into h_a and prints it.
// NOTE(review): gridDim is (NumBlocks, NumBlocks) = (1000, 1000) with 10x10
// threads per block, but only num_elements = 10000 ints are allocated — the
// kernel's indexing will run far out of bounds; confirm the intended launch.
int main(){
// Pointers for host (h_) and device (d_) buffers.
int *h_a, *h_b;
int *d_a, *d_b, *d_c;
int NumBlocks = 100 * 100 / BLOCK_SIZE;
int num_elements = NumBlocks * BLOCK_SIZE;
// Allocate the host input buffers.
h_a = (int *) malloc(num_elements * sizeof(int));
h_b = (int *) malloc(num_elements * sizeof(int));
CheckCudaError("malloc_host_error");
// Fill the host buffers with test data.
for (int i = 0; i < num_elements; ++i)
{
h_a[i] = i;
h_b[i] = num_elements - 1 - i;
}
// Allocate the device buffers.
cudaMalloc(&d_a, num_elements * sizeof(int));
cudaMalloc(&d_b, num_elements * sizeof(int));
cudaMalloc(&d_c, num_elements * sizeof(int));
CheckCudaError("malloc_device_error");
/* Copy the prepared host matrices to the device. */
cudaMemcpy(d_a, h_a, num_elements * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, num_elements * sizeof(int), cudaMemcpyHostToDevice);
CheckCudaError("memcpy_error");
free(h_b);
CheckCudaError("Free_host_error");
// Configure the launch: one 2D grid of 2D blocks.
dim3 gridDim (NumBlocks, NumBlocks);
dim3 blockDim (BLOCK_SIZE, BLOCK_SIZE);
// Launch the kernel.
productMatrix <<< gridDim, blockDim >>> (d_a, d_b, d_c);
CheckCudaError("Calling_device_function_error");
/* Wait for all threads to finish before copying back. */
cudaThreadSynchronize();
CheckCudaError("Syncronize_threads_error");
// Copy the result back to the host (h_a is reused as the output buffer).
cudaMemcpy(h_a, d_c, num_elements * sizeof(int), cudaMemcpyDeviceToHost);
CheckCudaError("mempcy_host_error");
// Print the result.
for (int i = 0; i < num_elements; ++i) cout << h_a[i];
// Release device memory.
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory.
free(h_a);
CheckCudaError("free_device_error");
}
/* Print the caller-supplied label when the most recent runtime call failed.
 * Improvement: also prints cudaGetErrorString so the actual cause is visible
 * (previously only the label was shown). Note cudaGetLastError also clears
 * the sticky error.
 */
void CheckCudaError(const char *e)
{
// Fetch (and clear) the last error recorded by the runtime.
cudaError_t err = cudaGetLastError();
// On failure, print the label plus the human-readable error description.
if(cudaSuccess != err){
cout << e << ": " << cudaGetErrorString(err) << endl;
}
}
|
e14f9f50f82834e70e00ddcf575f7deeb753d646.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Seed one RNG state per thread: each thread initializes state[globalIdx]
// with the shared seed and its own subsequence index.
__global__ void initRandomizer(unsigned int seed, hiprandState_t* state){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(seed, idx, 0, &state[idx]);
} | e14f9f50f82834e70e00ddcf575f7deeb753d646.cu | #include "includes.h"
// Seed one RNG state per thread: each thread initializes state[globalIdx]
// with the shared seed and its own subsequence index.
__global__ void initRandomizer(unsigned int seed, curandState* state){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(seed, idx, 0, &state[idx]);
} |
5bdaeb778e13e6604906f0fb48979775cc54bde1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convolutionRowGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: for each matrix size / launch-config pair,
// warms up convolutionRowGPU, then times 1000 launches and prints the result.
// NOTE(review): the hipMalloc sizes are XSIZE*YSIZE BYTES, not
// XSIZE*YSIZE*sizeof(float) — the buffers look 4x undersized; confirm.
int main(int argc, char **argv) {
hipSetDevice(0);
// argv[1]: number of matrix sizes from matrices_ to benchmark.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Dst = NULL;
hipMalloc(&d_Dst, XSIZE*YSIZE);
float *d_Src = NULL;
hipMalloc(&d_Src, XSIZE*YSIZE);
float *d_Filter = NULL;
hipMalloc(&d_Filter, XSIZE*YSIZE);
int imageW = 1;
int imageH = 1;
int filterR = 2;
// Round the problem size up to a multiple of the block dimensions.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// First launch establishes the context; then synchronize before timing.
hipFree(0);hipLaunchKernelGGL((
convolutionRowGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,d_Filter,imageW,imageH,filterR);
hipDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
convolutionRowGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,d_Filter,imageW,imageH,filterR);
}
// Timed region: 1000 kernel launches.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
convolutionRowGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,d_Filter,imageW,imageH,filterR);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5bdaeb778e13e6604906f0fb48979775cc54bde1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convolutionRowGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: for each matrix size / launch-config pair,
// warms up convolutionRowGPU, then times 1000 launches and prints the result.
// NOTE(review): the cudaMalloc sizes are XSIZE*YSIZE BYTES, not
// XSIZE*YSIZE*sizeof(float) — the buffers look 4x undersized; confirm.
int main(int argc, char **argv) {
cudaSetDevice(0);
// argv[1]: number of matrix sizes from matrices_ to benchmark.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Dst = NULL;
cudaMalloc(&d_Dst, XSIZE*YSIZE);
float *d_Src = NULL;
cudaMalloc(&d_Src, XSIZE*YSIZE);
float *d_Filter = NULL;
cudaMalloc(&d_Filter, XSIZE*YSIZE);
int imageW = 1;
int imageH = 1;
int filterR = 2;
// Round the problem size up to a multiple of the block dimensions.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// First launch establishes the context; then synchronize before timing.
cudaFree(0);
convolutionRowGPU<<<gridBlock,threadBlock>>>(d_Dst,d_Src,d_Filter,imageW,imageH,filterR);
cudaDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
convolutionRowGPU<<<gridBlock,threadBlock>>>(d_Dst,d_Src,d_Filter,imageW,imageH,filterR);
}
// Timed region: 1000 kernel launches.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
convolutionRowGPU<<<gridBlock,threadBlock>>>(d_Dst,d_Src,d_Filter,imageW,imageH,filterR);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
87cd08d3afb705faec685eef80ae849247603dd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
const int INF = 10000000;
int *host_D;
int *dev_D;
int n, m;
// Read the graph: first line "n m", then m directed edges "a b v" (1-based).
// Initializes host_D as an n*n distance matrix (0 on the diagonal, INF
// elsewhere) before applying the edge weights.
void Input(char *inFileName) {
FILE *infile = fopen(inFileName, "r");
// Large stdio buffer to speed up the many fscanf calls.
setvbuf(infile, new char[1 << 20], _IOFBF, 1 << 20);
fscanf(infile, "%d %d", &n, &m);
host_D = (int*)malloc(n * n * sizeof(int));
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i == j) host_D[i * n + j] = 0;
else host_D[i * n + j] = INF;
}
}
while (--m >= 0) {
int a, b, v;
fscanf(infile, "%d %d %d", &a, &b, &v);
// Input vertices are 1-based; the matrix is 0-based.
host_D[(a - 1) * n + (b - 1)] = v;
}
fclose(infile);
}
// Write the n*n shortest-path matrix; unreachable pairs print as "INF".
void Output(char *outFileName) {
FILE *outfile = fopen(outFileName, "w");
// Large stdio buffer to speed up the many fprintf calls.
setvbuf(outfile, new char[1 << 20], _IOFBF, 1 << 20);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (host_D[i * n + j] >= INF) fprintf(outfile, "INF ");
else fprintf(outfile, "%d ", host_D[i * n + j]);
}
fprintf(outfile, "\n");
}
fclose(outfile);
}
// Blocked Floyd–Warshall, phase 1: relax the pivot block (k,k) in isolation.
// Launched with B*B threads and B*B ints of dynamic shared memory; out-of-
// range cells are padded with INF and never written back.
__global__ void func1(int n, int B, int k, int* arr) {
extern __shared__ int shared_memory[];
int* dBlock = shared_memory;
// (i, j): this thread's cell within the B x B tile.
int i = threadIdx.x / B;
int j = threadIdx.x % B;
int x = i + k * B;
int y = j + k * B;
dBlock[threadIdx.x] = (x < n && y < n)? arr[x * n + y]: INF;
for (int l = 0; l < B; l++) {
// Barrier before each relaxation step so all updates from step l-1 are visible.
__syncthreads();
int temp = dBlock[(i * B) + l] + dBlock[(l * B) + j];
if (dBlock[threadIdx.x] > temp) {
dBlock[threadIdx.x] = temp;
}
}
if (x < n && y < n) arr[x * n + y] = dBlock[threadIdx.x];
}
// Blocked Floyd–Warshall, phase 2: relax the pivot row and pivot column
// blocks. blockIdx.y selects row (0) vs column (1); the pivot block itself
// (blockIdx.x == k) is skipped. Needs 2*B*B ints of dynamic shared memory.
__global__ void func2(int n, int B, int k, int* arr) {
if (blockIdx.x == k) return;
extern __shared__ int shared_memory[];
int* dBlock = shared_memory;
int* cBlock = &shared_memory[B * B];
int i = threadIdx.x / B;
int j = threadIdx.x % B;
int x = i + k * B;
int y = j + k * B;
// dBlock holds the (already relaxed) pivot block (k,k).
dBlock[threadIdx.x] = (x < n && y < n)? arr[x * n + y]: INF;
// cBlock holds the target block: pivot-row block when y==0, column otherwise.
if (blockIdx.y != 0) x = i + blockIdx.x * B;
if (blockIdx.y == 0) y = j + blockIdx.x * B;
cBlock[threadIdx.x] = (x < n && y < n)? arr[x * n + y]: INF;
for (int l = 0; l < B; l++) {
__syncthreads();
int temp = (blockIdx.y == 0)? dBlock[i * B + l] + cBlock[l * B + j]: cBlock[i * B + l] + dBlock[l * B + j];
if (cBlock[threadIdx.x] > temp) {
cBlock[threadIdx.x] = temp;
}
}
if (x < n && y < n) arr[x * n + y] = cBlock[threadIdx.x];
}
// Blocked Floyd–Warshall, phase 3: relax every remaining block using the
// already-relaxed pivot-row and pivot-column blocks. Needs 2*B*B ints of
// dynamic shared memory; the accumulator stays in a register.
__global__ void func3(int n, int B, int k, int* arr) {
if (blockIdx.x == k || blockIdx.y == k) return;
extern __shared__ int shared_memory[];
int* dyBlock = shared_memory;
int* dxBlock = &shared_memory[B * B];
int i = threadIdx.x / B;
int j = threadIdx.x % B;
// dxBlock: pivot-row block (k, blockIdx.y).
int x = i + k * B;
int y = j + blockIdx.y * B;
dxBlock[threadIdx.x] = (x < n && y < n)? arr[x * n + y]: INF;
// dyBlock: pivot-column block (blockIdx.x, k).
x = i + blockIdx.x * B;
y = j + k * B;
dyBlock[threadIdx.x] = (x < n && y < n)? arr[x * n + y]: INF;
// (x, y): this thread's own cell in the target block.
x = i + blockIdx.x * B;
y = j + blockIdx.y * B;
int dist = (x < n && y < n)? arr[x * n + y]: INF;
// One barrier suffices: shared tiles are read-only during the loop.
__syncthreads();
for (int l = 0; l < B; l++) {
int temp = dyBlock[i * B + l] + dxBlock[l * B + j];
if (dist > temp) {
dist = temp;
}
}
if (x < n && y < n) arr[x * n + y] = dist;
}
// Run the three-phase blocked Floyd–Warshall on the device with block size B:
// upload host_D, iterate over every pivot block, download the result.
void Block(int B) {
hipMalloc(&dev_D, n * n * sizeof(int));
hipMemcpy(dev_D, host_D, n * n * sizeof(int), hipMemcpyHostToDevice);
// Number of B x B blocks along one dimension (ceiling division).
int round = (n + B - 1) / B;
dim3 bk1(1, 1);
dim3 bk2(round, 2);
dim3 bk3(round, round);
int gputhreads = B * B;
for (int k = 0; k < round; k++) {
// Phase 1: pivot block; phase 2: pivot row/column; phase 3: the rest.
hipLaunchKernelGGL(( func1), dim3(bk1), dim3(gputhreads), gputhreads * sizeof(int), 0, n, B, k, dev_D);
hipLaunchKernelGGL(( func2), dim3(bk2), dim3(gputhreads), 2 * gputhreads * sizeof(int), 0, n, B, k, dev_D);
hipLaunchKernelGGL(( func3), dim3(bk3), dim3(gputhreads), 2 * gputhreads * sizeof(int), 0, n, B, k, dev_D);
}
hipDeviceSynchronize();
hipMemcpy(host_D, dev_D, n * n * sizeof(int), hipMemcpyDeviceToHost);
}
// Usage: <input file> <output file> <block size B>.
int main(int argc, char **argv) {
Input(argv[1]);
int B = atoi(argv[3]);
Block(B);
Output(argv[2]);
return 0;
}
| 87cd08d3afb705faec685eef80ae849247603dd0.cu | #include <stdio.h>
#include <stdlib.h>
const int INF = 10000000;
int *host_D;
int *dev_D;
int n, m;
// Read the graph: first line "n m", then m directed edges "a b v" (1-based).
// Initializes host_D as an n*n distance matrix (0 on the diagonal, INF
// elsewhere) before applying the edge weights.
void Input(char *inFileName) {
FILE *infile = fopen(inFileName, "r");
// Large stdio buffer to speed up the many fscanf calls.
setvbuf(infile, new char[1 << 20], _IOFBF, 1 << 20);
fscanf(infile, "%d %d", &n, &m);
host_D = (int*)malloc(n * n * sizeof(int));
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i == j) host_D[i * n + j] = 0;
else host_D[i * n + j] = INF;
}
}
while (--m >= 0) {
int a, b, v;
fscanf(infile, "%d %d %d", &a, &b, &v);
// Input vertices are 1-based; the matrix is 0-based.
host_D[(a - 1) * n + (b - 1)] = v;
}
fclose(infile);
}
// Write the n*n shortest-path matrix; unreachable pairs print as "INF".
void Output(char *outFileName) {
FILE *outfile = fopen(outFileName, "w");
// Large stdio buffer to speed up the many fprintf calls.
setvbuf(outfile, new char[1 << 20], _IOFBF, 1 << 20);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (host_D[i * n + j] >= INF) fprintf(outfile, "INF ");
else fprintf(outfile, "%d ", host_D[i * n + j]);
}
fprintf(outfile, "\n");
}
fclose(outfile);
}
// Blocked Floyd–Warshall, phase 1: relax the pivot block (k,k) in isolation.
// Launched with B*B threads and B*B ints of dynamic shared memory; out-of-
// range cells are padded with INF and never written back.
__global__ void func1(int n, int B, int k, int* arr) {
extern __shared__ int shared_memory[];
int* dBlock = shared_memory;
// (i, j): this thread's cell within the B x B tile.
int i = threadIdx.x / B;
int j = threadIdx.x % B;
int x = i + k * B;
int y = j + k * B;
dBlock[threadIdx.x] = (x < n && y < n)? arr[x * n + y]: INF;
for (int l = 0; l < B; l++) {
// Barrier before each relaxation step so all updates from step l-1 are visible.
__syncthreads();
int temp = dBlock[(i * B) + l] + dBlock[(l * B) + j];
if (dBlock[threadIdx.x] > temp) {
dBlock[threadIdx.x] = temp;
}
}
if (x < n && y < n) arr[x * n + y] = dBlock[threadIdx.x];
}
// Blocked Floyd–Warshall, phase 2: relax the pivot row and pivot column
// blocks. blockIdx.y selects row (0) vs column (1); the pivot block itself
// (blockIdx.x == k) is skipped. Needs 2*B*B ints of dynamic shared memory.
__global__ void func2(int n, int B, int k, int* arr) {
if (blockIdx.x == k) return;
extern __shared__ int shared_memory[];
int* dBlock = shared_memory;
int* cBlock = &shared_memory[B * B];
int i = threadIdx.x / B;
int j = threadIdx.x % B;
int x = i + k * B;
int y = j + k * B;
// dBlock holds the (already relaxed) pivot block (k,k).
dBlock[threadIdx.x] = (x < n && y < n)? arr[x * n + y]: INF;
// cBlock holds the target block: pivot-row block when y==0, column otherwise.
if (blockIdx.y != 0) x = i + blockIdx.x * B;
if (blockIdx.y == 0) y = j + blockIdx.x * B;
cBlock[threadIdx.x] = (x < n && y < n)? arr[x * n + y]: INF;
for (int l = 0; l < B; l++) {
__syncthreads();
int temp = (blockIdx.y == 0)? dBlock[i * B + l] + cBlock[l * B + j]: cBlock[i * B + l] + dBlock[l * B + j];
if (cBlock[threadIdx.x] > temp) {
cBlock[threadIdx.x] = temp;
}
}
if (x < n && y < n) arr[x * n + y] = cBlock[threadIdx.x];
}
// Blocked Floyd–Warshall, phase 3: relax every remaining block using the
// already-relaxed pivot-row and pivot-column blocks. Needs 2*B*B ints of
// dynamic shared memory; the accumulator stays in a register.
__global__ void func3(int n, int B, int k, int* arr) {
if (blockIdx.x == k || blockIdx.y == k) return;
extern __shared__ int shared_memory[];
int* dyBlock = shared_memory;
int* dxBlock = &shared_memory[B * B];
int i = threadIdx.x / B;
int j = threadIdx.x % B;
// dxBlock: pivot-row block (k, blockIdx.y).
int x = i + k * B;
int y = j + blockIdx.y * B;
dxBlock[threadIdx.x] = (x < n && y < n)? arr[x * n + y]: INF;
// dyBlock: pivot-column block (blockIdx.x, k).
x = i + blockIdx.x * B;
y = j + k * B;
dyBlock[threadIdx.x] = (x < n && y < n)? arr[x * n + y]: INF;
// (x, y): this thread's own cell in the target block.
x = i + blockIdx.x * B;
y = j + blockIdx.y * B;
int dist = (x < n && y < n)? arr[x * n + y]: INF;
// One barrier suffices: shared tiles are read-only during the loop.
__syncthreads();
for (int l = 0; l < B; l++) {
int temp = dyBlock[i * B + l] + dxBlock[l * B + j];
if (dist > temp) {
dist = temp;
}
}
if (x < n && y < n) arr[x * n + y] = dist;
}
// Run the three-phase blocked Floyd–Warshall on the device with block size B:
// upload host_D, iterate over every pivot block, download the result.
// NOTE: cudaThreadSynchronize is deprecated (cudaDeviceSynchronize replaces it).
void Block(int B) {
cudaMalloc(&dev_D, n * n * sizeof(int));
cudaMemcpy(dev_D, host_D, n * n * sizeof(int), cudaMemcpyHostToDevice);
// Number of B x B blocks along one dimension (ceiling division).
int round = (n + B - 1) / B;
dim3 bk1(1, 1);
dim3 bk2(round, 2);
dim3 bk3(round, round);
int gputhreads = B * B;
for (int k = 0; k < round; k++) {
// Phase 1: pivot block; phase 2: pivot row/column; phase 3: the rest.
func1<<<bk1, gputhreads, gputhreads * sizeof(int)>>>(n, B, k, dev_D);
func2<<<bk2, gputhreads, 2 * gputhreads * sizeof(int)>>>(n, B, k, dev_D);
func3<<<bk3, gputhreads, 2 * gputhreads * sizeof(int)>>>(n, B, k, dev_D);
}
cudaThreadSynchronize();
cudaMemcpy(host_D, dev_D, n * n * sizeof(int), cudaMemcpyDeviceToHost);
}
// Usage: <input file> <output file> <block size B>.
int main(int argc, char **argv) {
Input(argv[1]);
int B = atoi(argv[3]);
Block(B);
Output(argv[2]);
return 0;
}
|
6864c3a0efff082c8d68a10fb08e66e04e1e46db.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 H2O.ai, Inc.
* License Apache License Version 2.0 (see LICENSE for details)
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <iostream>
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <unistd.h>
#include "solver/kmeans.h"
#include "kmeans_impl.h"
#include "kmeans_general.h"
#include "kmeans_h2o4gpu.h"
#include <random>
#include <algorithm>
#include <vector>
#include <set>
#include <csignal>
#include "../../common/utils.h"
#include <math.h>
/**
* METHODS FOR DATA COPYING AND GENERATION
*/
// Fill 'array' with m*n uniform values in [0, 1], generated host-side with
// rand() and copied to the device in one assignment.
// NOTE(review): rand() is not seeded here — reproducibility depends on the
// caller's srand state.
template<typename T>
void random_data(int verbose, thrust::device_vector<T> &array, int m, int n) {
thrust::host_vector<T> host_array(m * n);
for (int i = 0; i < m * n; i++) {
host_array[i] = (T) rand() / (T) RAND_MAX;
}
array = host_array;
}
/**
* Copies data from srcdata to array
* @tparam T
* @param verbose Logging level
* @param ord Column on row order of data
* @param array Destination array
* @param srcdata Source data
* @param q Shard number (from 0 to n_gpu)
* @param n
* @param npergpu
* @param d
*/
// Copy this GPU's shard of srcdata into a device vector. Column-major input
// ('c') is transposed into row-major on the fly; row-major input is copied
// verbatim. Shard q covers rows [q*npergpu, (q+1)*npergpu).
template<typename T>
void copy_data(int verbose, const char ord, thrust::device_vector<T> &array, const T *srcdata,
int q, int n, size_t npergpu, int d) {
if (ord == 'c') {
thrust::host_vector<T> host_array(npergpu * d);
log_debug(verbose, "Copy data COL ORDER -> ROW ORDER");
for (size_t i = 0; i < npergpu * d; i++) {
size_t indexi = i % d; // col
size_t indexj = i / d + q * npergpu; // row (shifted by which gpu)
host_array[i] = srcdata[indexi * n + indexj];
}
array = host_array;
} else {
log_debug(verbose, "Copy data ROW ORDER not changed");
// Contiguous shard: bulk-copy the q-th slice straight through.
thrust::host_vector<T> host_array(srcdata + q * npergpu * d, srcdata + q * npergpu * d + npergpu * d);
array = host_array;
}
}
/**
* Like copy_data but shuffles the data according to mapping from v
* @tparam T
* @param verbose
* @param v
* @param ord
* @param array
* @param srcdata
* @param q
* @param n
* @param npergpu
* @param d
*/
/**
 * Like copy_data but gathers rows through the permutation `v`, so row i of
 * the shard comes from source row v[q * npergpu + i].
 *
 * The permutation is now taken by const reference — the previous by-value
 * parameter deep-copied a vector of `n` ints on every call. Call sites are
 * unaffected.
 *
 * @param v        permutation of [0, n) mapping destination row -> source row
 * @param ord      'c' for column-major source, anything else for row-major
 * @param q        shard index (0 .. n_gpu-1)
 * @param n        total number of rows in srcdata
 * @param npergpu  rows handled by this shard
 * @param d        number of columns
 */
template<typename T>
void copy_data_shuffled(int verbose, const std::vector<int> &v, const char ord, thrust::device_vector<T> &array,
                        const T *srcdata, int q, int n, int npergpu, int d) {
    thrust::host_vector<T> host_array(npergpu * d);
    if (ord == 'c') {
        log_debug(verbose, "Copy data shuffle COL ORDER -> ROW ORDER");
        for (int i = 0; i < npergpu; i++) {
            for (int j = 0; j < d; j++) {
                host_array[i * d + j] = srcdata[v[q * npergpu + i] + j * n]; // shift by which gpu
            }
        }
    } else {
        log_debug(verbose, "Copy data shuffle ROW ORDER not changed");
        for (int i = 0; i < npergpu; i++) {
            for (int j = 0; j < d; j++) {
                host_array[i * d + j] = srcdata[v[q * npergpu + i] * d + j]; // shift by which gpu
            }
        }
    }
    array = host_array;
}
/**
 * Initializes centroids by copying the first k entries of the shuffle map
 * `v` applied to srcdata — i.e. k rows sampled without replacement.
 */
template<typename T>
void copy_centroids_shuffled(int verbose, std::vector<int> v, const char ord, thrust::device_vector<T> &array,
                             const T *srcdata, int n, int k, int d) {
    // A centroid pick is just a shuffled copy of k rows taken from shard 0.
    copy_data_shuffled(verbose, v, ord, array, srcdata, /*q=*/0, n, k, d);
}
/**
* Copies centroids from initial training set randomly.
* @tparam T
* @param verbose
* @param seed
* @param ord
* @param array
* @param srcdata
* @param q
* @param n
* @param npergpu
* @param d
* @param k
*/
/**
 * Samples k rows (with replacement) from srcdata as initial centroids.
 * A negative seed requests a non-deterministic seed from std::random_device.
 * Handles both column-major ('c') and row-major source layouts.
 */
template<typename T>
void random_centroids(int verbose, int seed, const char ord,
                      thrust::device_vector<T> &array, const T *srcdata,
                      int q, int n, int npergpu, int d, int k) {
    thrust::host_vector<T> staging(k * d);
    if (seed < 0) {
        std::random_device rd; // non-deterministic seed source
        seed = rd();
    }
    std::mt19937 gen(seed);
    std::uniform_int_distribution<> dis(0, n - 1); // random i in range from 0..n-1 (i.e. only 1 gpu gets centroids)
    const bool col_major = (ord == 'c');
    if (col_major) {
        log_debug(verbose, "Random centroids COL ORDER -> ROW ORDER");
    } else {
        log_debug(verbose, "Random centroids ROW ORDER not changed");
    }
    for (int row = 0; row < k; row++) {
        size_t src_row = dis(gen); // sampled source row
        for (size_t col = 0; col < d; col++) {
            staging[row * d + col] = col_major ? srcdata[src_row + col * n]
                                               : srcdata[src_row * d + col];
        }
    }
    array = staging;
}
/**
* KMEANS METHODS FIT, PREDICT, TRANSFORM
*/
#define __HBAR__ \
"----------------------------------------------------------------------------\n"
namespace h2o4gpukmeans {
template<typename T>
int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols, const char ord,
int k, int max_iterations, int init_from_data,
T threshold,
const T *srcdata, T **pred_centroids, int **pred_labels);
/**
 * Picks an index with probability proportional to weights[i] (scaled by
 * data[i] when `data` is non-null), using one uniform draw per candidate.
 * If no candidate passes its draw, returns the index with the highest
 * acceptance probability seen.
 */
template<typename T>
int pick_point_idx_weighted(
    int seed,
    std::vector<T> *data,
    thrust::host_vector<T> weights) {
    // Normalization constant: sum of (data[i] *) weights[i].
    T weighted_sum = 0;
    for (size_t idx = 0; idx < weights.size(); idx++) {
        T w = weights.data()[idx];
        weighted_sum += data ? data->data()[idx] * w : w;
    }

    std::mt19937 mt(seed);
    std::uniform_real_distribution<> dist(0.0, 1.0);

    T best_prob = 0.0;
    int best_prob_idx = 0;
    size_t i = 0;
    while (i < weights.size()) {
        T prob_threshold = (T) dist(mt); // fresh draw for every candidate
        T data_val = weights.data()[i];
        if (data) {
            data_val *= data->data()[i];
        }
        T prob_x = data_val / weighted_sum;
        if (prob_x > prob_threshold) {
            break; // accepted this candidate
        }
        if (prob_x >= best_prob) {
            best_prob = prob_x;
            best_prob_idx = (int) i;
        }
        ++i;
    }
    // Fallback to the best candidate when the loop ran to completion.
    return i == weights.size() ? best_prob_idx : (int) i;
}
/**
* Copies cols records, starting at position idx*cols from data to centroids. Removes them afterwards from data.
* Removes record from weights at position idx.
* @tparam T
* @param idx
* @param cols
* @param data
* @param weights
* @param centroids
*/
/**
 * Appends row `idx` of `data` (cols consecutive values) to `centroids` and
 * zeroes its weight so the same row cannot be picked again.
 */
template<typename T>
void add_centroid(int idx, int cols,
                  thrust::host_vector<T> &data,
                  thrust::host_vector<T> &weights,
                  std::vector<T> &centroids) {
    const int base = idx * cols;
    for (int c = 0; c < cols; c++) {
        centroids.push_back(data[base + c]);
    }
    weights[idx] = 0; // exclude this row from future weighted picks
}
/**
* K-Means++ algorithm
* @tparam T
* @param seed
* @param data
* @param weights
* @param k
* @param cols
* @param centroids
*/
/**
 * Weighted K-Means++ seeding, run entirely on the host.
 *
 * Picks k centroids from `data`: the first by weight alone, each subsequent
 * one with probability proportional to weight times the distance to the
 * nearest already-chosen centroid.
 *
 * Fixes a mis-encoded token in the signature: `&centroids` had been
 * corrupted to a cent-sign character, which does not compile.
 *
 * @param data      candidate points, row-major (rows x cols)
 * @param weights   per-row weights; chosen rows get weight 0
 * @param k         number of centroids to select
 * @param cols      dimensionality of each point
 * @param centroids output, assigned k * cols values
 */
template<typename T>
void kmeans_plus_plus(
    int verbose,
    int seed,
    thrust::host_vector<T> data,
    thrust::host_vector<T> weights,
    int k,
    int cols,
    thrust::host_vector<T> &centroids) {
    std::vector<T> std_centroids(0);
    std_centroids.reserve(k * cols);
    // First centroid: weighted pick by weight only.
    int centroid_idx = pick_point_idx_weighted(
        seed,
        (std::vector<T> *) NULL,
        weights
    );
    add_centroid(centroid_idx, cols, data, weights, std_centroids);
    // Distance from every point to its nearest chosen centroid so far.
    std::vector<T> best_pairwise_distances(data.size() / cols); // one for each row in data
    std::vector<T> std_data(data.begin(), data.end());
    compute_distances(std_data,
                      std_centroids,
                      best_pairwise_distances,
                      data.size() / cols, cols, 1);
    std::vector<T> curr_pairwise_distances(std_data.size() / cols);
    for (int iter = 0; iter < k - 1; iter++) {
        log_verbose(verbose, "KMeans++ - Iteraton %d/%d.", iter, k-1);
        // Next centroid: weighted by distance-to-nearest times weight.
        centroid_idx = pick_point_idx_weighted(
            seed,
            &best_pairwise_distances,
            weights
        );
        add_centroid(centroid_idx, cols, data, weights, std_centroids);
        // Compute distances to the newly added centroid only, then fold them
        // into the running per-row minimum.
        std::vector<T> most_recent_centroids;
        most_recent_centroids.reserve(cols);
        add_centroid(centroid_idx, cols, data, weights, most_recent_centroids);
        best_pairwise_distances[centroid_idx] = 0;
        compute_distances(std_data,
                          most_recent_centroids,
                          curr_pairwise_distances,
                          std_data.size() / cols, cols, 1);
        for (size_t i = 0; i < curr_pairwise_distances.size(); i++) {
            best_pairwise_distances[i] = ::min(curr_pairwise_distances[i], best_pairwise_distances[i]);
        }
        std::fill(curr_pairwise_distances.begin(), curr_pairwise_distances.end(), (T)0.0);
    }
    centroids.assign(std_centroids.begin(), std_centroids.end());
}
/**
 * Functor that, for one data row `idx`, takes the minimum absolute cost over
 * all candidate centroids of the current batch and folds it into the running
 * per-row minimum stored in min_costs_ptr.
 *
 * Exists as a named functor (rather than a lambda) because, per the comment
 * at the call site, nvcc rejects a __device__ lambda nested inside a host
 * lambda.
 */
template<typename T>
struct min_calc_functor {
  T* all_costs_ptr;   // batch costs, indexed [centroid * rows_per_run + row]
  T* min_costs_ptr;   // running per-row minimum, updated in place
  T max = std::numeric_limits<T>::max();  // identity element for the min fold
  int potential_k_rows;  // number of candidate centroids in this batch
  int rows_per_run;      // number of rows covered by this batch
  min_calc_functor(T* _all_costs_ptr, T* _min_costs_ptr, int _potential_k_rows, int _rows_per_run) {
    all_costs_ptr = _all_costs_ptr;
    min_costs_ptr = _min_costs_ptr;
    potential_k_rows = _potential_k_rows;
    rows_per_run = _rows_per_run;
  }
  __host__ __device__
  void operator()(int idx) const {
    T best = max;
    // Scan this row's cost against every candidate centroid in the batch.
    for (int j = 0; j < potential_k_rows; j++) {
      best = min(best, std::abs(all_costs_ptr[j * rows_per_run + idx]));
    }
    min_costs_ptr[idx] = min(min_costs_ptr[idx], best);
  }
};
/**
* K-Means|| initialization method implementation as described in "Scalable K-Means++".
*
* This is a probabilistic method, which tries to choose points as much spread out as possible as centroids.
*
* In case it finds more than k centroids a K-Means++ algorithm is ran on potential centroids to pick k best suited ones.
*
* http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf
*
* @tparam T
* @param verbose
* @param seed
* @param ord
* @param data
* @param data_dots
* @param centroids
* @param rows
* @param cols
* @param k
* @param num_gpu
* @param threshold
*/
template<typename T>
thrust::host_vector<T> kmeans_parallel(int verbose, int seed, const char ord,
                                       thrust::device_vector<T> **data,
                                       thrust::device_vector<T> **data_dots,
                                       size_t rows, int cols, int k, int num_gpu, T threshold) {
  if (seed < 0) {
    std::random_device rd;
    // BUGFIX: assign to the parameter. The previous `int seed = rd();`
    // declared a shadowing local, so the negative sentinel seed was silently
    // kept for the generator and the device lambdas below. (Compare the
    // correct `seed = rd();` in random_centroids.)
    seed = rd();
  }
  size_t rows_per_gpu = rows / num_gpu;
  std::mt19937 gen(seed);
  std::uniform_int_distribution<> dis(0, rows - 1);
  // Find the position (GPU idx and idx on that GPU) of the initial centroid
  int first_center = dis(gen);
  int first_center_idx = first_center % rows_per_gpu;
  int first_center_gpu = first_center / rows_per_gpu;
  log_verbose(verbose, "KMeans|| - Initial centroid %d on GPU %d.", first_center_idx, first_center_gpu);
  // Copies the initial centroid to potential centroids vector. That vector will store all potential centroids found
  // in the previous iteration.
  thrust::host_vector<T> h_potential_centroids(cols);
  std::vector<thrust::host_vector<T>> h_potential_centroids_per_gpu(num_gpu);
  CUDACHECK(hipSetDevice(first_center_gpu));
  thrust::copy(
      (*data[first_center_gpu]).begin() + first_center_idx * cols,
      (*data[first_center_gpu]).begin() + (first_center_idx + 1) * cols,
      h_potential_centroids.begin()
  );
  thrust::host_vector<T> h_all_potential_centroids = h_potential_centroids;
  // Initial the cost-to-potential-centroids and cost-to-closest-potential-centroid matrices. Initial cost is +infinity
  std::vector<thrust::device_vector<T>> d_min_costs(num_gpu);
  for (int q = 0; q < num_gpu; q++) {
    CUDACHECK(hipSetDevice(q));
    d_min_costs[q].resize(rows_per_gpu);
    thrust::fill(d_min_costs[q].begin(), d_min_costs[q].end(), std::numeric_limits<T>::max());
  }
  double t0 = timer<double>();
  // The original white paper claims 8 should be enough
  int max_iter = ::min(8, (int)(2 + log(k)) );
  for (int counter = 0; counter < max_iter; counter++) {
    log_verbose(verbose, "KMeans|| - Iteration %d.", counter);
    T total_min_cost = 0.0;
    int new_potential_centroids = 0;
    #pragma omp parallel for
    for (int i = 0; i < num_gpu; i++) {
      CUDACHECK(hipSetDevice(i));
      thrust::device_vector<T> d_potential_centroids = h_potential_centroids;
      int potential_k_rows = d_potential_centroids.size() / cols;
      // Compute all the costs to each potential centroid from previous iteration
      thrust::device_vector<T> centroid_dots(potential_k_rows);
      kmeans::detail::batch_calculate_distances(verbose, 0, rows_per_gpu, cols, potential_k_rows,
          *data[i], d_potential_centroids, *data_dots[i], centroid_dots,
          [&](int rows_per_run, size_t offset, thrust::device_vector<T> &pairwise_distances) {
            // Find the closest potential center cost for each row
            auto min_cost_counter = thrust::make_counting_iterator(0);
            auto all_costs_ptr = thrust::raw_pointer_cast(pairwise_distances.data());
            auto min_costs_ptr = thrust::raw_pointer_cast(d_min_costs[i].data() + offset);
            thrust::for_each(min_cost_counter,
                min_cost_counter + rows_per_run,
                // Functor instead of a lambda b/c nvcc is complaining about
                // nesting a __device__ lambda inside a regular lambda
                min_calc_functor<T>(all_costs_ptr, min_costs_ptr, potential_k_rows, rows_per_run));
          }
      );
    }
    for (int i = 0; i < num_gpu; i++) {
      CUDACHECK(hipSetDevice(i));
      total_min_cost += thrust::reduce(
          d_min_costs[i].begin(),
          d_min_costs[i].end()
      );
    }
    log_verbose(verbose, "KMeans|| - Total min cost from centers %g.", total_min_cost);
    if(total_min_cost == (T) 0.0) {
      continue;
    }
    std::set<int> copy_from_gpus;
    #pragma omp parallel for
    for (int i = 0; i < num_gpu; i++) {
      CUDACHECK(hipSetDevice(i));
      // Count how many potential centroids there are using probabilities
      // The further the row is from the closest cluster center the higher the probability
      auto pot_cent_filter_counter = thrust::make_counting_iterator(0);
      auto min_costs_ptr = thrust::raw_pointer_cast(d_min_costs[i].data());
      int pot_cent_num = thrust::count_if(
          pot_cent_filter_counter,
          pot_cent_filter_counter + rows_per_gpu, [=]__device__(int idx){
            thrust::default_random_engine rng(seed);
            thrust::uniform_real_distribution<> dist(0.0, 1.0);
            int device;
            hipGetDevice(&device);
            rng.discard(idx + device * rows_per_gpu);
            T prob_threshold = (T) dist(rng);
            T prob_x = (( 2.0 * k * min_costs_ptr[idx]) / total_min_cost);
            return prob_x > prob_threshold;
          }
      );
      log_debug(verbose, "KMeans|| - Potential centroids on GPU %d = %d.", i, pot_cent_num);
      if (pot_cent_num > 0) {
        copy_from_gpus.insert(i);
        // Copy all potential cluster centers
        thrust::device_vector<T> d_new_potential_centroids(pot_cent_num * cols);
        auto range = thrust::make_counting_iterator(0);
        thrust::copy_if(
            (*data[i]).begin(), (*data[i]).end(), range,
            d_new_potential_centroids.begin(), [=] __device__(int idx){
              int row = idx / cols;
              thrust::default_random_engine rng(seed);
              thrust::uniform_real_distribution<> dist(0.0, 1.0);
              int device;
              hipGetDevice(&device);
              rng.discard(row + device * rows_per_gpu);
              T prob_threshold = (T) dist(rng);
              T prob_x = (( 2.0 * k * min_costs_ptr[row]) / total_min_cost);
              return prob_x > prob_threshold;
            });
        h_potential_centroids_per_gpu[i].clear();
        h_potential_centroids_per_gpu[i].resize(d_new_potential_centroids.size());
        new_potential_centroids += d_new_potential_centroids.size();
        thrust::copy(
            d_new_potential_centroids.begin(),
            d_new_potential_centroids.end(),
            h_potential_centroids_per_gpu[i].begin()
        );
      }
    }
    log_verbose(verbose, "KMeans|| - New potential centroids %d.", new_potential_centroids);
    // Gather potential cluster centers from all GPUs
    if (new_potential_centroids > 0) {
      h_potential_centroids.clear();
      h_potential_centroids.resize(new_potential_centroids);
      int old_pot_centroids_size = h_all_potential_centroids.size();
      h_all_potential_centroids.resize(old_pot_centroids_size + new_potential_centroids);
      int offset = 0;
      for (int i = 0; i < num_gpu; i++) {
        if(copy_from_gpus.find(i) != copy_from_gpus.end()) {
          thrust::copy(
              h_potential_centroids_per_gpu[i].begin(),
              h_potential_centroids_per_gpu[i].end(),
              h_potential_centroids.begin() + offset
          );
          offset += h_potential_centroids_per_gpu[i].size();
        }
      }
      thrust::copy(
          h_potential_centroids.begin(),
          h_potential_centroids.end(),
          h_all_potential_centroids.begin() + old_pot_centroids_size
      );
    }
  }
  double timeloop = static_cast<double>(timer<double>() - t0);
  thrust::host_vector<T> final_centroids(0);
  int potential_centroids_num = h_all_potential_centroids.size() / cols;
  if (potential_centroids_num <= k) {
    final_centroids.resize(k * cols);
    thrust::copy(
        h_all_potential_centroids.begin(),
        h_all_potential_centroids.end(),
        final_centroids.begin()
    );
    // TODO what if potential_centroids_num < k ?? we don't want 0s
  } else {
    // If we found more than k potential cluster centers we need to take only a subset
    // This is done using a weighted k-means++ method, since the set should be very small
    // it should converge very fast and is all done on the CPU.
    thrust::host_vector<T> weights(potential_centroids_num);
    double tc0 = timer<double>();
    // Weights correspond to the number of data points assigned to each potential cluster center
    count_pts_per_centroid(
        verbose, num_gpu,
        rows_per_gpu, cols,
        data, data_dots,
        h_all_potential_centroids,
        weights
    );
    double timecount = static_cast<double>(timer<double>() - tc0);
    double tkpp = timer<double>();
    kmeans_plus_plus(
        verbose,
        seed,
        h_all_potential_centroids,
        weights,
        k, cols,
        final_centroids
    );
    double timekpp = static_cast<double>(timer<double>() - tkpp);
    log_verbose(verbose, "KMeans|| - Time loop: %g Time count: %g Time kpp: %g.", timeloop, timecount, timekpp);
  }
  return final_centroids;
}
// Set to 1 by the handler below; the solver loop polls it (passed as
// &flaggpu to kmeans::kmeans) so a long-running fit can stop early after
// SIGINT/SIGTERM.
volatile std::atomic_int flaggpu(0);
// Signal handler registered in kmeans_init; runs asynchronously.
// NOTE(review): fprintf is not async-signal-safe — presumably acceptable
// here since the process is terminating anyway; confirm.
inline void my_function_gpu(int sig) { // can be called asynchronously
  fprintf(stderr, "Caught signal %d. Terminating shortly.\n", sig);
  flaggpu = 1;
}
/**
 * Installs SIGINT/SIGTERM handlers and builds the list of GPU device ids
 * to use for this run.
 *
 * @param final_n_gpu out: number of GPUs actually selected
 * @param n_gputry    requested number of GPUs (clamped to visible GPUs and rows)
 * @param gpu_idtry   preferred first GPU id (wrapped modulo visible GPUs)
 * @param rows        number of data rows (upper bound on usable GPUs)
 * @return device ids, starting at gpu_id and wrapping around the visible set
 */
std::vector<int> kmeans_init(int verbose, int *final_n_gpu, int n_gputry, int gpu_idtry, int rows) {
  if (rows > std::numeric_limits<int>::max()) {
    fprintf(stderr, "rows > %d not implemented\n", std::numeric_limits<int>::max());
    fflush(stderr);
    exit(0);
  }
  std::signal(SIGINT, my_function_gpu);
  std::signal(SIGTERM, my_function_gpu);
  // no more gpus than visible gpus
  int n_gpuvis;
  hipGetDeviceCount(&n_gpuvis);
  // Guard: without at least one visible device, `gpu_idtry % n_gpuvis`
  // below would divide by zero.
  if (n_gpuvis < 1) {
    fprintf(stderr, "No GPU devices visible.\n");
    fflush(stderr);
    exit(0);
  }
  int n_gpu = ::min(n_gpuvis, n_gputry);
  // no more than rows
  n_gpu = ::min(n_gpu, rows);
  if (verbose) {
    std::cout << n_gpu << " gpus." << std::endl;
  }
  int gpu_id = gpu_idtry % n_gpuvis;
  // setup GPU list to use
  std::vector<int> dList(n_gpu);
  for (int idx = 0; idx < n_gpu; idx++) {
    int device_idx = (gpu_id + idx) % n_gpuvis;
    dList[idx] = device_idx;
  }
  *final_n_gpu = n_gpu;
  return dList;
}
// Stores the problem description (data pointer, k clusters, n rows, d cols).
// No copy of A is made; the caller keeps ownership.
template<typename T>
H2O4GPUKMeans<T>::H2O4GPUKMeans(const T *A, int k, int n, int d)
    : _A(A), _k(k), _n(n), _d(d) {}
/**
 * Fits k-means over one or more GPUs.
 *
 * Shards the data row-wise across n_gpu devices, initializes centroids
 * either randomly or via K-Means||, runs kmeans::kmeans, and returns the
 * resulting centroids/labels through out-pointers into heap-allocated
 * host vectors (ownership leaks by design — see TODO below).
 *
 * @param seed            >= 0 for deterministic behavior, < 0 for time-based
 * @param ord             'c' column-major input, otherwise row-major
 * @param init_from_data  0 = random init, 1 = K-Means|| init
 * @param pred_centroids  out: k*cols centroids (host)
 * @param pred_labels     out: per-row cluster assignments (host)
 * @return number of iterations run, or negative on failure
 */
template<typename T>
int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry,
               size_t rows, size_t cols, const char ord,
               int k, int max_iterations, int init_from_data,
               T threshold,
               const T *srcdata, T **pred_centroids, int **pred_labels) {
  // init random seed if use the C function rand()
  if (seed >= 0) {
    srand(seed);
  } else {
    srand(unsigned(time(NULL)));
  }
  // no more clusters than rows
  if (k > rows) {
    k = static_cast<int>(rows);
    fprintf(stderr, "Number of clusters adjusted to be equal to number of rows.\n");
    fflush(stderr);
  }
  int n_gpu;
  std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
  double t0t = timer<double>();
  // Per-GPU device buffers (variable-length arrays of pointers — GCC extension).
  thrust::device_vector<T> *data[n_gpu];
  thrust::device_vector<int> *labels[n_gpu];
  thrust::device_vector<T> *d_centroids[n_gpu];
  thrust::device_vector<T> *data_dots[n_gpu];
  #pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(hipSetDevice(dList[q]));
    data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
    d_centroids[q] = new thrust::device_vector<T>(k * cols);
    data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
    kmeans::detail::labels_init();
  }
  log_debug(verbose, "Number of points: %d", rows);
  log_debug(verbose, "Number of dimensions: %d", cols);
  log_debug(verbose, "Number of clusters: %d", k);
  log_debug(verbose, "Max. number of iterations: %d", max_iterations);
  log_debug(verbose, "Stopping threshold: %d", threshold);
  // Row permutation used for shuffled centroid initialization.
  std::vector<int> v(rows);
  std::iota(std::begin(v), std::end(v), 0); // Fill with 0, 1, ..., rows.
  if (seed >= 0) {
    std::shuffle(v.begin(), v.end(), std::default_random_engine(seed));
  } else {
    std::random_shuffle(v.begin(), v.end());
  }
  // Copy the data to devices
  #pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(hipSetDevice(dList[q]));
    if (verbose) { std::cout << "Copying data to device: " << dList[q] << std::endl; }
    copy_data(verbose, ord, *data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
    // Pre-compute the data matrix norms
    kmeans::detail::make_self_dots(rows / n_gpu, cols, *data[q], *data_dots[q]);
  }
  // Get random points as centroids
  int bytecount = cols * k * sizeof(T); // all centroids
  if (0 == init_from_data) {
    log_debug(verbose, "KMeans - Using random initialization.");
    // Initialize on one "master" GPU, then broadcast peer-to-peer.
    int masterq = 0;
    CUDACHECK(hipSetDevice(dList[masterq]));
    copy_centroids_shuffled(verbose, v, ord, *d_centroids[masterq], &srcdata[0], rows, k, cols);
    // Copy centroids to all devices
    std::vector < hipStream_t * > streams;
    streams.resize(n_gpu);
    #pragma omp parallel for
    for (int q = 0; q < n_gpu; q++) {
      if (q == masterq) continue;
      CUDACHECK(hipSetDevice(dList[q]));
      if (verbose > 0) {
        std::cout << "Copying centroid data to device: " << dList[q] << std::endl;
      }
      // NOTE(review): streams[q] is malloc'd and never free'd; also the
      // stream is destroyed below without an explicit synchronize after the
      // async peer copy — TODO confirm the destroy's implicit ordering is
      // relied upon intentionally.
      streams[q] = reinterpret_cast<hipStream_t *>(malloc(sizeof(hipStream_t)));
      hipStreamCreate(streams[q]);
      hipMemcpyPeerAsync(thrust::raw_pointer_cast(&(*d_centroids[q])[0]),
                         dList[q],
                         thrust::raw_pointer_cast(&(*d_centroids[masterq])[0]),
                         dList[masterq],
                         bytecount,
                         *(streams[q]));
    }
    //#pragma omp parallel for
    for (int q = 0; q < n_gpu; q++) {
      if (q == masterq) continue;
      hipSetDevice(dList[q]);
      hipStreamDestroy(*(streams[q]));
#if(DEBUGKMEANS)
      thrust::host_vector<T> h_centroidq=*d_centroids[q];
      for(int ii=0;ii<k*d;ii++){
        fprintf(stderr,"q=%d initcent[%d]=%g\n",q,ii,h_centroidq[ii]); fflush(stderr);
      }
#endif
    }
  } else if (1 == init_from_data) { // kmeans||
    log_debug(verbose, "KMeans - Using K-Means|| initialization.");
    thrust::host_vector<T> final_centroids = kmeans_parallel(verbose, seed, ord, data, data_dots, rows, cols, k, n_gpu, threshold);
    // Broadcast the host-side K-Means|| result to every device.
    #pragma omp parallel for
    for (int q = 0; q < n_gpu; q++) {
      CUDACHECK(hipSetDevice(dList[q]));
      hipMemcpy(
          thrust::raw_pointer_cast(&(*d_centroids[q])[0]),
          thrust::raw_pointer_cast(&final_centroids[0]),
          bytecount,
          hipMemcpyHostToDevice);
    }
  }
  #pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(hipSetDevice(dList[q]));
    labels[q] = new thrust::device_vector<int>(rows / n_gpu);
  }
  double timetransfer = static_cast<double>(timer<double>() - t0t);
  double t0 = timer<double>();
  // Main solver; &flaggpu lets the SIGINT/SIGTERM handler stop it early.
  int iter = kmeans::kmeans<T>(verbose, &flaggpu, rows, cols, k, data, labels, d_centroids, data_dots,
                               dList, n_gpu, max_iterations, threshold, true);
  if (iter < 0) {
    log_error(verbose, "KMeans algorithm failed.");
    return iter;
  }
  double timefit = static_cast<double>(timer<double>() - t0);
  double t1 = timer<double>();
  // copy result of centroids (sitting entirely on each device) back to host
  // TODO FIXME: When do delete ctr and h_labels memory???
  thrust::host_vector<T> *ctr = new thrust::host_vector<T>(*d_centroids[0]);
  *pred_centroids = ctr->data();
  // copy assigned labels
  // NOTE(review): h_labels is constructed with `rows` elements and then
  // grown by insert below, so it ends up holding 2*rows entries with only
  // the first `rows` meaningful — TODO confirm this is intended (looks like
  // copy-at-offset was meant instead of insert).
  thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(rows);
  //#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    int offset = labels[q]->size()*q;
    h_labels->insert(h_labels->begin() + offset, labels[q]->begin(), labels[q]->end());
  }
  *pred_labels = h_labels->data();
  // debug
  if (verbose >= H2O4GPU_LOG_VERBOSE) {
    for (unsigned int ii = 0; ii < k; ii++) {
      fprintf(stderr, "ii=%d of k=%d ", ii, k);
      for (unsigned int jj = 0; jj < cols; jj++) {
        fprintf(stderr, "%g ", (*pred_centroids)[cols * ii + jj]);
      }
      fprintf(stderr, "\n");
      fflush(stderr);
    }
  }
  // Release per-GPU buffers.
  #pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(hipSetDevice(dList[q]));
    delete (data[q]);
    delete (labels[q]);
    delete (d_centroids[q]);
    delete (data_dots[q]);
    kmeans::detail::labels_close();
  }
  double timecleanup = static_cast<double>(timer<double>() - t1);
  if (verbose) {
    std::cout << " Time fit: " << timefit << " s" << std::endl;
    fprintf(stderr, "Timetransfer: %g Timefit: %g Timecleanup: %g\n", timetransfer, timefit, timecleanup);
    fflush(stderr);
  }
  return 0;
}
/**
 * Assigns each row of srcdata to its nearest centroid, sharding rows across
 * GPUs. Labels are returned through pred_labels (heap-allocated host vector).
 *
 * Fixes: (1) a mis-encoded `&centroids[0]` token (cent-sign mojibake) that
 * does not compile; (2) the previous `h_labels->insert` inside the
 * `#pragma omp parallel for` loop raced on the shared host vector — the
 * vector is now pre-sized and each GPU writes its own disjoint slice, which
 * also makes label order deterministic.
 *
 * @return 0 on success
 */
template<typename T>
int kmeans_predict(int verbose, int gpu_idtry, int n_gputry,
                   size_t rows, size_t cols,
                   const char ord, int k,
                   const T *srcdata, const T *centroids, int **pred_labels) {
  // Print centroids
  if (verbose >= H2O4GPU_LOG_VERBOSE) {
    std::cout << std::endl;
    for (int i = 0; i < cols * k; i++) {
      std::cout << centroids[i] << " ";
      if (i % cols == 1) {
        std::cout << std::endl;
      }
    }
  }
  int n_gpu;
  std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
  thrust::device_vector<T> *d_data[n_gpu];
  thrust::device_vector<T> *d_centroids[n_gpu];
  thrust::device_vector<T> *data_dots[n_gpu];
  thrust::device_vector<T> *centroid_dots[n_gpu];
  // Pre-sized; each GPU fills slice [q*(rows/n_gpu), (q+1)*(rows/n_gpu)).
  thrust::host_vector<int> *h_labels = new thrust::host_vector<int>((rows / n_gpu) * n_gpu);
  #pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(hipSetDevice(dList[q]));
    kmeans::detail::labels_init();
    data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
    centroid_dots[q] = new thrust::device_vector<T>(k);
    d_centroids[q] = new thrust::device_vector<T>(k * cols);
    d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
    // Centroids are always supplied row-major.
    copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols);
    copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
    kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]);
    thrust::device_vector<int> d_labels(rows / n_gpu);
    kmeans::detail::batch_calculate_distances(verbose, q, rows / n_gpu, cols, k,
        *d_data[q], *d_centroids[q], *data_dots[q], *centroid_dots[q],
        [&](int n, size_t offset, thrust::device_vector<T> &pairwise_distances) {
          kmeans::detail::relabel(n, k, pairwise_distances, d_labels, offset);
        }
    );
    // Disjoint destination range per GPU: no synchronization needed.
    thrust::copy(d_labels.begin(), d_labels.end(),
                 h_labels->begin() + q * (rows / n_gpu));
  }
  *pred_labels = h_labels->data();
  #pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    safe_cuda(hipSetDevice(dList[q]));
    kmeans::detail::labels_close();
    delete (data_dots[q]);
    delete (centroid_dots[q]);
    delete (d_centroids[q]);
    delete (d_data[q]);
  }
  return 0;
}
/**
 * Computes the full rows x k distance matrix from srcdata to the given
 * centroids, sharded across GPUs; the result is returned through preds.
 *
 * Fixes: (1) a mis-encoded `&centroids[0]` token (cent-sign mojibake) that
 * does not compile; (2) the previous `h_pairwise_distances->insert` inside
 * the `#pragma omp parallel for` loop raced on the shared host vector — it
 * is now pre-sized with one disjoint slice per GPU; (3) the verbose dump
 * iterated rows*cols over a rows*k buffer, over-reading when cols > k.
 *
 * @return 0 on success
 */
template<typename T>
int kmeans_transform(int verbose,
                     int gpu_idtry, int n_gputry,
                     size_t rows, size_t cols, const char ord, int k,
                     const T *srcdata, const T *centroids,
                     T **preds) {
  // Print centroids
  if (verbose >= H2O4GPU_LOG_VERBOSE) {
    std::cout << std::endl;
    for (int i = 0; i < cols * k; i++) {
      std::cout << centroids[i] << " ";
      if (i % cols == 1) {
        std::cout << std::endl;
      }
    }
  }
  int n_gpu;
  std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
  thrust::device_vector<T> *d_data[n_gpu];
  thrust::device_vector<T> *d_centroids[n_gpu];
  thrust::device_vector<T> *d_pairwise_distances[n_gpu];
  thrust::device_vector<T> *data_dots[n_gpu];
  thrust::device_vector<T> *centroid_dots[n_gpu];
  #pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(hipSetDevice(dList[q]));
    kmeans::detail::labels_init();
    data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
    centroid_dots[q] = new thrust::device_vector<T>(k);
    d_pairwise_distances[q] = new thrust::device_vector<T>(rows / n_gpu * k);
    d_centroids[q] = new thrust::device_vector<T>(k * cols);
    d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
    // Centroids are always supplied row-major.
    copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols);
    copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
    kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]);
    // TODO batch this
    kmeans::detail::calculate_distances(verbose, q, rows / n_gpu, cols, k,
                                        *d_data[q], 0, *d_centroids[q], *data_dots[q],
                                        *centroid_dots[q], *d_pairwise_distances[q]);
  }
  // Move the resulting distances into host memory from all devices.
  // Pre-sized; each GPU fills a disjoint slice of (rows/n_gpu)*k entries.
  const size_t per_gpu = (rows / n_gpu) * k;
  thrust::host_vector<T> *h_pairwise_distances = new thrust::host_vector<T>(per_gpu * n_gpu);
  #pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    safe_cuda(hipSetDevice(dList[q]));
    thrust::copy(d_pairwise_distances[q]->begin(),
                 d_pairwise_distances[q]->end(),
                 h_pairwise_distances->begin() + q * per_gpu);
  }
  *preds = h_pairwise_distances->data();
  // Print the distance matrix (rows x k).
  if (verbose >= H2O4GPU_LOG_VERBOSE) {
    std::cout << std::endl;
    for (size_t i = 0; i < per_gpu * n_gpu; i++) {
      std::cout << h_pairwise_distances->data()[i] << " ";
      if (i % k == 1) {
        std::cout << std::endl;
      }
    }
  }
  #pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    safe_cuda(hipSetDevice(dList[q]));
    kmeans::detail::labels_close();
    delete (d_pairwise_distances[q]);
    delete (data_dots[q]);
    delete (centroid_dots[q]);
    delete (d_centroids[q]);
    delete (d_data[q]);
  }
  return 0;
}
/**
 * Single entry point for the dense k-means solver: routes to fit
 * (dopredict == 0) or predict (dopredict != 0).
 */
template<typename T>
int makePtr_dense(int dopredict, int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols,
                  const char ord, int k, int max_iterations, int init_from_data,
                  T threshold, const T *srcdata, const T *centroids,
                  T **pred_centroids, int **pred_labels) {
    return dopredict == 0
               ? kmeans_fit(verbose, seed, gpu_idtry, n_gputry, rows, cols,
                            ord, k, max_iterations, init_from_data, threshold,
                            srcdata, pred_centroids, pred_labels)
               : kmeans_predict(verbose, gpu_idtry, n_gputry, rows, cols,
                                ord, k,
                                srcdata, centroids, pred_labels);
}
template int
makePtr_dense<float>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols,
const char ord, int k, int max_iterations, int init_from_data,
float threshold, const float *srcdata,
const float *centroids, float **pred_centroids, int **pred_labels);
template int
makePtr_dense<double>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols,
const char ord, int k, int max_iterations, int init_from_data,
double threshold, const double *srcdata,
const double *centroids, double **pred_centroids, int **pred_labels);
template int kmeans_fit<float>(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k, int max_iterations,
int init_from_data, float threshold,
const float *srcdata,
float **pred_centroids, int **pred_labels);
template int kmeans_fit<double>(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k, int max_iterations,
int init_from_data, double threshold,
const double *srcdata,
double **pred_centroids, int **pred_labels);
template int kmeans_predict<float>(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k,
const float *srcdata, const float *centroids, int **pred_labels);
template int kmeans_predict<double>(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k,
const double *srcdata, const double *centroids, int **pred_labels);
template int kmeans_transform<float>(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const float *src_data, const float *centroids,
float **preds);
template int kmeans_transform<double>(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const double *src_data, const double *centroids,
double **preds);
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE == 1
template
class H2O4GPUKMeans<double>;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE == 1
template
class H2O4GPUKMeans<float>;
#endif
} // namespace h2o4gpukmeans
/*
* Interface for other languages
*/
// Fit and Predict
// C-linkage entry point for the language bindings: fit (dopredict == 0) or
// predict (dopredict != 0) k-means on float data. Thin forwarder to
// h2o4gpukmeans::makePtr_dense<float>.
int make_ptr_float_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n,
                          const char ord, int k, int max_iterations, int init_from_data,
                          float threshold, const float *srcdata,
                          const float *centroids, float **pred_centroids, int **pred_labels) {
  return h2o4gpukmeans::makePtr_dense<float>(dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k,
                                             max_iterations, init_from_data, threshold,
                                             srcdata, centroids, pred_centroids, pred_labels);
}
// C-linkage entry point for the language bindings: fit (dopredict == 0) or
// predict (dopredict != 0) k-means on double data. Thin forwarder to
// h2o4gpukmeans::makePtr_dense<double>.
int make_ptr_double_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n,
                           const char ord, int k, int max_iterations, int init_from_data,
                           double threshold, const double *srcdata,
                           const double *centroids, double **pred_centroids, int **pred_labels) {
  return h2o4gpukmeans::makePtr_dense<double>(dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k,
                                              max_iterations, init_from_data, threshold,
                                              srcdata, centroids, pred_centroids, pred_labels);
}
// Transform
// C-linkage transform entry point (float): computes the m x k distance
// matrix from src_data to centroids. Thin forwarder to
// h2o4gpukmeans::kmeans_transform<float>.
int kmeans_transform_float(int verbose,
                           int gpu_id, int n_gpu,
                           size_t m, size_t n, const char ord, int k,
                           const float *src_data, const float *centroids,
                           float **preds) {
  return h2o4gpukmeans::kmeans_transform<float>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds);
}
// C-linkage transform entry point (double): computes the m x k distance
// matrix from src_data to centroids. Thin forwarder to
// h2o4gpukmeans::kmeans_transform<double>.
int kmeans_transform_double(int verbose,
                            int gpu_id, int n_gpu,
                            size_t m, size_t n, const char ord, int k,
                            const double *src_data, const double *centroids,
                            double **preds) {
  return h2o4gpukmeans::kmeans_transform<double>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds);
}
| 6864c3a0efff082c8d68a10fb08e66e04e1e46db.cu | /*!
* Copyright 2017 H2O.ai, Inc.
* License Apache License Version 2.0 (see LICENSE for details)
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <iostream>
#include "cuda.h"
#include <cstdlib>
#include <unistd.h>
#include "solver/kmeans.h"
#include "kmeans_impl.h"
#include "kmeans_general.h"
#include "kmeans_h2o4gpu.h"
#include <random>
#include <algorithm>
#include <vector>
#include <set>
#include <csignal>
#include "../../common/utils.h"
#include <math.h>
/**
* METHODS FOR DATA COPYING AND GENERATION
*/
/**
 * Fills `array` with m*n pseudo-random values in [0, 1].
 * Values are generated on the host with rand() and copied to the device once.
 * `verbose` is accepted for signature symmetry with the other fill helpers.
 */
template<typename T>
void random_data(int verbose, thrust::device_vector<T> &array, int m, int n) {
    const size_t count = static_cast<size_t>(m) * n;
    thrust::host_vector<T> staging(count);
    for (size_t idx = 0; idx < count; ++idx) {
        staging[idx] = static_cast<T>(rand()) / static_cast<T>(RAND_MAX);
    }
    array = staging;
}
/**
* Copies data from srcdata to array
* @tparam T
* @param verbose Logging level
* @param ord Column on row order of data
* @param array Destination array
* @param srcdata Source data
* @param q Shard number (from 0 to n_gpu)
* @param n
* @param npergpu
* @param d
*/
/**
 * Copies the q-th shard of srcdata into a device vector, converting
 * column-major ('c') input to row-major layout on the fly.
 *
 * @param ord     'c' for column-major source, anything else for row-major
 * @param q       shard index (0 .. n_gpu-1)
 * @param n       total number of rows in srcdata
 * @param npergpu rows handled by this shard
 * @param d       number of columns
 */
template<typename T>
void copy_data(int verbose, const char ord, thrust::device_vector<T> &array, const T *srcdata,
               int q, int n, size_t npergpu, int d) {
  if (ord != 'c') {
    log_debug(verbose, "Copy data ROW ORDER not changed");
    // Row-major input: the shard is already contiguous, copy it straight through.
    const T *shard_begin = srcdata + q * npergpu * d;
    thrust::host_vector<T> host_array(shard_begin, shard_begin + npergpu * d);
    array = host_array;
  } else {
    log_debug(verbose, "Copy data COL ORDER -> ROW ORDER");
    thrust::host_vector<T> host_array(npergpu * d);
    for (size_t i = 0; i < npergpu * d; i++) {
      size_t col = i % d;                // column within the row
      size_t row = i / d + q * npergpu;  // global row, shifted by shard
      host_array[i] = srcdata[col * n + row];
    }
    array = host_array;
  }
}
/**
* Like copy_data but shuffles the data according to mapping from v
* @tparam T
* @param verbose
* @param v
* @param ord
* @param array
* @param srcdata
* @param q
* @param n
* @param npergpu
* @param d
*/
template<typename T>
void copy_data_shuffled(int verbose, std::vector<int> v, const char ord, thrust::device_vector<T> &array,
                        const T *srcdata, int q, int n, int npergpu, int d) {
  // Like copy_data, but rows are gathered through the permutation `v`:
  // destination row i holds source row v[q * npergpu + i]. Output is always
  // row-major on the device.
  thrust::host_vector<T> host_array(npergpu * d);
  if (ord == 'c') {
    // Column-major source: gather + transpose in one pass.
    log_debug(verbose, "Copy data shuffle COL ORDER -> ROW ORDER");
    for (int i = 0; i < npergpu; i++) {
      for (size_t j = 0; j < d; j++) {
        host_array[i * d + j] = srcdata[v[q * npergpu + i] + j * n]; // shift by which gpu
      }
    }
  } else {
    // Row-major source: plain row gather.
    log_debug(verbose, "Copy data shuffle ROW ORDER not changed");
    for (int i = 0; i < npergpu; i++) {
      for (size_t j = 0; j < d; j++) {
        host_array[i * d + j] = srcdata[v[q * npergpu + i] * d + j]; // shift by which gpu
      }
    }
  }
  array = host_array;
}
template<typename T>
void copy_centroids_shuffled(int verbose, std::vector<int> v, const char ord, thrust::device_vector<T> &array,
                             const T *srcdata, int n, int k, int d) {
  // Takes the first k entries of the shuffled index mapping `v` as the
  // initial centroids (shard q = 0, npergpu = k).
  copy_data_shuffled(verbose, v, ord, array, srcdata, 0, n, k, d);
}
/**
* Copies centroids from initial training set randomly.
* @tparam T
* @param verbose
* @param seed
* @param ord
* @param array
* @param srcdata
* @param q
* @param n
* @param npergpu
* @param d
* @param k
*/
template<typename T>
void random_centroids(int verbose, int seed, const char ord,
                      thrust::device_vector<T> &array, const T *srcdata,
                      int q, int n, int npergpu, int d, int k) {
  // Samples k rows of srcdata uniformly at random (with replacement, since
  // the same index can be drawn twice) and uploads them row-major to `array`.
  // A negative seed requests a nondeterministic seed from the OS.
  thrust::host_vector<T> host_array(k * d);
  if (seed < 0) {
    std::random_device rd; //Will be used to obtain a seed for the random number engine
    seed = rd();
  }
  std::mt19937 gen(seed);
  std::uniform_int_distribution<> dis(0, n - 1); // random i in range from 0..n-1 (i.e. only 1 gpu gets centroids)
  if (ord == 'c') {
    // Column-major source: gather the sampled row with stride n.
    log_debug(verbose, "Random centroids COL ORDER -> ROW ORDER");
    for (int i = 0; i < k; i++) { // clusters
      size_t reali = dis(gen); // + q*npergpu; // row sampled (called indexj above)
      for (size_t j = 0; j < d; j++) { // cols
        host_array[i * d + j] = srcdata[reali + j * n];
      }
    }
  } else {
    // Row-major source: copy the sampled row contiguously.
    log_debug(verbose, "Random centroids ROW ORDER not changed");
    for (int i = 0; i < k; i++) { // rows
      size_t reali = dis(gen); // + q*npergpu ; // row sampled
      for (size_t j = 0; j < d; j++) { // cols
        host_array[i * d + j] = srcdata[reali * d + j];
      }
    }
  }
  array = host_array;
}
/**
* KMEANS METHODS FIT, PREDICT, TRANSFORM
*/
#define __HBAR__ \
"----------------------------------------------------------------------------\n"
namespace h2o4gpukmeans {
template<typename T>
int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols, const char ord,
int k, int max_iterations, int init_from_data,
T threshold,
const T *srcdata, T **pred_centroids, int **pred_labels);
/**
 * Weighted random index selection, used by K-Means++.
 *
 * Each index i is accepted when its normalized mass (weights[i], optionally
 * scaled by data[i] when `data` is non-null) exceeds a fresh uniform draw.
 * If no index passes its stochastic test, the index with the highest
 * acceptance probability is returned as a deterministic fallback.
 */
template<typename T>
int pick_point_idx_weighted(
    int seed,
    std::vector<T> *data,
    thrust::host_vector<T> weights) {
  // Total mass used to normalize individual probabilities.
  T total_mass = 0;
  for (int idx = 0; idx < weights.size(); idx++) {
    T mass = weights.data()[idx];
    if (data) {
      mass *= data->data()[idx];
    }
    total_mass += mass;
  }
  std::mt19937 rng(seed);
  std::uniform_real_distribution<> unif(0.0, 1.0);
  T top_prob = 0.0;
  int top_idx = 0;
  for (int idx = 0; idx < weights.size(); idx++) {
    // One uniform draw per candidate (same RNG sequence as before).
    T accept_threshold = (T) unif(rng);
    T mass = weights.data()[idx];
    if (data) {
      mass *= data->data()[idx];
    }
    T prob = mass / total_mass;
    if (prob > accept_threshold) {
      return idx;  // accepted by the stochastic test
    }
    if (prob >= top_prob) {  // track the most likely candidate as fallback
      top_prob = prob;
      top_idx = idx;
    }
  }
  return top_idx;
}
/**
* Copies cols records, starting at position idx*cols from data to centroids. Removes them afterwards from data.
* Removes record from weights at position idx.
* @tparam T
* @param idx
* @param cols
* @param data
* @param weights
* @param centroids
*/
/**
 * Appends row `idx` of `data` (cols consecutive values) to `centroids` and
 * zeroes that row's weight so pick_point_idx_weighted can never sample it
 * again. (The row itself stays in `data`.)
 *
 * FIX: the reference declarator of the last parameter had been corrupted by
 * a character-encoding mangle ("¢roids"); restored to "&centroids" so the
 * caller's vector is actually mutated and the code compiles.
 */
template<typename T>
void add_centroid(int idx, int cols,
                  thrust::host_vector<T> &data,
                  thrust::host_vector<T> &weights,
                  std::vector<T> &centroids) {
  for (int i = 0; i < cols; i++) {
    centroids.push_back(data[idx * cols + i]);
  }
  weights[idx] = 0;
}
/**
* K-Means++ algorithm
* @tparam T
* @param seed
* @param data
* @param weights
* @param k
* @param cols
* @param centroids
*/
/**
 * Host-side K-Means++ seeding over a (small) candidate set: picks k rows of
 * `data`, each sampled proportionally to weight * distance-to-closest-chosen
 * centroid, and writes them to `centroids` (k * cols values, row-major).
 *
 * FIX: the reference declarator of the output parameter had been corrupted
 * by a character-encoding mangle ("¢roids"); restored to "&centroids".
 */
template<typename T>
void kmeans_plus_plus(
    int verbose,
    int seed,
    thrust::host_vector<T> data,
    thrust::host_vector<T> weights,
    int k,
    int cols,
    thrust::host_vector<T> &centroids) {
  std::vector<T> std_centroids(0);
  std_centroids.reserve(k * cols);
  // First centroid: sampled proportionally to the raw weights only.
  int centroid_idx = pick_point_idx_weighted(
      seed,
      (std::vector<T> *) NULL,
      weights
  );
  add_centroid(centroid_idx, cols, data, weights, std_centroids);
  // Distance from every point to its closest already-chosen centroid.
  std::vector<T> best_pairwise_distances(data.size() / cols); // one for each row in data
  std::vector<T> std_data(data.begin(), data.end());
  compute_distances(std_data,
                    std_centroids,
                    best_pairwise_distances,
                    data.size() / cols, cols, 1);
  std::vector<T> curr_pairwise_distances(std_data.size() / cols);
  for (int iter = 0; iter < k - 1; iter++) {
    log_verbose(verbose, "KMeans++ - Iteraton %d/%d.", iter, k-1);
    // Next centroid: sampled proportionally to weight * current best distance.
    centroid_idx = pick_point_idx_weighted(
        seed,
        &best_pairwise_distances,
        weights
    );
    add_centroid(centroid_idx, cols, data, weights, std_centroids);
    std::vector<T> most_recent_centroids;
    most_recent_centroids.reserve(cols);
    add_centroid(centroid_idx, cols, data, weights, most_recent_centroids);
    // The chosen point is now a centroid, so its own distance becomes 0.
    best_pairwise_distances[centroid_idx] = 0;
    // Only distances to the newest centroid can improve the current minima.
    compute_distances(std_data,
                      most_recent_centroids,
                      curr_pairwise_distances,
                      std_data.size() / cols, cols, 1);
    for (int i = 0; i < curr_pairwise_distances.size(); i++) {
      best_pairwise_distances[i] = std::min(curr_pairwise_distances[i], best_pairwise_distances[i]);
    }
    std::fill(curr_pairwise_distances.begin(), curr_pairwise_distances.end(), (T)0.0);
  }
  centroids.assign(std_centroids.begin(), std_centroids.end());
}
template<typename T>
struct min_calc_functor {
  // Device functor: for data row `idx`, scan the distances to every candidate
  // centroid of the current batch and fold the smallest into
  // min_costs_ptr[idx], keeping the previous minimum if it was lower.
  T* all_costs_ptr;   // batch distances, indexed [j * rows_per_run + idx] (candidate-major)
  T* min_costs_ptr;   // running per-row minimum cost, updated in place
  T max = std::numeric_limits<T>::max();  // identity for the min-fold
  int potential_k_rows;  // number of candidate centroids in this batch
  int rows_per_run;      // number of data rows in this batch
  min_calc_functor(T* _all_costs_ptr, T* _min_costs_ptr, int _potential_k_rows, int _rows_per_run) {
    all_costs_ptr = _all_costs_ptr;
    min_costs_ptr = _min_costs_ptr;
    potential_k_rows = _potential_k_rows;
    rows_per_run = _rows_per_run;
  }
  __host__ __device__
  void operator()(int idx) const {
    T best = max;
    for (int j = 0; j < potential_k_rows; j++) {
      // abs() guards against tiny negative values from the dot-product
      // distance formula -- NOTE(review): presumed numerical-noise guard,
      // confirm against batch_calculate_distances.
      best = min(best, std::abs(all_costs_ptr[j * rows_per_run + idx]));
    }
    min_costs_ptr[idx] = min(min_costs_ptr[idx], best);
  }
};
/**
* K-Means|| initialization method implementation as described in "Scalable K-Means++".
*
* This is a probabilistic method, which tries to choose points as much spread out as possible as centroids.
*
* In case it finds more than k centroids a K-Means++ algorithm is ran on potential centroids to pick k best suited ones.
*
* http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf
*
* @tparam T
* @param verbose
* @param seed
* @param ord
* @param data
* @param data_dots
* @param centroids
* @param rows
* @param cols
* @param k
* @param num_gpu
* @param threshold
*/
template<typename T>
thrust::host_vector<T> kmeans_parallel(int verbose, int seed, const char ord,
                                       thrust::device_vector<T> **data,
                                       thrust::device_vector<T> **data_dots,
                                       size_t rows, int cols, int k, int num_gpu, T threshold) {
  if (seed < 0) {
    std::random_device rd;
    // BUGFIX: this used to read `int seed = rd();`, declaring a local that
    // shadowed the parameter -- the fresh seed was thrown away and every RNG
    // below (host mt19937 and the device-side thrust engines that capture
    // `seed` by value) kept running off the original negative value.
    seed = rd();
  }
  size_t rows_per_gpu = rows / num_gpu;
  std::mt19937 gen(seed);
  std::uniform_int_distribution<> dis(0, rows - 1);
  // Find the position (GPU idx and idx on that GPU) of the initial centroid
  int first_center = dis(gen);
  int first_center_idx = first_center % rows_per_gpu;
  int first_center_gpu = first_center / rows_per_gpu;
  log_verbose(verbose, "KMeans|| - Initial centroid %d on GPU %d.", first_center_idx, first_center_gpu);
  // Copies the initial centroid to potential centroids vector. That vector will store all potential centroids found
  // in the previous iteration.
  thrust::host_vector<T> h_potential_centroids(cols);
  std::vector<thrust::host_vector<T>> h_potential_centroids_per_gpu(num_gpu);
  CUDACHECK(cudaSetDevice(first_center_gpu));
  thrust::copy(
      (*data[first_center_gpu]).begin() + first_center_idx * cols,
      (*data[first_center_gpu]).begin() + (first_center_idx + 1) * cols,
      h_potential_centroids.begin()
  );
  thrust::host_vector<T> h_all_potential_centroids = h_potential_centroids;
  // Initial the cost-to-potential-centroids and cost-to-closest-potential-centroid matrices. Initial cost is +infinity
  std::vector<thrust::device_vector<T>> d_min_costs(num_gpu);
  for (int q = 0; q < num_gpu; q++) {
    CUDACHECK(cudaSetDevice(q));
    d_min_costs[q].resize(rows_per_gpu);
    thrust::fill(d_min_costs[q].begin(), d_min_costs[q].end(), std::numeric_limits<T>::max());
  }
  double t0 = timer<double>();
  // The original white paper claims 8 should be enough
  int max_iter = std::min(8, (int)(2 + log(k)) );
  for (int counter = 0; counter < max_iter; counter++) {
    log_verbose(verbose, "KMeans|| - Iteration %d.", counter);
    T total_min_cost = 0.0;
    int new_potential_centroids = 0;
#pragma omp parallel for
    for (int i = 0; i < num_gpu; i++) {
      CUDACHECK(cudaSetDevice(i));
      thrust::device_vector<T> d_potential_centroids = h_potential_centroids;
      int potential_k_rows = d_potential_centroids.size() / cols;
      // Compute all the costs to each potential centroid from previous iteration
      thrust::device_vector<T> centroid_dots(potential_k_rows);
      kmeans::detail::batch_calculate_distances(verbose, 0, rows_per_gpu, cols, potential_k_rows,
          *data[i], d_potential_centroids, *data_dots[i], centroid_dots,
          [&](int rows_per_run, size_t offset, thrust::device_vector<T> &pairwise_distances) {
            // Find the closest potential center cost for each row
            auto min_cost_counter = thrust::make_counting_iterator(0);
            auto all_costs_ptr = thrust::raw_pointer_cast(pairwise_distances.data());
            auto min_costs_ptr = thrust::raw_pointer_cast(d_min_costs[i].data() + offset);
            thrust::for_each(min_cost_counter,
                min_cost_counter + rows_per_run,
                // Functor instead of a lambda b/c nvcc is complaining about
                // nesting a __device__ lambda inside a regular lambda
                min_calc_functor<T>(all_costs_ptr, min_costs_ptr, potential_k_rows, rows_per_run));
          }
      );
    }
    // Total cost drives the sampling probabilities below.
    for (int i = 0; i < num_gpu; i++) {
      CUDACHECK(cudaSetDevice(i));
      total_min_cost += thrust::reduce(
          d_min_costs[i].begin(),
          d_min_costs[i].end()
      );
    }
    log_verbose(verbose, "KMeans|| - Total min cost from centers %g.", total_min_cost);
    if(total_min_cost == (T) 0.0) {
      continue;
    }
    std::set<int> copy_from_gpus;
#pragma omp parallel for
    for (int i = 0; i < num_gpu; i++) {
      CUDACHECK(cudaSetDevice(i));
      // Count how many potential centroids there are using probabilities
      // The further the row is from the closest cluster center the higher the probability
      auto pot_cent_filter_counter = thrust::make_counting_iterator(0);
      auto min_costs_ptr = thrust::raw_pointer_cast(d_min_costs[i].data());
      int pot_cent_num = thrust::count_if(
          pot_cent_filter_counter,
          pot_cent_filter_counter + rows_per_gpu, [=]__device__(int idx){
            thrust::default_random_engine rng(seed);
            thrust::uniform_real_distribution<> dist(0.0, 1.0);
            int device;
            cudaGetDevice(&device);
            rng.discard(idx + device * rows_per_gpu);
            T prob_threshold = (T) dist(rng);
            T prob_x = (( 2.0 * k * min_costs_ptr[idx]) / total_min_cost);
            return prob_x > prob_threshold;
          }
      );
      log_debug(verbose, "KMeans|| - Potential centroids on GPU %d = %d.", i, pot_cent_num);
      if (pot_cent_num > 0) {
        copy_from_gpus.insert(i);
        // Copy all potential cluster centers
        thrust::device_vector<T> d_new_potential_centroids(pot_cent_num * cols);
        auto range = thrust::make_counting_iterator(0);
        // Same RNG/discard scheme as the count above so the same rows are selected.
        thrust::copy_if(
            (*data[i]).begin(), (*data[i]).end(), range,
            d_new_potential_centroids.begin(), [=] __device__(int idx){
              int row = idx / cols;
              thrust::default_random_engine rng(seed);
              thrust::uniform_real_distribution<> dist(0.0, 1.0);
              int device;
              cudaGetDevice(&device);
              rng.discard(row + device * rows_per_gpu);
              T prob_threshold = (T) dist(rng);
              T prob_x = (( 2.0 * k * min_costs_ptr[row]) / total_min_cost);
              return prob_x > prob_threshold;
            });
        h_potential_centroids_per_gpu[i].clear();
        h_potential_centroids_per_gpu[i].resize(d_new_potential_centroids.size());
        new_potential_centroids += d_new_potential_centroids.size();
        thrust::copy(
            d_new_potential_centroids.begin(),
            d_new_potential_centroids.end(),
            h_potential_centroids_per_gpu[i].begin()
        );
      }
    }
    log_verbose(verbose, "KMeans|| - New potential centroids %d.", new_potential_centroids);
    // Gather potential cluster centers from all GPUs
    if (new_potential_centroids > 0) {
      h_potential_centroids.clear();
      h_potential_centroids.resize(new_potential_centroids);
      int old_pot_centroids_size = h_all_potential_centroids.size();
      h_all_potential_centroids.resize(old_pot_centroids_size + new_potential_centroids);
      int offset = 0;
      for (int i = 0; i < num_gpu; i++) {
        if(copy_from_gpus.find(i) != copy_from_gpus.end()) {
          thrust::copy(
              h_potential_centroids_per_gpu[i].begin(),
              h_potential_centroids_per_gpu[i].end(),
              h_potential_centroids.begin() + offset
          );
          offset += h_potential_centroids_per_gpu[i].size();
        }
      }
      thrust::copy(
          h_potential_centroids.begin(),
          h_potential_centroids.end(),
          h_all_potential_centroids.begin() + old_pot_centroids_size
      );
    }
  }
  double timeloop = static_cast<double>(timer<double>() - t0);
  thrust::host_vector<T> final_centroids(0);
  int potential_centroids_num = h_all_potential_centroids.size() / cols;
  if (potential_centroids_num <= k) {
    final_centroids.resize(k * cols);
    thrust::copy(
        h_all_potential_centroids.begin(),
        h_all_potential_centroids.end(),
        final_centroids.begin()
    );
    // TODO what if potential_centroids_num < k ?? we don't want 0s
  } else {
    // If we found more than k potential cluster centers we need to take only a subset
    // This is done using a weighted k-means++ method, since the set should be very small
    // it should converge very fast and is all done on the CPU.
    thrust::host_vector<T> weights(potential_centroids_num);
    double tc0 = timer<double>();
    // Weights correspond to the number of data points assigned to each potential cluster center
    count_pts_per_centroid(
        verbose, num_gpu,
        rows_per_gpu, cols,
        data, data_dots,
        h_all_potential_centroids,
        weights
    );
    double timecount = static_cast<double>(timer<double>() - tc0);
    double tkpp = timer<double>();
    kmeans_plus_plus(
        verbose,
        seed,
        h_all_potential_centroids,
        weights,
        k, cols,
        final_centroids
    );
    double timekpp = static_cast<double>(timer<double>() - tkpp);
    log_verbose(verbose, "KMeans|| - Time loop: %g Time count: %g Time kpp: %g.", timeloop, timecount, timekpp);
  }
  return final_centroids;
}
// Set to 1 when SIGINT/SIGTERM arrives; the solver loop polls it so a fit in
// progress can terminate gracefully instead of being killed mid-kernel.
volatile std::atomic_int flaggpu(0);

// Signal handler registered by kmeans_init: report the signal and request
// shutdown via the atomic flag.
inline void my_function_gpu(int sig) { // can be called asynchronously
  fprintf(stderr, "Caught signal %d. Terminating shortly.\n", sig);
  flaggpu.store(1);
}
/**
 * Validates the request, installs SIGINT/SIGTERM handlers, and builds the
 * list of device ids to run on (at most min(visible GPUs, n_gputry, rows)
 * devices, starting from gpu_idtry and wrapping around).
 * Writes the final device count to *final_n_gpu and returns the id list.
 */
std::vector<int> kmeans_init(int verbose, int *final_n_gpu, int n_gputry, int gpu_idtry, int rows) {
  if (rows > std::numeric_limits<int>::max()) {
    fprintf(stderr, "rows > %d not implemented\n", std::numeric_limits<int>::max());
    fflush(stderr);
    exit(0);
  }
  std::signal(SIGINT, my_function_gpu);
  std::signal(SIGTERM, my_function_gpu);
  // no more gpus than visible gpus
  int n_gpuvis;
  // BUGFIX: the cudaGetDeviceCount result was unchecked; on failure or with
  // zero visible devices, `gpu_idtry % n_gpuvis` below divided by zero.
  CUDACHECK(cudaGetDeviceCount(&n_gpuvis));
  if (n_gpuvis < 1) {
    fprintf(stderr, "No CUDA-capable devices found\n");
    fflush(stderr);
    exit(0);
  }
  int n_gpu = std::min(n_gpuvis, n_gputry);
  // no more than rows
  n_gpu = std::min(n_gpu, rows);
  if (verbose) {
    std::cout << n_gpu << " gpus." << std::endl;
  }
  int gpu_id = gpu_idtry % n_gpuvis;
  // setup GPU list to use
  std::vector<int> dList(n_gpu);
  for (int idx = 0; idx < n_gpu; idx++) {
    int device_idx = (gpu_id + idx) % n_gpuvis;
    dList[idx] = device_idx;
  }
  *final_n_gpu = n_gpu;
  return dList;
}
// Stores the problem description: data pointer A (n rows, d columns) and the
// requested number of clusters k. Does not take ownership of A.
template<typename T>
H2O4GPUKMeans<T>::H2O4GPUKMeans(const T *A, int k, int n, int d)
    : _A(A), _k(k), _n(n), _d(d) {}
/**
 * Trains K-Means across up to n_gputry GPUs. On success (return 0) writes:
 *   *pred_centroids -- k * cols centroid values (row-major, heap-owned),
 *   *pred_labels    -- per-row cluster assignment (heap-owned).
 * Returns the negative iteration code from the solver on failure.
 * NOTE: ctr/h_labels are intentionally leaked to the caller (see TODO below).
 */
template<typename T>
int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry,
               size_t rows, size_t cols, const char ord,
               int k, int max_iterations, int init_from_data,
               T threshold,
               const T *srcdata, T **pred_centroids, int **pred_labels) {
  // init random seed if use the C function rand()
  if (seed >= 0) {
    srand(seed);
  } else {
    srand(unsigned(time(NULL)));
  }
  // no more clusters than rows
  if (k > rows) {
    k = static_cast<int>(rows);
    fprintf(stderr, "Number of clusters adjusted to be equal to number of rows.\n");
    fflush(stderr);
  }
  int n_gpu;
  std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
  double t0t = timer<double>();
  thrust::device_vector<T> *data[n_gpu];
  thrust::device_vector<int> *labels[n_gpu];
  thrust::device_vector<T> *d_centroids[n_gpu];
  thrust::device_vector<T> *data_dots[n_gpu];
#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(cudaSetDevice(dList[q]));
    data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
    d_centroids[q] = new thrust::device_vector<T>(k * cols);
    data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
    kmeans::detail::labels_init();
  }
  log_debug(verbose, "Number of points: %d", rows);
  log_debug(verbose, "Number of dimensions: %d", cols);
  log_debug(verbose, "Number of clusters: %d", k);
  log_debug(verbose, "Max. number of iterations: %d", max_iterations);
  log_debug(verbose, "Stopping threshold: %d", threshold);
  std::vector<int> v(rows);
  std::iota(std::begin(v), std::end(v), 0); // Fill with 0, 1, ..., rows.
  if (seed >= 0) {
    std::shuffle(v.begin(), v.end(), std::default_random_engine(seed));
  } else {
    std::random_shuffle(v.begin(), v.end());
  }
  // Copy the data to devices
#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(cudaSetDevice(dList[q]));
    if (verbose) { std::cout << "Copying data to device: " << dList[q] << std::endl; }
    copy_data(verbose, ord, *data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
    // Pre-compute the data matrix norms
    kmeans::detail::make_self_dots(rows / n_gpu, cols, *data[q], *data_dots[q]);
  }
  // Get random points as centroids
  int bytecount = cols * k * sizeof(T); // all centroids
  if (0 == init_from_data) {
    log_debug(verbose, "KMeans - Using random initialization.");
    int masterq = 0;
    CUDACHECK(cudaSetDevice(dList[masterq]));
    copy_centroids_shuffled(verbose, v, ord, *d_centroids[masterq], &srcdata[0], rows, k, cols);
    // Copy centroids to all devices
    std::vector < cudaStream_t * > streams;
    streams.resize(n_gpu);
#pragma omp parallel for
    for (int q = 0; q < n_gpu; q++) {
      if (q == masterq) continue;
      CUDACHECK(cudaSetDevice(dList[q]));
      if (verbose > 0) {
        std::cout << "Copying centroid data to device: " << dList[q] << std::endl;
      }
      streams[q] = reinterpret_cast<cudaStream_t *>(malloc(sizeof(cudaStream_t)));
      cudaStreamCreate(streams[q]);
      cudaMemcpyPeerAsync(thrust::raw_pointer_cast(&(*d_centroids[q])[0]),
                          dList[q],
                          thrust::raw_pointer_cast(&(*d_centroids[masterq])[0]),
                          dList[masterq],
                          bytecount,
                          *(streams[q]));
    }
    //#pragma omp parallel for
    for (int q = 0; q < n_gpu; q++) {
      if (q == masterq) continue;
      cudaSetDevice(dList[q]);
      cudaStreamDestroy(*(streams[q]));
      // BUGFIX: release the heap storage backing the stream handle; it was
      // previously leaked on every multi-GPU fit.
      free(streams[q]);
#if(DEBUGKMEANS)
      thrust::host_vector<T> h_centroidq=*d_centroids[q];
      for(int ii=0;ii<k*d;ii++){
        fprintf(stderr,"q=%d initcent[%d]=%g\n",q,ii,h_centroidq[ii]); fflush(stderr);
      }
#endif
    }
  } else if (1 == init_from_data) { // kmeans||
    log_debug(verbose, "KMeans - Using K-Means|| initialization.");
    thrust::host_vector<T> final_centroids = kmeans_parallel(verbose, seed, ord, data, data_dots, rows, cols, k, n_gpu, threshold);
#pragma omp parallel for
    for (int q = 0; q < n_gpu; q++) {
      CUDACHECK(cudaSetDevice(dList[q]));
      cudaMemcpy(
          thrust::raw_pointer_cast(&(*d_centroids[q])[0]),
          thrust::raw_pointer_cast(&final_centroids[0]),
          bytecount,
          cudaMemcpyHostToDevice);
    }
  }
#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(cudaSetDevice(dList[q]));
    labels[q] = new thrust::device_vector<int>(rows / n_gpu);
  }
  double timetransfer = static_cast<double>(timer<double>() - t0t);
  double t0 = timer<double>();
  int iter = kmeans::kmeans<T>(verbose, &flaggpu, rows, cols, k, data, labels, d_centroids, data_dots,
                               dList, n_gpu, max_iterations, threshold, true);
  if (iter < 0) {
    log_error(verbose, "KMeans algorithm failed.");
    return iter;
  }
  double timefit = static_cast<double>(timer<double>() - t0);
  double t1 = timer<double>();
  // copy result of centroids (sitting entirely on each device) back to host
  // TODO FIXME: When do delete ctr and h_labels memory???
  thrust::host_vector<T> *ctr = new thrust::host_vector<T>(*d_centroids[0]);
  *pred_centroids = ctr->data();
  // copy assigned labels; the sequential loop inserts each GPU's chunk in
  // device order, so the first `rows` entries of h_labels end up correctly
  // ordered (the trailing initial zeros are never exposed via pred_labels).
  thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(rows);
  //#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    int offset = labels[q]->size()*q;
    h_labels->insert(h_labels->begin() + offset, labels[q]->begin(), labels[q]->end());
  }
  *pred_labels = h_labels->data();
  // debug
  if (verbose >= H2O4GPU_LOG_VERBOSE) {
    for (unsigned int ii = 0; ii < k; ii++) {
      fprintf(stderr, "ii=%d of k=%d ", ii, k);
      for (unsigned int jj = 0; jj < cols; jj++) {
        fprintf(stderr, "%g ", (*pred_centroids)[cols * ii + jj]);
      }
      fprintf(stderr, "\n");
      fflush(stderr);
    }
  }
#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(cudaSetDevice(dList[q]));
    delete (data[q]);
    delete (labels[q]);
    delete (d_centroids[q]);
    delete (data_dots[q]);
    kmeans::detail::labels_close();
  }
  double timecleanup = static_cast<double>(timer<double>() - t1);
  if (verbose) {
    std::cout << " Time fit: " << timefit << " s" << std::endl;
    fprintf(stderr, "Timetransfer: %g Timefit: %g Timecleanup: %g\n", timetransfer, timefit, timecleanup);
    fflush(stderr);
  }
  return 0;
}
/**
 * Assigns each row of srcdata to its nearest centroid across the available
 * GPUs. *pred_labels receives rows/n_gpu * n_gpu labels in row order
 * (heap-owned, intentionally not freed here). Returns 0 on success.
 */
template<typename T>
int kmeans_predict(int verbose, int gpu_idtry, int n_gputry,
                   size_t rows, size_t cols,
                   const char ord, int k,
                   const T *srcdata, const T *centroids, int **pred_labels) {
  // Print centroids
  if (verbose >= H2O4GPU_LOG_VERBOSE) {
    std::cout << std::endl;
    for (int i = 0; i < cols * k; i++) {
      std::cout << centroids[i] << " ";
      if (i % cols == 1) {
        std::cout << std::endl;
      }
    }
  }
  int n_gpu;
  std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
  thrust::device_vector<T> *d_data[n_gpu];
  thrust::device_vector<T> *d_centroids[n_gpu];
  thrust::device_vector<T> *data_dots[n_gpu];
  thrust::device_vector<T> *centroid_dots[n_gpu];
  // BUGFIX: h_labels used to start empty and every OpenMP thread called
  // insert() on it concurrently -- a data race that also made the order of
  // the per-GPU label chunks depend on thread scheduling. Pre-size it and
  // give each GPU its own disjoint slice instead.
  size_t rows_per_gpu = rows / n_gpu;
  thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(rows_per_gpu * n_gpu);
#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(cudaSetDevice(dList[q]));
    kmeans::detail::labels_init();
    data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
    centroid_dots[q] = new thrust::device_vector<T>(k);
    d_centroids[q] = new thrust::device_vector<T>(k * cols);
    d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
    // FIX: "&centroids[0]" had been mangled by an encoding error ("¢roids").
    copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols);
    copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
    kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]);
    thrust::device_vector<int> d_labels(rows / n_gpu);
    kmeans::detail::batch_calculate_distances(verbose, q, rows / n_gpu, cols, k,
        *d_data[q], *d_centroids[q], *data_dots[q], *centroid_dots[q],
        [&](int n, size_t offset, thrust::device_vector<T> &pairwise_distances) {
          kmeans::detail::relabel(n, k, pairwise_distances, d_labels, offset);
        }
    );
    // Each GPU writes only its own contiguous chunk -> no race, stable order.
    thrust::copy(d_labels.begin(), d_labels.end(), h_labels->begin() + q * rows_per_gpu);
  }
  *pred_labels = h_labels->data();
#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    safe_cuda(cudaSetDevice(dList[q]));
    kmeans::detail::labels_close();
    delete (data_dots[q]);
    delete (centroid_dots[q]);
    delete (d_centroids[q]);
    delete (d_data[q]);
  }
  return 0;
}
/**
 * Computes, for each row of srcdata, its distance to each of the k centroids.
 * *preds receives a (rows/n_gpu * n_gpu)-by-k row-major distance matrix
 * (heap-owned, intentionally not freed here). Returns 0 on success.
 */
template<typename T>
int kmeans_transform(int verbose,
                     int gpu_idtry, int n_gputry,
                     size_t rows, size_t cols, const char ord, int k,
                     const T *srcdata, const T *centroids,
                     T **preds) {
  // Print centroids
  if (verbose >= H2O4GPU_LOG_VERBOSE) {
    std::cout << std::endl;
    for (int i = 0; i < cols * k; i++) {
      std::cout << centroids[i] << " ";
      if (i % cols == 1) {
        std::cout << std::endl;
      }
    }
  }
  int n_gpu;
  std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
  thrust::device_vector<T> *d_data[n_gpu];
  thrust::device_vector<T> *d_centroids[n_gpu];
  thrust::device_vector<T> *d_pairwise_distances[n_gpu];
  thrust::device_vector<T> *data_dots[n_gpu];
  thrust::device_vector<T> *centroid_dots[n_gpu];
#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(cudaSetDevice(dList[q]));
    kmeans::detail::labels_init();
    data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
    centroid_dots[q] = new thrust::device_vector<T>(k);
    d_pairwise_distances[q] = new thrust::device_vector<T>(rows / n_gpu * k);
    d_centroids[q] = new thrust::device_vector<T>(k * cols);
    d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
    // FIX: "&centroids[0]" had been mangled by an encoding error ("¢roids").
    copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols);
    copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
    kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]);
    // TODO batch this
    kmeans::detail::calculate_distances(verbose, q, rows / n_gpu, cols, k,
                                        *d_data[q], 0, *d_centroids[q], *data_dots[q],
                                        *centroid_dots[q], *d_pairwise_distances[q]);
  }
  // Move the resulting distances into host memory from all devices.
  // BUGFIX: this used to insert() into a shared, initially empty host_vector
  // from inside an OpenMP parallel region -- a data race whose chunk order
  // depended on thread scheduling. Pre-size the buffer and let each GPU write
  // its own disjoint slice instead.
  size_t chunk = (rows / n_gpu) * k;
  thrust::host_vector<T> *h_pairwise_distances = new thrust::host_vector<T>(chunk * n_gpu);
#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    CUDACHECK(cudaSetDevice(dList[q]));
    thrust::copy(d_pairwise_distances[q]->begin(),
                 d_pairwise_distances[q]->end(),
                 h_pairwise_distances->begin() + q * chunk);
  }
  *preds = h_pairwise_distances->data();
  // Print distances
  if (verbose >= H2O4GPU_LOG_VERBOSE) {
    std::cout << std::endl;
    // BUGFIX: the result matrix is rows x k, not rows x cols; the old loop
    // iterated rows * cols times and could read past the end of the buffer.
    for (size_t i = 0; i < h_pairwise_distances->size(); i++) {
      std::cout << h_pairwise_distances->data()[i] << " ";
      if (i % k == 1) {
        std::cout << std::endl;
      }
    }
  }
#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    safe_cuda(cudaSetDevice(dList[q]));
    kmeans::detail::labels_close();
    delete (d_pairwise_distances[q]);
    delete (data_dots[q]);
    delete (centroid_dots[q]);
    delete (d_centroids[q]);
    delete (d_data[q]);
  }
  return 0;
}
/**
 * Single entry point used by the C API: dispatches to training when
 * dopredict == 0, otherwise to prediction with the supplied centroids.
 * Returns whatever the selected routine returns (0 on success).
 */
template<typename T>
int makePtr_dense(int dopredict, int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols,
                  const char ord, int k, int max_iterations, int init_from_data,
                  T threshold, const T *srcdata, const T *centroids,
                  T **pred_centroids, int **pred_labels) {
  if (dopredict != 0) {
    return kmeans_predict(verbose, gpu_idtry, n_gputry, rows, cols,
                          ord, k,
                          srcdata, centroids, pred_labels);
  }
  return kmeans_fit(verbose, seed, gpu_idtry, n_gputry, rows, cols,
                    ord, k, max_iterations, init_from_data, threshold,
                    srcdata, pred_centroids, pred_labels);
}
template int
makePtr_dense<float>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols,
const char ord, int k, int max_iterations, int init_from_data,
float threshold, const float *srcdata,
const float *centroids, float **pred_centroids, int **pred_labels);
template int
makePtr_dense<double>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols,
const char ord, int k, int max_iterations, int init_from_data,
double threshold, const double *srcdata,
const double *centroids, double **pred_centroids, int **pred_labels);
template int kmeans_fit<float>(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k, int max_iterations,
int init_from_data, float threshold,
const float *srcdata,
float **pred_centroids, int **pred_labels);
template int kmeans_fit<double>(int verbose, int seed, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k, int max_iterations,
int init_from_data, double threshold,
const double *srcdata,
double **pred_centroids, int **pred_labels);
template int kmeans_predict<float>(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k,
const float *srcdata, const float *centroids, int **pred_labels);
template int kmeans_predict<double>(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols,
const char ord, int k,
const double *srcdata, const double *centroids, int **pred_labels);
template int kmeans_transform<float>(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const float *src_data, const float *centroids,
float **preds);
template int kmeans_transform<double>(int verbose,
int gpu_id, int n_gpu,
size_t m, size_t n, const char ord, int k,
const double *src_data, const double *centroids,
double **preds);
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE == 1
template
class H2O4GPUKMeans<double>;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE == 1
template
class H2O4GPUKMeans<float>;
#endif
} // namespace h2o4gpukmeans
/*
* Interface for other languages
*/
// Fit and Predict
// C API entry point (float): fit when dopredict == 0, else predict with the
// given centroids. Thin wrapper over h2o4gpukmeans::makePtr_dense<float>.
int make_ptr_float_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n,
                          const char ord, int k, int max_iterations, int init_from_data,
                          float threshold, const float *srcdata,
                          const float *centroids, float **pred_centroids, int **pred_labels) {
  return h2o4gpukmeans::makePtr_dense<float>(dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k,
                                             max_iterations, init_from_data, threshold,
                                             srcdata, centroids, pred_centroids, pred_labels);
}
// C API entry point (double): fit when dopredict == 0, else predict with the
// given centroids. Thin wrapper over h2o4gpukmeans::makePtr_dense<double>.
int make_ptr_double_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n,
                           const char ord, int k, int max_iterations, int init_from_data,
                           double threshold, const double *srcdata,
                           const double *centroids, double **pred_centroids, int **pred_labels) {
  return h2o4gpukmeans::makePtr_dense<double>(dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k,
                                              max_iterations, init_from_data, threshold,
                                              srcdata, centroids, pred_centroids, pred_labels);
}
// Transform
// C API entry point: per-row distances to each of the k centroids (float).
// Thin wrapper over the templated h2o4gpukmeans::kmeans_transform<float>;
// *preds receives the rows-by-k distance matrix.
int kmeans_transform_float(int verbose,
                           int gpu_id, int n_gpu,
                           size_t m, size_t n, const char ord, int k,
                           const float *src_data, const float *centroids,
                           float **preds) {
  return h2o4gpukmeans::kmeans_transform<float>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds);
}
// C API entry point: double-precision variant of kmeans_transform_float.
// *preds receives the rows-by-k distance matrix.
int kmeans_transform_double(int verbose,
                            int gpu_id, int n_gpu,
                            size_t m, size_t n, const char ord, int k,
                            const double *src_data, const double *centroids,
                            double **preds) {
  return h2o4gpukmeans::kmeans_transform<double>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds);
}
/* ==== file: 81a82149d95e5ef344a3e74d6513e2dbf04a04a3.hip ==== */
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include<cmath>
#include <array/DataTypeUtils.h>
#include<ops/declarable/helpers/betaInc.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// modified Lentzs algorithm for continued fractions,
// reference: Lentz, W.J. 1976, Generating Bessel Functions in Mie Scattering Calculations Using Continued Fractions,
// Evaluates the continued-fraction factor of the regularized incomplete beta
// function I_x(a, b) using the modified Lentz algorithm.
// Preconditions (established by betaIncForArrayCuda before the call):
//   * dynamic shared memory holds the precomputed fraction coefficients
//     coeffs[0 .. 2*maxIter + 1] for this block's (a, b, x);
//   * exactly one thread per block calls this function.
// Returns infinity if the fraction has not converged within maxIter steps.
// Change vs. original: dead locals `aPlusb` and `aPlus2i` (computed, never
// read) removed; behavior is otherwise identical.
template <typename T>
__device__ T continuedFractionCuda(const T a, const T b, const T x) {

    extern __shared__ unsigned char shmem[];
    T* coeffs = reinterpret_cast<T*>(shmem);

    // Lentz "tiny" guard: substituted for denominators too close to zero so
    // the recurrence never divides by (almost) zero.
    const T min = DataTypeUtils::min<T>() / DataTypeUtils::eps<T>();

    // Zeroth convergent; coeffs[0] and coeffs[1] are seeded by the caller.
    T t2 = coeffs[1];
    T t1 = coeffs[0];
    if(math::nd4j_abs<T>(t1) < min)
        t1 = min;
    t1 = static_cast<T>(1) / t1;
    T result = t1;

    for(uint i = 1; i <= maxIter; ++i) {
        const uint i2 = 2*i;

        // even-numbered coefficient
        t1 = static_cast<T>(1) + coeffs[i2] * t1;
        if(math::nd4j_abs<T>(t1) < min)
            t1 = min;
        t1 = static_cast<T>(1) / t1;

        t2 = static_cast<T>(1) + coeffs[i2] / t2;
        if(math::nd4j_abs<T>(t2) < min)
            t2 = min;

        result *= t2 * t1;

        // odd-numbered coefficient
        t1 = static_cast<T>(1) + coeffs[i2 + 1] * t1;
        if(math::nd4j_abs<T>(t1) < min)
            t1 = min;
        t1 = static_cast<T>(1) / t1;

        t2 = static_cast<T>(1) + coeffs[i2 + 1] / t2;
        if(math::nd4j_abs<T>(t2) < min)
            t2 = min;

        const T val = t2 * t1;
        result *= val;

        // Converged once the multiplicative update is ~1.
        if(math::nd4j_abs<T>(val - static_cast<T>(1)) <= DataTypeUtils::eps<T>())
            return result;
    }

    return DataTypeUtils::infOrMax<T>(); // no convergence, more iterations is required, return infinity
}
///////////////////////////////////////////////////////////////////
// One block per output element: computes z[j] = I_x(a, b), the regularized
// incomplete beta function, for element j = blockIdx.x of the input arrays.
// Launch contract (see betaIncForArrayCudaLauncher / betaInc below):
//   gridDim.x == output length, blockDim.x == maxIter, and dynamic shared
//   memory sized for the continued-fraction coefficients.
// NOTE(review): threads 1..maxIter-1 fill coeffs[2..2*maxIter-1]; the
// continued fraction's last iteration may read coeffs[2*maxIter] and
// coeffs[2*maxIter+1], which are never written — confirm upstream intent.
template<typename T>
__global__ void betaIncForArrayCuda(const void* va, const Nd4jLong* aShapeInfo,
                                    const void* vb, const Nd4jLong* bShapeInfo,
                                    const void* vx, const Nd4jLong* xShapeInfo,
                                    void* vz, const Nd4jLong* zShapeInfo) {

    extern __shared__ unsigned char shmem[];
    T* sharedMem = reinterpret_cast<T*>(shmem);

    const Nd4jLong j = blockIdx.x;   // one block per each element

    T& z = *(reinterpret_cast<T*>(vz) + shape::getIndexOffset(j, zShapeInfo));

    __shared__ T a, b, x;
    __shared__ bool symmCond;

    // Thread 0 loads this element's (a, b, x) and applies the symmetry
    // relation I_x(a, b) = 1 - I_{1-x}(b, a) when x is past the cut point,
    // keeping the continued fraction in its fast-converging regime.
    if (threadIdx.x == 0) {
        a = *(reinterpret_cast<const T*>(va) + shape::getIndexOffset(j, aShapeInfo));
        b = *(reinterpret_cast<const T*>(vb) + shape::getIndexOffset(j, bShapeInfo));
        x = *(reinterpret_cast<const T*>(vx) + shape::getIndexOffset(j, xShapeInfo));

        symmCond = x > (a + static_cast<T>(1)) / (a + b + static_cast<T>(2));

        if(symmCond) { // swap a and b, x = 1 - x
            T temp = a;
            a = b;
            b = temp;
            x = static_cast<T>(1) - x;
        }
    }
    __syncthreads();

    // t^{a-1} * (1 - t)^{b-1} is symmetric with respect to x = 0.5 when a == b.
    // (a, b, x are block-shared, so the whole block takes the same branch;
    // every thread stores the same value into z.)
    if(a == b && x == static_cast<T>(0.5)) {
        z = static_cast<T>(0.5);
        return;
    }

    if (x == static_cast<T>(0) || x == static_cast<T>(1)) {
        // Undo the symmetry swap so z reflects the caller's original x.
        z = symmCond ? static_cast<T>(1) - x : x;
        return;
    }

    // Each thread i >= 1 precomputes two continued-fraction coefficients.
    if(threadIdx.x != 0) {
        const int i = threadIdx.x;
        const T aPlus2i = a + 2*i;
        sharedMem[2*i] = i * (b - i) * x / ((aPlus2i - static_cast<T>(1)) * aPlus2i);
        sharedMem[2*i + 1] = -(a + i) * (a + b + i) * x / ((aPlus2i + static_cast<T>(1)) * aPlus2i);
    }
    __syncthreads();

    // Thread 0 finishes alone: prefactor x^a (1-x)^b / B(a, b), seed
    // coefficients 0 and 1, then evaluate the continued fraction.
    if(threadIdx.x == 0) {
        const T gammaPart = lgamma(a) + lgamma(b) - lgamma(a + b);
        const T front = math::nd4j_exp<T,T>(math::nd4j_log<T, T>(x) * a + math::nd4j_log<T, T>(1.f - x) * b - gammaPart);

        sharedMem[0] = static_cast<T>(1) - (a + b) * x / (a + static_cast<T>(1));
        sharedMem[1] = static_cast<T>(1);

        z = front * continuedFractionCuda(a, b, x) / a;

        if(symmCond) // symmetry relation
            z = static_cast<T>(1) - z;
    }
}
///////////////////////////////////////////////////////////////////
// Host-side launch helper: blocksPerGrid = number of output elements,
// threadsPerBlock = maxIter, sharedMem sized for the continued-fraction
// coefficients (see betaInc below). Launch is asynchronous on *stream.
template<typename T>
static void betaIncForArrayCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
                                        const void* va, const Nd4jLong* aShapeInfo,
                                        const void* vb, const Nd4jLong* bShapeInfo,
                                        const void* vx, const Nd4jLong* xShapeInfo,
                                        void* vz, const Nd4jLong* zShapeInfo) {

    hipLaunchKernelGGL(( betaIncForArrayCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, va, aShapeInfo, vb, bShapeInfo, vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
// overload betaInc for arrays, shapes of a, b and x must be the same !!!
// Element-wise regularized incomplete beta function: output = I_x(a, b).
// Shapes of a, b and x must be identical; dispatched over FLOAT_TYPES.
// Synchronizes the stream before returning (manager.synchronize()).
void betaInc(sd::LaunchContext* context, const NDArray& a, const NDArray& b, const NDArray& x, NDArray& output) {

    // One block per output element; blockDim == maxIter so each thread can
    // precompute two continued-fraction coefficients in shared memory.
    const int threadsPerBlock = maxIter;
    const int blocksPerGrid = output.lengthOf();
    // 2*maxIter coefficients plus 128 bytes of slack.
    // NOTE(review): the device code can also touch coeffs[2*maxIter] and
    // coeffs[2*maxIter + 1] on the final iteration — confirm the slack is
    // meant to cover them.
    const int sharedMem = 2 * output.sizeOfT() * threadsPerBlock + 128;

    const auto xType = x.dataType();

    PointersManager manager(context, "betaInc");

    NDArray::prepareSpecialUse({&output}, {&a, &b, &x});
    BUILD_SINGLE_SELECTOR(xType, betaIncForArrayCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), a.specialBuffer(), a.specialShapeInfo(), b.specialBuffer(), b.specialShapeInfo(), x.specialBuffer(), x.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), FLOAT_TYPES);
    NDArray::registerSpecialUse({&output}, {&a, &b, &x});

    manager.synchronize();
}
}
}
}
| 81a82149d95e5ef344a3e74d6513e2dbf04a04a3.cu | /*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include<cmath>
#include <array/DataTypeUtils.h>
#include<ops/declarable/helpers/betaInc.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// modified Lentz’s algorithm for continued fractions,
// reference: Lentz, W.J. 1976, “Generating Bessel Functions in Mie Scattering Calculations Using Continued Fractions,”
// Evaluates the continued-fraction factor of the regularized incomplete beta
// function I_x(a, b) using the modified Lentz algorithm.
// Preconditions (established by betaIncForArrayCuda before the call):
//   * dynamic shared memory holds the precomputed fraction coefficients
//     coeffs[0 .. 2*maxIter + 1] for this block's (a, b, x);
//   * exactly one thread per block calls this function.
// Returns infinity if the fraction has not converged within maxIter steps.
// Change vs. original: dead locals `aPlusb` and `aPlus2i` (computed, never
// read) removed; behavior is otherwise identical.
template <typename T>
__device__ T continuedFractionCuda(const T a, const T b, const T x) {

    extern __shared__ unsigned char shmem[];
    T* coeffs = reinterpret_cast<T*>(shmem);

    // Lentz "tiny" guard: substituted for denominators too close to zero so
    // the recurrence never divides by (almost) zero.
    const T min = DataTypeUtils::min<T>() / DataTypeUtils::eps<T>();

    // Zeroth convergent; coeffs[0] and coeffs[1] are seeded by the caller.
    T t2 = coeffs[1];
    T t1 = coeffs[0];
    if(math::nd4j_abs<T>(t1) < min)
        t1 = min;
    t1 = static_cast<T>(1) / t1;
    T result = t1;

    for(uint i = 1; i <= maxIter; ++i) {
        const uint i2 = 2*i;

        // even-numbered coefficient
        t1 = static_cast<T>(1) + coeffs[i2] * t1;
        if(math::nd4j_abs<T>(t1) < min)
            t1 = min;
        t1 = static_cast<T>(1) / t1;

        t2 = static_cast<T>(1) + coeffs[i2] / t2;
        if(math::nd4j_abs<T>(t2) < min)
            t2 = min;

        result *= t2 * t1;

        // odd-numbered coefficient
        t1 = static_cast<T>(1) + coeffs[i2 + 1] * t1;
        if(math::nd4j_abs<T>(t1) < min)
            t1 = min;
        t1 = static_cast<T>(1) / t1;

        t2 = static_cast<T>(1) + coeffs[i2 + 1] / t2;
        if(math::nd4j_abs<T>(t2) < min)
            t2 = min;

        const T val = t2 * t1;
        result *= val;

        // Converged once the multiplicative update is ~1.
        if(math::nd4j_abs<T>(val - static_cast<T>(1)) <= DataTypeUtils::eps<T>())
            return result;
    }

    return DataTypeUtils::infOrMax<T>(); // no convergence, more iterations is required, return infinity
}
///////////////////////////////////////////////////////////////////
// One block per output element: computes z[j] = I_x(a, b), the regularized
// incomplete beta function, for element j = blockIdx.x of the input arrays.
// Launch contract (see betaIncForArrayCudaLauncher / betaInc below):
//   gridDim.x == output length, blockDim.x == maxIter, and dynamic shared
//   memory sized for the continued-fraction coefficients.
// NOTE(review): threads 1..maxIter-1 fill coeffs[2..2*maxIter-1]; the
// continued fraction's last iteration may read coeffs[2*maxIter] and
// coeffs[2*maxIter+1], which are never written — confirm upstream intent.
template<typename T>
__global__ void betaIncForArrayCuda(const void* va, const Nd4jLong* aShapeInfo,
                                    const void* vb, const Nd4jLong* bShapeInfo,
                                    const void* vx, const Nd4jLong* xShapeInfo,
                                    void* vz, const Nd4jLong* zShapeInfo) {

    extern __shared__ unsigned char shmem[];
    T* sharedMem = reinterpret_cast<T*>(shmem);

    const Nd4jLong j = blockIdx.x;   // one block per each element

    T& z = *(reinterpret_cast<T*>(vz) + shape::getIndexOffset(j, zShapeInfo));

    __shared__ T a, b, x;
    __shared__ bool symmCond;

    // Thread 0 loads this element's (a, b, x) and applies the symmetry
    // relation I_x(a, b) = 1 - I_{1-x}(b, a) when x is past the cut point,
    // keeping the continued fraction in its fast-converging regime.
    if (threadIdx.x == 0) {
        a = *(reinterpret_cast<const T*>(va) + shape::getIndexOffset(j, aShapeInfo));
        b = *(reinterpret_cast<const T*>(vb) + shape::getIndexOffset(j, bShapeInfo));
        x = *(reinterpret_cast<const T*>(vx) + shape::getIndexOffset(j, xShapeInfo));

        symmCond = x > (a + static_cast<T>(1)) / (a + b + static_cast<T>(2));

        if(symmCond) { // swap a and b, x = 1 - x
            T temp = a;
            a = b;
            b = temp;
            x = static_cast<T>(1) - x;
        }
    }
    __syncthreads();

    // t^{a-1} * (1 - t)^{b-1} is symmetric with respect to x = 0.5 when a == b.
    // (a, b, x are block-shared, so the whole block takes the same branch;
    // every thread stores the same value into z.)
    if(a == b && x == static_cast<T>(0.5)) {
        z = static_cast<T>(0.5);
        return;
    }

    if (x == static_cast<T>(0) || x == static_cast<T>(1)) {
        // Undo the symmetry swap so z reflects the caller's original x.
        z = symmCond ? static_cast<T>(1) - x : x;
        return;
    }

    // Each thread i >= 1 precomputes two continued-fraction coefficients.
    if(threadIdx.x != 0) {
        const int i = threadIdx.x;
        const T aPlus2i = a + 2*i;
        sharedMem[2*i] = i * (b - i) * x / ((aPlus2i - static_cast<T>(1)) * aPlus2i);
        sharedMem[2*i + 1] = -(a + i) * (a + b + i) * x / ((aPlus2i + static_cast<T>(1)) * aPlus2i);
    }
    __syncthreads();

    // Thread 0 finishes alone: prefactor x^a (1-x)^b / B(a, b), seed
    // coefficients 0 and 1, then evaluate the continued fraction.
    if(threadIdx.x == 0) {
        const T gammaPart = lgamma(a) + lgamma(b) - lgamma(a + b);
        const T front = math::nd4j_exp<T,T>(math::nd4j_log<T, T>(x) * a + math::nd4j_log<T, T>(1.f - x) * b - gammaPart);

        sharedMem[0] = static_cast<T>(1) - (a + b) * x / (a + static_cast<T>(1));
        sharedMem[1] = static_cast<T>(1);

        z = front * continuedFractionCuda(a, b, x) / a;

        if(symmCond) // symmetry relation
            z = static_cast<T>(1) - z;
    }
}
///////////////////////////////////////////////////////////////////
// Host-side launch helper: blocksPerGrid = number of output elements,
// threadsPerBlock = maxIter, sharedMem sized for the continued-fraction
// coefficients (see betaInc below). Launch is asynchronous on *stream.
template<typename T>
static void betaIncForArrayCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                        const void* va, const Nd4jLong* aShapeInfo,
                                        const void* vb, const Nd4jLong* bShapeInfo,
                                        const void* vx, const Nd4jLong* xShapeInfo,
                                        void* vz, const Nd4jLong* zShapeInfo) {

    betaIncForArrayCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(va, aShapeInfo, vb, bShapeInfo, vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
// overload betaInc for arrays, shapes of a, b and x must be the same !!!
// Element-wise regularized incomplete beta function: output = I_x(a, b).
// Shapes of a, b and x must be identical; dispatched over FLOAT_TYPES.
// Synchronizes the stream before returning (manager.synchronize()).
void betaInc(sd::LaunchContext* context, const NDArray& a, const NDArray& b, const NDArray& x, NDArray& output) {

    // One block per output element; blockDim == maxIter so each thread can
    // precompute two continued-fraction coefficients in shared memory.
    const int threadsPerBlock = maxIter;
    const int blocksPerGrid = output.lengthOf();
    // 2*maxIter coefficients plus 128 bytes of slack.
    // NOTE(review): the device code can also touch coeffs[2*maxIter] and
    // coeffs[2*maxIter + 1] on the final iteration — confirm the slack is
    // meant to cover them.
    const int sharedMem = 2 * output.sizeOfT() * threadsPerBlock + 128;

    const auto xType = x.dataType();

    PointersManager manager(context, "betaInc");

    NDArray::prepareSpecialUse({&output}, {&a, &b, &x});
    BUILD_SINGLE_SELECTOR(xType, betaIncForArrayCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), a.specialBuffer(), a.specialShapeInfo(), b.specialBuffer(), b.specialShapeInfo(), x.specialBuffer(), x.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), FLOAT_TYPES);
    NDArray::registerSpecialUse({&output}, {&a, &b, &x});

    manager.synchronize();
}
}
}
}
|
7b6807a8963ec7d1502d9bcef67d7a23ede98974.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Utilities.cuh"
#include "InputOutput.cuh"
#define BLOCKSIZE 128
/*******************/
/* KERNEL FUNCTION */
/*******************/
// Repeatedly squares each of the first NperGPU elements of d_data in place.
// Launch: 1D grid/block; out-of-range threads are excluded by the guard.
// Improvement: the original looped through global memory 1000 times per
// element (load + store every iteration); the value is now cached in a
// register and stored once — the final stored value is identical.
template<class T>
__global__ void kernelFunction(T * __restrict__ d_data, const unsigned int NperGPU) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < NperGPU) {
        T v = d_data[tid];
        for (int k = 0; k < 1000; k++) v = v * v;
        d_data[tid] = v;
    }
}
/******************/
/* PLAN STRUCTURE */
/******************/
// Per-GPU work descriptor: owns the device buffer holding one GPU's chunk.
template<class T>
struct plan {
    T *d_data;  // device allocation of NperGPU elements (see createPlan)
};
/*********************/
/* SVD PLAN CREATION */
/*********************/
// Selects GPU `gpuID` and allocates NperGPU elements of device memory into
// plan.d_data. NOTE(review): the buffer is never freed explicitly; cleanup
// appears to rely on the final hipDeviceReset() in main.
template<class T>
void createPlan(plan<T>& plan, unsigned int NperGPU, unsigned int gpuID) {
    // --- Device allocation
    gpuErrchk(hipSetDevice(gpuID));
    gpuErrchk(hipMalloc(&(plan.d_data), NperGPU * sizeof(T)));
}
/********/
/* MAIN */
/********/
// Distributes N doubles across numGPUs devices, squares each element
// repeatedly on its device, and copies the results back, issuing all work
// for one device before moving to the next ("depth-first" order).
int main() {
    const int numGPUs = 4;
    const int NperGPU = 500000;
    const int N = NperGPU * numGPUs;

    plan<double> plan[numGPUs];
    for (int k = 0; k < numGPUs; k++) createPlan(plan[k], NperGPU, k);

    // --- "Depth-first" approach - no stream
    // Pinned host buffer so the async copies can run asynchronously.
    // NOTE(review): the buffer is never initialized before upload and never
    // explicitly freed.
    double *inputMatrices; gpuErrchk(hipHostMalloc(&inputMatrices, N * sizeof(double)));
    for (int k = 0; k < numGPUs; k++)
    {
        gpuErrchk(hipSetDevice(k));
        // All three operations go to the device's default (NULL) stream, so
        // copy -> kernel -> copy are ordered among themselves per device.
        gpuErrchk(hipMemcpyAsync(plan[k].d_data, inputMatrices + k * NperGPU, NperGPU * sizeof(double), hipMemcpyHostToDevice));
        hipLaunchKernelGGL(( kernelFunction), dim3(iDivUp(NperGPU, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, plan[k].d_data, NperGPU);
        gpuErrchk(hipMemcpyAsync(inputMatrices + k * NperGPU, plan[k].d_data, NperGPU * sizeof(double), hipMemcpyDeviceToHost));
    }
    // NOTE(review): resets only the current (last-selected) device; work on
    // the other devices is not explicitly synchronized before exit.
    gpuErrchk(hipDeviceReset());
}
| 7b6807a8963ec7d1502d9bcef67d7a23ede98974.cu | #include "Utilities.cuh"
#include "InputOutput.cuh"
#define BLOCKSIZE 128
/*******************/
/* KERNEL FUNCTION */
/*******************/
// Repeatedly squares each of the first NperGPU elements of d_data in place.
// Launch: 1D grid/block; out-of-range threads are excluded by the guard.
// Improvement: the original looped through global memory 1000 times per
// element (load + store every iteration); the value is now cached in a
// register and stored once — the final stored value is identical.
template<class T>
__global__ void kernelFunction(T * __restrict__ d_data, const unsigned int NperGPU) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < NperGPU) {
        T v = d_data[tid];
        for (int k = 0; k < 1000; k++) v = v * v;
        d_data[tid] = v;
    }
}
/******************/
/* PLAN STRUCTURE */
/******************/
// Per-GPU work descriptor: owns the device buffer holding one GPU's chunk.
template<class T>
struct plan {
    T *d_data;  // device allocation of NperGPU elements (see createPlan)
};
/*********************/
/* SVD PLAN CREATION */
/*********************/
// Selects GPU `gpuID` and allocates NperGPU elements of device memory into
// plan.d_data. NOTE(review): the buffer is never freed explicitly; cleanup
// appears to rely on the final cudaDeviceReset() in main.
template<class T>
void createPlan(plan<T>& plan, unsigned int NperGPU, unsigned int gpuID) {
    // --- Device allocation
    gpuErrchk(cudaSetDevice(gpuID));
    gpuErrchk(cudaMalloc(&(plan.d_data), NperGPU * sizeof(T)));
}
}
/********/
/* MAIN */
/********/
// Distributes N doubles across numGPUs devices, squares each element
// repeatedly on its device, and copies the results back, issuing all work
// for one device before moving to the next ("depth-first" order).
int main() {
    const int numGPUs = 4;
    const int NperGPU = 500000;
    const int N = NperGPU * numGPUs;

    plan<double> plan[numGPUs];
    for (int k = 0; k < numGPUs; k++) createPlan(plan[k], NperGPU, k);

    // --- "Depth-first" approach - no stream
    // Pinned host buffer so the async copies can run asynchronously.
    // NOTE(review): the buffer is never initialized before upload and never
    // explicitly freed.
    double *inputMatrices; gpuErrchk(cudaMallocHost(&inputMatrices, N * sizeof(double)));
    for (int k = 0; k < numGPUs; k++)
    {
        gpuErrchk(cudaSetDevice(k));
        // All three operations go to the device's default (NULL) stream, so
        // copy -> kernel -> copy are ordered among themselves per device.
        gpuErrchk(cudaMemcpyAsync(plan[k].d_data, inputMatrices + k * NperGPU, NperGPU * sizeof(double), cudaMemcpyHostToDevice));
        kernelFunction<<<iDivUp(NperGPU, BLOCKSIZE), BLOCKSIZE>>>(plan[k].d_data, NperGPU);
        gpuErrchk(cudaMemcpyAsync(inputMatrices + k * NperGPU, plan[k].d_data, NperGPU * sizeof(double), cudaMemcpyDeviceToHost));
    }
    // NOTE(review): resets only the current (last-selected) device; work on
    // the other devices is not explicitly synchronized before exit.
    gpuErrchk(cudaDeviceReset());
}
}
|
e3d251667620e36b5bdfc034e0f08d9a3ab5a8d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_utils.h"
#include "lcg_rng.h"
#include "disney_bsdf.h"
#include "lights.h"
#include "optix_params.h"
#include "util/texture_channel_mask.h"
extern "C" {
__constant__ LaunchParams launch_params;
}
// Per-ray payload exchanged (via packed pointer) between the raygen program
// and the closest-hit / miss programs.
struct RayPayload {
    float2 uv;              // interpolated texture coordinates at the hit
    float t_hit;            // ray parameter of the hit; < 0 signals a miss
    uint32_t material_id;   // index into RayGenParams::materials
    float3 normal;          // shading normal (on miss: background radiance)
    float pad;              // explicit padding (struct totals 32 bytes)
};
// Builds a fresh payload representing "no hit yet" (t_hit < 0 is the miss
// sentinel checked by raygen).
// Fix: `pad` was previously left uninitialized; zero it so the payload's
// bytes are fully deterministic.
__device__ RayPayload make_ray_payload() {
    RayPayload p;
    p.uv = make_float2(0.f);
    p.t_hit = -1.f;
    p.material_id = 0;
    p.normal = make_float3(0.f);
    p.pad = 0.f;
    return p;
}
// Resolves a scalar material parameter: if x's bit pattern encodes a texture
// reference (see texture_channel_mask.h), samples the encoded channel of the
// referenced texture at uv; otherwise returns x as a plain constant.
__device__ float textured_scalar_param(const float x, const float2 &uv) {
    const uint32_t mask = __float_as_int(x);
    if (IS_TEXTURED_PARAM(mask)) {
        const uint32_t tex_id = GET_TEXTURE_ID(mask);
        const uint32_t channel = GET_TEXTURE_CHANNEL(mask);
        return component(tex2D<float4>(launch_params.textures[tex_id], uv.x, uv.y), channel);
    }
    return x;
}
// Fills a DisneyMaterial from the packed MaterialParams, resolving any
// texture-encoded parameters at the given UV: base_color samples RGB from
// its texture when textured; all scalar parameters go through
// textured_scalar_param.
__device__ void unpack_material(const MaterialParams &p, float2 uv, DisneyMaterial &mat) {
    uint32_t mask = __float_as_int(p.base_color.x);
    if (IS_TEXTURED_PARAM(mask)) {
        const uint32_t tex_id = GET_TEXTURE_ID(mask);
        mat.base_color = make_float3(tex2D<float4>(launch_params.textures[tex_id], uv.x, uv.y));
    } else {
        mat.base_color = p.base_color;
    }

    mat.metallic = textured_scalar_param(p.metallic, uv);
    mat.specular = textured_scalar_param(p.specular, uv);
    mat.roughness = textured_scalar_param(p.roughness, uv);
    mat.specular_tint = textured_scalar_param(p.specular_tint, uv);
    mat.anisotropy = textured_scalar_param(p.anisotropy, uv);
    mat.sheen = textured_scalar_param(p.sheen, uv);
    mat.sheen_tint = textured_scalar_param(p.sheen_tint, uv);
    mat.clearcoat = textured_scalar_param(p.clearcoat, uv);
    mat.clearcoat_gloss = textured_scalar_param(p.clearcoat_gloss, uv);
    mat.ior = textured_scalar_param(p.ior, uv);
    mat.specular_transmission = textured_scalar_param(p.specular_transmission, uv);
}
// Multiple-importance-sampled direct lighting for one shading point: picks a
// quad light uniformly at random, then combines one light sample and one
// BSDF sample with the power heuristic. Shadow rays use terminate-on-first-hit
// flags with closest-hit disabled, so only the occlusion miss program clears
// the payload: shadow_hit stays 1 when the light is blocked.
// ray_count is incremented per shadow ray when REPORT_RAY_STATS is defined.
// NOTE(review): the estimate is not divided by the light-selection pdf
// (1/num_lights) — with more than one light this under-weights each light;
// confirm whether this is intended (e.g. scenes always have one light).
__device__ float3 sample_direct_light(const DisneyMaterial &mat, const float3 &hit_p,
        const float3 &n, const float3 &v_x, const float3 &v_y, const float3 &w_o,
        const QuadLight *lights, const uint32_t num_lights, uint16_t &ray_count, LCGRand &rng)
{
    float3 illum = make_float3(0.f);

    // Uniform light selection, clamped so a random value of 1 stays in range.
    uint32_t light_id = lcg_randomf(rng) * num_lights;
    light_id = min(light_id, num_lights - 1);
    QuadLight light = lights[light_id];

    const uint32_t occlusion_flags = OPTIX_RAY_FLAG_DISABLE_ANYHIT
        | OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT
        | OPTIX_RAY_FLAG_DISABLE_CLOSESTHIT;

    // Sample the light to compute an incident light ray to this point
    {
        float3 light_pos = sample_quad_light_position(light,
                make_float2(lcg_randomf(rng), lcg_randomf(rng)));
        float3 light_dir = light_pos - hit_p;
        float light_dist = length(light_dir);
        light_dir = normalize(light_dir);

        float light_pdf = quad_light_pdf(light, light_pos, hit_p, light_dir);
        float bsdf_pdf = disney_pdf(mat, n, w_o, light_dir, v_x, v_y);

        // Payload starts at 1 ("blocked"); the occlusion miss program clears
        // it, so 0 means the light is visible from hit_p.
        uint32_t shadow_hit = 1;
        optixTrace(launch_params.scene, hit_p, light_dir, EPSILON, light_dist, 0.f,
                0xff, occlusion_flags, PRIMARY_RAY, 1, OCCLUSION_RAY,
                shadow_hit);
#ifdef REPORT_RAY_STATS
        ++ray_count;
#endif
        if (light_pdf >= EPSILON && bsdf_pdf >= EPSILON && !shadow_hit) {
            float3 bsdf = disney_brdf(mat, n, w_o, light_dir, v_x, v_y);
            float w = power_heuristic(1.f, light_pdf, 1.f, bsdf_pdf);
            illum = bsdf * light.emission * fabs(dot(light_dir, n)) * w / light_pdf;
        }
    }

    // Sample the BRDF to compute a light sample as well
    {
        float3 w_i;
        float bsdf_pdf;
        float3 bsdf = sample_disney_brdf(mat, n, w_o, v_x, v_y, rng, w_i, bsdf_pdf);

        float light_dist;
        float3 light_pos;
        // Only contributes when the sampled direction actually hits the light.
        if (!all_zero(bsdf) && bsdf_pdf >= EPSILON && quad_intersect(light, hit_p, w_i, light_dist, light_pos)) {
            float light_pdf = quad_light_pdf(light, light_pos, hit_p, w_i);
            if (light_pdf >= EPSILON) {
                float w = power_heuristic(1.f, bsdf_pdf, 1.f, light_pdf);

                uint32_t shadow_hit = 1;
                optixTrace(launch_params.scene, hit_p, w_i, EPSILON, light_dist, 0.f,
                        0xff, occlusion_flags, PRIMARY_RAY, 1, OCCLUSION_RAY,
                        shadow_hit);
#ifdef REPORT_RAY_STATS
                ++ray_count;
#endif
                if (!shadow_hit) {
                    illum = illum + bsdf * light.emission * fabs(dot(w_i, n)) * w / bsdf_pdf;
                }
            }
        }
    }
    return illum;
}
// Raygen: per-pixel path tracer. For each of SAMPLES_PER_PIXEL jittered
// primary rays it traces a path up to MAX_PATH_DEPTH bounces with next-event
// estimation, averages the radiance, and writes the result to the
// framebuffer — either sRGB-mapped, or (with DEMODULATE_ALBEDO) divided by
// the first-hit base color.
// Fix: `albedo` is now zero-initialized; previously it was uninitialized and
// was read in the DEMODULATE_ALBEDO path when every primary ray for the
// pixel missed the scene.
extern "C" __global__ void __raygen__perspective_camera() {
    const RayGenParams &params = get_shader_params<RayGenParams>();

    const uint2 pixel = make_uint2(optixGetLaunchIndex().x, optixGetLaunchIndex().y);
    const uint2 screen = make_uint2(optixGetLaunchDimensions().x, optixGetLaunchDimensions().y);
    const uint32_t pixel_idx = pixel.x + pixel.y * screen.x;

    float3 accum_sum = float3({0, 0, 0});
    LCGRand rng = get_rng(launch_params.frame_id);
    // Base color of the first surface seen by this pixel (last sample wins);
    // zero when nothing was hit, making the demodulated output zero as well.
    float3 albedo = make_float3(0.f);
    for(int i = 0; i < SAMPLES_PER_PIXEL; i++) {
        // Jittered sub-pixel position mapped through the camera basis.
        const float2 d = make_float2(pixel.x + lcg_randomf(rng), pixel.y + lcg_randomf(rng)) / make_float2(screen);
        float3 ray_dir = normalize(d.x * make_float3(launch_params.cam_du)
                + d.y * make_float3(launch_params.cam_dv) + make_float3(launch_params.cam_dir_top_left));
        float3 ray_origin = make_float3(launch_params.cam_pos);

        DisneyMaterial mat;
        // NOTE(review): ray_count is scoped to this sample loop, but the
        // trailing REPORT_RAY_STATS write references it outside the loop —
        // that path will not compile if REPORT_RAY_STATS is enabled.
        uint16_t ray_count = 0;
        const float3 light_emission = make_float3(1.f);
        int bounce = 0;
        float3 illum = make_float3(0.f);
        float3 path_throughput = make_float3(1.f);
        do {
            RayPayload payload = make_ray_payload();
            uint2 payload_ptr;
            pack_ptr(&payload, payload_ptr.x, payload_ptr.y);

            optixTrace(launch_params.scene, ray_origin, ray_dir, EPSILON, 1e20f, 0.f,
                    0xff, OPTIX_RAY_FLAG_DISABLE_ANYHIT, PRIMARY_RAY, 1, PRIMARY_RAY,
                    payload_ptr.x, payload_ptr.y);
#ifdef REPORT_RAY_STATS
            ++ray_count;
#endif
            // Miss: payload.normal carries the background radiance.
            if (payload.t_hit <= 0.f) {
                illum = illum + path_throughput * payload.normal;
                break;
            }

            // launch_params.framebuffer[pixel_idx] = ray_origin + payload.t_hit * ray_dir;
            // return;

            unpack_material(params.materials[payload.material_id], payload.uv, mat);

            /* */
            if (bounce == 0) {
                albedo = mat.base_color;
            }
            /* */

            const float3 w_o = -ray_dir;
            const float3 hit_p = ray_origin + payload.t_hit * ray_dir;
            // Shading frame; flip the normal toward the viewer for opaque materials.
            float3 v_x, v_y;
            float3 v_z = payload.normal;
            if (mat.specular_transmission == 0.f && dot(w_o, v_z) < 0.f) {
                v_z = -v_z;
            }
            ortho_basis(v_x, v_y, v_z);

            // Next-event estimation at this bounce.
            illum = illum + path_throughput * sample_direct_light(mat, hit_p, v_z, v_x, v_y, w_o,
                    params.lights, params.num_lights, ray_count, rng);

            // Continue the path by sampling the BSDF.
            float3 w_i;
            float pdf;
            float3 bsdf = sample_disney_brdf(mat, v_z, w_o, v_x, v_y, rng, w_i, pdf);
            if (pdf < EPSILON || all_zero(bsdf)) {
                break;
            }
            path_throughput = path_throughput * bsdf * fabs(dot(w_i, v_z)) / pdf;
            if (path_throughput.x < EPSILON && path_throughput.y < EPSILON && path_throughput.z < EPSILON) {
                break;
            }

            ray_origin = hit_p;
            ray_dir = w_i;
            ++bounce;
        } while (bounce < MAX_PATH_DEPTH);
        accum_sum = accum_sum + illum;
    }
    accum_sum = accum_sum / SAMPLES_PER_PIXEL;

    // Seems like original code did some kind of temporal smoothing.
    // We ain't having any of that
    /* const float3 prev_color = make_float3(launch_params.accum_buffer[pixel_idx]);
    const float3 accum_color = (illum + launch_params.frame_id * prev_color) / (launch_params.frame_id + 1);
    launch_params.accum_buffer[pixel_idx] = make_float4(accum_color, 1.f);
    launch_params.framebuffer[pixel_idx] = make_uchar4(
        clamp(linear_to_srgb(accum_color.x) * 255.f, 0.f, 255.f),
        clamp(linear_to_srgb(accum_color.y) * 255.f, 0.f, 255.f),
        clamp(linear_to_srgb(accum_color.z) * 255.f, 0.f, 255.f), 255); */
    /* launch_params.framebuffer[pixel_idx] = make_uchar4(
        clamp(linear_to_srgb(illum.x) * 255.f, 0.f, 255.f),
        clamp(linear_to_srgb(illum.y) * 255.f, 0.f, 255.f),
        clamp(linear_to_srgb(illum.z) * 255.f, 0.f, 255.f), 255); */

#if DEMODULATE_ALBEDO
    // Output irradiance with the first-hit albedo divided out (per channel).
    float3 demodulated_illum =
        make_float3(albedo.x > 1e-8 ? accum_sum.x / albedo.x : 0.0f,
                    albedo.y > 1e-8 ? accum_sum.y / albedo.y : 0.0f,
                    albedo.z > 1e-8 ? accum_sum.z / albedo.z : 0.0f);
    launch_params.framebuffer[pixel_idx] =
        make_float3(max(demodulated_illum.x, 0.f),
                    max(demodulated_illum.y, 0.f),
                    max(demodulated_illum.z, 0.f));
#else // DEMODULATE_ALBEDO
    launch_params.framebuffer[pixel_idx] =
        make_float3(clamp(linear_to_srgb(accum_sum.x), 0.f, 1.f),
                    clamp(linear_to_srgb(accum_sum.y), 0.f, 1.f),
                    clamp(linear_to_srgb(accum_sum.z), 0.f, 1.f));
#endif // DEMODULATE_ALBEDO
#ifdef REPORT_RAY_STATS
    launch_params.ray_stats_buffer[pixel_idx] = ray_count;
#endif
}
// Primary-ray miss: flags the miss via t_hit = -1 and stores a procedural
// checkerboard background color in payload.normal (raygen treats it as the
// radiance arriving along the ray).
extern "C" __global__ void __miss__miss() {
    RayPayload &payload = get_payload<RayPayload>();
    payload.t_hit = -1.f;
    float3 dir = optixGetWorldRayDirection();
    // Apply our miss "shader" to draw the checkerboard background
    // Equirectangular mapping of the ray direction into (u, v) in [0, 1].
    float u = (1.f + atan2(dir.x, -dir.z) * M_1_PI) * 0.5f;
    float v = acos(dir.y) * M_1_PI;

    // 10x10 checker; everything below the horizon band gets the dark color.
    int check_x = u * 10.f;
    int check_y = v * 10.f;

    if (dir.y > -0.1f && (check_x + check_y) % 2 == 0) {
        payload.normal = make_float3(0.5f);
    } else {
        payload.normal = make_float3(0.1f);
    }
}
// Occlusion rays carry a single "blocked" flag in payload slot 0, seeded to 1
// by the caller. Reaching this miss program means nothing blocked the ray,
// so clear the flag.
extern "C" __global__ void __miss__occlusion_miss() {
    optixSetPayload_0(0);
}
// Primary-ray closest hit: interpolates (or derives) the surface normal and
// texture coordinates for the hit triangle and writes them, together with
// the hit distance and material id, into the ray payload.
extern "C" __global__ void __closesthit__closest_hit() {
    const HitGroupParams &params = get_shader_params<HitGroupParams>();

    const float2 bary = optixGetTriangleBarycentrics();
    const uint3 indices = params.index_buffer[optixGetPrimitiveIndex()];

    float3 normal;
    if(params.normal_buffer != 0) {
        // Smooth shading: barycentric blend of per-vertex normals
        // (normalized once, after the object-to-world transform below).
        const float3 n1 = params.normal_buffer[indices.x];
        const float3 n2 = params.normal_buffer[indices.y];
        const float3 n3 = params.normal_buffer[indices.z];
        normal = n1 * (1.f - bary.x - bary.y) + n2 * bary.x + n3 * bary.y; // Normalized later
    } else {
        // No vertex normals: fall back to the flat geometric normal.
        const float3 v0 = params.vertex_buffer[indices.x];
        const float3 v1 = params.vertex_buffer[indices.y];
        const float3 v2 = params.vertex_buffer[indices.z];
        normal = normalize(cross(v1 - v0, v2 - v0));
    }

    // UVs default to (0, 0) when the mesh carries no texture coordinates.
    float2 uv = make_float2(0.f);
    if (params.uv_buffer) {
        float2 uva = params.uv_buffer[indices.x];
        float2 uvb = params.uv_buffer[indices.y];
        float2 uvc = params.uv_buffer[indices.z];
        uv = (1.f - bary.x - bary.y) * uva
            + bary.x * uvb + bary.y * uvc;
    }

    RayPayload &payload = get_payload<RayPayload>();
    payload.uv = uv;
    payload.t_hit = optixGetRayTmax();
    payload.material_id = params.material_id;
    payload.normal = normalize(optixTransformNormalFromObjectToWorldSpace(normal));
}
| e3d251667620e36b5bdfc034e0f08d9a3ab5a8d4.cu | #include "cuda_utils.h"
#include "lcg_rng.h"
#include "disney_bsdf.h"
#include "lights.h"
#include "optix_params.h"
#include "util/texture_channel_mask.h"
extern "C" {
__constant__ LaunchParams launch_params;
}
// Per-ray payload exchanged (via packed pointer) between the raygen program
// and the closest-hit / miss programs.
struct RayPayload {
    float2 uv;              // interpolated texture coordinates at the hit
    float t_hit;            // ray parameter of the hit; < 0 signals a miss
    uint32_t material_id;   // index into RayGenParams::materials
    float3 normal;          // shading normal (on miss: background radiance)
    float pad;              // explicit padding (struct totals 32 bytes)
};
// Builds a fresh payload representing "no hit yet" (t_hit < 0 is the miss
// sentinel checked by raygen).
// Fix: `pad` was previously left uninitialized; zero it so the payload's
// bytes are fully deterministic.
__device__ RayPayload make_ray_payload() {
    RayPayload p;
    p.uv = make_float2(0.f);
    p.t_hit = -1.f;
    p.material_id = 0;
    p.normal = make_float3(0.f);
    p.pad = 0.f;
    return p;
}
// Resolves a scalar material parameter: if x's bit pattern encodes a texture
// reference (see texture_channel_mask.h), samples the encoded channel of the
// referenced texture at uv; otherwise returns x as a plain constant.
__device__ float textured_scalar_param(const float x, const float2 &uv) {
    const uint32_t mask = __float_as_int(x);
    if (IS_TEXTURED_PARAM(mask)) {
        const uint32_t tex_id = GET_TEXTURE_ID(mask);
        const uint32_t channel = GET_TEXTURE_CHANNEL(mask);
        return component(tex2D<float4>(launch_params.textures[tex_id], uv.x, uv.y), channel);
    }
    return x;
}
// Fills a DisneyMaterial from the packed MaterialParams, resolving any
// texture-encoded parameters at the given UV: base_color samples RGB from
// its texture when textured; all scalar parameters go through
// textured_scalar_param.
__device__ void unpack_material(const MaterialParams &p, float2 uv, DisneyMaterial &mat) {
    uint32_t mask = __float_as_int(p.base_color.x);
    if (IS_TEXTURED_PARAM(mask)) {
        const uint32_t tex_id = GET_TEXTURE_ID(mask);
        mat.base_color = make_float3(tex2D<float4>(launch_params.textures[tex_id], uv.x, uv.y));
    } else {
        mat.base_color = p.base_color;
    }

    mat.metallic = textured_scalar_param(p.metallic, uv);
    mat.specular = textured_scalar_param(p.specular, uv);
    mat.roughness = textured_scalar_param(p.roughness, uv);
    mat.specular_tint = textured_scalar_param(p.specular_tint, uv);
    mat.anisotropy = textured_scalar_param(p.anisotropy, uv);
    mat.sheen = textured_scalar_param(p.sheen, uv);
    mat.sheen_tint = textured_scalar_param(p.sheen_tint, uv);
    mat.clearcoat = textured_scalar_param(p.clearcoat, uv);
    mat.clearcoat_gloss = textured_scalar_param(p.clearcoat_gloss, uv);
    mat.ior = textured_scalar_param(p.ior, uv);
    mat.specular_transmission = textured_scalar_param(p.specular_transmission, uv);
}
// Multiple-importance-sampled direct lighting for one shading point: picks a
// quad light uniformly at random, then combines one light sample and one
// BSDF sample with the power heuristic. Shadow rays use terminate-on-first-hit
// flags with closest-hit disabled, so only the occlusion miss program clears
// the payload: shadow_hit stays 1 when the light is blocked.
// ray_count is incremented per shadow ray when REPORT_RAY_STATS is defined.
// NOTE(review): the estimate is not divided by the light-selection pdf
// (1/num_lights) — with more than one light this under-weights each light;
// confirm whether this is intended (e.g. scenes always have one light).
__device__ float3 sample_direct_light(const DisneyMaterial &mat, const float3 &hit_p,
        const float3 &n, const float3 &v_x, const float3 &v_y, const float3 &w_o,
        const QuadLight *lights, const uint32_t num_lights, uint16_t &ray_count, LCGRand &rng)
{
    float3 illum = make_float3(0.f);

    // Uniform light selection, clamped so a random value of 1 stays in range.
    uint32_t light_id = lcg_randomf(rng) * num_lights;
    light_id = min(light_id, num_lights - 1);
    QuadLight light = lights[light_id];

    const uint32_t occlusion_flags = OPTIX_RAY_FLAG_DISABLE_ANYHIT
        | OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT
        | OPTIX_RAY_FLAG_DISABLE_CLOSESTHIT;

    // Sample the light to compute an incident light ray to this point
    {
        float3 light_pos = sample_quad_light_position(light,
                make_float2(lcg_randomf(rng), lcg_randomf(rng)));
        float3 light_dir = light_pos - hit_p;
        float light_dist = length(light_dir);
        light_dir = normalize(light_dir);

        float light_pdf = quad_light_pdf(light, light_pos, hit_p, light_dir);
        float bsdf_pdf = disney_pdf(mat, n, w_o, light_dir, v_x, v_y);

        // Payload starts at 1 ("blocked"); the occlusion miss program clears
        // it, so 0 means the light is visible from hit_p.
        uint32_t shadow_hit = 1;
        optixTrace(launch_params.scene, hit_p, light_dir, EPSILON, light_dist, 0.f,
                0xff, occlusion_flags, PRIMARY_RAY, 1, OCCLUSION_RAY,
                shadow_hit);
#ifdef REPORT_RAY_STATS
        ++ray_count;
#endif
        if (light_pdf >= EPSILON && bsdf_pdf >= EPSILON && !shadow_hit) {
            float3 bsdf = disney_brdf(mat, n, w_o, light_dir, v_x, v_y);
            float w = power_heuristic(1.f, light_pdf, 1.f, bsdf_pdf);
            illum = bsdf * light.emission * fabs(dot(light_dir, n)) * w / light_pdf;
        }
    }

    // Sample the BRDF to compute a light sample as well
    {
        float3 w_i;
        float bsdf_pdf;
        float3 bsdf = sample_disney_brdf(mat, n, w_o, v_x, v_y, rng, w_i, bsdf_pdf);

        float light_dist;
        float3 light_pos;
        // Only contributes when the sampled direction actually hits the light.
        if (!all_zero(bsdf) && bsdf_pdf >= EPSILON && quad_intersect(light, hit_p, w_i, light_dist, light_pos)) {
            float light_pdf = quad_light_pdf(light, light_pos, hit_p, w_i);
            if (light_pdf >= EPSILON) {
                float w = power_heuristic(1.f, bsdf_pdf, 1.f, light_pdf);

                uint32_t shadow_hit = 1;
                optixTrace(launch_params.scene, hit_p, w_i, EPSILON, light_dist, 0.f,
                        0xff, occlusion_flags, PRIMARY_RAY, 1, OCCLUSION_RAY,
                        shadow_hit);
#ifdef REPORT_RAY_STATS
                ++ray_count;
#endif
                if (!shadow_hit) {
                    illum = illum + bsdf * light.emission * fabs(dot(w_i, n)) * w / bsdf_pdf;
                }
            }
        }
    }
    return illum;
}
// Raygen: per-pixel path tracer. For each of SAMPLES_PER_PIXEL jittered
// primary rays it traces a path up to MAX_PATH_DEPTH bounces with next-event
// estimation, averages the radiance, and writes the result to the
// framebuffer — either sRGB-mapped, or (with DEMODULATE_ALBEDO) divided by
// the first-hit base color.
// Fix: `albedo` is now zero-initialized; previously it was uninitialized and
// was read in the DEMODULATE_ALBEDO path when every primary ray for the
// pixel missed the scene.
extern "C" __global__ void __raygen__perspective_camera() {
    const RayGenParams &params = get_shader_params<RayGenParams>();

    const uint2 pixel = make_uint2(optixGetLaunchIndex().x, optixGetLaunchIndex().y);
    const uint2 screen = make_uint2(optixGetLaunchDimensions().x, optixGetLaunchDimensions().y);
    const uint32_t pixel_idx = pixel.x + pixel.y * screen.x;

    float3 accum_sum = float3({0, 0, 0});
    LCGRand rng = get_rng(launch_params.frame_id);
    // Base color of the first surface seen by this pixel (last sample wins);
    // zero when nothing was hit, making the demodulated output zero as well.
    float3 albedo = make_float3(0.f);
    for(int i = 0; i < SAMPLES_PER_PIXEL; i++) {
        // Jittered sub-pixel position mapped through the camera basis.
        const float2 d = make_float2(pixel.x + lcg_randomf(rng), pixel.y + lcg_randomf(rng)) / make_float2(screen);
        float3 ray_dir = normalize(d.x * make_float3(launch_params.cam_du)
                + d.y * make_float3(launch_params.cam_dv) + make_float3(launch_params.cam_dir_top_left));
        float3 ray_origin = make_float3(launch_params.cam_pos);

        DisneyMaterial mat;
        // NOTE(review): ray_count is scoped to this sample loop, but the
        // trailing REPORT_RAY_STATS write references it outside the loop —
        // that path will not compile if REPORT_RAY_STATS is enabled.
        uint16_t ray_count = 0;
        const float3 light_emission = make_float3(1.f);
        int bounce = 0;
        float3 illum = make_float3(0.f);
        float3 path_throughput = make_float3(1.f);
        do {
            RayPayload payload = make_ray_payload();
            uint2 payload_ptr;
            pack_ptr(&payload, payload_ptr.x, payload_ptr.y);

            optixTrace(launch_params.scene, ray_origin, ray_dir, EPSILON, 1e20f, 0.f,
                    0xff, OPTIX_RAY_FLAG_DISABLE_ANYHIT, PRIMARY_RAY, 1, PRIMARY_RAY,
                    payload_ptr.x, payload_ptr.y);
#ifdef REPORT_RAY_STATS
            ++ray_count;
#endif
            // Miss: payload.normal carries the background radiance.
            if (payload.t_hit <= 0.f) {
                illum = illum + path_throughput * payload.normal;
                break;
            }

            // launch_params.framebuffer[pixel_idx] = ray_origin + payload.t_hit * ray_dir;
            // return;

            unpack_material(params.materials[payload.material_id], payload.uv, mat);

            /* */
            if (bounce == 0) {
                albedo = mat.base_color;
            }
            /* */

            const float3 w_o = -ray_dir;
            const float3 hit_p = ray_origin + payload.t_hit * ray_dir;
            // Shading frame; flip the normal toward the viewer for opaque materials.
            float3 v_x, v_y;
            float3 v_z = payload.normal;
            if (mat.specular_transmission == 0.f && dot(w_o, v_z) < 0.f) {
                v_z = -v_z;
            }
            ortho_basis(v_x, v_y, v_z);

            // Next-event estimation at this bounce.
            illum = illum + path_throughput * sample_direct_light(mat, hit_p, v_z, v_x, v_y, w_o,
                    params.lights, params.num_lights, ray_count, rng);

            // Continue the path by sampling the BSDF.
            float3 w_i;
            float pdf;
            float3 bsdf = sample_disney_brdf(mat, v_z, w_o, v_x, v_y, rng, w_i, pdf);
            if (pdf < EPSILON || all_zero(bsdf)) {
                break;
            }
            path_throughput = path_throughput * bsdf * fabs(dot(w_i, v_z)) / pdf;
            if (path_throughput.x < EPSILON && path_throughput.y < EPSILON && path_throughput.z < EPSILON) {
                break;
            }

            ray_origin = hit_p;
            ray_dir = w_i;
            ++bounce;
        } while (bounce < MAX_PATH_DEPTH);
        accum_sum = accum_sum + illum;
    }
    accum_sum = accum_sum / SAMPLES_PER_PIXEL;

    // Seems like original code did some kind of temporal smoothing.
    // We ain't having any of that
    /* const float3 prev_color = make_float3(launch_params.accum_buffer[pixel_idx]);
    const float3 accum_color = (illum + launch_params.frame_id * prev_color) / (launch_params.frame_id + 1);
    launch_params.accum_buffer[pixel_idx] = make_float4(accum_color, 1.f);
    launch_params.framebuffer[pixel_idx] = make_uchar4(
        clamp(linear_to_srgb(accum_color.x) * 255.f, 0.f, 255.f),
        clamp(linear_to_srgb(accum_color.y) * 255.f, 0.f, 255.f),
        clamp(linear_to_srgb(accum_color.z) * 255.f, 0.f, 255.f), 255); */
    /* launch_params.framebuffer[pixel_idx] = make_uchar4(
        clamp(linear_to_srgb(illum.x) * 255.f, 0.f, 255.f),
        clamp(linear_to_srgb(illum.y) * 255.f, 0.f, 255.f),
        clamp(linear_to_srgb(illum.z) * 255.f, 0.f, 255.f), 255); */

#if DEMODULATE_ALBEDO
    // Output irradiance with the first-hit albedo divided out (per channel).
    float3 demodulated_illum =
        make_float3(albedo.x > 1e-8 ? accum_sum.x / albedo.x : 0.0f,
                    albedo.y > 1e-8 ? accum_sum.y / albedo.y : 0.0f,
                    albedo.z > 1e-8 ? accum_sum.z / albedo.z : 0.0f);
    launch_params.framebuffer[pixel_idx] =
        make_float3(max(demodulated_illum.x, 0.f),
                    max(demodulated_illum.y, 0.f),
                    max(demodulated_illum.z, 0.f));
#else // DEMODULATE_ALBEDO
    launch_params.framebuffer[pixel_idx] =
        make_float3(clamp(linear_to_srgb(accum_sum.x), 0.f, 1.f),
                    clamp(linear_to_srgb(accum_sum.y), 0.f, 1.f),
                    clamp(linear_to_srgb(accum_sum.z), 0.f, 1.f));
#endif // DEMODULATE_ALBEDO
#ifdef REPORT_RAY_STATS
    launch_params.ray_stats_buffer[pixel_idx] = ray_count;
#endif
}
extern "C" __global__ void __miss__miss() {
RayPayload &payload = get_payload<RayPayload>();
payload.t_hit = -1.f;
float3 dir = optixGetWorldRayDirection();
// Apply our miss "shader" to draw the checkerboard background
float u = (1.f + atan2(dir.x, -dir.z) * M_1_PI) * 0.5f;
float v = acos(dir.y) * M_1_PI;
int check_x = u * 10.f;
int check_y = v * 10.f;
if (dir.y > -0.1f && (check_x + check_y) % 2 == 0) {
payload.normal = make_float3(0.5f);
} else {
payload.normal = make_float3(0.1f);
}
}
extern "C" __global__ void __miss__occlusion_miss() {
optixSetPayload_0(0);
}
extern "C" __global__ void __closesthit__closest_hit() {
const HitGroupParams ¶ms = get_shader_params<HitGroupParams>();
const float2 bary = optixGetTriangleBarycentrics();
const uint3 indices = params.index_buffer[optixGetPrimitiveIndex()];
float3 normal;
if(params.normal_buffer != 0) {
const float3 n1 = params.normal_buffer[indices.x];
const float3 n2 = params.normal_buffer[indices.y];
const float3 n3 = params.normal_buffer[indices.z];
normal = n1 * (1.f - bary.x - bary.y) + n2 * bary.x + n3 * bary.y; // Normalized later
} else {
const float3 v0 = params.vertex_buffer[indices.x];
const float3 v1 = params.vertex_buffer[indices.y];
const float3 v2 = params.vertex_buffer[indices.z];
normal = normalize(cross(v1 - v0, v2 - v0));
}
float2 uv = make_float2(0.f);
if (params.uv_buffer) {
float2 uva = params.uv_buffer[indices.x];
float2 uvb = params.uv_buffer[indices.y];
float2 uvc = params.uv_buffer[indices.z];
uv = (1.f - bary.x - bary.y) * uva
+ bary.x * uvb + bary.y * uvc;
}
RayPayload &payload = get_payload<RayPayload>();
payload.uv = uv;
payload.t_hit = optixGetRayTmax();
payload.material_id = params.material_id;
payload.normal = normalize(optixTransformNormalFromObjectToWorldSpace(normal));
}
|
8b1b92e5f2862b58a054af5ff54e0444e3c1ad19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
*/
#include "internal.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "utils/copygen.hpp"
#include "utils/boxutils.hpp"
#include "utils/scan_block.hpp"
#include "octree_iterator.hpp"
namespace pcl
{
namespace device
{
typedef OctreeImpl::PointType PointType;
template<typename RadiusStrategy, typename FetchStrategy>
struct Batch : public RadiusStrategy, public FetchStrategy
{
const int *indices;
PtrStep<float> points;
OctreeGlobalWithBox octree;
int max_results;
mutable int* output;
mutable int* output_sizes;
};
struct DirectQuery
{
PtrSz<PointType> queries;
__device__ __forceinline__ float3 fetch(int query_index) const
{
PointType q = queries.data[query_index];
return make_float3(q.x, q.y, q.z);
}
};
struct IndicesQuery : public DirectQuery
{
const int* queries_indices;
__device__ __forceinline__ float3 fetch(int query_index) const
{
PointType q = queries[queries_indices[query_index]];
return make_float3(q.x, q.y, q.z);
}
};
struct SharedRadius
{
float radius;
__device__ __forceinline__ float getRadius(int /*index*/) const { return radius; }
__device__ __forceinline__ float bradcastRadius2(float* /*ptr*/, bool /*active*/, float& /*radius_reg*/) const
{
return radius * radius;
}
};
struct IndividualRadius
{
const float* radiuses;
__device__ __forceinline__ float getRadius(int index) const { return radiuses[index]; }
__device__ __forceinline__ float bradcastRadius2(float* ptr, bool active, float& radius_reg) const
{
if (active)
*ptr = radius_reg * radius_reg;
return *ptr;
}
};
struct KernelPolicy
{
enum
{
CTA_SIZE = 512,
WARP_SIZE = 32,
WARPS_COUNT = CTA_SIZE/WARP_SIZE,
MAX_LEVELS_PLUS_ROOT = 11,
CHECK_FLAG = 1 << 31
};
struct SmemStorage
{
volatile int per_warp_buffer[WARPS_COUNT];
volatile int cta_buffer[CTA_SIZE];
};
};
__shared__ KernelPolicy::SmemStorage storage;
template<typename BatchType>
struct Warp_radiusSearch
{
public:
typedef OctreeIteratorDeviceNS OctreeIterator;
const BatchType& batch;
OctreeIterator iterator;
int found_count;
int query_index;
float3 query;
float radius;
__device__ __forceinline__ Warp_radiusSearch(const BatchType& batch_arg, int query_index_arg)
: batch(batch_arg), iterator(/**/batch.octree/*storage.paths*/), found_count(0), query_index(query_index_arg){}
__device__ __forceinline__ void launch(bool active)
{
if (active)
{
query = batch.fetch(query_index);
radius = batch.getRadius(query_index);
}
else
query_index = -1;
while(__any(active))
{
int leaf = -1;
if (active)
leaf = examineNode(iterator);
processLeaf(leaf);
active = active && iterator.level >= 0 && found_count < batch.max_results;
}
if (query_index != -1)
batch.output_sizes[query_index] = found_count;
}
private:
__device__ __forceinline__ int examineNode(OctreeIterator& iterator)
{
using namespace pcl::gpu;
int node_idx = *iterator;
int code = batch.octree.codes[node_idx];
float3 node_minp = batch.octree.minp;
float3 node_maxp = batch.octree.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, query, radius))
{
++iterator;
return -1;
}
if (checkIfNodeInsideSphere(node_minp, node_maxp, query, radius))
{
++iterator;
return node_idx; //return node to copy
}
//need to go to next level
int node = batch.octree.nodes[node_idx];
int children_mask = node & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
++iterator;
return (node_idx | KernelPolicy::CHECK_FLAG); // return node to check
}
//goto next level
int first = node >> 8;
int len = __popc(children_mask);
iterator.gotoNextLevel(first, len);
return -1;
};
__device__ __forceinline__ void processLeaf(int leaf)
{
int mask = __ballot(leaf != -1);
while(mask)
{
unsigned int laneId = Warp::laneId();
unsigned int warpId = Warp::id();
int active_lane = __ffs(mask) - 1; //[0..31]
mask &= ~(1 << active_lane);
//broadcast active_found_count
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = found_count;
int active_found_count = storage.per_warp_buffer[warpId];
int node_idx = leaf & ~KernelPolicy::CHECK_FLAG;
//broadcast beg
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = batch.octree.begs[node_idx];
int beg = storage.per_warp_buffer[warpId];
//broadcast end
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = batch.octree.ends[node_idx];
int end = storage.per_warp_buffer[warpId];
//broadcast active_query_index
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = query_index;
int active_query_index = storage.per_warp_buffer[warpId];
int length = end - beg;
int *out = batch.output + active_query_index * batch.max_results + active_found_count;
int length_left = batch.max_results - active_found_count;
int test = __any(active_lane == laneId && (leaf & KernelPolicy::CHECK_FLAG));
if (test)
{
float3 active_query;
//broadcast warp_query
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.x);
active_query.x = __int_as_float(storage.per_warp_buffer[warpId]);
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.y);
active_query.y = __int_as_float(storage.per_warp_buffer[warpId]);
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.z);
active_query.z = __int_as_float(storage.per_warp_buffer[warpId]);
float radius2 = batch.bradcastRadius2((float*)&storage.per_warp_buffer[warpId], (active_lane == laneId), radius);
length = TestWarpKernel(beg, active_query, radius2, length, out, length_left);
}
else
{
length = min(length, length_left);
Warp::copy(batch.indices + beg, batch.indices + beg + length, out);
}
if (active_lane == laneId)
found_count += length;
}
}
__device__ __forceinline__ int TestWarpKernel(int beg, const float3& active_query, float radius2, int length, int* out, int length_left)
{
unsigned int idx = Warp::laneId();
int last_threadIdx = threadIdx.x - idx + 31;
int total_new = 0;
for(;;)
{
int take = 0;
if (idx < length)
{
float dx = batch.points.ptr(0)[beg + idx] - active_query.x;
float dy = batch.points.ptr(1)[beg + idx] - active_query.y;
float dz = batch.points.ptr(2)[beg + idx] - active_query.z;
float d2 = dx * dx + dy * dy + dz * dz;
if (d2 < radius2)
take = 1;
}
storage.cta_buffer[threadIdx.x] = take;
int offset = scan_warp<exclusive>(storage.cta_buffer);
//ensure that we copy
bool out_of_bounds = (offset + total_new) >= length_left;
if (take && !out_of_bounds)
out[offset] = batch.indices[beg + idx];
int new_nodes = storage.cta_buffer[last_threadIdx];
idx += Warp::STRIDE;
total_new += new_nodes;
out += new_nodes;
if (__all(idx >= length) || __any(out_of_bounds) || total_new == length_left)
break;
}
return min(total_new, length_left);
}
};
template<typename BatchType>
__global__ void KernelRS(const BatchType batch)
{
int query_index = blockIdx.x * blockDim.x + threadIdx.x;
bool active = query_index < batch.queries.size;
if (__all(active == false))
return;
Warp_radiusSearch<BatchType> search(batch, query_index);
search.launch(active);
}
}
}
template<typename BatchType>
void pcl::device::OctreeImpl::radiusSearchEx(BatchType& batch, const Queries& queries, NeighborIndices& results)
{
batch.indices = indices;
batch.octree = octreeGlobal;
batch.max_results = results.max_elems;
batch.output = results.data;
batch.output_sizes = results.sizes;
batch.points = points_sorted;
cudaSafeCall( hipFuncSetCacheConfig(KernelRS<BatchType>, hipFuncCachePreferL1) );
int block = KernelPolicy::CTA_SIZE;
int grid = divUp((int)batch.queries.size, block);
hipLaunchKernelGGL(( KernelRS), dim3(grid), dim3(block), 0, 0, batch);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, float radius, NeighborIndices& results)
{
typedef Batch<SharedRadius, DirectQuery> BatchType;
BatchType batch;
batch.radius = radius;
batch.queries = queries;
radiusSearchEx(batch, queries, results);
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, const Radiuses& radiuses, NeighborIndices& results)
{
typedef Batch<IndividualRadius, DirectQuery> BatchType;
BatchType batch;
batch.radiuses = radiuses;
batch.queries = queries;
radiusSearchEx(batch, queries, results);
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, const Indices& indices, float radius, NeighborIndices& results)
{
typedef Batch<SharedRadius, IndicesQuery> BatchType;
BatchType batch;
batch.radius = radius;
batch.queries = queries;
batch.queries_indices = indices;
batch.queries.size = indices.size();
radiusSearchEx(batch, queries, results);
}
| 8b1b92e5f2862b58a054af5ff54e0444e3c1ad19.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
*/
#include "internal.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "utils/copygen.hpp"
#include "utils/boxutils.hpp"
#include "utils/scan_block.hpp"
#include "octree_iterator.hpp"
namespace pcl
{
namespace device
{
typedef OctreeImpl::PointType PointType;
template<typename RadiusStrategy, typename FetchStrategy>
struct Batch : public RadiusStrategy, public FetchStrategy
{
const int *indices;
PtrStep<float> points;
OctreeGlobalWithBox octree;
int max_results;
mutable int* output;
mutable int* output_sizes;
};
struct DirectQuery
{
PtrSz<PointType> queries;
__device__ __forceinline__ float3 fetch(int query_index) const
{
PointType q = queries.data[query_index];
return make_float3(q.x, q.y, q.z);
}
};
struct IndicesQuery : public DirectQuery
{
const int* queries_indices;
__device__ __forceinline__ float3 fetch(int query_index) const
{
PointType q = queries[queries_indices[query_index]];
return make_float3(q.x, q.y, q.z);
}
};
struct SharedRadius
{
float radius;
__device__ __forceinline__ float getRadius(int /*index*/) const { return radius; }
__device__ __forceinline__ float bradcastRadius2(float* /*ptr*/, bool /*active*/, float& /*radius_reg*/) const
{
return radius * radius;
}
};
struct IndividualRadius
{
const float* radiuses;
__device__ __forceinline__ float getRadius(int index) const { return radiuses[index]; }
__device__ __forceinline__ float bradcastRadius2(float* ptr, bool active, float& radius_reg) const
{
if (active)
*ptr = radius_reg * radius_reg;
return *ptr;
}
};
struct KernelPolicy
{
enum
{
CTA_SIZE = 512,
WARP_SIZE = 32,
WARPS_COUNT = CTA_SIZE/WARP_SIZE,
MAX_LEVELS_PLUS_ROOT = 11,
CHECK_FLAG = 1 << 31
};
struct SmemStorage
{
volatile int per_warp_buffer[WARPS_COUNT];
volatile int cta_buffer[CTA_SIZE];
};
};
__shared__ KernelPolicy::SmemStorage storage;
template<typename BatchType>
struct Warp_radiusSearch
{
public:
typedef OctreeIteratorDeviceNS OctreeIterator;
const BatchType& batch;
OctreeIterator iterator;
int found_count;
int query_index;
float3 query;
float radius;
__device__ __forceinline__ Warp_radiusSearch(const BatchType& batch_arg, int query_index_arg)
: batch(batch_arg), iterator(/**/batch.octree/*storage.paths*/), found_count(0), query_index(query_index_arg){}
__device__ __forceinline__ void launch(bool active)
{
if (active)
{
query = batch.fetch(query_index);
radius = batch.getRadius(query_index);
}
else
query_index = -1;
while(__any(active))
{
int leaf = -1;
if (active)
leaf = examineNode(iterator);
processLeaf(leaf);
active = active && iterator.level >= 0 && found_count < batch.max_results;
}
if (query_index != -1)
batch.output_sizes[query_index] = found_count;
}
private:
__device__ __forceinline__ int examineNode(OctreeIterator& iterator)
{
using namespace pcl::gpu;
int node_idx = *iterator;
int code = batch.octree.codes[node_idx];
float3 node_minp = batch.octree.minp;
float3 node_maxp = batch.octree.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, query, radius))
{
++iterator;
return -1;
}
if (checkIfNodeInsideSphere(node_minp, node_maxp, query, radius))
{
++iterator;
return node_idx; //return node to copy
}
//need to go to next level
int node = batch.octree.nodes[node_idx];
int children_mask = node & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
++iterator;
return (node_idx | KernelPolicy::CHECK_FLAG); // return node to check
}
//goto next level
int first = node >> 8;
int len = __popc(children_mask);
iterator.gotoNextLevel(first, len);
return -1;
};
__device__ __forceinline__ void processLeaf(int leaf)
{
int mask = __ballot(leaf != -1);
while(mask)
{
unsigned int laneId = Warp::laneId();
unsigned int warpId = Warp::id();
int active_lane = __ffs(mask) - 1; //[0..31]
mask &= ~(1 << active_lane);
//broadcast active_found_count
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = found_count;
int active_found_count = storage.per_warp_buffer[warpId];
int node_idx = leaf & ~KernelPolicy::CHECK_FLAG;
//broadcast beg
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = batch.octree.begs[node_idx];
int beg = storage.per_warp_buffer[warpId];
//broadcast end
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = batch.octree.ends[node_idx];
int end = storage.per_warp_buffer[warpId];
//broadcast active_query_index
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = query_index;
int active_query_index = storage.per_warp_buffer[warpId];
int length = end - beg;
int *out = batch.output + active_query_index * batch.max_results + active_found_count;
int length_left = batch.max_results - active_found_count;
int test = __any(active_lane == laneId && (leaf & KernelPolicy::CHECK_FLAG));
if (test)
{
float3 active_query;
//broadcast warp_query
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.x);
active_query.x = __int_as_float(storage.per_warp_buffer[warpId]);
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.y);
active_query.y = __int_as_float(storage.per_warp_buffer[warpId]);
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.z);
active_query.z = __int_as_float(storage.per_warp_buffer[warpId]);
float radius2 = batch.bradcastRadius2((float*)&storage.per_warp_buffer[warpId], (active_lane == laneId), radius);
length = TestWarpKernel(beg, active_query, radius2, length, out, length_left);
}
else
{
length = min(length, length_left);
Warp::copy(batch.indices + beg, batch.indices + beg + length, out);
}
if (active_lane == laneId)
found_count += length;
}
}
__device__ __forceinline__ int TestWarpKernel(int beg, const float3& active_query, float radius2, int length, int* out, int length_left)
{
unsigned int idx = Warp::laneId();
int last_threadIdx = threadIdx.x - idx + 31;
int total_new = 0;
for(;;)
{
int take = 0;
if (idx < length)
{
float dx = batch.points.ptr(0)[beg + idx] - active_query.x;
float dy = batch.points.ptr(1)[beg + idx] - active_query.y;
float dz = batch.points.ptr(2)[beg + idx] - active_query.z;
float d2 = dx * dx + dy * dy + dz * dz;
if (d2 < radius2)
take = 1;
}
storage.cta_buffer[threadIdx.x] = take;
int offset = scan_warp<exclusive>(storage.cta_buffer);
//ensure that we copy
bool out_of_bounds = (offset + total_new) >= length_left;
if (take && !out_of_bounds)
out[offset] = batch.indices[beg + idx];
int new_nodes = storage.cta_buffer[last_threadIdx];
idx += Warp::STRIDE;
total_new += new_nodes;
out += new_nodes;
if (__all(idx >= length) || __any(out_of_bounds) || total_new == length_left)
break;
}
return min(total_new, length_left);
}
};
template<typename BatchType>
__global__ void KernelRS(const BatchType batch)
{
int query_index = blockIdx.x * blockDim.x + threadIdx.x;
bool active = query_index < batch.queries.size;
if (__all(active == false))
return;
Warp_radiusSearch<BatchType> search(batch, query_index);
search.launch(active);
}
}
}
template<typename BatchType>
void pcl::device::OctreeImpl::radiusSearchEx(BatchType& batch, const Queries& queries, NeighborIndices& results)
{
batch.indices = indices;
batch.octree = octreeGlobal;
batch.max_results = results.max_elems;
batch.output = results.data;
batch.output_sizes = results.sizes;
batch.points = points_sorted;
cudaSafeCall( cudaFuncSetCacheConfig(KernelRS<BatchType>, cudaFuncCachePreferL1) );
int block = KernelPolicy::CTA_SIZE;
int grid = divUp((int)batch.queries.size, block);
KernelRS<<<grid, block>>>(batch);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, float radius, NeighborIndices& results)
{
typedef Batch<SharedRadius, DirectQuery> BatchType;
BatchType batch;
batch.radius = radius;
batch.queries = queries;
radiusSearchEx(batch, queries, results);
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, const Radiuses& radiuses, NeighborIndices& results)
{
typedef Batch<IndividualRadius, DirectQuery> BatchType;
BatchType batch;
batch.radiuses = radiuses;
batch.queries = queries;
radiusSearchEx(batch, queries, results);
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, const Indices& indices, float radius, NeighborIndices& results)
{
typedef Batch<SharedRadius, IndicesQuery> BatchType;
BatchType batch;
batch.radius = radius;
batch.queries = queries;
batch.queries_indices = indices;
batch.queries.size = indices.size();
radiusSearchEx(batch, queries, results);
}
|
8e4ec00aba39d6a7214d719d4f1377174e81829d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
/* Pinned (page locked) memory example
*/
__global__ void addOne(int n, double *data) {
int nb = gridDim.x;
int nt = blockDim.x;
int compPerThread = n / (nb*nt);
int b = blockIdx.x;
int t = threadIdx.x;
int i = (b * nt + t)*compPerThread;
for (int j=0; j<compPerThread; j++)
data[i+j]++;
}
int main() {
time_t sTime = time(NULL);
struct timeval tt1, tt2;
int ms;
double fms;
int n = 8388608;
// paged memory is allocated
double *data;
data = (double*) malloc(n * sizeof(double));
// pinned (page locked) memory allocated
double *data1;
hipHostMalloc((void**) &data1, n * sizeof(double));
for (int i=0; i<n; i++) {
data[i] = 0.0;
}
for (int i=0; i<n; i++) {
data1[i] = 0.0;
}
double *data_dev;
hipMalloc((void**) &data_dev, n * sizeof(double));
dim3 nBlocks(1024,1,1);
dim3 nThreads(256,1,1);
// paged memory :
// timing for data transfer, kernel execution, data transfer back
hipDeviceSynchronize();
gettimeofday( &tt1, NULL );
hipMemcpy(data_dev, data, n * sizeof(double) , hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addOne) , dim3(nBlocks), dim3(nThreads) , 0, 0, n, data_dev);
hipMemcpy(data, data_dev, n * sizeof(double) , hipMemcpyDeviceToHost);
hipDeviceSynchronize();
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = ((double)ms)/1000000.0;
cout << "Paged memory : elapsed Time = " << fms << endl;
cout << "data[n-1] = " << data[n-1] << endl;
// pinned memory :
// timing for data transfer, kernel execution, data transfer back
hipDeviceSynchronize();
gettimeofday( &tt1, NULL );
hipMemcpy(data_dev, data1, n * sizeof(double) , hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addOne) , dim3(nBlocks), dim3(nThreads) , 0, 0, n, data_dev);
hipMemcpy(data1, data_dev, n * sizeof(double) , hipMemcpyDeviceToHost);
hipDeviceSynchronize();
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = ((double)ms)/1000000.0;
cout << "Pinned memory : elapsed Time = " << fms << endl;
cout << "data1[n-1] = " << data1[n-1] << endl;
hipFree(data_dev);
// paged memory is freed
free(data);
// pinned memory is freed
hipHostFree(data1);
}
| 8e4ec00aba39d6a7214d719d4f1377174e81829d.cu | #include <cuda.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
/* Pinned (page locked) memory example
*/
__global__ void addOne(int n, double *data) {
int nb = gridDim.x;
int nt = blockDim.x;
int compPerThread = n / (nb*nt);
int b = blockIdx.x;
int t = threadIdx.x;
int i = (b * nt + t)*compPerThread;
for (int j=0; j<compPerThread; j++)
data[i+j]++;
}
int main() {
time_t sTime = time(NULL);
struct timeval tt1, tt2;
int ms;
double fms;
int n = 8388608;
// paged memory is allocated
double *data;
data = (double*) malloc(n * sizeof(double));
// pinned (page locked) memory allocated
double *data1;
cudaMallocHost((void**) &data1, n * sizeof(double));
for (int i=0; i<n; i++) {
data[i] = 0.0;
}
for (int i=0; i<n; i++) {
data1[i] = 0.0;
}
double *data_dev;
cudaMalloc((void**) &data_dev, n * sizeof(double));
dim3 nBlocks(1024,1,1);
dim3 nThreads(256,1,1);
// paged memory :
// timing for data transfer, kernel execution, data transfer back
cudaThreadSynchronize();
gettimeofday( &tt1, NULL );
cudaMemcpy(data_dev, data, n * sizeof(double) , cudaMemcpyHostToDevice);
addOne <<< nBlocks, nThreads >>>(n, data_dev);
cudaMemcpy(data, data_dev, n * sizeof(double) , cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = ((double)ms)/1000000.0;
cout << "Paged memory : elapsed Time = " << fms << endl;
cout << "data[n-1] = " << data[n-1] << endl;
// pinned memory :
// timing for data transfer, kernel execution, data transfer back
cudaThreadSynchronize();
gettimeofday( &tt1, NULL );
cudaMemcpy(data_dev, data1, n * sizeof(double) , cudaMemcpyHostToDevice);
addOne <<< nBlocks, nThreads >>>(n, data_dev);
cudaMemcpy(data1, data_dev, n * sizeof(double) , cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = ((double)ms)/1000000.0;
cout << "Pinned memory : elapsed Time = " << fms << endl;
cout << "data1[n-1] = " << data1[n-1] << endl;
cudaFree(data_dev);
// paged memory is freed
free(data);
// pinned memory is freed
cudaFreeHost(data1);
}
|
fe12810556b5256f84c66a0c025e6cfa2352e05a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
bool verify(int data[], int length)
{
for (int i = 1 ; i < length; ++i)
{
if (data[i] - data [i - 1] != i )
{ printf("error %d\n", i); return false; }
}
return true;
}
#define DUMP(x) printf("%s %d\n", #x, props.x)
void dumpCUDAProps(hipDeviceProp_t & props)
{
DUMP(canMapHostMemory);
DUMP(clockRate);
DUMP(computeMode);
DUMP(deviceOverlap);
DUMP(integrated);
DUMP(kernelExecTimeoutEnabled);
DUMP(major);
DUMP(maxGridSize[0]);
DUMP(maxGridSize[1]);
DUMP(maxGridSize[2]);
DUMP(maxThreadsDim[0]);
DUMP(maxThreadsDim[1]);
DUMP(maxThreadsDim[2]);
DUMP(maxThreadsPerBlock);
DUMP(memPitch);
DUMP(minor);
DUMP(multiProcessorCount);
printf("name %s\n", props.name);
DUMP(regsPerBlock);
DUMP(sharedMemPerBlock);
DUMP(textureAlignment);
DUMP(totalConstMem);
DUMP(totalGlobalMem);
DUMP(warpSize);
}
#define BLOCK_SIZE 64
__global__ void prefixsumblock(int *in, int *out, int length)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x < length)
out[x] = in[x];
__syncthreads();
for ( int i = 1; i < BLOCK_SIZE; i <<= 1)
{
if (threadIdx.x + i < BLOCK_SIZE && x + i < length)
{
out[x + i] = in[x] + in[x + i];
}
__syncthreads();
if (x < length)
in[x] = out[x];
__syncthreads();
}
}
__global__ void correctsumends(int *ends, int *in, int *out)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
int end = ends[blockIdx.x];
out[x] = in[x] + end;
}
__global__ void gathersumends(int *in, int *out)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x > 0)
out[x] = in[x * BLOCK_SIZE - 1];
else
out[x] = 0;
}
__global__ void zarro(int *data, int length)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x < length)
data[x] = 0;
}
void prefixsum(int* in, int *out, int length)
{
int blocks = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(blocks, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( zarro), dim3(dimGrid), dim3(dimBlock), 0, 0, out, length);
hipLaunchKernelGGL(( prefixsumblock), dim3(dimGrid), dim3(dimBlock), 0, 0, in, out, length);
if (blocks > 1) {
int *devEnds;
int *devTmpEnds;
hipMalloc((void**) &devEnds, blocks * sizeof(int));
hipMalloc((void**) &devTmpEnds, blocks * sizeof(int));
int subblocks = (blocks + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 subgrid(subblocks, 1, 1);
dim3 subblock(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( gathersumends), dim3(subgrid), dim3(subblock), 0, 0, out, devEnds);
prefixsum(devEnds, devTmpEnds, blocks);
hipFree(devEnds);
hipLaunchKernelGGL(( correctsumends), dim3(dimGrid), dim3(dimBlock), 0, 0, devTmpEnds, in, out);
hipFree(devTmpEnds);
}
}
void cudasummer(int data[], int length)
{
int *devIn, *devOut;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipMalloc((void**) &devIn, length * sizeof(int));
hipMalloc((void**) &devOut, length * sizeof(int));
hipMemcpy(devIn, data, length * sizeof(int), hipMemcpyHostToDevice);
prefixsum(devIn, devOut, length);
hipMemcpy(data, devOut, length * sizeof(int), hipMemcpyDeviceToHost);
hipFree(devIn);
hipFree(devOut);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float t;
hipEventElapsedTime(&t, start, stop);
printf("Elapsed time %3fms\n", t);
hipEventDestroy(start);
hipEventDestroy(stop);
}
void devicesDump()
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
dumpCUDAProps(deviceProp);
}
}
int main(int argc, char *argv[])
{
int length;
if (argc < 2) {
length = 500;
}
else length = atoi(argv[1]);
int *data = (int*) malloc(length * sizeof(int));
for (int i = 0; i < length; ++i) {
data[i] = i; //rand();
}
devicesDump();
cudasummer(data, length);
if (length < 1000)
for (int i = 0 ; i < length; ++i)
{
printf("%d\n", data[i]);
}
verify(data, length);
}
| fe12810556b5256f84c66a0c025e6cfa2352e05a.cu | #include <stdio.h>
#include <stdlib.h>
bool verify(int data[], int length)
{
for (int i = 1 ; i < length; ++i)
{
if (data[i] - data [i - 1] != i )
{ printf("error %d\n", i); return false; }
}
return true;
}
#define DUMP(x) printf("%s %d\n", #x, props.x)
void dumpCUDAProps(cudaDeviceProp & props)
{
DUMP(canMapHostMemory);
DUMP(clockRate);
DUMP(computeMode);
DUMP(deviceOverlap);
DUMP(integrated);
DUMP(kernelExecTimeoutEnabled);
DUMP(major);
DUMP(maxGridSize[0]);
DUMP(maxGridSize[1]);
DUMP(maxGridSize[2]);
DUMP(maxThreadsDim[0]);
DUMP(maxThreadsDim[1]);
DUMP(maxThreadsDim[2]);
DUMP(maxThreadsPerBlock);
DUMP(memPitch);
DUMP(minor);
DUMP(multiProcessorCount);
printf("name %s\n", props.name);
DUMP(regsPerBlock);
DUMP(sharedMemPerBlock);
DUMP(textureAlignment);
DUMP(totalConstMem);
DUMP(totalGlobalMem);
DUMP(warpSize);
}
#define BLOCK_SIZE 64
__global__ void prefixsumblock(int *in, int *out, int length)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x < length)
out[x] = in[x];
__syncthreads();
for ( int i = 1; i < BLOCK_SIZE; i <<= 1)
{
if (threadIdx.x + i < BLOCK_SIZE && x + i < length)
{
out[x + i] = in[x] + in[x + i];
}
__syncthreads();
if (x < length)
in[x] = out[x];
__syncthreads();
}
}
__global__ void correctsumends(int *ends, int *in, int *out)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
int end = ends[blockIdx.x];
out[x] = in[x] + end;
}
__global__ void gathersumends(int *in, int *out)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x > 0)
out[x] = in[x * BLOCK_SIZE - 1];
else
out[x] = 0;
}
__global__ void zarro(int *data, int length)
{
int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (x < length)
data[x] = 0;
}
void prefixsum(int* in, int *out, int length)
{
int blocks = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(blocks, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
zarro<<<dimGrid, dimBlock>>>(out, length);
prefixsumblock<<<dimGrid, dimBlock>>>(in, out, length);
if (blocks > 1) {
int *devEnds;
int *devTmpEnds;
cudaMalloc((void**) &devEnds, blocks * sizeof(int));
cudaMalloc((void**) &devTmpEnds, blocks * sizeof(int));
int subblocks = (blocks + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 subgrid(subblocks, 1, 1);
dim3 subblock(BLOCK_SIZE, 1, 1);
gathersumends<<<subgrid, subblock>>>(out, devEnds);
prefixsum(devEnds, devTmpEnds, blocks);
cudaFree(devEnds);
correctsumends<<<dimGrid, dimBlock>>>(devTmpEnds, in, out);
cudaFree(devTmpEnds);
}
}
void cudasummer(int data[], int length)
{
int *devIn, *devOut;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMalloc((void**) &devIn, length * sizeof(int));
cudaMalloc((void**) &devOut, length * sizeof(int));
cudaMemcpy(devIn, data, length * sizeof(int), cudaMemcpyHostToDevice);
prefixsum(devIn, devOut, length);
cudaMemcpy(data, devOut, length * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(devIn);
cudaFree(devOut);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float t;
cudaEventElapsedTime(&t, start, stop);
printf("Elapsed time %3fms\n", t);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void devicesDump()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
dumpCUDAProps(deviceProp);
}
}
int main(int argc, char *argv[])
{
int length;
if (argc < 2) {
length = 500;
}
else length = atoi(argv[1]);
int *data = (int*) malloc(length * sizeof(int));
for (int i = 0; i < length; ++i) {
data[i] = i; //rand();
}
devicesDump();
cudasummer(data, length);
if (length < 1000)
for (int i = 0 ; i < length; ++i)
{
printf("%d\n", data[i]);
}
verify(data, length);
}
|
0db66b0bff25ade6a95d974d506df011dba4870b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <mpi.h>
#include <stdio.h>
#include <conf.h>
#include "inc/conf.h"
#include "utils/msg.h"
#include "mpi/wrapper.h" /* mini-MPI and -device */
#include "mpi/glb.h"
#include "d/api.h"
#include "utils/error.h"
#include "utils/cc.h"
#include "utils/mc.h"
#include "utils/kl.h"
#include "conf/imp.h"
#include "inc/type.h"
#include "inc/dev.h"
#include "dbg/imp.h"
#include "coords/ini.h"
#include "coords/imp.h"
const int n = 10;
Particle *pp;
Force *ff;
void alloc() {
CC(d::Malloc((void**) &pp, n * sizeof(Particle)));
CC(d::Malloc((void**) &ff, n * sizeof(Force)));
}
void free() {
CC(d::Free(pp));
CC(d::Free(ff));
}
namespace dev {
__global__ void fill_bugs(int3 L, Particle *pp, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
Particle p;
p.r[0] = p.r[1] = p.r[2] = 0;
p.v[0] = p.v[1] = p.v[2] = 0;
if (i >= n) return;
if (i == 1) p.r[0] = 1.5 * L.x; // invalid position
if (i < 1) p.v[0] = 0.f / 0.f; // nan
pp[i] = p;
}
__global__ void fill_bugs(Force *ff, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
Force f;
f.f[0] = f.f[1] = f.f[2] = 0;
if (i >= n) return;
if (i < 1) f.f[0] = 1.f / 0.f; // inf
ff[i] = f;
}
} // dev
void fill_bugs(int3 L) {
KL(dev::fill_bugs, (k_cnf(n)), (L, pp, n));
KL(dev::fill_bugs, (k_cnf(n)), (ff, n));
}
void check(float dt, const Coords *c, Dbg *dbg) {
UC(dbg_check_pos (c, "flu", dbg, n, pp));
UC(dbg_check_vel (dt, c, "flu", dbg, n, pp));
UC(dbg_check_forces (dt, c, "flu.ff", dbg, n, pp, ff));
}
int main(int argc, char **argv) {
Dbg *dbg;
Config *cfg;
Coords *coords;
int3 L;
float dt;
int dims[3];
MPI_Comm cart;
m::ini(&argc, &argv);
m::get_dims(&argc, &argv, dims);
m::get_cart(MPI_COMM_WORLD, dims, &cart);
UC(conf_ini(&cfg));
UC(dbg_ini(&dbg));
UC(conf_read(argc, argv, cfg));
UC(conf_lookup_float(cfg, "time.dt", &dt));
UC(dbg_set_conf(cfg, dbg));
UC(coords_ini_conf(cart, cfg, &coords));
L = subdomain(coords);
alloc();
fill_bugs(L);
check(dt, coords, dbg);
free();
UC(dbg_fin(dbg));
UC(conf_fin(cfg));
UC(coords_fin(coords));
MC(m::Barrier(cart));
m::fin();
}
| 0db66b0bff25ade6a95d974d506df011dba4870b.cu | #include <mpi.h>
#include <stdio.h>
#include <conf.h>
#include "inc/conf.h"
#include "utils/msg.h"
#include "mpi/wrapper.h" /* mini-MPI and -device */
#include "mpi/glb.h"
#include "d/api.h"
#include "utils/error.h"
#include "utils/cc.h"
#include "utils/mc.h"
#include "utils/kl.h"
#include "conf/imp.h"
#include "inc/type.h"
#include "inc/dev.h"
#include "dbg/imp.h"
#include "coords/ini.h"
#include "coords/imp.h"
const int n = 10;
Particle *pp;
Force *ff;
void alloc() {
CC(d::Malloc((void**) &pp, n * sizeof(Particle)));
CC(d::Malloc((void**) &ff, n * sizeof(Force)));
}
void free() {
CC(d::Free(pp));
CC(d::Free(ff));
}
namespace dev {
__global__ void fill_bugs(int3 L, Particle *pp, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
Particle p;
p.r[0] = p.r[1] = p.r[2] = 0;
p.v[0] = p.v[1] = p.v[2] = 0;
if (i >= n) return;
if (i == 1) p.r[0] = 1.5 * L.x; // invalid position
if (i < 1) p.v[0] = 0.f / 0.f; // nan
pp[i] = p;
}
__global__ void fill_bugs(Force *ff, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
Force f;
f.f[0] = f.f[1] = f.f[2] = 0;
if (i >= n) return;
if (i < 1) f.f[0] = 1.f / 0.f; // inf
ff[i] = f;
}
} // dev
void fill_bugs(int3 L) {
KL(dev::fill_bugs, (k_cnf(n)), (L, pp, n));
KL(dev::fill_bugs, (k_cnf(n)), (ff, n));
}
void check(float dt, const Coords *c, Dbg *dbg) {
UC(dbg_check_pos (c, "flu", dbg, n, pp));
UC(dbg_check_vel (dt, c, "flu", dbg, n, pp));
UC(dbg_check_forces (dt, c, "flu.ff", dbg, n, pp, ff));
}
int main(int argc, char **argv) {
Dbg *dbg;
Config *cfg;
Coords *coords;
int3 L;
float dt;
int dims[3];
MPI_Comm cart;
m::ini(&argc, &argv);
m::get_dims(&argc, &argv, dims);
m::get_cart(MPI_COMM_WORLD, dims, &cart);
UC(conf_ini(&cfg));
UC(dbg_ini(&dbg));
UC(conf_read(argc, argv, cfg));
UC(conf_lookup_float(cfg, "time.dt", &dt));
UC(dbg_set_conf(cfg, dbg));
UC(coords_ini_conf(cart, cfg, &coords));
L = subdomain(coords);
alloc();
fill_bugs(L);
check(dt, coords, dbg);
free();
UC(dbg_fin(dbg));
UC(conf_fin(cfg));
UC(coords_fin(coords));
MC(m::Barrier(cart));
m::fin();
}
|
2b0562b1852b5879984dd3be0fa4c36a3b960669.hip | // !!! This is a file automatically generated by hipify!!!
#include <gdf/gdf.h>
#include <gdf/ipc/Schema_generated.h>
#include <gdf/ipc/Message_generated.h>
#include "arrow/buffer.h"
#include "arrow/io/memory.h"
#include "arrow/ipc/reader.h"
#include "arrow/ipc/json.h"
#include "arrow/type.h"
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <memory>
#include <vector>
#include <string>
using namespace org::apache::arrow;
namespace {
using namespace arrow;
static std::string GetBufferTypeName(BufferType type) {
switch (type) {
case BufferType::DATA:
return "DATA";
case BufferType::OFFSET:
return "OFFSET";
case BufferType::TYPE:
return "TYPE";
case BufferType::VALIDITY:
return "VALIDITY";
default:
break;
}
return "UNKNOWN";
}
static std::string GetTypeName(Type::type id) {
switch (id) {
#define SHOW_TYPE_NAME(K) case Type::K: return #K;
SHOW_TYPE_NAME(NA)
SHOW_TYPE_NAME(BOOL)
SHOW_TYPE_NAME(UINT8)
SHOW_TYPE_NAME(INT8)
SHOW_TYPE_NAME(UINT16)
SHOW_TYPE_NAME(INT16)
SHOW_TYPE_NAME(UINT32)
SHOW_TYPE_NAME(INT32)
SHOW_TYPE_NAME(UINT64)
SHOW_TYPE_NAME(INT64)
SHOW_TYPE_NAME(HALF_FLOAT)
SHOW_TYPE_NAME(FLOAT)
SHOW_TYPE_NAME(DOUBLE)
SHOW_TYPE_NAME(STRING)
SHOW_TYPE_NAME(BINARY)
SHOW_TYPE_NAME(FIXED_SIZE_BINARY)
SHOW_TYPE_NAME(DATE32)
SHOW_TYPE_NAME(DATE64)
SHOW_TYPE_NAME(TIMESTAMP)
SHOW_TYPE_NAME(TIME32)
SHOW_TYPE_NAME(TIME64)
SHOW_TYPE_NAME(INTERVAL)
SHOW_TYPE_NAME(DECIMAL)
SHOW_TYPE_NAME(LIST)
SHOW_TYPE_NAME(STRUCT)
SHOW_TYPE_NAME(UNION)
SHOW_TYPE_NAME(DICTIONARY)
#undef SHOW_TYPE_NAME
}
return "UNKNOWN";
}
}
class IpcParser {
public:
typedef std::unique_ptr<const char []> unique_bytes_type;
class ParseError : public std::runtime_error {
using std::runtime_error::runtime_error;
};
struct MessageInfo {
const void *header;
int64_t body_length;
flatbuf::MessageHeader type;
};
struct LayoutDesc {
int bitwidth;
std::string vectortype;
};
struct FieldDesc {
std::string name;
std::string type;
std::vector<LayoutDesc> layouts;
};
struct BufferDesc {
int64_t offset, length;
};
struct DTypeDesc {
std::string name;
int bitwidth;
};
struct NodeDesc {
std::string name;
int64_t length;
int64_t null_count;
BufferDesc null_buffer, data_buffer;
DTypeDesc dtype;
};
IpcParser()
:_d_buffer(nullptr), _d_curptr(nullptr), _d_data_body(nullptr), _failed(false)
{ /* empty */ }
void open(const uint8_t *schema, size_t length) {
try {
read_schema(schema, length);
} catch ( ParseError e ) {
std::ostringstream oss;
oss << "ParseError: " << e.what();
_error_message = oss.str();
_failed = true;
}
}
void open_recordbatches(const uint8_t *recordbatches, size_t length) {
try {
read_record_batch(recordbatches, length);
} catch ( ParseError e ) {
std::ostringstream oss;
oss << "ParseError: " << e.what();
_error_message = oss.str();
_failed = true;
}
}
bool is_failed() const {
return _failed;
}
const std::string& get_error() const {
return _error_message;
}
/*
* Returns the GPU pointer to the start of the data region.
*/
const void* get_data() const {
return static_cast<const void*>(_d_data_body);
}
int64_t get_data_offset() const {
return _d_data_body - _d_buffer;
}
/*
* Returns the layout information in json.
* The json contains a list metadata for each column.
*/
const std::string& get_layout_json() {
if ( _json_output.size() == 0 ) {
std::ostringstream oss;
oss << "[";
int ct = 0;
for (auto i=_nodes.begin(); i!=_nodes.end(); ++i, ++ct) {
if ( ct > 0 ) {
oss << ", ";
}
jsonify_node(oss, *i);
}
oss << "]";
_json_output = oss.str();
}
return _json_output;
}
const std::string& get_schema_json() {
if ( _json_schema_output.size() == 0 ) {
// To JSON
std::unique_ptr<arrow::ipc::JsonWriter> json_writer;
arrow::ipc::JsonWriter::Open(_schema, &json_writer);
json_writer->Finish(&_json_schema_output);
}
return _json_schema_output;
}
protected:
void jsonify_node(std::ostream &os, const NodeDesc &node) {
os << "{";
os << "\"name\": " << '"' << node.name << '"';
os << ", ";
os << "\"length\": " << node.length;
os << ", ";
os << "\"null_count\": " << node.null_count;
os << ", ";
os << "\"dtype\": ";
jsonify_dtype(os, node.dtype);
os << ", ";
os << "\"data_buffer\": ";
jsonify_buffer(os, node.data_buffer);
os << ", ";
os << "\"null_buffer\": ";
jsonify_buffer(os, node.null_buffer);
os << "}";
}
void jsonify_dtype(std::ostream &os, const DTypeDesc &dtype) {
os << "{";
os << "\"name\": " << '"' << dtype.name << '"';
os << ", ";
os << "\"bitwidth\": " << dtype.bitwidth;
os << "}";
}
void jsonify_buffer(std::ostream &os, const BufferDesc &buffer) {
os << "{";
os << "\"length\": " << buffer.length;
os << ", ";
os << "\"offset\": " << buffer.offset;
os << "}";
}
void read_schema(const uint8_t *schema_buf, size_t length) {
if (_fields.size() || _nodes.size()) {
throw ParseError("cannot open more than once");
}
// Use Arrow to load the schema
const auto payload = std::make_shared<arrow::Buffer>(schema_buf, length);
auto buffer = std::make_shared<io::BufferReader>(payload);
std::shared_ptr<ipc::RecordBatchStreamReader> reader;
auto status = ipc::RecordBatchStreamReader::Open(buffer, &reader);
if ( !status.ok() ) throw ParseError(status.message());
_schema = reader->schema();
if (!_schema) throw ParseError("failed to parse schema");
// Parse the schema
parse_schema(_schema);
}
void read_record_batch(const uint8_t *recordbatches, size_t length) {
_d_curptr = _d_buffer = recordbatches;
int size = read_msg_size();
auto header_buf = read_bytes(size);
auto header = parse_msg_header(header_buf);
if ( header.body_length <= 0) {
throw ParseError("recordbatch should have a body");
}
// store the current ptr as the data ptr
_d_data_body = _d_curptr;
parse_record_batch(header);
}
MessageInfo parse_msg_header(const unique_bytes_type & header_buf) {
auto msg = flatbuf::GetMessage(header_buf.get());
MessageInfo mi;
mi.header = msg->header();
mi.body_length = msg->bodyLength();
mi.type = msg->header_type();
return mi;
}
void parse_schema(std::shared_ptr<arrow::Schema> schema) {
auto fields = schema->fields();
_fields.reserve(fields.size());
for ( int i=0; i < fields.size(); ++i ){
auto field = fields[i];
_fields.push_back(FieldDesc());
auto & out_field = _fields.back();
out_field.name = field->name();
out_field.type = GetTypeName(field->type()->id());
auto layouts = field->type()->GetBufferLayout();
for ( int j=0; j < layouts.size(); ++j ) {
auto layout = layouts[j];
LayoutDesc layout_desc;
layout_desc.bitwidth = layout.bit_width();
layout_desc.vectortype = GetBufferTypeName(layout.type());
out_field.layouts.push_back(layout_desc);
}
}
}
void parse_record_batch(MessageInfo msg) {
if ( msg.type != flatbuf::MessageHeader_RecordBatch ) {
throw ParseError("expecting recordbatch type");
}
auto rb = static_cast<const flatbuf::RecordBatch*>(msg.header);
int node_ct = rb->nodes()->Length();
int buffer_ct = rb->buffers()->Length();
int buffer_per_node = 2;
if ( node_ct * buffer_per_node != buffer_ct ) {
throw ParseError("unexpected: more than 2 buffers per node!?");
}
_nodes.reserve(node_ct);
for ( int i=0; i < node_ct; ++i ) {
const auto &fd = _fields[i];
auto node = rb->nodes()->Get(i);
_nodes.push_back(NodeDesc());
auto &out_node = _nodes.back();
for ( int j=0; j < buffer_per_node; ++j ) {
auto buf = rb->buffers()->Get(i * buffer_per_node + j);
if ( buf->page() != -1 ) {
std::cerr << "buf.Page() != -1; metadata format changed!\n";
}
const auto &layout = fd.layouts[j];
BufferDesc bufdesc;
bufdesc.offset = buf->offset();
bufdesc.length = buf->length();
if ( layout.vectortype == "DATA" ) {
out_node.data_buffer = bufdesc;
out_node.dtype.name = fd.type;
out_node.dtype.bitwidth = layout.bitwidth;
} else if ( layout.vectortype == "VALIDITY" ) {
out_node.null_buffer = bufdesc;
} else {
throw ParseError("unsupported vector type");
}
}
out_node.name = fd.name;
out_node.length = node->length();
out_node.null_count = node->null_count();
}
}
unique_bytes_type read_bytes(size_t size) {
if (size <= 0) {
throw ParseError("attempt to read zero or negative bytes");
}
char *buf = new char[size];
if (hipSuccess != hipMemcpy(buf, _d_curptr, size,
hipMemcpyDeviceToHost) )
throw ParseError("cannot read value");
_d_curptr += size;
return unique_bytes_type(buf);
}
template<typename T>
void read_value(T &val) {
if (hipSuccess != hipMemcpy(&val, _d_curptr, sizeof(T),
hipMemcpyDeviceToHost) )
throw ParseError("cannot read value");
_d_curptr += sizeof(T);
}
int read_msg_size() {
int size;
read_value(size);
if (size <= 0) {
throw ParseError("non-positive message size");
}
return size;
}
private:
const uint8_t *_d_buffer;
const uint8_t *_d_curptr;
const uint8_t *_d_data_body;
std::shared_ptr<arrow::Schema> _schema;
std::vector<FieldDesc> _fields;
std::vector<NodeDesc> _nodes;
bool _failed;
std::string _error_message;
// cache
std::string _json_output;
std::string _json_schema_output;
};
gdf_ipc_parser_type* cffi_wrap(IpcParser* obj){
return reinterpret_cast<gdf_ipc_parser_type*>(obj);
}
IpcParser* cffi_unwrap(gdf_ipc_parser_type* hdl){
return reinterpret_cast<IpcParser*>(hdl);
}
gdf_ipc_parser_type* gdf_ipc_parser_open(const uint8_t *schema, size_t length) {
IpcParser *parser = new IpcParser;
parser->open(schema, length);
return cffi_wrap(parser);
}
void gdf_ipc_parser_close(gdf_ipc_parser_type *handle) {
delete cffi_unwrap(handle);
}
int gdf_ipc_parser_failed(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->is_failed();
}
const char *gdf_ipc_parser_get_schema_json(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_schema_json().c_str();
}
const char* gdf_ipc_parser_get_layout_json(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_layout_json().c_str();
}
const char* gdf_ipc_parser_get_error(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_error().c_str();
}
const void* gdf_ipc_parser_get_data(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_data();
}
int64_t gdf_ipc_parser_get_data_offset(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_data_offset();
}
void gdf_ipc_parser_open_recordbatches(gdf_ipc_parser_type *handle,
const uint8_t *recordbatches,
size_t length)
{
return cffi_unwrap(handle)->open_recordbatches(recordbatches, length);
}
| 2b0562b1852b5879984dd3be0fa4c36a3b960669.cu | #include <gdf/gdf.h>
#include <gdf/ipc/Schema_generated.h>
#include <gdf/ipc/Message_generated.h>
#include "arrow/buffer.h"
#include "arrow/io/memory.h"
#include "arrow/ipc/reader.h"
#include "arrow/ipc/json.h"
#include "arrow/type.h"
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <memory>
#include <vector>
#include <string>
using namespace org::apache::arrow;
namespace {
using namespace arrow;
static std::string GetBufferTypeName(BufferType type) {
switch (type) {
case BufferType::DATA:
return "DATA";
case BufferType::OFFSET:
return "OFFSET";
case BufferType::TYPE:
return "TYPE";
case BufferType::VALIDITY:
return "VALIDITY";
default:
break;
}
return "UNKNOWN";
}
static std::string GetTypeName(Type::type id) {
switch (id) {
#define SHOW_TYPE_NAME(K) case Type::K: return #K;
SHOW_TYPE_NAME(NA)
SHOW_TYPE_NAME(BOOL)
SHOW_TYPE_NAME(UINT8)
SHOW_TYPE_NAME(INT8)
SHOW_TYPE_NAME(UINT16)
SHOW_TYPE_NAME(INT16)
SHOW_TYPE_NAME(UINT32)
SHOW_TYPE_NAME(INT32)
SHOW_TYPE_NAME(UINT64)
SHOW_TYPE_NAME(INT64)
SHOW_TYPE_NAME(HALF_FLOAT)
SHOW_TYPE_NAME(FLOAT)
SHOW_TYPE_NAME(DOUBLE)
SHOW_TYPE_NAME(STRING)
SHOW_TYPE_NAME(BINARY)
SHOW_TYPE_NAME(FIXED_SIZE_BINARY)
SHOW_TYPE_NAME(DATE32)
SHOW_TYPE_NAME(DATE64)
SHOW_TYPE_NAME(TIMESTAMP)
SHOW_TYPE_NAME(TIME32)
SHOW_TYPE_NAME(TIME64)
SHOW_TYPE_NAME(INTERVAL)
SHOW_TYPE_NAME(DECIMAL)
SHOW_TYPE_NAME(LIST)
SHOW_TYPE_NAME(STRUCT)
SHOW_TYPE_NAME(UNION)
SHOW_TYPE_NAME(DICTIONARY)
#undef SHOW_TYPE_NAME
}
return "UNKNOWN";
}
}
class IpcParser {
public:
typedef std::unique_ptr<const char []> unique_bytes_type;
class ParseError : public std::runtime_error {
using std::runtime_error::runtime_error;
};
struct MessageInfo {
const void *header;
int64_t body_length;
flatbuf::MessageHeader type;
};
struct LayoutDesc {
int bitwidth;
std::string vectortype;
};
struct FieldDesc {
std::string name;
std::string type;
std::vector<LayoutDesc> layouts;
};
struct BufferDesc {
int64_t offset, length;
};
struct DTypeDesc {
std::string name;
int bitwidth;
};
struct NodeDesc {
std::string name;
int64_t length;
int64_t null_count;
BufferDesc null_buffer, data_buffer;
DTypeDesc dtype;
};
IpcParser()
:_d_buffer(nullptr), _d_curptr(nullptr), _d_data_body(nullptr), _failed(false)
{ /* empty */ }
void open(const uint8_t *schema, size_t length) {
try {
read_schema(schema, length);
} catch ( ParseError e ) {
std::ostringstream oss;
oss << "ParseError: " << e.what();
_error_message = oss.str();
_failed = true;
}
}
void open_recordbatches(const uint8_t *recordbatches, size_t length) {
try {
read_record_batch(recordbatches, length);
} catch ( ParseError e ) {
std::ostringstream oss;
oss << "ParseError: " << e.what();
_error_message = oss.str();
_failed = true;
}
}
bool is_failed() const {
return _failed;
}
const std::string& get_error() const {
return _error_message;
}
/*
* Returns the GPU pointer to the start of the data region.
*/
const void* get_data() const {
return static_cast<const void*>(_d_data_body);
}
int64_t get_data_offset() const {
return _d_data_body - _d_buffer;
}
/*
* Returns the layout information in json.
* The json contains a list metadata for each column.
*/
const std::string& get_layout_json() {
if ( _json_output.size() == 0 ) {
std::ostringstream oss;
oss << "[";
int ct = 0;
for (auto i=_nodes.begin(); i!=_nodes.end(); ++i, ++ct) {
if ( ct > 0 ) {
oss << ", ";
}
jsonify_node(oss, *i);
}
oss << "]";
_json_output = oss.str();
}
return _json_output;
}
const std::string& get_schema_json() {
if ( _json_schema_output.size() == 0 ) {
// To JSON
std::unique_ptr<arrow::ipc::JsonWriter> json_writer;
arrow::ipc::JsonWriter::Open(_schema, &json_writer);
json_writer->Finish(&_json_schema_output);
}
return _json_schema_output;
}
protected:
void jsonify_node(std::ostream &os, const NodeDesc &node) {
os << "{";
os << "\"name\": " << '"' << node.name << '"';
os << ", ";
os << "\"length\": " << node.length;
os << ", ";
os << "\"null_count\": " << node.null_count;
os << ", ";
os << "\"dtype\": ";
jsonify_dtype(os, node.dtype);
os << ", ";
os << "\"data_buffer\": ";
jsonify_buffer(os, node.data_buffer);
os << ", ";
os << "\"null_buffer\": ";
jsonify_buffer(os, node.null_buffer);
os << "}";
}
void jsonify_dtype(std::ostream &os, const DTypeDesc &dtype) {
os << "{";
os << "\"name\": " << '"' << dtype.name << '"';
os << ", ";
os << "\"bitwidth\": " << dtype.bitwidth;
os << "}";
}
void jsonify_buffer(std::ostream &os, const BufferDesc &buffer) {
os << "{";
os << "\"length\": " << buffer.length;
os << ", ";
os << "\"offset\": " << buffer.offset;
os << "}";
}
void read_schema(const uint8_t *schema_buf, size_t length) {
if (_fields.size() || _nodes.size()) {
throw ParseError("cannot open more than once");
}
// Use Arrow to load the schema
const auto payload = std::make_shared<arrow::Buffer>(schema_buf, length);
auto buffer = std::make_shared<io::BufferReader>(payload);
std::shared_ptr<ipc::RecordBatchStreamReader> reader;
auto status = ipc::RecordBatchStreamReader::Open(buffer, &reader);
if ( !status.ok() ) throw ParseError(status.message());
_schema = reader->schema();
if (!_schema) throw ParseError("failed to parse schema");
// Parse the schema
parse_schema(_schema);
}
void read_record_batch(const uint8_t *recordbatches, size_t length) {
_d_curptr = _d_buffer = recordbatches;
int size = read_msg_size();
auto header_buf = read_bytes(size);
auto header = parse_msg_header(header_buf);
if ( header.body_length <= 0) {
throw ParseError("recordbatch should have a body");
}
// store the current ptr as the data ptr
_d_data_body = _d_curptr;
parse_record_batch(header);
}
MessageInfo parse_msg_header(const unique_bytes_type & header_buf) {
auto msg = flatbuf::GetMessage(header_buf.get());
MessageInfo mi;
mi.header = msg->header();
mi.body_length = msg->bodyLength();
mi.type = msg->header_type();
return mi;
}
void parse_schema(std::shared_ptr<arrow::Schema> schema) {
auto fields = schema->fields();
_fields.reserve(fields.size());
for ( int i=0; i < fields.size(); ++i ){
auto field = fields[i];
_fields.push_back(FieldDesc());
auto & out_field = _fields.back();
out_field.name = field->name();
out_field.type = GetTypeName(field->type()->id());
auto layouts = field->type()->GetBufferLayout();
for ( int j=0; j < layouts.size(); ++j ) {
auto layout = layouts[j];
LayoutDesc layout_desc;
layout_desc.bitwidth = layout.bit_width();
layout_desc.vectortype = GetBufferTypeName(layout.type());
out_field.layouts.push_back(layout_desc);
}
}
}
void parse_record_batch(MessageInfo msg) {
if ( msg.type != flatbuf::MessageHeader_RecordBatch ) {
throw ParseError("expecting recordbatch type");
}
auto rb = static_cast<const flatbuf::RecordBatch*>(msg.header);
int node_ct = rb->nodes()->Length();
int buffer_ct = rb->buffers()->Length();
int buffer_per_node = 2;
if ( node_ct * buffer_per_node != buffer_ct ) {
throw ParseError("unexpected: more than 2 buffers per node!?");
}
_nodes.reserve(node_ct);
for ( int i=0; i < node_ct; ++i ) {
const auto &fd = _fields[i];
auto node = rb->nodes()->Get(i);
_nodes.push_back(NodeDesc());
auto &out_node = _nodes.back();
for ( int j=0; j < buffer_per_node; ++j ) {
auto buf = rb->buffers()->Get(i * buffer_per_node + j);
if ( buf->page() != -1 ) {
std::cerr << "buf.Page() != -1; metadata format changed!\n";
}
const auto &layout = fd.layouts[j];
BufferDesc bufdesc;
bufdesc.offset = buf->offset();
bufdesc.length = buf->length();
if ( layout.vectortype == "DATA" ) {
out_node.data_buffer = bufdesc;
out_node.dtype.name = fd.type;
out_node.dtype.bitwidth = layout.bitwidth;
} else if ( layout.vectortype == "VALIDITY" ) {
out_node.null_buffer = bufdesc;
} else {
throw ParseError("unsupported vector type");
}
}
out_node.name = fd.name;
out_node.length = node->length();
out_node.null_count = node->null_count();
}
}
unique_bytes_type read_bytes(size_t size) {
if (size <= 0) {
throw ParseError("attempt to read zero or negative bytes");
}
char *buf = new char[size];
if (cudaSuccess != cudaMemcpy(buf, _d_curptr, size,
cudaMemcpyDeviceToHost) )
throw ParseError("cannot read value");
_d_curptr += size;
return unique_bytes_type(buf);
}
template<typename T>
void read_value(T &val) {
if (cudaSuccess != cudaMemcpy(&val, _d_curptr, sizeof(T),
cudaMemcpyDeviceToHost) )
throw ParseError("cannot read value");
_d_curptr += sizeof(T);
}
int read_msg_size() {
int size;
read_value(size);
if (size <= 0) {
throw ParseError("non-positive message size");
}
return size;
}
private:
const uint8_t *_d_buffer;
const uint8_t *_d_curptr;
const uint8_t *_d_data_body;
std::shared_ptr<arrow::Schema> _schema;
std::vector<FieldDesc> _fields;
std::vector<NodeDesc> _nodes;
bool _failed;
std::string _error_message;
// cache
std::string _json_output;
std::string _json_schema_output;
};
gdf_ipc_parser_type* cffi_wrap(IpcParser* obj){
return reinterpret_cast<gdf_ipc_parser_type*>(obj);
}
IpcParser* cffi_unwrap(gdf_ipc_parser_type* hdl){
return reinterpret_cast<IpcParser*>(hdl);
}
gdf_ipc_parser_type* gdf_ipc_parser_open(const uint8_t *schema, size_t length) {
IpcParser *parser = new IpcParser;
parser->open(schema, length);
return cffi_wrap(parser);
}
void gdf_ipc_parser_close(gdf_ipc_parser_type *handle) {
delete cffi_unwrap(handle);
}
int gdf_ipc_parser_failed(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->is_failed();
}
const char *gdf_ipc_parser_get_schema_json(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_schema_json().c_str();
}
const char* gdf_ipc_parser_get_layout_json(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_layout_json().c_str();
}
const char* gdf_ipc_parser_get_error(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_error().c_str();
}
const void* gdf_ipc_parser_get_data(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_data();
}
int64_t gdf_ipc_parser_get_data_offset(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_data_offset();
}
void gdf_ipc_parser_open_recordbatches(gdf_ipc_parser_type *handle,
const uint8_t *recordbatches,
size_t length)
{
return cffi_unwrap(handle)->open_recordbatches(recordbatches, length);
}
|
7454e4d22771e34659af8633b2be71cecf603dfd.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file kcore_app.cu
*
* @brief K-Core Gunrock Application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definitions
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/graphio/graphio.cuh>
// K-Core
#include <gunrock/app/kcore/kcore_enactor.cuh>
#include <gunrock/app/kcore/kcore_test.cuh>
// Others
#include <cstdio>
namespace gunrock {
namespace app {
namespace kcore {
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));
return retval;
}
/**
* @brief Run kcore tests
* @tparam GraphT Type of the graph
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return hipError_t error message(s), if any
*/
template <typename GraphT>
hipError_t RunTests(util::Parameters ¶meters, GraphT &graph,
typename GraphT::VertexT *ref_num_cores,
util::Location target) {
hipError_t retval = hipSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("kcore", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
VertexT *h_num_cores = new VertexT[graph.nodes];
VertexT max_k = 0;
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(graph, target));
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_num_cores));
SizeT num_errors = Validate_Results(parameters, graph, h_num_cores,
ref_num_cores, false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_num_cores));
if (validation == "last") {
SizeT num_errors = Validate_Results(parameters, graph, h_num_cores, ref_num_cores,
false);
}
// Print Max K-Core
for (SizeT v = 0; v < graph.nodes; v++) {
int k = h_num_cores[v];
if (k > max_k) {
max_k = k;
}
}
util::PrintMsg("Max K-Core: " + std::to_string(max_k), !quiet_mode);
// compute running statistics
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_num_cores;
h_num_cores = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace kcore
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_kcore function
* @tparam GraphT Type of the graph
* @tparam VertexT Type of the num_cores
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] num_cores Return generated core number for each run
* @param[out] max_k Return max K-Core generated for each run
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename VertexT = typename GraphT::VertexT,
typename SizeT = typename GraphT::SizeT>
double gunrock_kcore(gunrock::util::Parameters ¶meters, GraphT &graph,
VertexT **num_cores, VertexT *max_k) {
typedef gunrock::app::kcore::Problem<GraphT> ProblemT;
typedef gunrock::app::kcore::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
for (int run_num = 0; run_num < num_runs; ++run_num) {
problem.Reset(graph, target);
enactor.Reset(target);
cpu_timer.Start();
enactor.Enact();
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(num_cores[run_num]);
// find max k
for (SizeT v = 0; v < graph.nodes; v++) {
int k = num_cores[run_num][v];
if (k > max_k[run_num]) {
max_k[run_num] = k;
}
}
}
enactor.Release(target);
problem.Release(target);
return total_time;
}
/*
* @brief Entry of gunrock_kcore function
* @tparam VertexT Type of the k-core
* @tparam SizeT Type of the num_cores
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] num_cores Return generated core number for each run
* @param[out] max_k Return max K-Core generated for each run
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int,
typename GValueT = unsigned int>
double kcore(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const int num_runs, int **num_cores, int *max_k,
const GValueT edge_values = NULL) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("kcore");
gunrock::graphio::UseParameters(parameters);
gunrock::app::kcore::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges,
gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the K-Core
double elapsed_time = gunrock_kcore(parameters, graph, num_cores, max_k);
// Cleanup
graph.Release();
return elapsed_time;
}
/*
* @brief Entry of gunrock_kcore function
* @tparam VertexT Type of the k-core
* @tparam SizeT Type of the num_cores
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] num_cores Return generated core number for each run
* @param[out] max_k Return max K-Core generated for each run
* \return double Return accumulated elapsed times for all runs
*/
double kcore(const int num_nodes, const int num_edges, const int *row_offsets,
const int *col_indices, int *num_cores, int max_k) {
return kcore(num_nodes, num_edges, row_offsets, col_indices, 1 /* num_runs */,
&num_cores, &max_k);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End: | 7454e4d22771e34659af8633b2be71cecf603dfd.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file kcore_app.cu
*
* @brief K-Core Gunrock Application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definitions
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/graphio/graphio.cuh>
// K-Core
#include <gunrock/app/kcore/kcore_enactor.cuh>
#include <gunrock/app/kcore/kcore_test.cuh>
// Others
#include <cstdio>
namespace gunrock {
namespace app {
namespace kcore {
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));
return retval;
}
/**
* @brief Run kcore tests
* @tparam GraphT Type of the graph
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return cudaError_t error message(s), if any
*/
template <typename GraphT>
cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph,
typename GraphT::VertexT *ref_num_cores,
util::Location target) {
cudaError_t retval = cudaSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("kcore", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
VertexT *h_num_cores = new VertexT[graph.nodes];
VertexT max_k = 0;
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(graph, target));
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_num_cores));
SizeT num_errors = Validate_Results(parameters, graph, h_num_cores,
ref_num_cores, false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_num_cores));
if (validation == "last") {
SizeT num_errors = Validate_Results(parameters, graph, h_num_cores, ref_num_cores,
false);
}
// Print Max K-Core
for (SizeT v = 0; v < graph.nodes; v++) {
int k = h_num_cores[v];
if (k > max_k) {
max_k = k;
}
}
util::PrintMsg("Max K-Core: " + std::to_string(max_k), !quiet_mode);
// compute running statistics
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_num_cores;
h_num_cores = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace kcore
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_kcore function
* @tparam GraphT Type of the graph
* @tparam VertexT Type of the num_cores
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] num_cores Return generated core number for each run
* @param[out] max_k Return max K-Core generated for each run
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename VertexT = typename GraphT::VertexT,
typename SizeT = typename GraphT::SizeT>
double gunrock_kcore(gunrock::util::Parameters ¶meters, GraphT &graph,
VertexT **num_cores, VertexT *max_k) {
typedef gunrock::app::kcore::Problem<GraphT> ProblemT;
typedef gunrock::app::kcore::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
for (int run_num = 0; run_num < num_runs; ++run_num) {
problem.Reset(graph, target);
enactor.Reset(target);
cpu_timer.Start();
enactor.Enact();
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(num_cores[run_num]);
// find max k
for (SizeT v = 0; v < graph.nodes; v++) {
int k = num_cores[run_num][v];
if (k > max_k[run_num]) {
max_k[run_num] = k;
}
}
}
enactor.Release(target);
problem.Release(target);
return total_time;
}
/*
* @brief Entry of gunrock_kcore function
* @tparam VertexT Type of the k-core
* @tparam SizeT Type of the num_cores
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] num_cores Return generated core number for each run
* @param[out] max_k Return max K-Core generated for each run
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int,
typename GValueT = unsigned int>
double kcore(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const int num_runs, int **num_cores, int *max_k,
const GValueT edge_values = NULL) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("kcore");
gunrock::graphio::UseParameters(parameters);
gunrock::app::kcore::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges,
gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the K-Core
double elapsed_time = gunrock_kcore(parameters, graph, num_cores, max_k);
// Cleanup
graph.Release();
return elapsed_time;
}
/*
* @brief Entry of gunrock_kcore function
* @tparam VertexT Type of the k-core
* @tparam SizeT Type of the num_cores
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] num_cores Return generated core number for each run
* @param[out] max_k Return max K-Core generated for each run
* \return double Return accumulated elapsed times for all runs
*/
double kcore(const int num_nodes, const int num_edges, const int *row_offsets,
const int *col_indices, int *num_cores, int max_k) {
return kcore(num_nodes, num_edges, row_offsets, col_indices, 1 /* num_runs */,
&num_cores, &max_k);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End: |
0a082bc39ce083c7828e955514f4f0be537939dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <conio.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <windows.h>
using namespace std;
//Funciones que van a utilizarse a lo largo del programa
//CPU
void generarTablero(int *tablero, int filas, int columnas, int dificultad);
void imprimirTablero(int *tablero, int filas, int columnas);
void imprimirColumnas(int columnas);
void generarSemillas(int *tablero, int filas, int columnas, int dificultad);
void guardarPartida(int *tablero, int filas, int columnas, int dificultad);
void cargarPartida();
void modoManual(int *tablero, int filas, int columnas, int dificultad);
//GPU
__global__ void juegoManual(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaSemillas(int *tablero, int filas, int columnas, char movimiento);
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
int main(void){
//Almacenamos las propiedades de la tarjeta para no exceder el numero de hilos posibles en el tablero
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
//Propiedades del tablero
int *tablero;
int filas = 0;
int columnas = 0;
int dificultad = 0;
char modo_juego;
//Preguntamos si quiere cargar un juego guardado anteriormente o si quiere empezar de nuevo
cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n";
char partida = 'X';
cin >> partida;
while (partida != 'C' && partida != 'N') {
cout << "Introduce un valor valido para iniciar el juego\n";
cin >> partida;
}
if (partida == 'N'){
//Recogemos los datos de filas y columnas del tablero que vamos a usar
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
//Tablero mnimo de 4 por 4
while (filas < 4) {
cout << "El numero de filas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n";
cin >> filas;
}
while (columnas < 4) {
cout << "El numero de columnas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n";
cin >> columnas;
}
while (prop.maxThreadsPerBlock < (filas * columnas)) {
cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n";
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
}
cout << "Elija dificultad: \n1. Bajo, se lanzaran 15 semillas de 2, 4 y 8 \n"
"2. Dificil, se lanzaran 8 semillas de 2 y 4 \n";
cin >> dificultad;
while (!(dificultad == 1 || dificultad == 2)){
cout << "Dificultad no vlida \n";
cout << "Selecccione 1 si desea jugar con nivel o 2 si desea jugar con nivel dificil \n";
cin >> dificultad;
}
cout << "Elija modo de juego: \n A. Automtico \n M. Manual \n";
cin >> modo_juego;
while (!(modo_juego == 'M' || modo_juego == 'A')){
cout << "Modo de juego no vlido \n";
cout << "Selecccione A para jugar en modo automtico o M para manual \n";
cin >> modo_juego;
}
//Reservamos la memoria del tablero y lo inicializamos con generar tablero
tablero = new int[filas * columnas];
generarTablero(tablero, filas, columnas, dificultad);
modoManual(tablero, filas, columnas, dificultad);
}
else {
cargarPartida();
}
system("PAUSE");
}
//Generar tablero con nmeros aleatorios
void generarTablero(int *tablero, int filas, int columnas, int dificultad){
srand(time(0));
int tamao = filas * columnas;
for (int i = 0; i < tamao; i++){
tablero[i] = 0;
}
generarSemillas(tablero, filas, columnas, dificultad);
}
//Genera los nmeros para jugar en el tablero
void generarSemillas(int *tablero, int filas, int columnas, int dificultad){
/*if (dificultad == 1){
int semillas = 0;
int valores[3] = { 2, 4, 8 };
while (semillas < 15){
int posicion = rand() % (filas*columnas + 1);
int valor = rand() % 3;
if (tablero[posicion] == 0){
tablero[posicion] = valores[valor];
semillas++;
}
}
}
if (dificultad == 2){
int semillas = 0;
int valores[3] = { 2, 4 };
while (semillas < 8){
int posicion = rand() % (filas*columnas + 1);
int valor = rand() % 2;
if (tablero[posicion] == 0){
tablero[posicion] = valores[valor];
semillas++;
}
}
}*/
int tamao = filas * columnas;
int contador = 0;
while (contador < 3){
int aux = rand() % 3;
int i = rand() % tamao;
if (tablero[i] == 0){
switch (aux){
case 0:
tablero[i] = 2;
break;
case 1:
tablero[i] = 4;
break;
case 2:
tablero[i] = 8;
break;
}
contador++;
}
}
}
//Funcin que imprime el nmero de columnas que va a tener el tablero para que sea ms facil elegir semillas
void imprimirColumnas(int columnas) {
for (int i = 0; i < columnas; i++) {
if (i == 0) {
cout << " " << i + 1;
}
else {
if (i < 9) {
cout << " " << i + 1;
}
else {
cout << " " << i + 1;
}
}
}
cout << "\n";
for (int i = 0; i < columnas; i++) {
if (i == 0) {
cout << " |";
}
else {
cout << " |";
}
}
cout << "\n";
}
//Imprimimos el tablero
void imprimirTablero(int *tablero, int filas, int columnas) {
cout << "SE HAN GENERADO " << filas << " FILAS Y " << columnas << " COLUMNAS\n";
cout << "+-+-+-TABLERO DE JUEGO-+-+-+\n\n";
imprimirColumnas(columnas);
for (int i = 0; i < filas; i++) {
if (i < 9) {
cout << i + 1 << " - ";
}
else {
cout << i + 1 << " - ";
}
for (int k = 0; k < columnas; k++) {
//Damos color en funcin del nmero imprimido
int bloque = tablero[i * filas + k];
switch (bloque) {
case 2:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 14); //Amarillo
break;
case 4:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 12); //Rojo
break;
case 8:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); //Morado
break;
case 16:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 9); //Azul
break;
default:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); //Blanco
}
if (bloque < 10) cout << "| " << bloque << " |";
else cout << "| " << bloque << "|";
}
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7);
cout << "\n";
}
}
//En funcin del movimiento, llama a la comprobacin correspondiente
__device__ void compruebaSemillas(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
switch (movimiento){
case 'W':
compruebaAbajo(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'S':
compruebaArriba(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'D':
compruebaIzquierda(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'A':
compruebaDerecha(tablero, fila, columna, filas, columnas, movimiento);
break;
}
}
//Desplaza los nmeros respecto a los ceros que haya, en funcin del movimiento
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
if (movimiento == 'W'){
for (int i = filas - 1; i > 0; i--){
for (int j = i; j > 0; j--){
if (tablero[(j * columnas) + columna] != 0 && tablero[((j - 1) * columnas) + columna] == 0){
tablero[((j - 1) * columnas) + columna] = tablero[(j * columnas) + columna];
tablero[(j * columnas) + columna] = 0;
}
}
}
}
else if (movimiento == 'S'){
for (int i = 0; i < filas - 1; i++){
for (int j = i; j < filas - 1; j++){
if (tablero[(j * columnas) + columna] != 0 && tablero[((j + 1) * columnas) + columna] == 0){
tablero[((j + 1) * columnas) + columna] = tablero[(j * columnas) + columna];
tablero[(j * columnas) + columna] = 0;
}
}
}
}
else if (movimiento == 'D'){
for (int i = 0; i < columnas - 1; i++){
for (int j = i; j < columnas - 1; j++){
if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j + 1)] == 0 && tablero[fila * columnas + (j + 1)] != columnas){
tablero[fila * columnas + (j + 1)] = tablero[fila * columnas + j];
tablero[fila * columnas + j] = 0;
}
}
}
}
else if (movimiento == 'A'){
for (int i = columnas - 1; i > 0; i--){
for (int j = i; j > 0; j--){
if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j - 1)] == 0){
tablero[fila * columnas + (j - 1)] = tablero[fila * columnas + j];
tablero[fila * columnas + j] = 0;
}
}
}
}
}
//Comprueba hacia arriba
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila - 1) * columnas) + columna]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[((fila - 1) * columnas) + columna] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
//Comprueba hacia abajo
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila + 1) * columnas) + columna]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[((fila + 1) * columnas) + columna] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
//Comprueba hacia la derecha
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna + 1)]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[(fila * columnas) + (columna + 1)] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
//Comprueba hacia la izquierda
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna - 1)]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[(fila * columnas) + (columna - 1)] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
__global__ void juegoManual(int *tablero, int filas, int columnas, char movimiento){
//Guardamos la columna y la fila del hilo
int columnaHilo = threadIdx.x;
int filaHilo = threadIdx.y;
compruebaSemillas(tablero, filaHilo, columnaHilo, filas, columnas, movimiento);
__syncthreads();
}
//Guarda la partida con el tablero, las filas, las columnas y la dificultad
void guardarPartida(int *tablero, int filas, int columnas, int dificultad) {
ofstream doc;
doc.open("partida.txt");
doc << filas << "\n";
doc << columnas << "\n";
doc << dificultad << "\n";
for (int i = 0; i < filas * columnas; i++) {
doc << tablero[i] << " ";
}
doc.close();
system("cls");
cout << "Guardado correctamente.\n\n";
}
//Carga la partida guardada
void cargarPartida() {
const string fichero = "partida.txt";
ifstream leer;
leer.open(fichero.c_str());
int d, *tablero;
int i = 0;
int n = 48;
int f = 0;
int c = 0;
char fila[80];
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
while (n > 47 && n < 58) {
n = (int)fila[i];
i++;
if (n > 47 && n < 58) {
f = f * 10 + (n - 48);
}
}
}
n = 48;
i = 0;
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
while (n > 47 && n < 58) {
n = (int)fila[i];
i++;
if (n > 47 && n < 58) {
c = c * 10 + (n - 48);
}
}
}
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
d = (int)fila[0] - 48;
}
tablero = new int[f*c];
for (int i = 0; i < f * c; i++) {
leer.getline(fila, 80, ' ');
tablero[i] = (int)fila[0] - 48;
}
leer.close();
modoManual(tablero, f, c, d);
}
void modoManual(int *tablero, int filas, int columnas, int dificultad){
//system("cls");
char movimiento = ' ';
while (movimiento != 'Z'){
imprimirTablero(tablero, filas, columnas);
cout << "Pulsa W, A, S o D para mover los numeros (Z para salir): \n";
cin >> movimiento;
//while (movimiento != (ARRIBA || ABAJO || IZQUIERDA || DERECHA)) {
while (movimiento != 'W' && movimiento != 'S' && movimiento != 'A' && movimiento != 'D' && movimiento != 'Z') {
cout << "Tecla no valida, introduzca una valida:\n";
cin >> movimiento;
}
//CUDA
int *tablero_gpu;
//Reservamos memoria y copiamos tablero en GPU
hipMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int));
hipMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), hipMemcpyHostToDevice);
//Creamos los hilos en un solo bloque
dim3 DimGrid(1, 1);
dim3 DimBlock(filas, columnas);
juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento);
hipMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, hipMemcpyDeviceToHost);
//system("cls");
generarSemillas(tablero, filas, columnas, dificultad);
hipFree(tablero_gpu);
}
//system("cls");
cout << "Deseas guardar la partida? (S/N)\n";
char guardar = 'x';
cin >> guardar;
while (guardar != 'S' && guardar != 'N') {
system("cls");
cout << "Valor no valido, quieres guardar la partida? (S/N): \n";
cin >> guardar;
}
if (guardar == 'S') {
guardarPartida(tablero, filas, columnas, dificultad);
}
else {
cout << "Saliendo sin guardar...\n \n";
}
} | 0a082bc39ce083c7828e955514f4f0be537939dd.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <conio.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <windows.h>
using namespace std;
//Funciones que van a utilizarse a lo largo del programa
//CPU
void generarTablero(int *tablero, int filas, int columnas, int dificultad);
void imprimirTablero(int *tablero, int filas, int columnas);
void imprimirColumnas(int columnas);
void generarSemillas(int *tablero, int filas, int columnas, int dificultad);
void guardarPartida(int *tablero, int filas, int columnas, int dificultad);
void cargarPartida();
void modoManual(int *tablero, int filas, int columnas, int dificultad);
//GPU
__global__ void juegoManual(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaSemillas(int *tablero, int filas, int columnas, char movimiento);
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
int main(void){
//Almacenamos las propiedades de la tarjeta para no exceder el numero de hilos posibles en el tablero
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
//Propiedades del tablero
int *tablero;
int filas = 0;
int columnas = 0;
int dificultad = 0;
char modo_juego;
//Preguntamos si quiere cargar un juego guardado anteriormente o si quiere empezar de nuevo
cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n";
char partida = 'X';
cin >> partida;
while (partida != 'C' && partida != 'N') {
cout << "Introduce un valor valido para iniciar el juego\n";
cin >> partida;
}
if (partida == 'N'){
//Recogemos los datos de filas y columnas del tablero que vamos a usar
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
//Tablero mínimo de 4 por 4
while (filas < 4) {
cout << "El numero de filas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n";
cin >> filas;
}
while (columnas < 4) {
cout << "El numero de columnas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n";
cin >> columnas;
}
while (prop.maxThreadsPerBlock < (filas * columnas)) {
cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n";
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
}
cout << "Elija dificultad: \n1. Bajo, se lanzaran 15 semillas de 2, 4 y 8 \n"
"2. Dificil, se lanzaran 8 semillas de 2 y 4 \n";
cin >> dificultad;
while (!(dificultad == 1 || dificultad == 2)){
cout << "Dificultad no válida \n";
cout << "Selecccione 1 si desea jugar con nivel o 2 si desea jugar con nivel dificil \n";
cin >> dificultad;
}
cout << "Elija modo de juego: \n A. Automático \n M. Manual \n";
cin >> modo_juego;
while (!(modo_juego == 'M' || modo_juego == 'A')){
cout << "Modo de juego no válido \n";
cout << "Selecccione A para jugar en modo automático o M para manual \n";
cin >> modo_juego;
}
//Reservamos la memoria del tablero y lo inicializamos con generar tablero
tablero = new int[filas * columnas];
generarTablero(tablero, filas, columnas, dificultad);
modoManual(tablero, filas, columnas, dificultad);
}
else {
cargarPartida();
}
system("PAUSE");
}
//Generar tablero con números aleatorios
void generarTablero(int *tablero, int filas, int columnas, int dificultad){
srand(time(0));
int tamaño = filas * columnas;
for (int i = 0; i < tamaño; i++){
tablero[i] = 0;
}
generarSemillas(tablero, filas, columnas, dificultad);
}
//Genera los números para jugar en el tablero
void generarSemillas(int *tablero, int filas, int columnas, int dificultad){
/*if (dificultad == 1){
int semillas = 0;
int valores[3] = { 2, 4, 8 };
while (semillas < 15){
int posicion = rand() % (filas*columnas + 1);
int valor = rand() % 3;
if (tablero[posicion] == 0){
tablero[posicion] = valores[valor];
semillas++;
}
}
}
if (dificultad == 2){
int semillas = 0;
int valores[3] = { 2, 4 };
while (semillas < 8){
int posicion = rand() % (filas*columnas + 1);
int valor = rand() % 2;
if (tablero[posicion] == 0){
tablero[posicion] = valores[valor];
semillas++;
}
}
}*/
int tamaño = filas * columnas;
int contador = 0;
while (contador < 3){
int aux = rand() % 3;
int i = rand() % tamaño;
if (tablero[i] == 0){
switch (aux){
case 0:
tablero[i] = 2;
break;
case 1:
tablero[i] = 4;
break;
case 2:
tablero[i] = 8;
break;
}
contador++;
}
}
}
//Función que imprime el número de columnas que va a tener el tablero para que sea más facil elegir semillas
void imprimirColumnas(int columnas) {
for (int i = 0; i < columnas; i++) {
if (i == 0) {
cout << " " << i + 1;
}
else {
if (i < 9) {
cout << " " << i + 1;
}
else {
cout << " " << i + 1;
}
}
}
cout << "\n";
for (int i = 0; i < columnas; i++) {
if (i == 0) {
cout << " |";
}
else {
cout << " |";
}
}
cout << "\n";
}
//Imprimimos el tablero
void imprimirTablero(int *tablero, int filas, int columnas) {
cout << "SE HAN GENERADO " << filas << " FILAS Y " << columnas << " COLUMNAS\n";
cout << "+-+-+-TABLERO DE JUEGO-+-+-+\n\n";
imprimirColumnas(columnas);
for (int i = 0; i < filas; i++) {
if (i < 9) {
cout << i + 1 << " - ";
}
else {
cout << i + 1 << " - ";
}
for (int k = 0; k < columnas; k++) {
//Damos color en función del número imprimido
int bloque = tablero[i * filas + k];
switch (bloque) {
case 2:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 14); //Amarillo
break;
case 4:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 12); //Rojo
break;
case 8:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); //Morado
break;
case 16:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 9); //Azul
break;
default:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); //Blanco
}
if (bloque < 10) cout << "| " << bloque << " |";
else cout << "| " << bloque << "|";
}
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7);
cout << "\n";
}
}
//En función del movimiento, llama a la comprobación correspondiente
__device__ void compruebaSemillas(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
switch (movimiento){
case 'W':
compruebaAbajo(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'S':
compruebaArriba(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'D':
compruebaIzquierda(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'A':
compruebaDerecha(tablero, fila, columna, filas, columnas, movimiento);
break;
}
}
//Desplaza los números respecto a los ceros que haya, en función del movimiento
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
if (movimiento == 'W'){
for (int i = filas - 1; i > 0; i--){
for (int j = i; j > 0; j--){
if (tablero[(j * columnas) + columna] != 0 && tablero[((j - 1) * columnas) + columna] == 0){
tablero[((j - 1) * columnas) + columna] = tablero[(j * columnas) + columna];
tablero[(j * columnas) + columna] = 0;
}
}
}
}
else if (movimiento == 'S'){
for (int i = 0; i < filas - 1; i++){
for (int j = i; j < filas - 1; j++){
if (tablero[(j * columnas) + columna] != 0 && tablero[((j + 1) * columnas) + columna] == 0){
tablero[((j + 1) * columnas) + columna] = tablero[(j * columnas) + columna];
tablero[(j * columnas) + columna] = 0;
}
}
}
}
else if (movimiento == 'D'){
for (int i = 0; i < columnas - 1; i++){
for (int j = i; j < columnas - 1; j++){
if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j + 1)] == 0 && tablero[fila * columnas + (j + 1)] != columnas){
tablero[fila * columnas + (j + 1)] = tablero[fila * columnas + j];
tablero[fila * columnas + j] = 0;
}
}
}
}
else if (movimiento == 'A'){
for (int i = columnas - 1; i > 0; i--){
for (int j = i; j > 0; j--){
if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j - 1)] == 0){
tablero[fila * columnas + (j - 1)] = tablero[fila * columnas + j];
tablero[fila * columnas + j] = 0;
}
}
}
}
}
//Comprueba hacia arriba
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila - 1) * columnas) + columna]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[((fila - 1) * columnas) + columna] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
//Comprueba hacia abajo
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila + 1) * columnas) + columna]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[((fila + 1) * columnas) + columna] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
//Comprueba hacia la derecha
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna + 1)]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[(fila * columnas) + (columna + 1)] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
//Comprueba hacia la izquierda
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna - 1)]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[(fila * columnas) + (columna - 1)] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
__global__ void juegoManual(int *tablero, int filas, int columnas, char movimiento){
//Guardamos la columna y la fila del hilo
int columnaHilo = threadIdx.x;
int filaHilo = threadIdx.y;
compruebaSemillas(tablero, filaHilo, columnaHilo, filas, columnas, movimiento);
__syncthreads();
}
//Guarda la partida con el tablero, las filas, las columnas y la dificultad
void guardarPartida(int *tablero, int filas, int columnas, int dificultad) {
ofstream doc;
doc.open("partida.txt");
doc << filas << "\n";
doc << columnas << "\n";
doc << dificultad << "\n";
for (int i = 0; i < filas * columnas; i++) {
doc << tablero[i] << " ";
}
doc.close();
system("cls");
cout << "Guardado correctamente.\n\n";
}
//Carga la partida guardada
void cargarPartida() {
const string fichero = "partida.txt";
ifstream leer;
leer.open(fichero.c_str());
int d, *tablero;
int i = 0;
int n = 48;
int f = 0;
int c = 0;
char fila[80];
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
while (n > 47 && n < 58) {
n = (int)fila[i];
i++;
if (n > 47 && n < 58) {
f = f * 10 + (n - 48);
}
}
}
n = 48;
i = 0;
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
while (n > 47 && n < 58) {
n = (int)fila[i];
i++;
if (n > 47 && n < 58) {
c = c * 10 + (n - 48);
}
}
}
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
d = (int)fila[0] - 48;
}
tablero = new int[f*c];
for (int i = 0; i < f * c; i++) {
leer.getline(fila, 80, ' ');
tablero[i] = (int)fila[0] - 48;
}
leer.close();
modoManual(tablero, f, c, d);
}
void modoManual(int *tablero, int filas, int columnas, int dificultad){
//system("cls");
char movimiento = ' ';
while (movimiento != 'Z'){
imprimirTablero(tablero, filas, columnas);
cout << "Pulsa W, A, S o D para mover los numeros (Z para salir): \n";
cin >> movimiento;
//while (movimiento != (ARRIBA || ABAJO || IZQUIERDA || DERECHA)) {
while (movimiento != 'W' && movimiento != 'S' && movimiento != 'A' && movimiento != 'D' && movimiento != 'Z') {
cout << "Tecla no valida, introduzca una valida:\n";
cin >> movimiento;
}
//CUDA
int *tablero_gpu;
//Reservamos memoria y copiamos tablero en GPU
cudaMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int));
cudaMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), cudaMemcpyHostToDevice);
//Creamos los hilos en un solo bloque
dim3 DimGrid(1, 1);
dim3 DimBlock(filas, columnas);
juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento);
cudaMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, cudaMemcpyDeviceToHost);
//system("cls");
generarSemillas(tablero, filas, columnas, dificultad);
cudaFree(tablero_gpu);
}
//system("cls");
cout << "Deseas guardar la partida? (S/N)\n";
char guardar = 'x';
cin >> guardar;
while (guardar != 'S' && guardar != 'N') {
system("cls");
cout << "Valor no valido, quieres guardar la partida? (S/N): \n";
cin >> guardar;
}
if (guardar == 'S') {
guardarPartida(tablero, filas, columnas, dificultad);
}
else {
cout << "Saliendo sin guardar...\n \n";
}
} |
e5c18ed5ec1bfc2f108872bfc4a21b312e3da374.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include<limits>
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
struct NUM_ADD
{
short2 read_reference_number;
int address_array;
};
__global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int * result, char * cigar,int * cigar_int,int * direction) //, char * result
{
int offset=blockIdx.x;
__shared__ short2 read_reference_number;
__shared__ char * read_base_array;
__shared__ char4 * reference_base_array;
__shared__ int mismatch;
__shared__ int match;
__shared__ int open;
__shared__ int extend;
__shared__ short2 * direction_index;
__shared__ char * cigar_store;
__shared__ int *cigar_int_store;
while(offset<size)
{
if( threadIdx.x==0)
{
read_reference_number=num_add[offset].read_reference_number;
read_base_array=(char *) (data+num_add[offset].address_array);
reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128);
direction_index=(short2 *) (direction+offset*640*1100);
cigar_store=(char *) (cigar+offset*sizeof(char)*128);
cigar_int_store=(int *) (cigar_int+offset*128);
}
__syncthreads();
__shared__ char reference_base_in_char[600];
int hh=(read_reference_number.y+4-1)/4;
int tt=(hh+blockDim.x-1)/blockDim.x;
for(int ii=0;ii<tt;ii++)
{
int aa=threadIdx.x+ii*blockDim.x;
if(aa< hh)
{
char4 reference_base_in_thread;
reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory
reference_base_in_char[aa*4]=reference_base_in_thread.x;
reference_base_in_char[aa*4+1]=reference_base_in_thread.y;
reference_base_in_char[aa*4+2]=reference_base_in_thread.z;
reference_base_in_char[aa*4+3]=reference_base_in_thread.w;
}
}
__shared__ int MM[130];
__shared__ int gap_h[130]; //insertion
__shared__ short2 gap_size_h[130]; //insertion
__shared__ int result_col;
__shared__ int result_row;
__shared__ int result_col_index;
__shared__ int result_row_index;
__shared__ char cigar_m[128];
__shared__ int cigar_int_m[128];
int final_result;
int final_i;
int final_j;
if(threadIdx.x==0)
{
MM[0]=0;
gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2;
gap_size_h[0].x=0;
gap_size_h[0].y=0;
match=200;
mismatch=-150;
open=-260;
extend=-11;
result_col=-1000000000;//std::numeric_limits<int>::min()/2;
result_row=-1000000000;//std::numeric_limits<int>::min()/2;
// for(int i=0;i<read_reference_number.y;i++)
// printf("%c",reference_base_in_char[i]);
// printf("\n");
// for(int i=0;i<read_reference_number.x;i++)
// printf("%c",read_base_array[i]);
}
__syncthreads();
int read_number=read_reference_number.x;
{
char read_base;
read_base=read_base_array[threadIdx.x];
int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;;
int gap_size_v=0; //Deletion
int M=0; //now
int step_right; //now
int ki=0;//insertion h negetive
//deletion v
int MMM=0;
short mt=0;
short2 curmt;
curmt.x=0;
curmt.y=0;
int current_reference_id=0;
for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++)
{
int aa=j-threadIdx.x;
if( aa>=0 && (current_reference_id<read_reference_number.y))
{
int prev_gap=M+open; //M which is cacluated by last step in the same thread
gap_v+=extend;
if(prev_gap>gap_v)
{
gap_v=prev_gap;
gap_size_v=1;
}
else
gap_size_v++;
char reference_base_each=reference_base_in_char[current_reference_id];
M=MMM+(read_base==reference_base_each? match:mismatch);
prev_gap=MM[threadIdx.x]+open;
step_right=gap_h[threadIdx.x]+extend;
if(prev_gap>step_right)
{
step_right=prev_gap;
ki=1;
}
else
ki=gap_size_h[threadIdx.x].x+1;
bool diag=(M>=gap_v)&&(M>=step_right);
curmt.y=0;
if(diag)
{
curmt.x=0;
//if(threadIdx.x==0||current_reference_id==0)
// curmt.y=0;
// else
curmt.y=mt+1;
// curBtrack=0;
}
else
if(step_right>=gap_v)
{
M=step_right;
curmt.x=0-ki;
// curBtrack=0-ki;
}
else
{
M=gap_v;
curmt.x=gap_size_v;
//curBtrack=gap_size_v;
}
MMM=MM[threadIdx.x];
mt=gap_size_h[threadIdx.x].y;
direction_index[640*j+threadIdx.x]=curmt;
//if(threadIdx.x==read_reference_number.x-3)
//printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack);
if(current_reference_id==read_reference_number.y-1)
{
if(M>=result_row)
{
result_row=M;
result_row_index=threadIdx.x; //
}
//printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x);
}
if(threadIdx.x==read_reference_number.x-1)
{
if(M>=result_col)
{
result_col=M;
result_col_index=current_reference_id; // +1
}
}
current_reference_id++;
// if(threadIdx.x==5)
// printf("%d ", curmt.y);
}
__syncthreads(); //to make sure that the former value of MM[threadIdx.x+1] are used by other threads.
MM[threadIdx.x+1]=M;
gap_h[threadIdx.x+1]=step_right;
gap_size_h[threadIdx.x+1].x=ki;
gap_size_h[threadIdx.x+1].y=curmt.y;
__syncthreads(); // there should be two synthreads(); // to make sure that all of MM[threadIdx.x+1] have get a new value before M,D and I changed.
}
}
char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion
__shared__ int cigar_index;
int segment_length;
short2 btr;
char new_state;
int step_length;
if(threadIdx.x==read_reference_number.x-1)
{
//printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index);
if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1))
{
final_result=result_row;
final_i=read_reference_number.y-1;
final_j=result_row_index;
segment_length=read_reference_number.x-1-result_row_index;
}
else
{
final_result=result_col;
final_i=result_col_index;
final_j=read_reference_number.x-1;
segment_length=0;
}
result[offset*3]=final_result;
//printf("%d\n",final_result);
cigar_index=0;
if(segment_length>0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=segment_length;
segment_length=0;
cigar_index++;
}
//printf("\n %d %d\n", final_i,final_j);
//state=4;
state='N';
do
{
btr=direction_index[(final_i+final_j)*640+final_j];
if(btr.x>0)
{
new_state='D';
// new_state=3;
step_length=btr.x;
final_i-=step_length;
}
else
if(btr.x<0)
{
new_state='I';
// new_state=2;
step_length=0-btr.x;
final_j-=step_length;
}
else
{
new_state='M';
// new_state=0;
//if(btr.y==0)
//step_length=1;
//else
step_length=btr.y;
final_i-=step_length;
final_j-=step_length;
}
// printf(" %d %d %d %d\n", state,new_state,final_i,final_j);
/* if(new_state==0)
{ final_i-=step_length;
final_j-=step_length;
}
else
if(new_state==2)
final_j-=step_length;
else
final_i-=step_length;
*/
//if(state==4) state=new_state;
if(state=='N') state=new_state;
if(state==new_state)
{
segment_length+=step_length;
}
else
{
// if(state==0) cigar_m[cigar_index]='M';
// if(state==2) cigar_m[cigar_index]='I';
// if(state==3) cigar_m[cigar_index]='D';
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
segment_length=step_length;
cigar_index++;
state=new_state;
}
}while(final_i>=0&&final_j>=0);
//if(state==0) cigar_m[cigar_index]='M';
//if(state==2) cigar_m[cigar_index]='I';
//if(state==3) cigar_m[cigar_index]='D';
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
cigar_index++;
if(final_j>=0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=final_j+1;
cigar_index++;
}
result[offset*3+1]=final_i+1;
result[offset*3+2]=cigar_index;
/* for(int i=cigar_index-1;i>=0;i--)
{
printf("%d%c",cigar_int_m[i],cigar_m[i]);
}
*/
}
__syncthreads();
if(threadIdx.x<cigar_index && cigar_index<=blockDim.x)
{
// if(threadIdx.x==0)
// printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]);
cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x];
cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x];
// if(threadIdx.x==0)
// printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]);
}
offset+=gridDim.x;
}
}
struct InputData
{
char read_base[600];
char reference_base[600];
};
int main(int artc, char* args[])
{
FILE * file;
file=fopen(args[1],"r");
int size;
// fscanf(file,"%d",&size);
double computation_time=0;//total_time=0;
timespec start,finish;
/* char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[1]);
strcpy(inputdata[index].read_base,data[1]);
index++;
}
*/
//data preparation.
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2);
NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total);
char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // it is 64*x .thus we donot need to worry about align
int data_size=0;
char * data_d_total;
hipMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*3);
int * result_h=(int *) malloc(sizeof(int)*size*3);
char * cigar_h=(char *) malloc(sizeof(char)*size*128);
int * cigar_int_h=(int *) malloc(sizeof(int)*size*128);
for(int i=0;i<size;i++)
{
char4 reference_tep[150];
int read_len=strlen(inputdata[i].read_base);
int ref_len=strlen(inputdata[i].reference_base);
int new_len=(ref_len+4-1)/4;
for(int j=0;j<new_len;j++)
{
reference_tep[j].x=inputdata[i].reference_base[j*4];
if(j*4+1<ref_len)
reference_tep[j].y=inputdata[i].reference_base[j*4+1];
if(j*4+2<ref_len)
reference_tep[j].z=inputdata[i].reference_base[j*4+2];
if(j*4+3<ref_len)
reference_tep[j].w=inputdata[i].reference_base[j*4+3];
}
data_num_add[i].read_reference_number.x=read_len;
data_num_add[i].read_reference_number.y=ref_len;
data_num_add[i].address_array=data_size;
memcpy(data_h,inputdata[i].read_base,read_len);
data_h+=(read_len+128-1)/128*128;
data_size+=(read_len+128-1)/128*128;
memcpy(data_h,reference_tep,sizeof(char4)* new_len);
data_h+=(new_len*sizeof(char4)+127)/128*128;
data_size+=(new_len*sizeof(char4)+127)/128*128;
}
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
hipMemcpy(data_d_total,data_h_total,data_size_to_copy,hipMemcpyHostToDevice);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total);
char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128;
int * result_d=(int *) (data_d_total+data_size_to_copy);
char * cigar;
hipMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int)));
int * cigar_int=(int *) (cigar+size*128*sizeof(char));
int * direction;
hipMalloc( (int **) & direction, size * (640*1100* sizeof (int)));
dim3 block(128);
dim3 grid(size);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
hipLaunchKernelGGL(( calculate_cigar), dim3(grid),dim3(block), 0, 0, size,data_d,num_add_d,result_d,cigar,cigar_int,direction); //result
hipMemcpy(result_h,result_d,size*sizeof(int)*3,hipMemcpyDeviceToHost);
hipMemcpy(cigar_h,cigar,128*sizeof(char)*size, hipMemcpyDeviceToHost);
hipMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
for(int i=0;i<size;i++)
{
printf("%d %d\n",result_h[i*3],result_h[i*3+1]);
printf("[");
for(int j=0;j<result_h[i*3+2];j++)
{
if(j!=0) printf(", ");
printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]);
}
printf("]\n");
}
hipFree(direction);
free(data_h_total);
hipFree(data_d_total);
free(inputdata);
hipFree(cigar);
free(cigar_int_h);
free(cigar_h);
// fscanf(file,"%d",&size);
}
printf(" computation_time= %e total_time=%e \n",computation_time,0);
return 0;
}
| e5c18ed5ec1bfc2f108872bfc4a21b312e3da374.cu | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include<limits>
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
struct NUM_ADD
{
short2 read_reference_number;
int address_array;
};
__global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int * result, char * cigar,int * cigar_int,int * direction) //, char * result
{
int offset=blockIdx.x;
__shared__ short2 read_reference_number;
__shared__ char * read_base_array;
__shared__ char4 * reference_base_array;
__shared__ int mismatch;
__shared__ int match;
__shared__ int open;
__shared__ int extend;
__shared__ short2 * direction_index;
__shared__ char * cigar_store;
__shared__ int *cigar_int_store;
while(offset<size)
{
if( threadIdx.x==0)
{
read_reference_number=num_add[offset].read_reference_number;
read_base_array=(char *) (data+num_add[offset].address_array);
reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128);
direction_index=(short2 *) (direction+offset*640*1100);
cigar_store=(char *) (cigar+offset*sizeof(char)*128);
cigar_int_store=(int *) (cigar_int+offset*128);
}
__syncthreads();
__shared__ char reference_base_in_char[600];
int hh=(read_reference_number.y+4-1)/4;
int tt=(hh+blockDim.x-1)/blockDim.x;
for(int ii=0;ii<tt;ii++)
{
int aa=threadIdx.x+ii*blockDim.x;
if(aa< hh)
{
char4 reference_base_in_thread;
reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory
reference_base_in_char[aa*4]=reference_base_in_thread.x;
reference_base_in_char[aa*4+1]=reference_base_in_thread.y;
reference_base_in_char[aa*4+2]=reference_base_in_thread.z;
reference_base_in_char[aa*4+3]=reference_base_in_thread.w;
}
}
__shared__ int MM[130];
__shared__ int gap_h[130]; //insertion
__shared__ short2 gap_size_h[130]; //insertion
__shared__ int result_col;
__shared__ int result_row;
__shared__ int result_col_index;
__shared__ int result_row_index;
__shared__ char cigar_m[128];
__shared__ int cigar_int_m[128];
int final_result;
int final_i;
int final_j;
if(threadIdx.x==0)
{
MM[0]=0;
gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2;
gap_size_h[0].x=0;
gap_size_h[0].y=0;
match=200;
mismatch=-150;
open=-260;
extend=-11;
result_col=-1000000000;//std::numeric_limits<int>::min()/2;
result_row=-1000000000;//std::numeric_limits<int>::min()/2;
// for(int i=0;i<read_reference_number.y;i++)
// printf("%c",reference_base_in_char[i]);
// printf("\n");
// for(int i=0;i<read_reference_number.x;i++)
// printf("%c",read_base_array[i]);
}
__syncthreads();
int read_number=read_reference_number.x;
{
char read_base;
read_base=read_base_array[threadIdx.x];
int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;;
int gap_size_v=0; //Deletion
int M=0; //now
int step_right; //now
int ki=0;//insertion h negetive
//deletion v
int MMM=0;
short mt=0;
short2 curmt;
curmt.x=0;
curmt.y=0;
int current_reference_id=0;
for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++)
{
int aa=j-threadIdx.x;
if( aa>=0 && (current_reference_id<read_reference_number.y))
{
int prev_gap=M+open; //M which is cacluated by last step in the same thread
gap_v+=extend;
if(prev_gap>gap_v)
{
gap_v=prev_gap;
gap_size_v=1;
}
else
gap_size_v++;
char reference_base_each=reference_base_in_char[current_reference_id];
M=MMM+(read_base==reference_base_each? match:mismatch);
prev_gap=MM[threadIdx.x]+open;
step_right=gap_h[threadIdx.x]+extend;
if(prev_gap>step_right)
{
step_right=prev_gap;
ki=1;
}
else
ki=gap_size_h[threadIdx.x].x+1;
bool diag=(M>=gap_v)&&(M>=step_right);
curmt.y=0;
if(diag)
{
curmt.x=0;
//if(threadIdx.x==0||current_reference_id==0)
// curmt.y=0;
// else
curmt.y=mt+1;
// curBtrack=0;
}
else
if(step_right>=gap_v)
{
M=step_right;
curmt.x=0-ki;
// curBtrack=0-ki;
}
else
{
M=gap_v;
curmt.x=gap_size_v;
//curBtrack=gap_size_v;
}
MMM=MM[threadIdx.x];
mt=gap_size_h[threadIdx.x].y;
direction_index[640*j+threadIdx.x]=curmt;
//if(threadIdx.x==read_reference_number.x-3)
//printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack);
if(current_reference_id==read_reference_number.y-1)
{
if(M>=result_row)
{
result_row=M;
result_row_index=threadIdx.x; //
}
//printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x);
}
if(threadIdx.x==read_reference_number.x-1)
{
if(M>=result_col)
{
result_col=M;
result_col_index=current_reference_id; // +1
}
}
current_reference_id++;
// if(threadIdx.x==5)
// printf("%d ", curmt.y);
}
__syncthreads(); //to make sure that the former value of MM[threadIdx.x+1] are used by other threads.
MM[threadIdx.x+1]=M;
gap_h[threadIdx.x+1]=step_right;
gap_size_h[threadIdx.x+1].x=ki;
gap_size_h[threadIdx.x+1].y=curmt.y;
__syncthreads(); // there should be two synthreads(); // to make sure that all of MM[threadIdx.x+1] have get a new value before M,D and I changed.
}
}
char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion
__shared__ int cigar_index;
int segment_length;
short2 btr;
char new_state;
int step_length;
if(threadIdx.x==read_reference_number.x-1)
{
//printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index);
if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1))
{
final_result=result_row;
final_i=read_reference_number.y-1;
final_j=result_row_index;
segment_length=read_reference_number.x-1-result_row_index;
}
else
{
final_result=result_col;
final_i=result_col_index;
final_j=read_reference_number.x-1;
segment_length=0;
}
result[offset*3]=final_result;
//printf("%d\n",final_result);
cigar_index=0;
if(segment_length>0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=segment_length;
segment_length=0;
cigar_index++;
}
//printf("\n %d %d\n", final_i,final_j);
//state=4;
state='N';
do
{
btr=direction_index[(final_i+final_j)*640+final_j];
if(btr.x>0)
{
new_state='D';
// new_state=3;
step_length=btr.x;
final_i-=step_length;
}
else
if(btr.x<0)
{
new_state='I';
// new_state=2;
step_length=0-btr.x;
final_j-=step_length;
}
else
{
new_state='M';
// new_state=0;
//if(btr.y==0)
//step_length=1;
//else
step_length=btr.y;
final_i-=step_length;
final_j-=step_length;
}
// printf(" %d %d %d %d\n", state,new_state,final_i,final_j);
/* if(new_state==0)
{ final_i-=step_length;
final_j-=step_length;
}
else
if(new_state==2)
final_j-=step_length;
else
final_i-=step_length;
*/
//if(state==4) state=new_state;
if(state=='N') state=new_state;
if(state==new_state)
{
segment_length+=step_length;
}
else
{
// if(state==0) cigar_m[cigar_index]='M';
// if(state==2) cigar_m[cigar_index]='I';
// if(state==3) cigar_m[cigar_index]='D';
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
segment_length=step_length;
cigar_index++;
state=new_state;
}
}while(final_i>=0&&final_j>=0);
//if(state==0) cigar_m[cigar_index]='M';
//if(state==2) cigar_m[cigar_index]='I';
//if(state==3) cigar_m[cigar_index]='D';
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
cigar_index++;
if(final_j>=0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=final_j+1;
cigar_index++;
}
result[offset*3+1]=final_i+1;
result[offset*3+2]=cigar_index;
/* for(int i=cigar_index-1;i>=0;i--)
{
printf("%d%c",cigar_int_m[i],cigar_m[i]);
}
*/
}
__syncthreads();
if(threadIdx.x<cigar_index && cigar_index<=blockDim.x)
{
// if(threadIdx.x==0)
// printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]);
cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x];
cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x];
// if(threadIdx.x==0)
// printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]);
}
offset+=gridDim.x;
}
}
struct InputData
{
char read_base[600];
char reference_base[600];
};
int main(int artc, char* args[])
{
FILE * file;
file=fopen(args[1],"r");
int size;
// fscanf(file,"%d",&size);
double computation_time=0;//total_time=0;
timespec start,finish;
/* char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[1]);
strcpy(inputdata[index].read_base,data[1]);
index++;
}
*/
//data preparation.
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2);
NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total);
char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // it is 64*x .thus we donot need to worry about align
int data_size=0;
char * data_d_total;
cudaMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*3);
int * result_h=(int *) malloc(sizeof(int)*size*3);
char * cigar_h=(char *) malloc(sizeof(char)*size*128);
int * cigar_int_h=(int *) malloc(sizeof(int)*size*128);
for(int i=0;i<size;i++)
{
char4 reference_tep[150];
int read_len=strlen(inputdata[i].read_base);
int ref_len=strlen(inputdata[i].reference_base);
int new_len=(ref_len+4-1)/4;
for(int j=0;j<new_len;j++)
{
reference_tep[j].x=inputdata[i].reference_base[j*4];
if(j*4+1<ref_len)
reference_tep[j].y=inputdata[i].reference_base[j*4+1];
if(j*4+2<ref_len)
reference_tep[j].z=inputdata[i].reference_base[j*4+2];
if(j*4+3<ref_len)
reference_tep[j].w=inputdata[i].reference_base[j*4+3];
}
data_num_add[i].read_reference_number.x=read_len;
data_num_add[i].read_reference_number.y=ref_len;
data_num_add[i].address_array=data_size;
memcpy(data_h,inputdata[i].read_base,read_len);
data_h+=(read_len+128-1)/128*128;
data_size+=(read_len+128-1)/128*128;
memcpy(data_h,reference_tep,sizeof(char4)* new_len);
data_h+=(new_len*sizeof(char4)+127)/128*128;
data_size+=(new_len*sizeof(char4)+127)/128*128;
}
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
cudaMemcpy(data_d_total,data_h_total,data_size_to_copy,cudaMemcpyHostToDevice);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total);
char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128;
int * result_d=(int *) (data_d_total+data_size_to_copy);
char * cigar;
cudaMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int)));
int * cigar_int=(int *) (cigar+size*128*sizeof(char));
int * direction;
cudaMalloc( (int **) & direction, size * (640*1100* sizeof (int)));
dim3 block(128);
dim3 grid(size);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
calculate_cigar<<<grid,block>>> (size,data_d,num_add_d,result_d,cigar,cigar_int,direction); //result
cudaMemcpy(result_h,result_d,size*sizeof(int)*3,cudaMemcpyDeviceToHost);
cudaMemcpy(cigar_h,cigar,128*sizeof(char)*size, cudaMemcpyDeviceToHost);
cudaMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
for(int i=0;i<size;i++)
{
printf("%d %d\n",result_h[i*3],result_h[i*3+1]);
printf("[");
for(int j=0;j<result_h[i*3+2];j++)
{
if(j!=0) printf(", ");
printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]);
}
printf("]\n");
}
cudaFree(direction);
free(data_h_total);
cudaFree(data_d_total);
free(inputdata);
cudaFree(cigar);
free(cigar_int_h);
free(cigar_h);
// fscanf(file,"%d",&size);
}
printf(" computation_time= %e total_time=%e \n",computation_time,0);
return 0;
}
|
9d9ede39425190d791b5f35834b9ae335ffd492d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
/* insert correct index so that each element is calculated by a different thread */
c[FIXME] = a[FIXME] + b[FIXME];
}
/* experiment with different values of N */
/* how large can you make it? */
#define N 32
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
hipMalloc( (void **) &d_a, size );
hipMalloc( (void **) &d_b, size );
hipMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
hipMemcpy( d_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( d_b, b, size, hipMemcpyHostToDevice );
hipMemset( d_c, 0, size );
/* launch the kernel on the GPU */
/* insert the correct launch parameters to use 1 block and N threads */
/* how large can you make N? */
hipLaunchKernelGGL(( add), dim3(FIXME), dim3(FIXME) , 0, 0, d_a, d_b, d_c );
/* copy result back to host */
hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost );
for( int i = 0; i < N; i++ )
{
printf("c[%d] = %d\n",i,c[i]);
} /* end for */
/* clean up */
free(a);
free(b);
free(c);
hipFree( d_a );
hipFree( d_b );
hipFree( d_c );
return 0;
} /* end main */
| 9d9ede39425190d791b5f35834b9ae335ffd492d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/* Element-wise vector add: c[i] = a[i] + b[i].
 * Intended to be launched with 1 block of N threads (see the launch in
 * main), so each thread owns exactly one element and exactly N threads
 * run -- which is why no explicit bounds check against N is needed here. */
__global__ void add(int *a, int *b, int *c)
{
	/* global thread index: each element is calculated by a different thread */
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	c[index] = a[index] + b[index];
}
/* experiment with different values of N */
/* how large can you make it? */
#define N 32
/* Driver for the vector-add exercise: allocates host and device buffers,
 * launches the add kernel with 1 block of N threads, prints the result,
 * and releases all resources. Returns 0 on completion.
 * NOTE(review): N is bounded by the device's max threads per block
 * (typically 1024) with this single-block launch -- confirm before
 * raising N. */
int main()
{
	int *a, *b, *c;       /* host copies */
	int *d_a, *d_b, *d_c; /* device copies */
	int size = N * sizeof( int );
	/* allocate space for device copies of a, b, c */
	cudaMalloc( (void **) &d_a, size );
	cudaMalloc( (void **) &d_b, size );
	cudaMalloc( (void **) &d_c, size );
	/* allocate space for host copies of a, b, c and setup input values */
	a = (int *)malloc( size );
	b = (int *)malloc( size );
	c = (int *)malloc( size );
	for( int i = 0; i < N; i++ )
	{
		a[i] = b[i] = i;
		c[i] = 0;
	}
	/* copy inputs to device */
	cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
	cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
	cudaMemset( d_c, 0, size );
	/* launch the kernel on the GPU: 1 block of N threads, one thread
	   per element (fills in the FIXME launch parameters) */
	add<<< 1, N >>>( d_a, d_b, d_c );
	/* copy result back to host; a blocking cudaMemcpy on the null stream
	   also waits for the kernel to finish */
	cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
	for( int i = 0; i < N; i++ )
	{
		printf("c[%d] = %d\n",i,c[i]);
	} /* end for */
	/* clean up */
	free(a);
	free(b);
	free(c);
	cudaFree( d_a );
	cudaFree( d_b );
	cudaFree( d_c );
	return 0;
} /* end main */
|
79a2d07d622eaaf3064de0be8cea81aab3c9c0d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * The Binary Classification program implements the Binary Classification algorithm in parallel,
 * using MPI, OpenMP and CUDA (by Nvidia)
*
* @author Sagiv Asraf
* @Id : 312527450
* @since 25-08-2019
* Lecturer : Dr. Boris Moroz.
*
*/
#include "MainApp.h"
#define NUM_OF_THREADS_PER_BLOCK 1024
/* One thread per point: thread tid classifies point tid by projecting the
 * point's coordinates onto the weight vector and writing the sign of the
 * projection (+1 for a non-negative dot product, -1 otherwise) into
 * expectedSigns[tid]. Point i is stored contiguously at
 * pointsArray[i * weightsVectorSize .. i * weightsVectorSize + weightsVectorSize - 1]. */
__global__ void calcSign(int numOfPoints, int *expectedSigns, double* pointsArray, double* weightsArray, int weightsVectorSize)
{
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid < numOfPoints)
	{
		/* dot product of this point's coordinates with the weight vector */
		const double* point = pointsArray + tid * weightsVectorSize;
		double dot = 0;
		for (int k = 0; k < weightsVectorSize; k++)
		{
			dot += weightsArray[k] * point[k];
		}
		/* predicted binary label from the sign of the projection */
		expectedSigns[tid] = (dot >= 0) ? 1 : -1;
	}
}
/**
 * Helper that uses the GPU to calculate the sign of each point's projection
 * onto the weights vector: copies the point set and weight vector to device
 * memory, launches calcSign with one thread per point, and copies the
 * resulting +1/-1 labels back into expectedSignsArray.
 *
 * expectedSignsArray: host output buffer, numOfPoints ints (+1 or -1 each)
 * points:             host input, numOfPoints * weightsVectorSize doubles;
 *                     point i is stored contiguously starting at offset
 *                     i * weightsVectorSize (matches calcSign's indexing)
 * numOfPoints:        number of points to classify
 * weightsVector:      host input, weightsVectorSize doubles
 * weightsVectorSize:  coordinates per point / length of the weight vector
 *
 * Returns hipSuccess, or the first failing HIP status; on every path the
 * device buffers are released at the Error label (hipFree on a pointer that
 * was never allocated is still 0 here and is expected to be harmless --
 * NOTE(review): confirm hipFree(NULL/0) is a no-op on the target runtime).
 */
hipError_t signCalculationWithCuda(int* expectedSignsArray, double* points, int numOfPoints, double* weightsVector, int weightsVectorSize)
{
double* pointsArray_Cuda = 0;
double* weightsArray_Cuda = 0;
int* expctedArray_Cuda = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
printf("\n***hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n***");
goto Error;
}
/*Allocate device memory for the flattened points matrix (numOfPoints x weightsVectorSize)*/
cudaStatus = hipMalloc((void**)&pointsArray_Cuda, sizeof(double) * (numOfPoints * weightsVectorSize));
if (cudaStatus != hipSuccess) {
printf("\n***hipMalloc failed!\n***");
goto Error;
}
//Copy input arrays from host memory to GPU buffers.
cudaStatus = hipMemcpy(pointsArray_Cuda, points, sizeof(double) * (numOfPoints * weightsVectorSize), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
printf("\n***hipMemcpy Of PointsArray failed!\n***");
goto Error;
}
/*Allocate device memory for the weight vector*/
cudaStatus = hipMalloc((void**)&weightsArray_Cuda, weightsVectorSize * sizeof(double));
if (cudaStatus != hipSuccess) {
printf("\n***hipMalloc failed!\n***");
goto Error;
}
/*Allocate device memory for the per-point output signs*/
cudaStatus = hipMalloc((void**)&expctedArray_Cuda, numOfPoints * sizeof(int));
if (cudaStatus != hipSuccess) {
printf("\n***hipMalloc failed!\n***");
goto Error;
}
//Copy input arrays from host memory to GPU buffers.
cudaStatus = hipMemcpy(weightsArray_Cuda, weightsVector, weightsVectorSize * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
printf("\n***hipMemcpy failed!\n***");
goto Error;
}
/* Ceil-divide so every point gets a thread; Nvidia formula
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html
*/
int numOfBlocks = (numOfPoints + NUM_OF_THREADS_PER_BLOCK - 1) / NUM_OF_THREADS_PER_BLOCK;
// Launch a kernel on the GPU with numOfBlocks blocks and NUM_OF_THREADS_PER_BLOCK threads per each block.
calcSign << <numOfBlocks, NUM_OF_THREADS_PER_BLOCK >> >(numOfPoints, expctedArray_Cuda, pointsArray_Cuda, weightsArray_Cuda, weightsVectorSize);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
printf("\n***calcSign launch failed: %s\n\n***", hipGetErrorString(cudaStatus));
goto Error;
}
// Copy output array from GPU buffer to host memory.
// (The blocking memcpy also waits for the kernel to finish.)
cudaStatus = hipMemcpy(expectedSignsArray, expctedArray_Cuda, (numOfPoints * sizeof(int)), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
printf("\n***hipMemcpy LAST failed!\n\n***");
goto Error;
}
// Shared cleanup for both the success path and every error path above.
Error:
hipFree(expctedArray_Cuda);
hipFree(pointsArray_Cuda);
hipFree(weightsArray_Cuda);
return cudaStatus;
} | 79a2d07d622eaaf3064de0be8cea81aab3c9c0d2.cu | /**
 * The Binary Classification program implements the Binary Classification algorithm in parallel,
 * using MPI, OpenMP and CUDA (by Nvidia)
*
* @author Sagiv Asraf
* @Id : 312527450
* @since 25-08-2019
* Lecturer : Dr. Boris Moroz.
*
*/
#include "MainApp.h"
#define NUM_OF_THREADS_PER_BLOCK 1024
/* One thread per point: thread tid classifies point tid by projecting the
 * point's coordinates onto the weight vector and writing the sign of the
 * projection (+1 for a non-negative dot product, -1 otherwise) into
 * expectedSigns[tid]. Point i is stored contiguously at
 * pointsArray[i * weightsVectorSize .. i * weightsVectorSize + weightsVectorSize - 1]. */
__global__ void calcSign(int numOfPoints, int *expectedSigns, double* pointsArray, double* weightsArray, int weightsVectorSize)
{
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid < numOfPoints)
	{
		/* dot product of this point's coordinates with the weight vector */
		const double* point = pointsArray + tid * weightsVectorSize;
		double dot = 0;
		for (int k = 0; k < weightsVectorSize; k++)
		{
			dot += weightsArray[k] * point[k];
		}
		/* predicted binary label from the sign of the projection */
		expectedSigns[tid] = (dot >= 0) ? 1 : -1;
	}
}
/**
 * Helper that uses CUDA to calculate the sign of each point's projection
 * onto the weights vector: copies the point set and weight vector to device
 * memory, launches calcSign with one thread per point, and copies the
 * resulting +1/-1 labels back into expectedSignsArray.
 *
 * expectedSignsArray: host output buffer, numOfPoints ints (+1 or -1 each)
 * points:             host input, numOfPoints * weightsVectorSize doubles;
 *                     point i is stored contiguously starting at offset
 *                     i * weightsVectorSize (matches calcSign's indexing)
 * numOfPoints:        number of points to classify
 * weightsVector:      host input, weightsVectorSize doubles
 * weightsVectorSize:  coordinates per point / length of the weight vector
 *
 * Returns cudaSuccess, or the first failing CUDA status; on every path the
 * device buffers are released at the Error label (cudaFree on a pointer that
 * was never allocated is still 0 here -- the CUDA runtime documents
 * cudaFree(0) as a no-op, so this cleanup is safe).
 */
cudaError_t signCalculationWithCuda(int* expectedSignsArray, double* points, int numOfPoints, double* weightsVector, int weightsVectorSize)
{
double* pointsArray_Cuda = 0;
double* weightsArray_Cuda = 0;
int* expctedArray_Cuda = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
printf("\n***cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n***");
goto Error;
}
/*Allocate device memory for the flattened points matrix (numOfPoints x weightsVectorSize)*/
cudaStatus = cudaMalloc((void**)&pointsArray_Cuda, sizeof(double) * (numOfPoints * weightsVectorSize));
if (cudaStatus != cudaSuccess) {
printf("\n***cudaMalloc failed!\n***");
goto Error;
}
//Copy input arrays from host memory to GPU buffers.
cudaStatus = cudaMemcpy(pointsArray_Cuda, points, sizeof(double) * (numOfPoints * weightsVectorSize), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
printf("\n***cudaMemcpy Of PointsArray failed!\n***");
goto Error;
}
/*Allocate device memory for the weight vector*/
cudaStatus = cudaMalloc((void**)&weightsArray_Cuda, weightsVectorSize * sizeof(double));
if (cudaStatus != cudaSuccess) {
printf("\n***cudaMalloc failed!\n***");
goto Error;
}
/*Allocate device memory for the per-point output signs*/
cudaStatus = cudaMalloc((void**)&expctedArray_Cuda, numOfPoints * sizeof(int));
if (cudaStatus != cudaSuccess) {
printf("\n***cudaMalloc failed!\n***");
goto Error;
}
//Copy input arrays from host memory to GPU buffers.
cudaStatus = cudaMemcpy(weightsArray_Cuda, weightsVector, weightsVectorSize * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
printf("\n***cudaMemcpy failed!\n***");
goto Error;
}
/* Ceil-divide so every point gets a thread; Nvidia formula
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html
*/
int numOfBlocks = (numOfPoints + NUM_OF_THREADS_PER_BLOCK - 1) / NUM_OF_THREADS_PER_BLOCK;
// Launch a kernel on the GPU with numOfBlocks blocks and NUM_OF_THREADS_PER_BLOCK threads per each block.
calcSign << <numOfBlocks, NUM_OF_THREADS_PER_BLOCK >> >(numOfPoints, expctedArray_Cuda, pointsArray_Cuda, weightsArray_Cuda, weightsVectorSize);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
printf("\n***calcSign launch failed: %s\n\n***", cudaGetErrorString(cudaStatus));
goto Error;
}
// Copy output array from GPU buffer to host memory.
// (The blocking memcpy also waits for the kernel to finish.)
cudaStatus = cudaMemcpy(expectedSignsArray, expctedArray_Cuda, (numOfPoints * sizeof(int)), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
printf("\n***cudaMemcpy LAST failed!\n\n***");
goto Error;
}
// Shared cleanup for both the success path and every error path above.
Error:
cudaFree(expctedArray_Cuda);
cudaFree(pointsArray_Cuda);
cudaFree(weightsArray_Cuda);
return cudaStatus;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.